2023-04-15 23:39:07 +00:00
|
|
|
GOCMD=go
|
|
|
|
GOTEST=$(GOCMD) test
|
|
|
|
GOVET=$(GOCMD) vet
|
2023-04-19 16:43:10 +00:00
|
|
|
BINARY_NAME=local-ai
|
2023-05-04 10:26:59 +00:00
|
|
|
|
2023-07-04 17:02:02 +00:00
|
|
|
# llama.cpp versions
|
2023-10-22 06:55:44 +00:00
|
|
|
GOLLAMA_VERSION?=aeba71ee842819da681ea537e78846dc75949ac0
|
2023-07-04 17:02:02 +00:00
|
|
|
|
2023-08-20 14:35:42 +00:00
|
|
|
GOLLAMA_STABLE_VERSION?=50cee7712066d9e38306eccadcfbb44ea87df4b7
|
|
|
|
|
2024-01-22 22:22:01 +00:00
|
|
|
CPPLLAMA_VERSION?=6f9939d119b2d004c264952eb510bd106455531e
|
2023-10-16 19:46:29 +00:00
|
|
|
|
2023-07-04 17:02:02 +00:00
|
|
|
# gpt4all version
|
2023-06-14 16:07:05 +00:00
|
|
|
GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
|
2023-08-24 23:19:48 +00:00
|
|
|
GPT4ALL_VERSION?=27a8b020c36b0df8f8b82a252d261cda47cf44b8
|
2023-07-04 17:02:02 +00:00
|
|
|
|
|
|
|
# go-ggml-transformers version
|
2023-07-16 07:57:16 +00:00
|
|
|
GOGGMLTRANSFORMERS_VERSION?=ffb09d7dd71e2cbc6c5d7d05357d230eea6f369a
|
2023-07-04 17:02:02 +00:00
|
|
|
|
|
|
|
# go-rwkv version
|
2023-06-04 15:25:35 +00:00
|
|
|
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
|
2023-12-22 14:02:32 +00:00
|
|
|
RWKV_VERSION?=633c5a3485c403cb2520693dc0991a25dace9f0f
|
2023-07-04 17:02:02 +00:00
|
|
|
|
|
|
|
# whisper.cpp version
|
2023-12-24 07:53:29 +00:00
|
|
|
WHISPER_CPP_VERSION?=37a709f6558c6d9783199e2b8cbb136e1c41d346
|
2023-07-04 17:02:02 +00:00
|
|
|
|
|
|
|
# bert.cpp version
|
2023-07-17 21:58:25 +00:00
|
|
|
BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d
|
2023-07-04 17:02:02 +00:00
|
|
|
|
|
|
|
# go-piper version
|
2023-12-16 08:21:56 +00:00
|
|
|
PIPER_VERSION?=d6b6275ba037dabdba4a8b65dfdf6b2a73a67f07
|
2023-07-04 17:02:02 +00:00
|
|
|
|
|
|
|
# stablediffusion version
|
2023-11-18 07:18:43 +00:00
|
|
|
STABLEDIFFUSION_VERSION?=902db5f066fd137697e3b69d0fa10d4782bd2c2f
|
2023-07-04 17:02:02 +00:00
|
|
|
|
2023-12-24 19:27:24 +00:00
|
|
|
# tinydream version
|
|
|
|
TINYDREAM_VERSION?=772a9c0d9aaf768290e63cca3c904fe69faf677a
|
|
|
|
|
2023-06-05 12:26:20 +00:00
|
|
|
export BUILD_TYPE?=
|
2023-09-28 19:42:20 +00:00
|
|
|
export STABLE_BUILD_TYPE?=$(BUILD_TYPE)
|
2023-09-08 16:38:22 +00:00
|
|
|
export CMAKE_ARGS?=
|
2023-12-15 07:26:48 +00:00
|
|
|
|
2023-05-16 14:26:25 +00:00
|
|
|
CGO_LDFLAGS?=
|
|
|
|
CUDA_LIBPATH?=/usr/local/cuda/lib64/
|
2023-05-16 17:32:53 +00:00
|
|
|
GO_TAGS?=
|
2023-05-23 15:12:48 +00:00
|
|
|
BUILD_ID?=git
|
2023-06-26 13:12:43 +00:00
|
|
|
|
2023-10-19 11:50:40 +00:00
|
|
|
TEST_DIR=/tmp/test
|
|
|
|
|
|
|
|
RANDOM := $(shell bash -c 'echo $$RANDOM')
|
|
|
|
|
2023-07-02 09:14:09 +00:00
|
|
|
VERSION?=$(shell git describe --always --tags || echo "dev" )
|
2023-06-26 13:12:43 +00:00
|
|
|
# go tool nm ./local-ai | grep Commit
|
2023-06-22 15:53:10 +00:00
|
|
|
LD_FLAGS?=
|
2023-06-26 13:12:43 +00:00
|
|
|
override LD_FLAGS += -X "github.com/go-skynet/LocalAI/internal.Version=$(VERSION)"
|
|
|
|
override LD_FLAGS += -X "github.com/go-skynet/LocalAI/internal.Commit=$(shell git rev-parse HEAD)"
|
|
|
|
|
2023-05-16 17:32:53 +00:00
|
|
|
OPTIONAL_TARGETS?=
|
2023-05-03 09:45:22 +00:00
|
|
|
|
2023-05-23 15:12:48 +00:00
|
|
|
OS := $(shell uname -s)
|
|
|
|
ARCH := $(shell uname -m)
|
2023-04-15 23:39:07 +00:00
|
|
|
GREEN := $(shell tput -Txterm setaf 2)
|
|
|
|
YELLOW := $(shell tput -Txterm setaf 3)
|
|
|
|
WHITE := $(shell tput -Txterm setaf 7)
|
|
|
|
CYAN := $(shell tput -Txterm setaf 6)
|
|
|
|
RESET := $(shell tput -Txterm sgr0)
|
|
|
|
|
2023-10-19 11:50:40 +00:00
|
|
|
# Default Docker bridge IP
|
|
|
|
E2E_BRIDGE_IP?=172.17.0.1
|
|
|
|
|
2023-07-14 23:19:43 +00:00
|
|
|
ifndef UNAME_S
|
|
|
|
UNAME_S := $(shell uname -s)
|
|
|
|
endif
|
|
|
|
|
2023-11-30 18:50:50 +00:00
|
|
|
ifeq ($(OS),Darwin)
|
2023-09-04 17:11:28 +00:00
|
|
|
CGO_LDFLAGS += -lcblas -framework Accelerate
|
2023-11-30 18:50:50 +00:00
|
|
|
ifeq ($(OSX_SIGNING_IDENTITY),)
|
|
|
|
OSX_SIGNING_IDENTITY := $(shell security find-identity -v -p codesigning | grep '"' | head -n 1 | sed -E 's/.*"(.*)"/\1/')
|
|
|
|
endif
|
|
|
|
|
|
|
|
# on OSX, if BUILD_TYPE is blank, we should default to use Metal
|
|
|
|
ifeq ($(BUILD_TYPE),)
|
|
|
|
BUILD_TYPE=metal
|
|
|
|
# disable metal if on Darwin and any other value is explicitly passed.
|
|
|
|
else ifneq ($(BUILD_TYPE),metal)
|
|
|
|
CMAKE_ARGS+=-DLLAMA_METAL=OFF
|
|
|
|
endif
|
2023-07-14 23:19:43 +00:00
|
|
|
endif
|
|
|
|
|
2023-05-16 14:26:25 +00:00
|
|
|
ifeq ($(BUILD_TYPE),openblas)
|
|
|
|
CGO_LDFLAGS+=-lopenblas
|
2023-12-02 10:10:18 +00:00
|
|
|
export WHISPER_OPENBLAS=1
|
2023-04-21 22:29:32 +00:00
|
|
|
endif
|
|
|
|
|
2023-05-16 14:26:25 +00:00
|
|
|
ifeq ($(BUILD_TYPE),cublas)
|
|
|
|
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH)
|
2023-05-19 15:11:28 +00:00
|
|
|
export LLAMA_CUBLAS=1
|
2023-12-02 10:10:18 +00:00
|
|
|
export WHISPER_CUBLAS=1
|
2023-04-21 22:29:32 +00:00
|
|
|
endif
|
|
|
|
|
2023-09-28 19:42:20 +00:00
|
|
|
ifeq ($(BUILD_TYPE),hipblas)
|
|
|
|
ROCM_HOME ?= /opt/rocm
|
|
|
|
export CXX=$(ROCM_HOME)/llvm/bin/clang++
|
|
|
|
export CC=$(ROCM_HOME)/llvm/bin/clang
|
2023-11-18 07:18:43 +00:00
|
|
|
# llama-ggml has no hipblas support, so override it here.
|
2023-09-28 19:42:20 +00:00
|
|
|
export STABLE_BUILD_TYPE=
|
2023-12-02 10:10:18 +00:00
|
|
|
export WHISPER_HIPBLAS=1
|
2023-09-28 19:42:20 +00:00
|
|
|
GPU_TARGETS ?= gfx900,gfx90a,gfx1030,gfx1031,gfx1100
|
|
|
|
AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
|
|
|
|
CMAKE_ARGS+=-DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
|
|
|
|
CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link
|
|
|
|
endif
|
|
|
|
|
2023-06-05 12:26:20 +00:00
|
|
|
ifeq ($(BUILD_TYPE),metal)
|
|
|
|
CGO_LDFLAGS+=-framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
|
|
|
|
export LLAMA_METAL=1
|
2023-12-02 10:10:18 +00:00
|
|
|
export WHISPER_METAL=1
|
2023-06-05 12:26:20 +00:00
|
|
|
endif
|
|
|
|
|
2023-05-29 13:17:38 +00:00
|
|
|
ifeq ($(BUILD_TYPE),clblas)
|
|
|
|
CGO_LDFLAGS+=-lOpenCL -lclblast
|
2023-12-02 10:10:18 +00:00
|
|
|
export WHISPER_CLBLAST=1
|
2023-05-29 13:17:38 +00:00
|
|
|
endif
|
|
|
|
|
2023-05-24 14:42:24 +00:00
|
|
|
# glibc-static or glibc-devel-static required
|
|
|
|
ifeq ($(STATIC),true)
|
|
|
|
LD_FLAGS=-linkmode external -extldflags -static
|
|
|
|
endif
|
|
|
|
|
2023-06-22 15:53:10 +00:00
|
|
|
ifeq ($(findstring stablediffusion,$(GO_TAGS)),stablediffusion)
|
2023-07-14 23:19:43 +00:00
|
|
|
# OPTIONAL_TARGETS+=go-stable-diffusion/libstablediffusion.a
|
2023-07-14 23:19:43 +00:00
|
|
|
OPTIONAL_GRPC+=backend-assets/grpc/stablediffusion
|
2023-05-16 17:32:53 +00:00
|
|
|
endif
|
|
|
|
|
2023-12-24 19:27:24 +00:00
|
|
|
ifeq ($(findstring tinydream,$(GO_TAGS)),tinydream)
|
|
|
|
# OPTIONAL_TARGETS+=go-tiny-dream/libtinydream.a
|
|
|
|
OPTIONAL_GRPC+=backend-assets/grpc/tinydream
|
|
|
|
endif
|
|
|
|
|
2023-06-22 15:53:10 +00:00
|
|
|
ifeq ($(findstring tts,$(GO_TAGS)),tts)
|
2023-07-14 23:19:43 +00:00
|
|
|
# OPTIONAL_TARGETS+=go-piper/libpiper_binding.a
|
|
|
|
# OPTIONAL_TARGETS+=backend-assets/espeak-ng-data
|
2024-01-10 08:39:50 +00:00
|
|
|
PIPER_CGO_CXXFLAGS+=-I$(CURDIR)/sources/go-piper/piper/src/cpp -I$(CURDIR)/sources/go-piper/piper/build/fi/include -I$(CURDIR)/sources/go-piper/piper/build/pi/include -I$(CURDIR)/sources/go-piper/piper/build/si/include
|
|
|
|
PIPER_CGO_LDFLAGS+=-L$(CURDIR)/sources/go-piper/piper/build/fi/lib -L$(CURDIR)/sources/go-piper/piper/build/pi/lib -L$(CURDIR)/sources/go-piper/piper/build/si/lib -lfmt -lspdlog -lucd
|
2023-07-14 23:19:43 +00:00
|
|
|
OPTIONAL_GRPC+=backend-assets/grpc/piper
|
2023-06-22 15:53:10 +00:00
|
|
|
endif
|
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# NOTE: $(OPTIONAL_GRPC) is already the last word of ALL_GRPC_BACKENDS, so the
# GRPC_BACKENDS default must not append it a second time — doing so duplicated
# the optional backend build targets whenever GRPC_BACKENDS was left unset.
ALL_GRPC_BACKENDS=backend-assets/grpc/langchain-huggingface backend-assets/grpc/falcon-ggml backend-assets/grpc/bert-embeddings backend-assets/grpc/llama backend-assets/grpc/llama-cpp backend-assets/grpc/llama-ggml backend-assets/grpc/gpt4all backend-assets/grpc/dolly backend-assets/grpc/gpt2 backend-assets/grpc/gptj backend-assets/grpc/gptneox backend-assets/grpc/mpt backend-assets/grpc/replit backend-assets/grpc/starcoder backend-assets/grpc/rwkv backend-assets/grpc/whisper $(OPTIONAL_GRPC)
GRPC_BACKENDS?=$(ALL_GRPC_BACKENDS)
|
|
|
|
|
|
|
|
# If empty, then we build all
|
|
|
|
ifeq ($(GRPC_BACKENDS),)
|
|
|
|
GRPC_BACKENDS=$(ALL_GRPC_BACKENDS)
|
|
|
|
endif
|
2023-08-13 18:04:08 +00:00
|
|
|
|
2024-01-19 14:38:43 +00:00
|
|
|
ifeq ($(BUILD_API_ONLY),true)
|
|
|
|
GRPC_BACKENDS=
|
|
|
|
endif
|
|
|
|
|
2023-04-15 23:39:07 +00:00
|
|
|
# Declare every command-style target as phony so a stray file with the same
# name can never shadow it (previously only all/test/build/vendor were listed,
# so e.g. a file called `clean` would have silently disabled `make clean`).
# Stamp targets (prepare, prepare-sources, get-sources) are real files created
# with `touch $@` and must NOT be listed here.
.PHONY: all test build vendor clean rebuild run dist replace osx-signed prepare-test prepare-e2e run-e2e-image test-e2e teardown-e2e

# Default goal: print the help screen instead of building anything.
all: help
|
|
|
|
|
2023-05-11 12:31:19 +00:00
|
|
|
## GPT4ALL
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/gpt4all:
|
|
|
|
git clone --recurse-submodules $(GPT4ALL_REPO) sources/gpt4all
|
|
|
|
cd sources/gpt4all && git checkout -b build $(GPT4ALL_VERSION) && git submodule update --init --recursive --depth 1
|
2023-06-01 21:38:52 +00:00
|
|
|
|
2023-06-22 15:53:10 +00:00
|
|
|
## go-piper
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-piper:
|
|
|
|
git clone --recurse-submodules https://github.com/mudler/go-piper sources/go-piper
|
|
|
|
cd sources/go-piper && git checkout -b build $(PIPER_VERSION) && git submodule update --init --recursive --depth 1
|
2023-04-19 15:10:29 +00:00
|
|
|
|
2023-05-10 13:20:21 +00:00
|
|
|
## BERT embeddings
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-bert:
|
|
|
|
git clone --recurse-submodules https://github.com/go-skynet/go-bert.cpp sources/go-bert
|
|
|
|
cd sources/go-bert && git checkout -b build $(BERT_VERSION) && git submodule update --init --recursive --depth 1
|
2023-05-10 13:20:21 +00:00
|
|
|
|
2023-05-16 17:32:53 +00:00
|
|
|
## stable diffusion
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-stable-diffusion:
|
|
|
|
git clone --recurse-submodules https://github.com/mudler/go-stable-diffusion sources/go-stable-diffusion
|
|
|
|
cd sources/go-stable-diffusion && git checkout -b build $(STABLEDIFFUSION_VERSION) && git submodule update --init --recursive --depth 1
|
2023-05-16 17:32:53 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-stable-diffusion/libstablediffusion.a:
|
|
|
|
$(MAKE) -C sources/go-stable-diffusion libstablediffusion.a
|
2023-05-16 17:32:53 +00:00
|
|
|
|
2023-12-24 19:27:24 +00:00
|
|
|
## tiny-dream
|
|
|
|
sources/go-tiny-dream:
|
|
|
|
git clone --recurse-submodules https://github.com/M0Rf30/go-tiny-dream sources/go-tiny-dream
|
|
|
|
cd sources/go-tiny-dream && git checkout -b build $(TINYDREAM_VERSION) && git submodule update --init --recursive --depth 1
|
|
|
|
|
|
|
|
sources/go-tiny-dream/libtinydream.a:
|
|
|
|
$(MAKE) -C sources/go-tiny-dream libtinydream.a
|
|
|
|
|
2023-05-03 09:45:22 +00:00
|
|
|
## RWKV
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-rwkv:
|
|
|
|
git clone --recurse-submodules $(RWKV_REPO) sources/go-rwkv
|
|
|
|
cd sources/go-rwkv && git checkout -b build $(RWKV_VERSION) && git submodule update --init --recursive --depth 1
|
2023-05-03 09:45:22 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# Build the static rwkv library inside the vendored rwkv.cpp submodule and
# copy the artifact up into sources/go-rwkv/ where the Go binding expects it.
sources/go-rwkv/librwkv.a: sources/go-rwkv
	cd sources/go-rwkv/rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a ..
|
2023-05-03 09:45:22 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-bert/libgobert.a: sources/go-bert
|
|
|
|
$(MAKE) -C sources/go-bert libgobert.a
|
2023-05-10 13:20:21 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
backend-assets/gpt4all: sources/gpt4all/gpt4all-bindings/golang/libgpt4all.a
|
2023-06-01 21:38:52 +00:00
|
|
|
mkdir -p backend-assets/gpt4all
|
2023-11-18 07:18:43 +00:00
|
|
|
@cp sources/gpt4all/gpt4all-bindings/golang/buildllm/*.so backend-assets/gpt4all/ || true
|
|
|
|
@cp sources/gpt4all/gpt4all-bindings/golang/buildllm/*.dylib backend-assets/gpt4all/ || true
|
|
|
|
@cp sources/gpt4all/gpt4all-bindings/golang/buildllm/*.dll backend-assets/gpt4all/ || true
|
2023-06-01 21:38:52 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
backend-assets/espeak-ng-data: sources/go-piper
|
2023-06-22 15:53:10 +00:00
|
|
|
mkdir -p backend-assets/espeak-ng-data
|
2023-11-18 07:18:43 +00:00
|
|
|
$(MAKE) -C sources/go-piper piper.o
|
2023-12-07 21:58:41 +00:00
|
|
|
@cp -rf sources/go-piper/piper-phonemize/pi/share/espeak-ng-data/. backend-assets/espeak-ng-data
|
2023-06-22 15:53:10 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/gpt4all/gpt4all-bindings/golang/libgpt4all.a: sources/gpt4all
|
|
|
|
$(MAKE) -C sources/gpt4all/gpt4all-bindings/golang/ libgpt4all.a
|
2023-04-15 23:39:07 +00:00
|
|
|
|
2023-05-03 09:45:22 +00:00
|
|
|
## CEREBRAS GPT
|
2023-11-18 07:18:43 +00:00
|
|
|
# Clone the go-ggml-transformers binding and pin it to the commit configured
# at the top of this Makefile.
sources/go-ggml-transformers:
	git clone --recurse-submodules https://github.com/go-skynet/go-ggml-transformers.cpp sources/go-ggml-transformers
	# Fix: the recipe previously used $(GOGPT2_VERSION), a variable defined
	# nowhere in this Makefile — the empty ref made `git checkout -b build`
	# stay on the clone's default branch instead of the pinned commit.
	cd sources/go-ggml-transformers && git checkout -b build $(GOGGMLTRANSFORMERS_VERSION) && git submodule update --init --recursive --depth 1
|
2023-05-23 19:47:47 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-ggml-transformers/libtransformers.a: sources/go-ggml-transformers
|
|
|
|
$(MAKE) -C sources/go-ggml-transformers BUILD_TYPE=$(BUILD_TYPE) libtransformers.a
|
2023-04-20 17:33:36 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/whisper.cpp:
|
|
|
|
git clone https://github.com/ggerganov/whisper.cpp.git sources/whisper.cpp
|
|
|
|
cd sources/whisper.cpp && git checkout -b build $(WHISPER_CPP_VERSION) && git submodule update --init --recursive --depth 1
|
2023-05-09 09:43:50 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# Build the static whisper library from the vendored whisper.cpp checkout.
sources/whisper.cpp/libwhisper.a: sources/whisper.cpp
	# Fix: use $(MAKE) -C rather than a literal `cd … && make` so jobserver
	# state and -j/-n flags propagate to the sub-make, matching how every
	# other sub-build in this file is invoked.
	$(MAKE) -C sources/whisper.cpp libwhisper.a
|
2023-05-09 09:43:50 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-llama:
|
|
|
|
git clone --recurse-submodules https://github.com/go-skynet/go-llama.cpp sources/go-llama
|
|
|
|
cd sources/go-llama && git checkout -b build $(GOLLAMA_VERSION) && git submodule update --init --recursive --depth 1
|
2023-04-19 15:10:29 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-llama-ggml:
|
|
|
|
git clone --recurse-submodules https://github.com/go-skynet/go-llama.cpp sources/go-llama-ggml
|
|
|
|
cd sources/go-llama-ggml && git checkout -b build $(GOLLAMA_STABLE_VERSION) && git submodule update --init --recursive --depth 1
|
2023-08-20 14:35:42 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-llama/libbinding.a: sources/go-llama
|
|
|
|
$(MAKE) -C sources/go-llama BUILD_TYPE=$(BUILD_TYPE) libbinding.a
|
2023-04-19 15:10:29 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-llama-ggml/libbinding.a: sources/go-llama-ggml
|
|
|
|
$(MAKE) -C sources/go-llama-ggml BUILD_TYPE=$(STABLE_BUILD_TYPE) libbinding.a
|
2023-08-20 14:35:42 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
sources/go-piper/libpiper_binding.a: sources/go-piper
|
|
|
|
$(MAKE) -C sources/go-piper libpiper_binding.a example/main
|
2023-06-22 15:53:10 +00:00
|
|
|
|
2023-11-25 07:48:24 +00:00
|
|
|
backend/cpp/llama/llama.cpp:
|
2023-12-15 07:26:48 +00:00
|
|
|
LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama llama.cpp
|
2023-11-25 07:48:24 +00:00
|
|
|
|
2023-12-24 19:27:24 +00:00
|
|
|
get-sources: backend/cpp/llama/llama.cpp sources/go-llama sources/go-llama-ggml sources/go-ggml-transformers sources/gpt4all sources/go-piper sources/go-rwkv sources/whisper.cpp sources/go-bert sources/go-stable-diffusion sources/go-tiny-dream
|
2023-06-26 20:34:03 +00:00
|
|
|
touch $@
|
|
|
|
|
2023-04-20 17:33:36 +00:00
|
|
|
replace:
|
2024-01-10 08:39:50 +00:00
|
|
|
$(GOCMD) mod edit -replace github.com/nomic-ai/gpt4all/gpt4all-bindings/golang=$(CURDIR)/sources/gpt4all/gpt4all-bindings/golang
|
|
|
|
$(GOCMD) mod edit -replace github.com/go-skynet/go-ggml-transformers.cpp=$(CURDIR)/sources/go-ggml-transformers
|
|
|
|
$(GOCMD) mod edit -replace github.com/donomii/go-rwkv.cpp=$(CURDIR)/sources/go-rwkv
|
|
|
|
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
|
|
|
|
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
|
|
|
|
$(GOCMD) mod edit -replace github.com/go-skynet/go-bert.cpp=$(CURDIR)/sources/go-bert
|
|
|
|
$(GOCMD) mod edit -replace github.com/mudler/go-stable-diffusion=$(CURDIR)/sources/go-stable-diffusion
|
|
|
|
$(GOCMD) mod edit -replace github.com/M0Rf30/go-tiny-dream=$(CURDIR)/sources/go-tiny-dream
|
|
|
|
$(GOCMD) mod edit -replace github.com/mudler/go-piper=$(CURDIR)/sources/go-piper
|
2023-04-20 17:33:36 +00:00
|
|
|
|
2023-06-26 20:34:03 +00:00
|
|
|
prepare-sources: get-sources replace
|
2023-04-29 18:38:37 +00:00
|
|
|
$(GOCMD) mod download
|
2023-11-25 07:48:24 +00:00
|
|
|
touch $@
|
2023-04-29 18:38:37 +00:00
|
|
|
|
2023-05-03 09:45:22 +00:00
|
|
|
## GENERIC
|
|
|
|
rebuild: ## Rebuilds the project
|
2023-07-05 16:24:55 +00:00
|
|
|
$(GOCMD) clean -cache
|
2023-11-18 07:18:43 +00:00
|
|
|
$(MAKE) -C sources/go-llama clean
|
|
|
|
$(MAKE) -C sources/go-llama-ggml clean
|
|
|
|
$(MAKE) -C sources/gpt4all/gpt4all-bindings/golang/ clean
|
|
|
|
$(MAKE) -C sources/go-ggml-transformers clean
|
|
|
|
$(MAKE) -C sources/go-rwkv clean
|
|
|
|
$(MAKE) -C sources/whisper.cpp clean
|
|
|
|
$(MAKE) -C sources/go-stable-diffusion clean
|
|
|
|
$(MAKE) -C sources/go-bert clean
|
|
|
|
$(MAKE) -C sources/go-piper clean
|
2023-12-24 19:27:24 +00:00
|
|
|
$(MAKE) -C sources/go-tiny-dream clean
|
2023-04-29 18:38:37 +00:00
|
|
|
$(MAKE) build
|
|
|
|
|
2023-09-04 17:11:28 +00:00
|
|
|
prepare: prepare-sources $(OPTIONAL_TARGETS)
|
2023-06-26 20:34:03 +00:00
|
|
|
touch $@
|
2023-04-19 15:10:29 +00:00
|
|
|
|
2023-04-15 23:39:07 +00:00
|
|
|
clean: ## Remove build related file
|
2023-07-05 16:24:55 +00:00
|
|
|
$(GOCMD) clean -cache
|
2023-07-25 21:02:46 +00:00
|
|
|
rm -f prepare
|
2023-11-18 07:18:43 +00:00
|
|
|
rm -rf ./sources
|
2023-04-19 15:10:29 +00:00
|
|
|
rm -rf $(BINARY_NAME)
|
2023-05-23 15:12:48 +00:00
|
|
|
rm -rf release/
|
2024-01-19 14:38:43 +00:00
|
|
|
rm -rf backend-assets
|
2024-01-13 09:08:26 +00:00
|
|
|
$(MAKE) -C backend/cpp/grpc clean
|
2023-10-16 19:46:29 +00:00
|
|
|
$(MAKE) -C backend/cpp/llama clean
|
2023-04-15 23:39:07 +00:00
|
|
|
|
2023-05-03 09:45:22 +00:00
|
|
|
## Build:
|
|
|
|
|
2024-01-19 14:38:43 +00:00
|
|
|
build: backend-assets grpcs prepare ## Build the project
|
2023-05-03 09:45:22 +00:00
|
|
|
$(info ${GREEN}I local-ai build info:${RESET})
|
|
|
|
$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
|
2023-05-16 17:32:53 +00:00
|
|
|
$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
|
2023-06-26 13:12:43 +00:00
|
|
|
$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
|
2023-07-14 23:19:43 +00:00
|
|
|
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
|
2023-05-03 09:45:22 +00:00
|
|
|
|
2023-05-23 15:12:48 +00:00
|
|
|
# Package the freshly built binary under release/ with a
# build-id/OS/architecture-qualified file name.
dist: build
	mkdir -p release && cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)
|
|
|
|
|
2023-11-23 14:22:54 +00:00
|
|
|
osx-signed: build
|
|
|
|
codesign --deep --force --sign "$(OSX_SIGNING_IDENTITY)" --entitlements "./Entitlements.plist" "./$(BINARY_NAME)"
|
|
|
|
|
2023-05-03 09:45:22 +00:00
|
|
|
## Run
|
|
|
|
run: prepare ## run local-ai
|
2023-07-14 23:19:43 +00:00
|
|
|
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./
|
2023-04-15 23:39:07 +00:00
|
|
|
|
2023-04-21 22:44:52 +00:00
|
|
|
# Download the model fixtures used by the test suite into test-models/ and
# the audio sample used by the whisper tests into test-dir/.
test-models/testmodel:
	# Fix: `mkdir` without -p aborts the recipe when the directory already
	# exists (e.g. on a partial re-run after an interrupted download).
	mkdir -p test-models
	mkdir -p test-dir
	wget -q https://huggingface.co/nnakasato/ggml-model-test/resolve/main/ggml-model-q4.bin -O test-models/testmodel
	wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
	wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
	wget -q https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav -O test-dir/audio.wav
	wget -q https://huggingface.co/mudler/rwkv-4-raven-1.5B-ggml/resolve/main/RWKV-4-Raven-1B5-v11-Eng99%2525-Other1%2525-20230425-ctx4096_Q4_0.bin -O test-models/rwkv
	wget -q https://raw.githubusercontent.com/saharNooby/rwkv.cpp/5eb8f09c146ea8124633ab041d9ea0b1f1db4459/rwkv/20B_tokenizer.json -O test-models/rwkv.tokenizer.json
	cp tests/models_fixtures/* test-models
|
2023-04-21 22:44:52 +00:00
|
|
|
|
2023-07-14 23:19:43 +00:00
|
|
|
prepare-test: grpcs
|
2023-07-14 23:19:43 +00:00
|
|
|
cp -rf backend-assets api
|
2023-05-18 13:59:03 +00:00
|
|
|
cp tests/models_fixtures/* test-models
|
2023-07-14 23:19:43 +00:00
|
|
|
|
|
|
|
# Run the full test suite: the main ginkgo run excludes the backend-specific
# labels, which are then exercised one sub-make at a time below.
test: prepare test-models/testmodel grpcs
	@echo 'Running tests'
	# Fix: each recipe line runs in its own shell, so a bare
	# `export GO_TAGS=…` on its own line never reached the next command.
	# Pass the tags to the sub-make invocation explicitly instead.
	$(MAKE) GO_TAGS="tts stablediffusion" prepare-test
	HUGGINGFACE_GRPC=$(abspath ./)/backend/python/sentencetransformers/run.sh TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="!gpt4all && !llama && !llama-gguf" --flake-attempts 5 --fail-fast -v -r ./api ./pkg
	$(MAKE) test-gpt4all
	$(MAKE) test-llama
	$(MAKE) test-llama-gguf
	$(MAKE) test-tts
	$(MAKE) test-stablediffusion
|
|
|
|
|
2023-10-19 11:50:40 +00:00
|
|
|
prepare-e2e:
|
|
|
|
mkdir -p $(TEST_DIR)
|
|
|
|
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
|
|
|
|
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
|
|
|
|
docker build --build-arg BUILD_GRPC=true --build-arg GRPC_BACKENDS="$(GRPC_BACKENDS)" --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=11 --build-arg CUDA_MINOR_VERSION=7 --build-arg FFMPEG=true -t localai-tests .
|
|
|
|
|
|
|
|
run-e2e-image:
|
|
|
|
ls -liah $(abspath ./tests/e2e-fixtures)
|
|
|
|
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --gpus all --name e2e-tests-$(RANDOM) localai-tests
|
|
|
|
|
|
|
|
# Run the end-to-end suite against an already-running test container
# (see run-e2e-image). E2E_BRIDGE_IP must point at the docker bridge/host
# address where port 5390 is reachable.
test-e2e:
	@echo 'Running e2e tests'
	BUILD_TYPE=$(BUILD_TYPE) \
	LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts 5 -v -r ./tests/e2e
|
|
|
|
|
|
|
|
# Remove the e2e fixtures directory and stop any running test containers.
teardown-e2e:
	# Guard: with an empty TEST_DIR the rm below would run `rm -rf` on nothing
	# meaningful at best and be dangerous at worst — fail loudly instead.
	$(if $(strip $(TEST_DIR)),,$(error TEST_DIR is empty - refusing to rm -rf))
	rm -rf $(TEST_DIR) || true
	# `-` prefix: the original failed hard when no container matched the filter
	# (docker stop with no arguments); teardown should be best-effort.
	-docker stop $$(docker ps -q --filter ancestor=localai-tests)
|
|
|
|
|
2023-07-14 23:19:43 +00:00
|
|
|
# Run only the ginkgo specs labelled "gpt4all" (flaky: retried up to 5 times).
test-gpt4all: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="gpt4all" --flake-attempts 5 -v -r ./api ./pkg
|
|
|
|
|
|
|
|
# Run only the ginkgo specs labelled "llama" (flaky: retried up to 5 times).
test-llama: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama" --flake-attempts 5 -v -r ./api ./pkg
|
|
|
|
|
2023-08-23 23:18:58 +00:00
|
|
|
# Run only the ginkgo specs labelled "llama-gguf" (flaky: retried up to 5 times).
test-llama-gguf: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama-gguf" --flake-attempts 5 -v -r ./api ./pkg
|
|
|
|
|
2023-07-14 23:19:43 +00:00
|
|
|
# Run only the ginkgo specs labelled "tts" (no retries: --flake-attempts 1).
test-tts: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="tts" --flake-attempts 1 -v -r ./api ./pkg
|
|
|
|
|
|
|
|
# Run only the ginkgo specs labelled "stablediffusion" (no retries).
test-stablediffusion: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stablediffusion" --flake-attempts 1 -v -r ./api ./pkg
|
|
|
|
|
|
|
|
# Build the `requirements` stage of the main Dockerfile and drop into an
# interactive shell inside it, with the repository mounted at /build.
test-container:
	docker build --target requirements -t local-ai-test-container .
	# Fix: the original passed -ti twice; once is enough.
	docker run -ti --rm --entrypoint /bin/bash -v $(abspath ./):/build local-ai-test-container
|
2023-04-15 23:39:07 +00:00
|
|
|
|
|
|
|
## Help:

# Self-documenting help target: scans every parsed makefile and prints each
# target that carries a trailing `## description` comment; standalone
# `## Section` lines become section headers. YELLOW/GREEN/CYAN/RESET are
# optional terminal color escape variables (empty if unset).
help: ## Show this help.
	@echo ''
	@echo 'Usage:'
	@echo '  ${YELLOW}make${RESET} ${GREEN}<target>${RESET}'
	@echo ''
	@echo 'Targets:'
	@awk 'BEGIN {FS = ":.*?## "} { \
		if (/^[a-zA-Z_-]+:.*?##.*$$/) {printf "    ${YELLOW}%-20s${GREEN}%s${RESET}\n", $$1, $$2} \
		else if (/^## .*$$/) {printf "  ${CYAN}%s${RESET}\n", substr($$1,4)} \
		}' $(MAKEFILE_LIST)
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-07-20 20:10:42 +00:00
|
|
|
# Regenerate all gRPC bindings (Go and Python) from backend/backend.proto.
protogen: protogen-go protogen-python
|
|
|
|
|
|
|
|
# Generate the Go gRPC stubs from backend/backend.proto into pkg/grpc/proto/.
# Requires protoc plus the protoc-gen-go and protoc-gen-go-grpc plugins on PATH.
protogen-go:
	protoc -Ibackend/ --go_out=pkg/grpc/proto/ --go_opt=paths=source_relative --go-grpc_out=pkg/grpc/proto/ --go-grpc_opt=paths=source_relative \
		backend/backend.proto
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-07-20 20:10:42 +00:00
|
|
|
# Python extra backends that need generated gRPC stubs.
PROTOGEN_PYTHON_BACKENDS := autogptq bark coqui diffusers exllama exllama2 mamba petals sentencetransformers transformers transformers-musicgen vall-e-x vllm

# Generate the Python gRPC stubs for every extra backend from
# backend/backend.proto (requires the grpcio-tools package).
# The original repeated the same protoc invocation once per backend; a single
# loop keeps the backend list in one place. `|| exit 1` preserves the original
# fail-fast behaviour (each separate recipe line stopped make on failure).
protogen-python:
	for b in $(PROTOGEN_PYTHON_BACKENDS); do \
		python3 -m grpc_tools.protoc -Ibackend/ --python_out=backend/python/$$b/ --grpc_python_out=backend/python/$$b/ backend/backend.proto || exit 1; \
	done
|
2023-07-20 20:10:42 +00:00
|
|
|
|
2023-07-14 23:19:43 +00:00
|
|
|
## GRPC
|
2023-11-05 09:31:33 +00:00
|
|
|
# Note: it is duplicated in the Dockerfile
|
feat(conda): conda environments (#1144)
* feat(autogptq): add a separate conda environment for autogptq (#1137)
**Description**
This PR related to #1117
**Notes for Reviewers**
Here we lock down the version of the dependencies. Make sure it can be
used all the time without failed if the version of dependencies were
upgraded.
I change the order of importing packages according to the pylint, and no
change the logic of code. It should be ok.
I will do more investigate on writing some test cases for every backend.
I can run the service in my environment, but there is not exist a way to
test it. So, I am not confident on it.
Add a README.md in the `grpc` root. This is the common commands for
creating `conda` environment. And it can be used to the reference file
for creating extral gRPC backend document.
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* [Extra backend] Add separate environment for ttsbark (#1141)
**Description**
This PR relates to #1117
**Notes for Reviewers**
Same to the latest PR:
* The code is also changed, but only the order of the import package
parts. And some code comments are also added.
* Add a configuration of the `conda` environment
* Add a simple test case for testing whether the service can start up in the
current `conda` environment. It succeeds in VSCode, but it does not work out
of the box in the terminal. So it is hard to say whether the test case is
really useful.
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): add make target and entrypoints for the dockerfile
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add seperate conda env for diffusers (#1145)
**Description**
This PR relates to #1117
**Notes for Reviewers**
* Add `conda` env `diffusers.yml`
* Add Makefile to create it automatically
* Add `run.sh` to support running as a extra backend
* Also adding it to the main Dockerfile
* Add make command in the root Makefile
* Testing the server, it can start up under the env
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate env for vllm (#1148)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server can be started as normal
* The test case can be triggered in VSCode
* Same to other this kind of PRs, add `vllm.yml` Makefile and add
`run.sh` to the main Dockerfile, and command to the main Makefile
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate env for huggingface (#1146)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* Add conda env `huggingface.yml`
* Change the import order, and also remove the no-used packages
* Add `run.sh` and `make command` to the main Dockerfile and Makefile
* Add test cases for it. It can be triggered and succeed under VSCode
Python extension but it is hang by using `python -m unites
test_huggingface.py` in the terminal
```
Running tests (unittest): /workspaces/LocalAI/extra/grpc/huggingface
Running tests: /workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_embedding
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_load_model
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_server_startup
./test_huggingface.py::TestBackendServicer::test_embedding Passed
./test_huggingface.py::TestBackendServicer::test_load_model Passed
./test_huggingface.py::TestBackendServicer::test_server_startup Passed
Total number of tests expected to run: 3
Total number of tests run: 3
Total number of tests passed: 3
Total number of tests failed: 0
Total number of tests failed with errors: 0
Total number of tests skipped: 0
Finished running tests!
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add the seperate conda env for VALL-E X (#1147)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server cannot start up
```
(ttsvalle) @Aisuko ➜ /workspaces/LocalAI (feat/vall-e-x) $ /opt/conda/envs/ttsvalle/bin/python /workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py
Traceback (most recent call last):
File "/workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py", line 14, in <module>
from utils.generation import SAMPLE_RATE, generate_audio, preload_models
ModuleNotFoundError: No module named 'utils'
```
The installation steps follow
https://github.com/Plachtaa/VALL-E-X#-installation below:
* Under the `ttsvalle` conda env
```
git clone https://github.com/Plachtaa/VALL-E-X.git
cd VALL-E-X
pip install -r requirements.txt
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fix: set image type
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate conda env for exllama (#1149)
Add seperate env for exllama
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Setup conda
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Set image_type arg
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: prepare only conda env in tests
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Dockerfile: comment manual pip calls
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* conda: add conda to PATH
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fixes
* add shebang
* Fixups
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* file perms
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* debug
* Install new conda in the worker
* Disable GPU tests for now until the worker is back
* Rename workflows
* debug
* Fixup conda install
* fixup(wrapper): pass args
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Co-authored-by: Aisuko <urakiny@gmail.com>
2023-11-04 14:30:32 +00:00
|
|
|
# Create the per-backend conda environments for the Python extra backends by
# invoking each backend's own Makefile (default target).
prepare-extra-conda-environments:
	$(MAKE) -C backend/python/autogptq
	$(MAKE) -C backend/python/bark
	$(MAKE) -C backend/python/coqui
	$(MAKE) -C backend/python/diffusers
	$(MAKE) -C backend/python/vllm
	$(MAKE) -C backend/python/mamba
	$(MAKE) -C backend/python/sentencetransformers
	$(MAKE) -C backend/python/transformers
	$(MAKE) -C backend/python/transformers-musicgen
	$(MAKE) -C backend/python/vall-e-x
	$(MAKE) -C backend/python/exllama
	$(MAKE) -C backend/python/petals
	$(MAKE) -C backend/python/exllama2
|
feat(conda): conda environments (#1144)
* feat(autogptq): add a separate conda environment for autogptq (#1137)
**Description**
This PR related to #1117
**Notes for Reviewers**
Here we lock down the version of the dependencies. Make sure it can be
used all the time without failed if the version of dependencies were
upgraded.
I change the order of importing packages according to the pylint, and no
change the logic of code. It should be ok.
I will do more investigate on writing some test cases for every backend.
I can run the service in my environment, but there is not exist a way to
test it. So, I am not confident on it.
Add a README.md in the `grpc` root. This is the common commands for
creating `conda` environment. And it can be used to the reference file
for creating extral gRPC backend document.
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* [Extra backend] Add seperate environment for ttsbark (#1141)
**Description**
This PR relates to #1117
**Notes for Reviewers**
Same to the latest PR:
* The code is also changed, but only the order of the import package
parts. And some code comments are also added.
* Add a configuration of the `conda` environment
* Add a simple test case for testing if the service can be startup in
current `conda` environment. It is succeed in VSCode, but the it is not
out of box on terminal. So, it is hard to say the test case really
useful.
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): add make target and entrypoints for the dockerfile
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add seperate conda env for diffusers (#1145)
**Description**
This PR relates to #1117
**Notes for Reviewers**
* Add `conda` env `diffusers.yml`
* Add Makefile to create it automatically
* Add `run.sh` to support running as a extra backend
* Also adding it to the main Dockerfile
* Add make command in the root Makefile
* Testing the server, it can start up under the env
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate env for vllm (#1148)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server can be started as normal
* The test case can be triggered in VSCode
* Same to other this kind of PRs, add `vllm.yml` Makefile and add
`run.sh` to the main Dockerfile, and command to the main Makefile
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate env for huggingface (#1146)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* Add conda env `huggingface.yml`
* Change the import order, and also remove the no-used packages
* Add `run.sh` and `make command` to the main Dockerfile and Makefile
* Add test cases for it. It can be triggered and succeed under VSCode
Python extension but it is hang by using `python -m unites
test_huggingface.py` in the terminal
```
Running tests (unittest): /workspaces/LocalAI/extra/grpc/huggingface
Running tests: /workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_embedding
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_load_model
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_server_startup
./test_huggingface.py::TestBackendServicer::test_embedding Passed
./test_huggingface.py::TestBackendServicer::test_load_model Passed
./test_huggingface.py::TestBackendServicer::test_server_startup Passed
Total number of tests expected to run: 3
Total number of tests run: 3
Total number of tests passed: 3
Total number of tests failed: 0
Total number of tests failed with errors: 0
Total number of tests skipped: 0
Finished running tests!
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add the seperate conda env for VALL-E X (#1147)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server cannot start up
```
(ttsvalle) @Aisuko ➜ /workspaces/LocalAI (feat/vall-e-x) $ /opt/conda/envs/ttsvalle/bin/python /workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py
Traceback (most recent call last):
File "/workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py", line 14, in <module>
from utils.generation import SAMPLE_RATE, generate_audio, preload_models
ModuleNotFoundError: No module named 'utils'
```
The installation steps follow
https://github.com/Plachtaa/VALL-E-X#-installation below:
* Under the `ttsvalle` conda env
```
git clone https://github.com/Plachtaa/VALL-E-X.git
cd VALL-E-X
pip install -r requirements.txt
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fix: set image type
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate conda env for exllama (#1149)
Add seperate env for exllama
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Setup conda
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Set image_type arg
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: prepare only conda env in tests
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Dockerfile: comment manual pip calls
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* conda: add conda to PATH
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fixes
* add shebang
* Fixups
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* file perms
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* debug
* Install new conda in the worker
* Disable GPU tests for now until the worker is back
* Rename workflows
* debug
* Fixup conda install
* fixup(wrapper): pass args
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Co-authored-by: Aisuko <urakiny@gmail.com>
2023-11-04 14:30:32 +00:00
|
|
|
|
2023-12-08 14:45:04 +00:00
|
|
|
# Prepare only the Python backends exercised by `test-extra`.
prepare-test-extra:
	$(MAKE) -C backend/python/transformers
	$(MAKE) -C backend/python/diffusers
|
2023-12-08 14:45:04 +00:00
|
|
|
|
|
|
|
# Run the test suites of the Python extra backends (each backend's Makefile
# provides its own `test` target).
test-extra: prepare-test-extra
	$(MAKE) -C backend/python/transformers test
	$(MAKE) -C backend/python/diffusers test
|
feat(conda): conda environments (#1144)
* feat(autogptq): add a separate conda environment for autogptq (#1137)
**Description**
This PR related to #1117
**Notes for Reviewers**
Here we lock down the version of the dependencies. Make sure it can be
used all the time without failed if the version of dependencies were
upgraded.
I change the order of importing packages according to the pylint, and no
change the logic of code. It should be ok.
I will do more investigate on writing some test cases for every backend.
I can run the service in my environment, but there is not exist a way to
test it. So, I am not confident on it.
Add a README.md in the `grpc` root. This is the common commands for
creating `conda` environment. And it can be used to the reference file
for creating extral gRPC backend document.
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* [Extra backend] Add seperate environment for ttsbark (#1141)
**Description**
This PR relates to #1117
**Notes for Reviewers**
Same to the latest PR:
* The code is also changed, but only the order of the import package
parts. And some code comments are also added.
* Add a configuration of the `conda` environment
* Add a simple test case for testing if the service can be startup in
current `conda` environment. It is succeed in VSCode, but the it is not
out of box on terminal. So, it is hard to say the test case really
useful.
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): add make target and entrypoints for the dockerfile
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add seperate conda env for diffusers (#1145)
**Description**
This PR relates to #1117
**Notes for Reviewers**
* Add `conda` env `diffusers.yml`
* Add Makefile to create it automatically
* Add `run.sh` to support running as a extra backend
* Also adding it to the main Dockerfile
* Add make command in the root Makefile
* Testing the server, it can start up under the env
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda):Add seperate env for vllm (#1148)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server can be started as normal
* The test case can be triggered in VSCode
* Same to other this kind of PRs, add `vllm.yml` Makefile and add
`run.sh` to the main Dockerfile, and command to the main Makefile
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate env for huggingface (#1146)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* Add conda env `huggingface.yml`
* Change the import order, and also remove the no-used packages
* Add `run.sh` and `make command` to the main Dockerfile and Makefile
* Add test cases for it. They can be triggered and succeed under the VSCode
Python extension, but the run hangs when using `python -m unittest
test_huggingface.py` in the terminal
```
Running tests (unittest): /workspaces/LocalAI/extra/grpc/huggingface
Running tests: /workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_embedding
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_load_model
/workspaces/LocalAI/extra/grpc/huggingface/test_huggingface.py::TestBackendServicer::test_server_startup
./test_huggingface.py::TestBackendServicer::test_embedding Passed
./test_huggingface.py::TestBackendServicer::test_load_model Passed
./test_huggingface.py::TestBackendServicer::test_server_startup Passed
Total number of tests expected to run: 3
Total number of tests run: 3
Total number of tests passed: 3
Total number of tests failed: 0
Total number of tests failed with errors: 0
Total number of tests skipped: 0
Finished running tests!
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add the separate conda env for VALL-E X (#1147)
**Description**
This PR is related to #1117
**Notes for Reviewers**
* The gRPC server cannot start up
```
(ttsvalle) @Aisuko ➜ /workspaces/LocalAI (feat/vall-e-x) $ /opt/conda/envs/ttsvalle/bin/python /workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py
Traceback (most recent call last):
File "/workspaces/LocalAI/extra/grpc/vall-e-x/ttsvalle.py", line 14, in <module>
from utils.generation import SAMPLE_RATE, generate_audio, preload_models
ModuleNotFoundError: No module named 'utils'
```
The installation steps follow
https://github.com/Plachtaa/VALL-E-X#-installation below:
* Under the `ttsvalle` conda env
```
git clone https://github.com/Plachtaa/VALL-E-X.git
cd VALL-E-X
pip install -r requirements.txt
```
**[Signed
commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [x] Yes, I signed my commits.
<!--
Thank you for contributing to LocalAI!
Contributing Conventions
-------------------------
The draft above helps to give a quick overview of your PR.
Remember to remove this comment and to at least:
1. Include descriptive PR titles with [<component-name>] prepended. We
use [conventional
commits](https://www.conventionalcommits.org/en/v1.0.0/).
2. Build and test your changes before submitting a PR (`make build`).
3. Sign your commits
4. **Tag maintainer:** for a quicker response, tag the relevant
maintainer (see below).
5. **X/Twitter handle:** we announce bigger features on X/Twitter. If
your PR gets announced, and you'd like a mention, we'll gladly shout you
out!
By following the community's contribution conventions upfront, the
review process will
be accelerated and your PR merged more quickly.
If no one reviews your PR within a few days, please @-mention @mudler.
-->
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fix: set image type
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* feat(conda): Add separate conda env for exllama (#1149)
Add separate env for exllama
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Setup conda
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Set image_type arg
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* ci: prepare only conda env in tests
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* Dockerfile: comment manual pip calls
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* conda: add conda to PATH
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* fixes
* add shebang
* Fixups
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* file perms
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
* debug
* Install new conda in the worker
* Disable GPU tests for now until the worker is back
* Rename workflows
* debug
* Fixup conda install
* fixup(wrapper): pass args
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
---------
Signed-off-by: GitHub <noreply@github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Aisuko <urakiny@gmail.com>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Co-authored-by: Aisuko <urakiny@gmail.com>
2023-11-04 14:30:32 +00:00
|
|
|
|
2024-01-19 14:38:43 +00:00
|
|
|
# Staging directory for assets that get embedded into the final binary.
backend-assets:
	mkdir -p $@
# When only the API is built there are no backend binaries to embed;
# drop a placeholder so the directory is never empty.
ifeq ($(BUILD_API_ONLY),true)
	touch $@/keep
endif
2023-07-14 23:19:43 +00:00
|
|
|
# Output directory for the per-backend gRPC server binaries.
backend-assets/grpc:
	mkdir -p $@
2023-11-18 07:18:43 +00:00
|
|
|
# go-llama based llama backend. The `mod edit -replace` pins the
# go-llama.cpp module to the local checkout under sources/.
backend-assets/grpc/llama: backend-assets/grpc sources/go-llama/libbinding.a
	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(CURDIR)/sources/go-llama
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-llama LIBRARY_PATH=$(CURDIR)/sources/go-llama \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/llm/llama/
# TODO: every binary should have its own folder instead, so can have different implementations
ifeq ($(BUILD_TYPE),metal)
	cp backend/cpp/llama/llama.cpp/ggml-metal.metal backend-assets/grpc/
endif
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-11-06 18:07:46 +00:00
|
|
|
## BACKEND CPP LLAMA START
# Paths used when gRPC is built locally (BUILD_GRPC_FOR_BACKEND_LLAMA)
# instead of being picked up from the system.
INSTALLED_PACKAGES=$(CURDIR)/backend/cpp/grpc/installed_packages
INSTALLED_LIB_CMAKE=$(INSTALLED_PACKAGES)/lib/cmake
# Extra CMake args pointing the backend build at the locally-installed
# gRPC stack and its dependencies.
ADDED_CMAKE_ARGS=-Dabsl_DIR=$(INSTALLED_LIB_CMAKE)/absl \
	-DProtobuf_DIR=$(INSTALLED_LIB_CMAKE)/protobuf \
	-Dutf8_range_DIR=$(INSTALLED_LIB_CMAKE)/utf8_range \
	-DgRPC_DIR=$(INSTALLED_LIB_CMAKE)/grpc \
	-DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES=$(INSTALLED_PACKAGES)/include
|
2023-10-16 19:46:29 +00:00
|
|
|
|
2023-11-06 18:07:46 +00:00
|
|
|
# Build the C++ llama.cpp gRPC server. When BUILD_GRPC_FOR_BACKEND_LLAMA
# is set, first build gRPC locally and point the sub-make at it.
backend/cpp/llama/grpc-server:
ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
	$(MAKE) -C backend/cpp/grpc build
	export _PROTOBUF_PROTOC=$(INSTALLED_PACKAGES)/bin/proto && \
	export _GRPC_CPP_PLUGIN_EXECUTABLE=$(INSTALLED_PACKAGES)/bin/grpc_cpp_plugin && \
	export PATH="$(INSTALLED_PACKAGES)/bin:$${PATH}" && \
	CMAKE_ARGS="$(CMAKE_ARGS) $(ADDED_CMAKE_ARGS)" LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama grpc-server
# NOTE(review): `bin/proto` looks like a typo for `bin/protoc` (the binary
# protobuf installs) — confirm whether _PROTOBUF_PROTOC is consumed at all.
else
	echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
	LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama grpc-server
endif
## BACKEND CPP LLAMA END
|
2024-01-13 09:08:26 +00:00
|
|
|
|
2023-11-06 18:07:46 +00:00
|
|
|
##
|
2023-10-16 19:46:29 +00:00
|
|
|
# Stage the C++ llama.cpp gRPC server into the embedded assets.
backend-assets/grpc/llama-cpp: backend-assets/grpc backend/cpp/llama/grpc-server
	cp -rfv backend/cpp/llama/grpc-server $@
# TODO: every binary should have its own folder instead, so can have different metal implementations
ifeq ($(BUILD_TYPE),metal)
	cp backend/cpp/llama/llama.cpp/build/bin/ggml-metal.metal backend-assets/grpc/
endif
|
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# Legacy ggml-format llama backend, built from the go-llama-ggml checkout.
backend-assets/grpc/llama-ggml: backend-assets/grpc sources/go-llama-ggml/libbinding.a
	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(CURDIR)/sources/go-llama-ggml
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-llama-ggml LIBRARY_PATH=$(CURDIR)/sources/go-llama-ggml \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/llm/llama-ggml/
|
2023-08-20 14:35:42 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# gpt4all backend; links against the golang bindings' static library.
backend-assets/grpc/gpt4all: backend-assets/grpc backend-assets/gpt4all sources/gpt4all/gpt4all-bindings/golang/libgpt4all.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/gpt4all/gpt4all-bindings/golang/ LIBRARY_PATH=$(CURDIR)/sources/gpt4all/gpt4all-bindings/golang/ \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/llm/gpt4all/
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# All go-ggml-transformers based backends share the exact same build
# recipe; a static pattern rule replaces eight copy-pasted rules.
# The pattern stem `$*` is the backend name (dolly, gpt2, ...), which
# also names its source directory under ./backend/go/llm/.
GGML_TRANSFORMERS_BACKENDS := dolly gpt2 gptj gptneox mpt replit falcon-ggml starcoder
$(addprefix backend-assets/grpc/,$(GGML_TRANSFORMERS_BACKENDS)): backend-assets/grpc/%: backend-assets/grpc sources/go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-ggml-transformers LIBRARY_PATH=$(CURDIR)/sources/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/llm/$*/
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# RWKV backend, built against the go-rwkv.cpp static library.
backend-assets/grpc/rwkv: backend-assets/grpc sources/go-rwkv/librwkv.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-rwkv LIBRARY_PATH=$(CURDIR)/sources/go-rwkv \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/llm/rwkv
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# bert.cpp embeddings backend.
backend-assets/grpc/bert-embeddings: backend-assets/grpc sources/go-bert/libgobert.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-bert LIBRARY_PATH=$(CURDIR)/sources/go-bert \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/llm/bert/
|
2023-07-14 23:19:43 +00:00
|
|
|
|
|
|
|
# Pure-Go backend (no cgo library prerequisites).
backend-assets/grpc/langchain-huggingface: backend-assets/grpc
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/llm/langchain/
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-10-16 19:46:29 +00:00
|
|
|
# Stable Diffusion backend. The static library is only built on demand,
# inside the recipe, when the output binary does not exist yet.
backend-assets/grpc/stablediffusion: backend-assets/grpc
	if [ ! -f $@ ]; then \
	$(MAKE) sources/go-stable-diffusion/libstablediffusion.a; \
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-stable-diffusion/ LIBRARY_PATH=$(CURDIR)/sources/go-stable-diffusion/ \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/image/stablediffusion; \
	fi
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-12-24 19:27:24 +00:00
|
|
|
# tinydream image backend.
# Fix: the binding lives under sources/go-tiny-dream (see the
# libtinydream.a prerequisite), but LIBRARY_PATH pointed at
# $(CURDIR)/go-tiny-dream, so the linker could not find the library.
# Also set C_INCLUDE_PATH for cgo, consistent with every sibling rule.
backend-assets/grpc/tinydream: backend-assets/grpc sources/go-tiny-dream/libtinydream.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-tiny-dream LIBRARY_PATH=$(CURDIR)/sources/go-tiny-dream \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/image/tinydream
|
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# piper TTS backend; needs the espeak-ng data assets plus the piper
# binding, and uses piper-specific cgo flags.
backend-assets/grpc/piper: backend-assets/grpc backend-assets/espeak-ng-data sources/go-piper/libpiper_binding.a
	CGO_CXXFLAGS="$(PIPER_CGO_CXXFLAGS)" CGO_LDFLAGS="$(PIPER_CGO_LDFLAGS)" LIBRARY_PATH=$(CURDIR)/sources/go-piper \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/tts/
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-11-18 07:18:43 +00:00
|
|
|
# whisper.cpp transcription backend.
backend-assets/grpc/whisper: backend-assets/grpc sources/whisper.cpp/libwhisper.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/whisper.cpp LIBRARY_PATH=$(CURDIR)/sources/whisper.cpp \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $@ ./backend/go/transcribe/
|
2023-07-14 23:19:43 +00:00
|
|
|
|
2023-08-13 18:04:08 +00:00
|
|
|
grpcs: prepare $(GRPC_BACKENDS)
|