Mirror of https://github.com/mudler/LocalAI.git (synced 2024-06-07 19:40:48 +00:00)
fdb45153fe

* feat(llama.cpp): Enable decentralized, distributed inference

  https://github.com/mudler/LocalAI/pull/2324 introduced distributed inferencing, thanks to @rgerganov's implementation in https://github.com/ggerganov/llama.cpp/pull/6829 in upstream llama.cpp, so it is now possible to distribute the workload to remote llama.cpp gRPC servers.

  This changeset uses mudler/edgevpn to establish a secure, distributed network between the nodes using a shared token. The token is generated automatically when the server is started with the `--p2p` flag, and workers join by running `local-ai worker p2p-llama-cpp-rpc`, passing the token either through the TOKEN environment variable or the `--token` argument.

  Per how mudler/edgevpn works, the network between the server and the workers is established with DHT and mDNS discovery protocols; the llama.cpp RPC server is started automatically on each worker and exposed to the underlying p2p network so the API server can connect to it. When the HTTP server starts, it discovers the workers on the network and automatically creates local port-forwards to their services, and llama.cpp is then configured to use those services.

  This feature is behind the "p2p" GO_FLAGS build tag.

  Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* go mod tidy

  Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* ci: add p2p tag

  Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* better message

  Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
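The worker entry point described in the commit message is a CLI subcommand. Below is a minimal Go sketch of how such a command could be wired with kong-style struct tags, the same convention used in the file further down. The `P2PWorkerCMD` type, its field names, and the flow inside `Run` are hypothetical illustrations and not the actual LocalAI implementation; only the `--token` flag, the TOKEN environment variable, and the overall behavior (start a llama.cpp RPC server and expose it on the token-scoped p2p network) come from the commit message above.

	package cli

	import (
		"errors"
		"fmt"
	)

	// P2PWorkerCMD is a hypothetical sketch of a `worker p2p-llama-cpp-rpc`
	// subcommand. With kong-style tags, the Token field is populated from the
	// --token flag or from the TOKEN environment variable, as described in the
	// commit message.
	type P2PWorkerCMD struct {
		Token string `env:"TOKEN" help:"Shared token used to join the p2p network"`
	}

	func (w *P2PWorkerCMD) Run() error {
		if w.Token == "" {
			return errors.New("a network token is required (use --token or the TOKEN environment variable)")
		}
		// In the real feature, the worker would start a llama.cpp RPC server and
		// expose it on the edgevpn p2p network identified by the token. Those
		// steps are omitted here because they depend on LocalAI internals.
		fmt.Println("joining p2p network and exposing the local llama.cpp RPC server (sketch only)")
		return nil
	}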
69 lines · 2.0 KiB · Go
package cli

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/go-skynet/LocalAI/core/backend"
	cliContext "github.com/go-skynet/LocalAI/core/cli/context"
	"github.com/go-skynet/LocalAI/core/config"
	"github.com/go-skynet/LocalAI/pkg/model"
	"github.com/rs/zerolog/log"
)

// TTSCMD implements the `local-ai tts` subcommand: it synthesizes the given
// text with the selected TTS backend and model and writes the result to a
// wav file.
type TTSCMD struct {
	Text []string `arg:""`

	Backend           string `short:"b" default:"piper" help:"Backend to run the TTS model"`
	Model             string `short:"m" required:"" help:"Model name to run the TTS"`
	Voice             string `short:"v" help:"Voice name to run the TTS"`
	OutputFile        string `short:"o" type:"path" help:"The path to write the output wav file"`
	ModelsPath        string `env:"LOCALAI_MODELS_PATH,MODELS_PATH" type:"path" default:"${basepath}/models" help:"Path containing models used for inferencing" group:"storage"`
	BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
}

func (t *TTSCMD) Run(ctx *cliContext.Context) error {
	// Write the audio next to the requested output file, or into the backend
	// assets directory when no explicit output path is given.
	outputFile := t.OutputFile
	outputDir := t.BackendAssetsPath
	if outputFile != "" {
		outputDir = filepath.Dir(outputFile)
	}

	// The positional arguments form the text to synthesize.
	text := strings.Join(t.Text, " ")

	opts := &config.ApplicationConfig{
		ModelPath:         t.ModelsPath,
		Context:           context.Background(),
		AudioDir:          outputDir,
		AssetsDestination: t.BackendAssetsPath,
	}
	ml := model.NewModelLoader(opts.ModelPath)

	// Make sure any backend gRPC processes spawned for this run are stopped
	// before the command exits.
	defer func() {
		err := ml.StopAllGRPC()
		if err != nil {
			log.Error().Err(err).Msg("unable to stop all grpc processes")
		}
	}()

	options := config.BackendConfig{}
	options.SetDefaults()

	filePath, _, err := backend.ModelTTS(t.Backend, text, t.Model, t.Voice, ml, opts, options)
	if err != nil {
		return err
	}

	// Move the generated file to the requested location, if one was given.
	if outputFile != "" {
		if err := os.Rename(filePath, outputFile); err != nil {
			return err
		}
		fmt.Printf("Generated file %s\n", outputFile)
	} else {
		fmt.Printf("Generated file %s\n", filePath)
	}
	return nil
}
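For reference, here is a minimal sketch of driving TTSCMD programmatically, roughly as kong would after parsing `local-ai tts -b piper -m <model> -o out.wav "hello world"`. It assumes the package lives at github.com/go-skynet/LocalAI/core/cli (consistent with the imports above), that a zero-value cliContext.Context is acceptable, and uses a hypothetical model name and local paths; it is a sketch, not a supported API.

	package cli_test

	import (
		"testing"

		cli "github.com/go-skynet/LocalAI/core/cli"
		cliContext "github.com/go-skynet/LocalAI/core/cli/context"
	)

	// Illustrative sketch only: the model name, paths, and the zero-value
	// cliContext.Context are assumptions; running it requires local models
	// and backend assets.
	func TestTTSCommandSketch(t *testing.T) {
		t.Skip("illustrative sketch only; requires local models and backend assets")

		cmd := &cli.TTSCMD{
			Text:              []string{"hello", "world"},
			Backend:           "piper",
			Model:             "voice-en-us", // hypothetical model name
			OutputFile:        "/tmp/out.wav",
			ModelsPath:        "./models",
			BackendAssetsPath: "/tmp/localai/backend_data",
		}
		if err := cmd.Run(&cliContext.Context{}); err != nil {
			t.Fatalf("tts run failed: %v", err)
		}
	}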