LocalAI/pkg/model/loader.go

package model

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/go-skynet/LocalAI/pkg/functions"
	"github.com/go-skynet/LocalAI/pkg/grpc"
	"github.com/go-skynet/LocalAI/pkg/templates"
	"github.com/go-skynet/LocalAI/pkg/utils"

	process "github.com/mudler/go-processmanager"
	"github.com/rs/zerolog/log"
)

// Rather than passing an interface{} to the prompt template, these are the
// definitions of all variables LocalAI will currently populate for use in a
// prompt template file.
// Note: not all of these are populated on every endpoint - your template
// should either be tested against each endpoint you map it to, or be
// tolerant of zero values.
type PromptTemplateData struct {
	SystemPrompt         string
	SuppressSystemPrompt bool // used by chat specifically to indicate that SystemPrompt above should be _ignored_
	Input                string
	Instruction          string
	Functions            []functions.Function
	MessageIndex         int
}
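
// Illustrative only (hypothetical template content, not shipped with this
// package): a completion .tmpl file rendered with PromptTemplateData would
// reference the fields above via Go text/template syntax, e.g.:
//
//	### Instruction: {{.Instruction}}
//	### Input: {{.Input}}
//	### Response: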

// TODO: Ask mudler about FunctionCall stuff being useful at the message level?
type ChatMessageTemplateData struct {
	SystemPrompt string
	Role         string
	RoleName     string
	FunctionName string
	Content      string
	MessageIndex int
	Function     bool
	FunctionCall interface{}
	LastMessage  bool
}
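
// Illustrative only (hypothetical template, not part of this package): a
// chat-message .tmpl rendered with ChatMessageTemplateData might look like:
//
//	{{if .RoleName}}{{.RoleName}}: {{end}}{{.Content}}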

// New idea: what if we declare a struct of these here, and use a loop to check?

// TODO: Split ModelLoader and TemplateLoader? Just to keep things more
// organized. Left together to share a mutex until I look into that. Would
// split if we separate directories for .bin/.yaml and .tmpl files.
type ModelLoader struct {
	ModelPath string
	mu        sync.Mutex
	// TODO: this needs generics
	grpcClients   map[string]grpc.Backend
	models        map[string]ModelAddress
	grpcProcesses map[string]*process.Process
	templates     *templates.TemplateCache
	wd            *WatchDog
}

type ModelAddress string

func (m ModelAddress) GRPC(parallel bool, wd *WatchDog) grpc.Backend {
	enableWD := wd != nil
	return grpc.NewClient(string(m), parallel, wd, enableWD)
}
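
// Usage sketch (the address value below is hypothetical; real addresses are
// produced by the backend loaders):
//
//	addr := ModelAddress("127.0.0.1:50051")
//	client := addr.GRPC(false, nil) // parallel disabled, no watchdog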

func NewModelLoader(modelPath string) *ModelLoader {
	nml := &ModelLoader{
		ModelPath:     modelPath,
		grpcClients:   make(map[string]grpc.Backend),
		models:        make(map[string]ModelAddress),
		templates:     templates.NewTemplateCache(modelPath),
		grpcProcesses: make(map[string]*process.Process),
	}
	return nml
}

func (ml *ModelLoader) SetWatchDog(wd *WatchDog) {
	ml.wd = wd
}

func (ml *ModelLoader) ExistsInModelPath(s string) bool {
	return utils.ExistsInPath(ml.ModelPath, s)
}

func (ml *ModelLoader) ListModels() ([]string, error) {
	files, err := os.ReadDir(ml.ModelPath)
	if err != nil {
		return []string{}, err
	}

	models := []string{}
	for _, file := range files {
		// Skip templates, YAML, .keep, .json, and .DS_Store files.
		// TODO: as this list grows, is there a more efficient method?
		if strings.HasSuffix(file.Name(), ".tmpl") ||
			strings.HasSuffix(file.Name(), ".keep") ||
			strings.HasSuffix(file.Name(), ".yaml") ||
			strings.HasSuffix(file.Name(), ".yml") ||
			strings.HasSuffix(file.Name(), ".json") ||
			strings.HasSuffix(file.Name(), ".DS_Store") ||
			strings.HasPrefix(file.Name(), ".") {
			continue
		}

		models = append(models, file.Name())
	}

	return models, nil
}
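
// For example, a model path containing "ggml-model.bin", "chat.tmpl" and
// ".DS_Store" would yield []string{"ggml-model.bin"}: everything else is
// filtered out by the suffix/prefix checks above.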

func (ml *ModelLoader) LoadModel(modelName string, loader func(string, string) (ModelAddress, error)) (ModelAddress, error) {
	ml.mu.Lock()
	defer ml.mu.Unlock()

	// Check if we already have a loaded model
	if model := ml.CheckIsLoaded(modelName); model != "" {
		return model, nil
	}

	// Load the model and keep it in memory for later use
	modelFile := filepath.Join(ml.ModelPath, modelName)
	log.Debug().Msgf("Loading model in memory from file: %s", modelFile)

	model, err := loader(modelName, modelFile)
	if err != nil {
		return "", err
	}

	// TODO: Add a helper method to iterate all prompt templates associated with a config, if and only if it's YAML?
	// Minor perf loss here until this is fixed: templates are initialized on first request instead.
	// // If there is a prompt template, load it
	// if err := ml.loadTemplateIfExists(modelName); err != nil {
	// 	return nil, err
	// }

	ml.models[modelName] = model
	return model, nil
}
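
// Usage sketch (hypothetical loader callback; real callbacks are provided by
// the backend-specific loaders):
//
//	addr, err := ml.LoadModel("ggml-model.bin", func(name, file string) (ModelAddress, error) {
//		// start a backend process serving `file` and return its address
//		return ModelAddress("127.0.0.1:50051"), nil
//	})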

func (ml *ModelLoader) ShutdownModel(modelName string) error {
	ml.mu.Lock()
	defer ml.mu.Unlock()

	return ml.stopModel(modelName)
}

func (ml *ModelLoader) stopModel(modelName string) error {
	// The process is deleted on return even when the model is not tracked,
	// so a stale backend still gets cleaned up.
	defer ml.deleteProcess(modelName)
	if _, ok := ml.models[modelName]; !ok {
		return fmt.Errorf("model %s not found", modelName)
	}
	return nil
}

func (ml *ModelLoader) CheckIsLoaded(s string) ModelAddress {
	var client grpc.Backend
	if m, ok := ml.models[s]; ok {
		log.Debug().Msgf("Model already loaded in memory: %s", s)
		if c, ok := ml.grpcClients[s]; ok {
			client = c
		} else {
			client = m.GRPC(false, ml.wd)
		}

		alive, err := client.HealthCheck(context.Background())
		if !alive {
			log.Warn().Err(err).Msgf("GRPC Model not responding: %s", s)
			log.Warn().Msgf("Deleting the process in order to recreate it")
			// Guard against a missing process entry before dereferencing it
			proc, ok := ml.grpcProcesses[s]
			if !ok || !proc.IsAlive() {
				log.Debug().Msgf("GRPC Process is not responding: %s", s)
				// stop and delete the process: this forces the model to be
				// re-loaded and the service re-created on the next request
				if err := ml.deleteProcess(s); err != nil {
					log.Error().Err(err).Str("process", s).Msg("error stopping process")
				}
				return ""
			}
		}

		return m
	}

	return ""
}

const (
	ChatPromptTemplate templates.TemplateType = iota
	ChatMessageTemplate
	CompletionPromptTemplate
	EditPromptTemplate
	FunctionsPromptTemplate
)

func (ml *ModelLoader) EvaluateTemplateForPrompt(templateType templates.TemplateType, templateName string, in PromptTemplateData) (string, error) {
	// TODO: should this check be improved?
	if templateType == ChatMessageTemplate {
		return "", fmt.Errorf("invalid templateType: ChatMessage")
	}
	return ml.templates.EvaluateTemplate(templateType, templateName, in)
}

func (ml *ModelLoader) EvaluateTemplateForChatMessage(templateName string, messageData ChatMessageTemplateData) (string, error) {
	return ml.templates.EvaluateTemplate(ChatMessageTemplate, templateName, messageData)
}
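
// Usage sketch (hypothetical template name "alpaca", assumed to resolve to a
// template in the model path):
//
//	prompt, err := ml.EvaluateTemplateForPrompt(CompletionPromptTemplate, "alpaca",
//		PromptTemplateData{Input: "What is the capital of France?"})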