package model

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"text/template"

	"github.com/rs/zerolog/log"

	gptj "github.com/go-skynet/go-gpt4all-j.cpp"
	llama "github.com/go-skynet/go-llama.cpp"
)
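// ModelLoader lazily loads llama.cpp and gpt4all-j models from a single
// directory and caches them in memory together with their optional prompt
// templates. Access to the caches is guarded by an internal mutex.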
type ModelLoader struct {
	modelPath        string
	mu               sync.Mutex
	models           map[string]*llama.LLama
	gptmodels        map[string]*gptj.GPTJ
	promptsTemplates map[string]*template.Template
}
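// NewModelLoader returns a ModelLoader that resolves model files relative to
// modelPath. A minimal usage sketch (the directory and model file name below
// are illustrative, not part of this package):
//
//	ml := NewModelLoader("./models")
//	model, err := ml.LoadLLaMAModel("ggml-model.bin")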
func NewModelLoader(modelPath string) *ModelLoader {
	return &ModelLoader{
		modelPath:        modelPath,
		models:           make(map[string]*llama.LLama),
		gptmodels:        make(map[string]*gptj.GPTJ),
		promptsTemplates: make(map[string]*template.Template),
	}
}
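// ExistsInModelPath reports whether a file named s exists inside the model path.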
func (ml *ModelLoader) ExistsInModelPath(s string) bool {
	_, err := os.Stat(filepath.Join(ml.modelPath, s))
	return err == nil
}
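// ListModels returns the names of the files in the model path, skipping
// template, YAML and .keep files.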
func (ml *ModelLoader) ListModels() ([]string, error) {
	files, err := os.ReadDir(ml.modelPath)
	if err != nil {
		return []string{}, err
	}

	models := []string{}
	for _, file := range files {
		// Skip templates, YAML and .keep files
		if strings.HasSuffix(file.Name(), ".tmpl") || strings.HasSuffix(file.Name(), ".keep") || strings.HasSuffix(file.Name(), ".yaml") || strings.HasSuffix(file.Name(), ".yml") {
			continue
		}

		models = append(models, file.Name())
	}

	return models, nil
}
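// TemplatePrefix renders the prompt template cached for modelName against the
// given data, returning an error if no template has been loaded for that model.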
func (ml *ModelLoader) TemplatePrefix(modelName string, in interface{}) (string, error) {
	ml.mu.Lock()
	defer ml.mu.Unlock()

	m, ok := ml.promptsTemplates[modelName]
	if !ok {
		return "", fmt.Errorf("no prompt template available")
	}

	var buf bytes.Buffer

	if err := m.Execute(&buf, in); err != nil {
		return "", err
	}
	return buf.String(), nil
}
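// loadTemplateIfExists parses <modelName>.tmpl from the model path, if present,
// and caches the parsed template. Callers must hold ml.mu.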
func (ml *ModelLoader) loadTemplateIfExists(modelName, modelFile string) error {
	// Check if the template was already loaded
	if _, ok := ml.promptsTemplates[modelName]; ok {
		return nil
	}

	// Check if the template file exists in the model path; skip any error
	// here - we run anyway if the template does not exist
	modelTemplateFile := fmt.Sprintf("%s.tmpl", modelName)

	if !ml.ExistsInModelPath(modelTemplateFile) {
		return nil
	}

	dat, err := os.ReadFile(filepath.Join(ml.modelPath, modelTemplateFile))
	if err != nil {
		return err
	}

	// Parse the template
	tmpl, err := template.New("prompt").Parse(string(dat))
	if err != nil {
		return err
	}
	ml.promptsTemplates[modelName] = tmpl

	return nil
}
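// LoadGPTJModel returns the gpt4all-j model named modelName, loading it from
// the model path (together with its prompt template, if any) on first use and
// serving it from the in-memory cache afterwards.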
func (ml *ModelLoader) LoadGPTJModel(modelName string) (*gptj.GPTJ, error) {
	ml.mu.Lock()
	defer ml.mu.Unlock()

	// Check if the model file exists in the model path
	if !ml.ExistsInModelPath(modelName) {
		return nil, fmt.Errorf("model does not exist")
	}

	// Check if we already have a loaded model
	if m, ok := ml.gptmodels[modelName]; ok {
		log.Debug().Msgf("Model already loaded in memory: %s", modelName)
		return m, nil
	}

	// Load the model and keep it in memory for later use
	modelFile := filepath.Join(ml.modelPath, modelName)
	log.Debug().Msgf("Loading model in memory from file: %s", modelFile)

	model, err := gptj.New(modelFile)
	if err != nil {
		return nil, err
	}

	// If there is a prompt template, load it
	if err := ml.loadTemplateIfExists(modelName, modelFile); err != nil {
		return nil, err
	}

	ml.gptmodels[modelName] = model
	return model, nil
}
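// LoadLLaMAModel returns the llama.cpp model named modelName, applying the
// given model options when it is first loaded. It returns an error if the
// model is already registered as a GPTJ model, so the API can fall back to
// LoadGPTJModel.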
func (ml *ModelLoader) LoadLLaMAModel(modelName string, opts ...llama.ModelOption) (*llama.LLama, error) {
	ml.mu.Lock()
	defer ml.mu.Unlock()

	log.Debug().Msgf("Loading model name: %s", modelName)

	// Check if the model file exists in the model path
	if !ml.ExistsInModelPath(modelName) {
		return nil, fmt.Errorf("model does not exist")
	}

	// Check if we already have a loaded model
	if m, ok := ml.models[modelName]; ok {
		log.Debug().Msgf("Model already loaded in memory: %s", modelName)
		return m, nil
	}

	// TODO: This needs refactoring, it's really bad to have it in here
	// Check if we have a GPTJ model loaded instead - if we do, return an
	// error so the API retries with GPTJ
	if _, ok := ml.gptmodels[modelName]; ok {
		log.Debug().Msgf("Model is GPTJ: %s", modelName)
		return nil, fmt.Errorf("this model is a GPTJ model")
	}

	// Load the model and keep it in memory for later use
	modelFile := filepath.Join(ml.modelPath, modelName)
	log.Debug().Msgf("Loading model in memory from file: %s", modelFile)

	model, err := llama.New(modelFile, opts...)
	if err != nil {
		return nil, err
	}

	// If there is a prompt template, load it
	if err := ml.loadTemplateIfExists(modelName, modelFile); err != nil {
		return nil, err
	}

	ml.models[modelName] = model
	return model, nil
}