From b85dad0286304993b4fd32f22006d30c6c2fd337 Mon Sep 17 00:00:00 2001 From: cryptk <421501+cryptk@users.noreply.github.com> Date: Thu, 4 Apr 2024 02:24:22 -0500 Subject: [PATCH] feat: first pass at improving logging (#1956) Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com> --- .vscode/extensions.json | 5 +++++ core/config/application_config.go | 2 +- core/http/endpoints/openai/chat.go | 10 +++++----- core/http/endpoints/openai/files_test.go | 5 +++-- core/services/backend_monitor.go | 10 +++++----- core/startup/config_file_watcher.go | 2 +- core/startup/startup.go | 6 +++--- main.go | 10 +++++----- pkg/gallery/models.go | 2 +- pkg/model/initializers.go | 6 +++--- pkg/model/watchdog.go | 8 ++++---- pkg/startup/model_preload.go | 19 +++++++++++-------- pkg/utils/config.go | 11 ++++++----- 13 files changed, 53 insertions(+), 43 deletions(-) create mode 100644 .vscode/extensions.json diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 00000000..7203cb3f --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,5 @@ +{ + "recommendations": [ + "golang.go" + ] +} \ No newline at end of file diff --git a/core/config/application_config.go b/core/config/application_config.go index 49b35f97..9525553a 100644 --- a/core/config/application_config.go +++ b/core/config/application_config.go @@ -169,7 +169,7 @@ func WithStringGalleries(galls string) AppOption { } var galleries []gallery.Gallery if err := json.Unmarshal([]byte(galls), &galleries); err != nil { - log.Error().Msgf("failed loading galleries: %s", err.Error()) + log.Error().Err(err).Msg("failed loading galleries") } o.Galleries = append(o.Galleries, galleries...) 
} diff --git a/core/http/endpoints/openai/chat.go b/core/http/endpoints/openai/chat.go index 837b6e12..871ae6c1 100644 --- a/core/http/endpoints/openai/chat.go +++ b/core/http/endpoints/openai/chat.go @@ -84,7 +84,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup result, err := handleQuestion(config, req, ml, startupOptions, results[0].arguments, prompt) if err != nil { - log.Error().Msgf("error handling question: %s", err.Error()) + log.Error().Err(err).Msg("error handling question") return } @@ -268,7 +268,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup } templatedChatMessage, err := ml.EvaluateTemplateForChatMessage(config.TemplateConfig.ChatMessage, chatMessageData) if err != nil { - log.Error().Msgf("error processing message %+v using template \"%s\": %v. Skipping!", chatMessageData, config.TemplateConfig.ChatMessage, err) + log.Error().Err(err).Interface("message", chatMessageData).Str("template", config.TemplateConfig.ChatMessage).Msg("error processing message with template, skipping") } else { if templatedChatMessage == "" { log.Warn().Msgf("template \"%s\" produced blank output for %+v. 
Skipping!", config.TemplateConfig.ChatMessage, chatMessageData) @@ -455,7 +455,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup case noActionsToRun: result, err := handleQuestion(config, input, ml, startupOptions, results[0].arguments, predInput) if err != nil { - log.Error().Msgf("error handling question: %s", err.Error()) + log.Error().Err(err).Msg("error handling question") return } *c = append(*c, schema.Choice{ @@ -565,13 +565,13 @@ func handleQuestion(config *config.BackendConfig, input *schema.OpenAIRequest, m predFunc, err := backend.ModelInference(input.Context, prompt, images, ml, *config, o, nil) if err != nil { - log.Error().Msgf("inference error: %s", err.Error()) + log.Error().Err(err).Msg("model inference failed") return "", err } prediction, err := predFunc() if err != nil { - log.Error().Msgf("inference error: %s", err.Error()) + log.Error().Err(err).Msg("prediction failed") return "", err } return backend.Finetune(*config, prompt, prediction.Response), nil diff --git a/core/http/endpoints/openai/files_test.go b/core/http/endpoints/openai/files_test.go index e1c1011e..fc77ae45 100644 --- a/core/http/endpoints/openai/files_test.go +++ b/core/http/endpoints/openai/files_test.go @@ -3,7 +3,6 @@ package openai import ( "encoding/json" "fmt" - "github.com/rs/zerolog/log" "io" "mime/multipart" "net/http" @@ -12,6 +11,8 @@ import ( "path/filepath" "strings" + "github.com/rs/zerolog/log" + "github.com/go-skynet/LocalAI/core/config" utils2 "github.com/go-skynet/LocalAI/pkg/utils" @@ -297,7 +298,7 @@ func responseToListFile(t *testing.T, resp *http.Response) ListFiles { err := json.NewDecoder(strings.NewReader(responseToString)).Decode(&listFiles) if err != nil { - log.Error().Msgf("Failed to decode response: %s", err) + log.Error().Err(err).Msg("failed to decode response") } return listFiles diff --git a/core/services/backend_monitor.go b/core/services/backend_monitor.go index 88176753..979a67a3 100644 --- 
a/core/services/backend_monitor.go +++ b/core/services/backend_monitor.go @@ -63,7 +63,7 @@ func (bm *BackendMonitor) SampleLocalBackendProcess(model string) (*schema.Backe pid, err := bm.modelLoader.GetGRPCPID(backend) if err != nil { - log.Error().Msgf("model %s : failed to find pid %+v", model, err) + log.Error().Err(err).Str("model", model).Msg("failed to find GRPC pid") return nil, err } @@ -71,26 +71,26 @@ func (bm *BackendMonitor) SampleLocalBackendProcess(model string) (*schema.Backe backendProcess, err := gopsutil.NewProcess(int32(pid)) if err != nil { - log.Error().Msgf("model %s [PID %d] : error getting process info %+v", model, pid, err) + log.Error().Err(err).Str("model", model).Int("pid", pid).Msg("error getting process info") return nil, err } memInfo, err := backendProcess.MemoryInfo() if err != nil { - log.Error().Msgf("model %s [PID %d] : error getting memory info %+v", model, pid, err) + log.Error().Err(err).Str("model", model).Int("pid", pid).Msg("error getting memory info") return nil, err } memPercent, err := backendProcess.MemoryPercent() if err != nil { - log.Error().Msgf("model %s [PID %d] : error getting memory percent %+v", model, pid, err) + log.Error().Err(err).Str("model", model).Int("pid", pid).Msg("error getting memory percent") return nil, err } cpuPercent, err := backendProcess.CPUPercent() if err != nil { - log.Error().Msgf("model %s [PID %d] : error getting cpu percent %+v", model, pid, err) + log.Error().Err(err).Str("model", model).Int("pid", pid).Msg("error getting cpu percent") return nil, err } diff --git a/core/startup/config_file_watcher.go b/core/startup/config_file_watcher.go index 0c7eff2d..9c758e25 100644 --- a/core/startup/config_file_watcher.go +++ b/core/startup/config_file_watcher.go @@ -85,7 +85,7 @@ func WatchConfigDirectory(configDir string, appConfig *config.ApplicationConfig) if !ok { return } - log.Error().Msgf("WatchConfigDirectory goroutine error: %+v", err) + log.Error().Err(err).Msg("error encountered 
while watching config directory") } } }() diff --git a/core/startup/startup.go b/core/startup/startup.go index 828eb7a7..6298f034 100644 --- a/core/startup/startup.go +++ b/core/startup/startup.go @@ -61,17 +61,17 @@ func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.Mode configLoaderOpts := options.ToConfigLoaderOptions() if err := cl.LoadBackendConfigsFromPath(options.ModelPath, configLoaderOpts...); err != nil { - log.Error().Msgf("error loading config files: %s", err.Error()) + log.Error().Err(err).Msg("error loading config files") } if options.ConfigFile != "" { if err := cl.LoadBackendConfigFile(options.ConfigFile, configLoaderOpts...); err != nil { - log.Error().Msgf("error loading config file: %s", err.Error()) + log.Error().Err(err).Msg("error loading config file") } } if err := cl.Preload(options.ModelPath); err != nil { - log.Error().Msgf("error downloading models: %s", err.Error()) + log.Error().Err(err).Msg("error downloading models") } if options.PreloadJSONModels != "" { diff --git a/main.go b/main.go index 0d8befcb..53966ba5 100644 --- a/main.go +++ b/main.go @@ -45,7 +45,7 @@ func main() { path, err := os.Getwd() if err != nil { - log.Error().Msgf("error: %s", err.Error()) + log.Error().Err(err).Msg("failed to get current directory") os.Exit(1) } @@ -340,7 +340,7 @@ For a list of compatible model, check out: https://localai.io/model-compatibilit appHTTP, err := http.App(cl, ml, options) if err != nil { - log.Error().Msg("Error during HTTP App constructor") + log.Error().Err(err).Msg("error during HTTP App construction") return err } @@ -357,7 +357,7 @@ For a list of compatible model, check out: https://localai.io/model-compatibilit Action: func(ctx *cli.Context) error { var galleries []gallery.Gallery if err := json.Unmarshal([]byte(ctx.String("galleries")), &galleries); err != nil { - log.Error().Msgf("unable to load galleries: %s", err.Error()) + log.Error().Err(err).Msg("unable to load galleries") } models, err := 
gallery.AvailableGalleryModels(galleries, ctx.String("models-path")) @@ -382,7 +382,7 @@ For a list of compatible model, check out: https://localai.io/model-compatibilit var galleries []gallery.Gallery if err := json.Unmarshal([]byte(ctx.String("galleries")), &galleries); err != nil { - log.Error().Msgf("unable to load galleries: %s", err.Error()) + log.Error().Err(err).Msg("unable to load galleries") } progressBar := progressbar.NewOptions( @@ -547,7 +547,7 @@ For a list of compatible model, check out: https://localai.io/model-compatibilit err = app.Run(os.Args) if err != nil { - log.Error().Msgf("error: %s", err.Error()) + log.Error().Err(err).Msg("application runtime error") os.Exit(1) } } diff --git a/pkg/gallery/models.go b/pkg/gallery/models.go index 65d0401f..10caedee 100644 --- a/pkg/gallery/models.go +++ b/pkg/gallery/models.go @@ -67,7 +67,7 @@ func GetGalleryConfigFromURL(url string) (Config, error) { return yaml.Unmarshal(d, &config) }) if err != nil { - log.Error().Msgf("GetGalleryConfigFromURL error for url %s\n%s", url, err.Error()) + log.Error().Err(err).Str("url", url).Msg("failed to get gallery config for url") return config, err } return config, nil diff --git a/pkg/model/initializers.go b/pkg/model/initializers.go index 85744f9a..5d9808a4 100644 --- a/pkg/model/initializers.go +++ b/pkg/model/initializers.go @@ -15,8 +15,8 @@ import ( ) var Aliases map[string]string = map[string]string{ - "go-llama": LLamaCPP, - "llama": LLamaCPP, + "go-llama": LLamaCPP, + "llama": LLamaCPP, "embedded-store": LocalStoreBackend, } @@ -127,7 +127,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string break } if err != nil && i == o.grpcAttempts-1 { - log.Error().Msgf("Failed starting/connecting to the gRPC service: %s", err.Error()) + log.Error().Err(err).Msg("failed starting/connecting to the gRPC service") } time.Sleep(time.Duration(o.grpcAttemptsDelay) * time.Second) } diff --git a/pkg/model/watchdog.go b/pkg/model/watchdog.go 
index c93cb99a..b5381832 100644 --- a/pkg/model/watchdog.go +++ b/pkg/model/watchdog.go @@ -110,10 +110,10 @@ func (wd *WatchDog) checkIdle() { log.Debug().Msgf("[WatchDog] %s: idle connection", address) if time.Since(t) > wd.idletimeout { log.Warn().Msgf("[WatchDog] Address %s is idle for too long, killing it", address) - p, ok := wd.addressModelMap[address] + model, ok := wd.addressModelMap[address] if ok { - if err := wd.pm.ShutdownModel(p); err != nil { - log.Error().Msgf("[watchdog] Error shutting down model %s: %v", p, err) + if err := wd.pm.ShutdownModel(model); err != nil { + log.Error().Err(err).Str("model", model).Msg("[watchdog] error shutting down model") } log.Debug().Msgf("[WatchDog] model shut down: %s", address) delete(wd.idleTime, address) @@ -141,7 +141,7 @@ func (wd *WatchDog) checkBusy() { if ok { log.Warn().Msgf("[WatchDog] Model %s is busy for too long, killing it", model) if err := wd.pm.ShutdownModel(model); err != nil { - log.Error().Msgf("[watchdog] Error shutting down model %s: %v", model, err) + log.Error().Err(err).Str("model", model).Msg("[watchdog] error shutting down model") } log.Debug().Msgf("[WatchDog] model shut down: %s", address) delete(wd.timetable, address) diff --git a/pkg/startup/model_preload.go b/pkg/startup/model_preload.go index 979b4d83..b09516a7 100644 --- a/pkg/startup/model_preload.go +++ b/pkg/startup/model_preload.go @@ -35,14 +35,15 @@ func PreloadModelsConfigurations(modelLibraryURL string, modelPath string, model modelYAML, err := embedded.ResolveContent(url) // If we resolve something, just save it to disk and continue if err != nil { - log.Error().Msgf("error loading model: %s", err.Error()) + log.Error().Err(err).Msg("error resolving model content") continue } log.Debug().Msgf("[startup] resolved embedded model: %s", url) md5Name := utils.MD5(url) - if err := os.WriteFile(filepath.Join(modelPath, md5Name)+".yaml", modelYAML, os.ModePerm); err != nil { - log.Error().Msgf("error loading model: %s", 
err.Error()) + modelDefinitionFilePath := filepath.Join(modelPath, md5Name) + ".yaml" + if err := os.WriteFile(modelDefinitionFilePath, modelYAML, os.ModePerm); err != nil { + log.Error().Err(err).Str("filepath", modelDefinitionFilePath).Msg("error writing model definition") + } case downloader.LooksLikeURL(url): log.Debug().Msgf("[startup] resolved model to download: %s", url) @@ -52,11 +53,12 @@ func PreloadModelsConfigurations(modelLibraryURL string, modelPath string, model // check if file exists if _, err := os.Stat(filepath.Join(modelPath, md5Name)); errors.Is(err, os.ErrNotExist) { - err := downloader.DownloadFile(url, filepath.Join(modelPath, md5Name)+".yaml", "", func(fileName, current, total string, percent float64) { + modelDefinitionFilePath := filepath.Join(modelPath, md5Name) + ".yaml" + err := downloader.DownloadFile(url, modelDefinitionFilePath, "", func(fileName, current, total string, percent float64) { utils.DisplayDownloadFunction(fileName, current, total, percent) }) if err != nil { - log.Error().Msgf("error loading model: %s", err.Error()) + log.Error().Err(err).Str("url", url).Str("filepath", modelDefinitionFilePath).Msg("error downloading model") + } } default: @@ -67,12 +69,13 @@ func PreloadModelsConfigurations(modelLibraryURL string, modelPath string, model modelYAML, err := os.ReadFile(url) if err != nil { - log.Error().Msgf("error loading model: %s", err.Error()) + log.Error().Err(err).Str("filepath", url).Msg("error reading model definition") continue } - if err := os.WriteFile(filepath.Join(modelPath, md5Name)+".yaml", modelYAML, os.ModePerm); err != nil { - log.Error().Msgf("error loading model: %s", err.Error()) + modelDefinitionFilePath := filepath.Join(modelPath, md5Name) + ".yaml" + if err := os.WriteFile(modelDefinitionFilePath, modelYAML, os.ModePerm); err != nil { + log.Error().Err(err).Str("filepath", modelDefinitionFilePath).Msg("error writing model definition") + } } else { log.Warn().Msgf("[startup] failed resolving model '%s'", url) 
diff --git a/pkg/utils/config.go b/pkg/utils/config.go index a9167ed3..929e1f9f 100644 --- a/pkg/utils/config.go +++ b/pkg/utils/config.go @@ -2,21 +2,22 @@ package utils import ( "encoding/json" - "github.com/rs/zerolog/log" "os" "path/filepath" + + "github.com/rs/zerolog/log" ) func SaveConfig(filePath, fileName string, obj any) { file, err := json.MarshalIndent(obj, "", " ") if err != nil { - log.Error().Msgf("Failed to JSON marshal the uploadedFiles: %s", err) + log.Error().Err(err).Msg("failed to JSON marshal the uploadedFiles") } absolutePath := filepath.Join(filePath, fileName) err = os.WriteFile(absolutePath, file, 0644) if err != nil { - log.Error().Msgf("Failed to save configuration file to %s: %s", absolutePath, err) + log.Error().Err(err).Str("filepath", absolutePath).Msg("failed to save configuration file") } } @@ -31,11 +32,11 @@ func LoadConfig(filePath, fileName string, obj interface{}) { file, err := os.ReadFile(uploadFilePath) if err != nil { - log.Error().Msgf("Failed to read file: %s", err) + log.Error().Err(err).Str("filepath", uploadFilePath).Msg("failed to read file") } else { err = json.Unmarshal(file, &obj) if err != nil { - log.Error().Msgf("Failed to JSON unmarshal the file %s: %v", uploadFilePath, err) + log.Error().Err(err).Str("filepath", uploadFilePath).Msg("failed to parse file as JSON") } } }