fix: security scanner warning noise: error handlers part 2 (#2145)

check off a few more error handlers

Signed-off-by: Dave Lee <dave@gray101.com>
Dave 2024-04-29 09:11:42 -04:00 committed by GitHub
parent b7ea9602f5
commit 11c48a0004
11 changed files with 82 additions and 24 deletions
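Every hunk below applies the same pattern: an error return value that was previously discarded is now captured and either logged through zerolog or propagated to the caller, which is what clears the scanner warnings. A minimal, self-contained sketch of that pattern, assuming the zerolog log package; the doWork helper is illustrative only and not part of this commit:

package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

// doWork stands in for any call whose error used to be ignored
// (progressBar.Set, os.Setenv, json.Unmarshal, ml.StopAllGRPC, ...).
func doWork() error {
	return errors.New("something went wrong")
}

func main() {
	// Before: doWork() discarded the error and the scanner flagged it.
	// After: assign the error and log it with context, even when the caller
	// chooses not to propagate it further.
	if err := doWork(); err != nil {
		log.Error().Err(err).Msg("doWork failed")
	}
}

Where a failure should also abort the caller (as in BackendLoader below), the error is returned after logging; where it is informational only (deferred shutdowns, progress-bar updates), it is just logged.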


@@ -64,7 +64,11 @@ func (mi *ModelsInstall) Run(ctx *Context) error {
 		progressbar.OptionClearOnFinish(),
 	)
 	progressCallback := func(fileName string, current string, total string, percentage float64) {
-		progressBar.Set(int(percentage * 10))
+		v := int(percentage * 10)
+		err := progressBar.Set(v)
+		if err != nil {
+			log.Error().Err(err).Str("filename", fileName).Int("value", v).Msg("error while updating progress bar")
+		}
 	}
 	err := gallery.InstallModelFromGallery(galleries, modelName, mi.ModelsPath, gallery.GalleryModel{}, progressCallback)
 	if err != nil {


@@ -8,6 +8,7 @@ import (
 	"github.com/go-skynet/LocalAI/core/backend"
 	"github.com/go-skynet/LocalAI/core/config"
 	"github.com/go-skynet/LocalAI/pkg/model"
+	"github.com/rs/zerolog/log"
 )
 type TranscriptCMD struct {
@@ -41,7 +42,12 @@ func (t *TranscriptCMD) Run(ctx *Context) error {
 	c.Threads = &t.Threads
-	defer ml.StopAllGRPC()
+	defer func() {
+		err := ml.StopAllGRPC()
+		if err != nil {
+			log.Error().Err(err).Msg("unable to stop all grpc processes")
+		}
+	}()
 	tr, err := backend.ModelTranscription(t.Filename, t.Language, ml, c, opts)
 	if err != nil {


@@ -10,6 +10,7 @@ import (
 	"github.com/go-skynet/LocalAI/core/backend"
 	"github.com/go-skynet/LocalAI/core/config"
 	"github.com/go-skynet/LocalAI/pkg/model"
+	"github.com/rs/zerolog/log"
 )
 type TTSCMD struct {
@@ -40,7 +41,12 @@ func (t *TTSCMD) Run(ctx *Context) error {
 	}
 	ml := model.NewModelLoader(opts.ModelPath)
-	defer ml.StopAllGRPC()
+	defer func() {
+		err := ml.StopAllGRPC()
+		if err != nil {
+			log.Error().Err(err).Msg("unable to stop all grpc processes")
+		}
+	}()
 	options := config.BackendConfig{}
 	options.SetDefaults()


@@ -31,8 +31,14 @@ func newConfigFileHandler(appConfig *config.ApplicationConfig) configFileHandler
 		handlers: make(map[string]fileHandler),
 		appConfig: appConfig,
 	}
-	c.Register("api_keys.json", readApiKeysJson(*appConfig), true)
-	c.Register("external_backends.json", readExternalBackendsJson(*appConfig), true)
+	err := c.Register("api_keys.json", readApiKeysJson(*appConfig), true)
+	if err != nil {
+		log.Error().Err(err).Str("file", "api_keys.json").Msg("unable to register config file handler")
+	}
+	err = c.Register("external_backends.json", readExternalBackendsJson(*appConfig), true)
+	if err != nil {
+		log.Error().Err(err).Str("file", "external_backends.json").Msg("unable to register config file handler")
+	}
 	return c
 }
@@ -118,8 +124,8 @@ func (c *configFileHandler) Watch() error {
 }
 // TODO: When we institute graceful shutdown, this should be called
-func (c *configFileHandler) Stop() {
-	c.watcher.Close()
+func (c *configFileHandler) Stop() error {
+	return c.watcher.Close()
 }
 func readApiKeysJson(startupAppConfig config.ApplicationConfig) fileHandler {


@@ -100,7 +100,10 @@ func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.Mode
 	go func() {
 		<-options.Context.Done()
 		log.Debug().Msgf("Context canceled, shutting down")
-		ml.StopAllGRPC()
+		err := ml.StopAllGRPC()
+		if err != nil {
+			log.Error().Err(err).Msg("error while stopping all grpc backends")
+		}
 	}()
 	if options.WatchDog {


@@ -2,6 +2,8 @@ package functions
 import (
 	"encoding/json"
+	"github.com/rs/zerolog/log"
 )
 type Function struct {
@@ -30,8 +32,14 @@ func (f Functions) ToJSONStructure() JSONFunctionStructure {
 		prop := map[string]interface{}{}
 		defsD := map[string]interface{}{}
-		json.Unmarshal(dat, &prop)
-		json.Unmarshal(dat2, &defsD)
+		err := json.Unmarshal(dat, &prop)
+		if err != nil {
+			log.Error().Err(err).Msg("error unmarshalling dat")
+		}
+		err = json.Unmarshal(dat2, &defsD)
+		if err != nil {
+			log.Error().Err(err).Msg("error unmarshalling dat2")
+		}
 		if js.Defs == nil {
 			js.Defs = defsD
 		}


@@ -59,7 +59,10 @@ func ParseFunctionCall(llmresult string, functionConfig FunctionsConfig) []FuncC
 	if multipleResults {
 		ss := []map[string]interface{}{}
 		s := utils.EscapeNewLines(llmresult)
-		json.Unmarshal([]byte(s), &ss)
+		err := json.Unmarshal([]byte(s), &ss)
+		if err != nil {
+			log.Error().Err(err).Str("escapedLLMResult", s).Msg("multiple results: unable to unmarshal llm result")
+		}
 		log.Debug().Msgf("Function return: %s %+v", s, ss)
 		for _, s := range ss {
@@ -83,7 +86,10 @@ func ParseFunctionCall(llmresult string, functionConfig FunctionsConfig) []FuncC
 		ss := map[string]interface{}{}
 		// This prevent newlines to break JSON parsing for clients
 		s := utils.EscapeNewLines(llmresult)
-		json.Unmarshal([]byte(s), &ss)
+		err := json.Unmarshal([]byte(s), &ss)
+		if err != nil {
+			log.Error().Err(err).Str("escapedLLMResult", s).Msg("unable to unmarshal llm result")
+		}
 		log.Debug().Msgf("Function return: %s %+v", s, ss)
 		// The grammar defines the function name as "function", while OpenAI returns "name"


@@ -70,7 +70,10 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string
 		// If no specific model path is set for transformers/HF, set it to the model path
 		for _, env := range []string{"HF_HOME", "TRANSFORMERS_CACHE", "HUGGINGFACE_HUB_CACHE"} {
 			if os.Getenv(env) == "" {
-				os.Setenv(env, ml.ModelPath)
+				err := os.Setenv(env, ml.ModelPath)
+				if err != nil {
+					log.Error().Err(err).Str("name", env).Str("modelPath", ml.ModelPath).Msg("unable to set environment variable to modelPath")
+				}
 			}
 		}
@@ -184,8 +187,13 @@ func (ml *ModelLoader) BackendLoader(opts ...Option) (client grpc.Backend, err e
 	if o.singleActiveBackend {
 		ml.mu.Lock()
 		log.Debug().Msgf("Stopping all backends except '%s'", o.model)
-		ml.StopAllExcept(o.model)
+		err := ml.StopAllExcept(o.model)
 		ml.mu.Unlock()
+		if err != nil {
+			log.Error().Err(err).Str("keptModel", o.model).Msg("error while shutting down all backends except for the keptModel")
+			return nil, err
+		}
 	}
 	var backendToConsume string
@@ -224,7 +232,10 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) {
 	// If we can have only one backend active, kill all the others (except external backends)
 	if o.singleActiveBackend {
 		log.Debug().Msgf("Stopping all backends except '%s'", o.model)
-		ml.StopAllExcept(o.model)
+		err := ml.StopAllExcept(o.model)
+		if err != nil {
+			log.Error().Err(err).Str("keptModel", o.model).Msg("error while shutting down all backends except for the keptModel - greedyloader continuing")
+		}
 	}
 	ml.mu.Unlock()


@@ -174,7 +174,10 @@ func (ml *ModelLoader) CheckIsLoaded(s string) ModelAddress {
 		if !ml.grpcProcesses[s].IsAlive() {
 			log.Debug().Msgf("GRPC Process is not responding: %s", s)
 			// stop and delete the process, this forces to re-load the model and re-create again the service
-			ml.deleteProcess(s)
+			err := ml.deleteProcess(s)
+			if err != nil {
+				log.Error().Err(err).Str("process", s).Msg("error stopping process")
+			}
 			return ""
 		}
 	}


@@ -1,6 +1,7 @@
 package model
 import (
+	"errors"
 	"fmt"
 	"os"
 	"os/signal"
@@ -14,8 +15,8 @@ import (
 	"github.com/rs/zerolog/log"
 )
-func (ml *ModelLoader) StopAllExcept(s string) {
-	ml.StopGRPC(func(id string, p *process.Process) bool {
+func (ml *ModelLoader) StopAllExcept(s string) error {
+	return ml.StopGRPC(func(id string, p *process.Process) bool {
 		if id != s {
 			for ml.models[id].GRPC(false, ml.wd).IsBusy() {
 				log.Debug().Msgf("%s busy. Waiting.", id)
@@ -43,16 +44,19 @@ func includeAllProcesses(_ string, _ *process.Process) bool {
 	return true
 }
-func (ml *ModelLoader) StopGRPC(filter GRPCProcessFilter) {
+func (ml *ModelLoader) StopGRPC(filter GRPCProcessFilter) error {
+	var err error = nil
 	for k, p := range ml.grpcProcesses {
 		if filter(k, p) {
-			ml.deleteProcess(k)
+			e := ml.deleteProcess(k)
+			err = errors.Join(err, e)
 		}
 	}
+	return err
 }
-func (ml *ModelLoader) StopAllGRPC() {
-	ml.StopGRPC(includeAllProcesses)
+func (ml *ModelLoader) StopAllGRPC() error {
+	return ml.StopGRPC(includeAllProcesses)
 }
 func (ml *ModelLoader) GetGRPCPID(id string) (int, error) {
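For the batch shutdown above, StopGRPC now aggregates per-process failures with errors.Join (Go 1.20+), which discards nil arguments and returns nil when every argument is nil, so a clean run still yields a nil error. A small standalone sketch of that behaviour; the backend names are made up:

package main

import (
	"errors"
	"fmt"
)

func main() {
	var err error // stays nil while nothing fails

	// Joining with nil leaves err unchanged.
	err = errors.Join(err, nil)
	fmt.Println(err) // <nil>

	// Each failure is accumulated; the joined error reports them all.
	err = errors.Join(err, fmt.Errorf("stopping %q: %w", "backend-a", errors.New("timeout")))
	err = errors.Join(err, fmt.Errorf("stopping %q: %w", "backend-b", errors.New("not running")))
	fmt.Println(err)
	// stopping "backend-a": timeout
	// stopping "backend-b": not running
}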


@@ -63,8 +63,9 @@ var _ = Describe("Integration tests for the stores backend(s) and internal APIs"
 	})
 	AfterEach(func() {
-		sl.StopAllGRPC()
-		err := os.RemoveAll(tmpdir)
+		err := sl.StopAllGRPC()
+		Expect(err).ToNot(HaveOccurred())
+		err = os.RemoveAll(tmpdir)
 		Expect(err).ToNot(HaveOccurred())
 	})