diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 00000000..64a88f43 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,19 @@ +enhancements: + - head-branch: ['^feature', 'feature'] + +kind/documentation: +- any: + - changed-files: + - any-glob-to-any-file: 'docs/*' + - changed-files: + - any-glob-to-any-file: '*.md' + +examples: +- any: + - changed-files: + - any-glob-to-any-file: 'examples/*' + +ci: +- any: + - changed-files: + - any-glob-to-any-file: '.github/*' \ No newline at end of file diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 00000000..e3ecf923 --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,12 @@ +name: "Pull Request Labeler" +on: +- pull_request_target + +jobs: + labeler: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 \ No newline at end of file diff --git a/.github/workflows/secscan.yaml b/.github/workflows/secscan.yaml new file mode 100644 index 00000000..a5221b40 --- /dev/null +++ b/.github/workflows/secscan.yaml @@ -0,0 +1,27 @@ +name: "Security Scan" + +# Run workflow each time code is pushed to your repository and on a schedule. +# The scheduled workflow runs every Sunday at 00:00 UTC. +on: + push: + schedule: + - cron: '0 0 * * 0' +
+jobs: + tests: + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - name: Checkout Source + uses: actions/checkout@v3 + - name: Run Gosec Security Scanner + uses: securego/gosec@master + with: + # we let the report content trigger a failure using the GitHub Security features. + args: '-no-fail -fmt sarif -out results.sarif ./...' 
+ - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v2 + with: + # Path to SARIF file relative to the root of the repository + sarif_file: results.sarif \ No newline at end of file diff --git a/core/backend/options.go b/core/backend/options.go index bc7fa5a4..143a9332 100644 --- a/core/backend/options.go +++ b/core/backend/options.go @@ -1,6 +1,7 @@ package backend import ( + "math/rand" "os" "path/filepath" @@ -33,12 +34,20 @@ func modelOpts(c config.BackendConfig, so *config.ApplicationConfig, opts []mode return opts } +func getSeed(c config.BackendConfig) int32 { + seed := int32(*c.Seed) + if seed == config.RAND_SEED { + seed = rand.Int31() + } + + return seed +} + func gRPCModelOpts(c config.BackendConfig) *pb.ModelOptions { b := 512 if c.Batch != 0 { b = c.Batch } - return &pb.ModelOptions{ CUDA: c.CUDA || c.Diffusers.CUDA, SchedulerType: c.Diffusers.SchedulerType, @@ -54,7 +63,7 @@ func gRPCModelOpts(c config.BackendConfig) *pb.ModelOptions { CLIPSkip: int32(c.Diffusers.ClipSkip), ControlNet: c.Diffusers.ControlNet, ContextSize: int32(*c.ContextSize), - Seed: int32(*c.Seed), + Seed: getSeed(c), NBatch: int32(b), NoMulMatQ: c.NoMulMatQ, DraftModel: c.DraftModel, @@ -129,7 +138,7 @@ func gRPCPredictOpts(c config.BackendConfig, modelPath string) *pb.PredictOption NKeep: int32(c.Keep), Batch: int32(c.Batch), IgnoreEOS: c.IgnoreEOS, - Seed: int32(*c.Seed), + Seed: getSeed(c), FrequencyPenalty: float32(c.FrequencyPenalty), MLock: *c.MMlock, MMap: *c.MMap, diff --git a/core/config/backend_config.go b/core/config/backend_config.go index 9b227578..25edd343 100644 --- a/core/config/backend_config.go +++ b/core/config/backend_config.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "io/fs" - "math/rand" "os" "path/filepath" "sort" @@ -20,6 +19,10 @@ import ( "github.com/charmbracelet/glamour" ) +const ( + RAND_SEED = -1 +) + type BackendConfig struct { schema.PredictionOptions `yaml:"parameters"` Name string `yaml:"name"` @@ -218,7 +221,7 @@ func (cfg 
*BackendConfig) SetDefaults(opts ...ConfigLoaderOption) { if cfg.Seed == nil { // random number generator seed - defaultSeed := int(rand.Int31()) + defaultSeed := RAND_SEED cfg.Seed = &defaultSeed } diff --git a/docs/content/docs/features/text-generation.md b/docs/content/docs/features/text-generation.md index 1d0e1e9e..c11894e7 100644 --- a/docs/content/docs/features/text-generation.md +++ b/docs/content/docs/features/text-generation.md @@ -304,6 +304,7 @@ The backend will automatically download the required files in order to run the m | Type | Description | | --- | --- | | `AutoModelForCausalLM` | `AutoModelForCausalLM` is a model that can be used to generate sequences. | +| `OVModelForCausalLM` | for OpenVINO models | | N/A | Defaults to `AutoModel` | @@ -324,4 +325,35 @@ curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d "prompt": "Hello, my name is", "temperature": 0.1, "top_p": 0.1 }' +``` + +#### Examples + +##### OpenVINO + +A model configuration file for OpenVINO and the Starling model: + +```yaml +name: starling-openvino +backend: transformers +parameters: + model: fakezeta/Starling-LM-7B-beta-openvino-int8 +context_size: 8192 +threads: 6 +f16: true +type: OVModelForCausalLM +stopwords: +- <|end_of_turn|> +- <|endoftext|> +prompt_cache_path: "cache" +prompt_cache_all: true +template: + chat_message: | + {{if eq .RoleName "system"}}{{.Content}}<|end_of_turn|>{{end}}{{if eq .RoleName "assistant"}}<|end_of_turn|>GPT4 Correct Assistant: {{.Content}}<|end_of_turn|>{{end}}{{if eq .RoleName "user"}}GPT4 Correct User: {{.Content}}{{end}} + + chat: | + {{.Input}}<|end_of_turn|>GPT4 Correct Assistant: + + completion: | + {{.Input}} ``` \ No newline at end of file diff --git a/tests/e2e-aio/e2e_suite_test.go b/tests/e2e-aio/e2e_suite_test.go index fa61c408..0aa68230 100644 --- a/tests/e2e-aio/e2e_suite_test.go +++ b/tests/e2e-aio/e2e_suite_test.go @@ -23,6 +23,7 @@ var containerImageTag = os.Getenv("LOCALAI_IMAGE_TAG") var modelsDir = 
os.Getenv("LOCALAI_MODELS_DIR") var apiPort = os.Getenv("LOCALAI_API_PORT") var apiEndpoint = os.Getenv("LOCALAI_API_ENDPOINT") +var apiKey = os.Getenv("LOCALAI_API_KEY") func TestLocalAI(t *testing.T) { RegisterFailHandler(Fail) @@ -38,11 +39,11 @@ var _ = BeforeSuite(func() { var defaultConfig openai.ClientConfig if apiEndpoint == "" { startDockerImage() - defaultConfig = openai.DefaultConfig("") + defaultConfig = openai.DefaultConfig(apiKey) defaultConfig.BaseURL = "http://localhost:" + apiPort + "/v1" } else { fmt.Println("Default ", apiEndpoint) - defaultConfig = openai.DefaultConfig("") + defaultConfig = openai.DefaultConfig(apiKey) defaultConfig.BaseURL = apiEndpoint }