package openai
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
fiberContext "github.com/go-skynet/LocalAI/core/http/ctx"
"github.com/go-skynet/LocalAI/core/schema"
"github.com/go-skynet/LocalAI/core/services"
"github.com/gofiber/fiber/v2"
"github.com/rs/zerolog/log"
"github.com/valyala/fasthttp"
)

// ChatEndpoint is the OpenAI Chat Completion API endpoint https://platform.openai.com/docs/api-reference/chat/create
// @Summary Generate a chat completion for a given prompt and model.
// @Param request body schema.OpenAIRequest true "query params"
// @Success 200 {object} schema.OpenAIResponse "Response"
// @Router /v1/chat/completions [post]
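//
// Example request (illustrative only; host, port, and model name are placeholders):
//
//	curl http://localhost:8080/v1/chat/completions \
//	  -H "Content-Type: application/json" \
//	  -d '{"model": "gpt-4", "messages": [{"role": "user", "content": "How are you?"}]}'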
func ChatEndpoint(fce *fiberContext.FiberContextExtractor, oais *services.OpenAIService) func(c *fiber.Ctx) error {
return func(c *fiber.Ctx) error {
_, request, err := fce.OpenAIRequestFromContext(c, false)
if err != nil {
return fmt.Errorf("failed reading parameters from request: %w", err)
}
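		// Hand the request to the OpenAI service layer, which returns a trace ID plus
		// channels for the final result and (when streaming) per-token updates.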
traceID, finalResultChannel, _, tokenChannel, err := oais.Chat(request, false, request.Stream)
if err != nil {
return err
}
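		// Streaming requests are answered as Server-Sent Events; non-streaming
		// requests fall through to the single JSON response below.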
if request.Stream {
			log.Debug().Msg("Chat Stream request received")
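			// Standard SSE headers so intermediaries keep the connection open and
			// deliver chunks as they are produced.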
c.Context().SetContentType("text/event-stream")
c.Set("Cache-Control", "no-cache")
c.Set("Connection", "keep-alive")
c.Set("Transfer-Encoding", "chunked")
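			// fasthttp invokes this writer asynchronously; all streamed output for
			// this request is produced inside the closure.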
c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
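				// Track the latest usage figures and whether any tool was called so the
				// closing chunk can report them.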
usage := &schema.OpenAIUsage{}
toolsCalled := false
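				// Relay each token event from the service as an SSE "data:" line.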
for ev := range tokenChannel {
if ev.Error != nil {
log.Debug().Err(ev.Error).Msg("chat streaming responseChannel error")
request.Cancel()
break
}
usage = &ev.Value.Usage // Copy a pointer to the latest usage chunk so that the stop message can reference it
					if len(ev.Value.Choices) > 0 && ev.Value.Choices[0].Delta != nil && len(ev.Value.Choices[0].Delta.ToolCalls) > 0 {
toolsCalled = true
}
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
					// ev.Error was already handled above (the loop breaks on error), so
					// only successful values reach this point.
					enc.Encode(ev.Value)
log.Debug().Msgf("chat streaming sending chunk: %s", buf.String())
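					// json.Encoder.Encode appends a trailing newline, so the single "\n"
					// here still terminates the SSE event with a blank line.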
					_, err := fmt.Fprintf(w, "data: %s\n", buf.String())
if err != nil {
						log.Debug().Err(err).Msg("Sending chunk failed")
request.Cancel()
break
}
err = w.Flush()
if err != nil {
log.Debug().Msg("error while flushing, closing connection")
request.Cancel()
break
}
}
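				// Derive the finish_reason for the closing chunk: "stop" unless the
				// model emitted tool or function calls.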
finishReason := "stop"
				if toolsCalled && len(request.Tools) > 0 {
					finishReason = "tool_calls"
				} else if toolsCalled {
					// Legacy function-calling requests carry functions rather than tools.
					finishReason = "function_call"
				}
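				// Emit a final, content-less chunk carrying the finish_reason and
				// accumulated usage, matching the OpenAI streaming format.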
resp := &schema.OpenAIResponse{
ID: traceID.ID,
Created: traceID.Created,
Model: request.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []schema.Choice{
{
FinishReason: finishReason,
Index: 0,
Delta: &schema.Message{Content: ""},
}},
Object: "chat.completion.chunk",
Usage: *usage,
}
respData, _ := json.Marshal(resp)
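				// Close the stream with the final chunk and the OpenAI-style "[DONE]" sentinel.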
w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
w.WriteString("data: [DONE]\n\n")
w.Flush()
}))
return nil
}
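		// Non-streaming path: block until the service publishes the final result.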
		// TODO: is it correct for this path to be exclusive with Stream, or do we need to issue both responses?
rawResponse := <-finalResultChannel
if rawResponse.Error != nil {
return rawResponse.Error
}
jsonResult, _ := json.Marshal(rawResponse.Value)
log.Debug().Str("jsonResult", string(jsonResult)).Msg("Chat Final Response")
// Return the prediction in the response body
return c.JSON(rawResponse.Value)
}
}