From 8379cce209bdb6f29e3eaffc9213e260b571311a Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Tue, 18 Jul 2023 00:04:21 +0200
Subject: [PATCH] example(functions): Add OpenAI functions example (#767)

---
 examples/README.md                     |  8 +++
 examples/functions/.env                |  9 +++
 examples/functions/Dockerfile          |  5 ++
 examples/functions/README.md           | 18 ++++
 examples/functions/docker-compose.yaml | 23 ++++
 examples/functions/functions-openai.py | 76 ++++++++++++++++++++++++++
 examples/functions/requirements.txt    |  2 +
 7 files changed, 141 insertions(+)
 create mode 100644 examples/functions/.env
 create mode 100644 examples/functions/Dockerfile
 create mode 100644 examples/functions/README.md
 create mode 100644 examples/functions/docker-compose.yaml
 create mode 100644 examples/functions/functions-openai.py
 create mode 100644 examples/functions/requirements.txt

diff --git a/examples/README.md b/examples/README.md
index 29a4f857..e938b81f 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -64,6 +64,14 @@ A ready to use example to show e2e how to integrate LocalAI with langchain
 
 [Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-python/)
 
+### LocalAI functions
+
+_by [@mudler](https://github.com/mudler)_
+
+A ready-to-use example showing how to use OpenAI functions with LocalAI
+
+[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/functions/)
+
 ### LocalAI WebUI
 
 _by [@dhruvgera](https://github.com/dhruvgera)_
diff --git a/examples/functions/.env b/examples/functions/.env
new file mode 100644
index 00000000..8dcea057
--- /dev/null
+++ b/examples/functions/.env
@@ -0,0 +1,9 @@
+OPENAI_API_KEY=sk---anystringhere
+OPENAI_API_BASE=http://api:8080/v1
+# Models to preload at start
+# Here we configure openllama as gpt-3.5-turbo
+PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/openllama-7b-open-instruct.yaml", "name": "gpt-3.5-turbo"}]
+
+## Change the default number of threads
+#THREADS=14
+
diff --git a/examples/functions/Dockerfile b/examples/functions/Dockerfile
new file mode 100644
index 00000000..b23a3c6f
--- /dev/null
+++ b/examples/functions/Dockerfile
@@ -0,0 +1,5 @@
+FROM python:3.10-bullseye
+COPY . /app
+WORKDIR /app
+RUN pip install --no-cache-dir -r requirements.txt
+ENTRYPOINT [ "python", "./functions-openai.py" ]
\ No newline at end of file
diff --git a/examples/functions/README.md b/examples/functions/README.md
new file mode 100644
index 00000000..8bd4cf5d
--- /dev/null
+++ b/examples/functions/README.md
@@ -0,0 +1,18 @@
+# LocalAI functions
+
+An example of using LocalAI functions; see the [OpenAI](https://openai.com/blog/function-calling-and-other-api-updates) blog post for background.
+
+## Run
+
+```bash
+# Clone LocalAI
+git clone https://github.com/go-skynet/LocalAI
+
+cd LocalAI/examples/functions
+
+docker-compose run --rm functions
+```
+
+Note: the example automatically downloads the `openllama` model, as it is under a permissive license.
+
+To use a different model from the [model-gallery](https://github.com/go-skynet/model-gallery), edit `PRELOAD_MODELS` in the `.env` configuration file.
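The last line of the README above is the example's main knob: swapping the model only requires editing `PRELOAD_MODELS` in `.env`. As a sketch, pointing it at a different [model-gallery](https://github.com/go-skynet/model-gallery) definition might look like the line below (the `gpt4all-j.yaml` file name is an assumption; substitute any gallery YAML):

```
# Hypothetical alternative: preload gpt4all-j under the gpt-3.5-turbo alias
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]
```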
diff --git a/examples/functions/docker-compose.yaml b/examples/functions/docker-compose.yaml
new file mode 100644
index 00000000..e6aac91d
--- /dev/null
+++ b/examples/functions/docker-compose.yaml
@@ -0,0 +1,23 @@
+version: "3.9"
+services:
+  api:
+    image: quay.io/go-skynet/local-ai:master
+    ports:
+      - 8080:8080
+    env_file:
+      - .env
+    environment:
+      - DEBUG=true
+      - MODELS_PATH=/models
+    volumes:
+      - ./models:/models:cached
+    command: ["/usr/bin/local-ai"]
+  functions:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    depends_on:
+      api:
+        condition: service_healthy
+    env_file:
+      - .env
\ No newline at end of file
diff --git a/examples/functions/functions-openai.py b/examples/functions/functions-openai.py
new file mode 100644
index 00000000..4bb63368
--- /dev/null
+++ b/examples/functions/functions-openai.py
@@ -0,0 +1,76 @@
+import openai
+import json
+
+# Example dummy function hard coded to return the same weather
+# In production, this could be your backend API or an external API
+def get_current_weather(location, unit="fahrenheit"):
+    """Get the current weather in a given location"""
+    weather_info = {
+        "location": location,
+        "temperature": "72",
+        "unit": unit,
+        "forecast": ["sunny", "windy"],
+    }
+    return json.dumps(weather_info)
+
+
+def run_conversation():
+    # Step 1: send the conversation and available functions to GPT
+    messages = [{"role": "user", "content": "What's the weather like in Boston?"}]
+    functions = [
+        {
+            "name": "get_current_weather",
+            "description": "Get the current weather in a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The city and state, e.g. San Francisco, CA",
+                    },
+                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
+                },
+                "required": ["location"],
+            },
+        }
+    ]
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=messages,
+        functions=functions,
+        function_call="auto",  # auto is default, but we'll be explicit
+    )
+    response_message = response["choices"][0]["message"]
+
+    # Step 2: check if GPT wanted to call a function
+    if response_message.get("function_call"):
+        # Step 3: call the function
+        # Note: the JSON response may not always be valid; be sure to handle errors
+        available_functions = {
+            "get_current_weather": get_current_weather,
+        }  # only one function in this example, but you can have multiple
+        function_name = response_message["function_call"]["name"]
+        function_to_call = available_functions[function_name]
+        function_args = json.loads(response_message["function_call"]["arguments"])
+        function_response = function_to_call(
+            location=function_args.get("location"),
+            unit=function_args.get("unit"),
+        )
+
+        # Step 4: send the info on the function call and function response to GPT
+        messages.append(response_message)  # extend conversation with assistant's reply
+        messages.append(
+            {
+                "role": "function",
+                "name": function_name,
+                "content": function_response,
+            }
+        )  # extend conversation with function response
+        second_response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=messages,
+        )  # get a new response from GPT where it can see the function response
+        return second_response
+
+
+print(run_conversation())
\ No newline at end of file
diff --git a/examples/functions/requirements.txt b/examples/functions/requirements.txt
new file mode 100644
index 00000000..7164e011
--- /dev/null
+++ b/examples/functions/requirements.txt
@@ -0,0 +1,2 @@
+langchain==0.0.234
+openai==0.27.8
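The Step 3 comment in `functions-openai.py` warns that the model's JSON arguments may not always be valid, yet the script calls `json.loads` unguarded. Below is a minimal sketch of the guarded parsing the comment suggests (the `safe_parse_arguments` helper is hypothetical, not part of this patch):

```python
import json


def safe_parse_arguments(raw_arguments: str) -> dict:
    """Parse model-supplied function-call arguments, tolerating bad JSON."""
    try:
        parsed = json.loads(raw_arguments)
    except json.JSONDecodeError:
        # Local models occasionally emit truncated or malformed JSON;
        # treating that as "no arguments" lets the caller fall back to defaults
        return {}
    # The schema declares an object, so reject top-level arrays/strings/numbers
    return parsed if isinstance(parsed, dict) else {}


# A truncated payload parses to {}, a valid one to a dict
print(safe_parse_arguments('{"location": "Boston, MA"'))   # -> {}
print(safe_parse_arguments('{"location": "Boston, MA"}'))  # -> {'location': 'Boston, MA'}
```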