import weaviate
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.llms import LOCALAI_DEFAULTS, OpenAILike
from llama_index.vector_stores import WeaviateVectorStore
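
# NOTE: these imports follow the pre-0.10 llama_index package layout; newer
# releases split the library into llama-index-core plus per-integration
# packages and replace ServiceContext with Settings, so pin an older
# llama-index if you want to run this as-is.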
# Weaviate vector store setup
vector_store = WeaviateVectorStore(
    weaviate_client=weaviate.Client("http://weviate.default"), index_name="AIChroma"
)
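
# "weviate.default" is a Kubernetes-style service DNS name; point the client
# at your own instance instead (e.g. "http://localhost:8080" for a local
# Weaviate). With the v3 Python client you can sanity-check connectivity
# before querying:
#
#     weaviate.Client("http://weviate.default").is_ready()  # True once up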
# LLM setup, served via LocalAI
llm = OpenAILike(temperature=0, model="gpt-3.5-turbo", **LOCALAI_DEFAULTS)
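
# LOCALAI_DEFAULTS supplies the OpenAI-compatible connection settings for a
# LocalAI server (a placeholder API key plus a localhost api_base); if your
# LocalAI instance runs elsewhere, override api_base on OpenAILike, e.g.
# api_base="http://localai.default/v1" (that hostname is illustrative).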
# Service context setup
service_context = ServiceContext.from_defaults(llm=llm, embed_model="local")
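
# embed_model="local" embeds with a local HuggingFace model rather than an
# embeddings API (at the time, a small BGE model such as BAAI/bge-small-en
# was the default); "local:<model-name>" selects a specific model.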
# Load index from stored vectors
index = VectorStoreIndex.from_vector_store(
    vector_store, service_context=service_context
)
# Query engine setup
query_engine = index.as_query_engine(
    similarity_top_k=1, vector_store_query_mode="hybrid"
)
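
# Hybrid mode has Weaviate blend BM25 keyword scores with vector similarity;
# the optional alpha parameter tunes the mix (0 = pure keyword, 1 = pure
# vector), e.g.:
#
#     query_engine = index.as_query_engine(
#         similarity_top_k=1, vector_store_query_mode="hybrid", alpha=0.5
#     )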
# Query example
response = query_engine.query("What is LocalAI?")
print(response)
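
# The Response object also carries the retrieved chunks, which is a quick way
# to check what hybrid search pulled out of Weaviate:
for source in response.source_nodes:
    print(source.score, source.node.get_content()[:200])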