fix: update langchainjs (#2136)

Quick update of the langchainjs example to quiet down some dependency security scanner noise.

Signed-off-by: Dave Lee <dave@gray101.com>
This commit is contained in:
Dave 2024-04-25 18:47:35 -04:00 committed by GitHub
parent c8dd8e5ef4
commit eed285f9de
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 1407 additions and 255 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
{
"name": "langchainjs-localai-example",
"version": "0.1.0",
"version": "0.1.1",
"description": "Trivial Example of using langchain + the OpenAI API + LocalAI together",
"main": "index.mjs",
"scripts": {
@@ -15,7 +15,11 @@
"typescript": "^5.0.4"
},
"dependencies": {
"langchain": "^0.0.67",
"typeorm": "^0.3.15"
"@langchain/community": "^0.0.52",
"@langchain/openai": "^0.0.28",
"langchain": "^0.1.36"
},
"overrides": {
"@langchain/core": "0.1.5"
}
}

View File

@@ -1,15 +1,17 @@
import { OpenAIChat } from "langchain/llms/openai";
import { loadQAStuffChain } from "langchain/chains";
import { Document } from "langchain/document";
import { initializeAgentExecutorWithOptions } from "langchain/agents";
import {Calculator} from "langchain/tools/calculator";
import { pull } from "langchain/hub";
import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents";
import {Calculator} from "@langchain/community/tools/calculator";
import { ChatOpenAI } from "@langchain/openai";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
const pathToLocalAI = process.env['OPENAI_API_BASE'] || 'http://api:8080/v1';
const fakeApiKey = process.env['OPENAI_API_KEY'] || '-';
const modelName = process.env['MODEL_NAME'] || 'gpt-3.5-turbo';
function getModel(): OpenAIChat {
return new OpenAIChat({
function getModel(): ChatOpenAI {
return new ChatOpenAI({
prefixMessages: [
{
role: "system",
@@ -29,8 +31,8 @@ function getModel(): OpenAIChat {
// Minimal example.
export const run = async () => {
const model = getModel();
console.log(`about to model.call at ${new Date().toUTCString()}`);
const res = await model.call(
console.log(`about to model.invoke at ${new Date().toUTCString()}`);
const res = await model.invoke(
"What would be a good company name a company that makes colorful socks?"
);
console.log(`${new Date().toUTCString()}`);
@@ -47,7 +49,7 @@ export const run2 = async () => {
new Document({ pageContent: "Harrison went to Harvard." }),
new Document({ pageContent: "Ankush went to Princeton." }),
];
const resA = await chainA.call({
const resA = await chainA.invoke({
input_documents: docs,
question: "Where did Harrison go to college?",
});
@@ -58,22 +60,33 @@ await run2();
// Quickly thrown together example of using tools + agents.
// This seems like it should work, but it doesn't yet.
export const temporarilyBrokenToolTest = async () => {
export const toolAgentTest = async () => {
const model = getModel();
const executor = await initializeAgentExecutorWithOptions([new Calculator(true)], model, {
agentType: "zero-shot-react-description",
const prompt = await pull<ChatPromptTemplate>("hwchase17/openai-tools-agent");
const tools = [new Calculator()];
const agent = await createOpenAIToolsAgent({
llm: model,
tools: tools,
prompt: prompt
});
console.log("Loaded agent.");
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = `What is the value of (500 *2) + 350 - 13?`;
console.log(`Executing with input "${input}"...`);
const result = await executor.call({ input });
const result = await agentExecutor.invoke({ input });
console.log(`Got output ${result.output}`);
}
await temporarilyBrokenToolTest();
await toolAgentTest();

View File

@@ -8,7 +8,8 @@
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"isolatedModules": true,
"outDir": "./dist"
"outDir": "./dist",
"skipLibCheck": true
},
"include": ["src", "test"],
"exclude": ["node_modules", "dist"]