diff --git a/examples/llms/providers/groq.ts b/examples/llms/providers/groq.ts
index 510bf650..5edec49b 100644
--- a/examples/llms/providers/groq.ts
+++ b/examples/llms/providers/groq.ts
@@ -12,10 +12,10 @@ const llm = new GroqChatLLM({
 });
 console.info("Meta", await llm.meta());
 
-const response = await llm.stream([
+const response = await llm.generate([
   BaseMessage.of({
     role: "user",
     text: "Hello world!",
   }),
 ]);
-console.info(response);
+console.info(response.getTextContent());
diff --git a/package.json b/package.json
index 3712d82a..2bc57c54 100644
--- a/package.json
+++ b/package.json
@@ -87,7 +87,6 @@
     "dirty-json": "0.9.2",
     "duck-duck-scrape": "^2.2.5",
     "fast-xml-parser": "^4.4.1",
-    "groq-sdk": "^0.7.0",
     "header-generator": "^2.1.54",
     "joplin-turndown-plugin-gfm": "^1.0.12",
     "mathjs": "^13.1.1",
@@ -111,6 +110,7 @@
     "@langchain/community": "~0.2.28",
     "@langchain/core": "~0.2.27",
     "@langchain/langgraph": "~0.0.34",
+    "groq-sdk": "^0.7.0",
     "ollama": "^0.5.8",
     "openai": "^4.56.0",
     "openai-chat-tokens": "^0.2.8"
@@ -140,6 +140,7 @@
     "eslint-config-prettier": "^9.1.0",
     "eslint-plugin-unused-imports": "^4.1.3",
     "glob": "^11.0.0",
+    "groq-sdk": "^0.7.0",
     "husky": "^9.1.5",
     "langchain": "~0.2.16",
     "lint-staged": "^15.2.9",
diff --git a/src/adapters/groq/chat.ts b/src/adapters/groq/chat.ts
index f665d450..d2fe14ce 100644
--- a/src/adapters/groq/chat.ts
+++ b/src/adapters/groq/chat.ts
@@ -129,7 +129,7 @@ export class GroqChatLLM extends ChatLLM {
     } else if (this.modelId.includes("llava-v1.5")) {
       return { tokenLimit: 4 * 1024 };
     } else if (this.modelId.includes("llama-3.1-70b") || this.modelId.includes("llama-3.1-8b")) {
-      return { tokenLimit: 131 * 1024 };
+      return { tokenLimit: 128 * 1024 };
    } else if (this.modelId.includes("mixtral-8x7b")) {
       return { tokenLimit: 32 * 1024 };
     }
diff --git a/yarn.lock b/yarn.lock
index 66ea5bad..11d1359d 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -2590,6 +2590,7 @@ __metadata:
     "@langchain/community": ~0.2.28
     "@langchain/core": ~0.2.27
     "@langchain/langgraph": ~0.0.34
+    groq-sdk: ^0.7.0
     ollama: ^0.5.8
     openai: ^4.56.0
     openai-chat-tokens: ^0.2.8
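For reference, a minimal sketch of how `examples/llms/providers/groq.ts` reads after this change. The import paths and `modelId` are assumptions (the hunk above only shows the example body from line 12 onward); the `generate()` call and `getTextContent()` accessor come directly from the diff.

```ts
// Sketch of examples/llms/providers/groq.ts after this change.
// NOTE: import paths and modelId are assumptions, not shown in the hunk above.
import { BaseMessage } from "bee-agent-framework/llms/primitives/message";
import { GroqChatLLM } from "bee-agent-framework/adapters/groq/chat";

const llm = new GroqChatLLM({
  modelId: "llama-3.1-8b-instant", // hypothetical model id
});

console.info("Meta", await llm.meta());

// generate() resolves with the complete chat output, so the reply text can be
// read directly via getTextContent() — unlike stream(), which yields chunks
// and has no single text payload to log.
const response = await llm.generate([
  BaseMessage.of({
    role: "user",
    text: "Hello world!",
  }),
]);
console.info(response.getTextContent());
```

The `tokenLimit` fix in `src/adapters/groq/chat.ts` is simple arithmetic: `131 * 1024` evaluates to 134144, whereas the Llama 3.1 context window is 131072 tokens, which is exactly `128 * 1024`.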