diff --git a/.github/workflows/validate_new_notebooks.yml b/.github/workflows/validate_new_notebooks.yml index cce0fc05b67f..bb6bffebc062 100644 --- a/.github/workflows/validate_new_notebooks.yml +++ b/.github/workflows/validate_new_notebooks.yml @@ -36,23 +36,23 @@ jobs: - name: Get changed files id: changed-files uses: tj-actions/changed-files@v44 - - name: Check for new or modified notebooks + - name: Check for new or modified notebooks in docs/core_docs id: check_notebooks run: | - notebooks=$(echo '${{ steps.changed-files.outputs.all_changed_files }}' | tr ' ' '\n' | grep '\.ipynb$' || true) + notebooks=$(echo '${{ steps.changed-files.outputs.all_changed_files }}' | tr ' ' '\n' | grep '^docs/core_docs/.*\.ipynb$' || true) echo "Affected notebooks: $notebooks" echo "has_affected_notebooks=$([ -n "$notebooks" ] && echo 'true' || echo 'false')" >> $GITHUB_OUTPUT - name: Build examples if: steps.check_notebooks.outputs.has_affected_notebooks == 'true' run: yarn turbo:command build --filter=examples - - name: Validate affected notebooks + - name: Validate affected notebooks in docs/core_docs if: steps.check_notebooks.outputs.has_affected_notebooks == 'true' run: | - notebooks=$(echo '${{ steps.changed-files.outputs.all_changed_files }}' | tr ' ' '\n' | grep '\.ipynb$' || true) + notebooks=$(echo '${{ steps.changed-files.outputs.all_changed_files }}' | tr ' ' '\n' | grep '^docs/core_docs/.*\.ipynb$' || true) if [ -n "$notebooks" ]; then for notebook in $notebooks; do yarn notebook:validate "$notebook" done else - echo "No notebooks to validate." + echo "No notebooks in docs/core_docs to validate." fi \ No newline at end of file diff --git a/deno.json b/deno.json index 35e488d9e0e0..dbb94a073424 100644 --- a/deno.json +++ b/deno.json @@ -30,6 +30,14 @@ "youtubei.js": "npm:/youtubei.js", "youtube-transcript": "npm:/youtube-transcript", "neo4j-driver": "npm:/neo4j-driver", - "axios": "npm:/axios" + "axios": "npm:/axios", + "@mendable/firecrawl-js": "npm:/@mendable/firecrawl-js", + "@aws-crypto/sha256-js": "npm:/@aws-crypto/sha256-js", + "@aws-sdk/credential-provider-node": "npm:/@aws-sdk/credential-provider-node", + "@smithy/protocol-http": "npm:/@smithy/protocol-http", + "@smithy/signature-v4": "npm:/@smithy/signature-v4", + "@smithy/eventstream-codec": "npm:/@smithy/eventstream-codec", + "@smithy/util-utf8": "npm:/@smithy/util-utf8", + "@aws-sdk/types": "npm:/@aws-sdk/types" } } \ No newline at end of file diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore index df8abd01615f..3b2f9ca94e78 100644 --- a/docs/core_docs/.gitignore +++ b/docs/core_docs/.gitignore @@ -34,6 +34,26 @@ yarn-error.log* /.quarto/ # AUTO_GENERATED_DOCS +docs/tutorials/rag.md +docs/tutorials/rag.mdx +docs/tutorials/query_analysis.md +docs/tutorials/query_analysis.mdx +docs/tutorials/qa_chat_history.md +docs/tutorials/qa_chat_history.mdx +docs/tutorials/pdf_qa.md +docs/tutorials/pdf_qa.mdx +docs/tutorials/local_rag.md +docs/tutorials/local_rag.mdx +docs/tutorials/llm_chain.md +docs/tutorials/llm_chain.mdx +docs/tutorials/graph.md +docs/tutorials/graph.mdx +docs/tutorials/extraction.md +docs/tutorials/extraction.mdx +docs/tutorials/classification.md +docs/tutorials/classification.mdx +docs/tutorials/chatbot.md +docs/tutorials/chatbot.mdx docs/how_to/trim_messages.md docs/how_to/trim_messages.mdx docs/how_to/tools_prompting.md @@ -188,27 +208,29 @@ docs/how_to/assign.md docs/how_to/assign.mdx docs/how_to/agent_executor.md docs/how_to/agent_executor.mdx -docs/tutorials/rag.md -docs/tutorials/rag.mdx 
-docs/tutorials/query_analysis.md -docs/tutorials/query_analysis.mdx -docs/tutorials/qa_chat_history.md -docs/tutorials/qa_chat_history.mdx -docs/tutorials/pdf_qa.md -docs/tutorials/pdf_qa.mdx -docs/tutorials/local_rag.md -docs/tutorials/local_rag.mdx -docs/tutorials/llm_chain.md -docs/tutorials/llm_chain.mdx -docs/tutorials/graph.md -docs/tutorials/graph.mdx -docs/tutorials/extraction.md -docs/tutorials/extraction.mdx -docs/tutorials/classification.md -docs/tutorials/classification.mdx -docs/tutorials/chatbot.md -docs/tutorials/chatbot.mdx docs/integrations/llms/mistral.md docs/integrations/llms/mistral.mdx +docs/integrations/chat/togetherai.md +docs/integrations/chat/togetherai.mdx +docs/integrations/chat/openai.md +docs/integrations/chat/openai.mdx +docs/integrations/chat/ollama.md +docs/integrations/chat/ollama.mdx docs/integrations/chat/mistral.md -docs/integrations/chat/mistral.mdx \ No newline at end of file +docs/integrations/chat/mistral.mdx +docs/integrations/chat/groq.md +docs/integrations/chat/groq.mdx +docs/integrations/chat/google_vertex_ai.md +docs/integrations/chat/google_vertex_ai.mdx +docs/integrations/chat/google_generativeai.md +docs/integrations/chat/google_generativeai.mdx +docs/integrations/chat/fireworks.md +docs/integrations/chat/fireworks.mdx +docs/integrations/chat/cohere.md +docs/integrations/chat/cohere.mdx +docs/integrations/chat/azure.md +docs/integrations/chat/azure.mdx +docs/integrations/chat/anthropic.md +docs/integrations/chat/anthropic.mdx +docs/integrations/document_loaders/web_loaders/web_cheerio.md +docs/integrations/document_loaders/web_loaders/web_cheerio.mdx \ No newline at end of file diff --git a/docs/core_docs/docs/integrations/chat/anthropic.ipynb b/docs/core_docs/docs/integrations/chat/anthropic.ipynb new file mode 100644 index 000000000000..3aea237d0a3a --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/anthropic.ipynb @@ -0,0 +1,1011 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Anthropic\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatAnthropic\n", + "\n", + "This will help you get started with ChatAnthropic [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all ChatAnthropic features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/anthropic/) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatAnthropic](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html) | [@langchain/anthropic](https://api.js.langchain.com/modules/langchain_anthropic.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/anthropic?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/anthropic?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "You'll need to sign up and obtain an [Anthropic API key](https://www.anthropic.com/), and install the `@langchain/anthropic` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [Anthropic's website](https://www.anthropic.com/) to sign up to Anthropic and generate an API key. Once you've done this set the `ANTHROPIC_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export ANTHROPIC_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatAnthropic integration lives in the `@langchain/anthropic` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/anthropic\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\" \n", + "\n", + "const llm = new ChatAnthropic({\n", + " model: \"claude-3-haiku-20240307\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage 
{\n", + " \"id\": \"msg_01M9yt3aSqKJKM1RnZF4f44Q\",\n", + " \"content\": \"Voici la traduction en français :\\n\\nJ'adore la programmation.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01M9yt3aSqKJKM1RnZF4f44Q\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 29,\n", + " \"output_tokens\": 20\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01M9yt3aSqKJKM1RnZF4f44Q\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 29,\n", + " \"output_tokens\": 20\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 29,\n", + " \"output_tokens\": 20,\n", + " \"total_tokens\": 49\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Voici la traduction en français :\n", + "\n", + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_012gUKUG65teaois31W3bfGF\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_012gUKUG65teaois31W3bfGF\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 23,\n", + " \"output_tokens\": 11\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_012gUKUG65teaois31W3bfGF\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 23,\n", + " \"output_tokens\": 11\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 23,\n", + " \"output_tokens\": 11,\n", + " \"total_tokens\": 34\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " 
]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Multimodal inputs\n", + "\n", + "Claude-3 models support image multimodal inputs. The passed input must be a base64 encoded image with the\n", + "filetype as a prefix (e.g. `data:image/png;base64,{YOUR_BASE64_ENCODED_DATA}`).\n", + "Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "1cb65e95", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_01AuGpm6xbacTwoUFdNiCnzu\",\n", + " \"content\": \"The image shows a hot dog. It consists of a cylindrical bread roll or bun that has been sliced lengthwise, revealing the bright red hot dog sausage filling inside. The hot dog sausage appears to be made from seasoned and smoked meat. This classic fast food item is a popular snack or meal, commonly enjoyed at sporting events, cookouts, and casual eateries.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01AuGpm6xbacTwoUFdNiCnzu\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 276,\n", + " \"output_tokens\": 88\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01AuGpm6xbacTwoUFdNiCnzu\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 276,\n", + " \"output_tokens\": 88\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 276,\n", + " \"output_tokens\": 88,\n", + " \"total_tokens\": 364\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import fs from \"fs/promises\";\n", + "\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const imageData2 = await fs.readFile(\"../../../../../examples/hotdog.jpg\");\n", + "const llm2 = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + "});\n", + "const message2 = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"What's in this image?\",\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: {\n", + " url: `data:image/jpeg;base64,${imageData2.toString(\"base64\")}`,\n", + " },\n", + " },\n", + " ],\n", + "});\n", + "\n", + "await llm2.invoke([message2]);" + ] + }, + { + "cell_type": "markdown", + "id": "5c14fbc0", + "metadata": {}, + "source": [ + "See [the official docs](https://docs.anthropic.com/claude/docs/vision#what-image-file-types-does-claude-support)\n", + "for a complete list of supported file types." + ] + }, + { + "cell_type": "markdown", + "id": "9bce78a1", + "metadata": {}, + "source": [ + "## Agents\n", + "\n", + "Anthropic models that support tool calling can be used in the Tool Calling agent. 
Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "0648b504", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " index: 0,\n", + " type: 'text',\n", + " text: '\\n\\nThe current weather in San Francisco, CA is 28°C.'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { z } from \"zod\";\n", + "\n", + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n", + "\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const llm3 = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + " temperature: 0,\n", + "});\n", + "\n", + "// Prompt template must have \"input\" and \"agent_scratchpad input variables\"\n", + "const prompt3 = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"placeholder\", \"{chat_history}\"],\n", + " [\"human\", \"{input}\"],\n", + " [\"placeholder\", \"{agent_scratchpad}\"],\n", + "]);\n", + "\n", + "const currentWeatherTool3 = tool(async () => \"28 °C\", {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather in a given location\",\n", + " schema: z.object({\n", + " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n", + " }),\n", + "});\n", + "\n", + "const agent3 = createToolCallingAgent({\n", + " llm: llm3,\n", + " tools: [currentWeatherTool3],\n", + " prompt: prompt3,\n", + "});\n", + "\n", + "const agentExecutor3 = new AgentExecutor({\n", + " agent: agent3,\n", + " tools: [currentWeatherTool3],\n", + "});\n", + "\n", + "const input3 = \"What's the weather like in SF?\";\n", + "const result3 = await agentExecutor3.invoke({ input: input3 });\n", + "\n", + "console.log(result3.output);" + ] + }, + { + "cell_type": "markdown", + "id": "d452d4b6", + "metadata": {}, + "source": [ + "## Custom headers\n", + "\n", + "You can pass custom headers in your requests like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "41943f0a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_013Ft3kN62gNtiMWRqg6xxt8\",\n", + " \"content\": \"The sky appears blue due to a phenomenon called Rayleigh scattering. Here's a brief explanation:\\n\\n1) Sunlight is made up of different wavelengths of light, including the visible spectrum that we see as colors.\\n\\n2) As sunlight passes through the Earth's atmosphere, the different wavelengths of light interact with the gas molecules in the air.\\n\\n3) The shorter wavelengths of light, such as the blue and violet colors, get scattered more easily by the tiny gas molecules. 
This is because the wavelengths are similar in size to the molecules.\\n\\n4) The longer wavelengths of light, such as red and orange, get scattered much less by the gas molecules and travel more directly through the atmosphere.\\n\\n5) The blue wavelengths that are scattered in different directions become scattered across the entire sky, making the sky appear blue to our eyes.\\n\\n6) During sunrise and sunset, the sun's rays travel through more atmosphere before reaching our eyes, causing the blue light to get scattered away and allowing more of the red/orange wavelengths to pass through, giving those colors in the sky.\\n\\nSo in essence, the abundant scattering of blue light by the gas molecules in the atmosphere is what causes the sky to appear blue during the daytime.\",\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_013Ft3kN62gNtiMWRqg6xxt8\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 13,\n", + " \"output_tokens\": 272\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_013Ft3kN62gNtiMWRqg6xxt8\",\n", + " \"model\": \"claude-3-sonnet-20240229\",\n", + " \"stop_reason\": \"end_turn\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 13,\n", + " \"output_tokens\": 272\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 13,\n", + " \"output_tokens\": 272,\n", + " \"total_tokens\": 285\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "\n", + "const llm4 = new ChatAnthropic({\n", + " model: \"claude-3-sonnet-20240229\",\n", + " maxTokens: 1024,\n", + " clientOptions: {\n", + " defaultHeaders: {\n", + " \"X-Api-Key\": process.env.ANTHROPIC_API_KEY,\n", + " },\n", + " },\n", + "});\n", + "\n", + "const res4 = await llm4.invoke(\"Why is the sky blue?\");\n", + "\n", + "console.log(res4);" + ] + }, + { + "cell_type": "markdown", + "id": "985c4b4b", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "The Anthropic API supports tool calling, along with multi-tool calling. 
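Before the worked examples below, here is a minimal sketch of attaching a single tool via `bindTools` — the `echo` tool, its schema, and the model choice are illustrative assumptions rather than part of the official examples, and `ANTHROPIC_API_KEY` is assumed to be set:

```typescript
import { ChatAnthropic } from "@langchain/anthropic";
import { tool } from "@langchain/core/tools";
import { z } from "zod";

// A hypothetical placeholder tool; any name and schema would work here.
const echoTool = tool(async ({ text }) => text, {
  name: "echo",
  description: "Echo the provided text back to the caller.",
  schema: z.object({ text: z.string().describe("The text to echo back.") }),
});

// Binding the tool lets the model emit structured tool calls for it.
const llmWithTools = new ChatAnthropic({
  model: "claude-3-haiku-20240307",
}).bindTools([echoTool]);

const toolCallMsg = await llmWithTools.invoke("Echo the word 'hello'.");
// Tool calls surface on the `tool_calls` field rather than in the text content.
console.log(toolCallMsg.tool_calls);
```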
The following examples demonstrate how to call tools:\n", + "\n", + "### Single Tool" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "2ce56548", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_01XPUHrR4sNCqPr1i9zcsAsg\",\n", + " \"content\": [\n", + " {\n", + " \"type\": \"text\",\n", + " \"text\": \"Okay, let me use the calculator tool to find the answer:\"\n", + " },\n", + " {\n", + " \"type\": \"tool_use\",\n", + " \"id\": \"toolu_01MhUVuUedc1drBKLarhedFZ\",\n", + " \"name\": \"calculator\",\n", + " \"input\": {\n", + " \"number1\": 2,\n", + " \"number2\": 2,\n", + " \"operation\": \"add\"\n", + " }\n", + " }\n", + " ],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01XPUHrR4sNCqPr1i9zcsAsg\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"tool_use\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 449,\n", + " \"output_tokens\": 101\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01XPUHrR4sNCqPr1i9zcsAsg\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"tool_use\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 449,\n", + " \"output_tokens\": 101\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"calculator\",\n", + " \"args\": {\n", + " \"number1\": 2,\n", + " \"number2\": 2,\n", + " \"operation\": \"add\"\n", + " },\n", + " \"id\": \"toolu_01MhUVuUedc1drBKLarhedFZ\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 449,\n", + " \"output_tokens\": 101,\n", + " \"total_tokens\": 550\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { z } from \"zod\";\n", + "import { zodToJsonSchema } from \"zod-to-json-schema\";\n", + "\n", + "const calculatorSchema5 = z.object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute.\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + "});\n", + "\n", + "const tool5 = {\n", + " name: \"calculator\",\n", + " description: \"A simple calculator tool\",\n", + " input_schema: zodToJsonSchema(calculatorSchema5),\n", + "};\n", + "\n", + "const llm5 = new ChatAnthropic({\n", + " apiKey: process.env.ANTHROPIC_API_KEY,\n", + " model: \"claude-3-haiku-20240307\",\n", + "}).bind({\n", + " tools: [tool5],\n", + "});\n", + "\n", + "const prompt5 = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant who always needs to use a calculator.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "// Chain your prompt and model together\n", + "const chain5 = prompt5.pipe(llm5);\n", + "\n", + "const response5 = await chain5.invoke({\n", + " input: \"What is 2 + 2?\",\n", + "});\n", + "console.log(response5);" + ] + }, + { + "cell_type": "markdown", + "id": "6e91f97b", + "metadata": {}, + "source": [ + "### Forced tool calling\n", + "\n", + "In this example we'll provide 
the model with two tools:\n", + "\n", + "- `calculator`\n", + "- `get_weather`\n", + "\n", + "Then, when we call `bindTools`, we'll force the model to use the `get_weather` tool by passing the `tool_choice` arg like this:\n", + "\n", + "```typescript\n", + ".bindTools({\n", + " tools,\n", + " tool_choice: {\n", + " type: \"tool\",\n", + " name: \"get_weather\",\n", + " }\n", + "});\n", + "```\n", + "\n", + "Finally, we'll invoke the model, but instead of asking about the weather, we'll ask it to do some math.\n", + "Since we explicitly forced the model to use the `get_weather` tool, it will ignore the input and return the weather information (in this case it returned ``, which is expected.)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "8d6e4828", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"msg_018G4mEZu8KNKtaQxZQ3o8YB\",\n", + " \"content\": [\n", + " {\n", + " \"type\": \"tool_use\",\n", + " \"id\": \"toolu_01DS9RwsFKdhHNYmhwPJHdHa\",\n", + " \"name\": \"get_weather\",\n", + " \"input\": {\n", + " \"city\": \"\",\n", + " \"state\": \"\"\n", + " }\n", + " }\n", + " ],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_018G4mEZu8KNKtaQxZQ3o8YB\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"tool_use\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 672,\n", + " \"output_tokens\": 51\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_018G4mEZu8KNKtaQxZQ3o8YB\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"tool_use\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 672,\n", + " \"output_tokens\": 51\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"get_weather\",\n", + " \"args\": {\n", + " \"city\": \"\",\n", + " \"state\": \"\"\n", + " },\n", + " \"id\": \"toolu_01DS9RwsFKdhHNYmhwPJHdHa\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 672,\n", + " \"output_tokens\": 51,\n", + " \"total_tokens\": 723\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { z } from \"zod\";\n", + "import { zodToJsonSchema } from \"zod-to-json-schema\";\n", + "\n", + "const calculatorSchema6 = z.object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute.\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + "});\n", + "\n", + "const weatherSchema6 = z.object({\n", + " city: z.string().describe(\"The city to get the weather from\"),\n", + " state: z.string().optional().describe(\"The state to get the weather from\"),\n", + "});\n", + "\n", + "const tools6 = [\n", + " {\n", + " name: \"calculator\",\n", + " description: \"A simple calculator tool\",\n", + " input_schema: zodToJsonSchema(calculatorSchema6),\n", + " },\n", + " {\n", + " name: \"get_weather\",\n", + " description:\n", + " \"Get the weather of a specific location and return the temperature in Celsius.\",\n", + " 
input_schema: zodToJsonSchema(weatherSchema6),\n", + " },\n", + "];\n", + "\n", + "const llm6 = new ChatAnthropic({\n", + " apiKey: process.env.ANTHROPIC_API_KEY,\n", + " model: \"claude-3-haiku-20240307\",\n", + "}).bind({\n", + " tools: tools6,\n", + " tool_choice: {\n", + " type: \"tool\",\n", + " name: \"get_weather\",\n", + " },\n", + "});\n", + "\n", + "const prompt6 = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant who always needs to use a calculator.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "// Chain your prompt and model together\n", + "const chain6 = prompt6.pipe(llm6);\n", + "\n", + "const response6 = await chain6.invoke({\n", + " input: \"What is the sum of 2725 and 273639\",\n", + "});\n", + "\n", + "console.log(response6);" + ] + }, + { + "cell_type": "markdown", + "id": "1aa777bc", + "metadata": {}, + "source": [ + "The `tool_choice` argument has three possible values:\n", + "\n", + "- `{ type: \"tool\", name: \"tool_name\" }` | `string` - Forces the model to use the specified tool. If passing a single string, it will be treated as the tool name.\n", + "- `\"any\"` - Allows the model to choose the tool, but still forcing it to choose at least one.\n", + "- `\"auto\"` - The default value. Allows the model to select any tool, or none." + ] + }, + { + "cell_type": "markdown", + "id": "15253085", + "metadata": {}, + "source": [ + "### `withStructuredOutput`" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "5e466d35", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ operation: 'add', number1: 2, number2: 2 }\n" + ] + } + ], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { z } from \"zod\";\n", + "\n", + "const calculatorSchema7 = z\n", + " .object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute.\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + " })\n", + " .describe(\"A simple calculator tool\");\n", + "\n", + "const llm7 = new ChatAnthropic({\n", + " apiKey: process.env.ANTHROPIC_API_KEY,\n", + " model: \"claude-3-haiku-20240307\",\n", + "});\n", + "\n", + "// Pass the schema and tool name to the withStructuredOutput method\n", + "const modelWithTool7 = llm7.withStructuredOutput(calculatorSchema7);\n", + "\n", + "const prompt7 = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant who always needs to use a calculator.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "// Chain your prompt and model together\n", + "const chain7 = prompt7.pipe(modelWithTool7);\n", + "\n", + "const response7 = await chain7.invoke({\n", + " input: \"What is 2 + 2?\",\n", + "});\n", + "console.log(response7);" + ] + }, + { + "cell_type": "markdown", + "id": "4973b265", + "metadata": {}, + "source": [ + "You can supply a \"name\" field to give the LLM additional context around what you are trying to generate. You can also pass `includeRaw` to get the raw message back from the model too." 
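When `includeRaw` is `true`, the chain's return value is an object with a `raw` field (the complete `AIMessage`, including tool calls and usage metadata) and a `parsed` field (the extracted structured output), as the next cell demonstrates.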
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "951c5352", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " raw: AIMessage {\n", + " \"id\": \"msg_01TrkHbEkioCYNHQhqxw5unu\",\n", + " \"content\": [\n", + " {\n", + " \"type\": \"tool_use\",\n", + " \"id\": \"toolu_01XMrGHXeSVTfSw1oKFZokzG\",\n", + " \"name\": \"calculator\",\n", + " \"input\": {\n", + " \"number1\": 2,\n", + " \"number2\": 2,\n", + " \"operation\": \"add\"\n", + " }\n", + " }\n", + " ],\n", + " \"additional_kwargs\": {\n", + " \"id\": \"msg_01TrkHbEkioCYNHQhqxw5unu\",\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"tool_use\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 552,\n", + " \"output_tokens\": 69\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"id\": \"msg_01TrkHbEkioCYNHQhqxw5unu\",\n", + " \"model\": \"claude-3-haiku-20240307\",\n", + " \"stop_reason\": \"tool_use\",\n", + " \"stop_sequence\": null,\n", + " \"usage\": {\n", + " \"input_tokens\": 552,\n", + " \"output_tokens\": 69\n", + " },\n", + " \"type\": \"message\",\n", + " \"role\": \"assistant\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"calculator\",\n", + " \"args\": {\n", + " \"number1\": 2,\n", + " \"number2\": 2,\n", + " \"operation\": \"add\"\n", + " },\n", + " \"id\": \"toolu_01XMrGHXeSVTfSw1oKFZokzG\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 552,\n", + " \"output_tokens\": 69,\n", + " \"total_tokens\": 621\n", + " }\n", + " },\n", + " parsed: { operation: 'add', number1: 2, number2: 2 }\n", + "}\n" + ] + } + ], + "source": [ + "const includeRawModel7 = llm7.withStructuredOutput(calculatorSchema7, {\n", + " name: \"calculator\",\n", + " includeRaw: true,\n", + "});\n", + "const includeRawChain7 = prompt7.pipe(includeRawModel7);\n", + "\n", + "const includeRawResponse7 = await includeRawChain7.invoke({\n", + " input: \"What is 2 + 2?\",\n", + "});\n", + "\n", + "console.log(includeRawResponse7);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatAnthropic features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/anthropic.mdx b/docs/core_docs/docs/integrations/chat/anthropic.mdx deleted file mode 100644 index 416a1ac16132..000000000000 --- a/docs/core_docs/docs/integrations/chat/anthropic.mdx +++ /dev/null @@ -1,126 +0,0 @@ ---- -sidebar_label: Anthropic ---- - -# ChatAnthropic - -LangChain supports Anthropic's Claude family of chat models. 
- -You'll first need to install the [`@langchain/anthropic`](https://www.npmjs.com/package/@langchain/anthropic) package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/anthropic -``` - -You'll also need to sign up and obtain an [Anthropic API key](https://www.anthropic.com/). -Set it as an environment variable named `ANTHROPIC_API_KEY`, or pass it into the constructor as shown below. - -## Usage - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -You can initialize an instance like this: - -import CodeBlock from "@theme/CodeBlock"; -import Anthropic from "@examples/models/chat/integration_anthropic.ts"; - -{Anthropic} - -## Multimodal inputs - -Claude-3 models support image multimodal inputs. The passed input must be a base64 encoded image with the -filetype as a prefix (e.g. `data:image/png;base64,{YOUR_BASE64_ENCODED_DATA}`). -Here's an example: - -import AnthropicMultimodal from "@examples/models/chat/integration_anthropic_multimodal.ts"; - -{AnthropicMultimodal} - -See [the official docs](https://docs.anthropic.com/claude/docs/vision#what-image-file-types-does-claude-support) -for a complete list of supported file types. - -## Agents - -Anthropic models that support tool calling can be used in the Tool Calling agent. Here's an example: - -import AnthropicToolCallingAgent from "@examples/models/chat/integration_anthropic_tool_calling_agent.ts"; - -{AnthropicToolCallingAgent} - -:::tip -See the LangSmith trace [here](https://smith.langchain.com/public/e93ff7f6-03f7-4eb1-96c8-09a17dee1462/r) -::: - -## Custom headers - -You can pass custom headers in your requests like this: - -import AnthropicCustomHeaders from "@examples/models/chat/integration_anthropic_custom_headers.ts"; - -{AnthropicCustomHeaders} - -## Tools - -The Anthropic API supports tool calling, along with multi-tool calling. The following examples demonstrate how to call tools: - -### Single Tool - -import AnthropicSingleTool from "@examples/models/chat/integration_anthropic_single_tool.ts"; - -{AnthropicSingleTool} - -:::tip -See the LangSmith trace [here](https://smith.langchain.com/public/90c03ed0-154b-4a50-afbf-83dcbf302647/r) -::: - -### Forced tool calling - -import AnthropicForcedTool from "@examples/models/chat/integration_anthropic_forced_tool.ts"; - -In this example we'll provide the model with two tools: - -- `calculator` -- `get_weather` - -Then, when we call `bindTools`, we'll force the model to use the `get_weather` tool by passing the `tool_choice` arg like this: - -```typescript -.bindTools({ - tools, - tool_choice: { - type: "tool", - name: "get_weather", - } -}); -``` - -Finally, we'll invoke the model, but instead of asking about the weather, we'll ask it to do some math. -Since we explicitly forced the model to use the `get_weather` tool, it will ignore the input and return the weather information (in this case it returned ``, which is expected.) - -{AnthropicForcedTool} - -The `bind_tools` argument has three possible values: - -- `{ type: "tool", name: "tool_name" }` - Forces the model to use the specified tool. -- `"any"` - Allows the model to choose the tool, but still forcing it to choose at least one. -- `"auto"` - The default value. Allows the model to select any tool, or none. 
- -:::tip -See the LangSmith trace [here](https://smith.langchain.com/public/c5cc8fe7-5e76-4607-8c43-1e0b30e4f5ca/r) -::: - -### `withStructuredOutput` - -import AnthropicWSA from "@examples/models/chat/integration_anthropic_wsa.ts"; - -{AnthropicWSA} - -:::tip -See the LangSmith trace [here](https://smith.langchain.com/public/efbd11c5-886e-4e07-be1a-951690fa8a27/r) -::: diff --git a/docs/core_docs/docs/integrations/chat/azure.ipynb b/docs/core_docs/docs/integrations/chat/azure.ipynb new file mode 100644 index 000000000000..dbae00d112e2 --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/azure.ipynb @@ -0,0 +1,399 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Azure OpenAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# AzureChatOpenAI\n", + "\n", + "This will help you get started with AzureChatOpenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/v0.2/docs/integrations/chat/azure) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [AzureChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond.\n", + "\n", + "LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node).\n", + "\n", + "You can learn more about Azure OpenAI and its differences from the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview).\n", + "\n", + "### Credentials\n", + "\n", + "If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started.\n", + "\n", + "You'll also need to have an Azure OpenAI instance deployed. 
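If you prefer scripting over the portal flow described next, the resource and a model deployment can also be created from the Azure CLI. The sketch below is a hedged example: the resource group, region, deployment name, and model version are placeholder assumptions, so substitute values valid for your subscription:

```bash
# Create an Azure OpenAI resource (names and region are placeholders).
az cognitiveservices account create \
  --name my-openai-resource \
  --resource-group my-resource-group \
  --kind OpenAI \
  --sku S0 \
  --location eastus

# Deploy a model under that resource (model name and version are placeholders).
az cognitiveservices account deployment create \
  --name my-openai-resource \
  --resource-group my-resource-group \
  --deployment-name my-deployment \
  --model-name gpt-4o \
  --model-version "2024-05-13" \
  --model-format OpenAI \
  --sku-capacity 1 \
  --sku-name "Standard"
```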
You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", + "\n", + "Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance. Then, if using Node.js, you can set your credentials as environment variables:\n", + "\n", + "```bash\n", + "AZURE_OPENAI_API_INSTANCE_NAME=\n", + "AZURE_OPENAI_API_DEPLOYMENT_NAME=\n", + "AZURE_OPENAI_API_KEY=\n", + "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain AzureChatOpenAI integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureChatOpenAI } from \"@langchain/openai\" \n", + "\n", + "const llm = new AzureChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", + " azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9qrWKByvVrzWMxSn8joRZAklHoB32\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9qrWR7WiNjZ3leSG4Wd77cnKEVivv\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Using Azure Managed Identity\n", + "\n", + "If you're using Azure Managed Identity, you can configure the credentials like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "d7f47b2a", + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " DefaultAzureCredential,\n", + " getBearerTokenProvider,\n", + "} from \"@azure/identity\";\n", + "import { AzureChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const credentials = new DefaultAzureCredential();\n", + "const azureADTokenProvider = getBearerTokenProvider(\n", + " credentials,\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ");\n", + "\n", + "const llmWithManagedIdentity = new AzureChatOpenAI({\n", + " azureADTokenProvider,\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "6a889856", + "metadata": {}, + "source": [ + "## Using a different domain\n", + "\n", + "If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", + "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "ace7f876", + "metadata": {}, + "outputs": 
[], + "source": [ + "import { AzureChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llmWithDifferentDomain = new AzureChatOpenAI({\n", + " temperature: 0.9,\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " azureOpenAIBasePath:\n", + " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "0ac0310c", + "metadata": {}, + "source": [ + "## Migration from Azure OpenAI SDK\n", + "\n", + "If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", + "\n", + "1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "\n", + " @langchain/openai\n", + "\n", + "\n", + "```\n", + "\n", + "```bash\n", + "npm uninstall @langchain/azure-openai\n", + "```\n", + "\n", + " \n", + "2. Update your imports to use the new `AzureChatOpenAI` class from the `@langchain/openai` package:\n", + " ```typescript\n", + " import { AzureChatOpenAI } from \"@langchain/openai\";\n", + " ```\n", + "3. Update your code to use the new `AzureChatOpenAI` class and pass the required parameters:\n", + "\n", + " ```typescript\n", + " const model = new AzureChatOpenAI({\n", + " azureOpenAIApiKey: \"\",\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + " });\n", + " ```\n", + "\n", + " Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n", + "\n", + " - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n", + "\n", + " - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all AzureChatOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/azure.mdx b/docs/core_docs/docs/integrations/chat/azure.mdx deleted file mode 100644 index 912bd1e72fdd..000000000000 --- a/docs/core_docs/docs/integrations/chat/azure.mdx +++ 
/dev/null @@ -1,116 +0,0 @@ ---- -sidebar_label: Azure OpenAI -keywords: [AzureChatOpenAI] ---- - -import CodeBlock from "@theme/CodeBlock"; - -# Azure OpenAI - -[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond. - -LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node). - -You can learn more about Azure OpenAI and its difference with the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview). If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started. - -:::info - -Previously, LangChain.js supported integration with Azure OpenAI using the dedicated [Azure OpenAI SDK](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai). This SDK is now deprecated in favor of the new Azure integration in the OpenAI SDK, which allows to access the latest OpenAI models and features the same day they are released, and allows seemless transition between the OpenAI API and Azure OpenAI. - -If you are using Azure OpenAI with the deprecated SDK, see the [migration guide](#migration-from-azure-openai-sdk) to update to the new API. - -::: - -## Setup - -You'll first need to install the [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install -S @langchain/openai -``` - -You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal). - -Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the "Keys and Endpoint" section of your instance. - -If you're using Node.js, you can define the following environment variables to use the service: - -```bash -AZURE_OPENAI_API_INSTANCE_NAME= -AZURE_OPENAI_API_DEPLOYMENT_NAME= -AZURE_OPENAI_API_KEY= -AZURE_OPENAI_API_VERSION="2024-02-01" -``` - -Alternatively, you can pass the values directly to the `AzureOpenAI` constructor: - -import AzureOpenAI from "@examples/models/chat/integration_azure_openai.ts"; - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -{AzureOpenAI} - -:::info - -You can find the list of supported API versions in the [Azure OpenAI documentation](https://learn.microsoft.com/azure/ai-services/openai/reference). - -::: - -### Using Azure Managed Identity - -If you're using Azure Managed Identity, you can configure the credentials like this: - -import AzureOpenAIManagedIdentity from "@examples/models/chat/integration_azure_openai_managed_identity.ts"; - -{AzureOpenAIManagedIdentity} - -### Using a different domain - -If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable. 
-For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`: - -import AzureOpenAIBasePath from "@examples/models/chat/integration_azure_openai_base_path.ts"; - -{AzureOpenAIBasePath} - -## Usage example - -import Example from "@examples/models/chat/integration_azure_chat_openai.ts"; - -{Example} - -## Migration from Azure OpenAI SDK - -If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps: - -1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package: - ```bash npm2yarn - npm install @langchain/openai - npm uninstall @langchain/azure-openai - ``` -2. Update your imports to use the new `AzureChatOpenAI` class from the `@langchain/openai` package: - ```typescript - import { AzureChatOpenAI } from "@langchain/openai"; - ``` -3. Update your code to use the new `AzureChatOpenAI` class and pass the required parameters: - - ```typescript - const model = new AzureChatOpenAI({ - azureOpenAIApiKey: "", - azureOpenAIApiInstanceName: "", - azureOpenAIApiDeploymentName: "", - azureOpenAIApiVersion: "", - }); - ``` - - Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version. - - - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details. - - - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version. diff --git a/docs/core_docs/docs/integrations/chat/cohere.ipynb b/docs/core_docs/docs/integrations/chat/cohere.ipynb new file mode 100644 index 000000000000..5d8eee36f4dd --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/cohere.ipynb @@ -0,0 +1,1830 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Cohere\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatCohere\n", + "\n", + "This will help you get started with ChatCohere [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all ChatCohere features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/cohere) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatCohere](https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html) | [@langchain/cohere](https://api.js.langchain.com/modules/langchain_cohere.html) | ❌ | beta | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "In order to use the LangChain.js Cohere integration you'll need an API key.\n", + "You can sign up for a Cohere account and create an API key [here](https://dashboard.cohere.com/welcome/register).\n", + "\n", + "You'll first need to install the [`@langchain/cohere`](https://www.npmjs.com/package/@langchain/cohere) package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [Cohere's website](https://dashboard.cohere.com/welcome/register) to sign up to Cohere and generate an API key. 
Once you've done this, set the `COHERE_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export COHERE_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls, you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatCohere integration lives in the `@langchain/cohere` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "<IntegrationInstallTooltip></IntegrationInstallTooltip>\n", + "\n", + "<Npm2Yarn>\n", + " @langchain/cohere\n", + "</Npm2Yarn>\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\" \n", + "\n", + "const llm = new ChatCohere({\n", + " model: \"command-r-plus\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"J'adore programmer.\",\n", + " \"additional_kwargs\": {\n", + " \"response_id\": \"0056057a-6075-4436-b75a-b9455ac39f74\",\n", + " \"generationId\": \"3a0985db-92ff-41d8-b6b9-b7b77e300f3b\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"SYSTEM\",\n", + " \"message\": \"You are a helpful assistant that translates English to French. Translate the user sentence.\"\n", + " },\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"I love programming.\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"J'adore programmer.\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 20,\n", + " \"outputTokens\": 5\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 89,\n", + " \"outputTokens\": 5\n", + " }\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"estimatedTokenUsage\": {\n", + " \"completionTokens\": 5,\n", + " \"promptTokens\": 89,\n", + " \"totalTokens\": 94\n", + " },\n", + " \"response_id\": \"0056057a-6075-4436-b75a-b9455ac39f74\",\n", + " \"generationId\": \"3a0985db-92ff-41d8-b6b9-b7b77e300f3b\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"SYSTEM\",\n", + " \"message\": \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\"\n", + " },\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"I love programming.\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"J'adore programmer.\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 20,\n", + " \"outputTokens\": 5\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 89,\n", + " \"outputTokens\": 5\n", + " }\n", + " }\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 89,\n", + " \"output_tokens\": 5,\n", + " \"total_tokens\": 94\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore programmer.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Ich liebe Programmieren.\",\n", + " \"additional_kwargs\": {\n", + " \"response_id\": \"271e1439-7220-40fa-953d-c9f2947e451a\",\n", + " \"generationId\": \"f99970a4-7b1c-4d76-a73a-4467a1db759c\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"SYSTEM\",\n", + " \"message\": \"You are a helpful assistant that translates English to German.\"\n", + " },\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"I love programming.\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"Ich liebe Programmieren.\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 15,\n", + " \"outputTokens\": 6\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 84,\n", + " \"outputTokens\": 6\n", + " }\n", + " }\n", + " },\n", + " \"response_metadata\": {\n", + " \"estimatedTokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 84,\n", + " \"totalTokens\": 90\n", + " },\n", + " \"response_id\": \"271e1439-7220-40fa-953d-c9f2947e451a\",\n", + " \"generationId\": \"f99970a4-7b1c-4d76-a73a-4467a1db759c\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"SYSTEM\",\n", + " \"message\": \"You are a helpful assistant that translates English to German.\"\n", + " },\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"I love programming.\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"Ich liebe Programmieren.\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " 
\"inputTokens\": 15,\n", + " \"outputTokens\": 6\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 84,\n", + " \"outputTokens\": 6\n", + " }\n", + " }\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 84,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 90\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Streaming\n", + "\n", + "Cohere's API also supports streaming token responses. The example below demonstrates how to use this feature." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "4e1fe6b2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "stream tokens: The sky appears blue to human observers because blue light from the sun is scattered in all directions by the gases and particles in the Earth's atmosphere. This process is called Rayleigh scattering.\n", + "stream iters: 38\n" + ] + } + ], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "const streamingLLM = new ChatCohere({\n", + " apiKey: process.env.COHERE_API_KEY, // Default\n", + "});\n", + "const promptForStreaming = ChatPromptTemplate.fromMessages([\n", + " [\"ai\", \"You are a helpful assistant\"],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "const outputParserForStreaming = new StringOutputParser();\n", + "const chainForStreaming = promptForStreaming.pipe(streamingLLM).pipe(outputParserForStreaming);\n", + "const streamRes = await chainForStreaming.stream({\n", + " input: \"Why is the sky blue? Be concise with your answer.\",\n", + "});\n", + "let streamTokens = \"\";\n", + "let streamIters = 0;\n", + "for await (const streamChunk of streamRes) {\n", + " streamTokens += streamChunk;\n", + " streamIters += 1;\n", + "}\n", + "console.log(\"stream tokens:\", streamTokens);\n", + "console.log(\"stream iters:\", streamIters);" + ] + }, + { + "cell_type": "markdown", + "id": "1f756341", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "The Cohere API supports tool calling, along with multi-hop-tool calling. 
The following example demonstrates how to call tools:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "b285e783", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"I will use the magic_function tool to answer this question.\",\n", + " \"additional_kwargs\": {\n", + " \"response_id\": \"be5be048-94b5-4fb6-a5fb-5ed321b87120\",\n", + " \"generationId\": \"ea2a1e0b-9eea-4c5b-a77b-5e34226ffe80\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"What is the magic function of number 5?\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"I will use the magic_function tool to answer this question.\",\n", + " \"toolCalls\": \"[Array]\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 30,\n", + " \"outputTokens\": 21\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 904,\n", + " \"outputTokens\": 54\n", + " }\n", + " },\n", + " \"toolCalls\": [\n", + " {\n", + " \"id\": \"eafa35bd-7e2c-4601-a754-15219be4\",\n", + " \"function\": \"[Object]\",\n", + " \"type\": \"function\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"estimatedTokenUsage\": {\n", + " \"completionTokens\": 54,\n", + " \"promptTokens\": 904,\n", + " \"totalTokens\": 958\n", + " },\n", + " \"response_id\": \"be5be048-94b5-4fb6-a5fb-5ed321b87120\",\n", + " \"generationId\": \"ea2a1e0b-9eea-4c5b-a77b-5e34226ffe80\",\n", + " \"chatHistory\": [\n", + " {\n", + " \"role\": \"USER\",\n", + " \"message\": \"What is the magic function of number 5?\"\n", + " },\n", + " {\n", + " \"role\": \"CHATBOT\",\n", + " \"message\": \"I will use the magic_function tool to answer this question.\",\n", + " \"toolCalls\": \"[Array]\"\n", + " }\n", + " ],\n", + " \"finishReason\": \"COMPLETE\",\n", + " \"meta\": {\n", + " \"apiVersion\": {\n", + " \"version\": \"1\"\n", + " },\n", + " \"billedUnits\": {\n", + " \"inputTokens\": 30,\n", + " \"outputTokens\": 21\n", + " },\n", + " \"tokens\": {\n", + " \"inputTokens\": 904,\n", + " \"outputTokens\": 54\n", + " }\n", + " },\n", + " \"toolCalls\": [\n", + " {\n", + " \"id\": \"eafa35bd-7e2c-4601-a754-15219be4\",\n", + " \"function\": \"[Object]\",\n", + " \"type\": \"function\"\n", + " }\n", + " ]\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"magic_function\",\n", + " \"args\": {\n", + " \"num\": 5\n", + " },\n", + " \"id\": \"eafa35bd-7e2c-4601-a754-15219be4\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 904,\n", + " \"output_tokens\": 54,\n", + " \"total_tokens\": 958\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const llmForTools = new ChatCohere({\n", + " apiKey: process.env.COHERE_API_KEY, // Default\n", + "});\n", + "\n", + "const magicFunctionTool = tool(\n", + " async ({ num }) => {\n", + " return `The magic function of ${num} is ${num + 5}`;\n", + " },\n", + " {\n", + " name: \"magic_function\",\n", + " description: \"Apply a magic function to the input number\",\n", + " schema: z.object({\n", + " num: 
z.number().describe(\"The number to apply the magic function for\"),\n", + " }),\n", + " }\n", + ");\n", + "\n", + "const llmWithTools = llmForTools.bindTools([magicFunctionTool]);\n", + "const responseWithTools = await llmWithTools.invoke([new HumanMessage(\"What is the magic function of number 5?\")]);\n", + "\n", + "console.log(responseWithTools);" + ] + }, + { + "cell_type": "markdown", + "id": "4fecf4e4", + "metadata": {}, + "source": [ + "## RAG\n", + "\n", + "Cohere also comes out of the box with RAG support.\n", + "You can pass in documents as context to the API request, and Cohere's models will use them when generating responses." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "74d6320e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Harrison worked at Kensho as an engineer for 3 years.\n" + ] + } + ], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const llmForRag = new ChatCohere({\n", + " apiKey: process.env.COHERE_API_KEY, // Default\n", + "});\n", + "\n", + "const documents = [\n", + " {\n", + " title: \"Harrison's work\",\n", + " snippet: \"Harrison worked at Kensho as an engineer.\",\n", + " },\n", + " {\n", + " title: \"Harrison's work duration\",\n", + " snippet: \"Harrison worked at Kensho for 3 years.\",\n", + " },\n", + " {\n", + " title: \"Polar bears in the Appalachian Mountains\",\n", + " snippet:\n", + " \"Polar bears have surprisingly adapted to the Appalachian Mountains, thriving in the diverse, forested terrain despite their traditional arctic habitat. This unique situation has sparked significant interest and study in climate adaptability and wildlife behavior.\",\n", + " },\n", + "];\n", + "\n", + "const ragResponse = await llmForRag.invoke(\n", + " [new HumanMessage(\"Where did Harrison work and for how long?\")],\n", + " {\n", + " documents,\n", + " }\n", + ");\n", + "console.log(ragResponse.content);" + ] + }, + { + "cell_type": "markdown", + "id": "aa13bae8", + "metadata": {}, + "source": [ + "## Connectors\n", + "\n", + "The API also supports connectors, which supply context dynamically rather than as static documents.\n", + "An example of this is their `web-search` connector, which allows you to pass in a query that the API will use to search the web for relevant documents.\n", + "The example below demonstrates how to use this feature." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "478f7c9e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.',\n", + " additional_kwargs: {\n", + " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", + " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", + " chatHistory: [\n", + " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", + " {\n", + " role: 'CHATBOT',\n", + " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. 
The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", + " }\n", + " ],\n", + " finishReason: 'COMPLETE',\n", + " meta: {\n", + " apiVersion: { version: '1' },\n", + " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", + " tokens: { inputTokens: 11198, outputTokens: 286 }\n", + " },\n", + " citations: [\n", + " {\n", + " start: 43,\n", + " end: 54,\n", + " text: 'prehistoric',\n", + " documentIds: [ 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 55,\n", + " end: 79,\n", + " text: 'Palaeeudyptes klekowskii',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 84,\n", + " end: 102,\n", + " text: '\"colossus penguin\"',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 119,\n", + " end: 125,\n", + " text: '6 feet',\n", + " documentIds: [ 'web-search_0', 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 126,\n", + " end: 134,\n", + " text: '6 inches',\n", + " documentIds: [ 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 161,\n", + " end: 172,\n", + " text: 'alive today',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " },\n", + " {\n", + " start: 180,\n", + " end: 195,\n", + " text: 'emperor penguin',\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ]\n", + " },\n", + " {\n", + " start: 213,\n", + " end: 235,\n", + " text: 'just over 4 feet tall.',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " }\n", + " ],\n", + " documents: [\n", + " {\n", + " id: 'web-search_1',\n", + " snippet: 'Largest species of penguin ever\\n' +\n", + " '\\n' +\n", + " 'TencentContact an Account Manager\\n' +\n", + " '\\n' +\n", + " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", + " '\\n' +\n", + " 'The height of K. fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", + " '\\n' +\n", + " \"Prior to the recent discovery and description of K. fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. 
It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", + " '\\n' +\n", + " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", + " '\\n' +\n", + " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", + " '\\n' +\n", + " 'Comments below may relate to previous holders of this record.',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Largest species of penguin ever',\n", + " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", + " },\n", + " {\n", + " id: 'web-search_2',\n", + " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", + " '\\n' +\n", + " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. Here are the most impressive of these ancient giants.\\n' +\n", + " '\\n' +\n", + " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. “I almost thought it was maybe some other animal.”\\n' +\n", + " '\\n' +\n", + " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. “The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", + " '\\n' +\n", + " 'Sign up to our Wild Wild Life newsletter\\n' +\n", + " '\\n' +\n", + " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", + " '\\n' +\n", + " 'Sign up to newsletter\\n' +\n", + " '\\n' +\n", + " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Petradyptes stonehousei\\n' +\n", + " '\\n' +\n", + " 'Ksepka and his colleagues discovered another giant penguin alongside K. 
fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", + " '\\n' +\n", + " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", + " '\\n' +\n", + " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", + " '\\n' +\n", + " 'Nature Picture Library / Alamy\\n' +\n", + " '\\n' +\n", + " 'Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", + " '\\n' +\n", + " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", + " '\\n' +\n", + " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", + " '\\n' +\n", + " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", + " '\\n' +\n", + " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. waewaeroa), however, were the complete opposite.\\n' +\n", + " '\\n' +\n", + " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. “They’re both really big, but in different ways.”\\n' +\n", + " '\\n' +\n", + " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", + " '\\n' +\n", + " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Sign up to our weekly newsletter\\n' +\n", + " '\\n' +\n", + " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. 
Sign up\\n\" +\n", + " '\\n' +\n", + " 'More from New Scientist\\n' +\n", + " '\\n' +\n", + " 'Explore the latest news, articles and features\\n' +\n", + " '\\n' +\n", + " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", + " '\\n' +\n", + " 'How you can help with penguin research by browsing images at home\\n' +\n", + " '\\n' +\n", + " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", + " '\\n' +\n", + " 'Penguins adapt their accents to sound more like their friends\\n' +\n", + " '\\n' +\n", + " 'Trending New Scientist articles\\n' +\n", + " '\\n' +\n", + " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", + " '\\n' +\n", + " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", + " '\\n' +\n", + " 'When is the best time to exercise to get the most from your workout?\\n' +\n", + " '\\n' +\n", + " 'Why slow running could be even more beneficial than running fast\\n' +\n", + " '\\n' +\n", + " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", + " '\\n' +\n", + " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", + " '\\n' +\n", + " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", + " '\\n' +\n", + " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", + " '\\n' +\n", + " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", + " '\\n' +\n", + " 'Why midlife is the perfect time to take control of your future health',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", + " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", + " },\n", + " {\n", + " id: 'web-search_0',\n", + " snippet: 'Sustainability for All.\\n' +\n", + " '\\n' +\n", + " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", + " '\\n' +\n", + " 'University of Houston\\n' +\n", + " '\\n' +\n", + " 'Bryan Nelson is a science writer and award-winning documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", + " '\\n' +\n", + " 'Learn about our editorial process\\n' +\n", + " '\\n' +\n", + " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", + " '\\n' +\n", + " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . Christopher Michel/flickr\\n\" +\n", + " '\\n' +\n", + " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", + " '\\n' +\n", + " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", + " '\\n' +\n", + " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", + " '\\n' +\n", + " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. 
Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", + " '\\n' +\n", + " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", + " '\\n' +\n", + " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", + " '\\n' +\n", + " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", + " '\\n' +\n", + " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", + " '\\n' +\n", + " '10 of the Largest Living Sea Creatures\\n' +\n", + " '\\n' +\n", + " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", + " '\\n' +\n", + " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", + " '\\n' +\n", + " 'National Monuments Designated By President Obama\\n' +\n", + " '\\n' +\n", + " '20 Pygmy Animal Species From Around the World\\n' +\n", + " '\\n' +\n", + " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", + " '\\n' +\n", + " '16 of the Most Surreal Landscapes on Earth\\n' +\n", + " '\\n' +\n", + " '12 Peculiar Penguin Facts\\n' +\n", + " '\\n' +\n", + " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", + " '\\n' +\n", + " '8 Titanic Facts About Patagotitans\\n' +\n", + " '\\n' +\n", + " '9 Extinct Megafauna That Are Out of This World\\n' +\n", + " '\\n' +\n", + " '10 Places Where Penguins Live in the Wild\\n' +\n", + " '\\n' +\n", + " '16 Animals That Are Living Fossils\\n' +\n", + " '\\n' +\n", + " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", + " '\\n' +\n", + " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", + " '\\n' +\n", + " '12 Dinosaur Theme Parks\\n' +\n", + " '\\n' +\n", + " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", + " '\\n' +\n", + " 'Cookies Settings Accept All Cookies',\n", + " timestamp: '2024-07-27T06:29:15',\n", + " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", + " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", + " },\n", + " {\n", + " id: 'web-search_5',\n", + " snippet: 'Skip to main content\\n' +\n", + " '\\n' +\n", + " 'Smithsonian Institution\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean\\n' +\n", + " '\\n' +\n", + " 'Follow us on 
Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", + " '\\n' +\n", + " 'How Big Do Penguins Get?\\n' +\n", + " '\\n' +\n", + " '(Smithsonian Institution)\\n' +\n", + " '\\n' +\n", + " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", + " '\\n' +\n", + " 'Coasts & Shallow Water\\n' +\n", + " '\\n' +\n", + " 'Census of Marine Life\\n' +\n", + " '\\n' +\n", + " 'Waves, Storms & Tsunamis\\n' +\n", + " '\\n' +\n", + " 'Temperature & Chemistry\\n' +\n", + " '\\n' +\n", + " 'Solutions & Success Stories\\n' +\n", + " '\\n' +\n", + " 'Books, Film & The Arts\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean',\n", + " timestamp: '2024-07-30T03:47:03',\n", + " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", + " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", + " },\n", + " {\n", + " id: 'web-search_4',\n", + " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", + " '\\n' +\n", + " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", + " '\\n' +\n", + " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", + " '\\n' +\n", + " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", + " '\\n' +\n", + " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. 
ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", + " '\\n' +\n", + " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", + " '\\n' +\n", + " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. 
Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", + " '\\n' +\n", + " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", + " '\\n' +\n", + " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", + " '\\n' +\n", + " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. 
Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", + " '\\n' +\n", + " 'Adaptations to pressure and low oxygen\\n' +\n", + " '\\n' +\n", + " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", + " '\\n' +\n", + " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", + " '\\n' +\n", + " 'Distribution and habitat\\n' +\n", + " '\\n' +\n", + " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 22063 more characters,\n", + " timestamp: '2024-07-31T07:59:36',\n", + " title: 'Emperor penguin - Wikipedia',\n", + " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", + " }\n", + " ],\n", + " searchResults: [\n", + " {\n", + " searchQuery: {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " },\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_3',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ],\n", + " connector: { id: 'web-search' }\n", + " }\n", + " ],\n", + " searchQueries: [\n", + " {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " }\n", + " ]\n", + " },\n", + " tool_calls: [],\n", + " usage_metadata: { input_tokens: 11198, output_tokens: 286, total_tokens: 11484 },\n", + " invalid_tool_calls: [],\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.',\n", + " name: undefined,\n", + " additional_kwargs: {\n", + " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", + " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", + " chatHistory: [\n", + " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", + " {\n", + " role: 'CHATBOT',\n", + " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. 
The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", + " }\n", + " ],\n", + " finishReason: 'COMPLETE',\n", + " meta: {\n", + " apiVersion: { version: '1' },\n", + " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", + " tokens: { inputTokens: 11198, outputTokens: 286 }\n", + " },\n", + " citations: [\n", + " {\n", + " start: 43,\n", + " end: 54,\n", + " text: 'prehistoric',\n", + " documentIds: [ 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 55,\n", + " end: 79,\n", + " text: 'Palaeeudyptes klekowskii',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 84,\n", + " end: 102,\n", + " text: '\"colossus penguin\"',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 119,\n", + " end: 125,\n", + " text: '6 feet',\n", + " documentIds: [ 'web-search_0', 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 126,\n", + " end: 134,\n", + " text: '6 inches',\n", + " documentIds: [ 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 161,\n", + " end: 172,\n", + " text: 'alive today',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " },\n", + " {\n", + " start: 180,\n", + " end: 195,\n", + " text: 'emperor penguin',\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ]\n", + " },\n", + " {\n", + " start: 213,\n", + " end: 235,\n", + " text: 'just over 4 feet tall.',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " }\n", + " ],\n", + " documents: [\n", + " {\n", + " id: 'web-search_1',\n", + " snippet: 'Largest species of penguin ever\\n' +\n", + " '\\n' +\n", + " 'TencentContact an Account Manager\\n' +\n", + " '\\n' +\n", + " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", + " '\\n' +\n", + " 'The height of K. fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", + " '\\n' +\n", + " \"Prior to the recent discovery and description of K. fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. 
It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", + " '\\n' +\n", + " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", + " '\\n' +\n", + " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", + " '\\n' +\n", + " 'Comments below may relate to previous holders of this record.',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Largest species of penguin ever',\n", + " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", + " },\n", + " {\n", + " id: 'web-search_2',\n", + " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", + " '\\n' +\n", + " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. Here are the most impressive of these ancient giants.\\n' +\n", + " '\\n' +\n", + " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. “I almost thought it was maybe some other animal.”\\n' +\n", + " '\\n' +\n", + " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. “The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", + " '\\n' +\n", + " 'Sign up to our Wild Wild Life newsletter\\n' +\n", + " '\\n' +\n", + " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", + " '\\n' +\n", + " 'Sign up to newsletter\\n' +\n", + " '\\n' +\n", + " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Petradyptes stonehousei\\n' +\n", + " '\\n' +\n", + " 'Ksepka and his colleagues discovered another giant penguin alongside K. 
fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", + " '\\n' +\n", + " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", + " '\\n' +\n", + " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", + " '\\n' +\n", + " 'Nature Picture Library / Alamy\\n' +\n", + " '\\n' +\n", + " 'Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", + " '\\n' +\n", + " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", + " '\\n' +\n", + " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", + " '\\n' +\n", + " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", + " '\\n' +\n", + " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. waewaeroa), however, were the complete opposite.\\n' +\n", + " '\\n' +\n", + " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. “They’re both really big, but in different ways.”\\n' +\n", + " '\\n' +\n", + " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", + " '\\n' +\n", + " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Sign up to our weekly newsletter\\n' +\n", + " '\\n' +\n", + " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. 
Sign up\\n\" +\n", + " '\\n' +\n", + " 'More from New Scientist\\n' +\n", + " '\\n' +\n", + " 'Explore the latest news, articles and features\\n' +\n", + " '\\n' +\n", + " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", + " '\\n' +\n", + " 'How you can help with penguin research by browsing images at home\\n' +\n", + " '\\n' +\n", + " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", + " '\\n' +\n", + " 'Penguins adapt their accents to sound more like their friends\\n' +\n", + " '\\n' +\n", + " 'Trending New Scientist articles\\n' +\n", + " '\\n' +\n", + " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", + " '\\n' +\n", + " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", + " '\\n' +\n", + " 'When is the best time to exercise to get the most from your workout?\\n' +\n", + " '\\n' +\n", + " 'Why slow running could be even more beneficial than running fast\\n' +\n", + " '\\n' +\n", + " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", + " '\\n' +\n", + " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", + " '\\n' +\n", + " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", + " '\\n' +\n", + " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", + " '\\n' +\n", + " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", + " '\\n' +\n", + " 'Why midlife is the perfect time to take control of your future health',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", + " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", + " },\n", + " {\n", + " id: 'web-search_0',\n", + " snippet: 'Sustainability for All.\\n' +\n", + " '\\n' +\n", + " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", + " '\\n' +\n", + " 'University of Houston\\n' +\n", + " '\\n' +\n", + " 'Bryan Nelson is a science writer and award-winning documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", + " '\\n' +\n", + " 'Learn about our editorial process\\n' +\n", + " '\\n' +\n", + " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", + " '\\n' +\n", + " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . Christopher Michel/flickr\\n\" +\n", + " '\\n' +\n", + " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", + " '\\n' +\n", + " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", + " '\\n' +\n", + " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", + " '\\n' +\n", + " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. 
Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", + " '\\n' +\n", + " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", + " '\\n' +\n", + " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", + " '\\n' +\n", + " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", + " '\\n' +\n", + " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", + " '\\n' +\n", + " '10 of the Largest Living Sea Creatures\\n' +\n", + " '\\n' +\n", + " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", + " '\\n' +\n", + " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", + " '\\n' +\n", + " 'National Monuments Designated By President Obama\\n' +\n", + " '\\n' +\n", + " '20 Pygmy Animal Species From Around the World\\n' +\n", + " '\\n' +\n", + " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", + " '\\n' +\n", + " '16 of the Most Surreal Landscapes on Earth\\n' +\n", + " '\\n' +\n", + " '12 Peculiar Penguin Facts\\n' +\n", + " '\\n' +\n", + " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", + " '\\n' +\n", + " '8 Titanic Facts About Patagotitans\\n' +\n", + " '\\n' +\n", + " '9 Extinct Megafauna That Are Out of This World\\n' +\n", + " '\\n' +\n", + " '10 Places Where Penguins Live in the Wild\\n' +\n", + " '\\n' +\n", + " '16 Animals That Are Living Fossils\\n' +\n", + " '\\n' +\n", + " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", + " '\\n' +\n", + " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", + " '\\n' +\n", + " '12 Dinosaur Theme Parks\\n' +\n", + " '\\n' +\n", + " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", + " '\\n' +\n", + " 'Cookies Settings Accept All Cookies',\n", + " timestamp: '2024-07-27T06:29:15',\n", + " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", + " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", + " },\n", + " {\n", + " id: 'web-search_5',\n", + " snippet: 'Skip to main content\\n' +\n", + " '\\n' +\n", + " 'Smithsonian Institution\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean\\n' +\n", + " '\\n' +\n", + " 'Follow us on 
Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", + " '\\n' +\n", + " 'How Big Do Penguins Get?\\n' +\n", + " '\\n' +\n", + " '(Smithsonian Institution)\\n' +\n", + " '\\n' +\n", + " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", + " '\\n' +\n", + " 'Coasts & Shallow Water\\n' +\n", + " '\\n' +\n", + " 'Census of Marine Life\\n' +\n", + " '\\n' +\n", + " 'Waves, Storms & Tsunamis\\n' +\n", + " '\\n' +\n", + " 'Temperature & Chemistry\\n' +\n", + " '\\n' +\n", + " 'Solutions & Success Stories\\n' +\n", + " '\\n' +\n", + " 'Books, Film & The Arts\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean',\n", + " timestamp: '2024-07-30T03:47:03',\n", + " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", + " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", + " },\n", + " {\n", + " id: 'web-search_4',\n", + " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", + " '\\n' +\n", + " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", + " '\\n' +\n", + " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", + " '\\n' +\n", + " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", + " '\\n' +\n", + " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. 
ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", + " '\\n' +\n", + " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", + " '\\n' +\n", + " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. 
Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", + " '\\n' +\n", + " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", + " '\\n' +\n", + " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", + " '\\n' +\n", + " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. 
Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", + " '\\n' +\n", + " 'Adaptations to pressure and low oxygen\\n' +\n", + " '\\n' +\n", + " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", + " '\\n' +\n", + " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", + " '\\n' +\n", + " 'Distribution and habitat\\n' +\n", + " '\\n' +\n", + " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 22063 more characters,\n", + " timestamp: '2024-07-31T07:59:36',\n", + " title: 'Emperor penguin - Wikipedia',\n", + " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", + " }\n", + " ],\n", + " searchResults: [\n", + " {\n", + " searchQuery: {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " },\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_3',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ],\n", + " connector: { id: 'web-search' }\n", + " }\n", + " ],\n", + " searchQueries: [\n", + " {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " }\n", + " ]\n", + " },\n", + " response_metadata: {\n", + " estimatedTokenUsage: { completionTokens: 286, promptTokens: 11198, totalTokens: 11484 },\n", + " response_id: '8d5ae032-4c8e-492e-8686-289f198b5eb5',\n", + " generationId: '2224736b-430c-46cf-9ca0-a7f5737466aa',\n", + " chatHistory: [\n", + " { role: 'USER', message: 'How tall are the largest pengiuns?' },\n", + " {\n", + " role: 'CHATBOT',\n", + " message: 'The largest penguin ever discovered is the prehistoric Palaeeudyptes klekowskii, or \"colossus penguin\", which stood at 6 feet 6 inches tall. 
The tallest penguin alive today is the emperor penguin, which stands at just over 4 feet tall.'\n", + " }\n", + " ],\n", + " finishReason: 'COMPLETE',\n", + " meta: {\n", + " apiVersion: { version: '1' },\n", + " billedUnits: { inputTokens: 10474, outputTokens: 62 },\n", + " tokens: { inputTokens: 11198, outputTokens: 286 }\n", + " },\n", + " citations: [\n", + " {\n", + " start: 43,\n", + " end: 54,\n", + " text: 'prehistoric',\n", + " documentIds: [ 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 55,\n", + " end: 79,\n", + " text: 'Palaeeudyptes klekowskii',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 84,\n", + " end: 102,\n", + " text: '\"colossus penguin\"',\n", + " documentIds: [ 'web-search_0', 'web-search_1', 'web-search_2' ]\n", + " },\n", + " {\n", + " start: 119,\n", + " end: 125,\n", + " text: '6 feet',\n", + " documentIds: [ 'web-search_0', 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 126,\n", + " end: 134,\n", + " text: '6 inches',\n", + " documentIds: [ 'web-search_1' ]\n", + " },\n", + " {\n", + " start: 161,\n", + " end: 172,\n", + " text: 'alive today',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " },\n", + " {\n", + " start: 180,\n", + " end: 195,\n", + " text: 'emperor penguin',\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ]\n", + " },\n", + " {\n", + " start: 213,\n", + " end: 235,\n", + " text: 'just over 4 feet tall.',\n", + " documentIds: [ 'web-search_0', 'web-search_5' ]\n", + " }\n", + " ],\n", + " documents: [\n", + " {\n", + " id: 'web-search_1',\n", + " snippet: 'Largest species of penguin ever\\n' +\n", + " '\\n' +\n", + " 'TencentContact an Account Manager\\n' +\n", + " '\\n' +\n", + " \"The largest species of penguin ever recorded is a newly described prehistoric species, Kumimanu fordycei, known from fossil remains discovered inside boulders in North Otago, on New Zealand's South Island. By comparing the size and density of its bones with those of modern-day penguins, researchers estimate that it weighed 154 kilograms (340 pounds), which is three times that of today's largest species, the emperor penguin (Aptenodytes forsteri). The rocks containing the remains of this new giant fossil species date between 55.5 million years and 59.5 million years old, meaning that it existed during the Late Palaeocene. Details of the record-breaking prehistoric penguin were published in the Journal of Paleontology on 8 February 2023.\\n\" +\n", + " '\\n' +\n", + " 'The height of K. fordycei is debated, though a related extinct species, K. biceae, has been estimated to have stood up to 1.77 m (5 ft). A lack of complete skeletons of extinct giant penguins found to date makes it difficult for height to be determined with any degree of certainty.\\n' +\n", + " '\\n' +\n", + " \"Prior to the recent discovery and description of K. fordycei, the largest species of penguin known to science was the colossus penguin (Palaeeudyptes klekowskii), which is estimated to have weighed as much as 115 kg (253 lb 8 oz), and stood up to 2 m (6 ft 6 in) tall. 
It lived in Antarctica's Seymour Island approximately 37 million years ago, during the Late Eocene, and is represented by the most complete fossil remains ever found for a penguin species in Antarctica.\\n\" +\n", + " '\\n' +\n", + " \"This species exceeds in height the previous record holder, Nordenskjoeld's giant penguin (Anthropornis nordenskjoeldi), which stood 1.7 m (5 ft 6 in) tall and also existed during the Eocene epoch, occurring in New Zealand and in Antarctica's Seymour Island.\\n\" +\n", + " '\\n' +\n", + " 'Records change on a daily basis and are not immediately published online. For a full list of record titles, please use our Record Application Search. (You will need to register / login for access)\\n' +\n", + " '\\n' +\n", + " 'Comments below may relate to previous holders of this record.',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Largest species of penguin ever',\n", + " url: 'https://www.guinnessworldrecords.com/world-records/84903-largest-species-of-penguin'\n", + " },\n", + " {\n", + " id: 'web-search_2',\n", + " snippet: 'Mega penguins: These are the largest penguins to have ever lived\\n' +\n", + " '\\n' +\n", + " 'No penguin alive today can compare with some of the extinct giants that once roamed the planet, including Kumimanu fordycei, Petradyptes stonehousei and Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'An illustration of Kumimanu fordycei (the larger, single bird) and Petradyptes stonehousei penguins on an ancient New Zealand beach\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Penguins come in all shapes and sizes, from the fairy penguin (Eudyptula minor) which stands at just over 30 centimetres tall to the 1-metre-high emperor penguin (Aptenodytes forsteri). But even the biggest emperors alive today would be dwarfed by the mega-penguins that roamed Earth millions of years ago. Here are the most impressive of these ancient giants.\\n' +\n", + " '\\n' +\n", + " 'The title of the largest penguin ever documented goes to the species Kumimanu fordycei, which was first described in February 2023.\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka at the Bruce Museum in Connecticut and his colleagues unearthed an unusually huge flipper bone of a penguin in southern New Zealand in 2018. “The big humerus was shocking to me,” he says. “I almost thought it was maybe some other animal.”\\n' +\n", + " '\\n' +\n", + " 'The team quickly determined that this belonged to a new species of penguin that lived in what is now New Zealand over 55 million years ago. The sheer size of the bone suggested that the bird probably weighed between 148 and 160 kilograms and stood around 1.6 metres tall. “The emperor penguin just looks like a child next to it,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'The species was named after palaeontologist Ewan Fordyce, who made his own mega penguin discoveries in the 1970s (see below).\\n' +\n", + " '\\n' +\n", + " 'Sign up to our Wild Wild Life newsletter\\n' +\n", + " '\\n' +\n", + " 'A monthly celebration of the biodiversity of our planet’s animals, plants and other organisms.\\n' +\n", + " '\\n' +\n", + " 'Sign up to newsletter\\n' +\n", + " '\\n' +\n", + " 'Skeletons of Kumimanu, Petradyptes and a modern emperor penguin\\n' +\n", + " '\\n' +\n", + " 'Artwork by Dr. Simone Giovanardi\\n' +\n", + " '\\n' +\n", + " 'Petradyptes stonehousei\\n' +\n", + " '\\n' +\n", + " 'Ksepka and his colleagues discovered another giant penguin alongside K. 
fordycei, called Petradyptes stonehousei. With an estimated mass of 50 kilograms, it was quite a bit smaller than its contemporary. Its name comes from the Greek “petra” for rock and “dyptes” for diver, while “stonehousei” was chosen to honour British polar scientist Bernard Stonehouse.\\n' +\n", + " '\\n' +\n", + " 'Both K. fordycei and P. stonehousei retained features seen in much earlier penguin species, such as slimmer flipper bones and muscle attachment points that look like those seen in flying birds.\\n' +\n", + " '\\n' +\n", + " '“Both penguins really add to the case that penguins got their start in New Zealand,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Illustration of the extinct Palaeeudyptes klekowskii with a human and emperor penguin for scale\\n' +\n", + " '\\n' +\n", + " 'Nature Picture Library / Alamy\\n' +\n", + " '\\n' +\n", + " 'Palaeeudyptes klekowskii\\n' +\n", + " '\\n' +\n", + " 'While K. fordycei was the heaviest penguin, it wasn’t the tallest. That award goes to Palaeeudyptes klekowskii, dubbed the colossus penguin, which towered at 2 metres and weighed a hefty 115 kilograms.\\n' +\n", + " '\\n' +\n", + " 'The species lived 37 to 40 million years ago along the Antarctic coast. Its fossil, which included the longest fused ankle-foot bone, is one of the most complete ever uncovered from the Antarctic.\\n' +\n", + " '\\n' +\n", + " 'Owing to their larger body size, giant penguins could remain underwater longer than smaller ones. Experts reckon that a species such as P. klekowskii could have remained submerged for up to 40 minutes hunting for fish.\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus\\n' +\n", + " '\\n' +\n", + " 'Pachydyptes ponderosus is prehistoric giant that lived more recently than those already mentioned – around 37 to 34 million years ago. Based on the few bones from the species that have been recovered, in 2006 Ksepka and his colleagues put it around 1.5 metres tall with a weight of over 100 kilograms.\\n' +\n", + " '\\n' +\n", + " '“We really only have parts of the flipper and shoulder, but we think it would have been quite a thick, stocky animal,” says Ksepka. “Its humerus is just so wide.”\\n' +\n", + " '\\n' +\n", + " 'Daniel Ksepka with a model of a Kairuku penguin\\n' +\n", + " '\\n' +\n", + " 'The three species that belonged to the genus Kairuku (K. grebneffi, K. waitaki and K. waewaeroa), however, were the complete opposite.\\n' +\n", + " '\\n' +\n", + " '“If Pachydyptes is like a big, heavy football lineman, then you can think of Kairuku as a really tall, skinny basketball player,” says Ksepka. “They’re both really big, but in different ways.”\\n' +\n", + " '\\n' +\n", + " 'The first Kairuku bones were discovered by Ewan Fordyce in the 1970s, in New Zealand. All three species lived roughly 34 to 27 million years ago. The tallest, K. waewaeroa, stood at a height of around 1.4 metres and weighed around 80 kilograms.\\n' +\n", + " '\\n' +\n", + " '“They were graceful penguins, with slender trunks,” says Ksepka.\\n' +\n", + " '\\n' +\n", + " 'Sign up to our weekly newsletter\\n' +\n", + " '\\n' +\n", + " \"Receive a weekly dose of discovery in your inbox! We'll also keep you up to date with New Scientist events and special offers. 
Sign up\\n\" +\n", + " '\\n' +\n", + " 'More from New Scientist\\n' +\n", + " '\\n' +\n", + " 'Explore the latest news, articles and features\\n' +\n", + " '\\n' +\n", + " 'Extremely rare black penguin spotted in Antarctica\\n' +\n", + " '\\n' +\n", + " 'How you can help with penguin research by browsing images at home\\n' +\n", + " '\\n' +\n", + " 'Adélie penguins show signs of self-awareness on the mirror test\\n' +\n", + " '\\n' +\n", + " 'Penguins adapt their accents to sound more like their friends\\n' +\n", + " '\\n' +\n", + " 'Trending New Scientist articles\\n' +\n", + " '\\n' +\n", + " \"SpaceX prepares for Starship flight with first 'chopstick' landing\\n\" +\n", + " '\\n' +\n", + " 'Evidence mounts that shingles vaccines protect against dementia\\n' +\n", + " '\\n' +\n", + " 'When is the best time to exercise to get the most from your workout?\\n' +\n", + " '\\n' +\n", + " 'Why slow running could be even more beneficial than running fast\\n' +\n", + " '\\n' +\n", + " 'Wafer-thin light sail could help us reach another star sooner\\n' +\n", + " '\\n' +\n", + " 'The remarkable science-backed ways to get fit as fast as possible\\n' +\n", + " '\\n' +\n", + " \"One of Earth's major carbon sinks collapsed in 2023\\n\" +\n", + " '\\n' +\n", + " 'How to use psychology to hack your mind and fall in love with exercise\\n' +\n", + " '\\n' +\n", + " 'Gene therapy enables five children who were born deaf to hear\\n' +\n", + " '\\n' +\n", + " 'Why midlife is the perfect time to take control of your future health',\n", + " timestamp: '2024-07-28T02:56:04',\n", + " title: 'Mega penguins: The tallest, largest, most amazing penguin species to have ever lived | New Scientist',\n", + " url: 'https://www.newscientist.com/article/2397894-mega-penguins-these-are-the-largest-penguins-to-have-ever-lived/'\n", + " },\n", + " {\n", + " id: 'web-search_0',\n", + " snippet: 'Sustainability for All.\\n' +\n", + " '\\n' +\n", + " 'Giant 6-Foot-8 Penguin Discovered in Antarctica\\n' +\n", + " '\\n' +\n", + " 'University of Houston\\n' +\n", + " '\\n' +\n", + " 'Bryan Nelson is a science writer and award-winning documentary filmmaker with over a decade of experience covering technology, astronomy, medicine, animals, and more.\\n' +\n", + " '\\n' +\n", + " 'Learn about our editorial process\\n' +\n", + " '\\n' +\n", + " 'Updated May 9, 2020 10:30AM EDT\\n' +\n", + " '\\n' +\n", + " \"Modern emperor penguins are certainly statuesque, but not quite as impressive as the 'colossus penguin' would have been. . Christopher Michel/flickr\\n\" +\n", + " '\\n' +\n", + " 'The largest penguin species ever discovered has been unearthed in Antarctica, and its size is almost incomprehensible. Standing at 6 foot 8 inches from toe to beak tip, the mountainous bird would have dwarfed most adult humans, reports the Guardian.\\n' +\n", + " '\\n' +\n", + " 'In fact, if it were alive today the penguin could have looked basketball superstar LeBron James square in the eyes.\\n' +\n", + " '\\n' +\n", + " \"Fossils Provide Clues to the Bird's Size\\n\" +\n", + " '\\n' +\n", + " `The bird's 37-million-year-old fossilized remains, which include the longest recorded fused ankle-foot bone as well as parts of the animal's wing bone, represent the most complete fossil ever uncovered in the Antarctic. 
Appropriately dubbed the \"colossus penguin,\" Palaeeudyptes klekowskii was truly the Godzilla of aquatic birds.\\n` +\n", + " '\\n' +\n", + " `Scientists calculated the penguin's dimensions by scaling the sizes of its bones against those of modern penguin species. They estimate that the bird probably would have weighed about 250 pounds — again, roughly comparable to LeBron James. By comparison, the largest species of penguin alive today, the emperor penguin, is \"only\" about 4 feet tall and can weigh as much as 100 pounds.\\n` +\n", + " '\\n' +\n", + " 'Interestingly, because larger bodied penguins can hold their breath for longer, the colossus penguin probably could have stayed underwater for 40 minutes or more. It boggles the mind to imagine the kinds of huge, deep sea fish this mammoth bird might have been capable of hunting.\\n' +\n", + " '\\n' +\n", + " \"The fossil was found at the La Meseta formation on Seymour Island, an island in a chain of 16 major islands around the tip of the Graham Land on the Antarctic Peninsula. (It's the region that is the closest part of Antarctica to South America.) The area is known for its abundance of penguin bones, though in prehistoric times it would have been much warmer than it is today.\\n\" +\n", + " '\\n' +\n", + " \"P. klekowskii towers over the next largest penguin ever discovered, a 5-foot-tall bird that lived about 36 million years ago in Peru. Since these two species were near contemporaries, it's fun to imagine a time between 35 and 40 million years ago when giant penguins walked the Earth, and perhaps swam alongside the ancestors of whales.\\n\" +\n", + " '\\n' +\n", + " '10 of the Largest Living Sea Creatures\\n' +\n", + " '\\n' +\n", + " '11 Facts About Blue Whales, the Largest Animals Ever on Earth\\n' +\n", + " '\\n' +\n", + " '16 Ocean Creatures That Live in Total Darkness\\n' +\n", + " '\\n' +\n", + " 'National Monuments Designated By President Obama\\n' +\n", + " '\\n' +\n", + " '20 Pygmy Animal Species From Around the World\\n' +\n", + " '\\n' +\n", + " 'School Kids Discover New Penguin Species in New Zealand\\n' +\n", + " '\\n' +\n", + " '16 of the Most Surreal Landscapes on Earth\\n' +\n", + " '\\n' +\n", + " '12 Peculiar Penguin Facts\\n' +\n", + " '\\n' +\n", + " \"10 Amazing Hoodoos Around the World and How They're Formed\\n\" +\n", + " '\\n' +\n", + " '8 Titanic Facts About Patagotitans\\n' +\n", + " '\\n' +\n", + " '9 Extinct Megafauna That Are Out of This World\\n' +\n", + " '\\n' +\n", + " '10 Places Where Penguins Live in the Wild\\n' +\n", + " '\\n' +\n", + " '16 Animals That Are Living Fossils\\n' +\n", + " '\\n' +\n", + " 'A Timeline of the Distant Future for Life on Earth\\n' +\n", + " '\\n' +\n", + " '12 Animals That May Have Inspired Mythical Creatures\\n' +\n", + " '\\n' +\n", + " '12 Dinosaur Theme Parks\\n' +\n", + " '\\n' +\n", + " 'By clicking “Accept All Cookies”, you agree to the storing of cookies on your device to enhance site navigation, analyze site usage, and assist in our marketing efforts.\\n' +\n", + " '\\n' +\n", + " 'Cookies Settings Accept All Cookies',\n", + " timestamp: '2024-07-27T06:29:15',\n", + " title: 'Giant 6-Foot-8 Penguin Discovered in Antarctica',\n", + " url: 'https://www.treehugger.com/giant-foot-penguin-discovered-in-antarctica-4864169'\n", + " },\n", + " {\n", + " id: 'web-search_5',\n", + " snippet: 'Skip to main content\\n' +\n", + " '\\n' +\n", + " 'Smithsonian Institution\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean\\n' +\n", + " '\\n' +\n", + " 'Follow us on 
Facebook Follow us on Twitter Follow us on Flickr Follow us on Tumbr\\n' +\n", + " '\\n' +\n", + " 'How Big Do Penguins Get?\\n' +\n", + " '\\n' +\n", + " '(Smithsonian Institution)\\n' +\n", + " '\\n' +\n", + " 'The largest of the penguins, the emperor, stands at just over four feet while the smallest, the little penguin, has a maximum height of a foot. \\n' +\n", + " '\\n' +\n", + " 'Coasts & Shallow Water\\n' +\n", + " '\\n' +\n", + " 'Census of Marine Life\\n' +\n", + " '\\n' +\n", + " 'Waves, Storms & Tsunamis\\n' +\n", + " '\\n' +\n", + " 'Temperature & Chemistry\\n' +\n", + " '\\n' +\n", + " 'Solutions & Success Stories\\n' +\n", + " '\\n' +\n", + " 'Books, Film & The Arts\\n' +\n", + " '\\n' +\n", + " 'Search Smithsonian Ocean',\n", + " timestamp: '2024-07-30T03:47:03',\n", + " title: 'How Big Do Penguins Get? | Smithsonian Ocean',\n", + " url: 'https://ocean.si.edu/ocean-life/seabirds/how-big-do-penguins-get'\n", + " },\n", + " {\n", + " id: 'web-search_4',\n", + " snippet: 'The emperor penguin (Aptenodytes forsteri) is the tallest and heaviest of all living penguin species and is endemic to Antarctica. The male and female are similar in plumage and size, reaching 100 cm (39 in) in length and weighing from 22 to 45 kg (49 to 99 lb). Feathers of the head and back are black and sharply delineated from the white belly, pale-yellow breast and bright-yellow ear patches.\\n' +\n", + " '\\n' +\n", + " 'Like all penguins, it is flightless, with a streamlined body, and wings stiffened and flattened into flippers for a marine habitat. Its diet consists primarily of fish, but also includes crustaceans, such as krill, and cephalopods, such as squid. While hunting, the species can remain submerged around 20 minutes, diving to a depth of 535 m (1,755 ft). It has several adaptations to facilitate this, including an unusually structured haemoglobin to allow it to function at low oxygen levels, solid bones to reduce barotrauma, and the ability to reduce its metabolism and shut down non-essential organ functions.\\n' +\n", + " '\\n' +\n", + " 'The only penguin species that breeds during the Antarctic winter, emperor penguins trek 50–120 km (31–75 mi) over the ice to breeding colonies which can contain up to several thousand individuals. The female lays a single egg, which is incubated for just over two months by the male while the female returns to the sea to feed; parents subsequently take turns foraging at sea and caring for their chick in the colony. The lifespan is typically 20 years in the wild, although observations suggest that some individuals may live to 50 years of age.\\n' +\n", + " '\\n' +\n", + " 'Emperor penguins were described in 1844 by English zoologist George Robert Gray, who created the generic name from Ancient Greek word elements, ἀ-πτηνο-δύτης [a-ptēno-dytēs], \"without-wings-diver\". Its specific name is in honour of the German naturalist Johann Reinhold Forster, who accompanied Captain James Cook on his second voyage and officially named five other penguin species. Forster may have been the first person to see the penguins in 1773–74, when he recorded a sighting of what he believed was the similar king penguin (A. patagonicus) but given the location, may very well have been A. forsteri.\\n' +\n", + " '\\n' +\n", + " \"Together with the king penguin, the emperor penguin is one of two extant species in the genus Aptenodytes. Fossil evidence of a third species—Ridgen's penguin (A. 
ridgeni)—has been found in fossil records from the late Pliocene, about three million years ago, in New Zealand. Studies of penguin behaviour and genetics have proposed that the genus Aptenodytes is basal; in other words, that it split off from a branch which led to all other living penguin species. Mitochondrial and nuclear DNA evidence suggests this split occurred around 40 million years ago.\\n\" +\n", + " '\\n' +\n", + " 'Adult emperor penguins are 110–120 cm (43–47 in) in length, averaging 115 centimetres (45 in) according to Stonehouse (1975). Due to method of bird measurement that measures length between bill to tail, sometimes body length and standing height are confused, and some reported height even reaching 1.5 metres (4.9 ft) tall. There are still more than a few papers mentioning that they reach a standing height of 1.2 metres (3.9 ft) instead of body length. Although standing height of emperor penguin is rarely provided at scientific reports, Prévost (1961) recorded 86 wild individuals and measured maximum height of 1.08 metres (3.5 ft). Friedman (1945) recorded measurements from 22 wild individuals and resulted height ranging 83–97 cm (33–38 in). Ksepka et al. (2012) measured standing height of 81–94 cm (32–37 in) according to 11 complete skins collected in American Museum of Natural History. The weight ranges from 22.7 to 45.4 kg (50 to 100 lb) and varies by sex, with males weighing more than females. It is the fifth heaviest living bird species, after only the larger varieties of ratite. The weight also varies by season, as both male and female penguins lose substantial mass while raising hatchlings and incubating their egg. A male emperor penguin must withstand the extreme Antarctic winter cold for more than two months while protecting his egg. He eats nothing during this time. Most male emperors will lose around 12 kg (26 lb) while they wait for their eggs to hatch. The mean weight of males at the start of the breeding season is 38 kg (84 lb) and that of females is 29.5 kg (65 lb). After the breeding season this drops to 23 kg (51 lb) for both sexes.\\n' +\n", + " '\\n' +\n", + " 'Like all penguin species, emperor penguins have streamlined bodies to minimize drag while swimming, and wings that are more like stiff, flat flippers. The tongue is equipped with rear-facing barbs to prevent prey from escaping when caught. Males and females are similar in size and colouration. The adult has deep black dorsal feathers, covering the head, chin, throat, back, dorsal part of the flippers, and tail. The black plumage is sharply delineated from the light-coloured plumage elsewhere. The underparts of the wings and belly are white, becoming pale yellow in the upper breast, while the ear patches are bright yellow. The upper mandible of the 8 cm (3 in) long bill is black, and the lower mandible can be pink, orange or lilac. In juveniles, the auricular patches, chin and throat are white, while its bill is black. Emperor penguin chicks are typically covered with silver-grey down and have black heads and white masks. A chick with all-white plumage was seen in 2001, but was not considered to be an albino as it did not have pink eyes. Chicks weigh around 315 g (11 oz) after hatching, and fledge when they reach about 50% of adult weight.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin's dark plumage fades to brown from November until February (the Antarctic summer), before the yearly moult in January and February. 
Moulting is rapid in this species compared with other birds, taking only around 34 days. Emperor penguin feathers emerge from the skin after they have grown to a third of their total length, and before old feathers are lost, to help reduce heat loss. New feathers then push out the old ones before finishing their growth.\\n\" +\n", + " '\\n' +\n", + " 'The average yearly survival rate of an adult emperor penguin has been measured at 95.1%, with an average life expectancy of 19.9 years. The same researchers estimated that 1% of emperor penguins hatched could feasibly reach an age of 50 years. In contrast, only 19% of chicks survive their first year of life. Therefore, 80% of the emperor penguin population comprises adults five years and older.\\n' +\n", + " '\\n' +\n", + " 'As the species has no fixed nest sites that individuals can use to locate their own partner or chick, emperor penguins must rely on vocal calls alone for identification. They use a complex set of calls that are critical to individual recognition between parents, offspring and mates, displaying the widest variation in individual calls of all penguins. Vocalizing emperor penguins use two frequency bands simultaneously. Chicks use a frequency-modulated whistle to beg for food and to contact parents.\\n' +\n", + " '\\n' +\n", + " \"The emperor penguin breeds in the coldest environment of any bird species; air temperatures may reach −40 °C (−40 °F), and wind speeds may reach 144 km/h (89 mph). Water temperature is a frigid −1.8 °C (28.8 °F), which is much lower than the emperor penguin's average body temperature of 39 °C (102 °F). The species has adapted in several ways to counteract heat loss. Dense feathers provide 80–90% of its insulation and it has a layer of sub-dermal fat which may be up to 3 cm (1.2 in) thick before breeding. While the density of contour feathers is approximately 9 per square centimetre (58 per square inch), a combination of dense afterfeathers and down feathers (plumules) likely play a critical role for insulation. Muscles allow the feathers to be held erect on land, reducing heat loss by trapping a layer of air next to the skin. Conversely, the plumage is flattened in water, thus waterproofing the skin and the downy underlayer. Preening is vital in facilitating insulation and in keeping the plumage oily and water-repellent.\\n\" +\n", + " '\\n' +\n", + " 'The emperor penguin is able to thermoregulate (maintain its core body temperature) without altering its metabolism, over a wide range of temperatures. Known as the thermoneutral range, this extends from −10 to 20 °C (14 to 68 °F). Below this temperature range, its metabolic rate increases significantly, although an individual can maintain its core temperature from 38.0 °C (100.4 °F) down to −47 °C (−53 °F). Movement by swimming, walking, and shivering are three mechanisms for increasing metabolism; a fourth process involves an increase in the breakdown of fats by enzymes, which is induced by the hormone glucagon. At temperatures above 20 °C (68 °F), an emperor penguin may become agitated as its body temperature and metabolic rate rise to increase heat loss. 
Raising its wings and exposing the undersides increases the exposure of its body surface to the air by 16%, facilitating further heat loss.\\n' +\n", + " '\\n' +\n", + " 'Adaptations to pressure and low oxygen\\n' +\n", + " '\\n' +\n", + " 'In addition to the cold, the emperor penguin encounters another stressful condition on deep dives—markedly increased pressure of up to 40 times that of the surface, which in most other terrestrial organisms would cause barotrauma. The bones of the penguin are solid rather than air-filled, which eliminates the risk of mechanical barotrauma.\\n' +\n", + " '\\n' +\n", + " \"While diving, the emperor penguin's oxygen use is markedly reduced, as its heart rate is reduced to as low as 15–20 beats per minute and non-essential organs are shut down, thus facilitating longer dives. Its haemoglobin and myoglobin are able to bind and transport oxygen at low blood concentrations; this allows the bird to function with very low oxygen levels that would otherwise result in loss of consciousness.\\n\" +\n", + " '\\n' +\n", + " 'Distribution and habitat\\n' +\n", + " '\\n' +\n", + " 'The emperor penguin has a circumpolar distribution in the Antarctic almost exclusively between the 66° and 77° south latitudes. It almost always breeds on stable pack ice near the coast and up to 18 km (11 mi) offshore. Breeding colonies are usually in areas where ice cliffs and i'... 22063 more characters,\n", + " timestamp: '2024-07-31T07:59:36',\n", + " title: 'Emperor penguin - Wikipedia',\n", + " url: 'https://en.wikipedia.org/wiki/Emperor_penguin'\n", + " }\n", + " ],\n", + " searchResults: [\n", + " {\n", + " searchQuery: {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " },\n", + " documentIds: [\n", + " 'web-search_0',\n", + " 'web-search_1',\n", + " 'web-search_2',\n", + " 'web-search_3',\n", + " 'web-search_4',\n", + " 'web-search_5'\n", + " ],\n", + " connector: { id: 'web-search' }\n", + " }\n", + " ],\n", + " searchQueries: [\n", + " {\n", + " text: 'How tall are the largest penguins?',\n", + " generationId: '8d5ae032-4c8e-492e-8686-289f198b5eb5'\n", + " }\n", + " ]\n", + " },\n", + " id: undefined,\n", + " tool_calls: [],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: { input_tokens: 11198, output_tokens: 286, total_tokens: 11484 }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatCohere } from \"@langchain/cohere\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const llmWithConnectors = new ChatCohere({\n", + " apiKey: process.env.COHERE_API_KEY, // Default\n", + "});\n", + "\n", + "const connectorsRes = await llmWithConnectors.invoke(\n", + " [new HumanMessage(\"How tall are the largest pengiuns?\")],\n", + " {\n", + " connectors: [{ id: \"web-search\" }],\n", + " }\n", + ");\n", + "console.dir(connectorsRes, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "f90cedf9", + "metadata": {}, + "source": [ + "We can see in the `additional_kwargs` object that the API request did a few things:\n", + "\n", + "- Performed a search query, storing the result data in the `searchQueries` and `searchResults` fields. In the `searchQueries` field we see they rephrased our query for better results.\n", + "- Generated three documents from the search query.\n", + "- Generated a list of citations\n", + "- Generated a final response based on the above actions & content." 
+ ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatCohere features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cohere.ChatCohere.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/cohere.mdx b/docs/core_docs/docs/integrations/chat/cohere.mdx deleted file mode 100644 index 20f55b1eb54a..000000000000 --- a/docs/core_docs/docs/integrations/chat/cohere.mdx +++ /dev/null @@ -1,100 +0,0 @@ ---- -sidebar_label: Cohere ---- - -import CodeBlock from "@theme/CodeBlock"; - -# ChatCohere - -## Setup - -In order to use the LangChain.js Cohere integration you'll need an API key. -You can sign up for a Cohere account and create an API key [here](https://dashboard.cohere.com/welcome/register). - -You'll first need to install the [`@langchain/cohere`](https://www.npmjs.com/package/@langchain/cohere) package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/cohere -``` - -## Usage - -import BasicExample from "@examples/models/chat/cohere/chat_cohere.ts"; - -{BasicExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/69ccd2aa-b651-4f07-9223-ecc0b77e645e/r) -::: - -### Streaming - -Cohere's API also supports streaming token responses. The example below demonstrates how to use this feature. - -import ChatStreamExample from "@examples/models/chat/cohere/chat_stream_cohere.ts"; - -{ChatStreamExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/36ae0564-b096-4ec1-9318-1f82fe705fe8/r) -::: - -### Tools - -The Cohere API supports tool calling, along with multi-hop-tool calling. The following example demonstrates how to call tools: - -import ToolCallingExample from "@examples/models/chat/cohere/tool_calling.ts"; - -{ToolCallingExample} - -### Stateful conversation API - -Cohere's chat API supports stateful conversations. -This means the API stores previous chat messages which can be accessed by passing in a `conversation_id` field. -The example below demonstrates how to use this feature. - -import StatefulChatExample from "@examples/models/chat/cohere/stateful_conversation.ts"; - -{StatefulChatExample} - -:::info -You can see the LangSmith traces from this example [here](https://smith.langchain.com/public/8e67b05a-4e63-414e-ac91-a91acf21b262/r) and [here](https://smith.langchain.com/public/50fabc25-46fe-4727-a59c-7e4eb0de8e70/r) -::: - -### RAG - -Cohere also comes out of the box with RAG support. -You can pass in documents as context to the API request and Cohere's models will use them when generating responses. - -import RAGExample from "@examples/models/chat/cohere/rag.ts"; - -{RAGExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/de71fffe-6f01-4c36-9b49-40d1bc87dea3/r) -::: - -### Connectors - -The API also allows for other connections which are not static documents. 
-An example of this is their `web-search` connector which allows you to pass in a query and the API will search the web for relevant documents. -The example below demonstrates how to use this feature. - -import ConnectorsExample from "@examples/models/chat/cohere/connectors.ts"; - -{ConnectorsExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/9a6f996b-cff2-4f3f-916a-640469a5a963/r) -::: - -We can see in the `kwargs` object that the API request did a few things: - -- Performed a search query, storing the result data in the `searchQueries` and `searchResults` fields. In the `searchQueries` field we see they rephrased our query to `largest penguin species height` for better results. -- Generated three documents from the search query. -- Generated a list of citations -- Generated a final response based on the above actions & content. diff --git a/docs/core_docs/docs/integrations/chat/fireworks.ipynb b/docs/core_docs/docs/integrations/chat/fireworks.ipynb index e5276cd34c6b..aaaf3e03e75a 100644 --- a/docs/core_docs/docs/integrations/chat/fireworks.ipynb +++ b/docs/core_docs/docs/integrations/chat/fireworks.ipynb @@ -3,10 +3,14 @@ { "cell_type": "raw", "id": "afaf8039", - "metadata": {}, + "metadata": { + "vscode": { + "languageId": "raw" + } + }, "source": [ "---\n", - "sidebar_label: ChatFireworks\n", + "sidebar_label: Fireworks\n", "---" ] }, @@ -22,9 +26,9 @@ "## Overview\n", "### Integration details\n", "\n", - "| Class | Package | Local | Serializable | [PY support](https:/python.langchain.com/v0.2/docs/integrations/chat/fireworks) | Package downloads | Package latest |\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/fireworks) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatFireworks](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_chat_models_fireworks.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20) |\n", + "| [ChatFireworks](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_chat_models_fireworks.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", "\n", "### Model features\n", "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", diff --git a/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb new file mode 100644 index 000000000000..192339ddce01 --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/google_generativeai.ipynb @@ -0,0 +1,614 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "46f7ac07", + "metadata": {}, + "source": [ 
+ "---\n", + "sidebar_label: Google GenAI\n", + "keywords: [gemini, gemini-pro, ChatGoogleGenerativeAI]\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatGoogleGenerativeAI\n", + "\n", + "This will help you getting started with `ChatGoogleGenerativeAI` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatGoogleGenerativeAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/google_generative_ai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatGoogleGenerativeAI](https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html) | [@langchain/google-genai](https://api.js.langchain.com/modules/langchain_google_genai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-genai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-genai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "You can access Google's `gemini` and `gemini-vision` models, as well as other\n", + "generative models in LangChain through `ChatGoogleGenerativeAI` class in the\n", + "`@langchain/google-genai` integration package.\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip\n", + "You can also access Google's `gemini` family of models via the LangChain VertexAI and VertexAI-web integrations.\n", + "\n", + "Click [here](/docs/integrations/chat/google_vertex_ai) to read the docs.\n", + ":::\n", + "\n", + "```\n", + "\n", + "### Credentials\n", + "\n", + "Get an API key here: [ai.google.dev/tutorials/setup](https://ai.google.dev/tutorials/setup)\n", + "\n", + "Then set the `GOOGLE_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export GOOGLE_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatGoogleGenerativeAI integration lives in the `@langchain/google-genai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/google-genai\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and 
generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\"\n", + "\n", + "const llm = new ChatGoogleGenerativeAI({\n", + " model: \"gemini-1.5-pro\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"J'adore programmer. \\n\",\n", + " \"additional_kwargs\": {\n", + " \"finishReason\": \"STOP\",\n", + " \"index\": 0,\n", + " \"safetyRatings\": [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"finishReason\": \"STOP\",\n", + " \"index\": 0,\n", + " \"safetyRatings\": [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " }\n", + " ]\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 21,\n", + " \"output_tokens\": 5,\n", + " \"total_tokens\": 26\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore programmer. \n", + "\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Ich liebe das Programmieren. 
\\n\",\n", + " \"additional_kwargs\": {\n", + " \"finishReason\": \"STOP\",\n", + " \"index\": 0,\n", + " \"safetyRatings\": [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"finishReason\": \"STOP\",\n", + " \"index\": 0,\n", + " \"safetyRatings\": [\n", + " {\n", + " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " },\n", + " {\n", + " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n", + " \"probability\": \"NEGLIGIBLE\"\n", + " }\n", + " ]\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 16,\n", + " \"output_tokens\": 7,\n", + " \"total_tokens\": 23\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "\n", + "The Google GenerativeAI API does not allow tool schemas to contain an object with unknown properties.\n", + "\n", + "For example, the following Zod schemas will throw an error:\n", + "\n", + "`const invalidSchema = z.object({ properties: z.record(z.unknown()) });`\n", + "\n", + "and\n", + "\n", + "`const invalidSchema2 = z.record(z.unknown());`\n", + "\n", + "Instead, you should explicitly define the properties of the object field.\n", + "\n", + ":::\n", + "\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d6805c40", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'browser_tool',\n", + " args: {\n", + " url: 'https://www.weather.com',\n", + " query: 'weather tonight in new york'\n", + " },\n", + " type: 'tool_call'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", + "import { z } from \"zod\";\n", + "\n", + "// Define your tool\n", + "const fakeBrowserTool = tool((_) => {\n", + " return \"The search result is xyz...\"\n", + "}, {\n", + " name: \"browser_tool\",\n", + " description: \"Useful for when you need to find something on the web or summarize a webpage.\",\n", + " schema: 
z.object({\n", + " url: z.string().describe(\"The URL of the webpage to search.\"),\n", + " query: z.string().optional().describe(\"An optional search query to use.\"),\n", + " }),\n", + "})\n", + "\n", + "const llmWithTool = new ChatGoogleGenerativeAI({\n", + " model: \"gemini-pro\",\n", + "}).bindTools([fakeBrowserTool]) // Bind your tools to the model\n", + "\n", + "const toolRes = await llmWithTool.invoke([\n", + " [\n", + " \"human\",\n", + " \"Search the web and tell me what the weather will be like tonight in new york. use a popular weather website\",\n", + " ],\n", + "]);\n", + "\n", + "console.log(toolRes.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "id": "83061805", + "metadata": {}, + "source": [ + "### `.withStructuredOutput`" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "ef24448c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " url: 'https://www.accuweather.com/en/us/new-york-ny/10007/current-weather/349333',\n", + " query: 'weather tonight'\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n", + "import { z } from \"zod\";\n", + "\n", + "// Define your model\n", + "const llmForWSO = new ChatGoogleGenerativeAI({\n", + " model: \"gemini-pro\",\n", + "});\n", + "\n", + "const browserSchema = z.object({\n", + " url: z.string().describe(\"The URL of the webpage to search.\"),\n", + " query: z.string().optional().describe(\"An optional search query to use.\"),\n", + "});\n", + "\n", + "const llmWithStructuredOutput = llmForWSO.withStructuredOutput(browserSchema, {\n", + " name: \"browser_tool\"\n", + "})\n", + "\n", + "const structuredOutputRes = await llmWithStructuredOutput.invoke([\n", + " [\n", + " \"human\",\n", + " \"Search the web and tell me what the weather will be like tonight in new york. use a popular weather website\",\n", + " ],\n", + "]);\n", + "\n", + "console.log(structuredOutputRes);" + ] + }, + { + "cell_type": "markdown", + "id": "3987f0cb", + "metadata": {}, + "source": [ + "## Multimodal support\n", + "\n", + "To provide an image, pass a human message with a `content` field set to an array of content objects. Each content object\n", + "where each dict contains either an image value (type of image_url) or a text (type of text) value. The value of image_url must be a base64\n", + "encoded image (e.g., data:image/png;base64,abcd124):" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "0b60fc5d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"The image shows a hot dog in a bun, isolated against a white background. The hot dog is grilled and has a slightly crispy texture. The bun is soft and fluffy, and it appears to be lightly toasted. The hot dog is positioned horizontally, with the bun covering most of the sausage. 
The image captures the classic American snack food, highlighting its simplicity and appeal.\",\n",
+ " \"additional_kwargs\": {\n",
+ " \"finishReason\": \"STOP\",\n",
+ " \"index\": 0,\n",
+ " \"safetyRatings\": [\n",
+ " {\n",
+ " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n",
+ " \"probability\": \"NEGLIGIBLE\"\n",
+ " },\n",
+ " {\n",
+ " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n",
+ " \"probability\": \"NEGLIGIBLE\"\n",
+ " },\n",
+ " {\n",
+ " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n",
+ " \"probability\": \"NEGLIGIBLE\"\n",
+ " },\n",
+ " {\n",
+ " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n",
+ " \"probability\": \"NEGLIGIBLE\"\n",
+ " }\n",
+ " ]\n",
+ " },\n",
+ " \"response_metadata\": {\n",
+ " \"finishReason\": \"STOP\",\n",
+ " \"index\": 0,\n",
+ " \"safetyRatings\": [\n",
+ " {\n",
+ " \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n",
+ " \"probability\": \"NEGLIGIBLE\"\n",
+ " },\n",
+ " {\n",
+ " \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n",
+ " \"probability\": \"NEGLIGIBLE\"\n",
+ " },\n",
+ " {\n",
+ " \"category\": \"HARM_CATEGORY_HARASSMENT\",\n",
+ " \"probability\": \"NEGLIGIBLE\"\n",
+ " },\n",
+ " {\n",
+ " \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n",
+ " \"probability\": \"NEGLIGIBLE\"\n",
+ " }\n",
+ " ]\n",
+ " },\n",
+ " \"tool_calls\": [],\n",
+ " \"invalid_tool_calls\": [],\n",
+ " \"usage_metadata\": {\n",
+ " \"input_tokens\": 264,\n",
+ " \"output_tokens\": 74,\n",
+ " \"total_tokens\": 338\n",
+ " }\n",
+ "}\n"
+ ]
+ }
+ ],
+ "source": [
+ "import fs from \"fs\";\n",
+ "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n",
+ "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n",
+ "\n",
+ "// Multi-modal\n",
+ "const llmWithVisionModel = new ChatGoogleGenerativeAI({\n",
+ " model: \"gemini-1.5-flash\",\n",
+ " maxOutputTokens: 2048,\n",
+ " maxRetries: 1,\n",
+ "});\n",
+ "const image = fs.readFileSync(\"../../../../../examples/hotdog.jpg\").toString(\"base64\");\n",
+ "const visionPrompt = ChatPromptTemplate.fromMessages([\n",
+ " [\n",
+ " \"human\", \n",
+ " [\n",
+ " {\n",
+ " type: \"text\",\n",
+ " text: \"Describe the following image.\",\n",
+ " },\n",
+ " {\n",
+ " type: \"image_url\",\n",
+ " image_url: \"data:image/png;base64,{image}\",\n",
+ " },\n",
+ " ]\n",
+ " ]\n",
+ "])\n",
+ "\n",
+ "const visionRes = await visionPrompt.pipe(llmWithVisionModel).invoke({\n",
+ " image,\n",
+ "});\n",
+ "\n",
+ "console.log(visionRes);"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0c6a950f",
+ "metadata": {},
+ "source": [
+ "## Gemini Prompting FAQs\n",
+ "\n",
+ "As of the time this doc was written (2023/12/12), Gemini has some restrictions on the types and structure of prompts it accepts. Specifically:\n",
+ "\n",
+ "1. When providing multimodal (image) inputs, you are restricted to at most 1 message of \"human\" (user) type. You cannot pass multiple messages (though the single human message may have multiple content entries)\n",
+ "2. System messages are not natively supported, and will be merged with the first human message if present.\n",
+ "3. For regular chat conversations, messages must follow the human/ai/human/ai alternating pattern. You may not provide 2 AI or human messages in sequence.\n",
+ "4. Messages may be blocked if they violate the safety checks of the LLM. In this case, the model will return an empty response.\n"
+ ]
+ },
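+ {
+ "cell_type": "markdown",
+ "id": "0c6a9510",
+ "metadata": {},
+ "source": [
+ "For example, here is a minimal sketch of a conversation shape that satisfies restrictions 2 and 3 (the model name and messages are illustrative, and the cell is not executed): the system message is merged into the first human turn, and the remaining messages alternate human/ai:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0c6a9511",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import { ChatGoogleGenerativeAI } from \"@langchain/google-genai\";\n",
+ "\n",
+ "// Illustrative sketch: the system message is merged into the first human\n",
+ "// turn (restriction 2), and the remaining messages alternate\n",
+ "// human/ai/human (restriction 3).\n",
+ "const faqLlm = new ChatGoogleGenerativeAI({ model: \"gemini-1.5-pro\" });\n",
+ "\n",
+ "await faqLlm.invoke([\n",
+ " [\"system\", \"You are a concise assistant.\"],\n",
+ " [\"human\", \"What is the tallest penguin species?\"],\n",
+ " [\"ai\", \"The emperor penguin.\"],\n",
+ " [\"human\", \"And how tall does it grow?\"],\n",
+ "]);\n",
+ "\n",
+ "// Two consecutive \"human\" (or \"ai\") messages would violate restriction 3:\n",
+ "// await faqLlm.invoke([[\"human\", \"Hi\"], [\"human\", \"Hi again\"]]);"
+ ]
+ },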
+ {
+ "cell_type": "markdown",
+ "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
+ "metadata": {},
+ "source": [
+ "## API reference\n",
+ "\n",
+ "For detailed documentation of all ChatGoogleGenerativeAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_genai.ChatGoogleGenerativeAI.html"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "TypeScript",
+ "language": "typescript",
+ "name": "tslab"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "mode": "typescript",
+ "name": "javascript",
+ "typescript": true
+ },
+ "file_extension": ".ts",
+ "mimetype": "text/typescript",
+ "name": "typescript",
+ "version": "3.7.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/core_docs/docs/integrations/chat/google_generativeai.mdx b/docs/core_docs/docs/integrations/chat/google_generativeai.mdx
deleted file mode 100644
index a97b768eae1d..000000000000
--- a/docs/core_docs/docs/integrations/chat/google_generativeai.mdx
+++ /dev/null
@@ -1,101 +0,0 @@
----
-sidebar_label: Google GenAI
-keywords: [gemini, gemini-pro, ChatGoogleGenerativeAI]
----
-
-import CodeBlock from "@theme/CodeBlock";
-
-# ChatGoogleGenerativeAI
-
-You can access Google's `gemini` and `gemini-vision` models, as well as other
-generative models in LangChain through `ChatGoogleGenerativeAI` class in the
-`@langchain/google-genai` integration package.
-
-:::tip
-You can also access Google's `gemini` family of models via the LangChain VertexAI and VertexAI-web integrations.
-
-Click [here](/docs/integrations/chat/google_vertex_ai) to read the docs.
-:::
-
-Get an API key here: https://ai.google.dev/tutorials/setup
-
-You'll first need to install the `@langchain/google-genai` package:
-
-import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
-
-
-
-```bash npm2yarn
-npm install @langchain/google-genai
-```
-
-## Usage
-
-import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx";
-
-
-
-import GoogleGenerativeAI from "@examples/models/chat/googlegenerativeai.ts";
-
-{GoogleGenerativeAI}
-
-## Tool calling
-
-:::caution
-The Google GenerativeAI package as of version `0.0.23` does not allow tool schemas to contain an object with unknown properties.
-The Google VertexAI package (as of version `0.0.20`) does support this pattern.
-[Click here for the Google VertexAI package documentation](/docs/integrations/chat/google_vertex_ai).
-
-For example, the following Zod schema will throw an error:
-
-```typescript
-const schema = z.object({
- properties: z.record(z.unknown()), // Not allowed
-});
-```
-
-or
-
-```typescript
-const schema = z.record(z.unknown()); // Not allowed
-```
-
-Instead, you should explicitly define the properties of the object field, or use the Google VertexAI package. 
-:::
-
-import GoogleGenerativeAIToolCalling from "@examples/models/chat/googlegenerativeai_tools.ts";
-
-{GoogleGenerativeAIToolCalling}
-
-:::tip
-See the above run's LangSmith trace [here](https://smith.langchain.com/public/31faf31b-dbd0-436c-a425-b9eb1bccf8b7/r)
-:::
-
-## `.withStructuredOutput`
-
-import GoogleGenerativeAIWSO from "@examples/models/chat/googlegenerativeai_wso.ts";
-
-{GoogleGenerativeAIWSO}
-
-:::tip
-See the above run's LangSmith trace [here](https://smith.langchain.com/public/4506314e-21ea-43a9-9718-22cad0bbbb38/r)
-:::
-
-## Multimodal support
-
-To provide an image, pass a human message with a `content` field set to an array of content objects. Each content object
-where each dict contains either an image value (type of image_url) or a text (type of text) value. The value of image_url must be a base64
-encoded image (e.g., data:image/png;base64,abcd124):
-
-import GoogleGenerativeAIMultimodal from "@examples/models/chat/googlegenerativeai_multimodal.ts";
-
-{GoogleGenerativeAIMultimodal}
-
-## Gemini Prompting FAQs
-
-As of the time this doc was written (2023/12/12), Gemini has some restrictions on the types and structure of prompts it accepts. Specifically:
-
-1. When providing multimodal (image) inputs, you are restricted to at most 1 message of "human" (user) type. You cannot pass multiple messages (though the single human message may have multiple content entries)
-2. System messages are not natively supported, and will be merged with the first human message if present.
-3. For regular chat conversations, messages must follow the human/ai/human/ai alternating pattern. You may not provide 2 AI or human messages in sequence.
-4. Message may be blocked if they violate the safety checks of the LLM. In this case, the model will return an empty response.
diff --git a/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb
new file mode 100644
index 000000000000..68f96df67e16
--- /dev/null
+++ b/docs/core_docs/docs/integrations/chat/google_vertex_ai.ipynb
@@ -0,0 +1,526 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "afaf8039",
+ "metadata": {
+ "vscode": {
+ "languageId": "raw"
+ }
+ },
+ "source": [
+ "---\n",
+ "sidebar_label: Google VertexAI\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e49f1e0d",
+ "metadata": {},
+ "source": [
+ "# ChatVertexAI\n",
+ "\n",
+ "This will help you get started with `ChatVertexAI` [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all `ChatVertexAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html).\n",
+ "\n",
+ "## Overview\n",
+ "### Integration details\n",
+ "\n",
+ "LangChain.js supports Google Vertex AI chat models as an integration.\n",
+ "It supports two different methods of authentication based on whether you're running\n",
+ "in a Node environment or a web environment.\n",
+ "\n",
+ "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/google_vertex_ai_palm) | Package downloads | Package latest |\n",
+ "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
+ "| [ChatVertexAI](https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html) | [@langchain/google-vertexai](https://api.js.langchain.com/modules/langchain_google_vertexai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/google-vertexai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/google-vertexai?style=flat-square&label=%20&) |\n",
+ "\n",
+ "### Model features\n",
+ "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
+ "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
+ "| ✅ | ✅ | ❌ | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ | \n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "To access `ChatVertexAI` models you'll need to set up Google VertexAI in your Google Cloud Platform (GCP) account, save the credentials file, and install the `@langchain/google-vertexai` integration package.\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "Head to GCP and generate a credentials file. 
Once you've done this set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable:\n", + "\n", + "```bash\n", + "export GOOGLE_APPLICATION_CREDENTIALS=\"path/to/your/credentials.json\"\n", + "```\n", + "\n", + "If running in a web environment, you should set the `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable as a JSON stringified object, and install the `@langchain/google-vertexai-web` package:\n", + "\n", + "```bash\n", + "GOOGLE_VERTEX_AI_WEB_CREDENTIALS={\"type\":\"service_account\",\"project_id\":\"YOUR_PROJECT-12345\",...}\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatVertexAI integration lives in the `@langchain/google-vertexai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/google-vertexai\n", + "\n", + "\n", + "Or if using in a web environment:\n", + "\n", + "\n", + " @langchain/google-vertexai-web\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatVertexAI } from \"@langchain/google-vertexai\"\n", + "// Uncomment the following line if you're running in a web environment:\n", + "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\"\n", + "\n", + "const llm = new ChatVertexAI({\n", + " model: \"gemini-1.5-pro\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " authOptions: {\n", + " // ... auth options\n", + " }\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessageChunk {\n", + " \"content\": \"J'adore programmer. \\n\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"tool_call_chunks\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 20,\n", + " \"output_tokens\": 7,\n", + " \"total_tokens\": 27\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore programmer. 
\n", + "\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessageChunk {\n", + " \"content\": \"Ich liebe das Programmieren. \\n\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {},\n", + " \"tool_calls\": [],\n", + " \"tool_call_chunks\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 15,\n", + " \"output_tokens\": 9,\n", + " \"total_tokens\": 24\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Multimodal\n", + "\n", + "The Gemini API can process multimodal inputs. The example below demonstrates how to do this:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "5981e230", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " The image shows a hot dog in a bun. The hot dog is grilled and has a red color. 
The bun is white and soft.\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatVertexAI } from \"@langchain/google-vertexai\";\n", + "import fs from \"node:fs\";\n", + "\n", + "const llmForMultiModal = new ChatVertexAI({\n", + " model: \"gemini-pro-vision\",\n", + " temperature: 0.7,\n", + "});\n", + "\n", + "const image = fs.readFileSync(\"../../../../../examples/hotdog.jpg\").toString(\"base64\");\n", + "const promptForMultiModal = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"human\",\n", + " [\n", + " {\n", + " type: \"text\",\n", + " text: \"Describe the following image.\",\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: \"data:image/png;base64,{image_base64}\",\n", + " },\n", + " ],\n", + " ],\n", + "]);\n", + "\n", + "const multiModalRes = await promptForMultiModal.pipe(llmForMultiModal).invoke({\n", + " image_base64: image,\n", + "});\n", + "\n", + "console.log(multiModalRes.content);" + ] + }, + { + "cell_type": "markdown", + "id": "aa6a51dd", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "`ChatVertexAI` also supports calling the model with a tool:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "bc64485f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'calculator',\n", + " args: { number2: 81623836, operation: 'multiply', number1: 1628253239 },\n", + " id: 'a219d75748f445ab8c7ca8b516898e18',\n", + " type: 'tool_call'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { ChatVertexAI } from \"@langchain/google-vertexai\";\n", + "import { zodToGeminiParameters } from \"@langchain/google-vertexai/utils\";\n", + "import { z } from \"zod\";\n", + "// Or, if using the web entrypoint:\n", + "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\";\n", + "\n", + "const calculatorSchema = z.object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + "});\n", + "\n", + "const geminiCalculatorTool = {\n", + " functionDeclarations: [\n", + " {\n", + " name: \"calculator\",\n", + " description: \"A simple calculator tool\",\n", + " parameters: zodToGeminiParameters(calculatorSchema),\n", + " },\n", + " ],\n", + "};\n", + "\n", + "const llmWithTool = new ChatVertexAI({\n", + " temperature: 0.7,\n", + " model: \"gemini-1.5-flash-001\",\n", + "}).bindTools([geminiCalculatorTool]);\n", + "\n", + "const toolRes = await llmWithTool.invoke(\"What is 1628253239 times 81623836?\");\n", + "console.dir(toolRes.tool_calls, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "46ce27ae", + "metadata": {}, + "source": [ + "### `withStructuredOutput`\n", + "\n", + "Alternatively, you can also use the `withStructuredOutput` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "012a9afc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ operation: 'multiply', number1: 1628253239, number2: 81623836 }\n" + ] + } + ], + "source": [ + "import { ChatVertexAI } from \"@langchain/google-vertexai\";\n", + "import { z } from \"zod\";\n", + "// Or, if using the web entrypoint:\n", + "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\";\n", + 
"\n", + "const calculatorSchemaForWSO = z.object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + "});\n", + "\n", + "const llmWithStructuredOutput = new ChatVertexAI({\n", + " temperature: 0.7,\n", + " model: \"gemini-1.5-flash-001\",\n", + "}).withStructuredOutput(calculatorSchemaForWSO, {\n", + " name: \"calculator\"\n", + "});\n", + "\n", + "const wsoRes = await llmWithStructuredOutput.invoke(\"What is 1628253239 times 81623836?\");\n", + "console.log(wsoRes);" + ] + }, + { + "cell_type": "markdown", + "id": "3b306e5b", + "metadata": {}, + "source": [ + "## VertexAI tools agent\n", + "\n", + "The Gemini family of models not only support tool calling, but can also be used in the Tool Calling agent.\n", + "Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "0391002b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The weather in Paris, France is 28 degrees Celsius. \n", + "\n" + ] + } + ], + "source": [ + "import { z } from \"zod\";\n", + "\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n", + "\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatVertexAI } from \"@langchain/google-vertexai\";\n", + "// Uncomment this if you're running inside a web/edge environment.\n", + "// import { ChatVertexAI } from \"@langchain/google-vertexai-web\";\n", + "\n", + "const llmAgent = new ChatVertexAI({\n", + " temperature: 0,\n", + " model: \"gemini-1.5-pro\",\n", + "});\n", + "\n", + "// Prompt template must have \"input\" and \"agent_scratchpad input variables\"\n", + "const agentPrompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"placeholder\", \"{chat_history}\"],\n", + " [\"human\", \"{input}\"],\n", + " [\"placeholder\", \"{agent_scratchpad}\"],\n", + "]);\n", + "\n", + "// Mocked tool\n", + "const currentWeatherTool = tool(async () => \"28 °C\", {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather in a given location\",\n", + " schema: z.object({\n", + " location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\"),\n", + " }),\n", + "});\n", + "\n", + "const agent = await createToolCallingAgent({\n", + " llm: llmAgent,\n", + " tools: [currentWeatherTool],\n", + " prompt: agentPrompt,\n", + "});\n", + "\n", + "const agentExecutor = new AgentExecutor({\n", + " agent,\n", + " tools: [currentWeatherTool],\n", + "});\n", + "\n", + "const input = \"What's the weather like in Paris?\";\n", + "const agentRes = await agentExecutor.invoke({ input });\n", + "\n", + "console.log(agentRes.output);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatVertexAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_google_vertexai.ChatVertexAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/google_vertex_ai.mdx b/docs/core_docs/docs/integrations/chat/google_vertex_ai.mdx deleted file mode 100644 index ce6f8f0d1d23..000000000000 --- a/docs/core_docs/docs/integrations/chat/google_vertex_ai.mdx +++ /dev/null @@ -1,150 +0,0 @@ ---- -sidebar_label: Google Vertex AI -keywords: [gemini, gemini-pro, ChatVertexAI, vertex] ---- - -import CodeBlock from "@theme/CodeBlock"; - -# ChatVertexAI - -LangChain.js supports Google Vertex AI chat models as an integration. -It supports two different methods of authentication based on whether you're running -in a Node environment or a web environment. - -## Setup - -### Node - -To call Vertex AI models in Node, you'll need to install the `@langchain/google-vertexai` package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/google-vertexai -``` - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -You should make sure the Vertex AI API is -enabled for the relevant project and that you've authenticated to -Google Cloud using one of these methods: - -- You are logged into an account (using `gcloud auth application-default login`) - permitted to that project. -- You are running on a machine using a service account that is permitted - to the project. -- You have downloaded the credentials for a service account that is permitted - to the project and set the `GOOGLE_APPLICATION_CREDENTIALS` environment - variable to the path of this file. 
- - - -```bash npm2yarn -npm install @langchain/google-vertexai -``` - -### Web - -To call Vertex AI models in web environments (like Edge functions), you'll need to install -the `@langchain/google-vertexai-web` package: - -```bash npm2yarn -npm install @langchain/google-vertexai-web -``` - -Then, you'll need to add your service account credentials directly as a `GOOGLE_VERTEX_AI_WEB_CREDENTIALS` environment variable: - -``` -GOOGLE_VERTEX_AI_WEB_CREDENTIALS={"type":"service_account","project_id":"YOUR_PROJECT-12345",...} -``` - -Lastly, you may also pass your credentials directly in code like this: - -```typescript -import { ChatVertexAI } from "@langchain/google-vertexai-web"; - -const model = new ChatVertexAI({ - authOptions: { - credentials: {"type":"service_account","project_id":"YOUR_PROJECT-12345",...}, - }, -}); -``` - -## Usage - -The entire family of `gemini` models are available by specifying the `modelName` parameter. - -For example: - -import ChatVertexAI from "@examples/models/chat/integration_googlevertexai.ts"; - -{ChatVertexAI} - -:::tip -See the LangSmith trace for the example above [here](https://smith.langchain.com/public/9403290d-1ca6-41e5-819c-f3ec233194c5/r). -::: - -## Multimodal - -The Gemini API can process multimodal inputs. The example below demonstrates how to do this: - -import MultiModalVertexAI from "@examples/models/chat/integration_googlevertexai-multimodal.ts"; - -{MultiModalVertexAI} - -:::tip -See the LangSmith trace for the example above [here](https://smith.langchain.com/public/4cb2707d-bcf8-417e-8965-310b3045eb62/r). -::: - -### Streaming - -`ChatVertexAI` also supports streaming in multiple chunks for faster responses: - -import ChatVertexAIStreaming from "@examples/models/chat/integration_googlevertexai-streaming.ts"; - -{ChatVertexAIStreaming} - -:::tip -See the LangSmith trace for the example above [here](https://smith.langchain.com/public/011c26dc-b7db-4fad-b0f2-3653f41a7667/r). -::: - -### Tool calling - -`ChatVertexAI` also supports calling the model with a tool: - -import ChatVertexAITool from "@examples/models/chat/integration_googlevertexai-tools.ts"; - -{ChatVertexAITool} - -:::tip -See the LangSmith trace for the example above [here](https://smith.langchain.com/public/e6714fb3-ef24-447c-810d-7ff2c80c7db4/r). -::: - -### `withStructuredOutput` - -Alternatively, you can also use the `withStructuredOutput` method: - -import ChatVertexAIWSO from "@examples/models/chat/integration_googlevertexai-wso.ts"; - -{ChatVertexAIWSO} - -:::tip -See the LangSmith trace for the example above [here](https://smith.langchain.com/public/d7b9860a-a761-4f76-ba57-195759eb38e7/r). -::: - -### VertexAI tools agent - -The Gemini family of models not only support tool calling, but can also be used in the Tool Calling agent. -Here's an example: - -import AgentsExample from "@examples/models/chat/chat_vertexai_agents.ts"; - -{AgentsExample} - -:::tip -See the LangSmith trace for the agent example above [here](https://smith.langchain.com/public/5615ee35-ba76-433b-8639-9b321cb6d4bf/r). 
-:::
diff --git a/docs/core_docs/docs/integrations/chat/groq.ipynb b/docs/core_docs/docs/integrations/chat/groq.ipynb
new file mode 100644
index 000000000000..342a48ce48a8
--- /dev/null
+++ b/docs/core_docs/docs/integrations/chat/groq.ipynb
@@ -0,0 +1,577 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "afaf8039",
+ "metadata": {
+ "vscode": {
+ "languageId": "raw"
+ }
+ },
+ "source": [
+ "---\n",
+ "sidebar_label: Groq\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e49f1e0d",
+ "metadata": {},
+ "source": [
+ "# ChatGroq\n",
+ "\n",
+ "This will help you get started with ChatGroq [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatGroq features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html).\n",
+ "\n",
+ "## Overview\n",
+ "### Integration details\n",
+ "\n",
+ "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/groq) | Package downloads | Package latest |\n",
+ "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
+ "| [ChatGroq](https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html) | [@langchain/groq](https://api.js.langchain.com/modules/langchain_groq.html) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/groq?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/groq?style=flat-square&label=%20&) |\n",
+ "\n",
+ "### Model features\n",
+ "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
+ "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
+ "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "To access ChatGroq models you'll need to create a Groq account, get an API key, and install the `@langchain/groq` integration package.\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "In order to use the Groq API you'll need an API key. 
You can sign up for a Groq account and create an API key [here](https://wow.groq.com/).\n", + "Then, you can set the API key as an environment variable in your terminal:\n", + "\n", + "```bash\n", + "export GROQ_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatGroq integration lives in the `@langchain/groq` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/groq\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatGroq } from \"@langchain/groq\" \n", + "\n", + "const llm = new ChatGroq({\n", + " model: \"mixtral-8x7b-32768\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"I enjoy programming. (The French translation is: \\\"J'aime programmer.\\\")\\n\\nNote: I chose to translate \\\"I love programming\\\" as \\\"J'aime programmer\\\" instead of \\\"Je suis amoureux de programmer\\\" because the latter has a romantic connotation that is not present in the original English sentence.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 73,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 104\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I enjoy programming. 
(The French translation is: \"J'aime programmer.\")\n", + "\n", + "Note: I chose to translate \"I love programming\" as \"J'aime programmer\" instead of \"Je suis amoureux de programmer\" because the latter has a romantic connotation that is not present in the original English sentence.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"That's great! I can help you translate English phrases related to programming into German.\\n\\n\\\"I love programming\\\" can be translated to German as \\\"Ich liebe Programmieren\\\".\\n\\nHere are some more programming-related phrases translated into German:\\n\\n* \\\"Programming language\\\" = \\\"Programmiersprache\\\"\\n* \\\"Code\\\" = \\\"Code\\\"\\n* \\\"Variable\\\" = \\\"Variable\\\"\\n* \\\"Function\\\" = \\\"Funktion\\\"\\n* \\\"Array\\\" = \\\"Array\\\"\\n* \\\"Object-oriented programming\\\" = \\\"Objektorientierte Programmierung\\\"\\n* \\\"Algorithm\\\" = \\\"Algorithmus\\\"\\n* \\\"Data structure\\\" = \\\"Datenstruktur\\\"\\n* \\\"Debugging\\\" = \\\"Debuggen\\\"\\n* \\\"Compile\\\" = \\\"Kompilieren\\\"\\n* \\\"Link\\\" = \\\"Verknüpfen\\\"\\n* \\\"Run\\\" = \\\"Ausführen\\\"\\n* \\\"Test\\\" = \\\"Testen\\\"\\n* \\\"Deploy\\\" = \\\"Bereitstellen\\\"\\n* \\\"Version control\\\" = \\\"Versionskontrolle\\\"\\n* \\\"Open source\\\" = \\\"Open Source\\\"\\n* \\\"Software development\\\" = \\\"Softwareentwicklung\\\"\\n* \\\"Agile methodology\\\" = \\\"Agile Methodik\\\"\\n* \\\"DevOps\\\" = \\\"DevOps\\\"\\n* \\\"Cloud computing\\\" = \\\"Cloud Computing\\\"\\n\\nI hope this helps! 
Let me know if you have any other questions or if you need further translations.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 327,\n", + " \"promptTokens\": 25,\n", + " \"totalTokens\": 352\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": []\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "Groq chat models support calling multiple functions to get all required data to answer a question.\n", + "Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "aa42d55a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'get_current_weather',\n", + " args: { location: 'San Francisco', unit: 'fahrenheit' },\n", + " type: 'tool_call',\n", + " id: 'call_1mpy'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { ChatGroq } from \"@langchain/groq\";\n", + "import { z } from \"zod\";\n", + "\n", + "// Mocked out function, could be a database/API call in production\n", + "const getCurrentWeatherTool = tool((input) => {\n", + " if (input.location.toLowerCase().includes(\"tokyo\")) {\n", + " return JSON.stringify({ location: input.location, temperature: \"10\", unit: \"celsius\" });\n", + " } else if (input.location.toLowerCase().includes(\"san francisco\")) {\n", + " return JSON.stringify({\n", + " location: input.location,\n", + " temperature: \"72\",\n", + " unit: \"fahrenheit\",\n", + " });\n", + " } else {\n", + " return JSON.stringify({ location: input.location, temperature: \"22\", unit: \"celsius\" });\n", + " }\n", + "}, {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather in a given location\",\n", + " schema: z.object({\n", + " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n", + " unit: z.enum([\"celsius\", \"fahrenheit\"]).optional(),\n", + " }),\n", + "})\n", + "\n", + "// Bind function to the model as a tool\n", + "const llmWithTools = new ChatGroq({\n", + " model: \"mixtral-8x7b-32768\",\n", + " maxTokens: 128,\n", + "}).bindTools([getCurrentWeatherTool], {\n", + " tool_choice: \"auto\",\n", + "});\n", + "\n", + "const resWithTools = await llmWithTools.invoke([\n", + " [\"human\", \"What's the weather like in San Francisco?\"],\n", + "]);\n", + "\n", + "console.dir(resWithTools.tool_calls, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "ae6d3948", + "metadata": {}, + "source": [ + "### `.withStructuredOutput({ ... })`\n", + "\n", + "```{=mdx}\n", + "\n", + ":::info\n", + "The `.withStructuredOutput` method is in beta. 
It is actively being worked on, so the API may change.\n", + ":::\n", + "\n", + "```\n", + "\n", + "You can also use the `.withStructuredOutput({ ... })` method to coerce `ChatGroq` into returning a structured output.\n", + "\n", + "The method allows for passing in either a Zod object, or a valid JSON schema (like what is returned from [`zodToJsonSchema`](https://www.npmjs.com/package/zod-to-json-schema)).\n", + "\n", + "Using the method is simple. Just define your LLM and call `.withStructuredOutput({ ... })` on it, passing the desired schema.\n", + "\n", + "Here is an example using a Zod schema and the `functionCalling` mode (default mode):" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1ad6c77d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ operation: 'add', number1: 2, number2: 2 }\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatGroq } from \"@langchain/groq\";\n", + "import { z } from \"zod\";\n", + "\n", + "const calculatorSchema = z.object({\n", + " operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", + " number1: z.number(),\n", + " number2: z.number(),\n", + "});\n", + "\n", + "const llmForWSO = new ChatGroq({\n", + " temperature: 0,\n", + " model: \"mixtral-8x7b-32768\",\n", + "});\n", + "const modelWithStructuredOutput = llmForWSO.withStructuredOutput(calculatorSchema);\n", + "\n", + "const promptWSO = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are VERY bad at math and must always use a calculator.\"],\n", + " [\"human\", \"Please help me!! What is 2 + 2?\"],\n", + "]);\n", + "const chainWSO = promptWSO.pipe(modelWithStructuredOutput);\n", + "const resultWSO = await chainWSO.invoke({});\n", + "console.log(resultWSO);" + ] + }, + { + "cell_type": "markdown", + "id": "24757550", + "metadata": {}, + "source": [ + "You can also specify 'includeRaw' to return the parsed and raw output in the result." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "1d13ed6f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " raw: AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: '',\n", + " additional_kwargs: {\n", + " tool_calls: [\n", + " {\n", + " id: 'call_7z1y',\n", + " type: 'function',\n", + " function: {\n", + " name: 'calculator',\n", + " arguments: '{\"number1\":2,\"number2\":2,\"operation\":\"add\"}'\n", + " }\n", + " }\n", + " ]\n", + " },\n", + " tool_calls: [\n", + " {\n", + " name: 'calculator',\n", + " args: { number1: 2, number2: 2, operation: 'add' },\n", + " type: 'tool_call',\n", + " id: 'call_7z1y'\n", + " }\n", + " ],\n", + " invalid_tool_calls: [],\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: '',\n", + " name: undefined,\n", + " additional_kwargs: {\n", + " tool_calls: [\n", + " {\n", + " id: 'call_7z1y',\n", + " type: 'function',\n", + " function: {\n", + " name: 'calculator',\n", + " arguments: '{\"number1\":2,\"number2\":2,\"operation\":\"add\"}'\n", + " }\n", + " }\n", + " ]\n", + " },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: 111, promptTokens: 1257, totalTokens: 1368 },\n", + " finish_reason: 'tool_calls'\n", + " },\n", + " id: undefined,\n", + " tool_calls: [\n", + " {\n", + " name: 'calculator',\n", + " args: { number1: 2, number2: 2, operation: 'add' },\n", + " type: 'tool_call',\n", + " id: 'call_7z1y'\n", + " }\n", + " ],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: undefined\n", + " },\n", + " parsed: { operation: 'add', number1: 2, number2: 2 }\n", + "}\n" + ] + } + ], + "source": [ + "const includeRawModel = llmForWSO.withStructuredOutput(calculatorSchema, {\n", + " name: \"calculator\",\n", + " includeRaw: true,\n", + "});\n", + "\n", + "const includeRawChain = promptWSO.pipe(includeRawModel);\n", + "const includeRawResult = await includeRawChain.invoke(\"\");\n", + "console.dir(includeRawResult, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "7944c7c3", + "metadata": {}, + "source": [ + "## Streaming\n", + "\n", + "Groq's API also supports streaming token responses. The example below demonstrates how to use this feature." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "4ae5fb48", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "stream: \n", + "stream: Hello\n", + "stream: Hello!\n", + "stream: Hello! I\n", + "stream: Hello! I'\n", + "stream: Hello! I'm\n", + "stream: Hello! I'm here\n", + "stream: Hello! I'm here to\n", + "stream: Hello! I'm here to help\n", + "stream: Hello! I'm here to help you\n", + "stream: Hello! I'm here to help you.\n", + "stream: Hello! I'm here to help you. Is\n", + "stream: Hello! I'm here to help you. Is there\n", + "stream: Hello! I'm here to help you. Is there something\n", + "stream: Hello! I'm here to help you. Is there something you\n", + "stream: Hello! I'm here to help you. Is there something you would\n", + "stream: Hello! I'm here to help you. Is there something you would like\n", + "stream: Hello! I'm here to help you. Is there something you would like to\n", + "stream: Hello! I'm here to help you. Is there something you would like to know\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or\n", + "stream: Hello! I'm here to help you. 
Is there something you would like to know or a\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with?\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask me\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask me anything\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? Please feel free to ask me anything.\n", + "stream: Hello! I'm here to help you. Is there something you would like to know or a task you need assistance with? 
Please feel free to ask me anything.\n" + ] + } + ], + "source": [ + "import { ChatGroq } from \"@langchain/groq\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "\n", + "const llmForStreaming = new ChatGroq({\n", + " apiKey: process.env.GROQ_API_KEY,\n", + "});\n", + "const promptForStreaming = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "const outputParserForStreaming = new StringOutputParser();\n", + "const chainForStreaming = promptForStreaming.pipe(llmForStreaming).pipe(outputParserForStreaming);\n", + "const streamRes = await chainForStreaming.stream({\n", + " input: \"Hello\",\n", + "});\n", + "let streamedRes = \"\";\n", + "for await (const item of streamRes) {\n", + " streamedRes += item;\n", + " console.log(\"stream:\", streamedRes);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatGroq features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_groq.ChatGroq.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/groq.mdx b/docs/core_docs/docs/integrations/chat/groq.mdx deleted file mode 100644 index 794a06b75a4e..000000000000 --- a/docs/core_docs/docs/integrations/chat/groq.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -sidebar_label: Groq ---- - -import CodeBlock from "@theme/CodeBlock"; - -# ChatGroq - -## Setup - -In order to use the Groq API you'll need an API key. You can sign up for a Groq account and create an API key [here](https://wow.groq.com/). - -You'll first need to install the [`@langchain/groq`](https://www.npmjs.com/package/@langchain/groq) package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/groq -``` - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -## Usage - -import ChatGroqExample from "@examples/models/chat/chat_groq.ts"; - -{ChatGroqExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/2ba59207-1383-4e42-b6a6-c1ddcfcd5710/r) -::: - -## Tool calling - -Groq chat models support calling multiple functions to get all required data to answer a question. -Here's an example: - -import GroqTools from "@examples/models/chat/integration_groq_tool_calls.ts"; - -{GroqTools} - -### `.withStructuredOutput({ ... })` - -:::info -The `.withStructuredOutput` method is in beta. It is actively being worked on, so the API may change. -::: - -You can also use the `.withStructuredOutput({ ... })` method to coerce `ChatGroq` into returning a structured output. - -The method allows for passing in either a Zod object, or a valid JSON schema (like what is returned from [`zodToJsonSchema`](https://www.npmjs.com/package/zod-to-json-schema)). - -Using the method is simple. 
Just define your LLM and call `.withStructuredOutput({ ... })` on it, passing the desired schema. - -Here is an example using a Zod schema and the `functionCalling` mode (default mode): - -import WSAZodExample from "@examples/models/chat/integration_groq_wsa_zod.ts"; - -{WSAZodExample} - -## Streaming - -Groq's API also supports streaming token responses. The example below demonstrates how to use this feature. - -import ChatStreamGroqExample from "@examples/models/chat/chat_stream_groq.ts"; - -{ChatStreamGroqExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/72832eb5-b9ae-4ce0-baa2-c2e95eca61a7/r) -::: diff --git a/docs/core_docs/docs/integrations/chat/mistral.ipynb b/docs/core_docs/docs/integrations/chat/mistral.ipynb index f308e0c1d768..f3f61fec8bff 100644 --- a/docs/core_docs/docs/integrations/chat/mistral.ipynb +++ b/docs/core_docs/docs/integrations/chat/mistral.ipynb @@ -3,10 +3,14 @@ { "cell_type": "raw", "id": "afaf8039", - "metadata": {}, + "metadata": { + "vscode": { + "languageId": "raw" + } + }, "source": [ "---\n", - "sidebar_label: ChatMistralAI\n", + "sidebar_label: MistralAI\n", "---" ] }, @@ -22,9 +26,9 @@ "## Overview\n", "### Integration details\n", "\n", - "| Class | Package | Local | Serializable | [PY support](https:/python.langchain.com/v0.2/docs/integrations/chat/mistralai) | Package downloads | Package latest |\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/mistralai) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [ChatMistralAI](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html) | [@langchain/mistralai](https://api.js.langchain.com/modules/langchain_mistralai.html) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20) |\n", + "| [ChatMistralAI](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html) | [@langchain/mistralai](https://api.js.langchain.com/modules/langchain_mistralai.html) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n", "\n", "### Model features\n", "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", @@ -250,7 +254,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 1, "id": "98d9034c", "metadata": {}, "outputs": [ @@ -263,7 +267,7 @@ " name: 'calculator',\n", " args: { operation: 'add', number1: 2, number2: 2 },\n", " type: 'tool_call',\n", - " id: '2HFfjvCvo'\n", + " id: 'Tn8X3UCSP'\n", " }\n", "]\n" ] @@ -275,7 +279,7 @@ "import { z } from \"zod\";\n", "import { tool } from \"@langchain/core/tools\";\n", "\n", - "const calculatorSchema2 = z.object({\n", + "const calculatorSchema = z.object({\n", " operation: z\n", " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", " .describe(\"The type of operation to execute.\"),\n", @@ -283,24 +287,23 @@ " number2: 
z.number().describe(\"The second number to operate on.\"),\n", "});\n", "\n", - "const calculatorTool2 = tool((input) => {\n", + "const calculatorTool = tool((input) => {\n", " return JSON.stringify(input);\n", "}, {\n", " name: \"calculator\",\n", " description: \"A simple calculator tool\",\n", - " schema: calculatorSchema2,\n", + " schema: calculatorSchema,\n", "});\n", "\n", - "const llm2 = new ChatMistralAI({\n", + "// Bind the tool to the model\n", + "const modelWithTool = new ChatMistralAI({\n", " model: \"mistral-large-latest\",\n", + "}).bind({\n", + " tools: [calculatorTool],\n", "});\n", "\n", - "// Bind the tool to the model\n", - "const modelWithTool2 = llm2.bind({\n", - " tools: [calculatorTool2],\n", - "});\n", "\n", - "const prompt2 = ChatPromptTemplate.fromMessages([\n", + "const calcToolPrompt = ChatPromptTemplate.fromMessages([\n", " [\n", " \"system\",\n", " \"You are a helpful assistant who always needs to use a calculator.\",\n", @@ -309,12 +312,12 @@ "]);\n", "\n", "// Chain your prompt, model, and output parser together\n", - "const chain2 = prompt2.pipe(modelWithTool2);\n", + "const chainWithCalcTool = calcToolPrompt.pipe(modelWithTool);\n", "\n", - "const response2 = await chain2.invoke({\n", + "const calcToolRes = await chainWithCalcTool.invoke({\n", " input: \"What is 2 + 2?\",\n", "});\n", - "console.log(response2.tool_calls);" + "console.log(calcToolRes.tool_calls);" ] }, { @@ -337,7 +340,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 3, "id": "a8638d82", "metadata": {}, "outputs": [ @@ -354,7 +357,7 @@ "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", "import { z } from \"zod\";\n", "\n", - "const calculatorSchema3 = z\n", + "const calculatorSchemaForWSO = z\n", " .object({\n", " operation: z\n", " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", @@ -364,14 +367,16 @@ " })\n", " .describe(\"A simple calculator tool\");\n", "\n", - "const llm3 = new ChatMistralAI({\n", + "const llmForWSO = new ChatMistralAI({\n", " model: \"mistral-large-latest\",\n", - "});\n", + "})\n", "\n", "// Pass the schema and tool name to the withStructuredOutput method\n", - "const modelWithTool3 = llm3.withStructuredOutput(calculatorSchema3);\n", + "const modelWithStructuredOutput = llmForWSO.withStructuredOutput(calculatorSchemaForWSO, {\n", + " name: \"calculator\",\n", + "});\n", "\n", - "const prompt3 = ChatPromptTemplate.fromMessages([\n", + "const promptForWSO = ChatPromptTemplate.fromMessages([\n", " [\n", " \"system\",\n", " \"You are a helpful assistant who always needs to use a calculator.\",\n", @@ -380,12 +385,12 @@ "]);\n", "\n", "// Chain your prompt and model together\n", - "const chain3 = prompt3.pipe(modelWithTool3);\n", + "const chainWSO = promptForWSO.pipe(modelWithStructuredOutput);\n", "\n", - "const response3 = await chain3.invoke({\n", + "const responseWSO = await chainWSO.invoke({\n", " input: \"What is 2 + 2?\",\n", "});\n", - "console.log(response3);" + "console.log(responseWSO);" ] }, { @@ -398,7 +403,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "id": "9786b41a", "metadata": {}, "outputs": [ @@ -416,14 +421,14 @@ " name: 'calculator',\n", " args: { operation: 'add', number1: 2, number2: 2 },\n", " type: 'tool_call',\n", - " id: 'qVxKofNLR'\n", + " id: 'w48T6Nc3d'\n", " }\n", " ],\n", " invalid_tool_calls: [],\n", " additional_kwargs: {\n", " tool_calls: [\n", " {\n", - " id: 'qVxKofNLR',\n", + " id: 'w48T6Nc3d',\n", " function: {\n", " name: 'calculator',\n", " 
arguments: '{\"operation\": \"add\", \"number1\": 2, \"number2\": 2}'\n", @@ -441,7 +446,7 @@ " additional_kwargs: {\n", " tool_calls: [\n", " {\n", - " id: 'qVxKofNLR',\n", + " id: 'w48T6Nc3d',\n", " function: {\n", " name: 'calculator',\n", " arguments: '{\"operation\": \"add\", \"number1\": 2, \"number2\": 2}'\n", @@ -460,7 +465,7 @@ " name: 'calculator',\n", " args: { operation: 'add', number1: 2, number2: 2 },\n", " type: 'tool_call',\n", - " id: 'qVxKofNLR'\n", + " id: 'w48T6Nc3d'\n", " }\n", " ],\n", " invalid_tool_calls: [],\n", @@ -472,16 +477,16 @@ } ], "source": [ - "const includeRawModel3 = llm3.withStructuredOutput(calculatorSchema3, {\n", + "const includeRawModel = llmForWSO.withStructuredOutput(calculatorSchemaForWSO, {\n", " name: \"calculator\",\n", " includeRaw: true,\n", "});\n", - "const includeRawChain3 = prompt3.pipe(includeRawModel3);\n", + "const includeRawChain = promptForWSO.pipe(includeRawModel);\n", "\n", - "const includeRawResponse3 = await includeRawChain3.invoke({\n", + "const includeRawResponse = await includeRawChain.invoke({\n", " input: \"What is 2 + 2?\",\n", "});\n", - "console.dir(includeRawResponse3, { depth: null });" + "console.dir(includeRawResponse, { depth: null });" ] }, { @@ -494,7 +499,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "id": "9f1dc9bd", "metadata": {}, "outputs": [ @@ -510,7 +515,7 @@ "import { ChatMistralAI } from \"@langchain/mistralai\";\n", "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", "\n", - "const calculatorJsonSchema4 = {\n", + "const calculatorJsonSchema = {\n", " type: \"object\",\n", " properties: {\n", " operation: {\n", @@ -528,14 +533,14 @@ " description: \"A simple calculator tool\",\n", "};\n", "\n", - "const llm4 = new ChatMistralAI({\n", + "const llmForJsonSchema = new ChatMistralAI({\n", " model: \"mistral-large-latest\",\n", "});\n", "\n", "// Pass the schema and tool name to the withStructuredOutput method\n", - "const modelWithTool4 = llm4.withStructuredOutput(calculatorJsonSchema4);\n", + "const modelWithJsonSchemaTool = llmForJsonSchema.withStructuredOutput(calculatorJsonSchema);\n", "\n", - "const prompt4 = ChatPromptTemplate.fromMessages([\n", + "const promptForJsonSchema = ChatPromptTemplate.fromMessages([\n", " [\n", " \"system\",\n", " \"You are a helpful assistant who always needs to use a calculator.\",\n", @@ -544,12 +549,12 @@ "]);\n", "\n", "// Chain your prompt and model together\n", - "const chain4 = prompt4.pipe(modelWithTool4);\n", + "const chainWithJsonSchema = promptForJsonSchema.pipe(modelWithJsonSchemaTool);\n", "\n", - "const response4 = await chain4.invoke({\n", + "const responseFromJsonSchema = await chainWithJsonSchema.invoke({\n", " input: \"What is 2 + 2?\",\n", "});\n", - "console.log(response4);\n" + "console.log(responseFromJsonSchema);\n" ] }, { @@ -565,7 +570,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "id": "76bd0061", "metadata": {}, "outputs": [ @@ -573,7 +578,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "The weather in Paris is 28 °C.\n" + "It's 28 °C in Paris.\n" ] } ], @@ -585,13 +590,13 @@ "\n", "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", "\n", - "const llm5 = new ChatMistralAI({\n", + "const llmForAgent = new ChatMistralAI({\n", " temperature: 0,\n", " model: \"mistral-large-latest\",\n", "});\n", "\n", "// Prompt template must have \"input\" and \"agent_scratchpad input variables\"\n", - "const prompt5 = ChatPromptTemplate.fromMessages([\n", 
+ "const agentPrompt = ChatPromptTemplate.fromMessages([\n", " [\"system\", \"You are a helpful assistant\"],\n", " [\"placeholder\", \"{chat_history}\"],\n", " [\"human\", \"{input}\"],\n", @@ -599,7 +604,7 @@ "]);\n", "\n", "// Mocked tool\n", - "const currentWeatherTool5 = tool(async () => \"28 °C\", {\n", + "const currentWeatherToolForAgent = tool(async () => \"28 °C\", {\n", " name: \"get_current_weather\",\n", " description: \"Get the current weather in a given location\",\n", " schema: z.object({\n", @@ -608,20 +613,20 @@ "});\n", "\n", "const agent = createToolCallingAgent({\n", - " llm: llm5,\n", - " tools: [currentWeatherTool5],\n", - " prompt: prompt5,\n", + " llm: llmForAgent,\n", + " tools: [currentWeatherToolForAgent],\n", + " prompt: agentPrompt,\n", "});\n", "\n", "const agentExecutor = new AgentExecutor({\n", " agent,\n", - " tools: [currentWeatherTool5],\n", + " tools: [currentWeatherToolForAgent],\n", "});\n", "\n", - "const input = \"What's the weather like in Paris?\";\n", - "const { output } = await agentExecutor.invoke({ input });\n", + "const agentInput = \"What's the weather like in Paris?\";\n", + "const agentRes = await agentExecutor.invoke({ input: agentInput });\n", "\n", - "console.log(output);\n" + "console.log(agentRes.output);\n" ] }, { diff --git a/docs/core_docs/docs/integrations/chat/ollama.ipynb b/docs/core_docs/docs/integrations/chat/ollama.ipynb new file mode 100644 index 000000000000..1ed84b3130f8 --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/ollama.ipynb @@ -0,0 +1,559 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Ollama\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatOllama\n", + "\n", + "This will help you getting started with `ChatOllama` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatOllama` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.\n", + "\n", + "Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. 
It optimizes setup and configuration details, including GPU usage.\n", + "\n", + "This example goes over how to use LangChain to interact with an Ollama-run Llama 2 7b instance as a chat model.\n", + "For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library).\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/v0.2/docs/integrations/chat/ollama) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatOllama](https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html) | [@langchain/ollama](https://api.js.langchain.com/modules/langchain_ollama.html) | ✅ | beta | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/ollama?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/ollama?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance. Then, download the `@langchain/ollama` package.\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatOllama integration lives in the `@langchain/ollama` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/ollama\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOllama } from \"@langchain/ollama\"\n", + "\n", + "const llm = new ChatOllama({\n", + " model: \"llama3\",\n", + " temperature: 0,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Je adore le programmation.\\n\\n(Note: \\\"programmation\\\" is the feminine form of the noun in French, but if you want to use the masculine form, it would be \\\"le programme\\\" instead.)\",\n", + " \"additional_kwargs\": {},\n", 
+ " \"response_metadata\": {\n", + " \"model\": \"llama3\",\n", + " \"created_at\": \"2024-08-01T16:59:17.359302Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 6399311167,\n", + " \"load_duration\": 5575776417,\n", + " \"prompt_eval_count\": 35,\n", + " \"prompt_eval_duration\": 110053000,\n", + " \"eval_count\": 43,\n", + " \"eval_duration\": 711744000\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 35,\n", + " \"output_tokens\": 43,\n", + " \"total_tokens\": 78\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Je adore le programmation.\n", + "\n", + "(Note: \"programmation\" is the feminine form of the noun in French, but if you want to use the masculine form, it would be \"le programme\" instead.)\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Ich liebe Programmieren!\\n\\n(Note: \\\"Ich liebe\\\" means \\\"I love\\\", \\\"Programmieren\\\" is the verb for \\\"programming\\\")\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llama3\",\n", + " \"created_at\": \"2024-08-01T16:59:18.088423Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 585146125,\n", + " \"load_duration\": 27557166,\n", + " \"prompt_eval_count\": 30,\n", + " \"prompt_eval_duration\": 74241000,\n", + " \"eval_count\": 29,\n", + " \"eval_duration\": 481195000\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 30,\n", + " \"output_tokens\": 29,\n", + " \"total_tokens\": 59\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tools\n", + "\n", + "Ollama now offers support for native tool calling. The example below demonstrates how you can invoke a tool from an Ollama model." 
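Once the model returns `tool_calls` in the example below, a natural follow-up is to execute the requested tool and pass its result back as a `ToolMessage` so the model can answer in plain language. Here is a minimal sketch of that round trip, mirroring the `ToolMessage` pattern the OpenAI notebook in this diff uses; the control flow and variable names are illustrative, not an executed cell:

```typescript
import { ChatOllama } from "@langchain/ollama";
import { tool } from "@langchain/core/tools";
import { ToolMessage } from "@langchain/core/messages";
import { z } from "zod";

// Same mocked weather tool as in the cell below.
const weatherTool = tool((_) => "Da weather is weatherin", {
  name: "get_current_weather",
  description: "Get the current weather in a given location",
  schema: z.object({
    location: z.string().describe("The city and state, e.g. San Francisco, CA"),
  }),
});

const llmWithTools = new ChatOllama({
  model: "llama3-groq-tool-use",
}).bindTools([weatherTool]);

const question = "What's the weather like today in San Francisco?";
const aiMsg = await llmWithTools.invoke(question);

// Run each requested tool call and wrap its output in a ToolMessage.
const toolMessages = await Promise.all(
  (aiMsg.tool_calls ?? []).map(async (toolCall) => {
    const toolOutput = await weatherTool.invoke(toolCall.args);
    return new ToolMessage({
      tool_call_id: toolCall.id ?? "",
      name: toolCall.name,
      content: toolOutput,
    });
  })
);

// Send the question, the tool-calling AI message, and the tool results back
// so the model can produce a final natural-language answer.
const finalMsg = await llmWithTools.invoke([question, aiMsg, ...toolMessages]);
console.log(finalMsg.content);
```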
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "d2502c0d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llama3-groq-tool-use\",\n", + " \"created_at\": \"2024-08-01T18:43:13.2181Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 2311023875,\n", + " \"load_duration\": 1560670292,\n", + " \"prompt_eval_count\": 177,\n", + " \"prompt_eval_duration\": 263603000,\n", + " \"eval_count\": 30,\n", + " \"eval_duration\": 485582000\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"get_current_weather\",\n", + " \"args\": {\n", + " \"location\": \"San Francisco, CA\"\n", + " },\n", + " \"id\": \"c7a9d590-99ad-42af-9996-41b90efcf827\",\n", + " \"type\": \"tool_call\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 177,\n", + " \"output_tokens\": 30,\n", + " \"total_tokens\": 207\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { tool } from \"@langchain/core/tools\";\n", + "import { ChatOllama } from \"@langchain/ollama\";\n", + "import { z } from \"zod\";\n", + "\n", + "const weatherTool = tool((_) => \"Da weather is weatherin\", {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather in a given location\",\n", + " schema: z.object({\n", + " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n", + " }),\n", + "});\n", + "\n", + "// Define the model\n", + "const llmForTool = new ChatOllama({\n", + " model: \"llama3-groq-tool-use\",\n", + "});\n", + "\n", + "// Bind the tool to the model\n", + "const llmWithTools = llmForTool.bindTools([weatherTool]);\n", + "\n", + "const resultFromTool = await llmWithTools.invoke(\n", + " \"What's the weather like today in San Francisco? Ensure you use the 'get_current_weather' tool.\"\n", + ");\n", + "\n", + "console.log(resultFromTool);" + ] + }, + { + "cell_type": "markdown", + "id": "47faa093", + "metadata": {}, + "source": [ + "### `.withStructuredOutput`\n", + "\n", + "Since `ChatOllama` supports the `.bindTools()` method, you can also call `.withStructuredOutput()` to get a structured output from the tool." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "759924f6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ location: 'San Francisco, CA' }\n" + ] + } + ], + "source": [ + "import { ChatOllama } from \"@langchain/ollama\";\n", + "import { z } from \"zod\";\n", + "\n", + "// Define the model\n", + "const llmForWSO = new ChatOllama({\n", + " model: \"llama3-groq-tool-use\",\n", + "});\n", + "\n", + "// Define the tool schema you'd like the model to use.\n", + "const schemaForWSO = z.object({\n", + " location: z.string().describe(\"The city and state, e.g. San Francisco, CA\"),\n", + "});\n", + "\n", + "// Pass the schema to the withStructuredOutput method to bind it to the model.\n", + "const llmWithStructuredOutput = llmForWSO.withStructuredOutput(schemaForWSO, {\n", + " name: \"get_current_weather\",\n", + "});\n", + "\n", + "const resultFromWSO = await llmWithStructuredOutput.invoke(\n", + " \"What's the weather like today in San Francisco? 
Ensure you use the 'get_current_weather' tool.\"\n", + ");\n", + "console.log(resultFromWSO);" + ] + }, + { + "cell_type": "markdown", + "id": "cb1377af", + "metadata": {}, + "source": [ + "### JSON mode\n", + "\n", + "Ollama also supports a JSON mode that coerces model outputs to only return JSON. Here's an example of how this can be useful for extraction:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "de94282b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"{\\n\\\"original\\\": \\\"I love programming\\\",\\n\\\"translated\\\": \\\"Ich liebe Programmierung\\\"\\n}\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llama3\",\n", + " \"created_at\": \"2024-08-01T17:24:54.35568Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 1754811583,\n", + " \"load_duration\": 1297200208,\n", + " \"prompt_eval_count\": 47,\n", + " \"prompt_eval_duration\": 128532000,\n", + " \"eval_count\": 20,\n", + " \"eval_duration\": 318519000\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 47,\n", + " \"output_tokens\": 20,\n", + " \"total_tokens\": 67\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOllama } from \"@langchain/ollama\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const promptForJsonMode = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " `You are an expert translator. Format all responses as JSON objects with two keys: \"original\" and \"translated\".`,\n", + " ],\n", + " [\"human\", `Translate \"{input}\" into {language}.`],\n", + "]);\n", + "\n", + "const llmJsonMode = new ChatOllama({\n", + " baseUrl: \"http://localhost:11434\", // Default value\n", + " model: \"llama3\",\n", + " format: \"json\",\n", + "});\n", + "\n", + "const chainForJsonMode = promptForJsonMode.pipe(llmJsonMode);\n", + "\n", + "const resultFromJsonMode = await chainForJsonMode.invoke({\n", + " input: \"I love programming\",\n", + " language: \"German\",\n", + "});\n", + "\n", + "console.log(resultFromJsonMode);" + ] + }, + { + "cell_type": "markdown", + "id": "9881d422", + "metadata": {}, + "source": [ + "## Multimodal models\n", + "\n", + "Ollama supports open source multimodal models like [LLaVA](https://ollama.ai/library/llava) in versions 0.1.15 and up.\n", + "You can pass images as part of a message's `content` field to multimodal-capable models like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "958171d7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \" The image shows a hot dog in a bun, which appears to be a footlong. It has been cooked or grilled to the point where it's browned and possibly has some blackened edges, indicating it might be slightly overcooked. Accompanying the hot dog is a bun that looks toasted as well. There are visible char marks on both the hot dog and the bun, suggesting they have been cooked directly over a source of heat, such as a grill or broiler. The background is white, which puts the focus entirely on the hot dog and its bun. 
\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"model\": \"llava\",\n", + " \"created_at\": \"2024-08-01T17:25:02.169957Z\",\n", + " \"done_reason\": \"stop\",\n", + " \"done\": true,\n", + " \"total_duration\": 5700249458,\n", + " \"load_duration\": 2543040666,\n", + " \"prompt_eval_count\": 1,\n", + " \"prompt_eval_duration\": 1032591000,\n", + " \"eval_count\": 127,\n", + " \"eval_duration\": 2114201000\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 1,\n", + " \"output_tokens\": 127,\n", + " \"total_tokens\": 128\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOllama } from \"@langchain/ollama\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "import * as fs from \"node:fs/promises\";\n", + "\n", + "const imageData = await fs.readFile(\"../../../../../examples/hotdog.jpg\");\n", + "const llmForMultiModal = new ChatOllama({\n", + " model: \"llava\",\n", + " baseUrl: \"http://127.0.0.1:11434\",\n", + "});\n", + "const multiModalRes = await llmForMultiModal.invoke([\n", + " new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"What is in this image?\",\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: `data:image/jpeg;base64,${imageData.toString(\"base64\")}`,\n", + " },\n", + " ],\n", + " }),\n", + "]);\n", + "console.log(multiModalRes);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatOllama features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_ollama.ChatOllama.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/ollama.mdx b/docs/core_docs/docs/integrations/chat/ollama.mdx deleted file mode 100644 index d7c50b190630..000000000000 --- a/docs/core_docs/docs/integrations/chat/ollama.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -sidebar_label: Ollama ---- - -# ChatOllama - -[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally. - -Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile. It optimizes setup and configuration details, including GPU usage. - -This example goes over how to use LangChain to interact with an Ollama-run Llama 2 7b instance as a chat model. -For a complete list of supported models and model variants, see the [Ollama model library](https://github.com/jmorganca/ollama#model-library). - -## Setup - -Follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance. Then, download the `@langchain/ollama` package. 
- -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/ollama -``` - -## Usage - -import CodeBlock from "@theme/CodeBlock"; -import OllamaExample from "@examples/models/chat/integration_ollama.ts"; - -{OllamaExample} - -:::tip -See a LangSmith trace of the above example [here](https://smith.langchain.com/public/c4f3cb1a-4496-40bd-854c-898118b21809/r) -::: - -## Tools - -Ollama now offers support for native tool calling. The example below demonstrates how you can invoke a tool from an Ollama model. - -import OllamaToolsExample from "@examples/models/chat/integration_ollama_tools.ts"; - -{OllamaToolsExample} - -:::tip -You can see the LangSmith trace of the above example [here](https://smith.langchain.com/public/940f4279-6825-4d19-9653-4c50d3c70625/r) -::: - -Since `ChatOllama` supports the `.bindTools()` method, you can also call `.withStructuredOutput()` to get a structured output from the tool. - -import OllamaWSOExample from "@examples/models/chat/integration_ollama_wso.ts"; - -{OllamaWSOExample} - -:::tip -You can see the LangSmith trace of the above example [here](https://smith.langchain.com/public/ed113c53-1299-4814-817e-1157c9eac47e/r) -::: - -## JSON mode - -Ollama also supports a JSON mode that coerces model outputs to only return JSON. Here's an example of how this can be useful for extraction: - -import OllamaJSONModeExample from "@examples/models/chat/integration_ollama_json_mode.ts"; - -{OllamaJSONModeExample} - -:::tip -You can see a simple LangSmith trace of this [here](https://smith.langchain.com/public/54c0430c-0d59-4121-b1ca-34ac1e95e0bb/r) -::: - -## Multimodal models - -Ollama supports open source multimodal models like [LLaVA](https://ollama.ai/library/llava) in versions 0.1.15 and up. -You can pass images as part of a message's `content` field to multimodal-capable models like this: - -import OllamaMultimodalExample from "@examples/models/chat/integration_ollama_multimodal.ts"; - -{OllamaMultimodalExample} - -This will currently not use the image's position within the prompt message as additional information, and will just pass -the image along as context with the rest of the prompt messages. diff --git a/docs/core_docs/docs/integrations/chat/openai.ipynb b/docs/core_docs/docs/integrations/chat/openai.ipynb new file mode 100644 index 000000000000..d63d918e2da9 --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/openai.ipynb @@ -0,0 +1,1440 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: OpenAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatOpenAI\n", + "\n", + "This will help you getting started with ChatOpenAI [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all ChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html).\n",
+ "\n",
+ "## Overview\n",
+ "### Integration details\n",
+ "\n",
+ "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/openai) | Package downloads | Package latest |\n",
+ "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
+ "| [ChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n",
+ "\n",
+ "### Model features\n",
+ "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
+ "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n",
+ "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n",
+ "\n",
+ "## Setup\n",
+ "\n",
+ "To access ChatOpenAI models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n",
+ "\n",
+ "### Credentials\n",
+ "\n",
+ "Head to [OpenAI's website](https://platform.openai.com/) to sign up for an OpenAI account and generate an API key. Once you've done this set the `OPENAI_API_KEY` environment variable:\n",
+ "\n",
+ "```bash\n",
+ "export OPENAI_API_KEY=\"your-api-key\"\n",
+ "```\n",
+ "\n",
+ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n",
+ "\n",
+ "```bash\n",
+ "# export LANGCHAIN_TRACING_V2=\"true\"\n",
+ "# export LANGCHAIN_API_KEY=\"your-api-key\"\n",
+ "```\n",
+ "\n",
+ "### Installation\n",
+ "\n",
+ "The LangChain ChatOpenAI integration lives in the `@langchain/openai` package:\n",
+ "\n",
+ "```{=mdx}\n",
+ "\n",
+ "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
+ "import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
+ "\n",
+ "<IntegrationInstallTooltip></IntegrationInstallTooltip>\n",
+ "\n",
+ "<Npm2Yarn>\n",
+ " @langchain/openai\n",
+ "</Npm2Yarn>\n",
+ "\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a38cde65-254d-4219-a441-068766c0d4b5",
+ "metadata": {},
+ "source": [
+ "## Instantiation\n",
+ "\n",
+ "Now we can instantiate our model object and generate chat completions:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import { ChatOpenAI } from \"@langchain/openai\" \n",
+ "\n",
+ "const llm = new ChatOpenAI({\n",
+ " model: \"gpt-4o\",\n",
+ " temperature: 0,\n",
+ " maxTokens: undefined,\n",
+ " timeout: undefined,\n",
+ " maxRetries: 2,\n",
+ " // other params...\n",
+ "})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2b4f3e15",
+ "metadata": {},
+ "source": [
+ "## Invocation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "62e0dbc3",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "AIMessage {\n",
+ " \"id\": 
\"chatcmpl-9rB4GvhlRb0x3hxupLBQYOKKmTxvV\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rB4JD9rVBLzTuMee9AabulowEH0d\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Multimodal messages\n", + "\n", + "```{=mdx}\n", + "\n", + ":::info\n", + "This feature is currently in preview. The message schema may change in future releases.\n", + ":::\n", + "\n", + "```\n", + "\n", + "OpenAI supports interleaving images with text in input messages with their `gpt-4-vision-preview`. 
Here's an example of how this looks:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "fd55c000", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rB59AKTPDrSHuTv0y7BNUcM0QDV2\",\n", + " \"content\": \"The image shows a classic hot dog, consisting of a grilled or steamed sausage served in the slit of a partially sliced bun. The sausage appears to have grill marks, indicating it may have been cooked on a grill. This is a typical and popular snack or fast food item often enjoyed at sporting events, barbecues, and fairs.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 69,\n", + " \"promptTokens\": 438,\n", + " \"totalTokens\": 507\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 438,\n", + " \"output_tokens\": 69,\n", + " \"total_tokens\": 507\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import * as fs from \"node:fs/promises\";\n", + "\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "const imageData2 = await fs.readFile(\"../../../../../examples/hotdog.jpg\");\n", + "const llm2 = new ChatOpenAI({\n", + " model: \"gpt-4-vision-preview\",\n", + " maxTokens: 1024,\n", + " apiKey: process.env.OPENAI_API_KEY,\n", + "});\n", + "const message2 = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"What's in this image?\",\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: {\n", + " url: `data:image/jpeg;base64,${imageData2.toString(\"base64\")}`,\n", + " },\n", + " },\n", + " ],\n", + "});\n", + "\n", + "const res2 = await llm2.invoke([message2]);\n", + "console.log(res2);\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "eafbba15", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rB5EWz5AyOHg6UiFkt4HC8H4UZJu\",\n", + " \"content\": \"The image contains text that reads \\\"LangChain\\\". 
Additionally, there is an illustration of a parrot on the left side and two interlinked rings on the right.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 33,\n", + " \"promptTokens\": 778,\n", + " \"totalTokens\": 811\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 778,\n", + " \"output_tokens\": 33,\n", + " \"total_tokens\": 811\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const hostedImageMessage3 = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"What does this image say?\",\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url:\n", + " \"https://www.freecodecamp.org/news/content/images/2023/05/Screenshot-2023-05-29-at-5.40.38-PM.png\",\n", + " },\n", + " ],\n", + "});\n", + "const res3 = await llm2.invoke([hostedImageMessage3]);\n", + "console.log(res3);" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "a3832fc3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rB5IUbzvMo5nsOGYW3jvrQjaCiCg\",\n", + " \"content\": \"The image shows a user interface of a digital service or platform called \\\"WebLangChain\\\" which appears to be powered by \\\"Tailify.\\\" There is a prompt that encourages users to \\\"Ask me anything about anything!\\\" Alongside this, there is a text input field labeled \\\"Ask anything...\\\" which also features some example questions or search queries such as \\\"what is langchain?\\\", \\\"history of mesopotamia\\\", \\\"how to build a discord bot\\\", \\\"leonardo dicaprio girlfriend\\\", \\\"fun gift ideas for software engineers\\\", \\\"how does a prism separate light\\\", and \\\"what bear is best\\\". 
The overall design is clean, with a dark background and a send button represented by a blue icon with a paper airplane, which typically symbolizes sending a message or submitting a query.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 158,\n", + " \"promptTokens\": 101,\n", + " \"totalTokens\": 259\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 101,\n", + " \"output_tokens\": 158,\n", + " \"total_tokens\": 259\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const lowDetailImage4 = new HumanMessage({\n", + " content: [\n", + " {\n", + " type: \"text\",\n", + " text: \"Summarize the contents of this image.\",\n", + " },\n", + " {\n", + " type: \"image_url\",\n", + " image_url: {\n", + " url: \"https://blog.langchain.dev/content/images/size/w1248/format/webp/2023/10/Screenshot-2023-10-03-at-4.55.29-PM.png\",\n", + " detail: \"low\",\n", + " },\n", + " },\n", + " ],\n", + "});\n", + "const res4 = await llm2.invoke([lowDetailImage4]);\n", + "console.log(res4);" + ] + }, + { + "cell_type": "markdown", + "id": "1a39ecb3", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "OpenAI chat models support calling multiple functions to get all required data to answer a question.\n", + "Here's an example how a conversation turn with this functionality might look:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "c65f489f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'get_current_weather',\n", + " args: { location: 'San Francisco', unit: 'celsius' },\n", + " type: 'tool_call',\n", + " id: 'call_2ytmjITA18j3kLOzzjF5QSC4'\n", + " },\n", + " {\n", + " name: 'get_current_weather',\n", + " args: { location: 'Tokyo', unit: 'celsius' },\n", + " type: 'tool_call',\n", + " id: 'call_3sU2dCNZ8e8A8wrYlYa7Xq0G'\n", + " },\n", + " {\n", + " name: 'get_current_weather',\n", + " args: { location: 'Paris', unit: 'celsius' },\n", + " type: 'tool_call',\n", + " id: 'call_Crmc0QG4x1VHRUyiwPsqzmQS'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "// Bind function to the model as a tool\n", + "const llm5 = new ChatOpenAI({\n", + " model: \"gpt-3.5-turbo-1106\",\n", + " maxTokens: 128,\n", + "}).bind({\n", + " tools: [\n", + " {\n", + " type: \"function\",\n", + " function: {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather in a given location\",\n", + " parameters: {\n", + " type: \"object\",\n", + " properties: {\n", + " location: {\n", + " type: \"string\",\n", + " description: \"The city and state, e.g. 
San Francisco, CA\",\n", + " },\n", + " unit: { type: \"string\", enum: [\"celsius\", \"fahrenheit\"] },\n", + " },\n", + " required: [\"location\"],\n", + " },\n", + " },\n", + " },\n", + " ],\n", + " tool_choice: \"auto\",\n", + "});\n", + "\n", + "// Ask initial question that requires multiple tool calls\n", + "const res5 = await llm5.invoke([\n", + " [\"human\", \"What's the weather like in San Francisco, Tokyo, and Paris?\"],\n", + "]);\n", + "console.log(res5.tool_calls);" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "c0d3a6a1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rB5Sc3ERHpRymmAAsGS67zczVhAl\",\n", + " \"content\": \"The current weather in:\\n- San Francisco is 72°F\\n- Tokyo is 10°C\\n- Paris is 22°C\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 27,\n", + " \"promptTokens\": 236,\n", + " \"totalTokens\": 263\n", + " },\n", + " \"finish_reason\": \"stop\",\n", + " \"system_fingerprint\": \"fp_adbef9f124\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 236,\n", + " \"output_tokens\": 27,\n", + " \"total_tokens\": 263\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ToolMessage } from \"@langchain/core/messages\";\n", + "\n", + "// Mocked out function, could be a database/API call in production\n", + "function getCurrentWeather(location: string, _unit?: string) {\n", + " if (location.toLowerCase().includes(\"tokyo\")) {\n", + " return JSON.stringify({ location, temperature: \"10\", unit: \"celsius\" });\n", + " } else if (location.toLowerCase().includes(\"san francisco\")) {\n", + " return JSON.stringify({\n", + " location,\n", + " temperature: \"72\",\n", + " unit: \"fahrenheit\",\n", + " });\n", + " } else {\n", + " return JSON.stringify({ location, temperature: \"22\", unit: \"celsius\" });\n", + " }\n", + "}\n", + "\n", + "// Format the results from calling the tool calls back to OpenAI as ToolMessages\n", + "const toolMessages5 = res5.additional_kwargs.tool_calls?.map((toolCall) => {\n", + " const toolCallResult5 = getCurrentWeather(\n", + " JSON.parse(toolCall.function.arguments).location\n", + " );\n", + " return new ToolMessage({\n", + " tool_call_id: toolCall.id,\n", + " name: toolCall.function.name,\n", + " content: toolCallResult5,\n", + " });\n", + "});\n", + "\n", + "// Send the results back as the next step in the conversation\n", + "const finalResponse5 = await llm5.invoke([\n", + " [\"human\", \"What's the weather like in San Francisco, Tokyo, and Paris?\"],\n", + " res5,\n", + " ...(toolMessages5 ?? []),\n", + "]);\n", + "\n", + "console.log(finalResponse5);" + ] + }, + { + "cell_type": "markdown", + "id": "067715fe", + "metadata": {}, + "source": [ + "### `.withStructuredOutput({ ... })`\n", + "\n", + "You can also use the `.withStructuredOutput({ ... })` method to coerce `ChatOpenAI` into returning a structured output.\n", + "\n", + "The method allows for passing in either a Zod object, or a valid JSON schema (like what is returned from [`zodToJsonSchema`](https://www.npmjs.com/package/zod-to-json-schema)).\n", + "\n", + "Using the method is simple. Just define your LLM and call `.withStructuredOutput({ ... 
})` on it, passing the desired schema.\n", + "\n", + "Here is an example using a Zod schema and the `functionCalling` mode (default mode):" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "94bab2ee", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ operation: 'add', number1: 2, number2: 2 }\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { z } from \"zod\";\n", + "\n", + "const llm6 = new ChatOpenAI({\n", + " temperature: 0,\n", + " model: \"gpt-4-turbo-preview\",\n", + "});\n", + "\n", + "const calculatorSchema6 = z.object({\n", + " operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", + " number1: z.number(),\n", + " number2: z.number(),\n", + "});\n", + "\n", + "const modelWithStructuredOutput6 = llm6.withStructuredOutput(calculatorSchema6);\n", + "\n", + "const prompt6 = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are VERY bad at math and must always use a calculator.\"],\n", + " [\"human\", \"Please help me!! What is 2 + 2?\"],\n", + "]);\n", + "const chain6 = prompt6.pipe(modelWithStructuredOutput6);\n", + "const result6 = await chain6.invoke({});\n", + "console.log(result6);\n" + ] + }, + { + "cell_type": "markdown", + "id": "b6e97547", + "metadata": {}, + "source": [ + "You can also specify `includeRaw` to return the parsed and raw output in the result." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "640acaf4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " raw: AIMessage {\n", + " \"id\": \"chatcmpl-9rB5emIYRslBFrUIsC2368dXltljw\",\n", + " \"content\": \"\",\n", + " \"additional_kwargs\": {\n", + " \"tool_calls\": [\n", + " {\n", + " \"id\": \"call_JaH5OB3KYvKF76TUOt6Lp8mu\",\n", + " \"type\": \"function\",\n", + " \"function\": \"[Object]\"\n", + " }\n", + " ]\n", + " },\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 15,\n", + " \"promptTokens\": 93,\n", + " \"totalTokens\": 108\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [\n", + " {\n", + " \"name\": \"calculator\",\n", + " \"args\": {\n", + " \"number1\": 2,\n", + " \"number2\": 2,\n", + " \"operation\": \"add\"\n", + " },\n", + " \"type\": \"tool_call\",\n", + " \"id\": \"call_JaH5OB3KYvKF76TUOt6Lp8mu\"\n", + " }\n", + " ],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 93,\n", + " \"output_tokens\": 15,\n", + " \"total_tokens\": 108\n", + " }\n", + " },\n", + " parsed: { operation: 'add', number1: 2, number2: 2 }\n", + "}\n" + ] + } + ], + "source": [ + "const includeRawModel6 = llm6.withStructuredOutput(calculatorSchema6, {\n", + " name: \"calculator\",\n", + " includeRaw: true,\n", + "});\n", + "\n", + "const includeRawChain6 = prompt6.pipe(includeRawModel6);\n", + "const includeRawResult6 = await includeRawChain6.invoke({});\n", + "console.log(includeRawResult6);" + ] + }, + { + "cell_type": "markdown", + "id": "f92f236c", + "metadata": {}, + "source": [ + "Additionally, you can pass in an OpenAI function definition or JSON schema directly:\n", + "\n", + "```{=mdx}\n", + "\n", + ":::info\n", + "If using `jsonMode` as the `method` you must include context in your prompt about the structured output you want. 
This _must_ include the keyword: `JSON`.\n", + ":::\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "02e01d32", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ number1: 2, number2: 2, operation: 'add' }\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm7 = new ChatOpenAI({\n", + " temperature: 0,\n", + " model: \"gpt-4-turbo-preview\",\n", + "});\n", + "\n", + "const calculatorSchema7 = {\n", + " type: \"object\",\n", + " properties: {\n", + " operation: {\n", + " type: \"string\",\n", + " enum: [\"add\", \"subtract\", \"multiply\", \"divide\"],\n", + " },\n", + " number1: { type: \"number\" },\n", + " number2: { type: \"number\" },\n", + " },\n", + " required: [\"operation\", \"number1\", \"number2\"],\n", + "};\n", + "\n", + "// Default mode is \"functionCalling\"\n", + "const modelWithStructuredOutput7 = llm7.withStructuredOutput(calculatorSchema7);\n", + "\n", + "const prompt7 = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " `You are VERY bad at math and must always use a calculator.\n", + "Respond with a JSON object containing three keys:\n", + "'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide',\n", + "'number1': the first number to operate on,\n", + "'number2': the second number to operate on.\n", + "`,\n", + " ],\n", + " [\"human\", \"Please help me!! What is 2 + 2?\"],\n", + "]);\n", + "const chain7 = prompt7.pipe(modelWithStructuredOutput7);\n", + "const result7 = await chain7.invoke({});\n", + "console.log(result7);" + ] + }, + { + "cell_type": "markdown", + "id": "ae798f49", + "metadata": {}, + "source": [ + "You can also specify 'includeRaw' to return the parsed and raw output in the result, as well as a \"name\" field to give the LLM additional context as to what you are generating." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "9a5579e4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " raw: AIMessage {\n", + " \"id\": \"chatcmpl-9rB5lkylQMLSP9CQ4SaQB9zGw1rP1\",\n", + " \"content\": \"{\\n \\\"operation\\\": \\\"add\\\",\\n \\\"number1\\\": 2,\\n \\\"number2\\\": 2\\n}\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 25,\n", + " \"promptTokens\": 91,\n", + " \"totalTokens\": 116\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 91,\n", + " \"output_tokens\": 25,\n", + " \"total_tokens\": 116\n", + " }\n", + " },\n", + " parsed: { operation: 'add', number1: 2, number2: 2 }\n", + "}\n" + ] + } + ], + "source": [ + "const includeRawModel7 = llm7.withStructuredOutput(calculatorSchema7, {\n", + " name: \"calculator\",\n", + " includeRaw: true,\n", + " method: \"jsonMode\",\n", + "});\n", + "\n", + "const includeRawChain7 = prompt7.pipe(includeRawModel7);\n", + "const includeRawResult7 = await includeRawChain7.invoke({});\n", + "console.log(includeRawResult7);" + ] + }, + { + "cell_type": "markdown", + "id": "bf343e65", + "metadata": {}, + "source": [ + "### Disabling parallel tool calls\n", + "\n", + "If you have multiple tools bound to the model, but you'd only like for a single tool to be called at a time, you can pass the `parallel_tool_calls` call option to enable/disable this behavior.\n", + "By default, `parallel_tool_calls` is set to `true`." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "5cb759f2", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { z } from \"zod\";\n", + "import { zodToJsonSchema } from \"zod-to-json-schema\";\n", + "\n", + "const llm8 = new ChatOpenAI({\n", + " temperature: 0,\n", + " model: \"gpt-4o\",\n", + "});\n", + "\n", + "// Define your tools\n", + "const calculatorSchema8 = z\n", + " .object({\n", + " operation: z.enum([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", + " number1: z.number(),\n", + " number2: z.number(),\n", + " })\n", + " .describe(\"A tool to perform basic arithmetic operations\");\n", + "const weatherSchema8 = z\n", + " .object({\n", + " city: z.string(),\n", + " })\n", + " .describe(\"A tool to get the weather in a city\");\n", + "\n", + "// Bind tools to the model\n", + "const modelWithTools8 = llm8.bindTools([\n", + " {\n", + " type: \"function\",\n", + " function: {\n", + " name: \"calculator\",\n", + " description: calculatorSchema8.description,\n", + " parameters: zodToJsonSchema(calculatorSchema8),\n", + " },\n", + " },\n", + " {\n", + " type: \"function\",\n", + " function: {\n", + " name: \"weather\",\n", + " description: weatherSchema8.description,\n", + " parameters: zodToJsonSchema(weatherSchema8),\n", + " },\n", + " },\n", + "]);\n", + "\n", + "// Invoke the model with `parallel_tool_calls` set to `true`\n", + "const response8 = await modelWithTools8.invoke(\n", + " [\"What is the weather in san francisco and what is 23716 times 27342?\"],\n", + " {\n", + " parallel_tool_calls: true,\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "42a69645", + "metadata": {}, + "source": [ + "We can see it called two tools:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "95db614b", + "metadata": {}, 
+ "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'weather',\n", + " args: { city: 'san francisco' },\n", + " type: 'tool_call',\n", + " id: 'call_FyxazII0M0OgKMnk2UuXDhjv'\n", + " },\n", + " {\n", + " name: 'calculator',\n", + " args: { operation: 'multiply', number1: 23716, number2: 27342 },\n", + " type: 'tool_call',\n", + " id: 'call_raQz2ABUtVpbkruA2K6vBNYd'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "console.log(response8.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "id": "6a46a7bb", + "metadata": {}, + "source": [ + "Invoke the model with `parallel_tool_calls` set to `false`" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "6a2bac84", + "metadata": {}, + "outputs": [], + "source": [ + "const response9 = await modelWithTools8.invoke(\n", + " [\"What is the weather in san francisco and what is 23716 times 27342?\"],\n", + " {\n", + " parallel_tool_calls: false,\n", + " }\n", + ");" + ] + }, + { + "cell_type": "markdown", + "id": "17d91e9f", + "metadata": {}, + "source": [ + "We can see it called one tool" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "5731d51d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'weather',\n", + " args: { city: 'san francisco' },\n", + " type: 'tool_call',\n", + " id: 'call_xFbViRUVYj8BFnJIVedU7GVn'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "console.log(response9.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "id": "06ffc86b", + "metadata": {}, + "source": [ + "## Custom URLs\n", + "\n", + "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19a092b9", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm10 = new ChatOpenAI({\n", + " temperature: 0.9,\n", + " configuration: {\n", + " baseURL: \"https://your_custom_url.com\",\n", + " },\n", + "});\n", + "\n", + "const message10 = await llm10.invoke(\"Hi there!\");" + ] + }, + { + "cell_type": "markdown", + "id": "013b6300", + "metadata": {}, + "source": [ + "You can also pass other `ClientOptions` parameters accepted by the official SDK.\n", + "\n", + "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/chat/azure).\n", + "\n", + "## Calling fine-tuned models\n", + "\n", + "You can call fine-tuned OpenAI models by passing in your corresponding `modelName` parameter.\n", + "\n", + "This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. 
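A hypothetical concrete value might look like `ft:gpt-3.5-turbo-0613:my-org::abc123`. 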
For example:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7448f6a9", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm11 = new ChatOpenAI({\n", + " temperature: 0.9,\n", + " model: \"ft:gpt-3.5-turbo-0613:{ORG_NAME}::{MODEL_ID}\",\n", + "});\n", + "\n", + "const message11 = await llm11.invoke(\"Hi there!\");" + ] + }, + { + "cell_type": "markdown", + "id": "a2270901", + "metadata": {}, + "source": [ + "## Generation metadata\n", + "\n", + "If you need additional information like logprobs or token usage, these will be returned directly in the `.invoke` response.\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip\n", + "Requires `@langchain/core` version >=0.1.48.\n", + ":::\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "2b675330", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " content: [\n", + " {\n", + " token: 'Hello',\n", + " logprob: -0.0004585519,\n", + " bytes: [ 72, 101, 108, 108, 111 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: '!',\n", + " logprob: -0.000049305523,\n", + " bytes: [ 33 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' How',\n", + " logprob: -0.000029517714,\n", + " bytes: [ 32, 72, 111, 119 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' can',\n", + " logprob: -0.00073185476,\n", + " bytes: [ 32, 99, 97, 110 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' I',\n", + " logprob: -9.0883464e-7,\n", + " bytes: [ 32, 73 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' assist',\n", + " logprob: -0.104538105,\n", + " bytes: [\n", + " 32, 97, 115,\n", + " 115, 105, 115,\n", + " 116\n", + " ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' you',\n", + " logprob: -6.704273e-7,\n", + " bytes: [ 32, 121, 111, 117 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' today',\n", + " logprob: -0.000052643223,\n", + " bytes: [ 32, 116, 111, 100, 97, 121 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: '?',\n", + " logprob: -0.00001247159,\n", + " bytes: [ 63 ],\n", + " top_logprobs: []\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "// See https://cookbook.openai.com/examples/using_logprobs for details\n", + "const llm12 = new ChatOpenAI({\n", + " logprobs: true,\n", + " // topLogprobs: 5,\n", + "});\n", + "\n", + "const responseMessage12 = await llm12.invoke(\"Hi there!\");\n", + "console.dir(responseMessage12.response_metadata.logprobs, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "899c364f", + "metadata": {}, + "source": [ + "### With callbacks\n", + "\n", + "You can also use the callbacks system:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "01e74121", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " content: [\n", + " {\n", + " token: 'Hello',\n", + " logprob: -0.0005182436,\n", + " bytes: [ 72, 101, 108, 108, 111 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: '!',\n", + " logprob: -0.000040246043,\n", + " bytes: [ 33 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' How',\n", + " logprob: -0.000035716304,\n", + " bytes: [ 32, 72, 111, 119 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' can',\n", + " logprob: 
-0.0006764544,\n", + " bytes: [ 32, 99, 97, 110 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' I',\n", + " logprob: -0.0000010280384,\n", + " bytes: [ 32, 73 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' assist',\n", + " logprob: -0.12827769,\n", + " bytes: [\n", + " 32, 97, 115,\n", + " 115, 105, 115,\n", + " 116\n", + " ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' you',\n", + " logprob: -4.3202e-7,\n", + " bytes: [ 32, 121, 111, 117 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' today',\n", + " logprob: -0.000059914648,\n", + " bytes: [ 32, 116, 111, 100, 97, 121 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: '?',\n", + " logprob: -0.000012352386,\n", + " bytes: [ 63 ],\n", + " top_logprobs: []\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "// See https://cookbook.openai.com/examples/using_logprobs for details\n", + "const llm13 = new ChatOpenAI({\n", + " logprobs: true,\n", + " // topLogprobs: 5,\n", + "});\n", + "\n", + "const result13 = await llm13.invoke(\"Hi there!\", {\n", + " callbacks: [\n", + " {\n", + " handleLLMEnd(output) {\n", + " console.dir(output.generations[0][0].generationInfo.logprobs, { depth: null });\n", + " },\n", + " },\n", + " ],\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "7f9f01aa", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " content: [\n", + " {\n", + " token: 'Hello',\n", + " logprob: -0.0005182436,\n", + " bytes: [ 72, 101, 108, 108, 111 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: '!',\n", + " logprob: -0.000040246043,\n", + " bytes: [ 33 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' How',\n", + " logprob: -0.000035716304,\n", + " bytes: [ 32, 72, 111, 119 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' can',\n", + " logprob: -0.0006764544,\n", + " bytes: [ 32, 99, 97, 110 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' I',\n", + " logprob: -0.0000010280384,\n", + " bytes: [ 32, 73 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' assist',\n", + " logprob: -0.12827769,\n", + " bytes: [\n", + " 32, 97, 115,\n", + " 115, 105, 115,\n", + " 116\n", + " ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' you',\n", + " logprob: -4.3202e-7,\n", + " bytes: [ 32, 121, 111, 117 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: ' today',\n", + " logprob: -0.000059914648,\n", + " bytes: [ 32, 116, 111, 100, 97, 121 ],\n", + " top_logprobs: []\n", + " },\n", + " {\n", + " token: '?',\n", + " logprob: -0.000012352386,\n", + " bytes: [ 63 ],\n", + " top_logprobs: []\n", + " }\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "console.dir(result13.response_metadata.logprobs, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "5194627d", + "metadata": {}, + "source": [ + "## Streaming tokens\n", + "\n", + "OpenAI supports streaming token counts via an opt-in call option. This can be set by passing `{ stream_options: { include_usage: true } }`.\n", + "Setting this call option will cause the model to return an additional chunk at the end of the stream, containing the token usage." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "f6efaebb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ input_tokens: 13, output_tokens: 33, total_tokens: 46 }\n" + ] + } + ], + "source": [ + "import type { AIMessageChunk } from \"@langchain/core/messages\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "import { concat } from \"@langchain/core/utils/stream\";\n", + "\n", + "// Instantiate the model\n", + "const llm14 = new ChatOpenAI();\n", + "\n", + "const response14 = await llm14.stream(\"Hello, how are you?\", {\n", + " // Pass the stream options\n", + " stream_options: {\n", + " include_usage: true,\n", + " },\n", + "});\n", + "\n", + "// Iterate over the response, only saving the last chunk\n", + "let finalResult14: AIMessageChunk | undefined;\n", + "for await (const chunk14 of response14) {\n", + " finalResult14 = !finalResult14 ? chunk14 : concat(finalResult14, chunk14);\n", + "}\n", + "\n", + "console.log(finalResult14?.usage_metadata);" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/openai.mdx b/docs/core_docs/docs/integrations/chat/openai.mdx deleted file mode 100644 index 4ef51a28c603..000000000000 --- a/docs/core_docs/docs/integrations/chat/openai.mdx +++ /dev/null @@ -1,153 +0,0 @@ ---- -sidebar_label: OpenAI ---- - -import CodeBlock from "@theme/CodeBlock"; - -# ChatOpenAI - -You can use OpenAI's chat models as follows: - -import OpenAI from "@examples/models/chat/integration_openai.ts"; - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/openai -``` - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -{OpenAI} - -If you're part of an organization, you can set `process.env.OPENAI_ORGANIZATION` with your OpenAI organization id, or pass it in as `organization` when -initializing the model. - -## Multimodal messages - -:::info -This feature is currently in preview. The message schema may change in future releases. -::: - -OpenAI supports interleaving images with text in input messages with their `gpt-4-vision-preview`. Here's an example of how this looks: - -import OpenAIVision from "@examples/models/chat/integration_openai_vision.ts"; - -{OpenAIVision} - -## Tool calling - -:::info -This feature is currently only available for `gpt-3.5-turbo-1106` and `gpt-4-1106-preview` models. -::: - -More recent OpenAI chat models support calling multiple functions to get all required data to answer a question. -Here's an example how a conversation turn with this functionality might look: - -import OpenAITools from "@examples/models/chat/integration_openai_tool_calls.ts"; - -{OpenAITools} - -### `.withStructuredOutput({ ... 
})` - -:::info -The `.withStructuredOutput` method is in beta. It is actively being worked on, so the API may change. -::: - -You can also use the `.withStructuredOutput({ ... })` method to coerce `ChatOpenAI` into returning a structured output. - -The method allows for passing in either a Zod object, or a valid JSON schema (like what is returned from [`zodToJsonSchema`](https://www.npmjs.com/package/zod-to-json-schema)). - -Using the method is simple. Just define your LLM and call `.withStructuredOutput({ ... })` on it, passing the desired schema. - -Here is an example using a Zod schema and the `functionCalling` mode (default mode): - -import WSAZodExample from "@examples/models/chat/integration_openai_wsa_zod.ts"; - -{WSAZodExample} - -Additionally, you can pass in an OpenAI function definition or JSON schema directly: - -:::info -If using `jsonMode` as the `method` you must include context in your prompt about the structured output you want. This _must_ include the keyword: `JSON`. -::: - -import WSAJSONSchemaExample from "@examples/models/chat/integration_openai_wsa_json_schema.ts"; - -{WSAJSONSchemaExample} - -## Custom URLs - -You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this: - -import OpenAICustomBase from "@examples/models/chat/integration_openai_custom_base.ts"; - -{OpenAICustomBase} - -You can also pass other `ClientOptions` parameters accepted by the official SDK. - -If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/chat/azure). - -## Calling fine-tuned models - -You can call fine-tuned OpenAI models by passing in your corresponding `modelName` parameter. - -This generally takes the form of `ft:{OPENAI_MODEL_NAME}:{ORG_NAME}::{MODEL_ID}`. For example: - -import OpenAIFineTuned from "@examples/models/chat/integration_openai_fine_tune.ts"; - -{OpenAIFineTuned} - -## Generation metadata - -If you need additional information like logprobs or token usage, these will be returned directly in the `.invoke` response. - -:::tip -Requires `@langchain/core` version >=0.1.48. -::: - -import OpenAIInvokeInfo from "@examples/models/chat/integration_openai_invoke_info.ts"; -import OpenAIGenerationInfo from "@examples/models/chat/integration_openai_generation_info.ts"; -import OpenAICallbacks from "@examples/models/chat/integration_openai_callbacks.ts"; - -{OpenAIInvokeInfo} - -### With callbacks - -You can also use the callbacks system: - -{OpenAICallbacks} - -### With `.generate()` - -{OpenAIGenerationInfo} - -### Streaming tokens - -OpenAI supports streaming token counts via an opt-in call option. This can be set by passing `{ stream_options: { include_usage: true } }`. -Setting this call option will cause the model to return an additional chunk at the end of the stream, containing the token usage. - -import OpenAIStreamTokens from "@examples/models/chat/integration_openai_stream_tokens.ts"; - -{OpenAIStreamTokens} - -:::tip -See the LangSmith trace [here](https://smith.langchain.com/public/66bf7377-cc69-4676-91b6-25929a05e8b7/r) -::: - -### Disabling parallel tool calls - -If you have multiple tools bound to the model, but you'd only like for a single tool to be called at a time, you can pass the `parallel_tool_calls` call option to enable/disable this behavior. -By default, `parallel_tool_calls` is set to `true`. 
- -import OpenAIParallelToolCallsTokens from "@examples/models/chat/integration_openai_parallel_tool_calls.ts"; - -{OpenAIParallelToolCallsTokens} - -:::tip -See the LangSmith trace for the first invocation [here](https://smith.langchain.com/public/68f2ff13-6331-47d8-a8c0-d1745788e84e/r) and the second invocation [here](https://smith.langchain.com/public/6c2fff29-9470-486a-8715-805fda631024/r) -::: diff --git a/docs/core_docs/docs/integrations/chat/togetherai.ipynb b/docs/core_docs/docs/integrations/chat/togetherai.ipynb new file mode 100644 index 000000000000..8ed09f8d41c6 --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/togetherai.ipynb @@ -0,0 +1,343 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Together\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatTogetherAI\n", + "\n", + "This will help you getting started with `ChatTogetherAI` [chat models](/docs/concepts/#chat-models). For detailed documentation of all `ChatTogetherAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/togetherai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatTogetherAI](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_chat_models_togetherai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access `ChatTogetherAI` models you'll need to create a Together account, get an API key [here](https://api.together.xyz/), and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [api.together.ai](https://api.together.ai/) to sign up to TogetherAI and generate an API key. 
Once you've done this set the `TOGETHER_AI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export TOGETHER_AI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatTogetherAI integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatTogetherAI } from \"@langchain/community/chat_models/togetherai\"\n", + "\n", + "const llm = new ChatTogetherAI({\n", + " model: \"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rT9qEDPZ6iLCk6jt3XTzVDDH6pcI\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rT9wolZWfJ3xovORxnkdf1rcPbbY\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling & JSON mode\n", + "\n", + "The TogetherAI chat supports JSON mode and calling tools.\n", + "\n", + "### Tool calling" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8de584a8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'calculator',\n", + " args: { input: '2 + 3' },\n", + " type: 'tool_call',\n", + " id: 'call_nhtnmganqJPAG9I1cN8ULI9R'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { ChatTogetherAI } from \"@langchain/community/chat_models/togetherai\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { convertToOpenAITool } from \"@langchain/core/utils/function_calling\";\n", + "import { Calculator } from \"@langchain/community/tools/calculator\";\n", + "\n", + "// Use a pre-built tool\n", + "const calculatorTool = convertToOpenAITool(new Calculator());\n", + "\n", + "const modelWithCalculator = new ChatTogetherAI({\n", + " temperature: 0,\n", + " // This is the default env variable name it will look for if none is passed.\n", + " apiKey: process.env.TOGETHER_AI_API_KEY,\n", + " // Together JSON mode/tool calling only supports a select number of models\n", + " model: \"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n", + "}).bind({\n", + " // Bind the tool to the model.\n", + " tools: [calculatorTool],\n", + " tool_choice: calculatorTool, // Specify what 
tool the model should use\n",
+    "});\n",
+    "\n",
+    "const promptForTools = ChatPromptTemplate.fromMessages([\n",
+    "  [\"system\", \"You are a super not-so-smart mathematician.\"],\n",
+    "  [\"human\", \"Help me out, how can I add {math}?\"],\n",
+    "]);\n",
+    "\n",
+    "// Use LCEL to chain the prompt to the model.\n",
+    "const responseWithTool = await promptForTools.pipe(modelWithCalculator).invoke({\n",
+    "  math: \"2 plus 3\",\n",
+    "});\n",
+    "\n",
+    "console.dir(responseWithTool.tool_calls, { depth: null });"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3",
+   "metadata": {},
+   "source": [
+    "Behind the scenes, TogetherAI uses the OpenAI SDK and an OpenAI-compatible API, with some caveats:\n",
+    "\n",
+    "- Certain properties are not supported by the TogetherAI API, see [here](https://docs.together.ai/reference/chat-completions).\n",
+    "\n",
+    "## API reference\n",
+    "\n",
+    "For detailed documentation of all ChatTogetherAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "TypeScript",
+   "language": "typescript",
+   "name": "tslab"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "mode": "typescript",
+    "name": "javascript",
+    "typescript": true
+   },
+   "file_extension": ".ts",
+   "mimetype": "text/typescript",
+   "name": "typescript",
+   "version": "3.7.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/core_docs/docs/integrations/chat/togetherai.mdx b/docs/core_docs/docs/integrations/chat/togetherai.mdx
deleted file mode 100644
index f938be05c8c4..000000000000
--- a/docs/core_docs/docs/integrations/chat/togetherai.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-sidebar_label: TogetherAI
----
-
-import CodeBlock from "@theme/CodeBlock";
-
-# ChatTogetherAI
-
-## Setup
-
-1. Create a TogetherAI account and get your API key [here](https://api.together.xyz/).
-2. Export or set your API key inline. The ChatTogetherAI class defaults to `process.env.TOGETHER_AI_API_KEY`.
-
-```bash
-export TOGETHER_AI_API_KEY=your-api-key
-```
-
-You can use models provided by TogetherAI as follows:
-
-import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx";
-
-
-
-```bash npm2yarn
-npm install @langchain/community
-```
-
-import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx";
-
-
-
-import TogetherAI from "@examples/models/chat/integration_togetherai.ts";
-
-{TogetherAI}
-
-## Tool calling & JSON mode
-
-The TogetherAI chat supports JSON mode and calling tools.
-
-### Tool calling
-
-import TogetherToolsExample from "@examples/models/chat/integration_togetherai_tools.ts";
-
-{TogetherToolsExample}
-
-:::tip
-See a LangSmith trace of the above example [here](https://smith.langchain.com/public/5082ea20-c2de-410f-80e2-dbdfbf4d8adb/r).
-:::
-
-### JSON mode
-
-To use JSON mode you must include the string "JSON" inside the prompt.
-Typical conventions include telling the model to use JSON, eg: `Respond to the user in JSON format`.
-
-import TogetherJSONModeExample from "@examples/models/chat/integration_togetherai_json.ts";
-
-{TogetherJSONModeExample}
-
-:::tip
-See a LangSmith trace of the above example [here](https://smith.langchain.com/public/3864aebb-5096-4b5f-b096-e54ddd1ec3d2/r).
-:::
-
-Behind the scenes, TogetherAI uses the OpenAI SDK and OpenAI compatible API, with some caveats:
-
-- Certain properties are not supported by the TogetherAI API, see [here](https://docs.together.ai/reference/chat-completions).
diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb
new file mode 100644
index 000000000000..ab81ec8b86c6
--- /dev/null
+++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.ipynb
@@ -0,0 +1,221 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "sidebar_label: FireCrawl\n",
+    "\n",
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# FireCrawlLoader\n",
+    "\n",
+    "This notebook provides a quick overview for getting started with [FireCrawlLoader](/docs/integrations/document_loaders/). For detailed documentation of all FireCrawlLoader features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_firecrawl.FireCrawlLoader.html).\n",
+    "\n",
+    "## Overview\n",
+    "### Integration details\n",
+    "\n",
+    "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/document_loaders/firecrawl) |\n",
+    "| :--- | :--- | :---: | :---: | :---: |\n",
+    "| [FireCrawlLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_firecrawl.FireCrawlLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_web_firecrawl.html) | 🟠 (see details below) | beta | ✅ |\n",
+    "### Loader features\n",
+    "| Source | Web Loader | Node Envs Only |\n",
+    "| :---: | :---: | :---: |\n",
+    "| FireCrawlLoader | ✅ | ❌ |\n",
+    "\n",
+    "[FireCrawl](https://firecrawl.dev) crawls and converts any website into LLM-ready data. It crawls all accessible sub-pages and gives you clean markdown and metadata for each. No sitemap required.\n",
+    "\n",
+    "FireCrawl handles complex tasks such as reverse proxies, caching, rate limits, and content blocked by JavaScript. Built by the [mendable.ai](https://mendable.ai) team.\n",
+    "\n",
+    "This guide shows how to scrape and crawl entire websites and load them using the `FireCrawlLoader` in LangChain.\n",
+    "\n",
+    "## Setup\n",
+    "\n",
+    "To access the `FireCrawlLoader` document loader you'll need to install the `@langchain/community` integration, and the `@mendable/firecrawl-js` package. Then create a **[FireCrawl](https://firecrawl.dev)** account and get an API key.\n",
+    "\n",
+    "### Credentials\n",
+    "\n",
+    "Sign up and get your free [FireCrawl API key](https://firecrawl.dev) to start. 
FireCrawl offers 300 free credits to get you started, and it's [open-source](https://github.com/mendableai/firecrawl) in case you want to self-host.\n", + "\n", + "Once you've done this set the `FIRECRAWL_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export FIRECRAWL_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain FireCrawlLoader integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community @mendable/firecrawl-js\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Here's an example of how to use the `FireCrawlLoader` to load web search results:\n", + "\n", + "Firecrawl offers 2 modes: `scrape` and `crawl`. In `scrape` mode, Firecrawl will only scrape the page you provide. In `crawl` mode, Firecrawl will crawl the entire website.\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import \"@mendable/firecrawl-js\";\n", + "import { FireCrawlLoader } from \"@langchain/community/document_loaders/web/firecrawl\"\n", + "\n", + "const loader = new FireCrawlLoader({\n", + " url: \"https://firecrawl.dev\", // The URL to scrape\n", + " apiKey: \"...\", // Optional, defaults to `FIRECRAWL_API_KEY` in your env.\n", + " mode: \"scrape\", // The mode to run the crawler in. Can be \"scrape\" for single urls or \"crawl\" for all accessible subpages\n", + " params: {\n", + " // optional parameters based on Firecrawl API docs\n", + " // For API documentation, visit https://docs.firecrawl.dev\n", + " },\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Document {\n", + " pageContent: \u001b[32m\"Introducing [Smart Crawl!](https://www.firecrawl.dev/smart-crawl)\\n\"\u001b[39m +\n", + " \u001b[32m\" Join the waitlist to turn any web\"\u001b[39m... 
18721 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Home - Firecrawl\"\u001b[39m,\n", + " description: \u001b[32m\"Firecrawl crawls and converts any website into clean markdown.\"\u001b[39m,\n", + " keywords: \u001b[32m\"Firecrawl,Markdown,Data,Mendable,Langchain\"\u001b[39m,\n", + " robots: \u001b[32m\"follow, index\"\u001b[39m,\n", + " ogTitle: \u001b[32m\"Firecrawl\"\u001b[39m,\n", + " ogDescription: \u001b[32m\"Turn any website into LLM-ready data.\"\u001b[39m,\n", + " ogUrl: \u001b[32m\"https://www.firecrawl.dev/\"\u001b[39m,\n", + " ogImage: \u001b[32m\"https://www.firecrawl.dev/og.png?123\"\u001b[39m,\n", + " ogLocaleAlternate: [],\n", + " ogSiteName: \u001b[32m\"Firecrawl\"\u001b[39m,\n", + " sourceURL: \u001b[32m\"https://firecrawl.dev\"\u001b[39m,\n", + " pageStatusCode: \u001b[33m500\u001b[39m\n", + " },\n", + " id: \u001b[90mundefined\u001b[39m\n", + "}" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " title: \"Home - Firecrawl\",\n", + " description: \"Firecrawl crawls and converts any website into clean markdown.\",\n", + " keywords: \"Firecrawl,Markdown,Data,Mendable,Langchain\",\n", + " robots: \"follow, index\",\n", + " ogTitle: \"Firecrawl\",\n", + " ogDescription: \"Turn any website into LLM-ready data.\",\n", + " ogUrl: \"https://www.firecrawl.dev/\",\n", + " ogImage: \"https://www.firecrawl.dev/og.png?123\",\n", + " ogLocaleAlternate: [],\n", + " ogSiteName: \"Firecrawl\",\n", + " sourceURL: \"https://firecrawl.dev\",\n", + " pageStatusCode: 500\n", + "}\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Additional Parameters\n", + "\n", + "For `params` you can pass any of the params according to the [Firecrawl documentation](https://docs.firecrawl.dev)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all FireCrawlLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_web_firecrawl.FireCrawlLoader.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.mdx deleted file mode 100644 index 59fecb799db3..000000000000 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/firecrawl.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -hide_table_of_contents: true ---- - -# Firecrawl - -This guide shows how to use [Firecrawl](https://firecrawl.dev) with LangChain to load web data into an LLM-ready format using Firecrawl. - -## Overview - -[FireCrawl](https://firecrawl.dev) crawls and convert any website into LLM-ready data. It crawls all accessible subpages and give you clean markdown and metadata for each. No sitemap required. 
- -FireCrawl handles complex tasks such as reverse proxies, caching, rate limits, and content blocked by JavaScript. Built by the [mendable.ai](https://mendable.ai) team. - -This guide shows how to scrap and crawl entire websites and load them using the `FireCrawlLoader` in LangChain. - -## Setup - -Sign up and get your free [FireCrawl API key](https://firecrawl.dev) to start. FireCrawl offers 300 free credits to get you started, and it's [open-source](https://github.com/mendableai/firecrawl) in case you want to self-host. - -## Usage - -Here's an example of how to use the `FireCrawlLoader` to load web search results: - -Firecrawl offers 2 modes: `scrape` and `crawl`. In `scrape` mode, Firecrawl will only scrape the page you provide. In `crawl` mode, Firecrawl will crawl the entire website. - -import CodeBlock from "@theme/CodeBlock"; -import Example from "@examples/document_loaders/firecrawl.ts"; - -```bash npm2yarn -npm install @mendable/firecrawl-js -``` - -{Example} - -### Additional Parameters - -For `params` you can pass any of the params according to the [Firecrawl documentation](https://docs.firecrawl.dev). diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.ipynb new file mode 100644 index 000000000000..812ed2961124 --- /dev/null +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.ipynb @@ -0,0 +1,323 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: PDF files\n", + "\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# WebPDFLoader\n", + "\n", + "This notebook provides a quick overview for getting started with [WebPDFLoader](/docs/integrations/document_loaders/). 
For detailed documentation of all WebPDFLoader features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_pdf.WebPDFLoader.html).\n",
+    "\n",
+    "## Overview\n",
+    "### Integration details\n",
+    "\n",
+    "| Class | Package | Local | Serializable | PY support |\n",
+    "| :--- | :--- | :---: | :---: | :---: |\n",
+    "| [WebPDFLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_pdf.WebPDFLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_web_pdf.html) | ✅ | beta | ❌ |\n",
+    "### Loader features\n",
+    "| Source | Web Loader | Node Envs Only |\n",
+    "| :---: | :---: | :---: |\n",
+    "| WebPDFLoader | ✅ | ❌ |\n",
+    "\n",
+    "You can use this version of the popular PDFLoader in web environments.\n",
+    "By default, one document will be created for each page in the PDF file; you can change this behavior by setting the `splitPages` option to `false`.\n",
+    "\n",
+    "## Setup\n",
+    "\n",
+    "To access the `WebPDFLoader` document loader you'll need to install the `@langchain/community` integration, along with the `pdf-parse` package.\n",
+    "\n",
+    "### Credentials\n",
+    "\n",
+    "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n",
+    "\n",
+    "```bash\n",
+    "# export LANGCHAIN_TRACING_V2=\"true\"\n",
+    "# export LANGCHAIN_API_KEY=\"your-api-key\"\n",
+    "```\n",
+    "\n",
+    "### Installation\n",
+    "\n",
+    "The LangChain WebPDFLoader integration lives in the `@langchain/community` package:\n",
+    "\n",
+    "```{=mdx}\n",
+    "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
+    "import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "  @langchain/community pdf-parse\n",
+    "\n",
+    "\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Instantiation\n",
+    "\n",
+    "Now we can instantiate our loader and load documents:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import fs from \"fs/promises\";\n",
+    "import { WebPDFLoader } from \"@langchain/community/document_loaders/web/pdf\";\n",
+    "\n",
+    "const nike10kPDFPath = \"../../../../data/nke-10k-2023.pdf\";\n",
+    "\n",
+    "// Read the file as a buffer\n",
+    "const buffer = await fs.readFile(nike10kPDFPath);\n",
+    "\n",
+    "// Create a Blob from the buffer\n",
+    "const nike10kPDFBlob = new Blob([buffer], { type: 'application/pdf' });\n",
+    "\n",
+    "const loader = new WebPDFLoader(nike10kPDFBlob, {\n",
+    "  // required params = ...\n",
+    "  // optional params = ...\n",
+    "})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Load"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Document {\n",
+      "  pageContent: 'Table of Contents\\n' +\n",
+      "    'UNITED STATES\\n' +\n",
+      "    'SECURITIES AND EXCHANGE COMMISSION\\n' +\n",
+      "    'Washington, D.C. 
20549\\n' +\n", + " 'FORM 10-K\\n' +\n", + " '(Mark One)\\n' +\n", + " '☑ ANNUAL REPORT PURSUANT TO SECTION 13 OR 15(D) OF THE SECURITIES EXCHANGE ACT OF 1934\\n' +\n", + " 'FOR THE FISCAL YEAR ENDED MAY 31, 2023\\n' +\n", + " 'OR\\n' +\n", + " '☐ TRANSITION REPORT PURSUANT TO SECTION 13 OR 15(D) OF THE SECURITIES EXCHANGE ACT OF 1934\\n' +\n", + " 'FOR THE TRANSITION PERIOD FROM TO .\\n' +\n", + " 'Commission File No. 1-10635\\n' +\n", + " 'NIKE, Inc.\\n' +\n", + " '(Exact name of Registrant as specified in its charter)\\n' +\n", + " 'Oregon93-0584541\\n' +\n", + " '(State or other jurisdiction of incorporation)(IRS Employer Identification No.)\\n' +\n", + " 'One Bowerman Drive, Beaverton, Oregon 97005-6453\\n' +\n", + " '(Address of principal executive offices and zip code)\\n' +\n", + " '(503) 671-6453\\n' +\n", + " \"(Registrant's telephone number, including area code)\\n\" +\n", + " 'SECURITIES REGISTERED PURSUANT TO SECTION 12(B) OF THE ACT:\\n' +\n", + " 'Class B Common StockNKENew York Stock Exchange\\n' +\n", + " '(Title of each class)(Trading symbol)(Name of each exchange on which registered)\\n' +\n", + " 'SECURITIES REGISTERED PURSUANT TO SECTION 12(G) OF THE ACT:\\n' +\n", + " 'NONE\\n' +\n", + " 'Indicate by check mark:YESNO\\n' +\n", + " '•if the registrant is a well-known seasoned issuer, as defined in Rule 405 of the Securities Act.þ ̈\\n' +\n", + " '•if the registrant is not required to file reports pursuant to Section 13 or Section 15(d) of the Act. ̈þ\\n' +\n", + " '•whether the registrant (1) has filed all reports required to be filed by Section 13 or 15(d) of the Securities Exchange Act of 1934 during the preceding\\n' +\n", + " '12 months (or for such shorter period that the registrant was required to file such reports), and (2) has been subject to such filing requirements for the\\n' +\n", + " 'past 90 days.\\n' +\n", + " 'þ ̈\\n' +\n", + " '•whether the registrant has submitted electronically every Interactive Data File required to be submitted pursuant to Rule 405 of Regulation S-T\\n' +\n", + " '(§232.405 of this chapter) during the preceding 12 months (or for such shorter period that the registrant was required to submit such files).\\n' +\n", + " 'þ ̈\\n' +\n", + " '•whether the registrant is a large accelerated filer, an accelerated filer, a non-accelerated filer, a smaller reporting company or an emerging growth company. See the definitions of “large accelerated filer,”\\n' +\n", + " '“accelerated filer,” “smaller reporting company,” and “emerging growth company” in Rule 12b-2 of the Exchange Act.\\n' +\n", + " 'Large accelerated filerþAccelerated filer☐Non-accelerated filer☐Smaller reporting company☐Emerging growth company☐\\n' +\n", + " '•if an emerging growth company, if the registrant has elected not to use the extended transition period for complying with any new or revised financial\\n' +\n", + " 'accounting standards provided pursuant to Section 13(a) of the Exchange Act.\\n' +\n", + " ' ̈\\n' +\n", + " \"•whether the registrant has filed a report on and attestation to its management's assessment of the effectiveness of its internal control over financial\\n\" +\n", + " 'reporting under Section 404(b) of the Sarbanes-Oxley Act (15 U.S.C. 
7262(b)) by the registered public accounting firm that prepared or issued its audit\\n' +\n", + " 'report.\\n' +\n", + " 'þ\\n' +\n", + " '•if securities are registered pursuant to Section 12(b) of the Act, whether the financial statements of the registrant included in the filing reflect the\\n' +\n", + " 'correction of an error to previously issued financial statements.\\n' +\n", + " ' ̈\\n' +\n", + " '•whether any of those error corrections are restatements that required a recovery analysis of incentive-based compensation received by any of the\\n' +\n", + " \"registrant's executive officers during the relevant recovery period pursuant to § 240.10D-1(b).\\n\" +\n", + " ' ̈\\n' +\n", + " '•\\n' +\n", + " 'whether the registrant is a shell company (as defined in Rule 12b-2 of the Act).☐þ\\n' +\n", + " \"As of November 30, 2022, the aggregate market values of the Registrant's Common Stock held by non-affiliates were:\\n\" +\n", + " 'Class A$7,831,564,572 \\n' +\n", + " 'Class B136,467,702,472 \\n' +\n", + " '$144,299,267,044 ',\n", + " metadata: {\n", + " pdf: {\n", + " version: '1.10.100',\n", + " info: [Object],\n", + " metadata: null,\n", + " totalPages: 107\n", + " },\n", + " loc: { pageNumber: 1 }\n", + " },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " pdf: {\n", + " version: '1.10.100',\n", + " info: {\n", + " PDFFormatVersion: '1.4',\n", + " IsAcroFormPresent: false,\n", + " IsXFAPresent: false,\n", + " Title: '0000320187-23-000039',\n", + " Author: 'EDGAR Online, a division of Donnelley Financial Solutions',\n", + " Subject: 'Form 10-K filed on 2023-07-20 for the period ending 2023-05-31',\n", + " Keywords: '0000320187-23-000039; ; 10-K',\n", + " Creator: 'EDGAR Filing HTML Converter',\n", + " Producer: 'EDGRpdf Service w/ EO.Pdf 22.0.40.0',\n", + " CreationDate: \"D:20230720162200-04'00'\",\n", + " ModDate: \"D:20230720162208-04'00'\"\n", + " },\n", + " metadata: null,\n", + " totalPages: 107\n", + " },\n", + " loc: { pageNumber: 1 }\n", + "}\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Usage, custom `pdfjs` build\n", + "\n", + "By default we use the `pdfjs` build bundled with `pdf-parse`, which is compatible with most environments, including Node.js and modern browsers. If you want to use a more recent version of `pdfjs-dist` or if you want to use a custom build of `pdfjs-dist`, you can do so by providing a custom `pdfjs` function that returns a promise that resolves to the `PDFJS` object.\n", + "\n", + "In the following example we use the \"legacy\" (see [pdfjs docs](https://github.com/mozilla/pdf.js/wiki/Frequently-Asked-Questions#which-browsersenvironments-are-supported)) build of `pdfjs-dist`, which includes several polyfills not included in the default build.\n", + "\n", + "```{=mdx}\n", + "\n", + " pdfjs-dist\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import { WebPDFLoader } from \"@langchain/community/document_loaders/web/pdf\";\n", + "\n", + "const blob = new Blob(); // e.g. 
from a file input\n", + "\n", + "const customBuildLoader = new WebPDFLoader(blob, {\n", + " // you may need to add `.then(m => m.default)` to the end of the import\n", + " // @lc-ts-ignore\n", + " pdfjs: () => import(\"pdfjs-dist/legacy/build/pdf.js\"),\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Eliminating extra spaces\n", + "\n", + "PDFs come in many varieties, which makes reading them a challenge. The loader parses individual text elements and joins them together with a space by default, but\n", + "if you are seeing excessive spaces, this may not be the desired behavior. In that case, you can override the separator with an empty string like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import { WebPDFLoader } from \"@langchain/community/document_loaders/web/pdf\";\n", + "\n", + "// new Blob(); e.g. from a file input\n", + "const eliminatingExtraSpacesLoader = new WebPDFLoader(new Blob(), {\n", + " parsedItemSeparator: \"\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all WebPDFLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_web_pdf.WebPDFLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.mdx deleted file mode 100644 index 64e96247765c..000000000000 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/pdf.mdx +++ /dev/null @@ -1,53 +0,0 @@ -# PDF files - -You can use this version of the popular PDFLoader in web environments. -By default, one document will be created for each page in the PDF file, you can change this behavior by setting the `splitPages` option to `false`. - -## Setup - -```bash npm2yarn -npm install pdf-parse -``` - -## Usage - -import CodeBlock from "@theme/CodeBlock"; -import Example from "@examples/document_loaders/web_pdf.ts"; - -{Example} - -## Usage, custom `pdfjs` build - -By default we use the `pdfjs` build bundled with `pdf-parse`, which is compatible with most environments, including Node.js and modern browsers. If you want to use a more recent version of `pdfjs-dist` or if you want to use a custom build of `pdfjs-dist`, you can do so by providing a custom `pdfjs` function that returns a promise that resolves to the `PDFJS` object. - -In the following example we use the "legacy" (see [pdfjs docs](https://github.com/mozilla/pdf.js/wiki/Frequently-Asked-Questions#which-browsersenvironments-are-supported)) build of `pdfjs-dist`, which includes several polyfills not included in the default build. - -```bash npm2yarn -npm install pdfjs-dist -``` - -```typescript -import { WebPDFLoader } from "@langchain/community/document_loaders/web/pdf"; - -const blob = new Blob(); // e.g. 
from a file input - -const loader = new WebPDFLoader(blob, { - // you may need to add `.then(m => m.default)` to the end of the import - pdfjs: () => import("pdfjs-dist/legacy/build/pdf.js"), -}); -``` - -## Eliminating extra spaces - -PDFs come in many varieties, which makes reading them a challenge. The loader parses individual text elements and joins them together with a space by default, but -if you are seeing excessive spaces, this may not be the desired behavior. In that case, you can override the separator with an empty string like this: - -```typescript -import { WebPDFLoader } from "@langchain/community/document_loaders/web/pdf"; - -const blob = new Blob(); // e.g. from a file input - -const loader = new WebPDFLoader(blob, { - parsedItemSeparator: "", -}); -``` diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.ipynb new file mode 100644 index 000000000000..ec13013b245c --- /dev/null +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.ipynb @@ -0,0 +1,449 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: RecursiveUrlLoader\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# RecursiveUrlLoader\n", + "\n", + "```{=mdx}\n", + "\n", + ":::tip Compatibility\n", + "\n", + "Only available on Node.js.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This notebook provides a quick overview for getting started with [RecursiveUrlLoader](/docs/integrations/document_loaders/). For detailed documentation of all RecursiveUrlLoader features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_recursive_url.RecursiveUrlLoader.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | PY support |\n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [RecursiveUrlLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_recursive_url.RecursiveUrlLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_web_recursive_url.html) | ✅ | beta | ❌ | \n", + "### Loader features\n", + "| Source | Web Loader | Node Envs Only\n", + "| :---: | :---: | :---: | \n", + "| RecursiveUrlLoader | ✅ | ✅ | \n", + "\n", + "When loading content from a website, we may want to process load all URLs on a page.\n", + "\n", + "For example, let's look at the [LangChain.js introduction](/docs/introduction) docs.\n", + "\n", + "This has many interesting child pages that we may want to load, split, and later retrieve in bulk.\n", + "\n", + "The challenge is traversing the tree of child pages and assembling a list!\n", + "\n", + "We do this using the `RecursiveUrlLoader`.\n", + "\n", + "This also gives us the flexibility to exclude some children, customize the extractor, and more.\n", + "\n", + "## Setup\n", + "\n", + "To access `RecursiveUrlLoader` document loader you'll need to install the `@langchain/community` integration, and the [`jsdom`](https://www.npmjs.com/package/jsdom) package.\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + 
"```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain RecursiveUrlLoader integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community jsdom\n", + "\n", + "\n", + "We also suggest adding a package like [`html-to-text`](https://www.npmjs.com/package/html-to-text) or\n", + "[`@mozilla/readability`](https://www.npmjs.com/package/@mozilla/readability) for extracting the raw text from the page.\n", + "\n", + "\n", + " html-to-text\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { RecursiveUrlLoader } from \"@langchain/community/document_loaders/web/recursive_url\"\n", + "import { compile } from \"html-to-text\";\n", + "\n", + "const compiledConvert = compile({ wordwrap: 130 }); // returns (text: string) => string;\n", + "\n", + "const loader = new RecursiveUrlLoader(\"https://langchain.com/\", {\n", + " extractor: compiledConvert,\n", + " maxDepth: 1,\n", + " excludeDirs: [\"/docs/api/\"],\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " pageContent: '\\n' +\n", + " '/\\n' +\n", + " 'Products\\n' +\n", + " '\\n' +\n", + " 'LangChain [/langchain]LangSmith [/langsmith]LangGraph [/langgraph]\\n' +\n", + " 'Methods\\n' +\n", + " '\\n' +\n", + " 'Retrieval [/retrieval]Agents [/agents]Evaluation [/evaluation]\\n' +\n", + " 'Resources\\n' +\n", + " '\\n' +\n", + " 'Blog [https://blog.langchain.dev/]Case Studies [/case-studies]Use Case Inspiration [/use-cases]Experts [/experts]Changelog\\n' +\n", + " '[https://changelog.langchain.com/]\\n' +\n", + " 'Docs\\n' +\n", + " '\\n' +\n", + " 'LangChain Docs [https://python.langchain.com/v0.2/docs/introduction/]LangSmith Docs [https://docs.smith.langchain.com/]\\n' +\n", + " 'Company\\n' +\n", + " '\\n' +\n", + " 'About [/about]Careers [/careers]\\n' +\n", + " 'Pricing [/pricing]\\n' +\n", + " 'Get a demo [/contact-sales]\\n' +\n", + " 'Sign up [https://smith.langchain.com/]\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'LangChain’s suite of products supports developers along each step of the LLM application lifecycle.\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'APPLICATIONS THAT CAN REASON. 
POWERED BY LANGCHAIN.\\n' +\n", + " '\\n' +\n", + " 'Get a demo [/contact-sales]Sign up for free [https://smith.langchain.com/]\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'FROM STARTUPS TO GLOBAL ENTERPRISES,\\n' +\n", + " 'AMBITIOUS BUILDERS CHOOSE\\n' +\n", + " 'LANGCHAIN PRODUCTS.\\n' +\n", + " '\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c22746faa78338532_logo_Ally.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c08e67bb7eefba4c2_logo_Rakuten.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c576fdde32d03c1a0_logo_Elastic.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c6d5592036dae24e5_logo_BCG.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/667f19528c3557c2c19c3086_the-home-depot-2%201.png][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7cbcf6473519b06d84_logo_IDEO.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7cb5f96dcc100ee3b7_logo_Zapier.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/6606183e52d49bc369acc76c_mdy_logo_rgb_moodysblue.png][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c8ad7db6ed6ec611e_logo_Adyen.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c737d50036a62768b_logo_Infor.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/667f59d98444a5f98aabe21c_acxiom-vector-logo-2022%201.png][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c09a158ffeaab0bd2_logo_Replit.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c9d2b23d292a0cab0_logo_Retool.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c44e67a3d0a996bf3_logo_Databricks.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/667f5a1299d6ba453c78a849_image%20(19).png][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ca3b7c63af578816bafcc3_logo_Instacart.svg][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/665dc1dabc940168384d9596_podium%20logo.svg]\\n' +\n", + " '\\n' +\n", + " 'Build\\n' +\n", + " '\\n' +\n", + " 'LangChain is a framework to build with LLMs by chaining interoperable components. LangGraph is the framework for building\\n' +\n", + " 'controllable agentic workflows.\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'Run\\n' +\n", + " '\\n' +\n", + " 'Deploy your LLM applications at scale with LangGraph Cloud, our infrastructure purpose-built for agents.\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'Manage\\n' +\n", + " '\\n' +\n", + " \"Debug, collaborate, test, and monitor your LLM app in LangSmith - whether it's built with a LangChain framework or not. 
\\n\" +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'BUILD YOUR APP WITH LANGCHAIN\\n' +\n", + " '\\n' +\n", + " 'Build context-aware, reasoning applications with LangChain’s flexible framework that leverages your company’s data and APIs.\\n' +\n", + " 'Future-proof your application by making vendor optionality part of your LLM infrastructure design.\\n' +\n", + " '\\n' +\n", + " 'Learn more about LangChain\\n' +\n", + " '\\n' +\n", + " '[/langchain]\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'RUN AT SCALE WITH LANGGRAPH CLOUD\\n' +\n", + " '\\n' +\n", + " 'Deploy your LangGraph app with LangGraph Cloud for fault-tolerant scalability - including support for async background jobs,\\n' +\n", + " 'built-in persistence, and distributed task queues.\\n' +\n", + " '\\n' +\n", + " 'Learn more about LangGraph\\n' +\n", + " '\\n' +\n", + " '[/langgraph]\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/667c6d7284e58f4743a430e6_Langgraph%20UI-home-2.webp]\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'MANAGE LLM PERFORMANCE WITH LANGSMITH\\n' +\n", + " '\\n' +\n", + " 'Ship faster with LangSmith’s debug, test, deploy, and monitoring workflows. Don’t rely on “vibes” – add engineering rigor to your\\n' +\n", + " 'LLM-development workflow, whether you’re building with LangChain or not.\\n' +\n", + " '\\n' +\n", + " 'Learn more about LangSmith\\n' +\n", + " '\\n' +\n", + " '[/langsmith]\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'HEAR FROM OUR HAPPY CUSTOMERS\\n' +\n", + " '\\n' +\n", + " 'LangChain, LangGraph, and LangSmith help teams of all sizes, across all industries - from ambitious startups to established\\n' +\n", + " 'enterprises.\\n' +\n", + " '\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c5308aee06d9826765c897_Retool_logo%201.png]\\n' +\n", + " '\\n' +\n", + " '“LangSmith helped us improve the accuracy and performance of Retool’s fine-tuned models. Not only did we deliver a better product\\n' +\n", + " 'by iterating with LangSmith, but we’re shipping new AI features to our users in a fraction of the time it would have taken without\\n' +\n", + " 'it.”\\n' +\n", + " '\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c5308abdd2dbbdde5a94a1_Jamie%20Cuffe.png]\\n' +\n", + " 'Jamie Cuffe\\n' +\n", + " 'Head of Self-Serve and New Products\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c5308a04d37cf7d3eb1341_Rakuten_Global_Brand_Logo.png]\\n' +\n", + " '\\n' +\n", + " '“By combining the benefits of LangSmith and standing on the shoulders of a gigantic open-source community, we’re able to identify\\n' +\n", + " 'the right approaches of using LLMs in an enterprise-setting faster.”\\n' +\n", + " '\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c5308a8b6137d44c621cb4_Yusuke%20Kaji.png]\\n' +\n", + " 'Yusuke Kaji\\n' +\n", + " 'General Manager of AI\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c5308aea1371b447cc4af9_elastic-ar21.png]\\n' +\n", + " '\\n' +\n", + " '“Working with LangChain and LangSmith on the Elastic AI Assistant had a significant positive impact on the overall pace and\\n' +\n", + " 'quality of the development and shipping experience. 
We couldn’t have achieved  the product experience delivered to our customers\\n' +\n", + " 'without LangChain, and we couldn’t have done it at the same pace without LangSmith.”\\n' +\n", + " '\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c5308a4095d5a871de7479_James%20Spiteri.png]\\n' +\n", + " 'James Spiteri\\n' +\n", + " 'Director of Security Products\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c530539f4824b828357352_Logo_de_Fintual%201.png]\\n' +\n", + " '\\n' +\n", + " '“As soon as we heard about LangSmith, we moved our entire development stack onto it. We could have built evaluation, testing and\\n' +\n", + " 'monitoring tools in house, but with LangSmith it took us 10x less time to get a 1000x better tool.”\\n' +\n", + " '\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c53058acbff86f4c2dcee2_jose%20pena.png]\\n' +\n", + " 'Jose Peña\\n' +\n", + " 'Senior Manager\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'THE REFERENCE ARCHITECTURE ENTERPRISES ADOPT FOR SUCCESS.\\n' +\n", + " '\\n' +\n", + " 'LangChain’s suite of products can be used independently or stacked together for multiplicative impact – guiding you through\\n' +\n", + " 'building, running, and managing your LLM apps.\\n' +\n", + " '\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/6695b116b0b60c78fd4ef462_15.07.24%20-Updated%20stack%20diagram%20-%20lightfor%20website-3.webp][https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/667d392696fc0bc3e17a6d04_New%20LC%20stack%20-%20light-2.webp]\\n' +\n", + " '15M+\\n' +\n", + " 'Monthly Downloads\\n' +\n", + " '100K+\\n' +\n", + " 'Apps Powered\\n' +\n", + " '75K+\\n' +\n", + " 'GitHub Stars\\n' +\n", + " '3K+\\n' +\n", + " 'Contributors\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'THE BIGGEST DEVELOPER COMMUNITY IN GENAI\\n' +\n", + " '\\n' +\n", + " 'Learn alongside the 1M+ developers who are pushing the industry forward.\\n' +\n", + " '\\n' +\n", + " 'Explore LangChain\\n' +\n", + " '\\n' +\n", + " '[/langchain]\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'GET STARTED WITH THE LANGSMITH PLATFORM TODAY\\n' +\n", + " '\\n' +\n", + " 'Get a demo [/contact-sales]Sign up for free [https://smith.langchain.com/]\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65ccf12801bc39bf912a58f3_Home%20C.webp]\\n' +\n", + " '\\n' +\n", + " 'Teams building with LangChain are driving operational efficiency, increasing discovery & personalization, and delivering premium\\n' +\n", + " 'products that generate revenue.\\n' +\n", + " '\\n' +\n", + " 'Discover Use Cases\\n' +\n", + " '\\n' +\n", + " '[/use-cases]\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'GET INSPIRED BY COMPANIES WHO HAVE DONE IT.\\n' +\n", + " '\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65bcd7ee85507bdf350399c3_Ally_Financial%201.svg]\\n' +\n", + " 'Financial Services\\n' +\n", + " '\\n' +\n", + " '[https://blog.langchain.dev/ally-financial-collaborates-with-langchain-to-deliver-critical-coding-module-to-mask-personal-identifying-information-in-a-compliant-and-safe-manner/]\\n' +\n", + " '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65bcd8b3ae4dc901daa3037a_Adyen_Corporate_Logo%201.svg]\\n' +\n", + " 'FinTech\\n' +\n", + " '\\n' +\n", + " '[https://blog.langchain.dev/llms-accelerate-adyens-support-team-through-smart-ticket-routing-and-support-agent-copilot/]\\n' +\n", + " 
'[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c534b3fa387379c0f4ebff_elastic-ar21%20(1).png]\\n' +\n", + " 'Technology\\n' +\n", + " '\\n' +\n", + " '[https://blog.langchain.dev/langchain-partners-with-elastic-to-launch-the-elastic-ai-assistant/]\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'LANGSMITH IS THE ENTERPRISE DEVOPS PLATFORM BUILT FOR LLMS.\\n' +\n", + " '\\n' +\n", + " 'Explore LangSmith\\n' +\n", + " '\\n' +\n", + " '[/langsmith]\\n' +\n", + " 'Gain visibility to make trade offs between cost, latency, and quality.\\n' +\n", + " 'Increase developer productivity.\\n' +\n", + " 'Eliminate manual, error-prone testing.\\n' +\n", + " 'Reduce hallucinations and improve reliability.\\n' +\n", + " 'Enterprise deployment options to keep data secure.\\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'READY TO START SHIPPING 
RELIABLE GENAI APPS FASTER?\\n' +\n",
+        "            '\\n' +\n",
+        "            'Get started with LangChain, LangGraph, and LangSmith to enhance your LLM app development, from prototype to production.\\n' +\n",
+        "            'Get a demo [/contact-sales]Sign up for free [https://smith.langchain.com/]\\n' +\n",
+        "            'Products\\n' +\n",
+        "            'LangChain [/langchain]LangSmith [/langsmith]LangGraph [/langgraph]Agents [/agents]Evaluation [/evaluation]Retrieval [/retrieval]\\n' +\n",
+        "            'Resources\\n' +\n",
+        "            'Python Docs [https://python.langchain.com/]JS/TS Docs [https://js.langchain.com/docs/get_started/introduction/]GitHub\\n' +\n",
+        "            '[https://github.com/langchain-ai]Integrations [https://python.langchain.com/v0.2/docs/integrations/platforms/]Templates\\n' +\n",
+        "            '[https://templates.langchain.com/]Changelog [https://changelog.langchain.com/]LangSmith Trust Portal\\n' +\n",
+        "            '[https://trust.langchain.com/]\\n' +\n",
+        "            'Company\\n' +\n",
+        "            'About [/about]Blog [https://blog.langchain.dev/]Twitter [https://twitter.com/LangChainAI]LinkedIn\\n' +\n",
+        "            '[https://www.linkedin.com/company/langchain/]YouTube [https://www.youtube.com/@LangChain]Community [/join-community]Marketing\\n' +\n",
+        "            'Assets [https://drive.google.com/drive/folders/17xybjzmVBdsQA-VxouuGLxF6bDsHDe80?usp=sharing]\\n' +\n",
+        "            'Sign up for our newsletter to stay up to date\\n' +\n",
+        "            'Thank you! Your submission has been received!\\n' +\n",
+        "            'Oops! Something went wrong while submitting the form.\\n' +\n",
+        "            '[https://cdn.prod.website-files.com/65b8cd72835ceeacd4449a53/65c6a38f9c53ec71f5fc73de_langchain-word.svg]\\n' +\n",
+        "            'All systems operational\\n' +\n",
+        "            '[https://status.smith.langchain.com/]Privacy Policy [/'... 111 more characters,\n",
+        "  metadata: {\n",
+        "    source: 'https://langchain.com/',\n",
+        "    title: 'LangChain',\n",
+        "    description: 'LangChain’s suite of products supports developers along each step of their development journey.',\n",
+        "    language: 'en'\n",
+        "  }\n",
+        "}\n"
+      ]
+    }
+   ],
+   "source": [
+    "const docs = await loader.load()\n",
+    "docs[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\n",
+      "  source: 'https://langchain.com/',\n",
+      "  title: 'LangChain',\n",
+      "  description: 'LangChain’s suite of products supports developers along each step of their development journey.',\n",
+      "  language: 'en'\n",
+      "}\n"
+     ]
+    }
+   ],
+   "source": [
+    "console.log(docs[0].metadata)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Options\n",
+    "\n",
+    "```typescript\n",
+    "interface Options {\n",
+    "  excludeDirs?: string[]; // webpage directories to exclude.\n",
+    "  extractor?: (text: string) => string; // a function to extract the text of the document from the webpage. By default it returns the page as-is; it is recommended to use a tool like html-to-text to extract the plain text.\n",
+    "  maxDepth?: number; // the maximum depth to crawl. By default, it is set to 2. If you need to crawl the whole website, set it to a sufficiently large number.\n",
+    "  timeout?: number; // the timeout for each request, in milliseconds. By default, it is set to 10000 (10 seconds).\n",
+    "  preventOutside?: boolean; // whether to prevent crawling outside the root url. 
By default, it is set to true.\n", + " callerOptions?: AsyncCallerConstructorParams; // the options to call the AsyncCaller for example setting max concurrency (default is 64)\n", + "}\n", + "```\n", + "\n", + "However, since it's hard to perform a perfect filter, you may still see some irrelevant results in the results. You can perform a filter on the returned documents by yourself, if it's needed. Most of the time, the returned results are good enough." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all RecursiveUrlLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_web_recursive_url.RecursiveUrlLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.mdx deleted file mode 100644 index ddcb358c3056..000000000000 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/recursive_url_loader.mdx +++ /dev/null @@ -1,67 +0,0 @@ ---- -sidebar_class_name: node-only -hide_table_of_contents: true ---- - -# Recursive URL Loader - -When loading content from a website, we may want to process load all URLs on a page. - -For example, let's look at the [LangChain.js introduction](/docs/introduction) docs. - -This has many interesting child pages that we may want to load, split, and later retrieve in bulk. - -The challenge is traversing the tree of child pages and assembling a list! - -We do this using the RecursiveUrlLoader. - -This also gives us the flexibility to exclude some children, customize the extractor, and more. - -## Setup - -To get started, you'll need to install the [`jsdom`](https://www.npmjs.com/package/jsdom) package: - -```bash npm2yarn -npm i jsdom -``` - -We also suggest adding a package like [`html-to-text`](https://www.npmjs.com/package/html-to-text) or -[`@mozilla/readability`](https://www.npmjs.com/package/@mozilla/readability) for extracting the raw text from the page. - -```bash npm2yarn -npm i html-to-text -``` - -## Usage - -```typescript -import { compile } from "html-to-text"; -import { RecursiveUrlLoader } from "@langchain/community/document_loaders/web/recursive_url"; - -const url = "/docs/introduction"; - -const compiledConvert = compile({ wordwrap: 130 }); // returns (text: string) => string; - -const loader = new RecursiveUrlLoader(url, { - extractor: compiledConvert, - maxDepth: 1, - excludeDirs: ["/docs/api/"], -}); - -const docs = await loader.load(); -``` - -## Options - -```typescript -interface Options { - excludeDirs?: string[]; // webpage directories to exclude. - extractor?: (text: string) => string; // a function to extract the text of the document from the webpage, by default it returns the page as it is. It is recommended to use tools like html-to-text to extract the text. By default, it just returns the page as it is. - maxDepth?: number; // the maximum depth to crawl. 
By default, it is set to 2. If you need to crawl the whole website, set it to a number that is large enough would simply do the job. - timeout?: number; // the timeout for each request, in the unit of seconds. By default, it is set to 10000 (10 seconds). - preventOutside?: boolean; // whether to prevent crawling outside the root url. By default, it is set to true. - callerOptions?: AsyncCallerConstructorParams; // the options to call the AsyncCaller for example setting max concurrency (default is 64) -} -``` - -However, since it's hard to perform a perfect filter, you may still see some irrelevant results in the results. You can perform a filter on the returned documents by yourself, if it's needed. Most of the time, the returned results are good enough. diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb new file mode 100644 index 000000000000..488205129b1d --- /dev/null +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb @@ -0,0 +1,300 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Cheerio\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cheerio\n", + "\n", + "This notebook provides a quick overview for getting started with [CheerioWebBaseLoader](/docs/integrations/document_loaders/). For detailed documentation of all CheerioWebBaseLoader features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_cheerio.CheerioWebBaseLoader.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "This example goes over how to load data from webpages using Cheerio. One document will be created for each webpage.\n", + "\n", + "Cheerio is a fast and lightweight library that allows you to parse and traverse HTML documents using a jQuery-like syntax. You can use Cheerio to extract data from web pages, without having to render them in a browser.\n", + "\n", + "However, Cheerio does not simulate a web browser, so it cannot execute JavaScript code on the page. This means that it cannot extract data from dynamic web pages that require JavaScript to render. 
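As a quick aside on the jQuery-like syntax mentioned above, here is a minimal sketch of Cheerio used on its own, outside of LangChain; the HTML string and the selector are purely illustrative:

```typescript
import * as cheerio from "cheerio";

// Assume the HTML has already been fetched, e.g. with `fetch()`.
const html = `<ul><li class="item">One</li><li class="item">Two</li></ul>`;
const $ = cheerio.load(html);

// jQuery-style selection and traversal, with no browser involved.
const items = $("li.item").map((_i, el) => $(el).text()).get();
console.log(items); // [ 'One', 'Two' ]
```

None of this executes any JavaScript on the page, however.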
To do that, you can use the [`PlaywrightWebBaseLoader`](/docs/integrations/document_loaders/web_loaders/web_playwright) or [`PuppeteerWebBaseLoader`](/docs/integrations/document_loaders/web_loaders/web_puppeteer) instead.\n", + "\n", + "| Class | Package | Local | Serializable | PY support|\n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [CheerioWebBaseLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_cheerio.CheerioWebBaseLoader.html) | @langchain/community | ✅ | ✅ | ❌ | \n", + "### Loader features\n", + "| Source | Web Support | Node Support\n", + "| :---: | :---: | :---: | \n", + "| CheerioWebBaseLoader | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access `CheerioWebBaseLoader` document loader you'll need to install the `@langchain/community` integration package, along with the `cheerio` peer dependency.\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain CheerioWebBaseLoader integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community cheerio\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\"\n", + "\n", + "const loader = new CheerioWebBaseLoader(\"https://news.ycombinator.com/item?id=34817881\", {\n", + " // optional params: ...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: '\\n' +\n", + " ' \\n' +\n", + " ' Hacker News\\n' +\n", + " ' new | past | comments | ask | show | jobs | submit \\n' +\n", + " ' login\\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " '\\n' +\n", + " ' \\n' +\n", + " ' What Lights the Universe’s Standard Candles? (quantamagazine.org)\\n' +\n", + " ' 75 points by Amorymeltzer on Feb 17, 2023 | hide | past | favorite | 6 comments \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' delta_p_delta_x on Feb 17, 2023 \\n' +\n", + " ' | next [–] \\n' +\n", + " ' \\n' +\n", + " \" Astrophysical and cosmological simulations are often insightful. 
They're also very cross-disciplinary; besides the obvious astrophysics, there's networking and sysadmin, parallel computing and algorithm theory (so that the simulation programs are actually fast but still accurate), systems design, and even a bit of graphic design for the visualisations.Some of my favourite simulation projects:- IllustrisTNG: https://www.tng-project.org/- SWIFT: https://swift.dur.ac.uk/- CO5BOLD: https://www.astro.uu.se/~bf/co5bold_main.html (which produced these animations of a red-giant star: https://www.astro.uu.se/~bf/movie/AGBmovie.html)- AbacusSummit: https://abacussummit.readthedocs.io/en/latest/And I can add the simulations in the article, too.\\n\" +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' froeb on Feb 18, 2023 \\n' +\n", + " ' | parent | next [–] \\n' +\n", + " ' \\n' +\n", + " \" Supernova simulations are especially interesting too. I have heard them described as the only time in physics when all 4 of the fundamental forces are important. The explosion can be quite finicky too. If I remember right, you can't get supernova to explode properly in 1D simulations, only in higher dimensions. This was a mystery until the realization that turbulence is necessary for supernova to trigger--there is no turbulent flow in 1D.\\n\" +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' andrewflnr on Feb 17, 2023 \\n' +\n", + " ' | prev | next [–] \\n' +\n", + " ' \\n' +\n", + " \" Whoa. I didn't know the accretion theory of Ia supernovae was dead, much less that it had been since 2011.\\n\" +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' andreareina on Feb 17, 2023 \\n' +\n", + " ' | prev | next [–] \\n' +\n", + " ' \\n' +\n", + " ' This seems to be the paper https://academic.oup.com/mnras/article/517/4/5260/6779709\\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' andreareina on Feb 17, 2023 \\n' +\n", + " ' | prev [–] \\n' +\n", + " ' \\n' +\n", + " \" Wouldn't double detonation show up as variance in the brightness?\\n\" +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' yencabulator on Feb 18, 2023 \\n' +\n", + " ' | parent [–] \\n' +\n", + " ' \\n' +\n", + " ' Or widening of the peak. 
If one type Ia supernova goes 1,2,3,2,1, the sum of two could go 1+0=1\\n' +\n", + " ' 2+1=3\\n' +\n", + " ' 3+2=5\\n' +\n", + " ' 2+3=5\\n' +\n", + " ' 1+2=3\\n' +\n", + " ' 0+1=1\\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'Guidelines | FAQ | Lists | API | Security | Legal | Apply to YC | Contact\\n' +\n", + " 'Search: \\n' +\n", + " ' \\n' +\n", + " ' \\n',\n", + " metadata: { source: 'https://news.ycombinator.com/item?id=34817881' },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ source: 'https://news.ycombinator.com/item?id=34817881' }\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Additional configurations\n", + "\n", + "`CheerioWebBaseLoader` supports additional configuration when instantiating the loader. Here is an example of how to use it with the `selector` field passed, making it only load content from the provided HTML class names:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Some of my favourite simulation projects:- IllustrisTNG: https://www.tng-project.org/- SWIFT: https://swift.dur.ac.uk/- CO5BOLD: https://www.astro.uu.se/~bf/co5bold_main.html (which produced these animations of a red-giant star: https://www.astro.uu.se/~bf/movie/AGBmovie.html)- AbacusSummit: https://abacussummit.readthedocs.io/en/latest/And I can add the simulations in the article, too.\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n" + ] + } + ], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\"\n", + "\n", + "const loaderWithSelector = new CheerioWebBaseLoader(\"https://news.ycombinator.com/item?id=34817881\", {\n", + " selector: \"p\",\n", + "});\n", + "\n", + "const docsWithSelector = await loaderWithSelector.load();\n", + "docsWithSelector[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all CheerioWebBaseLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_web_cheerio.CheerioWebBaseLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.mdx deleted file mode 100644 index a33912f424ae..000000000000 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: Cheerio 
-hide_table_of_contents: true ---- - -# Webpages, with Cheerio - -This example goes over how to load data from webpages using Cheerio. One document will be created for each webpage. - -Cheerio is a fast and lightweight library that allows you to parse and traverse HTML documents using a jQuery-like syntax. You can use Cheerio to extract data from web pages, without having to render them in a browser. - -However, Cheerio does not simulate a web browser, so it cannot execute JavaScript code on the page. This means that it cannot extract data from dynamic web pages that require JavaScript to render. To do that, you can use the [`PlaywrightWebBaseLoader`](/docs/integrations/document_loaders/web_loaders/web_playwright) or [`PuppeteerWebBaseLoader`](/docs/integrations/document_loaders/web_loaders/web_puppeteer) instead. - -## Setup - -```bash npm2yarn -npm install cheerio -``` - -## Usage - -```typescript -import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; - -const loader = new CheerioWebBaseLoader( - "https://news.ycombinator.com/item?id=34817881" -); - -const docs = await loader.load(); -``` - -## Usage, with a custom selector - -```typescript -import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; - -const loader = new CheerioWebBaseLoader( - "https://news.ycombinator.com/item?id=34817881", - { - selector: "p.athing", - } -); - -const docs = await loader.load(); -``` diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.ipynb new file mode 100644 index 000000000000..332ded7ae820 --- /dev/null +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.ipynb @@ -0,0 +1,543 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Puppeteer\n", + "sidebar_class_name: node-only\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# PuppeteerWebBaseLoader\n", + "\n", + "```{=mdx}\n", + ":::tip Compatibility\n", + "\n", + "Only available on Node.js.\n", + "\n", + ":::\n", + "```\n", + "\n", + "This notebook provides a quick overview for getting started with [PuppeteerWebBaseLoader](/docs/integrations/document_loaders/). For detailed documentation of all PuppeteerWebBaseLoader features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_puppeteer.PuppeteerWebBaseLoader.html).\n", + "\n", + "Puppeteer is a Node.js library that provides a high-level API for controlling headless Chrome or Chromium. 
You can use Puppeteer to automate web page interactions, including extracting data from dynamic web pages that require JavaScript to render.\n", + "\n", + "If you want a lighterweight solution, and the webpages you want to load do not require JavaScript to render, you can use the [CheerioWebBaseLoader](/docs/integrations/document_loaders/web_loaders/web_cheerio) instead.\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | PY support |\n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [PuppeteerWebBaseLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_puppeteer.PuppeteerWebBaseLoader.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders_web_puppeteer.html) | ✅ | beta | ❌ | \n", + "### Loader features\n", + "| Source | Web Loader | Node Envs Only\n", + "| :---: | :---: | :---: | \n", + "| PuppeteerWebBaseLoader | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access `PuppeteerWebBaseLoader` document loader you'll need to install the `@langchain/community` integration package, along with the `puppeteer` peer dependency.\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain PuppeteerWebBaseLoader integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community puppeteer\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { PuppeteerWebBaseLoader } from \"@langchain/community/document_loaders/web/puppeteer\"\n", + "\n", + "const loader = new PuppeteerWebBaseLoader(\"https://langchain.com\", {\n", + " // required params = ...\n", + " // optional params = ...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: '
\\n' +\n", + " '\\n' +\n", + " ' Promise;\n", + "};\n", + "```\n", + "\n", + "1. `launchOptions`: an optional object that specifies additional options to pass to the puppeteer.launch() method. This can include options such as the headless flag to launch the browser in headless mode, or the slowMo option to slow down Puppeteer's actions to make them easier to follow.\n", + "\n", + "2. `gotoOptions`: an optional object that specifies additional options to pass to the page.goto() method. This can include options such as the timeout option to specify the maximum navigation time in milliseconds, or the waitUntil option to specify when to consider the navigation as successful.\n", + "\n", + "3. `evaluate`: an optional function that can be used to evaluate JavaScript code on the page using the page.evaluate() method. This can be useful for extracting data from the page or interacting with page elements. The function should return a Promise that resolves to a string containing the result of the evaluation.\n", + "\n", + "By passing these options to the `PuppeteerWebBaseLoader` constructor, you can customize the behavior of the loader and use Puppeteer's powerful features to scrape and interact with web pages.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Screenshots\n", + "\n", + "To take a screenshot of a site, initialize the loader the same as above, and call the `.screenshot()` method.\n", + "This will return an instance of `Document` where the page content is a base64 encoded image, and the metadata contains a `source` field with the URL of the page." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "iVBORw0KGgoAAAANSUhEUgAACWAAAAdoCAIAAAA/Q2IJAAAAAXNSR0IArs4c6QAAIABJREFUeJzsvUuzHUeSJuaPiMjMk3nOuU88\n", + "{ source: 'https://langchain.com' }\n" + ] + } + ], + "source": [ + "import { PuppeteerWebBaseLoader } from \"@langchain/community/document_loaders/web/puppeteer\";\n", + "\n", + "const loaderForScreenshot = new PuppeteerWebBaseLoader(\"https://langchain.com\", {\n", + " launchOptions: {\n", + " headless: true,\n", + " },\n", + " gotoOptions: {\n", + " waitUntil: \"domcontentloaded\",\n", + " },\n", + "});\n", + "const screenshot = await loaderForScreenshot.screenshot();\n", + "\n", + "console.log(screenshot.pageContent.slice(0, 100));\n", + "console.log(screenshot.metadata);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all PuppeteerWebBaseLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_web_puppeteer.PuppeteerWebBaseLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.mdx deleted file mode 100644 index 8398a175eec8..000000000000 --- 
a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_puppeteer.mdx +++ /dev/null @@ -1,73 +0,0 @@ ---- -sidebar_position: 2 -sidebar_label: Puppeteer -hide_table_of_contents: true -sidebar_class_name: node-only ---- - -# Webpages, with Puppeteer - -:::tip Compatibility -Only available on Node.js. -::: - -This example goes over how to load data from webpages using Puppeteer. One document will be created for each webpage. - -Puppeteer is a Node.js library that provides a high-level API for controlling headless Chrome or Chromium. You can use Puppeteer to automate web page interactions, including extracting data from dynamic web pages that require JavaScript to render. - -If you want a lighterweight solution, and the webpages you want to load do not require JavaScript to render, you can use the [CheerioWebBaseLoader](/docs/integrations/document_loaders/web_loaders/web_cheerio) instead. - -## Setup - -```bash npm2yarn -npm install puppeteer -``` - -## Usage - -```typescript -import { PuppeteerWebBaseLoader } from "@langchain/community/document_loaders/web/puppeteer"; - -/** - * Loader uses `page.evaluate(() => document.body.innerHTML)` - * as default evaluate function - **/ -const loader = new PuppeteerWebBaseLoader("https://www.tabnews.com.br/"); - -const docs = await loader.load(); -``` - -## Options - -Here's an explanation of the parameters you can pass to the PuppeteerWebBaseLoader constructor using the PuppeteerWebBaseLoaderOptions interface: - -```typescript -type PuppeteerWebBaseLoaderOptions = { - launchOptions?: PuppeteerLaunchOptions; - gotoOptions?: PuppeteerGotoOptions; - evaluate?: (page: Page, browser: Browser) => Promise; -}; -``` - -1. `launchOptions`: an optional object that specifies additional options to pass to the puppeteer.launch() method. This can include options such as the headless flag to launch the browser in headless mode, or the slowMo option to slow down Puppeteer's actions to make them easier to follow. - -2. `gotoOptions`: an optional object that specifies additional options to pass to the page.goto() method. This can include options such as the timeout option to specify the maximum navigation time in milliseconds, or the waitUntil option to specify when to consider the navigation as successful. - -3. `evaluate`: an optional function that can be used to evaluate JavaScript code on the page using the page.evaluate() method. This can be useful for extracting data from the page or interacting with page elements. The function should return a Promise that resolves to a string containing the result of the evaluation. - -By passing these options to the `PuppeteerWebBaseLoader` constructor, you can customize the behavior of the loader and use Puppeteer's powerful features to scrape and interact with web pages. - -Here is a basic example to do it: - -import CodeBlock from "@theme/CodeBlock"; -import Example from "@examples/document_loaders/puppeteer_web.ts"; -import ScreenshotExample from "@examples/document_loaders/puppeteer_screenshot_web.ts"; - -{Example} - -### Screenshots - -To take a screenshot of a site, initialize the loader the same as above, and call the `.screenshot()` method. -This will return an instance of `Document` where the page content is a base64 encoded image, and the metadata contains a `source` field with the URL of the page. 
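Since the options summary above only says that `evaluate` should return a `Promise` that resolves to a string, here is a minimal sketch of a custom `evaluate` function; the URL and the `<main>` selector are only illustrative:

```typescript
import { PuppeteerWebBaseLoader } from "@langchain/community/document_loaders/web/puppeteer";

// Wait for the DOM, then return only the text of the page's <main> element
// instead of the default `document.body.innerHTML`.
const loader = new PuppeteerWebBaseLoader("https://langchain.com", {
  gotoOptions: { waitUntil: "domcontentloaded" },
  evaluate: async (page) =>
    page.evaluate(() => document.querySelector("main")?.innerText ?? ""),
});

const docs = await loader.load();
```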
- -{ScreenshotExample} diff --git a/docs/core_docs/docs/integrations/llms/azure.ipynb b/docs/core_docs/docs/integrations/llms/azure.ipynb new file mode 100644 index 000000000000..607eeb2b6263 --- /dev/null +++ b/docs/core_docs/docs/integrations/llms/azure.ipynb @@ -0,0 +1,346 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "67db2992", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Azure OpenAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# AzureOpenAI\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Azure OpenAI [text completion models](/docs/concepts/#llms). The latest and most popular Azure OpenAI models are [chat completion models](/docs/concepts/#chat-models).\n", + "\n", + "Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/azure/).\n", + ":::\n", + "\n", + ":::info\n", + "\n", + "Previously, LangChain.js supported integration with Azure OpenAI using the dedicated [Azure OpenAI SDK](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai). This SDK is now deprecated in favor of the new Azure integration in the OpenAI SDK, which allows to access the latest OpenAI models and features the same day they are released, and allows seemless transition between the OpenAI API and Azure OpenAI.\n", + "\n", + "If you are using Azure OpenAI with the deprecated SDK, see the [migration guide](#migration-from-azure-openai-sdk) to update to the new API.\n", + "\n", + ":::\n", + "\n", + "```\n", + "\n", + "This will help you get started with AzureOpenAI completion models (LLMs) using LangChain. For detailed documentation on `AzureOpenAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "- TODO: Fill in table features.\n", + "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n", + "- TODO: Make sure API reference links are correct.\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/openai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [AzureOpenAI](https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access AzureOpenAI models you'll need to create an Azure account, get an API key, and install the `@langchain/openai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [azure.microsoft.com](https://azure.microsoft.com/) to sign up to AzureOpenAI and generate an API key. \n", + "\n", + "You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", + "\n", + "Once you have your instance running, make sure you have the name of your instance and key. 
You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance.\n", + "\n", + "If you're using Node.js, you can define the following environment variables to use the service:\n", + "\n", + "```bash\n", + "AZURE_OPENAI_API_INSTANCE_NAME=\n", + "AZURE_OPENAI_API_DEPLOYMENT_NAME=\n", + "AZURE_OPENAI_API_KEY=\n", + "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", + "```\n", + "\n", + "Alternatively, you can pass the values directly to the `AzureOpenAI` constructor.\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain AzureOpenAI integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureOpenAI } from \"@langchain/openai\"\n", + "\n", + "const llm = new AzureOpenAI({\n", + " model: \"gpt-3.5-turbo-instruct\",\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiInstanceName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", + " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "provides AI solutions to businesses. They offer a range of services including natural language processing, computer vision, and machine learning. Their solutions are designed to help businesses automate processes, gain insights from data, and improve decision-making. AzureOpenAI also offers consulting services to help businesses identify and implement the best AI solutions for their specific needs. They work with a variety of industries, including healthcare, finance, and retail. 
With their expertise in AI and their partnership with Microsoft Azure, AzureOpenAI is a trusted provider of AI solutions for businesses looking to stay ahead in the rapidly evolving world of technology.\n" + ] + } + ], + "source": [ + "const inputText = \"AzureOpenAI is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Ich liebe Programmieren.\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = new PromptTemplate({\n", + " template: \"How to say {input} in {output_language}:\\n\",\n", + " inputVariables: [\"input\", \"output_language\"],\n", + "})\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e99eef30", + "metadata": {}, + "source": [ + "## Using Azure Managed Identity\n", + "\n", + "If you're using Azure Managed Identity, you can configure the credentials like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c21d1eb8", + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " DefaultAzureCredential,\n", + " getBearerTokenProvider,\n", + "} from \"@azure/identity\";\n", + "import { AzureOpenAI } from \"@langchain/openai\";\n", + "\n", + "const credentials = new DefaultAzureCredential();\n", + "const azureADTokenProvider = getBearerTokenProvider(\n", + " credentials,\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ");\n", + "\n", + "const managedIdentityLLM = new AzureOpenAI({\n", + " azureADTokenProvider,\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "94c2572b", + "metadata": {}, + "source": [ + "## Using a different domain\n", + "\n", + "If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", + "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bbf107a2", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureOpenAI } from \"@langchain/openai\";\n", + "\n", + "const differentDomainLLM = new AzureOpenAI({\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " azureOpenAIBasePath:\n", + " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "afcff984", + "metadata": {}, + "source": [ + "## Migration from Azure OpenAI SDK\n", + "\n", + "If you are 
using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", + "\n", + "1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n", + " ```bash\n", + " npm install @langchain/openai\n", + " npm uninstall @langchain/azure-openai\n", + " ```\n", + "2. Update your imports to use the new `AzureOpenAI` and `AzureChatOpenAI` classes from the `@langchain/openai` package:\n", + " ```typescript\n", + " import { AzureOpenAI } from \"@langchain/openai\";\n", + " ```\n", + "3. Update your code to use the new `AzureOpenAI` and `AzureChatOpenAI` classes and pass the required parameters:\n", + "\n", + " ```typescript\n", + " const model = new AzureOpenAI({\n", + " azureOpenAIApiKey: \"\",\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + " });\n", + " ```\n", + "\n", + " Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n", + "\n", + " - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n", + "\n", + " - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all AzureOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureOpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/llms/azure.mdx b/docs/core_docs/docs/integrations/llms/azure.mdx deleted file mode 100644 index 36ed1b3b65ad..000000000000 --- a/docs/core_docs/docs/integrations/llms/azure.mdx +++ /dev/null @@ -1,123 +0,0 @@ -import CodeBlock from "@theme/CodeBlock"; - -# Azure OpenAI - -:::caution -You are currently on a page documenting the use of Azure OpenAI [text completion models](/docs/concepts/#llms). The latest and most popular Azure OpenAI models are [chat completion models](/docs/concepts/#chat-models). - -Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/azure/). -::: - -[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond. 
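The migration steps above describe the environment-variable rename only in prose. As a minimal sketch (the helper below is illustrative and not part of `@langchain/openai`; the variable names come from the steps above), you can assert the new configuration is present before constructing the model:

```typescript
// Hypothetical startup check for the renamed environment variables.
// AZURE_OPENAI_API_INSTANCE_NAME replaces AZURE_OPENAI_API_ENDPOINT,
// and AZURE_OPENAI_API_VERSION is newly required to pin the API version.
const requiredEnvVars = [
  "AZURE_OPENAI_API_INSTANCE_NAME",
  "AZURE_OPENAI_API_DEPLOYMENT_NAME",
  "AZURE_OPENAI_API_KEY",
  "AZURE_OPENAI_API_VERSION",
];

for (const name of requiredEnvVars) {
  if (!process.env[name]) {
    throw new Error(`Missing required environment variable: ${name}`);
  }
}
```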
- -LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node). - -You can learn more about Azure OpenAI and its differences from the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview). If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started. - -:::info - -Previously, LangChain.js supported integration with Azure OpenAI using the dedicated [Azure OpenAI SDK](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai). This SDK is now deprecated in favor of the new Azure integration in the OpenAI SDK, which provides access to the latest OpenAI models and features the same day they are released, and allows a seamless transition between the OpenAI API and Azure OpenAI. - -If you are using Azure OpenAI with the deprecated SDK, see the [migration guide](#migration-from-azure-openai-sdk) to update to the new API. - -::: - -## Setup - -You'll first need to install the [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install -S @langchain/openai -``` - -You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal). - -Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the "Keys and Endpoint" section of your instance. - -If you're using Node.js, you can define the following environment variables to use the service: - -```bash -AZURE_OPENAI_API_INSTANCE_NAME= -AZURE_OPENAI_API_DEPLOYMENT_NAME= -AZURE_OPENAI_API_KEY= -AZURE_OPENAI_API_VERSION="2024-02-01" -``` - -Alternatively, you can pass the values directly to the `AzureOpenAI` constructor: - -import AzureOpenAI from "@examples/models/llm/azure_openai.ts"; - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -{AzureOpenAI} - -:::info - -You can find the list of supported API versions in the [Azure OpenAI documentation](https://learn.microsoft.com/azure/ai-services/openai/reference). - -::: - -### Using Azure Managed Identity - -If you're using Azure Managed Identity, you can configure the credentials like this: - -import AzureOpenAIManagedIdentity from "@examples/models/llm/azure_openai-managed_identity.ts"; - -{AzureOpenAIManagedIdentity} - -### Using a different domain - -If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable. 
-For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`: - -import AzureOpenAIBasePath from "@examples/models/llm/azure_openai-base_path.ts"; - -{AzureOpenAIBasePath} - -### LLM usage example - -import LLMExample from "@examples/llms/azure_openai.ts"; - -{LLMExample} - -### Chat usage example - -import ChatExample from "@examples/llms/azure_openai-chat.ts"; - -{ChatExample} - -## Migration from Azure OpenAI SDK - -If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps: - -1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package: - ```bash npm2yarn - npm install @langchain/openai - npm uninstall @langchain/azure-openai - ``` -2. Update your imports to use the new `AzureOpenAI` and `AzureChatOpenAI` classes from the `@langchain/openai` package: - ```typescript - import { AzureOpenAI } from "@langchain/openai"; - ``` -3. Update your code to use the new `AzureOpenAI` and `AzureChatOpenAI` classes and pass the required parameters: - - ```typescript - const model = new AzureOpenAI({ - azureOpenAIApiKey: "", - azureOpenAIApiInstanceName: "", - azureOpenAIApiDeploymentName: "", - azureOpenAIApiVersion: "", - }); - ``` - - Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version. - - - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details. - - - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version. diff --git a/docs/core_docs/docs/integrations/llms/bedrock.ipynb b/docs/core_docs/docs/integrations/llms/bedrock.ipynb new file mode 100644 index 000000000000..be1ff211a38e --- /dev/null +++ b/docs/core_docs/docs/integrations/llms/bedrock.ipynb @@ -0,0 +1,284 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Bedrock\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# Bedrock\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Amazon Bedrock models as [text completion models](/docs/concepts/#llms). Many popular models available on Bedrock are [chat completion models](/docs/concepts/#chat-models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/bedrock/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "> [Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that makes Foundation Models (FMs)\n", + "> from leading AI startups and Amazon available via an API. You can choose from a wide range of FMs to find the model that is best suited for your use case.\n", + "\n", + "This will help you get started with Bedrock completion models (LLMs) using LangChain. 
For detailed documentation on `Bedrock` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/bedrock) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [Bedrock](https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_bedrock.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Bedrock models you'll need to create an AWS account, get an API key, and install the `@langchain/community` integration, along with a few peer dependencies.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [aws.amazon.com](https://aws.amazon.com) to sign up to AWS Bedrock and generate an API key. Once you've done this set the environment variables:\n", + "\n", + "```bash\n", + "export BEDROCK_AWS_REGION=\"your-region-url\"\n", + "export BEDROCK_AWS_ACCESS_KEY_ID=\"your-access-key-id\"\n", + "export BEDROCK_AWS_SECRET_ACCESS_KEY=\"your-secret-access-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Bedrock integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community\n", + "\n", + "\n", + "And install the peer dependencies:\n", + "\n", + "\n", + " @aws-crypto/sha256-js @aws-sdk/credential-provider-node @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", + "\n", + "\n", + "You can also use Bedrock in web environments such as Edge functions or Cloudflare Workers by omitting the `@aws-sdk/credential-provider-node` dependency\n", + "and using the `web` entrypoint:\n", + "\n", + "\n", + " @aws-crypto/sha256-js @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types\n", + "\n", + "\n", + "```" ] }, { "cell_type": "markdown", "id": "0a760037", "metadata": {}, "source": [ "## Instantiation\n", "\n", "Now we can instantiate our model object and generate chat completions:" ] }, { "cell_type": "code", "execution_count": null, "id": "093ae37f", "metadata": {}, "outputs": [], "source": [ "// @lc-docs-hide-cell\n", "// Deno requires these imports and this way of loading env vars,\n", "// which we don't want to expose in the docs.\n", "// Below this cell we have a TypeScript markdown code block with\n", "// the Node code.\n", "import \"@aws-sdk/credential-provider-node\";\n", "import \"@smithy/protocol-http\";\n", "import \"@aws-crypto/sha256-js\";\n", "import \"@smithy/signature-v4\";\n", "import \"@smithy/eventstream-codec\";\n", "import \"@smithy/util-utf8\";\n", "import \"@aws-sdk/types\";\n", "import { Bedrock } from \"@langchain/community/llms/bedrock\"\n", "import { getEnvironmentVariable } from \"@langchain/core/utils/env\";\n", "\n", "const llm = new Bedrock({\n", " model: \"anthropic.claude-v2\",\n", " region: \"us-east-1\",\n", " // endpointUrl: \"custom.amazonaws.com\",\n", " credentials: {\n", " accessKeyId: getEnvironmentVariable(\"BEDROCK_AWS_ACCESS_KEY_ID\"),\n", " secretAccessKey: getEnvironmentVariable(\"BEDROCK_AWS_SECRET_ACCESS_KEY\"),\n", " },\n", " temperature: 0,\n", " maxTokens: undefined,\n", " maxRetries: 2,\n", " // other params...\n", "})" ] }, { "cell_type": "markdown", "id": "a0562a13", "metadata": {}, "source": [ "```typescript\n", "import { Bedrock } from \"@langchain/community/llms/bedrock\"\n", "\n", "const llm = new Bedrock({\n", " model: \"anthropic.claude-v2\",\n", " region: process.env.BEDROCK_AWS_REGION ?? \"us-east-1\",\n", " // endpointUrl: \"custom.amazonaws.com\",\n", " credentials: {\n", " accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID,\n", " secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,\n", " },\n", " temperature: 0,\n", " maxTokens: undefined,\n", " maxRetries: 2,\n", " // other params...\n", "})\n", "```" ] }, { "cell_type": "markdown", "id": "0ee90032", "metadata": {}, "source": [ "## Invocation\n", "\n", "Note that some models require specific prompting techniques. For example, Anthropic's Claude-v2 model will throw an error if\n", "the prompt does not start with `Human: `." ] }, { "cell_type": "code", "execution_count": 3, "id": "035dea0f", "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "\u001b[32m\" Here are a few key points about Bedrock AI:\\n\"\u001b[39m +\n", " \u001b[32m\"\\n\"\u001b[39m +\n", " \u001b[32m\"- Bedrock was founded in 2021 and is based in San Fran\"\u001b[39m... 
116 more characters" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const inputText = \"Human: Bedrock is an AI company that\\nAssistant: \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m' Here is how to say \"I love programming\" in German:\\n'\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m\"Ich liebe das Programmieren.\"\u001b[39m" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"Human: How to say {input} in {output_language}:\\nAssistant:\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all Bedrock features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_llms_bedrock.Bedrock.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/llms/bedrock.mdx b/docs/core_docs/docs/integrations/llms/bedrock.mdx deleted file mode 100644 index 12627060a129..000000000000 --- a/docs/core_docs/docs/integrations/llms/bedrock.mdx +++ /dev/null @@ -1,43 +0,0 @@ -# Bedrock - -:::caution -You are currently on a page documenting the use of Amazon Bedrock models as [text completion models](/docs/concepts/#llms). Many popular models available on Bedrock are [chat completion models](/docs/concepts/#chat-models). - -You may be looking for [this page instead](/docs/integrations/chat/bedrock/). -::: - -> [Amazon Bedrock](https://aws.amazon.com/bedrock/) is a fully managed service that makes Foundation Models (FMs) -> from leading AI startups and Amazon available via an API. You can choose from a wide range of FMs to find the model that is best suited for your use case. 
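The Setup section above mentions the `web` entrypoint for Edge functions and Cloudflare Workers but shows only the install command. A minimal sketch of what that looks like in code (the import path follows the entrypoint pattern described above; since the Node credential provider is omitted, credentials must be passed explicitly, and the placeholder values here are assumptions):

```typescript
// Web-environment variant: no @aws-sdk/credential-provider-node,
// so credentials cannot be resolved from the local AWS profile.
import { Bedrock } from "@langchain/community/llms/bedrock/web";

const webLlm = new Bedrock({
  model: "anthropic.claude-v2",
  region: "us-east-1",
  credentials: {
    accessKeyId: "your-access-key-id",
    secretAccessKey: "your-secret-access-key",
  },
});

// Claude-v2 still requires the "Human:" prompt prefix noted above.
const res = await webLlm.invoke("Human: Hello!\nAssistant: ");
```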
- -## Setup - -You'll need to install a few official AWS packages as peer dependencies: - -```bash npm2yarn -npm install @aws-crypto/sha256-js @aws-sdk/credential-provider-node @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types -``` - -You can also use Bedrock in web environments such as Edge functions or Cloudflare Workers by omitting the `@aws-sdk/credential-provider-node` dependency -and using the `web` entrypoint: - -```bash npm2yarn -npm install @aws-crypto/sha256-js @smithy/protocol-http @smithy/signature-v4 @smithy/eventstream-codec @smithy/util-utf8 @aws-sdk/types -``` - -## Usage - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/community -``` - -Note that some models require specific prompting techniques. For example, Anthropic's Claude-v2 model will throw an error if -the prompt does not start with `Human: `. - -import CodeBlock from "@theme/CodeBlock"; -import BedrockExample from "@examples/models/llm/bedrock.ts"; - -{BedrockExample} diff --git a/docs/core_docs/docs/integrations/llms/cohere.ipynb b/docs/core_docs/docs/integrations/llms/cohere.ipynb new file mode 100644 index 000000000000..054d857e7aca --- /dev/null +++ b/docs/core_docs/docs/integrations/llms/cohere.ipynb @@ -0,0 +1,225 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Cohere\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# Cohere\n", + "\n", + "```{=mdx}\n", + "\n", + ":::warning Legacy\n", + "\n", + "Cohere has marked their `generate` endpoint for LLMs as deprecated. Follow their [migration guide](https://docs.cohere.com/docs/migrating-from-cogenerate-to-cochat) to start using their Chat API via the [`ChatCohere`](/docs/integrations/chat/cohere) integration.\n", + "\n", + ":::\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Cohere models as [text completion models](/docs/concepts/#llms). Many popular models available on Cohere are [chat completion models](/docs/concepts/#chat-models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/cohere/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "This will help you get started with Cohere completion models (LLMs) using LangChain. 
For detailed documentation on `Cohere` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_cohere.Cohere.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/cohere) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [Cohere](https://api.js.langchain.com/classes/langchain_cohere.Cohere.html) | [@langchain/cohere](https://api.js.langchain.com/modules/langchain_cohere.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/cohere?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/cohere?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Cohere models you'll need to create a Cohere account, get an API key, and install the `@langchain/cohere` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [cohere.com](https://cohere.com) to sign up to Cohere and generate an API key. Once you've done this set the `COHERE_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export COHERE_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Cohere integration lives in the `@langchain/cohere` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/cohere\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { Cohere } from \"@langchain/cohere\"\n", + "\n", + "const llm = new Cohere({\n", + " model: \"command\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Cohere is a company that provides natural language processing models that help companies improve human-machine interactions. Cohere was founded in 2019 by Aidan Gomez, Ivan Zhang, and Nick Frosst. 
\n" + ] + } + ], + "source": [ + "const inputText = \"Cohere is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Ich liebe Programming.\n", + "\n", + "But for day to day purposes Ich mag Programming. would be enough and perfectly understood.\n", + "\n", + "I love programming is \"Ich liebe Programming\" and I like programming is \"Ich mag Programming\" respectively.\n", + "\n", + "There are also other ways to express this feeling, such as \"Ich habe Spaß mit Programming\", which means \"I enjoy programming\". But \"Ich mag\" and \"Ich liebe\" are the most common expressions for this.\n", + "\n", + "Let me know if I can be of further help with something else! \n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = new PromptTemplate({\n", + " template: \"How to say {input} in {output_language}:\\n\",\n", + " inputVariables: [\"input\", \"output_language\"],\n", + "})\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all Cohere features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_cohere.Cohere.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/llms/cohere.mdx b/docs/core_docs/docs/integrations/llms/cohere.mdx deleted file mode 100644 index 3085c31a2332..000000000000 --- a/docs/core_docs/docs/integrations/llms/cohere.mdx +++ /dev/null @@ -1,25 +0,0 @@ -# Cohere - -:::caution -You are currently on a page documenting the use of Cohere models as [text completion models](/docs/concepts/#llms). Many popular models available on Cohere are [chat completion models](/docs/concepts/#chat-models). - -You may be looking for [this page instead](/docs/integrations/chat/cohere/). -::: - -import CodeBlock from "@theme/CodeBlock"; - -LangChain.js supports Cohere LLMs. Here's an example: - -You'll first need to install the [`@langchain/cohere`](https://www.npmjs.com/package/@langchain/cohere) package. 
- -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/cohere -``` - -import BasicExample from "@examples/models/llm/cohere.ts"; - -{BasicExample} diff --git a/docs/core_docs/docs/integrations/llms/fireworks.ipynb b/docs/core_docs/docs/integrations/llms/fireworks.ipynb new file mode 100644 index 000000000000..ead90271e243 --- /dev/null +++ b/docs/core_docs/docs/integrations/llms/fireworks.ipynb @@ -0,0 +1,279 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: Fireworks\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# Fireworks\n", + "\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of Fireworks models as [text completion models](/docs/concepts/#llms). Many popular models available on Fireworks are [chat completion models](/docs/concepts/#chat-models).\n", + "\n", + "You may be looking for [this page instead](/docs/integrations/chat/fireworks/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "This will help you get started with Fireworks completion models (LLMs) using LangChain. For detailed documentation on `Fireworks` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/fireworks) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [Fireworks](https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_llms_fireworks.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access Fireworks models you'll need to create a Fireworks account, get an API key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [fireworks.ai](https://fireworks.ai/) to sign up to Fireworks and generate an API key. 
Once you've done this set the `FIREWORKS_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export FIREWORKS_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain Fireworks integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { Fireworks } from \"@langchain/community/llms/fireworks\"\n", + "\n", + "const llm = new Fireworks({\n", + " model: \"accounts/fireworks/models/llama-v3-70b-instruct\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " helps businesses automate their workflows and processes using AI and machine learning. Our platform provides a suite of tools that enable companies to automate repetitive tasks, extract insights from data, and make predictions about future outcomes.\n", + "\n", + "We're looking for a talented and motivated **Machine Learning Engineer** to join our team. As a Machine Learning Engineer at Fireworks, you will be responsible for designing, developing, and deploying machine learning models that drive business value for our customers. 
You will work closely with our data science team to develop and improve our AI models, and collaborate with our engineering team to integrate these models into our platform.\n", + "\n", + "**Responsibilities:**\n", + "\n", + "* Design, develop, and deploy machine learning models that drive business value for our customers\n", + "* Collaborate with data scientists to develop and improve AI models\n", + "* Work with the engineering team to integrate machine learning models into our platform\n", + "* Develop and maintain scalable and efficient machine learning pipelines\n", + "* Stay up-to-date with the latest developments in machine learning and AI\n", + "* Communicate complex technical concepts to non-technical stakeholders\n", + "\n", + "**Requirements:**\n", + "\n", + "* Bachelor's or Master's degree in Computer Science, Machine Learning, or a related field\n", + "* 3+ years of experience in machine learning engineering\n", + "* Strong programming skills in Python and experience with machine learning frameworks\n" + ] + } + ], + "source": [ + "const inputText = \"Fireworks is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ich liebe Programmieren.\n", + "\n", + "How to say I love coding. in German:\n", + "Ich liebe Coden.\n", + "\n", + "How to say I love to code. in German:\n", + "Ich liebe es zu coden.\n", + "\n", + "How to say I'm a programmer. in German:\n", + "Ich bin Programmierer.\n", + "\n", + "How to say I'm a coder. in German:\n", + "Ich bin Coder.\n", + "\n", + "How to say I'm a developer. in German:\n", + "Ich bin Entwickler.\n", + "\n", + "How to say I'm a software engineer. in German:\n", + "Ich bin Software-Ingenieur.\n", + "\n", + "How to say I'm a tech enthusiast. in German:\n", + "Ich bin Technik-Enthusiast.\n", + "\n", + "How to say I'm passionate about technology. in German:\n", + "Ich bin leidenschaftlich für Technologie.\n", + "\n", + "How to say I'm passionate about coding. in German:\n", + "Ich bin leidenschaftlich für Coden.\n", + "\n", + "How to say I'm passionate about programming. in German:\n", + "Ich bin leidenschaftlich für Programmieren.\n", + "\n", + "How to say I enjoy coding. in German:\n", + "Ich genieße Coden.\n", + "\n", + "How to say I enjoy programming. in German:\n", + "Ich genieße Programmieren.\n", + "\n", + "How to say I'm good at coding. in German:\n", + "Ich bin gut im Coden.\n", + "\n", + "How to say I'm good at programming. 
in\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "4989353f", + "metadata": {}, + "source": [ + "Behind the scenes, Fireworks AI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n", + "\n", + "- Certain properties are not supported by the Fireworks API, see [here](https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility).\n", + "- Generation using multiple prompts is not supported.\n" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all Fireworks features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_llms_fireworks.Fireworks.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/llms/fireworks.mdx b/docs/core_docs/docs/integrations/llms/fireworks.mdx deleted file mode 100644 index 8548ef1e64f7..000000000000 --- a/docs/core_docs/docs/integrations/llms/fireworks.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -sidebar_label: Fireworks ---- - -import CodeBlock from "@theme/CodeBlock"; - -# Fireworks - -:::caution -You are currently on a page documenting the use of Fireworks models as [text completion models](/docs/concepts/#llms). Many popular models available on Fireworks are [chat completion models](/docs/concepts/#chat-models). - -You may be looking for [this page instead](/docs/integrations/chat/fireworks/). -::: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/community -``` - -You can use models provided by Fireworks AI as follows: - -import Fireworks from "@examples/models/llm/fireworks.ts"; - -{Fireworks} - -Behind the scenes, Fireworks AI uses the OpenAI SDK and OpenAI compatible API, with some caveats: - -- Certain properties are not supported by the Fireworks API, see [here](https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility). -- Generation using multiple prompts is not supported. 
diff --git a/docs/core_docs/docs/integrations/llms/openai.ipynb b/docs/core_docs/docs/integrations/llms/openai.ipynb new file mode 100644 index 000000000000..bd08a1a333e7 --- /dev/null +++ b/docs/core_docs/docs/integrations/llms/openai.ipynb @@ -0,0 +1,260 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: OpenAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# OpenAI\n", + "\n", + "```{=mdx}\n", + "\n", + ":::caution\n", + "You are currently on a page documenting the use of OpenAI [text completion models](/docs/concepts/#llms). The latest and most popular OpenAI models are [chat completion models](/docs/concepts/#chat-models).\n", + "\n", + "Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/openai/).\n", + ":::\n", + "\n", + "```\n", + "\n", + "This will help you get started with OpenAI completion models (LLMs) using LangChain. For detailed documentation on `OpenAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_openai.OpenAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/openai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [OpenAI](https://api.js.langchain.com/classes/langchain_openai.OpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "To access OpenAI models you'll need to create an OpenAI account, get an API key, and install the `@langchain/openai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [platform.openai.com](https://platform.openai.com/) to sign up to OpenAI and generate an API key. 
Once you've done this set the `OPENAI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain OpenAI integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "a0562a13", + "metadata": {}, + "outputs": [], + "source": [ + "import { OpenAI } from \"@langchain/openai\"\n", + "\n", + "const llm = new OpenAI({\n", + " model: \"gpt-3.5-turbo-instruct\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " apiKey: process.env.OPENAI_API_KEY,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "035dea0f", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "develops and promotes friendly AI for the benefit of humanity. It was founded in 2015 by Elon Musk, Sam Altman, Greg Brockman, Ilya Sutskever, Wojciech Zaremba, John Schulman, and Chris Olah. The company's mission is to create and promote artificial general intelligence (AGI) that is safe and beneficial to humanity.\n", + "\n", + "OpenAI conducts research in various areas of AI, including deep learning, reinforcement learning, robotics, and natural language processing. The company also develops and releases open-source tools and platforms for AI research, such as the GPT-3 language model and the Gym toolkit for reinforcement learning.\n", + "\n", + "One of the main goals of OpenAI is to ensure that the development of AI is aligned with human values and does not pose a threat to humanity. To this end, the company has established a set of principles for safe and ethical AI development, and it actively collaborates with other organizations and researchers in the field.\n", + "\n", + "OpenAI has received funding from various sources, including tech giants like Microsoft and Amazon, as well as individual investors. 
It has also partnered with companies and organizations such as Google, IBM, and the United Nations to advance its research and promote responsible AI development.\n", + "\n", + "In addition to its research and development\n" + ] + } + ], + "source": [ + "const inputText = \"OpenAI is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "078e9db2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Ich liebe Programmieren.\n" + ] + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = new PromptTemplate({\n", + " template: \"How to say {input} in {output_language}:\\n\",\n", + " inputVariables: [\"input\", \"output_language\"],\n", + "})\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e99eef30", + "metadata": {}, + "source": [ + "If you're part of an organization, you can set `process.env.OPENAI_ORGANIZATION` to your OpenAI organization id, or pass it in as `organization` when\n", + "initializing the model.\n", + "\n", + "## Custom URLs\n", + "\n", + "You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d958ab00", + "metadata": {}, + "outputs": [], + "source": [ + "const llmCustomURL = new OpenAI({\n", + " temperature: 0.9,\n", + " configuration: {\n", + " baseURL: \"https://your_custom_url.com\",\n", + " },\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "81a5e2ea", + "metadata": {}, + "source": [ + "You can also pass other `ClientOptions` parameters accepted by the official SDK.\n", + "\n", + "If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/llms/azure).\n" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all OpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.OpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/llms/openai.mdx b/docs/core_docs/docs/integrations/llms/openai.mdx deleted file mode 100644 index e8ba5cd1e8a9..000000000000 --- a/docs/core_docs/docs/integrations/llms/openai.mdx +++ /dev/null @@ -1,55 +0,0 @@ -# OpenAI - -:::caution -You are currently on a page documenting the use of OpenAI [text completion models](/docs/concepts/#llms). 
The latest and most popular OpenAI models are [chat completion models](/docs/concepts/#chat-models). - -Unless you are specifically using `gpt-3.5-turbo-instruct`, you are probably looking for [this page instead](/docs/integrations/chat/openai/). -::: - -Here's how you can initialize an `OpenAI` LLM instance: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/openai -``` - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -```typescript -import { OpenAI } from "@langchain/openai"; - -const model = new OpenAI({ - model: "gpt-3.5-turbo-instruct", // Defaults to "gpt-3.5-turbo-instruct" if no model provided. - temperature: 0.9, - apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.OPENAI_API_KEY -}); -const res = await model.invoke( - "What would be a good company name for a company that makes colorful socks?" -); -console.log({ res }); -``` - -If you're part of an organization, you can set `process.env.OPENAI_ORGANIZATION` to your OpenAI organization id, or pass it in as `organization` when -initializing the model. - -## Custom URLs - -You can customize the base URL the SDK sends requests to by passing a `configuration` parameter like this: - -```typescript -const model = new OpenAI({ - temperature: 0.9, - configuration: { - baseURL: "https://your_custom_url.com", - }, -}); -``` - -You can also pass other `ClientOptions` parameters accepted by the official SDK. - -If you are hosting on Azure OpenAI, see the [dedicated page instead](/docs/integrations/llms/azure). diff --git a/docs/core_docs/scripts/quarto-build.js b/docs/core_docs/scripts/quarto-build.js index e502be91dd7d..128111f8d095 100644 --- a/docs/core_docs/scripts/quarto-build.js +++ b/docs/core_docs/scripts/quarto-build.js @@ -3,6 +3,7 @@ const { glob } = require("glob"); const { execSync } = require("node:child_process"); const IGNORED_CELL_REGEX = /```\w*?\n\/\/ ?@lc-docs-hide-cell\n[\s\S]*?```/g; +const LC_TS_IGNORE_REGEX = /\/\/ ?@lc-ts-ignore\n/g; async function main() { const allIpynb = await glob("./docs/**/*.ipynb"); @@ -20,8 +21,13 @@ async function main() { for (const renamedFilepath of allRenames) { if (fs.existsSync(renamedFilepath)) { let content = fs.readFileSync(renamedFilepath).toString(); - if (content.match(IGNORED_CELL_REGEX)) { - content = content.replace(IGNORED_CELL_REGEX, ""); + if ( + content.match(IGNORED_CELL_REGEX) || + content.match(LC_TS_IGNORE_REGEX) + ) { + content = content + .replace(IGNORED_CELL_REGEX, "") + .replace(LC_TS_IGNORE_REGEX, ""); fs.writeFileSync(renamedFilepath, content); } } diff --git a/docs/core_docs/scripts/validate_notebook.ts b/docs/core_docs/scripts/validate_notebook.ts index 776684ebc73f..fd0c4cba7242 100644 --- a/docs/core_docs/scripts/validate_notebook.ts +++ b/docs/core_docs/scripts/validate_notebook.ts @@ -8,8 +8,11 @@ export function extract(filepath: string) { const sourceFile = project.createSourceFile("temp.ts", ""); cells.forEach((cell: Record<string, any>) => { + const source = cell.source + .join("") + .replace(/\/\/ ?@lc-ts-ignore/g, "// @ts-ignore"); if (cell.cell_type === "code") { - sourceFile.addStatements(cell.source.join("")); + sourceFile.addStatements(source); } }); @@ -17,7 +20,7 @@ export function extract(filepath: string) { const importDeclarations = sourceFile.getImportDeclarations(); const uniqueImports = new Map< string, - { default?: string; named: Set<string> } + { default?: string; namespace?: string; named: Set<string> } >(); 
importDeclarations.forEach((importDecl) => { @@ -29,6 +32,10 @@ export function extract(filepath: string) { if (defaultImport) { uniqueImports.get(moduleSpecifier)!.default = defaultImport.getText(); } + const namespaceImport = importDecl.getNamespaceImport(); + if (namespaceImport) { + uniqueImports.get(moduleSpecifier)!.namespace = namespaceImport.getText(); + } importDecl.getNamedImports().forEach((namedImport) => { uniqueImports.get(moduleSpecifier)!.named.add(namedImport.getText()); }); @@ -39,10 +46,11 @@ export function extract(filepath: string) { // Add deduplicated imports at the top uniqueImports.forEach( - ({ default: defaultImport, named }, moduleSpecifier) => { + ({ default: defaultImport, namespace, named }, moduleSpecifier) => { sourceFile.addImportDeclaration({ moduleSpecifier, defaultImport, + namespaceImport: namespace, namedImports: Array.from(named), }); } diff --git a/langchain-core/package.json b/langchain-core/package.json index 387f154b5170..8b4153454f47 100644 --- a/langchain-core/package.json +++ b/langchain-core/package.json @@ -1,6 +1,6 @@ { "name": "@langchain/core", - "version": "0.2.18", + "version": "0.2.19", "description": "Core LangChain.js abstractions and schemas", "type": "module", "engines": { diff --git a/langchain-core/src/callbacks/manager.ts b/langchain-core/src/callbacks/manager.ts index 9a6cbe9405cf..222e543cb09f 100644 --- a/langchain-core/src/callbacks/manager.ts +++ b/langchain-core/src/callbacks/manager.ts @@ -154,6 +154,42 @@ export class BaseRunManager { ) ); } + + async handleCustomEvent( + eventName: string, + // eslint-disable-next-line @typescript-eslint/no-explicit-any + data: any, + _runId?: string, + _tags?: string[], + // eslint-disable-next-line @typescript-eslint/no-explicit-any + _metadata?: Record<string, any> + ): Promise<void> { + await Promise.all( + this.handlers.map((handler) => + consumeCallback(async () => { + try { + await handler.handleCustomEvent?.( + eventName, + data, + this.runId, + this.tags, + this.metadata ); } catch (err) { + const logFunction = handler.raiseError + ? 
console.error + : console.warn; + logFunction( + `Error in handler ${handler.constructor.name}, handleCustomEvent: ${err}` + ); + if (handler.raiseError) { + throw err; + } + } + }, handler.awaitHandlers) + ) + ); + } } /** diff --git a/langchain-core/src/language_models/tests/chat_models.test.ts b/langchain-core/src/language_models/tests/chat_models.test.ts index 865ba1d23b2a..940cc50802b2 100644 --- a/langchain-core/src/language_models/tests/chat_models.test.ts +++ b/langchain-core/src/language_models/tests/chat_models.test.ts @@ -228,3 +228,40 @@ test("Test ChatModel can cache complex messages", async () => { const cachedMsg = value[0].message as AIMessage; expect(cachedMsg.content).toEqual(JSON.stringify(contentToCache, null, 2)); }); + +test("Test ChatModel can emit a custom event", async () => { + const model = new FakeListChatModel({ + responses: ["hi"], + emitCustomEvent: true, + }); + let customEvent; + const response = await model.invoke([["human", "Hello there!"]], { + callbacks: [ + { + handleCustomEvent(_, data) { + customEvent = data; + }, + }, + ], + }); + await new Promise((resolve) => setTimeout(resolve, 100)); + expect(response.content).toEqual("hi"); + expect(customEvent).toBeDefined(); +}); + +test("Test ChatModel can stream back a custom event", async () => { + const model = new FakeListChatModel({ + responses: ["hi"], + emitCustomEvent: true, + }); + let customEvent; + const eventStream = await model.streamEvents([["human", "Hello there!"]], { + version: "v2", + }); + for await (const event of eventStream) { + if (event.event === "on_custom_event") { + customEvent = event; + } + } + expect(customEvent).toBeDefined(); +}); diff --git a/langchain-core/src/tools/index.ts b/langchain-core/src/tools/index.ts index c9e2c98402ae..a2c488e58bca 100644 --- a/langchain-core/src/tools/index.ts +++ b/langchain-core/src/tools/index.ts @@ -20,6 +20,7 @@ import { ZodObjectAny } from "../types/zod.js"; import { MessageContent } from "../messages/base.js"; import { AsyncLocalStorageProviderSingleton } from "../singletons/index.js"; import { _isToolCall, ToolInputParsingException } from "./utils.js"; +import { isZodSchema } from "../utils/types/is_zod_schema.js"; export { ToolInputParsingException }; @@ -319,16 +320,19 @@ export interface DynamicToolInput extends BaseDynamicToolInput { * Interface for the input parameters of the DynamicStructuredTool class. */ export interface DynamicStructuredToolInput< - T extends ZodObjectAny = ZodObjectAny + // eslint-disable-next-line @typescript-eslint/no-explicit-any + T extends ZodObjectAny | Record<string, any> = ZodObjectAny > extends BaseDynamicToolInput { func: ( input: BaseDynamicToolInput["responseFormat"] extends "content_and_artifact" ? ToolCall - : z.infer<T>, + : T extends ZodObjectAny + ? z.infer<T> + : T, runManager?: CallbackManagerForToolRun, config?: RunnableConfig ) => Promise<ToolReturnType>; - schema: T; + schema: T extends ZodObjectAny ? T : T; } /** @@ -382,10 +386,14 @@ export class DynamicTool extends Tool { * description, designed to work with structured data. It extends the * StructuredTool class and overrides the _call method to execute the * provided function when the tool is called. + * + * Schema can be passed as Zod or JSON schema. The tool will not validate + * input if JSON schema is passed. 
*/ export class DynamicStructuredTool< - T extends ZodObjectAny = ZodObjectAny -> extends StructuredTool<T> { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + T extends ZodObjectAny | Record<string, any> = ZodObjectAny +> extends StructuredTool<T extends ZodObjectAny ? T : ZodObjectAny> { static lc_name() { return "DynamicStructuredTool"; } @@ -396,7 +404,7 @@ export class DynamicStructuredTool< func: DynamicStructuredToolInput<T>["func"]; - schema: T; + schema: T extends ZodObjectAny ? T : ZodObjectAny; constructor(fields: DynamicStructuredToolInput<T>) { super(fields); @@ -404,14 +412,16 @@ export class DynamicStructuredTool< this.description = fields.description; this.func = fields.func; this.returnDirect = fields.returnDirect ?? this.returnDirect; - this.schema = fields.schema; + this.schema = ( + isZodSchema(fields.schema) ? fields.schema : z.object({}) + ) as T extends ZodObjectAny ? T : ZodObjectAny; } /** * @deprecated Use .invoke() instead. Will be removed in 0.3.0. */ async call( - arg: z.output<T> | ToolCall, + arg: (T extends ZodObjectAny ? z.output<T> : T) | ToolCall, configArg?: RunnableConfig | Callbacks, /** @deprecated */ tags?: string[] @@ -424,11 +434,12 @@ export class DynamicStructuredTool< } protected _call( - arg: z.output<T> | ToolCall, + arg: (T extends ZodObjectAny ? z.output<T> : T) | ToolCall, runManager?: CallbackManagerForToolRun, parentConfig?: RunnableConfig ): Promise<ToolReturnType> { - return this.func(arg, runManager, parentConfig); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return this.func(arg as any, runManager, parentConfig); } } @@ -447,10 +458,16 @@ export abstract class BaseToolkit { /** * Parameters for the tool function. - * @template {ZodObjectAny | z.ZodString = ZodObjectAny} RunInput The input schema for the tool. Either any Zod object, or a Zod string. + * Schema can be provided as Zod or JSON schema. + * If you pass JSON schema, tool inputs will not be validated. + * @template {ZodObjectAny | z.ZodString | Record<string, any> = ZodObjectAny} RunInput The input schema for the tool. Either any Zod object, a Zod string, or JSON schema. */ interface ToolWrapperParams< - RunInput extends ZodObjectAny | z.ZodString = ZodObjectAny + RunInput extends + | ZodObjectAny + | z.ZodString + // eslint-disable-next-line @typescript-eslint/no-explicit-any + | Record<string, any> = ZodObjectAny > extends ToolParams { /** * The name of the tool. If using with an LLM, this @@ -483,8 +500,11 @@ interface ToolWrapperParams< /** * Creates a new StructuredTool instance with the provided function, name, description, and schema. * + * Schema can be provided as Zod or JSON schema. + * If you pass JSON schema, tool inputs will not be validated. + * * @function - * @template {ZodObjectAny | z.ZodString = ZodObjectAny} T The input schema for the tool. Either any Zod object, or a Zod string. + * @template {ZodObjectAny | z.ZodString | Record<string, any> = ZodObjectAny} T The input schema for the tool. Either any Zod object, a Zod string, or JSON schema instance. * * @param {RunnableFunc<z.output<T>, ToolReturnType>} func - The function to invoke when the tool is called. * @param {ToolWrapperParams<T>} fields - An object containing the following properties: @@ -494,18 +514,27 @@ interface ToolWrapperParams< * * @returns {DynamicStructuredTool} A new StructuredTool instance. 
 */
-export function tool<T extends z.ZodString = z.ZodString>(
+export function tool<T extends z.ZodString>(
   func: RunnableFunc<z.output<T>, ToolReturnType>,
   fields: ToolWrapperParams<T>
 ): DynamicTool;
 
-export function tool<T extends ZodObjectAny = ZodObjectAny>(
+export function tool<T extends ZodObjectAny>(
   func: RunnableFunc<z.output<T>, ToolReturnType>,
   fields: ToolWrapperParams<T>
 ): DynamicStructuredTool<T>;
 
-export function tool<T extends ZodObjectAny | z.ZodString = ZodObjectAny>(
-  func: RunnableFunc<z.output<T>, ToolReturnType>,
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+export function tool<T extends Record<string, any>>(
+  func: RunnableFunc<T, ToolReturnType>,
+  fields: ToolWrapperParams<T>
+): DynamicStructuredTool<T>;
+
+export function tool<
+  // eslint-disable-next-line @typescript-eslint/no-explicit-any
+  T extends ZodObjectAny | z.ZodString | Record<string, any> = ZodObjectAny
+>(
+  func: RunnableFunc<T extends ZodObjectAny ? z.output<T> : T, ToolReturnType>,
   fields: ToolWrapperParams<T>
 ):
   | DynamicStructuredTool<T extends ZodObjectAny ? T : ZodObjectAny>
   | DynamicTool {
@@ -518,7 +547,9 @@ export function tool(
       fields.description ??
         fields.schema?.description ??
         `${fields.name} tool`,
-      func,
+      // TS doesn't restrict the type here based on the guard above
+      // eslint-disable-next-line @typescript-eslint/no-explicit-any
+      func: func as any,
     });
   }
 
@@ -528,7 +559,8 @@ export function tool(
   return new DynamicStructuredTool({
     ...fields,
     description,
-    schema: fields.schema as T extends ZodObjectAny ? T : ZodObjectAny,
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    schema: fields.schema as any,
     // TODO: Consider moving into DynamicStructuredTool constructor
     func: async (input, runManager, config) => {
       return new Promise((resolve, reject) => {
@@ -539,7 +571,9 @@ export function tool(
         childConfig,
         async () => {
           try {
-            resolve(func(input, childConfig));
+            // TS doesn't restrict the type here based on the guard above
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            resolve(func(input as any, childConfig));
           } catch (e) {
             reject(e);
           }
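The `Record<string, any>` overload is the headline change in this file, so a short usage sketch may help review. The schema object below is illustrative; as the doc comments above state, JSON-schema inputs are not validated at runtime, which is why the handler stays defensive.

```typescript
// Sketch: declaring a tool with a plain JSON schema instead of Zod.
// With this overload the tool types its input loosely and performs no
// runtime validation, so unexpected keys pass straight through.
import { tool } from "@langchain/core/tools";

const weatherJsonSchema = {
  type: "object",
  properties: {
    location: { type: "string", description: "A place" },
  },
  required: ["location"],
};

const weatherTool = tool(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async (input: any) => `Sunny in ${input.location ?? "an unknown place"}`,
  {
    name: "weather",
    description: "Get the weather for a location",
    schema: weatherJsonSchema,
  }
);

// Both calls resolve; the second would throw if the schema were Zod.
await weatherTool.invoke({ location: "Paris" });
await weatherTool.invoke({ somethingSilly: true });
```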
diff --git a/langchain-core/src/tools/tests/tools.test.ts b/langchain-core/src/tools/tests/tools.test.ts
index bf577a4a1dc9..4c38800b3489 100644
--- a/langchain-core/src/tools/tests/tools.test.ts
+++ b/langchain-core/src/tools/tests/tools.test.ts
@@ -1,6 +1,6 @@
 import { test, expect } from "@jest/globals";
 import { z } from "zod";
-import { tool } from "../index.js";
+import { DynamicStructuredTool, tool } from "../index.js";
 import { ToolMessage } from "../../messages/tool.js";
 
 test("Tool should error if responseFormat is content_and_artifact but the function doesn't return a tuple", async () => {
@@ -115,3 +115,100 @@ test("Tool can accept single string input", async () => {
   const result = await stringTool.invoke("b");
   expect(result).toBe("ba");
 });
+
+test("Tool declared with JSON schema", async () => {
+  const weatherSchema = {
+    type: "object",
+    properties: {
+      location: {
+        type: "string",
+        description: "A place",
+      },
+    },
+    required: ["location"],
+  };
+  const weatherTool = tool(
+    (_) => {
+      return "Sunny";
+    },
+    {
+      name: "weather",
+      schema: weatherSchema,
+    }
+  );
+
+  const weatherTool2 = new DynamicStructuredTool({
+    name: "weather",
+    description: "get the weather",
+    func: async (_) => {
+      return "Sunny";
+    },
+    schema: weatherSchema,
+  });
+  // No validation on JSON schema tools
+  await weatherTool.invoke({
+    somethingSilly: true,
+  });
+  await weatherTool2.invoke({
+    somethingSilly: true,
+  });
+});
+
+test("Tool input typing is enforced", async () => {
+  const weatherSchema = z.object({
+    location: z.string(),
+  });
+
+  const weatherTool = tool(
+    (_) => {
+      return "Sunny";
+    },
+    {
+      name: "weather",
+      schema: weatherSchema,
+    }
+  );
+
+  const weatherTool2 = new DynamicStructuredTool({
+    name: "weather",
+    description: "get the weather",
+    func: async (_) => {
+      return "Sunny";
+    },
+    schema: weatherSchema,
+  });
+
+  const weatherTool3 = tool(
+    async (_) => {
+      return "Sunny";
+    },
+    {
+      name: "weather",
+      description: "get the weather",
+      schema: z.string(),
+    }
+  );
+
+  await expect(async () => {
+    await weatherTool.invoke({
+      // @ts-expect-error Invalid argument
+      badval: "someval",
+    });
+  }).rejects.toThrow();
+  const res = await weatherTool.invoke({
+    location: "somewhere",
+  });
+  expect(res).toEqual("Sunny");
+  await expect(async () => {
+    await weatherTool2.invoke({
+      // @ts-expect-error Invalid argument
+      badval: "someval",
+    });
+  }).rejects.toThrow();
+  const res2 = await weatherTool2.invoke({
+    location: "someval",
+  });
+  expect(res2).toEqual("Sunny");
+  const res3 = await weatherTool3.invoke("blah");
+  expect(res3).toEqual("Sunny");
+});
diff --git a/langchain-core/src/utils/testing/index.ts b/langchain-core/src/utils/testing/index.ts
index 73e79f892fc1..685fae8d3749 100644
--- a/langchain-core/src/utils/testing/index.ts
+++ b/langchain-core/src/utils/testing/index.ts
@@ -320,6 +320,8 @@ export interface FakeChatInput extends BaseChatModelParams {
 
   /** Time to sleep in milliseconds between responses */
   sleep?: number;
+
+  emitCustomEvent?: boolean;
 }
 
 /**
@@ -353,10 +355,13 @@ export class FakeListChatModel extends BaseChatModel {
 
   sleep?: number;
 
-  constructor({ responses, sleep }: FakeChatInput) {
+  emitCustomEvent = false;
+
+  constructor({ responses, sleep, emitCustomEvent }: FakeChatInput) {
     super({});
     this.responses = responses;
     this.sleep = sleep;
+    this.emitCustomEvent = emitCustomEvent ?? this.emitCustomEvent;
   }
 
   _combineLLMOutput() {
@@ -369,9 +374,15 @@ export class FakeListChatModel extends BaseChatModel {
 
   async _generate(
     _messages: BaseMessage[],
-    options?: this["ParsedCallOptions"]
+    options?: this["ParsedCallOptions"],
+    runManager?: CallbackManagerForLLMRun
   ): Promise<ChatResult> {
     await this._sleepIfRequested();
+    if (this.emitCustomEvent) {
+      await runManager?.handleCustomEvent("some_test_event", {
+        someval: true,
+      });
+    }
 
     if (options?.stop?.length) {
       return {
@@ -402,6 +413,11 @@ export class FakeListChatModel extends BaseChatModel {
   ): AsyncGenerator<ChatGenerationChunk> {
     const response = this._currentResponse();
     this._incrementResponse();
+    if (this.emitCustomEvent) {
+      await runManager?.handleCustomEvent("some_test_event", {
+        someval: true,
+      });
+    }
 
     for await (const text of response) {
       await this._sleepIfRequested();
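On the producer side, the `FakeListChatModel` change above shows the whole mechanism: a model implementation calls `handleCustomEvent` on its run manager. On the consumer side, the v2 event stream surfaces this as `on_custom_event`. A minimal sketch using only names that appear in the diff:

```typescript
// Sketch: consuming the fake model's custom event from the v2 event
// stream. "some_test_event" and { someval: true } are the values the
// patched FakeListChatModel dispatches above.
import { FakeListChatModel } from "@langchain/core/utils/testing";

const model = new FakeListChatModel({
  responses: ["hi"],
  emitCustomEvent: true,
});

const eventStream = model.streamEvents([["human", "Hello there!"]], {
  version: "v2",
});

for await (const event of eventStream) {
  if (event.event === "on_custom_event") {
    console.log(event.name, event.data); // -> some_test_event { someval: true }
  }
}
```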
diff --git a/langchain/package.json b/langchain/package.json
index 7fa98d21ce8c..641cb9980e49 100644
--- a/langchain/package.json
+++ b/langchain/package.json
@@ -941,8 +941,7 @@
     "js-tiktoken": "^1.0.12",
     "js-yaml": "^4.1.0",
     "jsonpointer": "^5.0.1",
-    "langchainhub": "~0.0.8",
-    "langsmith": "~0.1.30",
+    "langsmith": "~0.1.40",
     "ml-distance": "^4.0.0",
     "openapi-types": "^12.1.3",
     "p-retry": 4,
diff --git a/langchain/src/hub.ts b/langchain/src/hub.ts
index ac2f958b08a6..53abe8e8c1fc 100644
--- a/langchain/src/hub.ts
+++ b/langchain/src/hub.ts
@@ -1,4 +1,4 @@
-import { Client, ClientConfiguration, HubPushOptions } from "langchainhub";
+import { Client } from "langsmith";
 import { Runnable } from "@langchain/core/runnables";
 
 import { load } from "./load/index.js";
@@ -13,10 +13,30 @@ import { load } from "./load/index.js";
 export async function push(
   repoFullName: string,
   runnable: Runnable,
-  options?: HubPushOptions & ClientConfiguration
+  options?: {
+    apiKey?: string;
+    apiUrl?: string;
+    parentCommitHash?: string;
+    /** @deprecated Use isPublic instead. */
+    newRepoIsPublic?: boolean;
+    isPublic?: boolean;
+    /** @deprecated Use description instead. */
+    newRepoDescription?: string;
+    description?: string;
+    readme?: string;
+    tags?: string[];
+  }
 ) {
   const client = new Client(options);
-  return client.push(repoFullName, JSON.stringify(runnable), options);
+  const payloadOptions = {
+    object: runnable,
+    parentCommitHash: options?.parentCommitHash,
+    isPublic: options?.isPublic ?? options?.newRepoIsPublic,
+    description: options?.description ?? options?.newRepoDescription,
+    readme: options?.readme,
+    tags: options?.tags,
+  };
+  return client.pushPrompt(repoFullName, payloadOptions);
 }
 
 /**
@@ -27,9 +47,11 @@ export async function push(
  */
 export async function pull<T extends Runnable>(
   ownerRepoCommit: string,
-  options?: ClientConfiguration
+  options?: { apiKey?: string; apiUrl?: string; includeModel?: boolean }
 ) {
   const client = new Client(options);
-  const result = await client.pull(ownerRepoCommit);
+  const result = await client._pullPrompt(ownerRepoCommit, {
+    includeModel: options?.includeModel,
+  });
   return load<T>(result);
 }
diff --git a/langchain/src/tools/sql.ts b/langchain/src/tools/sql.ts
index 09584fe980e3..6a139af3d5c2 100644
--- a/langchain/src/tools/sql.ts
+++ b/langchain/src/tools/sql.ts
@@ -156,7 +156,7 @@ export class QueryCheckerTool extends Tool {
   template = `
     {query}
-Double check the sqlite query above for common mistakes, including:
+Double check the SQL query above for common mistakes, including:
 - Using NOT IN with NULL values
 - Using UNION when UNION ALL should have been used
 - Using BETWEEN for exclusive ranges
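Since `push` and `pull` keep their public shapes while swapping the `langchainhub` client for `langsmith`, most callers migrate without code changes. A sketch under the new signatures; the repo handle and prompt content are illustrative, and the generic on `pull` matches the type parameter reconstructed above:

```typescript
// Sketch of hub usage against the langsmith-backed client. isPublic and
// description supersede the deprecated newRepoIsPublic/newRepoDescription
// fields; "my-handle/my-prompt" is a made-up repo name.
import * as hub from "langchain/hub";
import { ChatPromptTemplate } from "@langchain/core/prompts";

const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant."],
  ["human", "{question}"],
]);

await hub.push("my-handle/my-prompt", prompt, {
  isPublic: false,
  description: "A minimal Q&A prompt",
});

// The type parameter flows through to load<T>() on the way back out.
const pulled = await hub.pull<ChatPromptTemplate>("my-handle/my-prompt");
await pulled.invoke({ question: "What changed in this release?" });
```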
diff --git a/libs/langchain-community/src/llms/layerup_security.ts b/libs/langchain-community/src/llms/layerup_security.ts
index e60676094892..5d84e7188add 100644
--- a/libs/langchain-community/src/llms/layerup_security.ts
+++ b/libs/langchain-community/src/llms/layerup_security.ts
@@ -1,7 +1,7 @@
 import {
   LLM,
   BaseLLM,
-  type BaseLLMParams,
+  type BaseLLMCallOptions,
 } from "@langchain/core/language_models/llms";
 import {
   GuardrailResponse,
@@ -9,7 +9,7 @@ import {
   LLMMessage,
 } from "@layerup/layerup-security";
 
-export interface LayerupSecurityOptions extends BaseLLMParams {
+export interface LayerupSecurityOptions extends BaseLLMCallOptions {
   llm: BaseLLM;
   layerupApiKey?: string;
   layerupApiBaseUrl?: string;
@@ -101,7 +101,7 @@ export class LayerupSecurity extends LLM {
     return "layerup_security";
   }
 
-  async _call(input: string, options?: BaseLLMParams): Promise<string> {
+  async _call(input: string, options?: BaseLLMCallOptions): Promise<string> {
     // Since LangChain LLMs only support string inputs, we will wrap each call to Layerup in a single-message
     // array of messages, then extract the string element when we need to access it.
     let messages: LLMMessage[] = [
diff --git a/libs/langchain-community/src/llms/tests/layerup_security.test.ts b/libs/langchain-community/src/llms/tests/layerup_security.test.ts
index 670a56ca200b..883dca6fd0cb 100644
--- a/libs/langchain-community/src/llms/tests/layerup_security.test.ts
+++ b/libs/langchain-community/src/llms/tests/layerup_security.test.ts
@@ -1,5 +1,8 @@
 import { test } from "@jest/globals";
-import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms";
+import {
+  LLM,
+  type BaseLLMCallOptions,
+} from "@langchain/core/language_models/llms";
 import { GuardrailResponse } from "@layerup/layerup-security/types.js";
 import {
   LayerupSecurity,
@@ -18,7 +21,7 @@ export class MockLLM extends LLM {
     return "mock_llm";
   }
 
-  async _call(_input: string, _options?: BaseLLMParams): Promise<string> {
+  async _call(_input: string, _options?: BaseLLMCallOptions): Promise<string> {
     return "Hi Bob! How are you?";
   }
 }
diff --git a/libs/langchain-scripts/package.json b/libs/langchain-scripts/package.json
index 82048e1f88a6..1b9e36114282 100644
--- a/libs/langchain-scripts/package.json
+++ b/libs/langchain-scripts/package.json
@@ -44,6 +44,7 @@
     "axios": "^1.6.7",
     "commander": "^11.1.0",
     "glob": "^10.3.10",
+    "lodash": "^4.17.21",
     "readline": "^1.3.0",
     "rimraf": "^5.0.1",
     "rollup": "^4.5.2",
@@ -55,6 +56,7 @@
     "@swc/core": "^1.3.90",
     "@swc/jest": "^0.2.29",
     "@tsconfig/recommended": "^1.0.3",
+    "@types/lodash": "^4",
     "@typescript-eslint/eslint-plugin": "^6.12.0",
     "@typescript-eslint/parser": "^6.12.0",
     "dotenv": "^16.3.1",
diff --git a/libs/langchain-scripts/src/cli/docs/chat.ts b/libs/langchain-scripts/src/cli/docs/chat.ts
index a272a553ef39..196caabddc19 100644
--- a/libs/langchain-scripts/src/cli/docs/chat.ts
+++ b/libs/langchain-scripts/src/cli/docs/chat.ts
@@ -69,57 +69,57 @@ type ExtraFields = {
 
 async function promptExtraFields(): Promise<ExtraFields> {
   const hasToolCalling = await getUserInput(
-    "Does the tool support tool calling? (y/n) ",
+    "Does this integration support tool calling? (y/n) ",
     undefined,
     true
   );
   const hasJsonMode = await getUserInput(
-    "Does the tool support JSON mode? (y/n) ",
+    "Does this integration support JSON mode? (y/n) ",
     undefined,
     true
   );
   const hasImageInput = await getUserInput(
-    "Does the tool support image input? (y/n) ",
+    "Does this integration support image input? (y/n) ",
     undefined,
     true
   );
   const hasAudioInput = await getUserInput(
-    "Does the tool support audio input? (y/n) ",
+    "Does this integration support audio input? (y/n) ",
     undefined,
     true
   );
   const hasVideoInput = await getUserInput(
-    "Does the tool support video input? (y/n) ",
+    "Does this integration support video input? (y/n) ",
     undefined,
     true
   );
   const hasTokenLevelStreaming = await getUserInput(
-    "Does the tool support token level streaming? (y/n) ",
+    "Does this integration support token level streaming? (y/n) ",
     undefined,
     true
   );
   const hasTokenUsage = await getUserInput(
-    "Does the tool support token usage? (y/n) ",
+    "Does this integration support token usage? (y/n) ",
     undefined,
     true
   );
   const hasLogprobs = await getUserInput(
-    "Does the tool support logprobs? (y/n) ",
+    "Does this integration support logprobs? (y/n) ",
     undefined,
     true
   );
   const hasLocal = await getUserInput(
-    "Does the tool support local usage? (y/n) ",
+    "Does this integration support local usage? (y/n) ",
     undefined,
     true
   );
   const hasSerializable = await getUserInput(
-    "Does the tool support serializable output? (y/n) ",
+    "Does this integration support serializable output?
(y/n) ", undefined, true ); const hasPySupport = await getUserInput( - "Does the tool support Python support? (y/n) ", + "Does this integration have Python support? (y/n) ", undefined, true ); diff --git a/libs/langchain-scripts/src/cli/docs/document_loaders.ts b/libs/langchain-scripts/src/cli/docs/document_loaders.ts new file mode 100644 index 000000000000..9fe5118af0b4 --- /dev/null +++ b/libs/langchain-scripts/src/cli/docs/document_loaders.ts @@ -0,0 +1,188 @@ +import * as path from "node:path"; +import * as fs from "node:fs"; +import _ from "lodash"; +import { + boldText, + getUserInput, + greenText, + redBackground, +} from "../utils/get-input.js"; + +const NODE_OR_WEB_PLACEHOLDER = "__fs_or_web__"; +const NODE_OR_WEB_IMPORT_PATH_PLACEHOLDER = "__fs_or_web_import_path__"; +const FILE_NAME_PLACEHOLDER = "__file_name__"; +const MODULE_NAME_PLACEHOLDER = "__ModuleName__"; + +const API_REF_BASE_PACKAGE_URL = `https://api.js.langchain.com/modules/langchain_community_document_loaders_${NODE_OR_WEB_PLACEHOLDER}_${FILE_NAME_PLACEHOLDER}.html`; +const API_REF_BASE_MODULE_URL = `https://v02.api.js.langchain.com/classes/langchain_community_document_loaders_${NODE_OR_WEB_PLACEHOLDER}_${FILE_NAME_PLACEHOLDER}.${MODULE_NAME_PLACEHOLDER}.html`; + +const SERIALIZABLE_PLACEHOLDER = "__serializable__"; +const LOCAL_PLACEHOLDER = "__local__"; +const PY_SUPPORT_PLACEHOLDER = "__py_support__"; + +const NODE_SUPPORT_PLACEHOLDER = "__fs_support__"; + +const NODE_ONLY_SIDEBAR_BADGE_PLACEHOLDER = "__node_only_sidebar__"; +const NODE_ONLY_TOOL_TIP_PLACEHOLDER = "__node_only_tooltip__"; + +// This should not be suffixed with `Loader` as it's used for API keys. +const MODULE_NAME_ALL_CAPS_PLACEHOLDER = "__MODULE_NAME_ALL_CAPS__"; + +const TEMPLATE_PATH = path.resolve( + "./src/cli/docs/templates/document_loaders.ipynb" +); +const INTEGRATIONS_DOCS_PATH = path.resolve( + "../../docs/core_docs/docs/integrations/document_loaders" +); + +const NODE_ONLY_TOOLTIP = + "```{=mdx}\n\n:::tip Compatibility\n\nOnly available on Node.js.\n\n:::\n\n```\n"; +const NODE_ONLY_SIDEBAR_BADGE = `sidebar_class_name: node-only`; + +const fetchAPIRefUrl = async (url: string): Promise => { + try { + const res = await fetch(url); + if (res.status !== 200) { + throw new Error(`API Reference URL ${url} not found.`); + } + return true; + } catch (_) { + return false; + } +}; + +type ExtraFields = { + webLoader: boolean; + nodeOnly: boolean; + serializable: boolean; + pySupport: boolean; + local: boolean; +}; + +async function promptExtraFields(): Promise { + const isWebLoader = await getUserInput( + "Is this integration a web loader? (y/n) ", + undefined, + true + ); + const isNodeOnly = await getUserInput( + "Does this integration _only_ support Node environments? (y/n) ", + undefined, + true + ); + const isSerializable = await getUserInput( + "Does this integration support serializable output? (y/n) ", + undefined, + true + ); + const hasPySupport = await getUserInput( + "Does this integration have Python support? (y/n) ", + undefined, + true + ); + const hasLocalSupport = await getUserInput( + "Does this integration support running locally? 
(y/n) ", + undefined, + true + ); + + return { + webLoader: isWebLoader.toLowerCase() === "y", + nodeOnly: isNodeOnly.toLowerCase() === "y", + serializable: isSerializable.toLowerCase() === "y", + pySupport: hasPySupport.toLowerCase() === "y", + local: hasLocalSupport.toLowerCase() === "y", + }; +} + +export async function fillDocLoaderIntegrationDocTemplate(fields: { + packageName: string; + moduleName: string; + webSupport?: boolean; + nodeSupport?: boolean; +}) { + // Ask the user if they'd like to fill in extra fields, if so, prompt them. + let extraFields: ExtraFields | undefined; + const shouldPromptExtraFields = await getUserInput( + "Would you like to fill out optional fields? (y/n) ", + "white_background" + ); + if (shouldPromptExtraFields.toLowerCase() === "y") { + extraFields = await promptExtraFields(); + } + + const formattedPackageApiRefUrl = API_REF_BASE_PACKAGE_URL.replace( + NODE_OR_WEB_PLACEHOLDER, + extraFields?.webLoader ? "web" : "fs" + ).replace(FILE_NAME_PLACEHOLDER, fields.packageName); + + const formattedApiRefModuleUrl = API_REF_BASE_MODULE_URL.replace( + NODE_OR_WEB_PLACEHOLDER, + extraFields?.webLoader ? "web" : "fs" + ) + .replace(FILE_NAME_PLACEHOLDER, fields.packageName) + .replace(MODULE_NAME_PLACEHOLDER, fields.moduleName); + + const success = await Promise.all([ + fetchAPIRefUrl(formattedApiRefModuleUrl), + fetchAPIRefUrl(formattedPackageApiRefUrl), + ]); + if (success.find((s) => s === false)) { + // Don't error out because this might be used before the package is released. + console.error("Invalid package or module name. API reference not found."); + } + + let moduleNameAllCaps = _.snakeCase(fields.moduleName).toUpperCase(); + if (moduleNameAllCaps.endsWith("_LOADER")) { + moduleNameAllCaps = moduleNameAllCaps.replace("_LOADER", ""); + } + + const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8")) + .replaceAll(NODE_OR_WEB_PLACEHOLDER, extraFields?.webLoader ? "web" : "fs") + .replaceAll(MODULE_NAME_PLACEHOLDER, fields.moduleName) + .replaceAll(MODULE_NAME_ALL_CAPS_PLACEHOLDER, moduleNameAllCaps) + .replaceAll( + NODE_OR_WEB_IMPORT_PATH_PLACEHOLDER, + extraFields?.webLoader ? "web" : "fs" + ) + .replaceAll(FILE_NAME_PLACEHOLDER, fields.packageName) + .replaceAll( + NODE_ONLY_SIDEBAR_BADGE_PLACEHOLDER, + extraFields?.nodeOnly ? NODE_ONLY_SIDEBAR_BADGE : "" + ) + .replaceAll( + NODE_ONLY_TOOL_TIP_PLACEHOLDER, + extraFields?.nodeOnly ? NODE_ONLY_TOOLTIP : "" + ) + .replaceAll( + NODE_SUPPORT_PLACEHOLDER, + extraFields?.nodeOnly ? "Node-only" : "All environments" + ) + .replaceAll(LOCAL_PLACEHOLDER, extraFields?.local ? "✅" : "❌") + .replaceAll( + SERIALIZABLE_PLACEHOLDER, + extraFields?.serializable ? "beta" : "❌" + ) + .replaceAll(PY_SUPPORT_PLACEHOLDER, extraFields?.pySupport ? "✅" : "❌"); + + const docPath = path.join( + INTEGRATIONS_DOCS_PATH, + extraFields?.webLoader ? "web_loaders" : "file_loaders", + `${fields.packageName}.ipynb` + ); + await fs.promises.writeFile(docPath, docTemplate); + const prettyDocPath = docPath.split("docs/core_docs/")[1]; + + const updatePythonDocUrlText = ` ${redBackground( + "- Update the Python documentation URL with the proper URL." + )}`; + const successText = `\nSuccessfully created new document loader integration doc at ${prettyDocPath}.`; + + console.log( + `${greenText(successText)}\n +${boldText("Next steps:")} +${extraFields?.pySupport ? updatePythonDocUrlText : ""} + - Run all code cells in the generated doc to record the outputs. 
+ - Add extra sections on integration specific features.\n` + ); +} diff --git a/libs/langchain-scripts/src/cli/docs/index.ts b/libs/langchain-scripts/src/cli/docs/index.ts index d86109618e0d..d664a220a240 100644 --- a/libs/langchain-scripts/src/cli/docs/index.ts +++ b/libs/langchain-scripts/src/cli/docs/index.ts @@ -3,6 +3,8 @@ // --------------------------------------------- import { Command } from "commander"; import { fillChatIntegrationDocTemplate } from "./chat.js"; +import { fillDocLoaderIntegrationDocTemplate } from "./document_loaders.js"; +import { fillLLMIntegrationDocTemplate } from "./llms.js"; type CLIInput = { package: string; @@ -15,10 +17,7 @@ async function main() { const program = new Command(); program .description("Create a new integration doc.") - .option( - "--package ", - "Package name, eg openai. Should be value of @langchain/" - ) + .option("--package ", "Package name, eg openai.") .option("--module ", "Module name, e.g ChatOpenAI") .option("--type ", "Type of integration, e.g. 'chat'") .option( @@ -45,9 +44,22 @@ async function main() { isCommunity, }); break; + case "doc_loader": + await fillDocLoaderIntegrationDocTemplate({ + packageName, + moduleName, + }); + break; + case "llm": + await fillLLMIntegrationDocTemplate({ + packageName, + moduleName, + isCommunity, + }); + break; default: console.error( - `Invalid type: ${type}.\nOnly 'chat' is supported at this time.` + `Invalid type: ${type}.\nOnly 'chat', 'llm' and 'doc_loader' are supported at this time.` ); process.exit(1); } diff --git a/libs/langchain-scripts/src/cli/docs/llms.ts b/libs/langchain-scripts/src/cli/docs/llms.ts new file mode 100644 index 000000000000..bd538bb55ac1 --- /dev/null +++ b/libs/langchain-scripts/src/cli/docs/llms.ts @@ -0,0 +1,179 @@ +import * as path from "node:path"; +import * as fs from "node:fs"; +import { + boldText, + getUserInput, + greenText, + redBackground, +} from "../utils/get-input.js"; + +const PACKAGE_NAME_PLACEHOLDER = "__package_name__"; +const PACKAGE_NAME_SHORT_SNAKE_CASE_PLACEHOLDER = + "__package_name_short_snake_case__"; +const PACKAGE_NAME_SNAKE_CASE_PLACEHOLDER = "__package_name_snake_case__"; +const PACKAGE_NAME_PRETTY_PLACEHOLDER = "__package_name_pretty__"; +const PACKAGE_IMPORT_PATH_PLACEHOLDER = "__import_path__"; +const MODULE_NAME_PLACEHOLDER = "__ModuleName__"; +// This should not be prefixed with `Chat` as it's used for API keys. +const MODULE_NAME_ALL_CAPS_PLACEHOLDER = "__MODULE_NAME_ALL_CAPS__"; + +const SERIALIZABLE_PLACEHOLDER = "__serializable__"; +const LOCAL_PLACEHOLDER = "__local__"; +const PY_SUPPORT_PLACEHOLDER = "__py_support__"; + +const API_REF_BASE_PACKAGE_URL = `https://api.js.langchain.com/modules/langchain_${PACKAGE_NAME_PLACEHOLDER}.html`; +const API_REF_BASE_MODULE_URL = `https://api.js.langchain.com/classes/langchain_${PACKAGE_NAME_PLACEHOLDER}.${MODULE_NAME_PLACEHOLDER}.html`; + +const TEMPLATE_PATH = path.resolve("./src/cli/docs/templates/llms.ipynb"); +const INTEGRATIONS_DOCS_PATH = path.resolve( + "../../docs/core_docs/docs/integrations/llms" +); + +const fetchAPIRefUrl = async (url: string): Promise => { + try { + const res = await fetch(url); + if (res.status !== 200) { + throw new Error(`API Reference URL ${url} not found.`); + } + return true; + } catch (_) { + return false; + } +}; + +type ExtraFields = { + local: boolean; + serializable: boolean; + pySupport: boolean; +}; + +async function promptExtraFields(): Promise { + const hasLocal = await getUserInput( + "Does this integration support local usage? 
(y/n) ",
+    undefined,
+    true
+  );
+  const hasSerializable = await getUserInput(
+    "Does this integration support serializable output? (y/n) ",
+    undefined,
+    true
+  );
+  const hasPySupport = await getUserInput(
+    "Does this integration have Python support? (y/n) ",
+    undefined,
+    true
+  );
+
+  return {
+    local: hasLocal.toLowerCase() === "y",
+    serializable: hasSerializable.toLowerCase() === "y",
+    pySupport: hasPySupport.toLowerCase() === "y",
+  };
+}
+
+export async function fillLLMIntegrationDocTemplate(fields: {
+  packageName: string;
+  moduleName: string;
+  isCommunity: boolean;
+}) {
+  // Ask the user if they'd like to fill in extra fields, if so, prompt them.
+  let extraFields: ExtraFields | undefined;
+  const shouldPromptExtraFields = await getUserInput(
+    "Would you like to fill out optional fields? (y/n) ",
+    "white_background"
+  );
+  if (shouldPromptExtraFields.toLowerCase() === "y") {
+    extraFields = await promptExtraFields();
+  }
+
+  let formattedApiRefPackageUrl = "";
+  let formattedApiRefModuleUrl = "";
+  if (fields.isCommunity) {
+    formattedApiRefPackageUrl = API_REF_BASE_PACKAGE_URL.replace(
+      PACKAGE_NAME_PLACEHOLDER,
+      `community_llms_${fields.packageName}`
+    );
+    formattedApiRefModuleUrl = API_REF_BASE_MODULE_URL.replace(
+      PACKAGE_NAME_PLACEHOLDER,
+      `community_llms_${fields.packageName}`
+    ).replace(MODULE_NAME_PLACEHOLDER, fields.moduleName);
+  } else {
+    formattedApiRefPackageUrl = API_REF_BASE_PACKAGE_URL.replace(
+      PACKAGE_NAME_PLACEHOLDER,
+      fields.packageName
+    );
+    formattedApiRefModuleUrl = API_REF_BASE_MODULE_URL.replace(
+      PACKAGE_NAME_PLACEHOLDER,
+      fields.packageName
+    ).replace(MODULE_NAME_PLACEHOLDER, fields.moduleName);
+  }
+
+  const success = await Promise.all([
+    fetchAPIRefUrl(formattedApiRefPackageUrl),
+    fetchAPIRefUrl(formattedApiRefModuleUrl),
+  ]);
+  if (success.some((s) => s === false)) {
+    // Don't error out because this might be used before the package is released.
+    console.error("Invalid package or module name. API reference not found.");
+  }
+
+  const packageNameShortSnakeCase = fields.packageName.replaceAll("-", "_");
+  let fullPackageNameSnakeCase = "";
+  let packageNamePretty = "";
+  let fullPackageImportPath = "";
+
+  if (fields.isCommunity) {
+    fullPackageNameSnakeCase = `langchain_community_llms_${packageNameShortSnakeCase}`;
+    fullPackageImportPath = `@langchain/community/llms/${fields.packageName}`;
+    packageNamePretty = "@langchain/community";
+  } else {
+    fullPackageNameSnakeCase = `langchain_${packageNameShortSnakeCase}`;
+    packageNamePretty = `@langchain/${fields.packageName}`;
+    fullPackageImportPath = packageNamePretty;
+  }
+
+  let moduleNameAllCaps = fields.moduleName.toUpperCase();
+  if (moduleNameAllCaps.endsWith("_LLM")) {
+    moduleNameAllCaps = moduleNameAllCaps.replace("_LLM", "");
+  } else if (moduleNameAllCaps.endsWith("LLM")) {
+    moduleNameAllCaps = moduleNameAllCaps.replace("LLM", "");
+  }
+
+  const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8"))
+    .replaceAll(PACKAGE_NAME_PLACEHOLDER, fields.packageName)
+    .replaceAll(PACKAGE_NAME_SNAKE_CASE_PLACEHOLDER, fullPackageNameSnakeCase)
+    .replaceAll(
+      PACKAGE_NAME_SHORT_SNAKE_CASE_PLACEHOLDER,
+      packageNameShortSnakeCase
+    )
+    .replaceAll(PACKAGE_NAME_PRETTY_PLACEHOLDER, packageNamePretty)
+    .replaceAll(PACKAGE_IMPORT_PATH_PLACEHOLDER, fullPackageImportPath)
+    .replaceAll(MODULE_NAME_PLACEHOLDER, fields.moduleName)
+    .replaceAll(MODULE_NAME_ALL_CAPS_PLACEHOLDER, moduleNameAllCaps)
+    .replace(LOCAL_PLACEHOLDER, extraFields?.local ? "✅" : "❌")
+    .replace(
+      SERIALIZABLE_PLACEHOLDER,
+      extraFields?.serializable ? "✅" : "beta"
+    )
+    .replace(PY_SUPPORT_PLACEHOLDER, extraFields?.pySupport ? "✅" : "❌");
+
+  const docPath = path.join(
+    INTEGRATIONS_DOCS_PATH,
+    `${packageNameShortSnakeCase}.ipynb`
+  );
+  await fs.promises.writeFile(docPath, docTemplate);
+  const prettyDocPath = docPath.split("docs/core_docs/")[1];
+
+  const updatePythonDocUrlText = `  ${redBackground(
+    "- Update the Python documentation URL with the proper URL."
+  )}`;
+  const successText = `\nSuccessfully created new LLM integration doc at ${prettyDocPath}.`;
+
+  console.log(
+    `${greenText(successText)}\n
+${boldText("Next steps:")}
+${extraFields?.pySupport ? updatePythonDocUrlText : ""}
+  - Run all code cells in the generated doc to record the outputs.
+  - Add extra sections on integration specific features.\n`
+  );
+}
diff --git a/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb b/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb
index 57019e3eb0d6..1f6508d8a861 100644
--- a/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb
+++ b/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb
@@ -24,9 +24,9 @@
     "\n",
     "- TODO: Make sure Python integration doc link is correct, if applicable.\n",
     "\n",
-    "| Class | Package | Local | Serializable | [PY support](https:/python.langchain.com/v0.2/docs/integrations/chat/__package_name_short_snake_case__) | Package downloads | Package latest |\n",
+    "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/__package_name_short_snake_case__) | Package downloads | Package latest |\n",
     "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n",
-    "| [__ModuleName__](https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html) | [__package_name_pretty__](https://api.js.langchain.com/modules/__package_name_snake_case__.html) | __local__ | __serializable__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name_pretty__?style=flat-square&label=%20) | ![NPM - Version](https://img.shields.io/npm/v/__package_name_pretty__?style=flat-square&label=%20) |\n",
+    "| [__ModuleName__](https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html) | [__package_name_pretty__](https://api.js.langchain.com/modules/__package_name_snake_case__.html) | __local__ | __serializable__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name_pretty__?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/__package_name_pretty__?style=flat-square&label=%20&) |\n",
     "\n",
     "### Model features\n",
     "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n",
diff --git a/libs/langchain-scripts/src/cli/docs/templates/document_loaders.ipynb b/libs/langchain-scripts/src/cli/docs/templates/document_loaders.ipynb
new file mode 100644
index 000000000000..09781453dea8
--- /dev/null
+++ b/libs/langchain-scripts/src/cli/docs/templates/document_loaders.ipynb
@@ -0,0 +1,181 @@
+{
+ "cells": [
+  {
+   "cell_type": "raw",
+   "metadata": {},
+   "source": [
+    "---\n",
+    "sidebar_label: __ModuleName__\n",
+    "__node_only_sidebar__\n",
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
"metadata": {}, + "source": [ + "# __ModuleName__\n", + "\n", + "- TODO: Make sure API reference link is correct.\n", + "\n", + "__node_only_tooltip__\n", + "\n", + "This notebook provides a quick overview for getting started with `__ModuleName__` [document loaders](/docs/concepts/#document-loaders). For detailed documentation of all `__ModuleName__` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders___fs_or_web_____file_name__.__ModuleName__.html).\n", + "\n", + "- TODO: Add any other relevant links, like information about underlying API, etc.\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "- TODO: Fill in table features.\n", + "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n", + "- TODO: Make sure API reference links are correct.\n", + "\n", + "| Class | Package | Compatibility | Local | [PY support](https://python.langchain.com/docs/integrations/document_loaders/__file_name__)| \n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [__ModuleName__](https://api.js.langchain.com/classes/langchain_community_document_loaders___fs_or_web_____file_name__.__ModuleName__.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_document_loaders___fs_or_web_____file_name__.html) | __fs_support__ | __local__ | __py_support__ |\n", + "\n", + "## Setup\n", + "\n", + "- TODO: Update with relevant info.\n", + "\n", + "To access `__ModuleName__` document loader you'll need to install the `@langchain/community` integration package, and create a **__ModuleName__** account and get an API key.\n", + "\n", + "### Credentials\n", + "\n", + "- TODO: Update with relevant info.\n", + "\n", + "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the `__MODULE_NAME_ALL_CAPS___API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export __MODULE_NAME_ALL_CAPS___API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain __ModuleName__ integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:\n", + "\n", + "- TODO: Update model instantiation with relevant params." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "import { __ModuleName__ } from \"@langchain/community/document_loaders/__fs_or_web_import_path__/__file_name__\"\n", + "\n", + "const loader = new __ModuleName__({\n", + " // required params = ...\n", + " // optional params = ...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load\n", + "\n", + "- TODO: Run cells to show loading capabilities" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## TODO: Any functionality specific to this document loader\n", + "\n", + "E.g. using specific configs for different loading behavior. Delete if not relevant." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders___fs_or_web_____file_name__.__ModuleName__.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb b/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb new file mode 100644 index 000000000000..eac6a24b611d --- /dev/null +++ b/libs/langchain-scripts/src/cli/docs/templates/llms.ipynb @@ -0,0 +1,221 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "67db2992", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: __ModuleName__\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "9597802c", + "metadata": {}, + "source": [ + "# __ModuleName__\n", + "\n", + "- [ ] TODO: Make sure API reference link is correct\n", + "\n", + "This will help you get started with __ModuleName__ completion models (LLMs) using LangChain. 
For detailed documentation on `__ModuleName__` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "- TODO: Fill in table features.\n", + "- TODO: Remove JS support link if not relevant, otherwise ensure link is correct.\n", + "- TODO: Make sure API reference links are correct.\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/llms/__package_name_short_snake_case__) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [__ModuleName__](https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html) | [__package_name_pretty__](https://api.js.langchain.com/modules/__package_name_snake_case__.html) | __local__ | __serializable__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name_pretty__?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/__package_name_pretty__?style=flat-square&label=%20&) |\n", + "\n", + "## Setup\n", + "\n", + "- [ ] TODO: Update with relevant info.\n", + "\n", + "To access __ModuleName__ models you'll need to create a/an __ModuleName__ account, get an API key, and install the `__package_name_pretty__` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "- TODO: Update with relevant info.\n", + "\n", + "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the `__MODULE_NAME_ALL_CAPS___API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export __MODULE_NAME_ALL_CAPS___API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain __ModuleName__ integration lives in the `__package_name_pretty__` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " __package_name_pretty__\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "0a760037", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:\n", + "\n", + "- TODO: Update model instantiation with relevant params." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a0562a13", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "import { __ModuleName__ } from \"__import_path__\"\n", + "\n", + "const llm = new __ModuleName__({\n", + " model: \"model-name\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "0ee90032", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "- [ ] TODO: Run cells so output can be seen." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "035dea0f", + "metadata": { + "tags": [], + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "const inputText = \"__ModuleName__ is an AI company that \"\n", + "\n", + "const completion = await llm.invoke(inputText)\n", + "completion" + ] + }, + { + "cell_type": "markdown", + "id": "add38532", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our completion model with a prompt template like so:\n", + "\n", + "- TODO: Run cells so output can be seen." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "078e9db2", + "metadata": { + "vscode": { + "languageId": "typescript" + } + }, + "outputs": [], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"How to say {input} in {output_language}:\\n\")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "e99eef30", + "metadata": {}, + "source": [ + "## TODO: Any functionality specific to this model provider\n", + "\n", + "E.g. creating/using finetuned models via this provider. Delete if not relevant" + ] + }, + { + "cell_type": "markdown", + "id": "e9bdfcef", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.11.1 64-bit", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + }, + "vscode": { + "interpreter": { + "hash": "e971737741ff4ec9aff7dc6155a1060a59a8a6d52c757dbbe66bf8ee389494b1" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/yarn.lock b/yarn.lock index 62d276842b77..b323c7092a91 100644 --- a/yarn.lock +++ b/yarn.lock @@ -12340,6 +12340,7 @@ __metadata: "@swc/core": ^1.3.90 "@swc/jest": ^0.2.29 "@tsconfig/recommended": ^1.0.3 + "@types/lodash": ^4 "@typescript-eslint/eslint-plugin": ^6.12.0 "@typescript-eslint/parser": ^6.12.0 axios: ^1.6.7 @@ -12355,6 +12356,7 @@ __metadata: glob: ^10.3.10 jest: ^29.5.0 jest-environment-node: ^29.6.4 + lodash: ^4.17.21 prettier: ^2.8.3 readline: ^1.3.0 release-it: ^15.10.1 @@ -31603,8 +31605,7 @@ __metadata: js-yaml: ^4.1.0 jsdom: ^22.1.0 jsonpointer: ^5.0.1 - langchainhub: ~0.0.8 - langsmith: ~0.1.30 + langsmith: ~0.1.40 mammoth: ^1.5.1 ml-distance: ^4.0.0 mongodb: ^5.2.0 @@ -31867,6 +31868,31 @@ __metadata: languageName: node linkType: hard +"langsmith@npm:~0.1.40": + version: 0.1.40 + resolution: "langsmith@npm:0.1.40" + dependencies: + "@types/uuid": ^9.0.1 + commander: ^10.0.1 + p-queue: ^6.6.2 + p-retry: 4 + semver: ^7.6.3 + uuid: ^9.0.0 + peerDependencies: + "@langchain/core": "*" + langchain: "*" + openai: "*" + peerDependenciesMeta: + "@langchain/core": + optional: true + langchain: + optional: true + openai: + optional: true + checksum: 
8c5bcf5137e93a9a17203fbe21d6a61f45c98fccafc2040d56e9cc15a4ee432456d986adf0e590d8c436b72d18143053ce6e65f021115f1596dd4519ec2805d7 + languageName: node + linkType: hard + "language-subtag-registry@npm:^0.3.20, language-subtag-registry@npm:~0.3.2": version: 0.3.22 resolution: "language-subtag-registry@npm:0.3.22"
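Past the lockfile noise, one observation ties the two new doc generators together: each reduces to a pure placeholder-substitution pass over a checked-in notebook template. A hedged sketch of that mechanic; `fillTemplate` and the sample values are illustrative, not part of the patch:

```typescript
// Review aid, not part of the patch: the essence of llms.ts and
// document_loaders.ts above is a replaceAll pipeline from template to
// generated notebook. Placeholder keys mirror the constants defined in
// the diff; the values below are made up for illustration.
import * as fs from "node:fs";

function fillTemplate(
  templatePath: string,
  values: Record<string, string>
): string {
  let doc = fs.readFileSync(templatePath, "utf-8");
  for (const [placeholder, value] of Object.entries(values)) {
    doc = doc.replaceAll(placeholder, value);
  }
  return doc;
}

const filled = fillTemplate("./src/cli/docs/templates/llms.ipynb", {
  __ModuleName__: "ExampleLLM",
  __package_name_pretty__: "@langchain/community",
  __MODULE_NAME_ALL_CAPS__: "EXAMPLE",
});
console.log(filled.includes("__ModuleName__")); // -> false once filled
```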