From f44584c26c64e7d72dd6cb2a11f170248d6d963b Mon Sep 17 00:00:00 2001 From: Sam Goldman Date: Sat, 13 Jul 2024 15:50:12 -0400 Subject: [PATCH] docs: add contact page, improve tables and examples --- CHANGELOG.md | 2 +- README.md | 243 ++++++++++++------------ docs/README.md | 358 ++++++++++++++++++++--------------- docs/SUMMARY.md | 5 +- docs/contact-us.md | 9 + docs/providers/README.md | 2 +- docs/providers/ai21.md | 58 +++--- docs/providers/anthropic.md | 74 +++++--- docs/providers/bedrock.md | 98 ++++++---- docs/providers/cohere.md | 72 ++++--- docs/providers/gemini.md | 62 +++--- docs/providers/groq.md | 70 ++++--- docs/providers/mistral.md | 86 +++++---- docs/providers/openai.md | 100 +++++----- docs/providers/perplexity.md | 74 +++++--- scripts/docs/generate.ts | 12 +- src/models.ts | 4 +- 17 files changed, 765 insertions(+), 564 deletions(-) create mode 100644 docs/contact-us.md diff --git a/CHANGELOG.md b/CHANGELOG.md index ddd792e..018012b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -# token.js +# Token.js ## 0.0.1 diff --git a/README.md b/README.md index d9d735b..55b7049 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,25 @@ -# token.js +# Token.js -Integrate 9 LLM providers with a single Typescript SDK using OpenAIs format. Free and opensource with no proxy server required. - -### [Documentation](http://tokenjs.ai) +Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. Free and open source. No proxy server required. ## Features -* Define prompts in OpenAIs format and have them translated automatially for each LLM provider. -* Support for tools, JSON output, image inputs, streaming, and more. -* Support for 9 popular LLM providers: AI21, Anthropic, AWS Bedrock, Cohere, Gemini, Groq, Mistral, OpenAI, and Perplexity with more coming soon. -* Free and opensource under GPLv3. -* No proxy server required. +* Use OpenAI's format to call 60+ LLMs from 9 providers. +* Supports tools, JSON outputs, image inputs, streaming, and more. +* Runs completely on the client side. No proxy server needed. +* Free and open source under GPLv3. + +## Supported Providers + +* AI21 +* Anthropic +* AWS Bedrock +* Cohere +* Gemini +* Groq +* Mistral +* OpenAI +* Perplexity ## Setup @@ -20,173 +29,165 @@ Integrate 9 LLM providers with a single Typescript SDK using OpenAIs format. Fre npm install token.js ``` -### Environment Variables - -```env -OPENAI_API_KEY= -GEMINI_API_KEY= -ANTHROPIC_API_KEY= -``` - ### Usage +Import the Token.js client and call the `create` function with a prompt in OpenAI's format. Specify the model and LLM provider using their respective fields. 
+ +```bash +OPENAI_API_KEY= +``` ```ts -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' +// Create the Token.js client const tokenjs = new TokenJS() -const messages: ChatCompletionMessageParam[] = [ - { - role: 'user', - content: `How are you?`, - }, -] - -// Call OpenAI -const result = await tokenjs.chat.completions.create({ - provider: 'openai', - model: 'gpt-4o', - messages, -}) - -// Call Gemini -const result = await tokenjs.chat.completions.create({ - provider: 'gemini', - model: 'gemini-1.5-pro', - messages, -}) - -// Call Anthropic -const result = await tokenjs.chat.completions.create({ - provider: 'anthropic', - model: 'claude-2.0', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'openai', + model: 'gpt-4o', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -## Access Credential Configuration +### Access Credentials -token.js uses environment variables to configure access to different LLM providers. Configure your api keys using the following environment variables: +We recommend using environment variables to configure the credentials for each LLM provider. -``` +```bash # OpenAI OPENAI_API_KEY= - # AI21 AI21_API_KEY= - # Anthropic ANTHROPIC_API_KEY= - # Cohere COHERE_API_KEY= - # Gemini GEMINI_API_KEY= - # Groq GROQ_API_KEY= - # Mistral MISTRAL_API_KEY= - # Perplexity PERPLEXITY_API_KEY= - # AWS Bedrock AWS_REGION_NAME= AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= ``` -Then you can select the `provider` and `model` you would like to use when calling the `create` function, and token.js will use the correct access credentials for the provider. - -## Streaming +### Streaming -token.js supports streaming for all providers that support it. +Token.js supports streaming responses for all providers that offer it. ```ts import { TokenJS } from 'token.js' const tokenjs = new TokenJS() -const result = await tokenjs.chat.completions.create({ - stream: true, - provider: 'gemini', - model: 'gemini-1.5-pro', - messages: [ - { - role: 'user', - content: `How are you?`, - }, - ], -}) -for await (const part of result) { - process.stdout.write(part.choices[0]?.delta?.content || '') +async function main() { + const result = await tokenjs.chat.completions.create({ + stream: true, + provider: 'openai', + model: 'gpt-4o', + messages: [ + { + role: 'user', + content: `Tell me about yourself.`, + }, + ], + }) + + for await (const part of result) { + process.stdout.write(part.choices[0]?.delta?.content || '') + } } +main() ``` -## Tools +### Function Calling -token.js supports tools for all providers and models that support it. +Token.js supports the function calling tool for all providers and models that offer it. ```ts import { TokenJS, ChatCompletionTool } from 'token.js' const tokenjs = new TokenJS() -const tools: ChatCompletionTool[] = [ - { - type: 'function', - function: { - name: 'getCurrentWeather', - description: 'Get the current weather in a given location', - parameters: { - type: 'object', - properties: { - location: { - type: 'string', - description: 'The city and state, e.g. 
San Francisco, CA', +async function main() { + const tools: ChatCompletionTool[] = [ + { + type: 'function', + function: { + name: 'get_current_weather', + description: 'Get the current weather in a given location', + parameters: { + type: 'object', + properties: { + location: { + type: 'string', + description: 'The city and state, e.g. San Francisco, CA', + }, }, - unit: { type: 'string', enum: ['celsius', 'fahrenheit'] }, + required: ['location'], }, - required: ['location', 'unit'], }, }, - }, -] + ] + + const result = await tokenjs.chat.completions.create({ + provider: 'gemini', + model: 'gemini-1.5-pro', + messages: [ + { + role: 'user', + content: `What's the weather like in San Francisco?`, + }, + ], + tools, + tool_choice: 'auto', + }) -const result = await tokenjs.chat.completions.create({ - provider: 'gemini', - model: 'gemini-1.5-pro', - messages: [ - { - role: 'user', - content: `What's the weather like in San Francisco?`, - }, - ], - tools, - tool_choice: 'auto', -}) + console.log(result.choices[0].message.tool_calls) +} +main() ``` -## Providers +## Feature Compatibility -Not every feature is supported by every provider and model. This table provides a general overview of what features are supported by each provider. For details on which features are supported by individual models from different providers see the [provider documentation](todo\(md\)/). +This table provides an overview of the features that Token.js supports from each LLM provider. -| Provider | Completion | Streaming | Tools | JSON Output | Image Input | +| Provider | Chat Completion | Streaming | Function Calling Tool | JSON Output | Image Input | | ---------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | -| openai | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | -| anthropic | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | -| bedrock | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | -| mistral | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | | -| cohere | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | | | -| AI21 | :white\_check\_mark: | :white\_check\_mark: | | | | +| OpenAI | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | +| Anthropic | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | +| Bedrock | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | +| Mistral | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | +| Cohere | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | :heavy_minus_sign: | +| AI21 | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | | Gemini | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | -| Groq | :white\_check\_mark: | :white\_check\_mark: | | :white\_check\_mark: | | -| Perplexity | :white\_check\_mark: | :white\_check\_mark: | | | | +| Groq | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | :white\_check\_mark: | 
:heavy_minus_sign: | +| Perplexity | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | -If there are more providers or features you would like to see implemented in token.js please let us know by opening an issue! +**Note**: Certain LLMs, particularly older or weaker models, do not support some features in this table. For details about these restrictions, see our [LLM provider documentation](https://docs.tokenjs.ai/providers). ## Contributing @@ -216,8 +217,16 @@ pnpm test pnpm lint ``` -### Open a pull request! +## Contact Us + +Please reach out if there's any way that we can improve Token.js! + +Here are a few ways you can reach us: +* [Discord](TODO) +* [Schedule a meeting](https://calendly.com/sam_goldman/tokenjs) +* Call or text: [+1 (516) 206-6928](tel:+15162066928) +* Email: [sam@glade.so](mailto:sam@glade.so) ## License -token.js is free and open source under the GPLv3 license. +Token.js is free and open source software licensed under [GPLv3](https://github.com/token-js/token.js/blob/main/LICENSE). diff --git a/docs/README.md b/docs/README.md index 806231f..c0beca4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,18 +1,29 @@ --- description: >- - Integrate 9 LLM providers with a single Typescript SDK using OpenAIs format. - Free and opensource with no proxy server required. + Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. + Free and open source. No proxy server required. --- -# token.js +# Token.js ## Features -* Define prompts in OpenAIs format and have them translated automatially for each LLM provider. -* Support for tools, JSON output, image inputs, streaming, and more. -* Support for 9 popular LLM providers: AI21, Anthropic, AWS Bedrock, Cohere, Gemini, Groq, Mistral, OpenAI, and Perplexity with more coming soon. -* Free and opensource under GPLv3. -* No proxy server required. +* Use OpenAI's format to call 60+ LLMs from 9 providers. +* Supports tools, JSON outputs, image inputs, streaming, and more. +* Runs completely on the client side. No proxy server needed. +* Free and open source under GPLv3. + +## Supported Providers + +* AI21 +* Anthropic +* AWS Bedrock +* Cohere +* Gemini +* Groq +* Mistral +* OpenAI +* Perplexity ## Setup @@ -46,7 +57,7 @@ bun add token.js ### Usage -Import the token.js client and call the `create` function with the same input messages you would use with OpenAIs SDK. Specify the model and LLM provider you would like use with their respective fields. +Import the Token.js client and call the `create` function with a prompt in OpenAI's format. Specify the model and LLM provider using their respective fields. 
{% tabs %} {% tab title="OpenAI" %} @@ -58,24 +69,28 @@ OPENAI_API_KEY= {% code fullWidth="false" %} ```ts -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'openai', - model: 'gpt-4o', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'openai', + model: 'gpt-4o', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` {% endcode %} {% endtab %} @@ -88,24 +103,28 @@ ANTHROPIC_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'anthropic', - model: 'claude-2.0', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'anthropic', + model: 'claude-3-sonnet-20240229', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` {% endtab %} @@ -117,24 +136,28 @@ GEMINI_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'gemini', - model: 'gemini-1.5-pro', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'gemini', + model: 'gemini-1.5-pro', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` {% endtab %} @@ -148,24 +171,28 @@ AWS_SECRET_ACCESS_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'bedrock', - model: 'amazon.titan-text-express-v1', - messages, -}) 
+async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'bedrock', + model: 'meta.llama3-70b-instruct-v1:0', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` {% endtab %} @@ -177,24 +204,28 @@ COHERE_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'cohere', - model: 'command-r', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'cohere', + model: 'command-r-plus', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` {% endtab %} @@ -206,24 +237,28 @@ MISTRAL_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'mistral', - model: 'mistral-large-2402', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'mistral', + model: 'open-mixtral-8x22b', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` {% endtab %} {% endtabs %} @@ -257,91 +292,104 @@ AWS_SECRET_ACCESS_KEY= ### Streaming -token.js supports streaming for all providers that support it. +Token.js supports streaming responses for all providers that offer it. ```ts import { TokenJS } from 'token.js' const tokenjs = new TokenJS() -const result = await tokenjs.chat.completions.create({ - stream: true, - provider: 'gemini', - model: 'gemini-1.5-pro', - messages: [ - { - role: 'user', - content: `How are you?`, - }, - ], -}) -for await (const part of result) { - process.stdout.write(part.choices[0]?.delta?.content || '') +async function main() { + const result = await tokenjs.chat.completions.create({ + stream: true, + provider: 'openai', + model: 'gpt-4o', + messages: [ + { + role: 'user', + content: `Tell me about yourself.`, + }, + ], + }) + + for await (const part of result) { + process.stdout.write(part.choices[0]?.delta?.content || '') + } } +main() ``` -### Tools +### Function Calling -token.js supports tools for all providers and models that support it. +Token.js supports the function calling tool for all providers and models that offer it.
 ```ts
 import { TokenJS, ChatCompletionTool } from 'token.js'
 
 const tokenjs = new TokenJS()
 
-const tools: ChatCompletionTool[] = [{
-  type: 'function',
-  function: {
-    name: 'getCurrentWeather',
-    description: 'Get the current weather in a given location',
-    parameters: {
-      type: 'object',
-      properties: {
-        location: {
-          type: 'string',
-          description: 'The city and state, e.g. San Francisco, CA',
-        },
-        unit: {
-          type: 'string',
-          description: 'The temperature unit, e.g. Fahrenheit or Celsius'
+async function main() {
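+  // Define the function as a tool in OpenAI's format (parameters use JSON Schema)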
+  const tools: ChatCompletionTool[] = [
+    {
+      type: 'function',
+      function: {
+        name: 'get_current_weather',
+        description: 'Get the current weather in a given location',
+        parameters: {
+          type: 'object',
+          properties: {
+            location: {
+              type: 'string',
+              description: 'The city and state, e.g. San Francisco, CA',
+            },
+          },
+          required: ['location'],
         },
       },
-      required: ['location', 'unit'],
     },
-  },
-}]
+  ]
+
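+  // Request a completion; tool_choice: 'auto' lets the model decide whether to call the function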
+  const result = await tokenjs.chat.completions.create({
+    provider: 'gemini',
+    model: 'gemini-1.5-pro',
+    messages: [
+      {
+        role: 'user',
+        content: `What's the weather like in San Francisco?`,
+      },
+    ],
+    tools,
+    tool_choice: 'auto',
+  })
 
-const result = await tokenjs.chat.completions.create({
-  provider: 'gemini',
-  model: 'gemini-1.5-pro',
-  messages: [
-    {
-      role: 'user',
-      content: `What's the weather like in San Francisco?`,
-    },
-  ],
-  tools,
-  tool_choice: 'auto',
-})
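+  // The requested function calls, returned in OpenAI's tool_calls format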
+  console.log(result.choices[0].message.tool_calls)
+}
+main()
 ```
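+
+The `tool_calls` array in the response follows OpenAI's format, so you can close the loop the same way you would with the OpenAI SDK. The sketch below continues inside `main()` and assumes Token.js accepts `tool` role messages the way OpenAI does; `getCurrentWeather` is a hypothetical helper you would implement yourself.
+
+```ts
+  // Hypothetical local implementation of the function described in `tools`
+  const getCurrentWeather = async (location: string) => {
+    return { location, temperature: 72, unit: 'fahrenheit' }
+  }
+
+  const toolCall = result.choices[0].message.tool_calls?.[0]
+  if (toolCall) {
+    // Parse the model's arguments and run the function locally
+    const args = JSON.parse(toolCall.function.arguments)
+    const weather = await getCurrentWeather(args.location)
+
+    // Send the function result back so the model can answer in natural language
+    const followUp = await tokenjs.chat.completions.create({
+      provider: 'gemini',
+      model: 'gemini-1.5-pro',
+      messages: [
+        { role: 'user', content: `What's the weather like in San Francisco?` },
+        result.choices[0].message,
+        { role: 'tool', tool_call_id: toolCall.id, content: JSON.stringify(weather) },
+      ],
+    })
+    console.log(followUp.choices[0].message.content)
+  }
+```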
 
## Feature Compatibility -Not every feature is supported by every provider and model. This table provides a general overview of what features are supported by each provider. For details on which features are supported by individual models from different providers see the provider documentation. +This table provides an overview of the features that Token.js supports from each LLM provider. -| Provider | Completion | Streaming | Tools | JSON Output | Image Input | +| Provider | Chat Completion | Streaming | Function Calling Tool | JSON Output | Image Input | | ---------- | -------------------- | -------------------- | -------------------- | -------------------- | -------------------- | | OpenAI | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | | Anthropic | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | | Bedrock | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | -| Mistral | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | | -| Cohere | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | | | -| AI21 | :white\_check\_mark: | :white\_check\_mark: | | | | +| Mistral | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | +| Cohere | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | :heavy_minus_sign: | +| AI21 | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | | Gemini | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | :white\_check\_mark: | -| Groq | :white\_check\_mark: | :white\_check\_mark: | | :white\_check\_mark: | | -| Perplexity | :white\_check\_mark: | :white\_check\_mark: | | | | +| Groq | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | :white\_check\_mark: | :heavy_minus_sign: | +| Perplexity | :white\_check\_mark: | :white\_check\_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | -If there are providers or features you would like to see implemented in token.js please let us know by opening an issue on [Github](https://github.com/token-js/token.js?tab=readme-ov-file#contributing)! +**Note**: Certain LLMs, particularly older or weaker models, do not support some features in this table. For details about these restrictions, see our [LLM provider documentation](providers/README.md). -## Contributing +## License -token.js is free and opensource under the GPLv3 license. If you would like to contribute, [please visit our Github.](https://github.com/token-js/token.js?tab=readme-ov-file#contributing) +Token.js is free and open source software licensed under [GPLv3](https://github.com/token-js/token.js/blob/main/LICENSE). 
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index b47abd3..fcc4548 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,6 +1,6 @@ -# Table of contents +# Table of Contents -* [token.js](README.md) +* [Token.js](README.md) * [Providers](providers/README.md) * [AI21](providers/ai21.md) * [Anthropic](providers/anthropic.md) @@ -11,3 +11,4 @@ * [Mistral](providers/mistral.md) * [OpenAI](providers/openai.md) * [Perplexity](providers/perplexity.md) +* [Contact Us](contact-us.md) diff --git a/docs/contact-us.md b/docs/contact-us.md new file mode 100644 index 0000000..ed5d1ee --- /dev/null +++ b/docs/contact-us.md @@ -0,0 +1,9 @@ +# Contact Us + +Please reach out if there's any way that we can improve Token.js! + +Here are a few ways you can reach us: +* [Discord](TODO) +* [Schedule a meeting](https://calendly.com/sam_goldman/tokenjs) +* Call or text: [+1 (516) 206-6928](tel:+15162066928) +* Email: [sam@glade.so](mailto:sam@glade.so) diff --git a/docs/providers/README.md b/docs/providers/README.md index 62808c2..147b474 100644 --- a/docs/providers/README.md +++ b/docs/providers/README.md @@ -1,5 +1,5 @@ --- -description: Learn how to work with different LLM providers and models using token.js. +description: Integrate LLM providers and models using Token.js. --- # Providers diff --git a/docs/providers/ai21.md b/docs/providers/ai21.md index a742524..8a51e60 100644 --- a/docs/providers/ai21.md +++ b/docs/providers/ai21.md @@ -1,6 +1,8 @@ # AI21 -### Usage +[Get an AI21 API key](https://studio.ai21.com/account/api-key?source=docs) + +## Usage {% code title=".env" %} ```bash @@ -9,32 +11,44 @@ AI21_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'ai21', - model: 'jamba-instruct', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'ai21', + model: 'jamba-instruct', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -### [AI21 Documentation](https://docs.ai21.com/reference/jamba-instruct-api) - -### Supported Models +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| -------------- | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| jamba-instruct | ✅ | ➖ | ➖ | ➖ | ➖ | ✅ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | + -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| -------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| jamba-instruct | ✅ | | | | | ✅ | +## Additional Resources +* [AI21 Documentation](https://docs.ai21.com) diff --git a/docs/providers/anthropic.md b/docs/providers/anthropic.md index 125375f..4d026b7 100644 --- 
a/docs/providers/anthropic.md +++ b/docs/providers/anthropic.md @@ -1,6 +1,8 @@ # Anthropic -### Usage +[Get an Anthropic API key](https://console.anthropic.com/settings/keys) + +## Usage {% code title=".env" %} ```bash @@ -9,38 +11,50 @@ ANTHROPIC_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'anthropic', - model: 'claude-2.0', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'anthropic', + model: 'claude-3-sonnet-20240229', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -### [Anthropic Documentation](https://docs.anthropic.com/en/docs/welcome) - -### Supported Models - -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| -------------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| claude-3-5-sonnet-20240620 | ✅ | | | ✅ | ✅ | | -| claude-3-opus-20240229 | ✅ | | | ✅ | ✅ | | -| claude-3-sonnet-20240229 | ✅ | | | ✅ | ✅ | | -| claude-3-haiku-20240307 | ✅ | | | ✅ | ✅ | | -| claude-2.1 | ✅ | | | | | | -| claude-2.0 | ✅ | | | | | | -| claude-instant-1.2 | ✅ | | | | | | - +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| -------------------------- | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| claude-3-5-sonnet-20240620 | ✅ | ➖ | ➖ | ✅ | ✅ | ➖ | +| claude-3-opus-20240229 | ✅ | ➖ | ➖ | ✅ | ✅ | ➖ | +| claude-3-sonnet-20240229 | ✅ | ➖ | ➖ | ✅ | ✅ | ➖ | +| claude-3-haiku-20240307 | ✅ | ➖ | ➖ | ✅ | ✅ | ➖ | +| claude-2.1 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| claude-2.0 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| claude-instant-1.2 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | + + +## Additional Resources + +* [Anthropic Documentation](https://docs.anthropic.com) diff --git a/docs/providers/bedrock.md b/docs/providers/bedrock.md index 563c8e9..7fb4259 100644 --- a/docs/providers/bedrock.md +++ b/docs/providers/bedrock.md @@ -1,6 +1,8 @@ -# Bedrock +# AWS Bedrock -### Usage +[Get AWS credentials](https://aws.amazon.com/console/) + +## Usage {% code title=".env" %} ```bash @@ -11,50 +13,62 @@ AWS_SECRET_ACCESS_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'bedrock', - model: 
'amazon.titan-text-express-v1', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'bedrock', + model: 'meta.llama3-70b-instruct-v1:0', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -### [Bedrock Documentation](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html) - -### Supported Models - -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| --------------------------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| amazon.titan-text-lite-v1 | ✅ | | | | | | -| amazon.titan-text-express-v1 | ✅ | | | | | | -| anthropic.claude-3-opus-20240229-v1:0 | ✅ | | | ✅ | ✅ | | -| anthropic.claude-3-sonnet-20240229-v1:0 | ✅ | | | ✅ | ✅ | | -| anthropic.claude-3-haiku-20240307-v1:0 | ✅ | | | ✅ | ✅ | | -| anthropic.claude-v2:1 | ✅ | | | | | | -| anthropic.claude-v2 | ✅ | | | | | | -| anthropic.claude-instant-v1 | ✅ | | | | | | -| cohere.command-r-plus-v1:0 | ✅ | | | | ✅ | | -| cohere.command-r-v1:0 | ✅ | | | | ✅ | | -| cohere.command-text-v14 | ✅ | | | | | | -| cohere.command-light-text-v14 | ✅ | | | | | | -| meta.llama3-8b-instruct-v1:0 | ✅ | | | | | | -| meta.llama3-70b-instruct-v1:0 | ✅ | | | | | | -| meta.llama2-13b-chat-v1 | ✅ | | | | | | -| meta.llama2-70b-chat-v1 | ✅ | | | | | | -| mistral.mistral-7b-instruct-v0:2 | ✅ | | | | | | -| mistral.mixtral-8x7b-instruct-v0:1 | ✅ | | | | | | -| mistral.mistral-large-2402-v1:0 | ✅ | | | | ✅ | | +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| --------------------------------------- | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| amazon.titan-text-lite-v1 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| amazon.titan-text-express-v1 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| anthropic.claude-3-opus-20240229-v1:0 | ✅ | ➖ | ➖ | ✅ | ✅ | ➖ | +| anthropic.claude-3-sonnet-20240229-v1:0 | ✅ | ➖ | ➖ | ✅ | ✅ | ➖ | +| anthropic.claude-3-haiku-20240307-v1:0 | ✅ | ➖ | ➖ | ✅ | ✅ | ➖ | +| anthropic.claude-v2:1 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| anthropic.claude-v2 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| anthropic.claude-instant-v1 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| cohere.command-r-plus-v1:0 | ✅ | ➖ | ➖ | ➖ | ✅ | ➖ | +| cohere.command-r-v1:0 | ✅ | ➖ | ➖ | ➖ | ✅ | ➖ | +| cohere.command-text-v14 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| cohere.command-light-text-v14 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| meta.llama3-8b-instruct-v1:0 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| meta.llama3-70b-instruct-v1:0 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| meta.llama2-13b-chat-v1 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| meta.llama2-70b-chat-v1 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| mistral.mistral-7b-instruct-v0:2 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| mistral.mixtral-8x7b-instruct-v0:1 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| mistral.mistral-large-2402-v1:0 | ✅ | ➖ | ➖ | ➖ | ✅ | ➖ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | + + +## Additional Resources +* [AWS Bedrock Documentation](https://docs.aws.amazon.com/bedrock/) diff --git a/docs/providers/cohere.md b/docs/providers/cohere.md index c9fec4b..33f336c 100644 --- a/docs/providers/cohere.md +++ b/docs/providers/cohere.md @@ -1,6 +1,8 @@ # Cohere -### Usage +[Get a 
Cohere API key](https://dashboard.cohere.com/api-keys) + +## Usage {% code title=".env" %} ```bash @@ -9,37 +11,49 @@ COHERE_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'cohere', - model: 'command-r', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'cohere', + model: 'command-r-plus', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -### [Cohere Documentation](https://docs.cohere.com) - -### Supported Models - -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| --------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| command-r-plus | ✅ | | | | ✅ | | -| command-r | ✅ | | | | ✅ | | -| command | ✅ | | | | | | -| command-nightly | ✅ | | | | ✅ | | -| command-light | ✅ | | | | | | -| command-light-nightly | ✅ | | | | | | - +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| --------------------- | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| command-r-plus | ✅ | ➖ | ➖ | ➖ | ✅ | ➖ | +| command-r | ✅ | ➖ | ➖ | ➖ | ✅ | ➖ | +| command | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| command-nightly | ✅ | ➖ | ➖ | ➖ | ✅ | ➖ | +| command-light | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| command-light-nightly | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | + + +## Additional Resources + +* [Cohere Documentation](https://docs.cohere.com) diff --git a/docs/providers/gemini.md b/docs/providers/gemini.md index 3c4164d..debc1a1 100644 --- a/docs/providers/gemini.md +++ b/docs/providers/gemini.md @@ -1,6 +1,8 @@ # Gemini -### Usage +[Get a Gemini API key](https://ai.google.dev/gemini-api/docs/api-key) + +## Usage {% code title=".env" %} ```bash @@ -9,34 +11,46 @@ GEMINI_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'gemini', - model: 'gemini-1.5-pro', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'gemini', + model: 'gemini-1.5-pro', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} 
+main() ``` -### [Gemini Documentation](https://ai.google.dev/gemini-api/docs) - -### Supported Models +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| ---------------- | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| gemini-1.5-pro | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gemini-1.5-flash | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gemini-1.0-pro | ✅ | ➖ | ➖ | ➖ | ✅ | ✅ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | + -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| ---------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| gemini-1.5-pro | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gemini-1.5-flash | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gemini-1.0-pro | ✅ | | | | ✅ | ✅ | +## Additional Resources +* [Gemini Documentation](https://ai.google.dev/gemini-api/docs) diff --git a/docs/providers/groq.md b/docs/providers/groq.md index 07936ac..2fbb437 100644 --- a/docs/providers/groq.md +++ b/docs/providers/groq.md @@ -1,6 +1,8 @@ # Groq -### Usage +[Get a Groq API key](https://console.groq.com/keys) + +## Usage {% code title=".env" %} ```bash @@ -9,36 +11,48 @@ GROQ_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'groq', - model: 'llama3-70b-8192', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'groq', + model: 'llama3-70b-8192', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -### [Groq Documentation](https://console.groq.com/docs/quickstart) - -### Supported Models - -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| ------------------ | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| llama3-8b-8192 | ✅ | | | | | | -| llama3-70b-8192 | ✅ | | ✅ | | | | -| mixtral-8x7b-32768 | ✅ | | | | | | -| gemma-7b-it | ✅ | | ✅ | | | | -| gemma2-9b-it | ✅ | | ✅ | | | | - +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| ------------------ | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| llama3-8b-8192 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| llama3-70b-8192 | ✅ | ➖ | ✅ | ➖ | ➖ | ➖ | +| mixtral-8x7b-32768 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| gemma-7b-it | ✅ | ➖ | ✅ | ➖ | ➖ | ➖ | +| gemma2-9b-it | ✅ | ➖ | ✅ | ➖ | ➖ | ➖ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | + + +## Additional Resources + +* [Groq Documentation](https://console.groq.com/docs/quickstart) diff 
--git a/docs/providers/mistral.md b/docs/providers/mistral.md index e575e8e..a1796ce 100644 --- a/docs/providers/mistral.md +++ b/docs/providers/mistral.md @@ -1,6 +1,8 @@ # Mistral -### Usage +[Get a Mistral API key](https://console.mistral.ai/api-keys/) + +## Usage {% code title=".env" %} ```bash @@ -9,45 +11,57 @@ MISTRAL_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'mistral', - model: 'mistral-large-2402', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'mistral', + model: 'mistral-large-2402', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -### [Mistral Documentation](https://docs.mistral.ai) - -### Supported Models - -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| ----------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| open-mistral-7b | ✅ | | ✅ | | | | -| mistral-tiny-2312 | ✅ | | ✅ | | | | -| open-mixtral-8x7b | ✅ | | | | | | -| mistral-small-2312 | ✅ | | | | | | -| open-mixtral-8x22b | ✅ | | ✅ | | ✅ | | -| open-mixtral-8x22b-2404 | ✅ | | ✅ | | ✅ | | -| mistral-small-latest | ✅ | | | | ✅ | | -| mistral-small-2402 | ✅ | | | | ✅ | | -| mistral-medium-latest | ✅ | | | | | | -| mistral-medium-2312 | ✅ | | | | | | -| mistral-large-latest | ✅ | | ✅ | | ✅ | | -| mistral-large-2402 | ✅ | | ✅ | | ✅ | | -| codestral-latest | ✅ | | ✅ | | | | -| codestral-2405 | ✅ | | ✅ | | | | +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| ----------------------- | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| open-mistral-7b | ✅ | ➖ | ✅ | ➖ | ➖ | ➖ | +| mistral-tiny-2312 | ✅ | ➖ | ✅ | ➖ | ➖ | ➖ | +| open-mixtral-8x7b | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| mistral-small-2312 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| open-mixtral-8x22b | ✅ | ➖ | ✅ | ➖ | ✅ | ➖ | +| open-mixtral-8x22b-2404 | ✅ | ➖ | ✅ | ➖ | ✅ | ➖ | +| mistral-small-latest | ✅ | ➖ | ➖ | ➖ | ✅ | ➖ | +| mistral-small-2402 | ✅ | ➖ | ➖ | ➖ | ✅ | ➖ | +| mistral-medium-latest | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| mistral-medium-2312 | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| mistral-large-latest | ✅ | ➖ | ✅ | ➖ | ✅ | ➖ | +| mistral-large-2402 | ✅ | ➖ | ✅ | ➖ | ✅ | ➖ | +| codestral-latest | ✅ | ➖ | ✅ | ➖ | ➖ | ➖ | +| codestral-2405 | ✅ | ➖ | ✅ | ➖ | ➖ | ➖ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | + + +## Additional Resources +* [Mistral Documentation](https://docs.mistral.ai) diff --git a/docs/providers/openai.md b/docs/providers/openai.md index 4526dba..9143348 100644 --- a/docs/providers/openai.md +++ b/docs/providers/openai.md @@ -1,6 +1,8 @@ # OpenAI -### Usage +[Get an OpenAI API 
key](https://platform.openai.com/account/api-keys) + +## Usage {% code title=".env" %} ```bash @@ -9,52 +11,64 @@ OPENAI_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'openai', - model: 'gpt-4o', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'openai', + model: 'gpt-4o', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -### [OpenAI Documentation](https://platform.openai.com/docs/introduction) - -### Supported Models - -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| ---------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| gpt-4o | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gpt-4o-2024-05-13 | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gpt-4-turbo | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gpt-4-turbo-2024-04-09 | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gpt-4-0125-preview | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gpt-4-turbo-preview | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gpt-4-1106-preview | ✅ | | ✅ | ✅ | ✅ | ✅ | -| gpt-4-vision-preview | ✅ | | ✅ | ✅ | | ✅ | -| gpt-4 | ✅ | | | | ✅ | ✅ | -| gpt-4-0314 | ✅ | | | | | ✅ | -| gpt-4-0613 | ✅ | | | | ✅ | ✅ | -| gpt-4-32k | ✅ | | | | | ✅ | -| gpt-4-32k-0314 | ✅ | | | | | ✅ | -| gpt-4-32k-0613 | ✅ | | | | | ✅ | -| gpt-3.5-turbo | ✅ | | ✅ | | ✅ | ✅ | -| gpt-3.5-turbo-16k | ✅ | | | | | ✅ | -| gpt-3.5-turbo-0301 | ✅ | | | | | ✅ | -| gpt-3.5-turbo-0613 | ✅ | | | | ✅ | ✅ | -| gpt-3.5-turbo-1106 | ✅ | | ✅ | | ✅ | ✅ | -| gpt-3.5-turbo-0125 | ✅ | | ✅ | | ✅ | ✅ | -| gpt-3.5-turbo-16k-0613 | ✅ | | | | | ✅ | +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| ---------------------- | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| gpt-4o | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gpt-4o-2024-05-13 | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gpt-4-turbo | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gpt-4-turbo-2024-04-09 | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gpt-4-0125-preview | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gpt-4-turbo-preview | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gpt-4-1106-preview | ✅ | ➖ | ✅ | ✅ | ✅ | ✅ | +| gpt-4-vision-preview | ✅ | ➖ | ✅ | ✅ | ➖ | ✅ | +| gpt-4 | ✅ | ➖ | ➖ | ➖ | ✅ | ✅ | +| gpt-4-0314 | ✅ | ➖ | ➖ | ➖ | ➖ | ✅ | +| gpt-4-0613 | ✅ | ➖ | ➖ | ➖ | ✅ | ✅ | +| gpt-4-32k | ✅ | ➖ | ➖ | ➖ | ➖ | ✅ | +| gpt-4-32k-0314 | ✅ | ➖ | ➖ | ➖ | ➖ | ✅ | +| gpt-4-32k-0613 | ✅ | ➖ | ➖ | ➖ | ➖ | ✅ | +| gpt-3.5-turbo | ✅ | ➖ | ✅ | ➖ | ✅ | ✅ | +| gpt-3.5-turbo-16k | ✅ | ➖ | ➖ | ➖ | ➖ | ✅ | +| gpt-3.5-turbo-0301 | ✅ | ➖ | ➖ | ➖ | ➖ | ✅ | +| gpt-3.5-turbo-0613 | ✅ | ➖ | ➖ | ➖ | ✅ | ✅ | +| gpt-3.5-turbo-1106 | ✅ | ➖ | ✅ | ➖ | ✅ | ✅ | +| gpt-3.5-turbo-0125 | ✅ | ➖ | ✅ | ➖ | ✅ | ✅ | +| gpt-3.5-turbo-16k-0613 | ✅ | ➖ | ➖ | ➖ | ➖ | ✅ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot 
support it | + + +## Additional Resources +* [OpenAI Documentation](https://platform.openai.com/docs/overview) diff --git a/docs/providers/perplexity.md b/docs/providers/perplexity.md index 170df5f..fb5af3e 100644 --- a/docs/providers/perplexity.md +++ b/docs/providers/perplexity.md @@ -1,6 +1,8 @@ # Perplexity -### Usage +[Get a Perplexity API key](https://www.perplexity.ai/settings/api) + +## Usage {% code title=".env" %} ```bash @@ -9,38 +11,50 @@ PERPLEXITY_API_KEY= {% endcode %} ```typescript -import { TokenJS, ChatCompletionMessageParam } from 'token.js' +import { TokenJS } from 'token.js' -// Import and create the token.js client +// Create the Token.js client const tokenjs = new TokenJS() -// Specify OpenAI compatible messages -const messages: ChatCompletionMessageParam = [{ - role: 'user', - content: `How are you?`, -}] - -// Call the create function -const result = await tokenjs.chat.completions.create({ - // Specify the target model and provider - provider: 'perplexity', - model: 'llama-3-70b-instruct', - messages, -}) +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'perplexity', + model: 'llama-3-70b-instruct', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() ``` -### [Perplexity Documentation](https://docs.perplexity.ai/) - -### Supported Models - -| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 | -| ------------------------------ | ---------- | --------- | ----------- | ----------- | ----- | ----- | -| llama-3-sonar-small-32k-chat | ✅ | | | | | | -| llama-3-sonar-small-32k-online | ✅ | | | | | | -| llama-3-sonar-large-32k-chat | ✅ | | | | | | -| llama-3-sonar-large-32k-online | ✅ | | | | | | -| llama-3-8b-instruct | ✅ | | | | | | -| llama-3-70b-instruct | ✅ | | | | | | -| mixtral-8x7b-instruct | ✅ | | | | | | - +## Supported Models + +| Model | Chat Completion | Streaming | JSON Output | Image Input | Function Calling | N > 1 | +| ------------------------------ | --------------- | --------- | ----------- | ----------- | ---------------- | ----- | +| llama-3-sonar-small-32k-chat | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| llama-3-sonar-small-32k-online | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| llama-3-sonar-large-32k-chat | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| llama-3-sonar-large-32k-online | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| llama-3-8b-instruct | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| llama-3-70b-instruct | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | +| mixtral-8x7b-instruct | ✅ | ➖ | ➖ | ➖ | ➖ | ➖ | + +### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot support it | + + +## Additional Resources + +* [Perplexity Documentation](https://docs.perplexity.ai/) \ No newline at end of file diff --git a/scripts/docs/generate.ts b/scripts/docs/generate.ts index 861960d..b4c0d27 100644 --- a/scripts/docs/generate.ts +++ b/scripts/docs/generate.ts @@ -2,6 +2,12 @@ import { readFileSync, writeFileSync } from 'fs' import { markdownTable } from 'markdown-table' import { TableDisplayNames, models } from '../../src/models' +const legend = `### Legend +| Symbol | Description | +|--------------------|---------------------------------------| +| :white_check_mark: | Supported by Token.js | +| :heavy_minus_sign: | Not supported by the LLM provider, so Token.js cannot 
support it |\n`
+
 const generateCompatibility = async () => {
   for (const [provider, compatibility] of Object.entries(models)) {
     const table: string[][] = []
@@ -21,7 +27,7 @@ const generateCompatibility = async () => {
       if (allModels || modelInList) {
         features.push('✅')
       } else {
-        features.push('')
+        features.push('➖')
       }
     }
     if (pushHeader) {
@@ -34,7 +40,9 @@ const generateCompatibility = async () => {
     const mkdTable = markdownTable(table)
     const providerDocs = readFileSync(`docs/providers/${provider}.md`, 'utf-8')
     const docsSplit = providerDocs.split('<!-- compatibility -->')
-    const newDocs = `${docsSplit[0]}<!-- compatibility -->\n### Supported Models\n\n${mkdTable}\n\n<!-- end compatibility -->`
+    const afterCompatibilitySplit = docsSplit[1].split('<!-- end compatibility -->')
+
+    const newDocs = `${docsSplit[0]}<!-- compatibility -->\n## Supported Models\n\n${mkdTable}\n\n${legend}<!-- end compatibility -->${afterCompatibilitySplit[1]}`
 
     writeFileSync(`docs/providers/${provider}.md`, newDocs, 'utf-8')
   }
diff --git a/src/models.ts b/src/models.ts
index 75e0e8a..b230b2f 100644
--- a/src/models.ts
+++ b/src/models.ts
@@ -1,10 +1,10 @@
 export const TableDisplayNames = {
   models: 'Model',
-  supportsCompletion: 'Completion',
+  supportsCompletion: 'Chat Completion',
   supportsStreaming: 'Streaming',
   supportsJSON: 'JSON Output',
   supportsImages: 'Image Input',
-  supportsToolCalls: 'Tools',
+  supportsToolCalls: 'Function Calling',
   supportsN: 'N > 1',
 }