diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..a2a91d7
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,23 @@
+name: CI
+
+on:
+  pull_request:
+
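+# Regenerates the provider docs and fails the job if the committed files are stale.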
+jobs:
+  docs:
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      - name: Install pnpm
+        uses: pnpm/action-setup@v4
+        with:
+          version: 9
+      - name: Use Node.js 20
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+          cache: 'pnpm'
+      - name: Install dependencies
+        run: pnpm install
+      - name: Check Docs
+        run: pnpm docs:check
diff --git a/docs/providers/ai21.md b/docs/providers/ai21.md
index ca2a8f4..ddac98d 100644
--- a/docs/providers/ai21.md
+++ b/docs/providers/ai21.md
@@ -29,12 +29,12 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
-### Supported Models
-
-| Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
-| -------------- | ----------- | ----- | ----------- | --------- | ----- |
-| jamba-instruct | | | | | |
+### [AI21 Documentation](https://docs.ai21.com/reference/jamba-instruct-api)
+
+### Supported Models
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| -------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| jamba-instruct | ✅ | | | | | ✅ |
-### [AI21 Documentation](https://docs.ai21.com/reference/jamba-instruct-api)
diff --git a/docs/providers/anthropic.md b/docs/providers/anthropic.md
index 3d64528..7c99ef2 100644
--- a/docs/providers/anthropic.md
+++ b/docs/providers/anthropic.md
@@ -29,18 +29,18 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
-### Supported Models
-
-| Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
-| -------------------------- | ----------- | ----- | ----------- | --------- | ----- |
-| claude-3-5-sonnet-20240620 | | | | | |
-| claude-3-opus-20240229 | | | | | |
-| claude-3-sonnet-20240229 | | | | | |
-| claude-3-haiku-20240307 | | | | | |
-| claude-2.1 | | | | | |
-| claude-2.0 | | | | | |
-| claude-instant-1.2 | | | | | |
+### [Anthropic Documentation](https://docs.anthropic.com/en/docs/welcome)
+
+### Supported Models
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| -------------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| claude-3-5-sonnet-20240620 | ✅ | | | ✅ | ✅ | |
+| claude-3-opus-20240229 | ✅ | | | ✅ | ✅ | |
+| claude-3-sonnet-20240229 | ✅ | | | ✅ | ✅ | |
+| claude-3-haiku-20240307 | ✅ | | | ✅ | ✅ | |
+| claude-2.1 | ✅ | | | | | |
+| claude-2.0 | ✅ | | | | | |
+| claude-instant-1.2 | ✅ | | | | | |
-### [Anthropic Documentation](https://docs.anthropic.com/en/docs/welcome)
\ No newline at end of file
diff --git a/docs/providers/bedrock.md b/docs/providers/bedrock.md
index 719fa7b..0eda082 100644
--- a/docs/providers/bedrock.md
+++ b/docs/providers/bedrock.md
@@ -31,30 +31,30 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
+### [Bedrock Documentation](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html)
+
+
### Supported Models
-| Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
-| --------------------------------------- | ----------- | ----- | ----------- | --------- | ----- |
-| amazon.titan-text-lite-v1 | | | | | |
-| amazon.titan-text-express-v1 | | | | | |
-| anthropic.claude-3-opus-20240229-v1:0 | | | | | |
-| anthropic.claude-3-sonnet-20240229-v1:0 | | | | | |
-| anthropic.claude-3-haiku-20240307-v1:0 | | | | | |
-| anthropic.claude-v2:1 | | | | | |
-| anthropic.claude-v2 | | | | | |
-| anthropic.claude-instant-v1 | | | | | |
-| cohere.command-r-plus-v1:0 | | | | | |
-| cohere.command-r-v1:0 | | | | | |
-| cohere.command-text-v14 | | | | | |
-| cohere.command-light-text-v14 | | | | | |
-| meta.llama3-8b-instruct-v1:0 | | | | | |
-| meta.llama3-70b-instruct-v1:0 | | | | | |
-| meta.llama2-13b-chat-v1 | | | | | |
-| meta.llama2-70b-chat-v1 | | | | | |
-| mistral.mistral-7b-instruct-v0:2 | | | | | |
-| mistral.mixtral-8x7b-instruct-v0:1 | | | | | |
-| mistral.mistral-large-2402-v1:0 | | | | | |
-
-
-
-### [Bedrock Documentation](https://docs.aws.amazon.com/bedrock/latest/APIReference/welcome.html)
\ No newline at end of file
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| --------------------------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| amazon.titan-text-lite-v1 | ✅ | | | | | |
+| amazon.titan-text-express-v1 | ✅ | | | | | |
+| anthropic.claude-3-opus-20240229-v1:0 | ✅ | | | ✅ | ✅ | |
+| anthropic.claude-3-sonnet-20240229-v1:0 | ✅ | | | ✅ | ✅ | |
+| anthropic.claude-3-haiku-20240307-v1:0 | ✅ | | | ✅ | ✅ | |
+| anthropic.claude-v2:1 | ✅ | | | | | |
+| anthropic.claude-v2 | ✅ | | | | | |
+| anthropic.claude-instant-v1 | ✅ | | | | | |
+| cohere.command-r-plus-v1:0 | ✅ | | | | ✅ | |
+| cohere.command-r-v1:0 | ✅ | | | | ✅ | |
+| cohere.command-text-v14 | ✅ | | | | | |
+| cohere.command-light-text-v14 | ✅ | | | | | |
+| meta.llama3-8b-instruct-v1:0 | ✅ | | | | | |
+| meta.llama3-70b-instruct-v1:0 | ✅ | | | | | |
+| meta.llama2-13b-chat-v1 | ✅ | | | | | |
+| meta.llama2-70b-chat-v1 | ✅ | | | | | |
+| mistral.mistral-7b-instruct-v0:2 | ✅ | | | | | |
+| mistral.mixtral-8x7b-instruct-v0:1 | ✅ | | | | | |
+| mistral.mistral-large-2402-v1:0 | ✅ | | | | ✅ | |
+
diff --git a/docs/providers/cohere.md b/docs/providers/cohere.md
index 771d4fb..c18956d 100644
--- a/docs/providers/cohere.md
+++ b/docs/providers/cohere.md
@@ -29,17 +29,17 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
-### Supported Models
-
-| Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
-| --------------------- | ----------- | ----- | ----------- | --------- | ----- |
-| command-r-plus | | | | | |
-| command-r | | | | | |
-| command | | | | | |
-| command-nightly | | | | | |
-| command-light | | | | | |
-| command-light-nightly | | | | | |
+### [Cohere Documentation](https://docs.cohere.com)
+
+### Supported Models
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| --------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| command-r-plus | ✅ | | | | ✅ | |
+| command-r | ✅ | | | | ✅ | |
+| command | ✅ | | | | | |
+| command-nightly | ✅ | | | | ✅ | |
+| command-light | ✅ | | | | | |
+| command-light-nightly | ✅ | | | | | |
-### [Cohere Documentation](https://docs.cohere.com)
\ No newline at end of file
diff --git a/docs/providers/gemini.md b/docs/providers/gemini.md
index 508760c..86b7a76 100644
--- a/docs/providers/gemini.md
+++ b/docs/providers/gemini.md
@@ -29,15 +29,14 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
-### Supported Models
-
-| Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
-| ---------------- | ----------- | ----- | ----------- | --------- | ----- |
-| gemini-1.5-pro | | | | | |
-| gemini-1.5-flash | | | | | |
-| gemini-1.0-pro | | | | | |
-
+### [Gemini Documentation](https://ai.google.dev/gemini-api/docs)
+
+### Supported Models
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| ---------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| gemini-1.5-pro | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gemini-1.5-flash | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gemini-1.0-pro | ✅ | | | | ✅ | ✅ |
-### [Gemini Documentation](https://ai.google.dev/gemini-api/docs)
diff --git a/docs/providers/groq.md b/docs/providers/groq.md
index 4465cc3..03a3608 100644
--- a/docs/providers/groq.md
+++ b/docs/providers/groq.md
@@ -29,10 +29,16 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
-### Supported Models
-
-
-Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
----|
-llama3-8b-8192 | | | | | |
-llama3-70b-8192 | | | | | |
-mixtral-8x7b-32768 | | | | | |
-gemma-7b-it | | | | | |
-gemma2-9b-it | | | | | |
+### [Groq Documentation](https://console.groq.com/docs/quickstart)
+
+### Supported Models
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| ------------------ | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| llama3-8b-8192 | ✅ | | | | | |
+| llama3-70b-8192 | ✅ | | ✅ | | | |
+| mixtral-8x7b-32768 | ✅ | | | | | |
+| gemma-7b-it | ✅ | | ✅ | | | |
+| gemma2-9b-it | ✅ | | ✅ | | | |
-### [Groq Documentation](https://console.groq.com/docs/quickstart)
\ No newline at end of file
diff --git a/docs/providers/mistral.md b/docs/providers/mistral.md
index 56c3a62..0e42499 100644
--- a/docs/providers/mistral.md
+++ b/docs/providers/mistral.md
@@ -29,25 +29,25 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
-### Supported Models
-
-| Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
-| ----------------------- | ----------- | ----- | ----------- | --------- | ----- |
-| open-mistral-7b | | | | | |
-| mistral-tiny-2312 | | | | | |
-| open-mixtral-8x7b | | | | | |
-| mistral-small-2312 | | | | | |
-| open-mixtral-8x22b | | | | | |
-| open-mixtral-8x22b-2404 | | | | | |
-| mistral-small-latest | | | | | |
-| mistral-small-2402 | | | | | |
-| mistral-medium-latest | | | | | |
-| mistral-medium-2312 | | | | | |
-| mistral-large-latest | | | | | |
-| mistral-large-2402 | | | | | |
-| codestral-latest | | | | | |
-| codestral-2405 | | | | | |
+### [Mistral Documentation](https://docs.mistral.ai)
+
+### Supported Models
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| ----------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| open-mistral-7b | ✅ | | ✅ | | | |
+| mistral-tiny-2312 | ✅ | | ✅ | | | |
+| open-mixtral-8x7b | ✅ | | | | | |
+| mistral-small-2312 | ✅ | | | | | |
+| open-mixtral-8x22b | ✅ | | ✅ | | ✅ | |
+| open-mixtral-8x22b-2404 | ✅ | | ✅ | | ✅ | |
+| mistral-small-latest | ✅ | | | | ✅ | |
+| mistral-small-2402 | ✅ | | | | ✅ | |
+| mistral-medium-latest | ✅ | | | | | |
+| mistral-medium-2312 | ✅ | | | | | |
+| mistral-large-latest | ✅ | | ✅ | | ✅ | |
+| mistral-large-2402 | ✅ | | ✅ | | ✅ | |
+| codestral-latest | ✅ | | ✅ | | | |
+| codestral-2405 | ✅ | | ✅ | | | |
-### [Mistral Documentation](https://docs.mistral.ai)
diff --git a/docs/providers/openai.md b/docs/providers/openai.md
index b42af90..55ff844 100644
--- a/docs/providers/openai.md
+++ b/docs/providers/openai.md
@@ -29,32 +29,32 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
-### Supported Models
-
-| Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
-| ---------------------- | ----------- | ----- | ----------- | --------- | ----- |
-| gpt-4o | | | | | |
-| gpt-4o-2024-05-13 | | | | | |
-| gpt-4-turbo | | | | | |
-| gpt-4-turbo-2024-04-09 | | | | | |
-| gpt-4-0125-preview | | | | | |
-| gpt-4-turbo-preview | | | | | |
-| gpt-4-1106-preview | | | | | |
-| gpt-4-vision-preview | | | | | |
-| gpt-4 | | | | | |
-| gpt-4-0314 | | | | | |
-| gpt-4-0613 | | | | | |
-| gpt-4-32k | | | | | |
-| gpt-4-32k-0314 | | | | | |
-| gpt-4-32k-0613 | | | | | |
-| gpt-3.5-turbo | | | | | |
-| gpt-3.5-turbo-16k | | | | | |
-| gpt-3.5-turbo-0301 | | | | | |
-| gpt-3.5-turbo-0613 | | | | | |
-| gpt-3.5-turbo-1106 | | | | | |
-| gpt-3.5-turbo-0125 | | | | | |
-| gpt-3.5-turbo-16k-0613 | | | | | |
+### [OpenAI Documentation](https://platform.openai.com/docs/introduction)
+
+### Supported Models
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| ---------------------- | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| gpt-4o | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gpt-4o-2024-05-13 | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gpt-4-turbo | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gpt-4-turbo-2024-04-09 | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gpt-4-0125-preview | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gpt-4-turbo-preview | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gpt-4-1106-preview | ✅ | | ✅ | ✅ | ✅ | ✅ |
+| gpt-4-vision-preview | ✅ | | ✅ | ✅ | | ✅ |
+| gpt-4 | ✅ | | | | ✅ | ✅ |
+| gpt-4-0314 | ✅ | | | | | ✅ |
+| gpt-4-0613 | ✅ | | | | ✅ | ✅ |
+| gpt-4-32k | ✅ | | | | | ✅ |
+| gpt-4-32k-0314 | ✅ | | | | | ✅ |
+| gpt-4-32k-0613 | ✅ | | | | | ✅ |
+| gpt-3.5-turbo | ✅ | | ✅ | | ✅ | ✅ |
+| gpt-3.5-turbo-16k | ✅ | | | | | ✅ |
+| gpt-3.5-turbo-0301 | ✅ | | | | | ✅ |
+| gpt-3.5-turbo-0613 | ✅ | | | | ✅ | ✅ |
+| gpt-3.5-turbo-1106 | ✅ | | ✅ | | ✅ | ✅ |
+| gpt-3.5-turbo-0125 | ✅ | | ✅ | | ✅ | ✅ |
+| gpt-3.5-turbo-16k-0613 | ✅ | | | | | ✅ |
-### [OpenAI Documentation](https://platform.openai.com/docs/introduction)
diff --git a/docs/providers/perplexity.md b/docs/providers/perplexity.md
index eb3cfa1..8f9cf49 100644
--- a/docs/providers/perplexity.md
+++ b/docs/providers/perplexity.md
@@ -29,18 +29,18 @@ const result: ChatCompletionMessageParam[] = await tokenjs.chat.completions.crea
})
```
-### Supported Models
-
-| Model | JSON Output | Tools | Image Input | Streaming | N > 1 |
-| ------------------------------ | ----------- | ----- | ----------- | --------- | ----- |
-| llama-3-sonar-small-32k-chat | | | | | |
-| llama-3-sonar-small-32k-online | | | | | |
-| llama-3-sonar-large-32k-chat | | | | | |
-| llama-3-sonar-large-32k-online | | | | | |
-| llama-3-8b-instruct | | | | | |
-| llama-3-70b-instruct | | | | | |
-| mixtral-8x7b-instruct | | | | | |
+### [Perplexity Documentation](https://docs.perplexity.ai/)
+
+### Supported Models
+| Model | Completion | Streaming | JSON Output | Image Input | Tools | N > 1 |
+| ------------------------------ | ---------- | --------- | ----------- | ----------- | ----- | ----- |
+| llama-3-sonar-small-32k-chat | ✅ | | | | | |
+| llama-3-sonar-small-32k-online | ✅ | | | | | |
+| llama-3-sonar-large-32k-chat | ✅ | | | | | |
+| llama-3-sonar-large-32k-online | ✅ | | | | | |
+| llama-3-8b-instruct | ✅ | | | | | |
+| llama-3-70b-instruct | ✅ | | | | | |
+| mixtral-8x7b-instruct | ✅ | | | | | |
-### [Perplexity Documentation](https://docs.perplexity.ai/)
diff --git a/package.json b/package.json
index a583ee2..88b4259 100644
--- a/package.json
+++ b/package.json
@@ -12,7 +12,9 @@
"lint:fix": "pnpm lint:check --fix",
"lint:check": "eslint . --max-warnings=0",
"format": "prettier --write .",
- "build": "tsc -p ./tsconfig.json"
+ "build": "tsc -p ./tsconfig.json",
+ "docs:update": "vite-node ./scripts/docs/generate.ts",
+ "docs:check": "vite-node ./scripts/docs/check.ts"
},
"author": "Sam Goldman",
"keywords": [],
@@ -31,18 +33,14 @@
"openai": "^4.52.2"
},
"devDependencies": {
+ "@babel/eslint-parser": "^7.18.2",
"@changesets/cli": "^2.16.0",
+ "@typescript-eslint/eslint-plugin": "^5.26.0",
+ "@typescript-eslint/parser": "^4.26.0",
"@vitest/coverage-v8": "^2.0.1",
"@vitest/ui": "^2.0.1",
"dotenv": "^16.4.5",
- "typescript": "^5.5.3",
- "vite-node": "^1.6.0",
- "vitest": "^2.0.1",
- "prettier": "^2.3.1",
"eslint": "^8.16.0",
- "@babel/eslint-parser": "^7.18.2",
- "@typescript-eslint/eslint-plugin": "^5.26.0",
- "@typescript-eslint/parser": "^4.26.0",
"eslint-config-prettier": "^8.3.0",
"eslint-config-standard": "^16.0.3",
"eslint-import-resolver-typescript": "^3.5.2",
@@ -53,6 +51,11 @@
"eslint-plugin-prettier": "^4.0.0",
"eslint-plugin-promise": "^5.1.0",
"eslint-plugin-react": "^7.24.0",
- "eslint-plugin-unicorn": "^42.0.0"
+ "eslint-plugin-unicorn": "^42.0.0",
+ "markdown-table": "^3.0.3",
+ "prettier": "^2.3.1",
+ "typescript": "^5.5.3",
+ "vite-node": "^1.6.0",
+ "vitest": "^2.0.1"
}
}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 9bda61c..db0f413 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -97,6 +97,9 @@ devDependencies:
   eslint-plugin-unicorn:
     specifier: ^42.0.0
     version: 42.0.0(eslint@8.57.0)
+  markdown-table:
+    specifier: ^3.0.3
+    version: 3.0.3
   prettier:
     specifier: ^2.3.1
     version: 2.8.8
@@ -4639,6 +4642,10 @@ packages:
       semver: 7.6.2
     dev: true

+  /markdown-table@3.0.3:
+    resolution: {integrity: sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==}
+    dev: true
+
   /merge-stream@2.0.0:
     resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==}
     dev: true
diff --git a/scripts/docs/check.ts b/scripts/docs/check.ts
new file mode 100644
index 0000000..006dcec
--- /dev/null
+++ b/scripts/docs/check.ts
@@ -0,0 +1,21 @@
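+// CI guard behind `pnpm docs:check`: regenerate the provider docs, then fail
+// if git reports any resulting changes under docs/.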
+const { execSync } = require('child_process')
+
+try {
+  // Step 1: Regenerate the docs from src/models.ts
+  execSync('vite-node ./scripts/docs/generate.ts', { stdio: 'inherit' })
+
+  // Step 2: Check for modified files using Git
+  const gitStatus = execSync('git status --porcelain docs').toString()
+
+  if (gitStatus) {
+    // Step 3: If there are modifications, the committed docs are stale
+    throw new Error(
+      'Generated documentation files are not up to date. Please run `pnpm docs:update` and commit the changes.'
+    )
+  } else {
+    console.log('Documentation files are up to date.')
+  }
+} catch (error) {
+  console.error(error.message)
+  process.exit(1)
+}
diff --git a/scripts/docs/generate.ts b/scripts/docs/generate.ts
new file mode 100644
index 0000000..861960d
--- /dev/null
+++ b/scripts/docs/generate.ts
@@ -0,0 +1,44 @@
+import { readFileSync, writeFileSync } from 'fs'
+import { markdownTable } from 'markdown-table'
+import { TableDisplayNames, models } from '../../src/models'
+
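+// Rebuilds the "Supported Models" table in each docs/providers/<provider>.md
+// file from the compatibility lists in src/models.ts, so the docs cannot
+// drift from the code. A feature value of `true` means every model supports
+// the feature; an array lists the specific models that do.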
+const generateCompatibility = async () => {
+  for (const [provider, compatibility] of Object.entries(models)) {
+    const table: string[][] = []
+
+    let pushHeader = true
+
+    for (const model of compatibility.models) {
+      const header: string[] = []
+      const features: string[] = [model]
+      for (const [feature, models] of Object.entries(compatibility)) {
+        header.push(TableDisplayNames[feature])
+
+        if (feature === 'models') continue
+
+        const allModels = typeof models === 'boolean' && models === true
+        const modelInList = Array.isArray(models) && models.includes(model)
+        if (allModels || modelInList) {
+          features.push('✅')
+        } else {
+          features.push('')
+        }
+      }
+      if (pushHeader) {
+        table.push(header)
+        pushHeader = false
+      }
+      table.push(features)
+    }
+
+    const mkdTable = markdownTable(table)
+    const providerDocs = readFileSync(`docs/providers/${provider}.md`, 'utf-8')
+    const docsSplit = providerDocs.split('### Supported Models')
+    const newDocs = `${docsSplit[0]}\n### Supported Models\n\n${mkdTable}\n\n`
+
+    writeFileSync(`docs/providers/${provider}.md`, newDocs, 'utf-8')
+  }
+  console.log(`Successfully updated model compatibility tables.`)
+}
+
+generateCompatibility()
diff --git a/src/handlers/anthropic.ts b/src/handlers/anthropic.ts
index e9ffda7..8ff91c8 100644
--- a/src/handlers/anthropic.ts
+++ b/src/handlers/anthropic.ts
@@ -503,12 +503,6 @@ export class AnthropicHandler extends BaseHandler<AnthropicModel> {
validateInputs(body: ProviderCompletionParams<'anthropic'>): void {
super.validateInputs(body)
- if (typeof body.n === 'number' && body.n > 1) {
- throw new InputError(
- `Anthropic does not support setting 'n' greater than 1.`
- )
- }
-
let logImageDetailWarning: boolean = false
for (const message of body.messages) {
if (Array.isArray(message.content)) {
diff --git a/src/handlers/base.ts b/src/handlers/base.ts
index c2f97d4..e1bbc43 100644
--- a/src/handlers/base.ts
+++ b/src/handlers/base.ts
@@ -12,19 +12,22 @@ export abstract class BaseHandler<T extends LLMChatModel> {
   protected supportsJSON: readonly T[]
   protected supportsImages: readonly T[]
   protected supportsToolCalls: readonly T[]
+  protected supportsN: readonly T[] | boolean

   constructor(
     opts: ConfigOptions,
     models: readonly T[],
     supportsJSON: readonly T[],
     supportsImages: readonly T[],
-    supportsToolCalls: readonly T[]
+    supportsToolCalls: readonly T[],
+    supportsN: readonly T[] | boolean
   ) {
     this.opts = opts
     this.models = models
     this.supportsJSON = supportsJSON
     this.supportsImages = supportsImages
     this.supportsToolCalls = supportsToolCalls
+    this.supportsN = supportsN
   }
abstract create(
@@ -36,19 +39,13 @@ export abstract class BaseHandler {
       throw new InputError(`Invalid 'model' field: ${body.model}.`)
     }

-    if (
-      body.tools !== undefined &&
-      !this.supportsToolCalls.includes(body.model)
-    ) {
+    if (body.tools !== undefined && !this.supportsTools(body.model)) {
       throw new InputError(
         `Detected a 'tools' parameter, but the following model does not support tools: ${body.model}`
       )
     }

-    if (
-      body.tool_choice !== undefined &&
-      !this.supportsToolCalls.includes(body.model)
-    ) {
+    if (body.tool_choice !== undefined && !this.supportsTools(body.model)) {
       throw new InputError(
         `Detected a 'tool_choice' parameter, but the following model does not support tools: ${body.model}`
       )
@@ -83,6 +80,16 @@ export abstract class BaseHandler {
       }
     }

+    if (
+      typeof body.n === 'number' &&
+      body.n > 1 &&
+      !this.supportsNGreaterThanOne(body.model)
+    ) {
+      throw new InputError(
+        `The model ${body.model} does not support setting 'n' greater than 1.`
+      )
+    }
+
     if (body.response_format?.type === 'json_object') {
       if (!this.supportsJSONMode(body.model)) {
         throw new InputError(
@@ -120,15 +127,34 @@ export abstract class BaseHandler {
     }
   }

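+  // Feature support is declared either as a boolean (true means every model
+  // supports the feature) or as a list of the models that do.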
+  protected isSupportedFeature(
+    featureSupport: readonly T[] | boolean,
+    model: T
+  ): boolean {
+    if (typeof featureSupport === 'boolean') {
+      return featureSupport
+    } else {
+      return featureSupport.includes(model)
+    }
+  }
+
   protected isSupportedModel(model: LLMChatModel): model is T {
     return this.models.includes(model as T)
   }

   protected supportsJSONMode(model: T): boolean {
-    return this.supportsJSON.includes(model)
+    return this.isSupportedFeature(this.supportsJSON, model)
   }

   protected supportsImageMessages(model: T): boolean {
-    return this.supportsImages.includes(model)
+    return this.isSupportedFeature(this.supportsImages, model)
+  }
+
+  protected supportsNGreaterThanOne(model: T): boolean {
+    return this.isSupportedFeature(this.supportsN, model)
+  }
+
+  protected supportsTools(model: T): boolean {
+    return this.isSupportedFeature(this.supportsToolCalls, model)
   }
 }
diff --git a/src/handlers/bedrock.ts b/src/handlers/bedrock.ts
index 4a2ee03..28fd2af 100644
--- a/src/handlers/bedrock.ts
+++ b/src/handlers/bedrock.ts
@@ -572,12 +572,6 @@ export class BedrockHandler extends BaseHandler<BedrockModel> {
`Bedrock does not support the 'detail' field for images. The default image quality will be used.`
)
}
-
- if (typeof body.n === 'number' && body.n > 1) {
- throw new InputError(
- `Bedrock does not support setting 'n' greater than 1.`
- )
- }
}
async create(
diff --git a/src/handlers/cohere.ts b/src/handlers/cohere.ts
index 2b4f58f..eeb3626 100644
--- a/src/handlers/cohere.ts
+++ b/src/handlers/cohere.ts
@@ -497,12 +497,6 @@ export class CohereHandler extends BaseHandler<CohereModel> {
)
}
- if (typeof body.n === 'number' && body.n > 1) {
- throw new InputError(
- `Cohere does not support setting 'n' greater than 1.`
- )
- }
-
const apiKey = this.opts.apiKey ?? process.env.COHERE_API_KEY
if (apiKey === undefined) {
throw new InputError(
diff --git a/src/handlers/groq.ts b/src/handlers/groq.ts
index 98a99f9..912c16f 100644
--- a/src/handlers/groq.ts
+++ b/src/handlers/groq.ts
@@ -4,7 +4,6 @@ import { GroqModel, ProviderCompletionParams } from '../chat'
import { CompletionResponse, StreamCompletionResponse } from '../userTypes'
import { BaseHandler } from './base'
import { InputError } from './types'
-import { assertNIsOne } from './utils'
// Groq is very compatible with OpenAI's API, so we could likely reuse the OpenAI SDK for this handler
 // to reduce the bundle size.
@@ -45,7 +44,6 @@ export class GroqHandler extends BaseHandler<GroqModel> {
)
}
- assertNIsOne(body.n, 'Groq')
return client.chat.completions.create({
stream: body.stream,
messages: body.messages as Groq.Chat.ChatCompletionMessageParam[],
diff --git a/src/handlers/perplexity.ts b/src/handlers/perplexity.ts
index bbd6fd8..aeee41d 100644
--- a/src/handlers/perplexity.ts
+++ b/src/handlers/perplexity.ts
@@ -20,6 +20,7 @@ export class PerplexityHandler extends BaseHandler<PerplexityModel> {
async create(
body: ProviderCompletionParams<'perplexity'>
 ): Promise<CompletionResponse | StreamCompletionResponse> {
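+ // Delegate parameter validation (including the new 'n' check) to BaseHandler.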
+ this.validateInputs(body)
const apiKey = this.opts.apiKey ?? process.env.PERPLEXITY_API_KEY
if (apiKey === undefined) {
@@ -28,12 +29,6 @@ export class PerplexityHandler extends BaseHandler<PerplexityModel> {
)
}
- if (typeof body.n === 'number' && body.n > 1) {
- throw new InputError(
- `Perplexity does not support setting 'n' greater than 1.`
- )
- }
-
const openai = new OpenAI({
...this.opts,
baseURL: 'https://api.perplexity.ai',
diff --git a/src/handlers/utils.ts b/src/handlers/utils.ts
index 058a5e7..1d2dedb 100644
--- a/src/handlers/utils.ts
+++ b/src/handlers/utils.ts
@@ -24,7 +24,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.openai.models,
models.openai.supportsJSON,
models.openai.supportsImages,
- models.openai.supportsToolCalls
+ models.openai.supportsToolCalls,
+ models.openai.supportsN
),
['anthropic']: (opts: ConfigOptions) =>
new AnthropicHandler(
@@ -32,7 +33,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.anthropic.models,
models.anthropic.supportsJSON,
models.anthropic.supportsImages,
- models.anthropic.supportsToolCalls
+ models.anthropic.supportsToolCalls,
+ models.anthropic.supportsN
),
['gemini']: (opts: ConfigOptions) =>
new GeminiHandler(
@@ -40,7 +42,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.gemini.models,
models.gemini.supportsJSON,
models.gemini.supportsImages,
- models.gemini.supportsToolCalls
+ models.gemini.supportsToolCalls,
+ models.gemini.supportsN
),
['cohere']: (opts: ConfigOptions) =>
new CohereHandler(
@@ -48,7 +51,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.cohere.models,
models.cohere.supportsJSON,
models.cohere.supportsImages,
- models.cohere.supportsToolCalls
+ models.cohere.supportsToolCalls,
+ models.cohere.supportsN
),
['bedrock']: (opts: ConfigOptions) =>
new BedrockHandler(
@@ -56,7 +60,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.bedrock.models,
models.bedrock.supportsJSON,
models.bedrock.supportsImages,
- models.bedrock.supportsToolCalls
+ models.bedrock.supportsToolCalls,
+ models.bedrock.supportsN
),
['mistral']: (opts: ConfigOptions) =>
new MistralHandler(
@@ -64,7 +69,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.mistral.models,
models.mistral.supportsJSON,
models.mistral.supportsImages,
- models.mistral.supportsToolCalls
+ models.mistral.supportsToolCalls,
+ models.mistral.supportsN
),
['groq']: (opts: ConfigOptions) =>
new GroqHandler(
@@ -72,7 +78,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.groq.models,
models.groq.supportsJSON,
models.groq.supportsImages,
- models.groq.supportsToolCalls
+ models.groq.supportsToolCalls,
+ models.groq.supportsN
),
['ai21']: (opts: ConfigOptions) =>
new AI21Handler(
@@ -80,7 +87,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.ai21.models,
models.ai21.supportsJSON,
models.ai21.supportsImages,
- models.ai21.supportsToolCalls
+ models.ai21.supportsToolCalls,
+ models.ai21.supportsN
),
['perplexity']: (opts: ConfigOptions) =>
new PerplexityHandler(
@@ -88,7 +96,8 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.perplexity.models,
models.perplexity.supportsJSON,
models.perplexity.supportsImages,
- models.perplexity.supportsToolCalls
+ models.perplexity.supportsToolCalls,
+ models.perplexity.supportsN
),
}
@@ -193,17 +202,6 @@ export const consoleWarn = (message: string): void => {
console.warn(chalk.yellow.bold(`Warning: ${message}\n`))
}
-export const assertNIsOne = (
- n: number | null | undefined,
- provider: string
-): void => {
- if (typeof n === 'number' && n > 1) {
- throw new InputError(
- `${provider} does not support setting 'n' greater than 1.`
- )
- }
-}
-
export const normalizeTemperature = (
temperature: number,
provider: LLMProvider,
diff --git a/src/models.ts b/src/models.ts
index 76322dc..75e0e8a 100644
--- a/src/models.ts
+++ b/src/models.ts
@@ -1,3 +1,13 @@
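+// Human-readable column headings for the generated docs tables; keys mirror
+// the feature fields on each provider entry below.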
+export const TableDisplayNames = {
+ models: 'Model',
+ supportsCompletion: 'Completion',
+ supportsStreaming: 'Streaming',
+ supportsJSON: 'JSON Output',
+ supportsImages: 'Image Input',
+ supportsToolCalls: 'Tools',
+ supportsN: 'N > 1',
+}
+
export const models = {
openai: {
models: [
@@ -23,6 +33,8 @@ export const models = {
'gpt-3.5-turbo-0125',
'gpt-3.5-turbo-16k-0613',
] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
supportsJSON: [
'gpt-4o',
'gpt-4o-2024-05-13',
@@ -61,12 +73,16 @@ export const models = {
'gpt-3.5-turbo-1106',
'gpt-3.5-turbo-0613',
] as const,
+ supportsN: true,
},
ai21: {
models: ['jamba-instruct'] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
supportsJSON: [] as const,
supportsImages: [] as const,
supportsToolCalls: [] as const,
+ supportsN: true,
},
anthropic: {
models: [
@@ -78,6 +94,8 @@ export const models = {
'claude-2.0',
'claude-instant-1.2',
] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
supportsJSON: [] as const,
supportsImages: [
'claude-3-5-sonnet-20240620',
@@ -91,9 +109,12 @@ export const models = {
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307',
] as const,
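+ // Anthropic's API does not support returning multiple choices, so 'n' must be 1.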
+ supportsN: false,
},
gemini: {
models: ['gemini-1.5-pro', 'gemini-1.5-flash', 'gemini-1.0-pro'] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
supportsJSON: ['gemini-1.5-pro', 'gemini-1.5-flash'] as const,
supportsImages: ['gemini-1.5-pro', 'gemini-1.5-flash'] as const,
supportsToolCalls: [
@@ -101,6 +122,7 @@ export const models = {
'gemini-1.5-flash',
'gemini-1.0-pro',
] as const,
+ supportsN: true,
},
cohere: {
models: [
@@ -111,6 +133,8 @@ export const models = {
'command-light',
'command-light-nightly',
] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
supportsJSON: [] as const,
supportsImages: [] as const,
supportsToolCalls: [
@@ -118,6 +142,7 @@ export const models = {
'command-r',
'command-nightly',
] as const,
+ supportsN: false,
},
bedrock: {
models: [
@@ -141,6 +166,8 @@ export const models = {
'mistral.mixtral-8x7b-instruct-v0:1',
'mistral.mistral-large-2402-v1:0',
] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
// At the time of writing, the only models that Bedrock supports which allow JSON are Mistral
// models. However, Bedrock's `additionalModelRequestFields` field, which is designed to allow
// us to pass arbitrary parameters to the model, does not appear to work for Mistral's
@@ -159,6 +186,7 @@ export const models = {
'cohere.command-r-v1:0',
'mistral.mistral-large-2402-v1:0',
] as const,
+ supportsN: false,
},
mistral: {
models: [
@@ -177,6 +205,8 @@ export const models = {
'codestral-latest',
'codestral-2405',
] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
// Mistral claims that all of its models support JSON, but several of their weaker models either
// fail to produce valid JSON or produce very low quality results for the following prompt:
// "Generate a JSON that maps ten athletes to their jersey numbers". We removed these models
@@ -200,6 +230,7 @@ export const models = {
'mistral-large-latest',
'mistral-large-2402',
] as const,
+ supportsN: false,
},
groq: {
models: [
@@ -209,6 +240,8 @@ export const models = {
'gemma-7b-it',
'gemma2-9b-it',
] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
// Groq claims that all of its models support JSON, but some of the weaker models either fail to
// produce valid JSON or produce very low quality results for the following prompt: "Generate a
// JSON that maps ten athletes to their jersey numbers". We removed these models from the list
@@ -216,6 +249,7 @@ export const models = {
supportsJSON: ['llama3-70b-8192', 'gemma-7b-it', 'gemma2-9b-it'] as const,
supportsImages: [] as const,
supportsToolCalls: [] as const,
+ supportsN: false,
},
perplexity: {
models: [
@@ -227,8 +261,11 @@ export const models = {
'llama-3-70b-instruct',
'mixtral-8x7b-instruct',
] as const,
+ supportsCompletion: true,
+ supportsStreaming: [] as const,
supportsJSON: [] as const,
supportsImages: [] as const,
supportsToolCalls: [] as const,
+ supportsN: false,
},
}
diff --git a/test/handlers/base.test.ts b/test/handlers/base.test.ts
index 66b7a8d..145d1c7 100644
--- a/test/handlers/base.test.ts
+++ b/test/handlers/base.test.ts
@@ -70,4 +70,21 @@ describe('Base Handler', () => {
       )
     )
   })
+
+  it("throws an error when n > 1 but the model doesn't support n > 1", async () => {
+    const tokenjs = new TokenJS()
+    await expect(
+      tokenjs.chat.completions.create({
+        provider: 'anthropic',
+        model: 'claude-2.0',
+        messages: getDummyMessages(),
+        temperature: 0.5,
+        n: 2,
+      })
+    ).rejects.toThrow(
+      new InputError(
+        `The model claude-2.0 does not support setting 'n' greater than 1.`
+      )
+    )
+  })
 })
diff --git a/test/handlers/gemini.test.ts b/test/handlers/gemini.test.ts
index 8e09b40..4cb3f4b 100644
--- a/test/handlers/gemini.test.ts
+++ b/test/handlers/gemini.test.ts
@@ -1393,7 +1393,8 @@ describe('GeminiHandler', () => {
models.gemini.models,
models.gemini.supportsJSON,
models.gemini.supportsImages,
- models.gemini.supportsToolCalls
+ models.gemini.supportsToolCalls,
+ models.gemini.supportsN
)
;(GoogleGenerativeAI as any).mockImplementationOnce(() => ({
@@ -1456,7 +1457,8 @@ describe('GeminiHandler', () => {
models.gemini.models,
models.gemini.supportsJSON,
models.gemini.supportsImages,
- models.gemini.supportsToolCalls
+ models.gemini.supportsToolCalls,
+ models.gemini.supportsN
)
;(GoogleGenerativeAI as any).mockImplementationOnce(() => ({
@@ -1513,7 +1515,8 @@ describe('GeminiHandler', () => {
models.gemini.models,
models.gemini.supportsJSON,
models.gemini.supportsImages,
- models.gemini.supportsToolCalls
+ models.gemini.supportsToolCalls,
+ models.gemini.supportsN
)
;(GoogleGenerativeAI as any).mockImplementationOnce(() => ({