diff --git a/.changeset/eight-goats-jam.md b/.changeset/eight-goats-jam.md new file mode 100644 index 0000000..306b808 --- /dev/null +++ b/.changeset/eight-goats-jam.md @@ -0,0 +1,5 @@ +--- +'token.js': patch +--- + +Support OpenRouter diff --git a/.env.example b/.env.example index 7e3827d..0c3b39e 100644 --- a/.env.example +++ b/.env.example @@ -19,6 +19,9 @@ GROQ_API_KEY= # Mistral MISTRAL_API_KEY= +# OpenRouter +OPENROUTER_API_KEY= + # Perplexity PERPLEXITY_API_KEY= diff --git a/README.md b/README.md index 48902a4..a6679d1 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ # Token.js -Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. Free and open source. No proxy server required. +Integrate 200+ LLMs with one TypeScript SDK using OpenAI's format. Free and open source. No proxy server required. ## Features -* Use OpenAI's format to call 60+ LLMs from 9 providers. +* Use OpenAI's format to call 200+ LLMs from 10 providers. * Supports tools, JSON outputs, image inputs, streaming, and more. * Runs completely on the client side. No proxy server needed. * Free and open source under MIT. @@ -20,6 +20,7 @@ Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. Free and open * Mistral * OpenAI * Perplexity +* OpenRouter ## [Documentation](https://docs.tokenjs.ai/) diff --git a/docs/README.md b/docs/README.md index 0489fda..ae0dd3d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,6 +1,6 @@ --- description: >- - Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. + Integrate 200+ LLMs with one TypeScript SDK using OpenAI's format. Free and open source. No proxy server required. --- @@ -8,7 +8,7 @@ description: >- ## Features -* Use OpenAI's format to call 60+ LLMs from 9 providers. +* Use OpenAI's format to call 200+ LLMs from 10 providers. * Supports tools, JSON outputs, image inputs, streaming, and more. * Runs completely on the client side. No proxy server needed. * Free and open source under MIT. 
@@ -24,6 +24,7 @@ description: >- * Mistral * OpenAI * Perplexity +* OpenRouter ## Setup @@ -261,6 +262,39 @@ async function main() { main() ``` {% endtab %} + +{% tab title="OpenRouter" %} +{% code title=".env" %} +```bash +OPENROUTER_API_KEY= +``` +{% endcode %} + +```typescript +import { TokenJS } from 'token.js' + +// Create the Token.js client +const tokenjs = new TokenJS() + +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'openrouter', + model: 'nvidia/nemotron-4-340b-instruct', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() +``` +{% endtab %} {% endtabs %} ### Access Credentials @@ -284,6 +318,8 @@ GROQ_API_KEY= MISTRAL_API_KEY= # Perplexity PERPLEXITY_API_KEY= +# OpenRouter +OPENROUTER_API_KEY= # AWS Bedrock AWS_REGION_NAME= AWS_ACCESS_KEY_ID= diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index a592790..07ff077 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -10,6 +10,7 @@ * [Groq](providers/groq.md) * [Mistral](providers/mistral.md) * [OpenAI](providers/openai.md) + * [OpenRouter](providers/openrouter.md) * [Perplexity](providers/perplexity.md) * [Contact Us](contact-us.md) * [Contributing](https://github.com/token-js/token.js/blob/main/CONTRIBUTING.md) diff --git a/docs/providers/README.md b/docs/providers/README.md index 147b474..6c2b92f 100644 --- a/docs/providers/README.md +++ b/docs/providers/README.md @@ -4,4 +4,4 @@ description: Integrate LLM providers and models using Token.js. # Providers -
AI21ai21.md
Anthropicanthropic.md
Bedrockbedrock.md
Coherecohere.md
Geminigemini.md
Groqgroq.md
Mistralmistral.md
OpenAIopenai.md
Perplexityperplexity.md
+
AI21ai21.md
Anthropicanthropic.md
Bedrockbedrock.md
Coherecohere.md
Geminigemini.md
Groqgroq.md
Mistralmistral.md
OpenAIopenai.md
OpenRouteropenrouter.md
Perplexityperplexity.md
diff --git a/docs/providers/openrouter.md b/docs/providers/openrouter.md new file mode 100644 index 0000000..47ad714 --- /dev/null +++ b/docs/providers/openrouter.md @@ -0,0 +1,44 @@ +# OpenRouter + +[Get an OpenRouter API key](https://openrouter.ai/settings/keys) + +## Usage + +{% code title=".env" %} +```bash +OPENROUTER_API_KEY= +``` +{% endcode %} + +```typescript +import { TokenJS } from 'token.js' + +// Create the Token.js client +const tokenjs = new TokenJS() + +async function main() { + // Create a model response + const completion = await tokenjs.chat.completions.create({ + // Specify the provider and model + provider: 'openrouter', + model: 'nvidia/nemotron-4-340b-instruct', + // Define your message + messages: [ + { + role: 'user', + content: 'Hello!', + }, + ], + }) + console.log(completion.choices[0]) +} +main() +``` + +## Compatibility +OpenRouter supports more than 180 models from a variety of providers which may have varying feature support. We recommend reviewing the OpenRouter and provider documentation for specific compatibility information. + +## Additional Resources + +* [Supported Models](https://openrouter.ai/models) +* [OpenRouter Documentation](https://openrouter.ai/docs/quick-start) \ No newline at end of file diff --git a/scripts/docs/generate.ts b/scripts/docs/generate.ts index b4c0d27..d93a4b1 100644 --- a/scripts/docs/generate.ts +++ b/scripts/docs/generate.ts @@ -14,10 +14,22 @@ const generateCompatibility = async () => { let pushHeader = true + if (compatibility.generateDocs === false) { + continue + } + + if (typeof compatibility.models === 'boolean') { + throw new Error( + 'Auto-generating model compatibility tables is not supported for providers that do not have explicitly defined models.'
+ ) + } + for (const model of compatibility.models) { const header: string[] = [] const features: string[] = [model] for (const [feature, models] of Object.entries(compatibility)) { + if (feature === 'generateDocs') continue + header.push(TableDisplayNames[feature]) if (feature === 'models') continue @@ -40,7 +52,9 @@ const generateCompatibility = async () => { const mkdTable = markdownTable(table) const providerDocs = readFileSync(`docs/providers/${provider}.md`, 'utf-8') const docsSplit = providerDocs.split('') - const afterCompatibilitySplit = docsSplit[1].split('') + const afterCompatibilitySplit = docsSplit[1].split( + '' + ) const newDocs = `${docsSplit[0]}\n## Supported Models\n\n${mkdTable}\n\n${legend}${afterCompatibilitySplit[1]}` diff --git a/src/chat/index.ts b/src/chat/index.ts index 075e0f0..9f439cb 100644 --- a/src/chat/index.ts +++ b/src/chat/index.ts @@ -17,6 +17,7 @@ export type BedrockModel = (typeof models.bedrock.models)[number] export type MistralModel = (typeof models.mistral.models)[number] export type PerplexityModel = (typeof models.perplexity.models)[number] export type GroqModel = (typeof models.groq.models)[number] +export type OpenRouterModel = string export type LLMChatModel = | OpenAIModel @@ -28,6 +29,7 @@ export type LLMChatModel = | MistralModel | PerplexityModel | GroqModel + | OpenRouterModel export type LLMProvider = keyof typeof models @@ -41,6 +43,7 @@ type ProviderModelMap = { mistral: MistralModel perplexity: PerplexityModel groq: GroqModel + openrouter: OpenRouterModel } type CompletionBase

= Pick< diff --git a/src/handlers/base.ts b/src/handlers/base.ts index 5bc12c3..2228881 100644 --- a/src/handlers/base.ts +++ b/src/handlers/base.ts @@ -8,21 +8,21 @@ import { InputError } from './types.js' export abstract class BaseHandler { opts: ConfigOptions - protected models: readonly T[] - protected supportsJSON: readonly T[] - protected supportsImages: readonly T[] - protected supportsToolCalls: readonly T[] + protected models: readonly T[] | boolean + protected supportsJSON: readonly T[] | boolean + protected supportsImages: readonly T[] | boolean + protected supportsToolCalls: readonly T[] | boolean protected supportsN: readonly T[] | boolean - protected supportsStreamingMessages: readonly T[] + protected supportsStreamingMessages: readonly T[] | boolean constructor( opts: ConfigOptions, - models: readonly T[], - supportsJSON: readonly T[], - supportsImages: readonly T[], - supportsToolCalls: readonly T[], + models: readonly T[] | boolean, + supportsJSON: readonly T[] | boolean, + supportsImages: readonly T[] | boolean, + supportsToolCalls: readonly T[] | boolean, suportsN: readonly T[] | boolean, - supportsStreamingMessages: readonly T[] + supportsStreamingMessages: readonly T[] | boolean ) { this.opts = opts this.models = models @@ -38,6 +38,10 @@ export abstract class BaseHandler { ): Promise protected validateInputs(body: CompletionParams): void { + // We remove the provider key from the body just in case the provider does validation which errors due to it. + // This can only occur on OpenAI compatible providers, but we do it for all providers for consistency. + delete (body as any).provider + if (!this.isSupportedModel(body.model)) { throw new InputError(`Invalid 'model' field: ${body.model}.`) } @@ -149,8 +153,8 @@ export abstract class BaseHandler { // We make this public so that we can mock it in tests, which is fine because the `BaseHandler` // class isn't exposed to the user. 
- public isSupportedModel(model: LLMChatModel): model is T { - return this.models.includes(model as T) + public isSupportedModel(model: string): model is T { + return this.isSupportedFeature(this.models, model as T) } protected supportsJSONMode(model: T): boolean { diff --git a/src/handlers/openrouter.ts b/src/handlers/openrouter.ts new file mode 100644 index 0000000..36638ca --- /dev/null +++ b/src/handlers/openrouter.ts @@ -0,0 +1,43 @@ +import OpenAI from 'openai' + +import { OpenRouterModel, ProviderCompletionParams } from '../chat/index.js' +import { + CompletionResponse, + StreamCompletionResponse, +} from '../userTypes/index.js' +import { BaseHandler } from './base.js' +import { InputError } from './types.js' + +// OpenRouter is very compatible with OpenAI's API, so we reuse the OpenAI SDK for this handler +// to reduce the bundle size. +export class OpenRouterHandler extends BaseHandler { + validateInputs(body: ProviderCompletionParams<'openrouter'>): void { + super.validateInputs(body) + } + + async create( + body: ProviderCompletionParams<'openrouter'> + ): Promise { + this.validateInputs(body) + + console.log('open router') + + const apiKey = this.opts.apiKey ?? process.env.OPENROUTER_API_KEY + const client = new OpenAI({ + apiKey, + baseURL: 'https://openrouter.ai/api/v1', + defaultHeaders: { + 'HTTP-Referer': 'docs.tokenjs.ai', + 'X-Title': 'Token.js', + }, + }) + + if (apiKey === undefined) { + throw new InputError( + 'API key is required for OpenRouter, define OPENROUTER_API_KEY in your environment or specify the apiKey option.'
+ ) + } + + return client.chat.completions.create(body) + } +} diff --git a/src/handlers/utils.ts b/src/handlers/utils.ts index 1c7de2c..8361202 100644 --- a/src/handlers/utils.ts +++ b/src/handlers/utils.ts @@ -14,6 +14,7 @@ import { GeminiHandler } from './gemini.js' import { GroqHandler } from './groq.js' import { MistralHandler } from './mistral.js' import { OpenAIHandler } from './openai.js' +import { OpenRouterHandler } from './openrouter.js' import { PerplexityHandler } from './perplexity.js' import { InputError, MIMEType } from './types.js' @@ -108,6 +109,16 @@ export const Handlers: Record any> = { models.perplexity.supportsN, models.perplexity.supportsStreaming ), + ['openrouter']: (opts: ConfigOptions) => + new OpenRouterHandler( + opts, + models.openrouter.models, + models.openrouter.supportsJSON, + models.openrouter.supportsImages, + models.openrouter.supportsToolCalls, + models.openrouter.supportsN, + models.openrouter.supportsStreaming + ), } export const getHandler = ( diff --git a/src/models.ts b/src/models.ts index 8578220..ab10424 100644 --- a/src/models.ts +++ b/src/models.ts @@ -96,6 +96,7 @@ export const models = { 'gpt-3.5-turbo-0613', ] as const, supportsN: true, + generateDocs: true, }, ai21: { models: ['jamba-instruct'] as const, @@ -105,6 +106,7 @@ export const models = { supportsImages: [] as const, supportsToolCalls: [] as const, supportsN: true, + generateDocs: true, }, anthropic: { models: [ @@ -140,6 +142,7 @@ export const models = { 'claude-3-haiku-20240307', ] as const, supportsN: false, + generateDocs: true, }, gemini: { models: ['gemini-1.5-pro', 'gemini-1.5-flash', 'gemini-1.0-pro'] as const, @@ -157,6 +160,7 @@ export const models = { 'gemini-1.0-pro', ] as const, supportsN: true, + generateDocs: true, }, cohere: { models: [ @@ -184,6 +188,7 @@ export const models = { 'command-nightly', ] as const, supportsN: false, + generateDocs: true, }, bedrock: { models: [ @@ -248,6 +253,7 @@ export const models = { 
'mistral.mistral-large-2402-v1:0', ] as const, supportsN: false, + generateDocs: true, }, mistral: { models: [ @@ -311,6 +317,7 @@ export const models = { 'codestral-mamba-2407', ] as const, supportsN: false, + generateDocs: true, }, groq: { models: [ @@ -336,6 +343,7 @@ export const models = { supportsImages: [] as const, supportsToolCalls: [] as const, supportsN: false, + generateDocs: true, }, perplexity: { models: [ @@ -361,5 +369,16 @@ export const models = { supportsImages: [] as const, supportsToolCalls: [] as const, supportsN: false, + generateDocs: true, + }, + openrouter: { + models: true, + supportsCompletion: true, + supportsStreaming: true, + supportsJSON: true, + supportsImages: true, + supportsToolCalls: true, + supportsN: true, + generateDocs: false, }, }