Skip to content

Commit

Permalink
feat: Support OpenRouter
Browse files Browse the repository at this point in the history
  • Loading branch information
RPate97 committed Jul 17, 2024
1 parent 2fea7e7 commit 50a0584
Show file tree
Hide file tree
Showing 13 changed files with 202 additions and 18 deletions.
5 changes: 5 additions & 0 deletions .changeset/eight-goats-jam.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
'token.js': patch
---

Support OpenRouter
3 changes: 3 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,9 @@ GROQ_API_KEY=
# Mistral
MISTRAL_API_KEY=

# OpenRouter
OPENROUTER_API_KEY=

# Perplexity
PERPLEXITY_API_KEY=

Expand Down
5 changes: 3 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# Token.js

Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. Free and open source. No proxy server required.
Integrate 200+ LLMs with one TypeScript SDK using OpenAI's format. Free and open source. No proxy server required.

## Features

* Use OpenAI's format to call 60+ LLMs from 9 providers.
* Use OpenAI's format to call 200+ LLMs from 10 providers.
* Supports tools, JSON outputs, image inputs, streaming, and more.
* Runs completely on the client side. No proxy server needed.
* Free and open source under MIT.
Expand All @@ -20,6 +20,7 @@ Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. Free and open
* Mistral
* OpenAI
* Perplexity
* OpenRouter

## [Documentation](https://docs.tokenjs.ai/)

Expand Down
40 changes: 38 additions & 2 deletions docs/README.md
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
---
description: >-
Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format.
Integrate 200+ LLMs with one TypeScript SDK using OpenAI's format.
Free and open source. No proxy server required.
---

# Token.js

## Features

* Use OpenAI's format to call 60+ LLMs from 9 providers.
* Use OpenAI's format to call 200+ LLMs from 10 providers.
* Supports tools, JSON outputs, image inputs, streaming, and more.
* Runs completely on the client side. No proxy server needed.
* Free and open source under MIT.
Expand All @@ -24,6 +24,7 @@ description: >-
* Mistral
* OpenAI
* Perplexity
* OpenRouter

## Setup

Expand Down Expand Up @@ -261,6 +262,39 @@ async function main() {
main()
```
{% endtab %}

{% tab title="OpenRouter" %}
{% code title=".env" %}
```bash
OPENROUTER_API_KEY=<openrouter api key>
```
{% endcode %}

```typescript
import { TokenJS } from 'token.js'

// Create the Token.js client
const tokenjs = new TokenJS()

async function main() {
// Create a model response
const completion = await tokenjs.chat.completions.create({
// Specify the provider and model
provider: 'openrouter',
model: 'nvidia/nemotron-4-340b-instruct',
// Define your message
messages: [
{
role: 'user',
content: 'Hello!',
},
],
})
console.log(completion.choices[0])
}
main()
```
{% endtab %}
{% endtabs %}

### Access Credentials
Expand All @@ -284,6 +318,8 @@ GROQ_API_KEY=
MISTRAL_API_KEY=
# Perplexity
PERPLEXITY_API_KEY=
# OpenRouter
OPENROUTER_API_KEY=
# AWS Bedrock
AWS_REGION_NAME=
AWS_ACCESS_KEY_ID=
Expand Down
1 change: 1 addition & 0 deletions docs/SUMMARY.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
* [Groq](providers/groq.md)
* [Mistral](providers/mistral.md)
* [OpenAI](providers/openai.md)
* [OpenRouter](providers/openrouter.md)
* [Perplexity](providers/perplexity.md)
* [Contact Us](contact-us.md)
* [Contributing](https://github.com/token-js/token.js/blob/main/CONTRIBUTING.md)
2 changes: 1 addition & 1 deletion docs/providers/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,4 @@ description: Integrate LLM providers and models using Token.js.

# Providers

<table data-card-size="large" data-view="cards"><thead><tr><th></th><th data-hidden></th><th data-hidden></th><th data-hidden data-card-target data-type="content-ref"></th></tr></thead><tbody><tr><td>AI21</td><td></td><td></td><td><a href="ai21.md">ai21.md</a></td></tr><tr><td>Anthropic</td><td></td><td></td><td><a href="anthropic.md">anthropic.md</a></td></tr><tr><td>Bedrock</td><td></td><td></td><td><a href="bedrock.md">bedrock.md</a></td></tr><tr><td>Cohere</td><td></td><td></td><td><a href="cohere.md">cohere.md</a></td></tr><tr><td>Gemini</td><td></td><td></td><td><a href="gemini.md">gemini.md</a></td></tr><tr><td>Groq</td><td></td><td></td><td><a href="groq.md">groq.md</a></td></tr><tr><td>Mistral</td><td></td><td></td><td><a href="mistral.md">mistral.md</a></td></tr><tr><td>OpenAI</td><td></td><td></td><td><a href="openai.md">openai.md</a></td></tr><tr><td>Perplexity</td><td></td><td></td><td><a href="perplexity.md">perplexity.md</a></td></tr></tbody></table>
<table data-card-size="large" data-view="cards"><thead><tr><th></th><th data-hidden></th><th data-hidden></th><th data-hidden data-card-target data-type="content-ref"></th></tr></thead><tbody><tr><td>AI21</td><td></td><td></td><td><a href="ai21.md">ai21.md</a></td></tr><tr><td>Anthropic</td><td></td><td></td><td><a href="anthropic.md">anthropic.md</a></td></tr><tr><td>Bedrock</td><td></td><td></td><td><a href="bedrock.md">bedrock.md</a></td></tr><tr><td>Cohere</td><td></td><td></td><td><a href="cohere.md">cohere.md</a></td></tr><tr><td>Gemini</td><td></td><td></td><td><a href="gemini.md">gemini.md</a></td></tr><tr><td>Groq</td><td></td><td></td><td><a href="groq.md">groq.md</a></td></tr><tr><td>Mistral</td><td></td><td></td><td><a href="mistral.md">mistral.md</a></td></tr><tr><td>OpenAI</td><td></td><td></td><td><a href="openai.md">openai.md</a></td></tr><tr><td>Perplexity</td><td></td><td></td><td><a href="perplexity.md">perplexity.md</a></td></tr><tr><td>OpenRouter</td><td></td><td></td><td><a href="openrouter.md">openrouter.md</a></td></tr></tbody></table>
44 changes: 44 additions & 0 deletions docs/providers/openrouter.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# OpenRouter

[Get an OpenRouter API key](https://openrouter.ai/settings/keys)

## Usage

{% code title=".env" %}
```bash
OPENROUTER_API_KEY=
```
{% endcode %}

```typescript
import { TokenJS } from 'token.js'

// Create the Token.js client
const tokenjs = new TokenJS()

async function main() {
// Create a model response
const completion = await tokenjs.chat.completions.create({
// Specify the provider and model
provider: 'openrouter',
model: 'nvidia/nemotron-4-340b-instruct',
// Define your message
messages: [
{
role: 'user',
content: 'Hello!',
},
],
})
console.log(completion.choices[0])
}
main()
```

## Compatibility
OpenRouter supports more than 180 models from a variety of providers which may have varying feature support. We recommend reviewing the OpenRouter and provider documentation for specific compatibility information.

## Additional Resources

* [Supported Models](https://openrouter.ai/models)
* [OpenRouter Documentation](https://openrouter.ai/docs/quick-start)
16 changes: 15 additions & 1 deletion scripts/docs/generate.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,22 @@ const generateCompatibility = async () => {

let pushHeader = true

if (compatibility.generateDocs === false) {
continue
}

if (typeof compatibility.models === 'boolean') {
throw new Error(
'Auto-generating model compatibility tables is not supported for providers that do not have explicitly defined models.'
)
}

for (const model of compatibility.models) {
const header: string[] = []
const features: string[] = [model]
for (const [feature, models] of Object.entries(compatibility)) {
if (feature === 'generateDocs') continue

header.push(TableDisplayNames[feature])

if (feature === 'models') continue
Expand All @@ -40,7 +52,9 @@ const generateCompatibility = async () => {
const mkdTable = markdownTable(table)
const providerDocs = readFileSync(`docs/providers/${provider}.md`, 'utf-8')
const docsSplit = providerDocs.split('<!-- compatibility -->')
const afterCompatibilitySplit = docsSplit[1].split('<!-- end compatibility -->')
const afterCompatibilitySplit = docsSplit[1].split(
'<!-- end compatibility -->'
)

const newDocs = `${docsSplit[0]}<!-- compatibility -->\n## Supported Models\n\n${mkdTable}\n\n${legend}<!-- end compatibility -->${afterCompatibilitySplit[1]}`

Expand Down
3 changes: 3 additions & 0 deletions src/chat/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ export type BedrockModel = (typeof models.bedrock.models)[number]
export type MistralModel = (typeof models.mistral.models)[number]
export type PerplexityModel = (typeof models.perplexity.models)[number]
export type GroqModel = (typeof models.groq.models)[number]
export type OpenRouterModel = string

export type LLMChatModel =
| OpenAIModel
Expand All @@ -28,6 +29,7 @@ export type LLMChatModel =
| MistralModel
| PerplexityModel
| GroqModel
| OpenRouterModel

export type LLMProvider = keyof typeof models

Expand All @@ -41,6 +43,7 @@ type ProviderModelMap = {
mistral: MistralModel
perplexity: PerplexityModel
groq: GroqModel
openrouter: OpenRouterModel
}

type CompletionBase<P extends LLMProvider> = Pick<
Expand Down
28 changes: 16 additions & 12 deletions src/handlers/base.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,21 +8,21 @@ import { InputError } from './types.js'

export abstract class BaseHandler<T extends LLMChatModel> {
opts: ConfigOptions
protected models: readonly T[]
protected supportsJSON: readonly T[]
protected supportsImages: readonly T[]
protected supportsToolCalls: readonly T[]
protected models: readonly T[] | boolean
protected supportsJSON: readonly T[] | boolean
protected supportsImages: readonly T[] | boolean
protected supportsToolCalls: readonly T[] | boolean
protected supportsN: readonly T[] | boolean
protected supportsStreamingMessages: readonly T[]
protected supportsStreamingMessages: readonly T[] | boolean

constructor(
opts: ConfigOptions,
models: readonly T[],
supportsJSON: readonly T[],
supportsImages: readonly T[],
supportsToolCalls: readonly T[],
models: readonly T[] | boolean,
supportsJSON: readonly T[] | boolean,
supportsImages: readonly T[] | boolean,
supportsToolCalls: readonly T[] | boolean,
suportsN: readonly T[] | boolean,
supportsStreamingMessages: readonly T[]
supportsStreamingMessages: readonly T[] | boolean
) {
this.opts = opts
this.models = models
Expand All @@ -38,6 +38,10 @@ export abstract class BaseHandler<T extends LLMChatModel> {
): Promise<CompletionResponse | StreamCompletionResponse>

protected validateInputs(body: CompletionParams): void {
// We remove the provider key from the body just in case the provider does validation which errors due to it.
// This can only occur on OpenAI compatible providers, but we do it for all providers for consistency.
delete (body as any).provider

if (!this.isSupportedModel(body.model)) {
throw new InputError(`Invalid 'model' field: ${body.model}.`)
}
Expand Down Expand Up @@ -149,8 +153,8 @@ export abstract class BaseHandler<T extends LLMChatModel> {

// We make this public so that we can mock it in tests, which is fine because the `BaseHandler`
// class isn't exposed to the user.
public isSupportedModel(model: LLMChatModel): model is T {
return this.models.includes(model as T)
public isSupportedModel(model: string): model is T {
return this.isSupportedFeature(this.models, model as T)
}

protected supportsJSONMode(model: T): boolean {
Expand Down
43 changes: 43 additions & 0 deletions src/handlers/openrouter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
import OpenAI from 'openai'

import { OpenRouterModel, ProviderCompletionParams } from '../chat/index.js'
import {
CompletionResponse,
StreamCompletionResponse,
} from '../userTypes/index.js'
import { BaseHandler } from './base.js'
import { InputError } from './types.js'

// OpenRouter exposes an OpenAI-compatible API, so we reuse the OpenAI SDK for
// this handler rather than maintaining a bespoke client.
export class OpenRouterHandler extends BaseHandler<OpenRouterModel> {
  validateInputs(body: ProviderCompletionParams<'openrouter'>): void {
    super.validateInputs(body)
  }

  /**
   * Creates a chat completion via OpenRouter's OpenAI-compatible endpoint.
   *
   * Resolves the API key from the client options, falling back to the
   * OPENROUTER_API_KEY environment variable, and throws an InputError if
   * neither is provided.
   *
   * @param body - The completion parameters for the 'openrouter' provider.
   * @returns The completion response, or a stream when streaming is requested.
   * @throws InputError if no API key is available or the inputs are invalid.
   */
  async create(
    body: ProviderCompletionParams<'openrouter'>
  ): Promise<CompletionResponse | StreamCompletionResponse> {
    this.validateInputs(body)

    const apiKey = this.opts.apiKey ?? process.env.OPENROUTER_API_KEY
    // Fail fast before constructing the client with an undefined key.
    if (apiKey === undefined) {
      throw new InputError(
        'API key is required for OpenRouter, define OPENROUTER_API_KEY in your environment or specify the apiKey option.'
      )
    }

    const client = new OpenAI({
      apiKey,
      baseURL: 'https://openrouter.ai/api/v1',
      // Optional attribution headers recommended by OpenRouter for app
      // rankings — NOTE(review): 'HTTP-Referer' is typically a full URL;
      // confirm whether 'https://docs.tokenjs.ai' is intended here.
      defaultHeaders: {
        'HTTP-Referer': 'docs.tokenjs.ai',
        'X-Title': 'Token.js',
      },
    })

    return client.chat.completions.create(body)
  }
}
11 changes: 11 additions & 0 deletions src/handlers/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import { GeminiHandler } from './gemini.js'
import { GroqHandler } from './groq.js'
import { MistralHandler } from './mistral.js'
import { OpenAIHandler } from './openai.js'
import { OpenRouterHandler } from './openrouter.js'
import { PerplexityHandler } from './perplexity.js'
import { InputError, MIMEType } from './types.js'

Expand Down Expand Up @@ -108,6 +109,16 @@ export const Handlers: Record<string, (opts: ConfigOptions) => any> = {
models.perplexity.supportsN,
models.perplexity.supportsStreaming
),
['openrouter']: (opts: ConfigOptions) =>
new OpenRouterHandler(
opts,
models.openrouter.models,
models.openrouter.supportsJSON,
models.openrouter.supportsImages,
models.openrouter.supportsToolCalls,
models.openrouter.supportsN,
models.openrouter.supportsStreaming
),
}

export const getHandler = (
Expand Down
Loading

0 comments on commit 50a0584

Please sign in to comment.