Configurable API Host
Closes #32. Enables users and deployments to change the host to which
OpenAI API calls are directed. This supports projects such as
[Helicone](https://www.helicone.ai/) (LLM ops observability) for
tracking prompt/response quality in real time.

Configuration:
- User: App > Settings > Advanced > API host (e.g. "oai.hconeai.com")
- Deployment: set the 'OPENAI_API_HOST=...' environment variable

The user setting takes precedence over the deployment setting, which in
turn takes precedence over the default api.openai.com. Real-time
switching in the chat app works well.

Note: the Helicone team is fixing dashboard reporting for 'streaming'
over the /v1/chat/completions endpoint.
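
For reference, the server resolves the effective host with user-over-deployment-over-default precedence and strips any scheme. A minimal TypeScript sketch mirroring the pages/api/chat.ts change below (the standalone function name is illustrative):

// Precedence: user-supplied host, then OPENAI_API_HOST, then the default.
// 'https://' is stripped so either 'host' or 'https://host' can be pasted.
function resolveApiHost(userApiHost?: string): string {
  return (userApiHost || process.env.OPENAI_API_HOST || 'api.openai.com')
    .replaceAll('https://', '');
}

resolveApiHost('oai.hconeai.com');         // 'oai.hconeai.com' (user setting wins)
resolveApiHost('https://oai.hconeai.com'); // 'oai.hconeai.com' (scheme stripped)
resolveApiHost();                          // OPENAI_API_HOST if set, else 'api.openai.com'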
enricoros committed Mar 31, 2023
1 parent 03fb926 commit 7154db7
Showing 6 changed files with 51 additions and 15 deletions.
7 changes: 5 additions & 2 deletions .env.example
@@ -1,2 +1,5 @@
-# Set the backend API key for OpenAI, so that users don't have to provide one
-OPENAI_API_KEY=
+# [Recommended for local deployments] Backend API key for OpenAI, so that users don't have to provide one
+OPENAI_API_KEY=
+
+# [Not needed] Set the backend host for the OpenAI API, to enable platforms such as Helicone
+OPENAI_API_HOST=
2 changes: 1 addition & 1 deletion README.md
@@ -8,7 +8,7 @@ designed to be easy to use, customize, and extend. We encourage you to contribut
 
 Or click fork & run on Vercel
 
-[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fnextjs-chatgpt-app&env=OPENAI_API_KEY&envDescription=API%20Keys%20needed%20in%20your%20deployment)
+[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fnextjs-chatgpt-app&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
 
 ## Roadmap 🛣️
 
10 changes: 6 additions & 4 deletions components/Chat.tsx
@@ -31,7 +31,8 @@ function createDMessage(role: DMessage['role'], text: string): DMessage {
  */
 async function _streamAssistantResponseMessage(
   conversationId: string, history: DMessage[],
-  apiKey: string, chatModelId: string, modelTemperature: number, modelMaxTokens: number, abortSignal: AbortSignal,
+  apiKey: string | undefined, apiHost: string | undefined,
+  chatModelId: string, modelTemperature: number, modelMaxTokens: number, abortSignal: AbortSignal,
   addMessage: (conversationId: string, message: DMessage) => void,
   editMessage: (conversationId: string, messageId: string, updatedMessage: Partial<DMessage>) => void,
 ) {
@@ -44,7 +45,8 @@ async function _streamAssistantResponseMessage(
   const messageId = assistantMessage.id;
 
   const payload: ApiChatInput = {
-    apiKey: apiKey,
+    ...(apiKey ? { apiKey } : {}),
+    ...(apiHost ? { apiHost } : {}),
     model: chatModelId,
     messages: history.map(({ role, text }) => ({
       role: role,
@@ -143,8 +145,8 @@ export function Chat(props: { onShowSettings: () => void, sx?: SxProps }) {
     const controller = new AbortController();
     setAbortController(controller);
 
-    const { apiKey, modelTemperature, modelMaxTokens } = useSettingsStore.getState();
-    await _streamAssistantResponseMessage(conversationId, replaceHistory, apiKey, chatModelId, modelTemperature, modelMaxTokens, controller.signal, addMessage, editMessage);
+    const { apiKey, modelTemperature, modelMaxTokens, modelApiHost } = useSettingsStore.getState();
+    await _streamAssistantResponseMessage(conversationId, replaceHistory, apiKey, modelApiHost, chatModelId, modelTemperature, modelMaxTokens, controller.signal, addMessage, editMessage);
 
     // clear to send, again
     setAbortController(null);
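
The conditional spreads above keep unset optional fields off the payload object entirely, rather than assigning them undefined. A standalone sketch of the idiom (values are hypothetical):

// Spread a one-key object when the value is truthy, otherwise spread nothing.
const apiKey: string | undefined = undefined;
const apiHost: string | undefined = 'oai.hconeai.com';

const payload = {
  ...(apiKey ? { apiKey } : {}),   // apiKey is undefined, so the key is omitted
  ...(apiHost ? { apiHost } : {}), // apiHost is set, so the key is included
  model: 'gpt-4',
};

JSON.stringify(payload); // '{"apiHost":"oai.hconeai.com","model":"gpt-4"}'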
24 changes: 21 additions & 3 deletions components/SettingsModal.tsx
@@ -1,7 +1,8 @@
 import * as React from 'react';
 import { shallow } from 'zustand/shallow';
 
-import { Box, Button, Grid, IconButton, Input, Modal, ModalClose, ModalDialog, Slider, Stack, Typography } from '@mui/joy';
+import { Box, Button, Grid, IconButton, Input, Modal, ModalClose, ModalDialog, Slider, Stack, Tooltip, Typography } from '@mui/joy';
+import InfoIcon from '@mui/icons-material/Info';
 import KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';
 import KeyboardArrowUpIcon from '@mui/icons-material/KeyboardArrowUp';
 
@@ -25,10 +26,11 @@ export function SettingsModal({ open, onClose }: { open: boolean, onClose: () =>
   const [showAdvanced, setShowAdvanced] = React.useState(false);
 
   // global state
-  const { apiKey, setApiKey, modelTemperature, setModelTemperature, modelMaxTokens, setModelMaxTokens } = useSettingsStore(state => ({
+  const { apiKey, setApiKey, modelTemperature, setModelTemperature, modelMaxTokens, setModelMaxTokens, modelApiHost, setModelApiHost } = useSettingsStore(state => ({
     apiKey: state.apiKey, setApiKey: state.setApiKey,
     modelTemperature: state.modelTemperature, setModelTemperature: state.setModelTemperature,
     modelMaxTokens: state.modelMaxTokens, setModelMaxTokens: state.setModelMaxTokens,
+    modelApiHost: state.modelApiHost, setModelApiHost: state.setModelApiHost,
   }), shallow);
 
   const handleApiKeyChange = (e: React.ChangeEvent) =>
@@ -41,6 +43,8 @@ export function SettingsModal({ open, onClose }: { open: boolean, onClose: () =>
 
   const handleMaxTokensChange = (event: Event, newValue: number | number[]) => setModelMaxTokens(newValue as number);
 
+  const handleModelApiHostChange = (e: React.ChangeEvent) => setModelApiHost((e.target as HTMLInputElement).value);
+
   const needsApiKey = !!process.env.REQUIRE_USER_API_KEYS;
   const isValidKey = isValidOpenAIApiKey(apiKey);
 
@@ -95,7 +99,7 @@ export function SettingsModal({ open, onClose }: { open: boolean, onClose: () =>
           </Grid>
           <Grid xs={6} md={5} xl={3}>
             <Typography level='body2' sx={{ textAlign: 'right', mr: 1 }}>
-              Max. tokens
+              Max tokens
             </Typography>
           </Grid>
           <Grid xs={6} md={7} xl={7}>
@@ -107,6 +111,20 @@ export function SettingsModal({ open, onClose }: { open: boolean, onClose: () =>
               sx={{ py: 1, mt: 1.1 }}
             />
           </Grid>
+          <Grid xs={6} md={5} xl={3}>
+            <Typography level='body2' sx={{ textAlign: 'right', mr: 1 }}>
+              API host
+              <Tooltip title='Change API host for compatibility with services like Helicone' variant='solid'>
+                <InfoIcon sx={{ ml: 1, cursor: 'pointer' }} />
+              </Tooltip>
+            </Typography>
+          </Grid>
+          <Grid xs={6} md={7} xl={7}>
+            <Input
+              variant='plain' placeholder='api.openai.com'
+              value={modelApiHost} onChange={handleModelApiHostChange}
+            />
+          </Grid>
         </Grid>
         <Typography level='body2' sx={{ mb: 1 }}>
           Adjust only if you&apos;re familiar with these settings
6 changes: 6 additions & 0 deletions lib/store.ts
@@ -20,6 +20,9 @@ interface SettingsStore {
   modelMaxTokens: number;
   setModelMaxTokens: (modelMaxTokens: number) => void;
 
+  modelApiHost: string;
+  setModelApiHost: (modelApiHost: string) => void;
+
   showSystemMessages: boolean;
   setShowSystemMessages: (showSystemMessages: boolean) => void;
 }
@@ -45,6 +48,9 @@ export const useSettingsStore = create<SettingsStore>()(
       modelMaxTokens: 2048,
      setModelMaxTokens: (modelMaxTokens: number) => set({ modelMaxTokens }),
 
+      modelApiHost: '',
+      setModelApiHost: (modelApiHost: string) => set({ modelApiHost }),
+
       showSystemMessages: false,
       setShowSystemMessages: (showSystemMessages: boolean) => set({ showSystemMessages }),
     }),
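
Because the new field lives in the existing zustand settings store, it can be read reactively (as SettingsModal.tsx does via its selector) or imperatively outside React (as Chat.tsx does right before sending). A minimal sketch, with the import path assumed:

import { useSettingsStore } from '../lib/store';

// Imperative read outside a component, e.g. just before issuing a request:
const { modelApiHost } = useSettingsStore.getState();

// Imperative write, e.g. from a test or a settings-import routine:
useSettingsStore.getState().setModelApiHost('oai.hconeai.com');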
17 changes: 12 additions & 5 deletions pages/api/chat.ts
@@ -43,7 +43,7 @@ namespace OpenAIAPI.Chat {
 
 }
 
-async function OpenAIStream(apiKey: string, payload: Omit<OpenAIAPI.Chat.CompletionsRequest, 'stream' | 'n'>, signal: AbortSignal): Promise<ReadableStream> {
+async function OpenAIStream(apiKey: string, apiHost: string, payload: Omit<OpenAIAPI.Chat.CompletionsRequest, 'stream' | 'n'>, signal: AbortSignal): Promise<ReadableStream> {
   const encoder = new TextEncoder();
   const decoder = new TextDecoder();
 
@@ -54,7 +54,7 @@ async function OpenAIStream(apiKey: string, payload: Omit<OpenAIAPI.Chat.Complet
   };
 
   try {
-    const res = await fetch('https://api.openai.com/v1/chat/completions', {
+    const res = await fetch(`https://${apiHost}/v1/chat/completions`, {
       headers: {
         'Content-Type': 'application/json',
         Authorization: `Bearer ${apiKey}`,
@@ -143,7 +143,12 @@ async function OpenAIStream(apiKey: string, payload: Omit<OpenAIAPI.Chat.Complet
      });
    } else {
      console.error('Fetch request failed:', error);
-      return new ReadableStream(); // Return an empty ReadableStream
+      return new ReadableStream({
+        start(controller) {
+          controller.enqueue(encoder.encode(`{"model":"network error"}Network issue: ${error?.cause?.message}`));
+          controller.close();
+        },
+      });
    }
 
 }
@@ -155,6 +160,7 @@ async function OpenAIStream(apiKey: string, payload: Omit<OpenAIAPI.Chat.Complet
 
 export interface ApiChatInput {
   apiKey?: string;
+  apiHost?: string;
   model: string;
   messages: OpenAIAPI.Chat.CompletionMessage[];
   temperature?: number;
@@ -172,14 +178,15 @@ export interface ApiChatFirstOutput {
 
 export default async function handler(req: NextRequest): Promise<Response> {
 
-  const { apiKey: userApiKey, model, messages, temperature = 0.5, max_tokens = 2048 } = await req.json() as ApiChatInput;
+  const { apiKey: userApiKey, apiHost: userApiHost, model, messages, temperature = 0.5, max_tokens = 2048 } = await req.json() as ApiChatInput;
+  const apiHost = (userApiHost || process.env.OPENAI_API_HOST || 'api.openai.com').replaceAll('https://', '');
   const apiKey = userApiKey || process.env.OPENAI_API_KEY || '';
   if (!apiKey)
     return new Response('Error: missing OpenAI API Key. Add it on the client side (Settings icon) or server side (your deployment).', { status: 400 });
 
   try {
 
-    const stream: ReadableStream = await OpenAIStream(apiKey, {
+    const stream: ReadableStream = await OpenAIStream(apiKey, apiHost, {
       model,
       messages,
       temperature,

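With the optional apiHost now on ApiChatInput, a client can redirect a single request without redeploying. A hedged sketch of calling the endpoint directly (the message shape is assumed to follow the OpenAI chat format; the host value is just an example):

const input = {
  apiHost: 'oai.hconeai.com', // optional; falls back to OPENAI_API_HOST, then api.openai.com
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'Hello!' }],
};

const res = await fetch('/api/chat', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(input),
});
// The body streams tokens; on a network failure the server now streams a small
// '{"model":"network error"}' prefix followed by the error message (see the
// OpenAIStream change above).
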
1 comment on commit 7154db7

@vercel vercel bot commented on 7154db7 Mar 31, 2023

Successfully deployed to the following URLs:

nextjs-chatgpt-app – ./

nextjs-chatgpt-app-git-main-enricoros.vercel.app
gpt4.enrico.ai
nextjs-chatgpt-app-enricoros.vercel.app
