diff --git a/nx-dev/data-access-ai/src/lib/data-access-ai.ts b/nx-dev/data-access-ai/src/lib/data-access-ai.ts index a0e8d94ab3f0a2..d10ead503caa1f 100644 --- a/nx-dev/data-access-ai/src/lib/data-access-ai.ts +++ b/nx-dev/data-access-ai/src/lib/data-access-ai.ts @@ -7,13 +7,7 @@ import { createClient, } from '@supabase/supabase-js'; import GPT3Tokenizer from 'gpt3-tokenizer'; -import { - Configuration, - OpenAIApi, - CreateModerationResponse, - CreateEmbeddingResponse, - CreateCompletionResponseUsage, -} from 'openai'; +import { CreateEmbeddingResponse, CreateCompletionResponseUsage } from 'openai'; import { ApplicationError, ChatItem, @@ -37,13 +31,8 @@ const MIN_CONTENT_LENGTH = 50; // This is a temporary solution const MAX_HISTORY_LENGTH = 30; -const openAiKey = process.env['NX_OPENAI_KEY']; const supabaseUrl = process.env['NX_NEXT_PUBLIC_SUPABASE_URL']; const supabaseServiceKey = process.env['NX_SUPABASE_SERVICE_ROLE_KEY']; -const config = new Configuration({ - apiKey: openAiKey, -}); -const openai = new OpenAIApi(config); let chatFullHistory: ChatItem[] = []; @@ -72,7 +61,7 @@ export async function queryAi( } try { - checkEnvVariables(openAiKey, supabaseUrl, supabaseServiceKey); + checkEnvVariables(supabaseUrl, supabaseServiceKey); if (!query) { throw new UserError('Missing query in request data'); @@ -80,9 +69,17 @@ export async function queryAi( // Moderate the content to comply with OpenAI T&C const sanitizedQuery = query.trim(); - const moderationResponse: CreateModerationResponse = await openai - .createModeration({ input: sanitizedQuery }) - .then((res) => res.data); + const moderationResponseObj = await fetch('/api/openai-handler', { + method: 'POST', + body: JSON.stringify({ + action: 'moderation', input: { input: sanitizedQuery }, + }), + headers: { + 'Content-Type': 'application/json', + }, + }); + + const moderationResponse = await moderationResponseObj.json(); const [results] = moderationResponse.results; @@ -109,14 +106,29 @@ export async function queryAi( * input: sanitizedQuery 
+ aiResponse, * }); * - * This costs more tokens, so if we see conts skyrocket we remove it. + * This costs more tokens, so if we see costs skyrocket we remove it. * As it says in the docs, it's a design decision, and it may or may not really improve results. */ - const embeddingResponse = await openai.createEmbedding({ - model: 'text-embedding-ada-002', - input: sanitizedQuery + aiResponse, + const embeddingResponseObj = await fetch('/api/openai-handler', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + action: 'embedding', + input: { + model: 'text-embedding-ada-002', + input: sanitizedQuery + aiResponse, + }, + }), }); + if (!embeddingResponseObj.ok) { + throw new Error( + `API call failed with status ${embeddingResponseObj.status}` + ); + } + + const embeddingResponse = await embeddingResponseObj.json(); + - if (embeddingResponse.status !== 200) { + if (!embeddingResponse.data) { throw new ApplicationError( 'Failed to create embedding for question', @@ -196,13 +208,26 @@ export async function queryAi( chatFullHistory = chatHistory; - const response = await openai.createChatCompletion({ - model: 'gpt-3.5-turbo-16k', - messages: chatGptMessages, - temperature: 0, - stream: false, + const responseObj = await fetch('/api/openai-handler', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + action: 'chatCompletion', + input: { + model: 'gpt-3.5-turbo-16k', + messages: chatGptMessages, + temperature: 0, + stream: false, + }, + }), }); + if (!responseObj.ok) { + throw new Error(`API call failed with status ${responseObj.status}`); + } + + const response = await responseObj.json(); + - if (response.status !== 200) { - const error = response.data; + if (!response.choices) { + const error = response; throw new ApplicationError('Failed to generate completion', error); diff --git a/nx-dev/nx-dev/pages/api/openai-handler.ts b/nx-dev/nx-dev/pages/api/openai-handler.ts new file mode 100644 index 00000000000000..0211dff09d3135 --- /dev/null +++ 
b/nx-dev/nx-dev/pages/api/openai-handler.ts @@ -0,0 +1,59 @@ +import { NextRequest } from 'next/server'; +import { Configuration, OpenAIApi } from 'openai'; + +const openAiKey = process.env['NX_OPENAI_KEY']; +const openAiConfig = new Configuration({ + apiKey: openAiKey, +}); +const openai = new OpenAIApi(openAiConfig); + +export const config = { + runtime: 'edge', +}; + +export default async function handler(request: NextRequest) { + const { action, input } = await request.json(); + + if (action === 'embedding') { + try { + const embeddingResponse = await openai.createEmbedding(input); + return new Response(JSON.stringify(embeddingResponse.data), { + status: embeddingResponse.status, + headers: { + 'content-type': 'application/json', + }, + }); + } catch (e) { + console.error('Error processing the request:', e.message); + return new Response(e.message, { status: 500 }); + } + } else if (action === 'chatCompletion') { + try { + const chatCompletionResponse = await openai.createChatCompletion(input); + return new Response(JSON.stringify(chatCompletionResponse.data), { + status: chatCompletionResponse.status, + headers: { + 'content-type': 'application/json', + }, + }); + } catch (e) { + console.error('Error processing the request:', e.message); + return new Response(e.message, { status: 500 }); + } + } else if (action === 'moderation') { + try { + const moderationResponse = await openai.createModeration(input); + return new Response(JSON.stringify(moderationResponse.data), { + status: moderationResponse.status, + headers: { + 'content-type': 'application/json', + }, + }); + } catch (e) { + console.error('Error processing the request:', e.message); + return new Response(e.message, { status: 500 }); + } + } else { + return new Response('Invalid action', { status: 400 }); + } +} diff --git a/nx-dev/nx-dev/pages/api/openai.ts b/nx-dev/nx-dev/pages/api/openai.ts deleted file mode 100644 index ca505acfae55b0..00000000000000 --- a/nx-dev/nx-dev/pages/api/openai.ts +++ /dev/null @@ -1,23 +0,0 @@ -import type { NextRequest } from 
'next/server'; - -export const config = { - runtime: 'edge', -}; - -export default function handler(request: NextRequest) { - const { searchParams } = new URL(request.url); - - return new Response( - JSON.stringify({ - body: 'Hello world!', - query: searchParams.get('query'), - cookies: request.cookies, - }), - { - status: 200, - headers: { - 'content-type': 'application/json', - }, - } - ); -}