From cdf64ef72ea78fd9e8404d8ea4e6497cf486f44d Mon Sep 17 00:00:00 2001
From: Stef Lewandowski
Date: Mon, 16 Sep 2024 11:36:37 +0100
Subject: [PATCH] fix: add a toggle to enable/disable structured outputs (#133)

---
 packages/aila/src/core/llm/OpenAIService.ts       | 15 +++++++++++----
 .../prompts/lesson-assistant/parts/interactive.ts | 12 +++++++++++-
 2 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/packages/aila/src/core/llm/OpenAIService.ts b/packages/aila/src/core/llm/OpenAIService.ts
index 58c748d9d..1ee0c4e71 100644
--- a/packages/aila/src/core/llm/OpenAIService.ts
+++ b/packages/aila/src/core/llm/OpenAIService.ts
@@ -7,6 +7,8 @@ import { ZodSchema } from "zod";
 
 import { Message } from "../chat";
 import { LLMService } from "./LLMService";
+const STRUCTURED_OUTPUTS_ENABLED =
+  process.env.NEXT_PUBLIC_STRUCTURED_OUTPUTS_ENABLED === "true" ? true : false;
 
 export class OpenAIService implements LLMService {
   private _openAIProvider: OpenAIProvider;
@@ -44,15 +46,20 @@ export class OpenAIService implements LLMService {
     temperature: number;
     // eslint-disable-next-line @typescript-eslint/no-explicit-any
   }): Promise<ReadableStreamDefaultReader<any>> {
+    const { model, messages, temperature, schema, schemaName } = params;
+    if (!STRUCTURED_OUTPUTS_ENABLED) {
+      return this.createChatCompletionStream({ model, messages, temperature });
+    }
     const { textStream: stream } = await streamObject({
-      model: this._openAIProvider(params.model, { structuredOutputs: true }),
+      model: this._openAIProvider(model, { structuredOutputs: true }),
       output: "object",
-      schema: params.schema,
+      schema,
+      schemaName,
-      messages: params.messages.map((m) => ({
+      messages: messages.map((m) => ({
         role: m.role,
         content: m.content,
       })),
-      temperature: params.temperature,
+      temperature,
     });
     return stream.getReader();
   }
diff --git a/packages/core/src/prompts/lesson-assistant/parts/interactive.ts b/packages/core/src/prompts/lesson-assistant/parts/interactive.ts
index 45cd9a6f9..31b3d99f5 100644
--- a/packages/core/src/prompts/lesson-assistant/parts/interactive.ts
+++ b/packages/core/src/prompts/lesson-assistant/parts/interactive.ts
@@ -1,12 +1,22 @@
 import { TemplateProps } from "..";
 
+const STRUCTURED_OUTPUTS_ENABLED =
+  process.env.NEXT_PUBLIC_STRUCTURED_OUTPUTS_ENABLED === "true" ? true : false;
+
+const responseFormatWithStructuredOutputs = `{"response":"llmMessage", patches:[{},{}...], prompt:{}}`;
+const responseFormatWithoutStructuredOutputs = `A series of JSON documents separated using the JSON Text Sequences specification, where each row is separated by the ␞ character and ends with a new line character.
+Your response should be a series of patches followed by one and only one prompt to the user.`;
+
+const responseFormat = STRUCTURED_OUTPUTS_ENABLED
+  ? responseFormatWithStructuredOutputs
+  : responseFormatWithoutStructuredOutputs;
+
 export const interactive = ({
   llmResponseJsonSchema,
 }: TemplateProps) => `RULES FOR RESPONDING TO THE USER INTERACTIVELY WHILE CREATING THE LESSON PLAN
 Your response to the user should be in the following format.
 
-{"response":"llmMessage", patches:[{},{}...], prompt:{}}
+${responseFormat}
 
 "prompt" is a JSON document which represents your message to the user.
 
 "patches" is series of JSON documents that represent the changes you are making to the lesson plan presented in the form of a series of JSON documents separated using the JSON Text Sequences specification.
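
Note on the fallback path (not part of the diff): when the toggle is off, the
service returns a plain chat-completion stream and the prompt asks the model
for JSON Text Sequences, so the consumer has to split the raw text into JSON
documents itself. Below is a minimal TypeScript sketch of that parsing. It is
an illustration only: the reader type, the generator name, and the choice of
separator are assumptions, since the consumer side is not shown in this patch.
RFC 7464 uses the ASCII Record Separator (0x1E), and the "␞" in the prompt is
the printable symbol for that control character; if the model echoes the glyph
itself (U+241E), the separator constant would need to change.

  // Minimal sketch: split a streamed text response into JSON documents.
  // Assumes the reader yields text chunks (e.g. via a TextDecoderStream).
  const RECORD_SEPARATOR = "\u001E"; // assumed; use "\u241E" if the model emits the ␞ glyph

  async function* readJsonTextSequences(
    reader: ReadableStreamDefaultReader<string>,
  ): AsyncGenerator<unknown> {
    let buffer = "";
    for (;;) {
      const { value, done } = await reader.read();
      if (done) break;
      buffer += value;
      // Everything before a separator is a complete JSON document; the
      // remainder stays buffered until more chunks arrive.
      const records = buffer.split(RECORD_SEPARATOR);
      buffer = records.pop() ?? "";
      for (const record of records) {
        const trimmed = record.trim();
        if (trimmed) yield JSON.parse(trimmed);
      }
    }
    // Flush whatever is left once the stream closes.
    if (buffer.trim()) yield JSON.parse(buffer.trim());
  }

One operational caveat: NEXT_PUBLIC_* variables are inlined by Next.js at
build time, so flipping NEXT_PUBLIC_STRUCTURED_OUTPUTS_ENABLED requires a
rebuild rather than a restart wherever this constant ends up in a client
bundle.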