Skip to content

Commit

Permalink
fix: add a toggle to enable/disable structured outputs (#133)
Browse files Browse the repository at this point in the history
  • Loading branch information
stefl authored Sep 16, 2024
1 parent b370823 commit cdf64ef
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 5 deletions.
15 changes: 11 additions & 4 deletions packages/aila/src/core/llm/OpenAIService.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ import { ZodSchema } from "zod";
import { Message } from "../chat";
import { LLMService } from "./LLMService";

// Feature flag: when true, responses are streamed as a single structured
// object (OpenAI structured outputs); otherwise the plain chat-completion
// stream path is used. The strict equality already yields a boolean, so the
// `? true : false` ternary was redundant. Any value other than the exact
// string "true" (including unset) disables the feature.
const STRUCTURED_OUTPUTS_ENABLED =
  process.env.NEXT_PUBLIC_STRUCTURED_OUTPUTS_ENABLED === "true";
export class OpenAIService implements LLMService {
private _openAIProvider: OpenAIProvider;

Expand Down Expand Up @@ -44,15 +46,20 @@ export class OpenAIService implements LLMService {
temperature: number;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
}): Promise<ReadableStreamDefaultReader<string>> {
const { model, messages, temperature, schema, schemaName } = params;
if (!STRUCTURED_OUTPUTS_ENABLED) {
return this.createChatCompletionStream({ model, messages, temperature });
}
const { textStream: stream } = await streamObject({
model: this._openAIProvider(params.model, { structuredOutputs: true }),
model: this._openAIProvider(model, { structuredOutputs: true }),
output: "object",
schema: params.schema,
messages: params.messages.map((m) => ({
schema,
schemaName,
messages: messages.map((m) => ({
role: m.role,
content: m.content,
})),
temperature: params.temperature,
temperature,
});

return stream.getReader();
Expand Down
12 changes: 11 additions & 1 deletion packages/core/src/prompts/lesson-assistant/parts/interactive.ts
Original file line number Diff line number Diff line change
@@ -1,12 +1,22 @@
import { TemplateProps } from "..";

// Feature flag mirroring the one in OpenAIService.ts: selects which response
// format the lesson-assistant prompt instructs the model to use. The strict
// equality already yields a boolean, so the `? true : false` ternary was
// redundant. Any value other than the exact string "true" disables it.
const STRUCTURED_OUTPUTS_ENABLED =
  process.env.NEXT_PUBLIC_STRUCTURED_OUTPUTS_ENABLED === "true";

// Format shown to the model when structured outputs are on: one JSON object.
const responseFormatWithStructuredOutputs = `{"response":"llmMessage", patches:[{},{}...], prompt:{}}`;
// Format when structured outputs are off: JSON Text Sequences, one document
// per record, separated by the ␞ (record separator) character.
const responseFormatWithoutStructuredOutputs = `A series of JSON documents separated using the JSON Text Sequences specification, where each row is separated by the ␞ character and ends with a new line character.
Your response should be a series of patches followed by one and only one prompt to the user.`;

// The format fragment interpolated into the interactive prompt template below.
const responseFormat = STRUCTURED_OUTPUTS_ENABLED
  ? responseFormatWithStructuredOutputs
  : responseFormatWithoutStructuredOutputs;
export const interactive = ({
llmResponseJsonSchema,
}: TemplateProps) => `RULES FOR RESPONDING TO THE USER INTERACTIVELY WHILE CREATING THE LESSON PLAN
Your response to the user should be in the following format.
{"response":"llmMessage", patches:[{},{}...], prompt:{}}
${responseFormat}
"prompt" is a JSON document which represents your message to the user.
"patches" is series of JSON documents that represent the changes you are making to the lesson plan presented in the form of a series of JSON documents separated using the JSON Text Sequences specification.
Expand Down

0 comments on commit cdf64ef

Please sign in to comment.