diff --git a/src/config/modelProviders/fireworksai.ts b/src/config/modelProviders/fireworksai.ts
index bb0301296d57..8f3781173d4a 100644
--- a/src/config/modelProviders/fireworksai.ts
+++ b/src/config/modelProviders/fireworksai.ts
@@ -5,13 +5,15 @@ import { ModelProviderCard } from '@/types/llm';
 const FireworksAI: ModelProviderCard = {
   chatModels: [
     {
+      description: 'Fireworks latest and most performant function-calling model. Firefunction-v2 is based on Llama-3 and trained to excel at function-calling as well as chat and instruction-following. See blog post for more details https://fireworks.ai/blog/firefunction-v2-launch-post',
       displayName: 'Firefunction V2',
       enabled: true,
-      //functionCall: true,
+      functionCall: true,
       id: 'accounts/fireworks/models/firefunction-v2',
       tokens: 8192,
     },
     {
+      description: 'Vision-language model allowing both image and text as inputs (single image is recommended), trained on OSS model generated training data and open sourced on huggingface at fireworks-ai/FireLLaVA-13b',
       displayName: 'FireLLaVA-13B',
       enabled: true,
       functionCall: false,
diff --git a/src/libs/agent-runtime/fireworksai/index.test.ts b/src/libs/agent-runtime/fireworksai/index.test.ts
index 16c4c4e64065..cbd740279a38 100644
--- a/src/libs/agent-runtime/fireworksai/index.test.ts
+++ b/src/libs/agent-runtime/fireworksai/index.test.ts
@@ -8,7 +8,6 @@ import {
   ModelProvider,
 } from '@/libs/agent-runtime';
 
-import * as debugStreamModule from '../utils/debugStream';
 import { LobeFireworksAI } from './index';
 
 const provider = ModelProvider.FireworksAI;
@@ -209,47 +208,5 @@ describe('LobeFireworksAI', () => {
        }
      });
    });
-
-    describe('DEBUG', () => {
-      it('should call debugStream and return StreamingTextResponse when DEBUG_FIREWORKSAI_CHAT_COMPLETION is 1', async () => {
-        // Arrange
-        const mockProdStream = new ReadableStream() as any; // mocked prod stream
-        const mockDebugStream = new ReadableStream({
-          start(controller) {
-            controller.enqueue('Debug stream content');
-            controller.close();
-          },
-        }) as any;
-        mockDebugStream.toReadableStream = () => mockDebugStream; // attach a toReadableStream method
-
-        // mock the return value of chat.completions.create, including a mocked tee method
-        (instance['client'].chat.completions.create as Mock).mockResolvedValue({
-          tee: () => [mockProdStream, { toReadableStream: () => mockDebugStream }],
-        });
-
-        // save the original environment variable value
-        const originalDebugValue = process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION;
-
-        // mock the environment variable
-        process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION = '1';
-        vi.spyOn(debugStreamModule, 'debugStream').mockImplementation(() => Promise.resolve());
-
-        // run the test
-        // run your test function, making sure it calls debugStream when the condition is met
-        // hypothetical test invocation; adjust to your actual setup
-        await instance.chat({
-          messages: [{ content: 'Hello', role: 'user' }],
-          model: 'accounts/fireworks/models/firefunction-v2',
-          stream: true,
-          temperature: 0,
-        });
-
-        // verify that debugStream was called
-        expect(debugStreamModule.debugStream).toHaveBeenCalled();
-
-        // restore the original environment variable value
-        process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION = originalDebugValue;
-      });
-    });
  });
});
diff --git a/src/libs/agent-runtime/fireworksai/index.ts b/src/libs/agent-runtime/fireworksai/index.ts
index 3f3fe872d8fb..dbca228437c9 100644
--- a/src/libs/agent-runtime/fireworksai/index.ts
+++ b/src/libs/agent-runtime/fireworksai/index.ts
@@ -1,8 +1,18 @@
+import OpenAI from 'openai';
+
 import { ModelProvider } from '../types';
 import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';
 
 export const LobeFireworksAI = LobeOpenAICompatibleFactory({
   baseURL: 'https://api.fireworks.ai/inference/v1',
+  chatCompletion: {
+    handlePayload: (payload) => {
+      return {
+        ...payload,
+        stream: false,
+      } as unknown as OpenAI.ChatCompletionCreateParamsStreaming;
+    },
+  },
   debug: {
     chatCompletion: () => process.env.DEBUG_FIREWORKSAI_CHAT_COMPLETION === '1',
   },
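
A note on the handlePayload override added in the last hunk: it rewrites every chat payload to stream: false before it reaches the OpenAI-compatible client, and the "as unknown as OpenAI.ChatCompletionCreateParamsStreaming" double cast exists only to satisfy the factory's streaming-typed signature while Fireworks actually receives a non-streaming request. A minimal TypeScript sketch of that behavior, assuming a simplified payload shape (SketchPayload below is an illustrative stand-in, not the runtime's real payload type):

import OpenAI from 'openai';

// Illustrative stand-in for the runtime's chat payload type (assumption,
// not the actual type used by the agent-runtime).
interface SketchPayload {
  messages: { content: string; role: 'assistant' | 'system' | 'user' }[];
  model: string;
  stream?: boolean;
  temperature?: number;
}

// Mirrors the handlePayload in the diff: whatever the caller requested,
// the payload handed to the Fireworks endpoint is pinned to stream: false.
const handlePayload = (payload: SketchPayload) =>
  ({
    ...payload,
    stream: false,
  }) as unknown as OpenAI.ChatCompletionCreateParamsStreaming;

const forced = handlePayload({
  messages: [{ content: 'Hello', role: 'user' }],
  model: 'accounts/fireworks/models/firefunction-v2',
  stream: true, // the caller asks for streaming...
  temperature: 0,
});

console.log(forced.stream); // runtime value is false: the provider request never streams

The cast through unknown is the escape hatch that keeps the factory's streaming-oriented types compiling even though the request it sends is non-streaming; this also explains why the DEBUG streaming test was removed in the second hunk.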