diff --git a/src/app/api/config/__snapshots__/route.test.ts.snap b/src/app/api/config/__snapshots__/route.test.ts.snap new file mode 100644 index 000000000000..72ba47643538 --- /dev/null +++ b/src/app/api/config/__snapshots__/route.test.ts.snap @@ -0,0 +1,214 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`GET /api/config > Model Provider env > CUSTOM_MODELS > custom deletion, addition, and renaming of models 1`] = ` +[ + { + "displayName": "llama", + "enabled": true, + "functionCall": true, + "id": "llama", + "vision": true, + }, + { + "displayName": "claude-2", + "enabled": true, + "functionCall": true, + "id": "claude-2", + "vision": true, + }, + { + "displayName": "gpt-4-32k", + "enabled": true, + "functionCall": true, + "id": "gpt-4-0125-preview", + "tokens": 128000, + }, +] +`; + +exports[`GET /api/config > Model Provider env > OPENAI_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = ` +[ + { + "displayName": "llama", + "enabled": true, + "functionCall": true, + "id": "llama", + "vision": true, + }, + { + "displayName": "claude-2", + "enabled": true, + "functionCall": true, + "id": "claude-2", + "vision": true, + }, + { + "displayName": "gpt-4-32k", + "enabled": true, + "functionCall": true, + "id": "gpt-4-0125-preview", + "tokens": 128000, + }, +] +`; + +exports[`GET /api/config > Model Provider env > OPENAI_MODEL_LIST > should work correct with gpt-4 1`] = ` +[ + { + "displayName": "GPT-3.5 Turbo (1106)", + "enabled": true, + "functionCall": true, + "id": "gpt-3.5-turbo-1106", + "tokens": 16385, + }, + { + "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务", + "displayName": "GPT-3.5 Turbo", + "enabled": true, + "functionCall": true, + "id": "gpt-3.5-turbo", + "tokens": 16385, + }, + { + "displayName": "GPT-3.5 Turbo 16K", + "enabled": true, + "id": "gpt-3.5-turbo-16k", + "tokens": 16385, + }, + { + "displayName": "GPT-4", + "enabled": true, + "functionCall": true, + "id": "gpt-4", + "tokens": 8192, + }, + { + 
"displayName": "GPT-4 32K", + "enabled": true, + "functionCall": true, + "id": "gpt-4-32k", + "tokens": 32768, + }, + { + "displayName": "GPT-4 Turbo Preview (1106)", + "enabled": true, + "functionCall": true, + "id": "gpt-4-1106-preview", + "tokens": 128000, + }, + { + "description": "GPT-4 视觉预览版,支持视觉任务", + "displayName": "GPT-4 Turbo Vision Preview", + "enabled": true, + "id": "gpt-4-vision-preview", + "tokens": 128000, + "vision": true, + }, +] +`; + +exports[`GET /api/config > Model Provider env > OPENROUTER_MODEL_LIST > custom deletion, addition, and renaming of models 1`] = ` +[ + { + "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务", + "displayName": "GPT-3.5 Turbo", + "enabled": true, + "functionCall": true, + "id": "gpt-3.5-turbo", + "tokens": 16385, + }, + { + "displayName": "GPT-3.5 Turbo (0125)", + "functionCall": true, + "id": "gpt-3.5-turbo-0125", + "tokens": 16385, + }, + { + "displayName": "GPT-3.5 Turbo (1106)", + "functionCall": true, + "id": "gpt-3.5-turbo-1106", + "tokens": 16385, + }, + { + "displayName": "GPT-3.5 Turbo Instruct", + "id": "gpt-3.5-turbo-instruct", + "tokens": 4096, + }, + { + "displayName": "GPT-3.5 Turbo 16K", + "id": "gpt-3.5-turbo-16k", + "tokens": 16385, + }, + { + "displayName": "GPT-3.5 Turbo (0613)", + "id": "gpt-3.5-turbo-0613", + "legacy": true, + "tokens": 4096, + }, + { + "displayName": "GPT-3.5 Turbo 16K (0613)", + "id": "gpt-3.5-turbo-16k-0613", + "legacy": true, + "tokens": 4096, + }, + { + "displayName": "GPT-4 Turbo Preview", + "enabled": true, + "functionCall": true, + "id": "gpt-4-turbo-preview", + "tokens": 128000, + }, + { + "displayName": "GPT-4 Turbo Preview (0125)", + "functionCall": true, + "id": "gpt-4-0125-preview", + "tokens": 128000, + }, + { + "description": "GPT-4 视觉预览版,支持视觉任务", + "displayName": "GPT-4 Turbo Vision Preview", + "enabled": true, + "id": "gpt-4-vision-preview", + "tokens": 128000, + "vision": true, + }, + { + "displayName": "GPT-4 Turbo Preview (1106)", + "functionCall": true, + "id": 
"gpt-4-1106-preview", + "tokens": 128000, + }, + { + "displayName": "GPT-4", + "functionCall": true, + "id": "gpt-4", + "tokens": 8192, + }, + { + "displayName": "GPT-4 (0613)", + "functionCall": true, + "id": "gpt-4-0613", + "tokens": 8192, + }, + { + "displayName": "GPT-4 32K", + "functionCall": true, + "id": "gpt-4-32k", + "tokens": 32768, + }, + { + "displayName": "GPT-4 32K (0613)", + "functionCall": true, + "id": "gpt-4-32k-0613", + "tokens": 32768, + }, + { + "displayName": "GPT-4 ALL", + "files": true, + "functionCall": true, + "id": "gpt-4-all", + "tokens": 32768, + "vision": true, + }, +] +`; diff --git a/src/app/api/config/route.test.ts b/src/app/api/config/route.test.ts new file mode 100644 index 000000000000..0440ab19f691 --- /dev/null +++ b/src/app/api/config/route.test.ts @@ -0,0 +1,172 @@ +import { beforeEach, describe, expect, it, vi } from 'vitest'; + +import { OllamaProvider, OpenRouterProvider, TogetherAIProvider } from '@/config/modelProviders'; +import { getServerConfig } from '@/config/server'; +import { GlobalServerConfig } from '@/types/settings'; + +import { GET } from './route'; + +beforeEach(() => { + vi.resetAllMocks(); +}); + +describe('GET /api/config', () => { + describe('Model Provider env', () => { + describe('OPENAI_MODEL_LIST', () => { + it('custom deletion, addition, and renaming of models', async () => { + process.env.OPENAI_MODEL_LIST = + '-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k'; + + const response = await GET(); + + // Assert + expect(response).toBeInstanceOf(Response); + expect(response.status).toBe(200); + + const jsonResponse: GlobalServerConfig = await response.json(); + + const result = jsonResponse.languageModel?.openai?.serverModelCards; + + expect(result).toMatchSnapshot(); + process.env.OPENAI_MODEL_LIST = ''; + }); + + it('should work correct with gpt-4', async () => { + process.env.OPENAI_MODEL_LIST = + 
'-all,+gpt-3.5-turbo-1106,+gpt-3.5-turbo,+gpt-3.5-turbo-16k,+gpt-4,+gpt-4-32k,+gpt-4-1106-preview,+gpt-4-vision-preview'; + + const response = await GET(); + const jsonResponse: GlobalServerConfig = await response.json(); + + const result = jsonResponse.languageModel?.openai?.serverModelCards; + + expect(result).toMatchSnapshot(); + + process.env.OPENAI_MODEL_LIST = ''; + }); + + it('duplicate naming model', async () => { + process.env.OPENAI_MODEL_LIST = + 'gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k'; + + const response = await GET(); + const jsonResponse: GlobalServerConfig = await response.json(); + + const result = jsonResponse.languageModel?.openai?.serverModelCards; + + expect(result?.find((s) => s.id === 'gpt-4-0125-preview')?.displayName).toEqual( + 'gpt-4-32k', + ); + + process.env.OPENAI_MODEL_LIST = ''; + }); + + it('should delete model', async () => { + process.env.OPENAI_MODEL_LIST = '-gpt-4'; + + const res = await GET(); + const data: GlobalServerConfig = await res.json(); + + const result = data.languageModel?.openai?.serverModelCards; + + expect(result?.find((r) => r.id === 'gpt-4')).toBeUndefined(); + + process.env.OPENAI_MODEL_LIST = ''; + }); + + it('show the hidden model', async () => { + process.env.OPENAI_MODEL_LIST = '+gpt-4-1106-preview'; + + const res = await GET(); + const data: GlobalServerConfig = await res.json(); + + const result = data.languageModel?.openai?.serverModelCards; + + expect(result?.find((o) => o.id === 'gpt-4-1106-preview')).toEqual({ + displayName: 'GPT-4 Turbo Preview (1106)', + functionCall: true, + enabled: true, + id: 'gpt-4-1106-preview', + tokens: 128000, + }); + + process.env.OPENAI_MODEL_LIST = ''; + }); + + it('only add the model', async () => { + process.env.OPENAI_MODEL_LIST = 'model1,model2,model3,model4'; + + const res = await GET(); + const data: GlobalServerConfig = await res.json(); + + const result = data.languageModel?.openai?.serverModelCards; + + expect(result).toContainEqual({ + 
displayName: 'model1', + functionCall: true, + id: 'model1', + enabled: true, + vision: true, + }); + expect(result).toContainEqual({ + displayName: 'model2', + functionCall: true, + enabled: true, + id: 'model2', + vision: true, + }); + expect(result).toContainEqual({ + displayName: 'model3', + enabled: true, + functionCall: true, + id: 'model3', + vision: true, + }); + expect(result).toContainEqual({ + displayName: 'model4', + functionCall: true, + enabled: true, + id: 'model4', + vision: true, + }); + + process.env.OPENAI_MODEL_LIST = ''; + }); + }); + + describe('CUSTOM_MODELS', () => { + it('custom deletion, addition, and renaming of models', async () => { + process.env.CUSTOM_MODELS = + '-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k'; + + const response = await GET(); + + // Assert + expect(response).toBeInstanceOf(Response); + expect(response.status).toBe(200); + + const jsonResponse: GlobalServerConfig = await response.json(); + + const result = jsonResponse.languageModel?.openai?.serverModelCards; + + expect(result).toMatchSnapshot(); + }); + }); + + describe('OPENROUTER_MODEL_LIST', () => { + it('custom deletion, addition, and renaming of models', async () => { + process.env.OPENROUTER_MODEL_LIST = + '-all,+google/gemma-7b-it,+mistralai/mistral-7b-instruct=Mistral-7B-Instruct'; + + const res = await GET(); + const data: GlobalServerConfig = await res.json(); + + const result = data.languageModel?.openai?.serverModelCards; + + expect(result).toMatchSnapshot(); + + process.env.OPENROUTER_MODEL_LIST = ''; + }); + }); + }); +}); diff --git a/src/app/api/config/route.ts b/src/app/api/config/route.ts index 861f8f867745..bea31e50ab04 100644 --- a/src/app/api/config/route.ts +++ b/src/app/api/config/route.ts @@ -1,5 +1,7 @@ +import { OllamaProvider, OpenRouterProvider, TogetherAIProvider } from '@/config/modelProviders'; import { getServerConfig } from '@/config/server'; import { GlobalServerConfig } from 
'@/types/settings'; +import { transformToChatModelCards } from '@/utils/parseModels'; import { parseAgentConfig } from './parseDefaultAgent'; @@ -11,27 +13,32 @@ export const runtime = 'edge'; export const GET = async () => { const { ENABLE_LANGFUSE, - CUSTOM_MODELS, + ENABLE_OAUTH_SSO, + + DEFAULT_AGENT_CONFIG, + OPENAI_MODEL_LIST, + ENABLED_MOONSHOT, ENABLED_ZHIPU, ENABLED_AWS_BEDROCK, ENABLED_GOOGLE, ENABLED_GROQ, - ENABLE_OAUTH_SSO, - ENABLE_OLLAMA, ENABLED_PERPLEXITY, ENABLED_ANTHROPIC, ENABLED_MISTRAL, + + ENABLE_OLLAMA, + OLLAMA_MODEL_LIST, + ENABLED_OPENROUTER, + OPENROUTER_MODEL_LIST, + ENABLED_ZEROONE, ENABLED_TOGETHERAI, - DEFAULT_AGENT_CONFIG, - OLLAMA_CUSTOM_MODELS, - OPENROUTER_CUSTOM_MODELS, + TOGETHERAI_MODEL_LIST, } = getServerConfig(); const config: GlobalServerConfig = { - customModelName: CUSTOM_MODELS, defaultAgent: { config: parseAgentConfig(DEFAULT_AGENT_CONFIG), }, @@ -44,10 +51,31 @@ export const GET = async () => { groq: { enabled: ENABLED_GROQ }, mistral: { enabled: ENABLED_MISTRAL }, moonshot: { enabled: ENABLED_MOONSHOT }, - ollama: { customModelName: OLLAMA_CUSTOM_MODELS, enabled: ENABLE_OLLAMA }, - openrouter: { customModelName: OPENROUTER_CUSTOM_MODELS, enabled: ENABLED_OPENROUTER }, + + ollama: { + enabled: ENABLE_OLLAMA, + serverModelCards: transformToChatModelCards(OLLAMA_MODEL_LIST, OllamaProvider.chatModels), + }, + openai: { + serverModelCards: transformToChatModelCards(OPENAI_MODEL_LIST), + }, + openrouter: { + enabled: ENABLED_OPENROUTER, + serverModelCards: transformToChatModelCards( + OPENROUTER_MODEL_LIST, + OpenRouterProvider.chatModels, + ), + }, perplexity: { enabled: ENABLED_PERPLEXITY }, - togetherai: { enabled: ENABLED_TOGETHERAI }, + + togetherai: { + enabled: ENABLED_TOGETHERAI, + serverModelCards: transformToChatModelCards( + TOGETHERAI_MODEL_LIST, + TogetherAIProvider.chatModels, + ), + }, + zeroone: { enabled: ENABLED_ZEROONE }, zhipu: { enabled: ENABLED_ZHIPU }, }, diff --git a/src/config/server/provider.ts 
b/src/config/server/provider.ts index c51d7d2adbb4..5ac45109faa1 100644 --- a/src/config/server/provider.ts +++ b/src/config/server/provider.ts @@ -4,13 +4,14 @@ declare global { // eslint-disable-next-line @typescript-eslint/no-namespace namespace NodeJS { interface ProcessEnv { - CUSTOM_MODELS?: string; - API_KEY_SELECT_MODE?: string; // OpenAI Provider + ENABLED_OPENAI?: string; OPENAI_API_KEY?: string; OPENAI_PROXY_URL?: string; + OPENAI_MODEL_LIST?: string; + OPENAI_ENABLED_MODELS?: string; OPENAI_FUNCTION_REGIONS?: string; // Azure OpenAI Provider @@ -20,38 +21,49 @@ declare global { USE_AZURE_OPENAI?: string; // ZhiPu Provider + ENABLED_ZHIPU?: string; ZHIPU_API_KEY?: string; ZHIPU_PROXY_URL?: string; // Google Provider + ENABLED_GOOGLE?: string; GOOGLE_API_KEY?: string; // Moonshot Provider + ENABLED_MOONSHOT?: string; MOONSHOT_API_KEY?: string; MOONSHOT_PROXY_URL?: string; // Perplexity Provider + ENABLED_PERPLEXITY?: string; PERPLEXITY_API_KEY?: string; // Anthropic Provider + ENABLED_ANTHROPIC?: string; ANTHROPIC_API_KEY?: string; ANTHROPIC_PROXY_URL?: string; // Mistral Provider + ENABLED_MISTRAL?: string; MISTRAL_API_KEY?: string; // Groq Provider + ENABLED_GROQ?: string; GROQ_API_KEY?: string; // OpenRouter Provider + ENABLED_OPENROUTER?: string; OPENROUTER_API_KEY?: string; - OPENROUTER_CUSTOM_MODELS?: string; + OPENROUTER_MODEL_LIST?: string; // ZeroOne Provider + ENABLED_ZEROONE?: string; ZEROONE_API_KEY?: string; // TogetherAI Provider + ENABLED_TOGETHERAI?: string; TOGETHERAI_API_KEY?: string; + TOGETHERAI_MODEL_LIST?: string; // AWS Credentials AWS_REGION?: string; @@ -60,6 +72,16 @@ declare global { // Ollama Provider; OLLAMA_PROXY_URL?: string; + + OLLAMA_MODEL_LIST?: string; + + /** + * @deprecated + */ + CUSTOM_MODELS?: string; + /** + * @deprecated + */ OLLAMA_CUSTOM_MODELS?: string; } } @@ -98,12 +120,11 @@ export const getProviderConfig = () => { } return { - CUSTOM_MODELS: process.env.CUSTOM_MODELS, - API_KEY_SELECT_MODE: 
process.env.API_KEY_SELECT_MODE, OPENAI_API_KEY: process.env.OPENAI_API_KEY, OPENAI_PROXY_URL: process.env.OPENAI_PROXY_URL, + OPENAI_MODEL_LIST: process.env.OPENAI_MODEL_LIST || process.env.CUSTOM_MODELS, OPENAI_FUNCTION_REGIONS: regions, ENABLED_ZHIPU: !!ZHIPU_API_KEY, @@ -124,10 +145,12 @@ export const getProviderConfig = () => { ENABLED_OPENROUTER: !!OPENROUTER_API_KEY, OPENROUTER_API_KEY, - OPENROUTER_CUSTOM_MODELS: process.env.OPENROUTER_CUSTOM_MODELS, + OPENROUTER_MODEL_LIST: + process.env.OPENROUTER_MODEL_LIST || process.env.OPENROUTER_CUSTOM_MODELS, ENABLED_TOGETHERAI: !!TOGETHERAI_API_KEY, TOGETHERAI_API_KEY, + TOGETHERAI_MODEL_LIST: process.env.TOGETHERAI_MODEL_LIST, ENABLED_MOONSHOT: !!MOONSHOT_API_KEY, MOONSHOT_API_KEY, @@ -151,6 +174,6 @@ export const getProviderConfig = () => { ENABLE_OLLAMA: !!process.env.OLLAMA_PROXY_URL, OLLAMA_PROXY_URL: process.env.OLLAMA_PROXY_URL || '', - OLLAMA_CUSTOM_MODELS: process.env.OLLAMA_CUSTOM_MODELS, + OLLAMA_MODEL_LIST: process.env.OLLAMA_MODEL_LIST || process.env.OLLAMA_CUSTOM_MODELS, }; }; diff --git a/src/const/settings/index.ts b/src/const/settings/index.ts index c2f86a98836a..1f9410afd2ca 100644 --- a/src/const/settings/index.ts +++ b/src/const/settings/index.ts @@ -77,7 +77,7 @@ export const DEFAULT_LLM_CONFIG: GlobalLLMConfig = { bedrock: { accessKeyId: '', enabled: false, - models: filterEnabledModels(BedrockProvider), + enabledModels: filterEnabledModels(BedrockProvider), region: 'us-east-1', secretAccessKey: '', }, @@ -124,7 +124,7 @@ export const DEFAULT_LLM_CONFIG: GlobalLLMConfig = { togetherai: { apiKey: '', enabled: false, - models: filterEnabledModels(TogetherAIProvider), + enabledModels: filterEnabledModels(TogetherAIProvider), }, zeroone: { apiKey: '', diff --git a/src/migrations/FromV3ToV4/index.ts b/src/migrations/FromV3ToV4/index.ts index 7df62a502103..6ca902501b37 100644 --- a/src/migrations/FromV3ToV4/index.ts +++ b/src/migrations/FromV3ToV4/index.ts @@ -15,12 +15,12 @@ export class 
MigrationV3ToV4 implements Migration { ...data, state: { ...data.state, - settings: !settings ? undefined : this.migrateSettings(settings), + settings: !settings ? undefined : MigrationV3ToV4.migrateSettings(settings), }, }; } - migrateSettings = (settings: V3Settings): V4Settings => { + static migrateSettings = (settings: V3Settings): V4Settings => { const { languageModel } = settings; const { openAI, togetherai, openrouter, ollama, ...res } = languageModel; const { openai, azure } = this.migrateOpenAI(openAI); @@ -38,7 +38,7 @@ export class MigrationV3ToV4 implements Migration { }; }; - migrateOpenAI = ( + static migrateOpenAI = ( openai: V3OpenAIConfig, ): { azure: V4ProviderConfig; openai: V4ProviderConfig } => { if (openai.useAzure) { @@ -76,7 +76,7 @@ export class MigrationV3ToV4 implements Migration { }; }; - migrateProvider = (provider: V3LegacyConfig): V4ProviderConfig => { + static migrateProvider = (provider: V3LegacyConfig): V4ProviderConfig => { const customModelCards = transformToChatModelCards(provider.customModelName, []); return { apiKey: provider.apiKey, diff --git a/src/migrations/index.ts b/src/migrations/index.ts index cf23b07adbd4..a7c9304bbe8b 100644 --- a/src/migrations/index.ts +++ b/src/migrations/index.ts @@ -4,9 +4,10 @@ import { ConfigStateAll } from '@/types/exportConfig'; import { MigrationV0ToV1 } from './FromV0ToV1'; import { MigrationV1ToV2 } from './FromV1ToV2'; +import { MigrationV3ToV4 } from './FromV3ToV4'; // Current latest version -export const CURRENT_CONFIG_VERSION = 3; +export const CURRENT_CONFIG_VERSION = 4; // Version migrations module const ConfigMigrations = [ @@ -16,7 +17,7 @@ const ConfigMigrations = [ * - from `openAI` to `openai`, `azure` * - from customModelName to `enabledModels` and `customModelCards` */ - // MigrationV3ToV4, + MigrationV3ToV4, /** * 2024.01.22 * from `group = pinned` to `pinned:true` diff --git a/src/store/global/slices/settings/selectors/__snapshots__/modelProvider.test.ts.snap 
b/src/store/global/slices/settings/selectors/__snapshots__/modelProvider.test.ts.snap deleted file mode 100644 index 0e017d5aefac..000000000000 --- a/src/store/global/slices/settings/selectors/__snapshots__/modelProvider.test.ts.snap +++ /dev/null @@ -1,230 +0,0 @@ -// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html - -exports[`modelProviderSelectors > CUSTOM_MODELS > custom deletion, addition, and renaming of models 1`] = ` -[ - { - "chatModels": [ - { - "displayName": "llama", - "enabled": true, - "functionCall": true, - "id": "llama", - "vision": true, - }, - { - "displayName": "claude-2", - "enabled": true, - "functionCall": true, - "id": "claude-2", - "vision": true, - }, - { - "displayName": "gpt-4-32k", - "enabled": true, - "functionCall": true, - "id": "gpt-4-0125-preview", - "tokens": 128000, - }, - ], - "enabled": true, - "id": "openai", - }, -] -`; - -exports[`modelProviderSelectors > CUSTOM_MODELS > should work correct with gpt-4 1`] = ` -[ - { - "displayName": "GPT-3.5 Turbo (1106)", - "enabled": true, - "functionCall": true, - "id": "gpt-3.5-turbo-1106", - "tokens": 16385, - }, - { - "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务", - "displayName": "GPT-3.5 Turbo", - "enabled": true, - "functionCall": true, - "id": "gpt-3.5-turbo", - "tokens": 16385, - }, - { - "displayName": "GPT-3.5 Turbo 16K", - "enabled": true, - "id": "gpt-3.5-turbo-16k", - "tokens": 16385, - }, - { - "displayName": "GPT-4", - "enabled": true, - "functionCall": true, - "id": "gpt-4", - "tokens": 8192, - }, - { - "displayName": "GPT-4 32K", - "enabled": true, - "functionCall": true, - "id": "gpt-4-32k", - "tokens": 32768, - }, - { - "displayName": "GPT-4 Turbo Preview (1106)", - "enabled": true, - "functionCall": true, - "id": "gpt-4-1106-preview", - "tokens": 128000, - }, - { - "description": "GPT-4 视觉预览版,支持视觉任务", - "displayName": "GPT-4 Turbo Vision Preview", - "enabled": true, - "id": "gpt-4-vision-preview", - "tokens": 128000, - "vision": true, - }, -] -`; - 
-exports[`modelProviderSelectors > OPENROUTER_CUSTOM_MODELS > custom deletion, addition, and renaming of models 1`] = ` -[ - { - "chatModels": [ - { - "description": "GPT 3.5 Turbo,适用于各种文本生成和理解任务", - "displayName": "GPT-3.5 Turbo", - "functionCall": true, - "id": "gpt-3.5-turbo", - "tokens": 16385, - }, - { - "displayName": "GPT-3.5 Turbo (0125)", - "functionCall": true, - "hidden": true, - "id": "gpt-3.5-turbo-0125", - "tokens": 16385, - }, - { - "displayName": "GPT-3.5 Turbo (1106)", - "functionCall": true, - "hidden": true, - "id": "gpt-3.5-turbo-1106", - "tokens": 16385, - }, - { - "displayName": "GPT-3.5 Turbo Instruct", - "hidden": true, - "id": "gpt-3.5-turbo-instruct", - "tokens": 4096, - }, - { - "displayName": "GPT-3.5 Turbo 16K", - "hidden": true, - "id": "gpt-3.5-turbo-16k", - "tokens": 16385, - }, - { - "displayName": "GPT-3.5 Turbo (0613)", - "hidden": true, - "id": "gpt-3.5-turbo-0613", - "legacy": true, - "tokens": 4096, - }, - { - "displayName": "GPT-3.5 Turbo 16K (0613)", - "hidden": true, - "id": "gpt-3.5-turbo-16k-0613", - "legacy": true, - "tokens": 4096, - }, - { - "displayName": "GPT-4 Turbo Preview", - "functionCall": true, - "id": "gpt-4-turbo-preview", - "tokens": 128000, - }, - { - "displayName": "GPT-4 Turbo Preview (0125)", - "functionCall": true, - "hidden": true, - "id": "gpt-4-0125-preview", - "tokens": 128000, - }, - { - "description": "GPT-4 视觉预览版,支持视觉任务", - "displayName": "GPT-4 Turbo Vision Preview", - "id": "gpt-4-vision-preview", - "tokens": 128000, - "vision": true, - }, - { - "displayName": "GPT-4 Turbo Preview (1106)", - "functionCall": true, - "hidden": true, - "id": "gpt-4-1106-preview", - "tokens": 128000, - }, - { - "displayName": "GPT-4", - "functionCall": true, - "hidden": true, - "id": "gpt-4", - "tokens": 8192, - }, - { - "displayName": "GPT-4 (0613)", - "functionCall": true, - "hidden": true, - "id": "gpt-4-0613", - "tokens": 8192, - }, - { - "displayName": "GPT-4 32K", - "functionCall": true, - "hidden": true, - 
"id": "gpt-4-32k", - "tokens": 32768, - }, - { - "displayName": "GPT-4 32K (0613)", - "functionCall": true, - "hidden": true, - "id": "gpt-4-32k-0613", - "tokens": 32768, - }, - { - "displayName": "GPT-4 ALL", - "files": true, - "functionCall": true, - "hidden": true, - "id": "gpt-4-all", - "tokens": 32768, - "vision": true, - }, - ], - "enabled": true, - "id": "openai", - }, - { - "chatModels": [ - { - "displayName": "google/gemma-7b-it", - "functionCall": true, - "id": "google/gemma-7b-it", - "isCustom": true, - "vision": true, - }, - { - "displayName": "Mistral-7B-Instruct", - "functionCall": true, - "id": "mistralai/mistral-7b-instruct", - "isCustom": true, - "vision": true, - }, - ], - "enabled": true, - "id": "openrouter", - }, -] -`; diff --git a/src/store/global/slices/settings/selectors/modelProvider.test.ts b/src/store/global/slices/settings/selectors/modelProvider.test.ts index b058f25f2204..feb7a2ce7a36 100644 --- a/src/store/global/slices/settings/selectors/modelProvider.test.ts +++ b/src/store/global/slices/settings/selectors/modelProvider.test.ts @@ -39,141 +39,6 @@ describe('modelProviderSelectors', () => { expect(result).toBeUndefined(); }); }); - describe('CUSTOM_MODELS', () => { - it('custom deletion, addition, and renaming of models', () => { - const s = merge(initialSettingsState, { - serverConfig: { - customModelName: - '-all,+llama,+claude-2,-gpt-3.5-turbo,gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k', - }, - }) as unknown as GlobalStore; - - const result = modelProviderSelectors.providerModelList(s).filter((r) => r.enabled); - - expect(result).toMatchSnapshot(); - }); - - it('should work correct with gpt-4', () => { - const s = merge(initialSettingsState, { - serverConfig: { - customModelName: - '-all,+gpt-3.5-turbo-1106,+gpt-3.5-turbo,+gpt-3.5-turbo-16k,+gpt-4,+gpt-4-32k,+gpt-4-1106-preview,+gpt-4-vision-preview', - }, - }) as unknown as GlobalStore; - - const result = modelProviderSelectors.providerModelList(s).filter((r) => 
r.enabled); - - expect(result[0].chatModels).toMatchSnapshot(); - }); - it('duplicate naming model', () => { - const s = merge(initialSettingsState, { - serverConfig: { - customModelName: 'gpt-4-0125-preview=gpt-4-turbo,gpt-4-0125-preview=gpt-4-32k', - }, - }) as unknown as GlobalStore; - - const result = modelProviderSelectors.providerModelList(s).filter((r) => r.enabled); - - expect(result[0].chatModels.find((s) => s.id === 'gpt-4-0125-preview')?.displayName).toEqual( - 'gpt-4-32k', - ); - }); - - it('should delete model', () => { - const s = merge(initialSettingsState, { - serverConfig: { customModelName: '-gpt-4' }, - }) as unknown as GlobalStore; - - const result = modelProviderSelectors.providerModelList(s).filter((r) => r.enabled); - - expect(result.find((r) => r.id === 'gpt-4')).toBeUndefined(); - }); - - it('show the hidden model', () => { - const s = merge(initialSettingsState, { - serverConfig: { - customModelName: '+gpt-4-1106-preview', - }, - }) as unknown as GlobalStore; - - const result = modelProviderSelectors.providerModelList(s).filter((r) => r.enabled); - - expect(result[0].chatModels.find((o) => o.id === 'gpt-4-1106-preview')).toEqual({ - displayName: 'GPT-4 Turbo Preview (1106)', - functionCall: true, - enabled: true, - id: 'gpt-4-1106-preview', - tokens: 128000, - }); - }); - - it('only add the model', () => { - const s = merge(initialSettingsState, { - serverConfig: { - customModelName: 'model1,model2,model3,model4', - }, - }) as unknown as GlobalStore; - - const result = modelProviderSelectors.providerModelList(s).filter((r) => r.enabled); - - expect(result[0].chatModels).toContainEqual({ - displayName: 'model1', - functionCall: true, - id: 'model1', - enabled: true, - vision: true, - }); - expect(result[0].chatModels).toContainEqual({ - displayName: 'model2', - functionCall: true, - enabled: true, - id: 'model2', - vision: true, - }); - expect(result[0].chatModels).toContainEqual({ - displayName: 'model3', - enabled: true, - functionCall: 
true, - id: 'model3', - vision: true, - }); - expect(result[0].chatModels).toContainEqual({ - displayName: 'model4', - functionCall: true, - enabled: true, - id: 'model4', - vision: true, - }); - }); - }); - - // TODO: need to be fixed - describe('OPENROUTER_CUSTOM_MODELS', () => { - it.skip('custom deletion, addition, and renaming of models', () => { - const s = merge(initialSettingsState, { - settings: { - languageModel: { - openrouter: { - enabled: true, - }, - }, - }, - serverConfig: { - languageModel: { - openrouter: { - apiKey: 'test-openrouter-api-key', - customModelName: - '-all,+google/gemma-7b-it,+mistralai/mistral-7b-instruct=Mistral-7B-Instruct', - }, - }, - }, - }) as unknown as GlobalStore; - - const result = modelProviderSelectors.providerModelList(s).filter((r) => r.enabled); - expect(result).toMatchSnapshot(); - }); - }); - describe('modelEnabledVision', () => { it('should return true if the model has vision ability', () => { const hasAbility = modelProviderSelectors.modelEnabledVision('gpt-4-vision-preview')( diff --git a/src/store/global/slices/settings/selectors/modelProvider.ts b/src/store/global/slices/settings/selectors/modelProvider.ts index 2d9ae34f5d77..a94e14cbef93 100644 --- a/src/store/global/slices/settings/selectors/modelProvider.ts +++ b/src/store/global/slices/settings/selectors/modelProvider.ts @@ -14,11 +14,10 @@ import { ZhiPuProvider, filterEnabledModels, } from '@/config/modelProviders'; -import { ModelProviderCard } from '@/types/llm'; -import { transformToChatModelCards } from '@/utils/parseModels'; +import { ChatModelCard, ModelProviderCard } from '@/types/llm'; +import { GeneralModelProviderConfig, GlobalLLMProviderKey } from '@/types/settings'; import { GlobalStore } from '../../../store'; -import { currentSettings } from './settings'; // const azureModelList = (s: GlobalStore): ModelProviderCard => { // const azure = azureConfig(s); @@ -28,38 +27,38 @@ import { currentSettings } from './settings'; // }; // }; +const 
serverProviderModelCards = + (provider: GlobalLLMProviderKey) => + (s: GlobalStore): ChatModelCard[] | undefined => { + const config = s.serverConfig.languageModel?.[provider] as + | GeneralModelProviderConfig + | undefined; + + if (!config) return; + + return config.serverModelCards; + }; + /** * define all the model list of providers */ const providerModelList = (s: GlobalStore): ModelProviderCard[] => { - const openaiChatModels = transformToChatModelCards(s.serverConfig.customModelName); - - const ollamaChatModels = transformToChatModelCards( - s.serverConfig.languageModel?.ollama?.customModelName, - OllamaProvider.chatModels, - ); - - const openrouterChatModels = transformToChatModelCards( - s.serverConfig.languageModel?.openrouter?.customModelName, - OpenRouterProvider.chatModels, - ); - - const togetheraiChatModels = transformToChatModelCards( - currentSettings(s).languageModel.togetherai.customModelName, - TogetherAIProvider.chatModels, - ); + const openaiChatModels = serverProviderModelCards('openai')(s); + const ollamaChatModels = serverProviderModelCards('ollama')(s); + const openrouterChatModels = serverProviderModelCards('openrouter')(s); + const togetheraiChatModels = serverProviderModelCards('togetherai')(s); return [ { ...OpenAIProvider, - chatModels: openaiChatModels, + chatModels: openaiChatModels ?? OpenAIProvider.chatModels, }, // { ...azureModelList(s), enabled: enableAzure(s) }, - { ...OllamaProvider, chatModels: ollamaChatModels }, + { ...OllamaProvider, chatModels: ollamaChatModels ?? OllamaProvider.chatModels }, AnthropicProvider, GoogleProvider, - { ...OpenRouterProvider, chatModels: openrouterChatModels }, - { ...TogetherAIProvider, chatModels: togetheraiChatModels }, + { ...OpenRouterProvider, chatModels: openrouterChatModels ?? OpenRouterProvider.chatModels }, + { ...TogetherAIProvider, chatModels: togetheraiChatModels ??
TogetherAIProvider.chatModels }, BedrockProvider, PerplexityProvider, MistralProvider, diff --git a/src/types/settings/index.ts b/src/types/settings/index.ts index aa56e5ab7127..57ce6a620853 100644 --- a/src/types/settings/index.ts +++ b/src/types/settings/index.ts @@ -21,7 +21,6 @@ export interface GlobalTool { } export interface GlobalServerConfig { - customModelName?: string; defaultAgent?: DeepPartial; enabledOAuthSSO?: boolean; languageModel?: DeepPartial; diff --git a/src/types/settings/modelProvider.ts b/src/types/settings/modelProvider.ts index c16421aa2548..fc92f81e7f5e 100644 --- a/src/types/settings/modelProvider.ts +++ b/src/types/settings/modelProvider.ts @@ -11,6 +11,11 @@ export interface GeneralModelProviderConfig { */ enabledModels?: string[] | null; endpoint?: string; + + /** + * the model cards defined in server config + */ + serverModelCards?: ChatModelCard[]; } export interface AzureOpenAIConfig {