From 5f50fde8e0f2378e9eccde76ce7408bcac66ca75 Mon Sep 17 00:00:00 2001 From: Brian Joseph Petro Date: Wed, 18 Dec 2024 21:59:44 -0500 Subject: [PATCH] Add new adapters for LM Studio and Groq; refactor existing adapter methods - Introduced SmartChatModelLmStudioAdapter and SmartChatModelGroqAdapter for new API integrations. - Updated SmartChatModelRequestAdapter and SmartChatModelResponseAdapter methods to handle message content more effectively. - Enhanced SmartChatModelCustomAdapter with custom protocol, hostname, port, and path getters. - Refactored message handling in SmartThread to improve tool usage logic based on message roles. --- smart-chat-model/adapters.js | 6 + smart-chat-model/adapters/_api.js | 12 +- smart-chat-model/adapters/_custom.js | 25 ++++ smart-chat-model/adapters/groq.js | 139 ++++++++++++++++++++++ smart-chat-model/adapters/lm_studio.js | 155 +++++++++++++++++++++++++ smart-chats/smart_thread.js | 9 +- 6 files changed, 339 insertions(+), 7 deletions(-) create mode 100644 smart-chat-model/adapters/groq.js create mode 100644 smart-chat-model/adapters/lm_studio.js diff --git a/smart-chat-model/adapters.js b/smart-chat-model/adapters.js index 2fe6c518..5ec2e667 100644 --- a/smart-chat-model/adapters.js +++ b/smart-chat-model/adapters.js @@ -5,6 +5,8 @@ import { SmartChatModelCohereAdapter } from './adapters/cohere.js'; import { SmartChatModelOpenRouterAdapter } from './adapters/open_router.js'; import { SmartChatModelCustomAdapter } from './adapters/_custom.js'; import { SmartChatModelOllamaAdapter } from './adapters/ollama.js'; +import { SmartChatModelLmStudioAdapter } from './adapters/lm_studio.js'; +import { SmartChatModelGroqAdapter } from './adapters/groq.js'; export { SmartChatModelAnthropicAdapter, SmartChatModelOpenaiAdapter, @@ -13,6 +15,8 @@ export { SmartChatModelOpenRouterAdapter, SmartChatModelCustomAdapter, SmartChatModelOllamaAdapter, + SmartChatModelLmStudioAdapter, + SmartChatModelGroqAdapter, SmartChatModelAnthropicAdapter as anthropic, SmartChatModelOpenaiAdapter as openai, SmartChatModelGeminiAdapter as gemini, @@ -20,4 +24,6 @@ export { SmartChatModelOpenRouterAdapter as open_router, SmartChatModelCustomAdapter as custom, SmartChatModelOllamaAdapter as ollama, + SmartChatModelLmStudioAdapter as lm_studio, + SmartChatModelGroqAdapter as groq, }; diff --git a/smart-chat-model/adapters/_api.js b/smart-chat-model/adapters/_api.js index 4dc521b6..47ef780c 100644 --- a/smart-chat-model/adapters/_api.js +++ b/smart-chat-model/adapters/_api.js @@ -473,7 +473,7 @@ export class SmartChatModelRequestAdapter { _transform_single_message_to_openai(message) { const transformed = { role: this._get_openai_role(message.role), - content: this._get_openai_content(message.content), + content: this._get_openai_content(message), }; if (message.name) transformed.name = message.name; @@ -501,9 +501,9 @@ export class SmartChatModelRequestAdapter { * @returns {string} The transformed content. 
* @private
*/
- _get_openai_content(content) {
+ _get_openai_content(message) {
// Override in subclasses if needed
- return content;
+ return message.content;
}

/**
@@ -721,7 +721,7 @@ export class SmartChatModelResponseAdapter {
_transform_message_to_openai(message={}) {
const transformed = {
role: this._get_openai_role(message.role),
- content: this._get_openai_content(message.content),
+ content: this._get_openai_content(message),
};

if (message.name) transformed.name = message.name;
@@ -748,9 +748,9 @@ export class SmartChatModelResponseAdapter {
* @returns {string} The transformed content.
* @private
*/
- _get_openai_content(content) {
+ _get_openai_content(message) {
// Override in subclasses if needed
- return content;
+ return message.content;
}

/**
diff --git a/smart-chat-model/adapters/_custom.js b/smart-chat-model/adapters/_custom.js
index eebf1cdd..661758ff 100644
--- a/smart-chat-model/adapters/_custom.js
+++ b/smart-chat-model/adapters/_custom.js
@@ -11,6 +11,31 @@ export class SmartChatModelCustomAdapter extends SmartChatModelApiAdapter {

req_adapter = SmartChatModelCustomRequestAdapter;

+ get custom_protocol() {
+ return this.adapter_config.protocol || 'http';
+ }
+ get custom_hostname() {
+ return this.adapter_config.hostname || 'localhost';
+ }
+ get custom_port() {
+ return this.adapter_config.port ? `:${this.adapter_config.port}` : '';
+ }
+ get custom_path() {
+ let path = this.adapter_config.path || '';
+ if(path && !path.startsWith('/')) path = `/${path}`;
+ return path;
+ }
+
+ get endpoint() {
+ return [
+ this.custom_protocol,
+ '://',
+ this.custom_hostname,
+ this.custom_port,
+ this.custom_path
+ ].join('');
+ }
+
get settings_config() {
return {
// LOCAL PLATFORM SETTINGS
diff --git a/smart-chat-model/adapters/groq.js b/smart-chat-model/adapters/groq.js
new file mode 100644
index 00000000..1a8bf2fb
--- /dev/null
+++ b/smart-chat-model/adapters/groq.js
@@ -0,0 +1,139 @@
+import { SmartChatModelApiAdapter, SmartChatModelRequestAdapter, SmartChatModelResponseAdapter } from './_api.js';
+
+/**
+ * Adapter for the Groq API.
+ * This adapter assumes the Groq endpoint provides a format similar to OpenAI.
+ * The main difference from openai.js: assistant and tool messages with array content are merged into a single string.
+ */
+export class SmartChatModelGroqAdapter extends SmartChatModelApiAdapter {
+ static defaults = {
+ description: "Groq",
+ type: "API",
+ endpoint: "https://api.groq.com/openai/v1/chat/completions",
+ streaming: true,
+ adapter: "Groq",
+ models_endpoint: "https://api.groq.com/openai/v1/models",
+ default_model: "llama3-8b-8192",
+ signup_url: "https://groq.com",
+ can_use_tools: true,
+ };
+
+ /**
+ * Request adapter class
+ * @returns {typeof SmartChatModelGroqRequestAdapter}
+ */
+ get req_adapter() { return SmartChatModelGroqRequestAdapter; }
+
+ /**
+ * Response adapter class
+ * @returns {typeof SmartChatModelGroqResponseAdapter}
+ */
+ get res_adapter() { return SmartChatModelGroqResponseAdapter; }
+
+ /**
+ * Retrieve the list of models from Groq's API.
+ * @returns {Promise<Object>} A dictionary of models keyed by their id
+ */
+ async get_models(refresh = false) {
+ if (!refresh && this.adapter_config?.models && Object.keys(this.adapter_config.models).length > 0) {
+ return this.adapter_config.models;
+ }
+
+ const request_params = {
+ url: this.models_endpoint,
+ method: 'GET',
+ headers: {
+ 'Authorization': `Bearer ${this.api_key}`
+ }
+ };
+
+ try {
+ const resp = await this.http_adapter.request(request_params);
+ const data = await resp.json();
+ const model_data = this.parse_model_data(data);
+ this.adapter_settings.models = model_data;
+ this.model.re_render_settings();
+ return model_data;
+ } catch (error) {
+ console.error('Failed to fetch Groq model data:', error);
+ return {"_": {id: "Failed to fetch models from Groq"}};
+ }
+ }
+
+ /**
+ * Parse model data from Groq API format to a dictionary keyed by model ID.
+ * The API returns a list of model objects like:
+ * {
+ * "object": "list",
+ * "data": [ { "id": "...", "object": "model", ... }, ... ]
+ * }
+ *
+ * We'll convert each model to:
+ * {
+ * model_name: model.id,
+ * id: model.id,
+ * max_input_tokens: model.context_window,
+ * description: `Owned by: ${model.owned_by}, context: ${model.context_window}`,
+ * multimodal: heuristic flag based on the model id (see below)
+ * }
+ */
+ parse_model_data(model_data) {
+ if (model_data.object !== 'list' || !Array.isArray(model_data.data)) {
+ return {"_": { id: "No models found." }};
+ }
+
+ const parsed = {};
+ for (const m of model_data.data) {
+ parsed[m.id] = {
+ model_name: m.id,
+ id: m.id,
+ max_input_tokens: m.context_window || 8192,
+ description: `Owned by: ${m.owned_by}, context: ${m.context_window}`,
+ // A basic heuristic for multimodal: true if 'vision' appears in the model id
+ // Adjust as needed based on known capabilities
+ multimodal: m.id.includes('vision'),
+ };
+ }
+ return parsed;
+ }
+
+ /**
+ * Validate configuration for Groq
+ * @returns {{valid: boolean, message: string}} Validation result
+ */
+ validate_config() {
+ if(!this.adapter_config.model_key) return { valid: false, message: "No model selected." };
+ if (!this.api_key) {
+ return { valid: false, message: "API key is missing." };
+ }
+ return { valid: true, message: "Configuration is valid." };
+ }
+}
+
+/**
+ * Request adapter for Groq API
+ * @class SmartChatModelGroqRequestAdapter
+ * @extends SmartChatModelRequestAdapter
+ */
+export class SmartChatModelGroqRequestAdapter extends SmartChatModelRequestAdapter {
+ _get_openai_content(message) {
+ if(['assistant', 'tool'].includes(message.role)){
+ if(Array.isArray(message.content)) {
+ return message.content.map(part => {
+ if (typeof part === 'string') return part;
+ if (part?.text) return part.text;
+ return '';
+ }).join('\n');
+ }
+ }
+ return message.content;
+ }
+}
+
+/**
+ * Response adapter for Groq API
+ * @class SmartChatModelGroqResponseAdapter
+ * @extends SmartChatModelResponseAdapter
+ */
+export class SmartChatModelGroqResponseAdapter extends SmartChatModelResponseAdapter {
+}
diff --git a/smart-chat-model/adapters/lm_studio.js b/smart-chat-model/adapters/lm_studio.js
new file mode 100644
index 00000000..f48b6bab
--- /dev/null
+++ b/smart-chat-model/adapters/lm_studio.js
@@ -0,0 +1,155 @@
+import { SmartChatModelApiAdapter, SmartChatModelRequestAdapter, SmartChatModelResponseAdapter } from './_api.js';
+
+/**
+ * Adapter for LM Studio's OpenAI-compatible API.
+ * LM Studio provides OpenAI-like endpoints at /v1/*, allowing reuse of OpenAI clients.
+ * @class SmartChatModelLmStudioAdapter
+ * @extends SmartChatModelApiAdapter
+ *
+ * @property {Object} static defaults
+ * @property {string} defaults.description - Human-readable description
+ * @property {string} defaults.type - Adapter type ("API")
+ * @property {string} defaults.endpoint - LM Studio's OpenAI-compatible chat completions endpoint
+ * @property {boolean} defaults.streaming - Whether streaming is supported
+ * @property {string} defaults.adapter - Adapter identifier
+ * @property {string} defaults.models_endpoint - Endpoint for retrieving models
+ * @property {string} defaults.default_model - Default model to use
+ * @property {string} defaults.signup_url - URL with API documentation
+ */
+export class SmartChatModelLmStudioAdapter extends SmartChatModelApiAdapter {
+ static defaults = {
+ description: "LM Studio (OpenAI-compatible)",
+ type: "API",
+ endpoint: "http://localhost:1234/v1/chat/completions",
+ streaming: true,
+ adapter: "LM_Studio_OpenAI_Compat",
+ models_endpoint: "http://localhost:1234/v1/models",
+ default_model: "gpt-4o-mini", // Replace with a model listed by LM Studio
+ signup_url: "https://lmstudio.ai/docs/api/openai-api",
+ can_use_tools: true,
+ };
+
+ /**
+ * Request adapter class
+ */
+ get req_adapter() { return SmartChatModelLmStudioRequestAdapter; }
+
+ /**
+ * Response adapter class
+ */
+ get res_adapter() { return SmartChatModelLmStudioResponseAdapter; }
+
+ /**
+ * Validate parameters for getting models
+ * @returns {boolean} Always true (no parameters required)
+ */
+ validate_get_models_params() {
+ return true;
+ }
+
+ /**
+ * LM Studio's /v1/models returns OpenAI-like response format:
+ * {
+ * "object": "list",
+ * "data": [
+ * { "id": "model-name", "object": "model", ... },
+ * ...
+ * ]
+ * }
+ * Parse this like the OpenAI format.
+ * @param {Object} model_data - Raw model data from LM Studio
+ * @returns {Object} Map of model objects
+ */
+ parse_model_data(model_data) {
+ if (model_data.object !== 'list' || !Array.isArray(model_data.data)) {
+ return { "_": { id: "No models found." } };
+ }
+ const parsed = {};
+ for (const m of model_data.data) {
+ parsed[m.id] = {
+ id: m.id,
+ model_name: m.id,
+ // No direct context-length info is available here; use a default,
+ // or check whether LM Studio returns it in the model object
+ description: `LM Studio model: ${m.id}`,
+ multimodal: false, // LM Studio doesn't mention multimodal support via /v1
+ };
+ }
+ return parsed;
+ }
+
+ get models_endpoint_method() {
+ return 'get';
+ }
+
+ /**
+ * Count tokens in input text (no dedicated endpoint)
+ * Rough estimate: 1 token ~ 4 chars
+ * @param {string|Object} input
+ * @returns {Promise<number>} Estimated token count
+ */
+ async count_tokens(input) {
+ const text = typeof input === 'string' ? input : JSON.stringify(input);
+ return Math.ceil(text.length / 4);
+ }
+
+ /**
+ * Test API key. LM Studio does not require an API key, so this always resolves true.
+ * @returns {Promise<boolean>}
+ */
+ async test_api_key() {
+ return true;
+ }
+
+ /**
+ * Validate configuration
+ * @returns {{valid: boolean, message: string}} Validation result
+ */
+ validate_config() {
+ if (!this.adapter_config.model_key) {
+ return { valid: false, message: "No model selected." };
+ }
+ return { valid: true, message: "Configuration is valid." };
+ }
+}
+
+/**
+ * Request adapter for LM Studio OpenAI-compatible API
+ * Forces tool use by appending a text instruction to the last message when tool_choice is set.
+ * @class SmartChatModelLmStudioRequestAdapter
+ * @extends SmartChatModelRequestAdapter
+ */
+export class SmartChatModelLmStudioRequestAdapter extends SmartChatModelRequestAdapter {
+ to_platform(streaming = false) {
+ const req = this.to_openai(streaming);
+ const body = JSON.parse(req.body);
+
+ // If a tool_choice is specified, append a text instruction to the last message to force tool use
+ if (this.tool_choice?.function?.name) {
+ if(typeof body.messages[body.messages.length - 1].content === 'string'){
+ body.messages[body.messages.length - 1].content = [
+ {
+ type: 'text',
+ text: body.messages[body.messages.length - 1].content
+ },
+ ];
+ }
+ body.messages[body.messages.length - 1].content.push({
+ type: 'text',
+ text: `Use the "${this.tool_choice.function.name}" tool.`
+ });
+ }
+
+ req.body = JSON.stringify(body);
+ return req;
+ }
+}
+
+/**
+ * Response adapter for LM Studio OpenAI-compatible API
+ * LM Studio returns OpenAI-like responses directly.
+ * @class SmartChatModelLmStudioResponseAdapter
+ * @extends SmartChatModelResponseAdapter
+ */
+export class SmartChatModelLmStudioResponseAdapter extends SmartChatModelResponseAdapter {
+}
\ No newline at end of file
diff --git a/smart-chats/smart_thread.js b/smart-chats/smart_thread.js
index 6161a4d1..8eecf61a 100644
--- a/smart-chats/smart_thread.js
+++ b/smart-chats/smart_thread.js
@@ -227,11 +227,18 @@ export class SmartThread extends SmartSource {
// If the message context requires tool usage, define tools
if (msg.context?.has_self_ref || msg.context?.folder_refs) {
request.tools = [this.tools['lookup']];
- if (msg.is_last_message) {
+ if (msg.is_last_message && msg.role === 'user') {
request.tool_choice = { type: "function", function: { name: "lookup" } };
}
}
}
+ // If the last message is a tool output: omit tools when send_tool_output_in_user_message is enabled, and block an immediate follow-up tool call
+ if(this.last_message_is_tool && this.settings.send_tool_output_in_user_message){
+ request.tools = null;
+ }
+ if(this.last_message_is_tool){
+ request.tool_choice = 'none';
+ }
// Set default AI model parameters
request.temperature = 0.3;
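
Note: below is a minimal standalone sketch of the two behaviors this patch introduces, for quick sanity-checking — the content flattening from SmartChatModelGroqRequestAdapter._get_openai_content and the endpoint assembly from SmartChatModelCustomAdapter's getters. The function names here (flatten_groq_content, build_custom_endpoint) are illustrative only and do not appear in the patch.

// Sketch of the Groq content flattening: assistant/tool messages whose
// content is an array of parts are merged into one newline-joined string.
function flatten_groq_content(message) {
  if (['assistant', 'tool'].includes(message.role) && Array.isArray(message.content)) {
    return message.content
      .map((part) => (typeof part === 'string' ? part : (part?.text || '')))
      .join('\n');
  }
  return message.content;
}

// Sketch of the custom-adapter endpoint assembly: protocol + hostname +
// optional port + path normalized to a leading slash.
function build_custom_endpoint({ protocol = 'http', hostname = 'localhost', port, path = '' } = {}) {
  const port_part = port ? `:${port}` : '';
  const path_part = path && !path.startsWith('/') ? `/${path}` : path;
  return `${protocol}://${hostname}${port_part}${path_part}`;
}

console.log(flatten_groq_content({
  role: 'assistant',
  content: [{ type: 'text', text: 'Part one.' }, 'Part two.'],
}));
// -> "Part one.\nPart two."

console.log(build_custom_endpoint({ port: 1234, path: 'v1/chat/completions' }));
// -> "http://localhost:1234/v1/chat/completions"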