-
Notifications
You must be signed in to change notification settings - Fork 14
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add new adapters for LM Studio and Groq; refactor existing adapter methods - Introduced SmartChatModelLmStudioAdapter and SmartChatModelGroqAdapter for new API integrations. - Updated SmartChatModelRequestAdapter and SmartChatModelResponseAdapter methods to handle message content more effectively. - Enhanced SmartChatModelCustomAdapter with custom protocol, hostname, port, and path getters. - Refactored message handling in SmartThread to improve tool usage logic based on message roles.
- Loading branch information
Brian Joseph Petro
committed
Dec 19, 2024
1 parent
363694c
commit 5f50fde
Showing
6 changed files
with
339 additions
and
7 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,139 @@ | ||
import { SmartChatModelApiAdapter, SmartChatModelRequestAdapter, SmartChatModelResponseAdapter } from './_api.js'; | ||
|
||
/**
 * Adapter for Groq API.
 * This adapter assumes the Groq endpoint provides a format similar to OpenAI.
 * The main difference from openai.js: when processing assistant messages with
 * array or null content, the request adapter merges them into a single string.
 */
export class SmartChatModelGroqAdapter extends SmartChatModelApiAdapter {
  static defaults = {
    description: "Groq",
    type: "API",
    endpoint: "https://api.groq.com/openai/v1/chat/completions",
    streaming: true,
    adapter: "Groq",
    models_endpoint: "https://api.groq.com/openai/v1/models",
    default_model: "llama3-8b-8192",
    signup_url: "https://groq.com",
    can_use_tools: true,
  };

  /**
   * Request adapter class
   * @returns {typeof SmartChatModelGroqRequestAdapter}
   */
  get req_adapter() { return SmartChatModelGroqRequestAdapter; }

  /**
   * Response adapter class
   * @returns {typeof SmartChatModelGroqResponseAdapter}
   */
  get res_adapter() { return SmartChatModelGroqResponseAdapter; }

  /**
   * Retrieve the list of models from Groq's API.
   * Cached models from adapter_config are returned unless `refresh` is set.
   * @param {boolean} [refresh=false] - Force a re-fetch even when cached models exist.
   * @returns {Promise<Object>} A dictionary of models keyed by their id
   */
  async get_models(refresh = false) {
    if (!refresh && this.adapter_config?.models && Object.keys(this.adapter_config.models).length > 0) {
      return this.adapter_config.models;
    }

    const request_params = {
      url: this.models_endpoint,
      method: 'GET',
      headers: {
        'Authorization': `Bearer ${this.api_key}`
      }
    };

    try {
      const resp = await this.http_adapter.request(request_params);
      const data = await resp.json();
      const model_data = this.parse_model_data(data);
      // NOTE(review): cached models are read from adapter_config but written to
      // adapter_settings — presumably adapter_config proxies adapter_settings; verify.
      this.adapter_settings.models = model_data;
      this.model.re_render_settings();
      return model_data;
    } catch (error) {
      console.error('Failed to fetch Groq model data:', error);
      return {"_": {id: "Failed to fetch models from Groq"}};
    }
  }

  /**
   * Parse model data from Groq API format to a dictionary keyed by model ID.
   * The API returns a list of model objects like:
   * {
   *   "object": "list",
   *   "data": [ { "id": "...", "object": "model", ... }, ... ]
   * }
   *
   * Each model is converted to:
   * {
   *   model_name: model.id,
   *   id: model.id,
   *   max_input_tokens: model.context_window (8192 fallback),
   *   description: `Owned by: ${model.owned_by}, context: ${context_window}`,
   *   multimodal: heuristic based on the model id
   * }
   * @param {Object} model_data - Raw /models response from Groq
   * @returns {Object} Map of model objects keyed by id
   */
  parse_model_data(model_data) {
    if (model_data.object !== 'list' || !Array.isArray(model_data.data)) {
      return {"_": { id: "No models found." }};
    }

    const parsed = {};
    for (const m of model_data.data) {
      // Resolve the fallback once so max_input_tokens and the human-readable
      // description agree (previously the description could read "context: undefined").
      const context_window = m.context_window || 8192;
      parsed[m.id] = {
        model_name: m.id,
        id: m.id,
        max_input_tokens: context_window,
        description: `Owned by: ${m.owned_by}, context: ${context_window}`,
        // Basic heuristic for multimodality: 'vision' in the model id.
        // Adjust as needed based on known capabilities.
        multimodal: m.id.includes('vision'),
      };
    }
    return parsed;
  }

  /**
   * Validate configuration for Groq
   * @returns {Object} { valid: boolean, message: string }
   */
  validate_config() {
    if(!this.adapter_config.model_key) return { valid: false, message: "No model selected." };
    if (!this.api_key) {
      return { valid: false, message: "API key is missing." };
    }
    return { valid: true, message: "Configuration is valid." };
  }
}
|
||
/**
 * Request adapter for Groq API.
 * Groq expects assistant/tool message content as a single string, so array
 * content parts are joined and null content is normalized to an empty string.
 * @class SmartChatModelGroqRequestAdapter
 * @extends SmartChatModelRequestAdapter
 */
export class SmartChatModelGroqRequestAdapter extends SmartChatModelRequestAdapter {
  /**
   * Flatten assistant/tool message content into a single string.
   * Other roles pass through unchanged.
   * @param {Object} message - Chat message with `role` and `content`
   * @returns {string|*} Flattened content for assistant/tool roles; original content otherwise
   */
  _get_openai_content(message) {
    if(['assistant', 'tool'].includes(message.role)){
      if(Array.isArray(message.content)) {
        return message.content.map(part => {
          if (typeof part === 'string') return part;
          if (part?.text) return part.text;
          return '';
        }).join('\n');
      }
      // Normalize null/undefined content to an empty string (previously null
      // passed through unchanged, contradicting the adapter's documented behavior).
      if (message.content == null) return '';
    }
    return message.content;
  }
}
|
||
/**
 * Response adapter for Groq API.
 * Groq responses follow the OpenAI chat-completions shape, so the inherited
 * parsing from SmartChatModelResponseAdapter is used without overrides.
 * @class SmartChatModelGroqResponseAdapter
 * @extends SmartChatModelResponseAdapter
 */
export class SmartChatModelGroqResponseAdapter extends SmartChatModelResponseAdapter {
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,155 @@ | ||
import { SmartChatModelApiAdapter, SmartChatModelRequestAdapter, SmartChatModelResponseAdapter } from './_api.js'; | ||
|
||
/**
 * Adapter for LM Studio's OpenAI-compatible API.
 * LM Studio exposes OpenAI-like endpoints under /v1/*, so OpenAI-style
 * request/response handling can be reused with local-server defaults.
 * @class SmartChatModelLmStudioAdapter
 * @extends SmartChatModelApiAdapter
 *
 * @property {Object} static defaults
 * @property {string} defaults.description - Human-readable description
 * @property {string} defaults.type - Adapter type ("API")
 * @property {string} defaults.endpoint - LM Studio's OpenAI-compatible chat completions endpoint
 * @property {boolean} defaults.streaming - Whether streaming is supported
 * @property {string} defaults.adapter - Adapter identifier
 * @property {string} defaults.models_endpoint - Endpoint for retrieving models
 * @property {string} defaults.default_model - Default model to use
 * @property {string} defaults.signup_url - URL with info
 */
export class SmartChatModelLmStudioAdapter extends SmartChatModelApiAdapter {
  static defaults = {
    description: "LM Studio (OpenAI-compatible)",
    type: "API",
    endpoint: "http://localhost:1234/v1/chat/completions",
    streaming: true,
    adapter: "LM_Studio_OpenAI_Compat",
    models_endpoint: "http://localhost:1234/v1/models",
    default_model: "gpt-4o-mini", // Replace with a model listed by LM Studio
    signup_url: "https://lmstudio.ai/docs/api/openai-api",
    can_use_tools: true,
  };

  /**
   * Request adapter class
   */
  get req_adapter() { return SmartChatModelLmStudioRequestAdapter; }

  /**
   * Response adapter class
   */
  get res_adapter() { return SmartChatModelLmStudioResponseAdapter; }

  /**
   * No parameters are required to list models on a local server.
   * @returns {boolean} True
   */
  validate_get_models_params() {
    return true;
  }

  /**
   * Parse LM Studio's /v1/models response (OpenAI list format):
   * { "object": "list", "data": [ { "id": "model-name", "object": "model", ... }, ... ] }
   * @param {Object} model_data - Raw model data from LM Studio
   * @returns {Object} Map of model objects keyed by id
   */
  parse_model_data(model_data) {
    const is_model_list = model_data.object === 'list' && Array.isArray(model_data.data);
    if (!is_model_list) {
      return { "_": { id: "No models found." } };
    }
    // No context-length info in this payload, so none is recorded here;
    // LM Studio's /v1 listing also gives no multimodality signal.
    return Object.fromEntries(
      model_data.data.map((model) => [model.id, {
        id: model.id,
        model_name: model.id,
        description: `LM Studio model: ${model.id}`,
        multimodal: false,
      }])
    );
  }

  /**
   * HTTP method used against the models endpoint.
   */
  get models_endpoint_method(){
    return 'get';
  }

  /**
   * Count tokens in input text (no dedicated endpoint).
   * Rough estimate: 1 token ~ 4 chars.
   * @param {string|Object} input
   * @returns {Promise<number>}
   */
  async count_tokens(input) {
    const as_text = typeof input === 'string' ? input : JSON.stringify(input);
    return Math.ceil(as_text.length / 4);
  }

  /**
   * Test API key - LM Studio doesn't require an API key, so this always passes.
   * @returns {Promise<boolean>}
   */
  async test_api_key() {
    return true;
  }

  /**
   * Validate configuration: only a selected model is required.
   * @returns {Object} { valid: boolean, message: string }
   */
  validate_config() {
    return this.adapter_config.model_key
      ? { valid: true, message: "Configuration is valid." }
      : { valid: false, message: "No model selected." };
  }
}
|
||
/**
 * Request adapter for LM Studio OpenAI-compatible API.
 * LM Studio has no native tool_choice forcing, so when a specific tool is
 * requested this adapter appends an instruction to the final message asking
 * the model to use that tool.
 * @class SmartChatModelLmStudioRequestAdapter
 * @extends SmartChatModelRequestAdapter
 */
export class SmartChatModelLmStudioRequestAdapter extends SmartChatModelRequestAdapter {
  /**
   * Build the platform request from the OpenAI-shaped request.
   * @param {boolean} [streaming=false] - Whether to build a streaming request
   * @returns {Object} Request object with a JSON string `body`
   */
  to_platform(streaming = false) {
    const req = this.to_openai(streaming);
    const body = JSON.parse(req.body);

    // If a tool_choice is specified, append an instruction to the LAST message
    // telling the model to use the tool. (Guard against an empty message list,
    // which would previously have thrown on `messages[-1]`.)
    const forced_tool = this.tool_choice?.function?.name;
    if (forced_tool && Array.isArray(body.messages) && body.messages.length > 0) {
      const last_message = body.messages[body.messages.length - 1];
      if (typeof last_message.content === 'string') {
        // Promote plain-string content to the content-parts array form.
        last_message.content = [
          {
            type: 'text',
            text: last_message.content
          },
        ];
      } else if (!Array.isArray(last_message.content)) {
        // null/undefined content: start a fresh parts array instead of
        // crashing on `.push` below.
        last_message.content = [];
      }
      last_message.content.push({
        type: 'text',
        text: `Use the "${forced_tool}" tool.`
      });
    }

    req.body = JSON.stringify(body);
    return req;
  }
}
|
||
/**
 * Response adapter for LM Studio OpenAI-compatible API.
 * LM Studio returns OpenAI-like responses directly, so the inherited parsing
 * from SmartChatModelResponseAdapter is used without overrides.
 * @class SmartChatModelLmStudioResponseAdapter
 * @extends SmartChatModelResponseAdapter
 */
export class SmartChatModelLmStudioResponseAdapter extends SmartChatModelResponseAdapter {
}
Oops, something went wrong.