Skip to content

Commit

Permalink
Add new adapters for LM Studio and Groq; refactor existing adapter methods

Browse files Browse the repository at this point in the history

- Introduced SmartChatModelLmStudioAdapter and SmartChatModelGroqAdapter for new API integrations.
- Updated SmartChatModelRequestAdapter and SmartChatModelResponseAdapter methods to handle message content more effectively.
- Enhanced SmartChatModelCustomAdapter with custom protocol, hostname, port, and path getters.
- Refactored message handling in SmartThread to improve tool usage logic based on message roles.
  • Loading branch information
Brian Joseph Petro committed Dec 19, 2024
1 parent 363694c commit 5f50fde
Show file tree
Hide file tree
Showing 6 changed files with 339 additions and 7 deletions.
6 changes: 6 additions & 0 deletions smart-chat-model/adapters.js
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ import { SmartChatModelCohereAdapter } from './adapters/cohere.js';
import { SmartChatModelOpenRouterAdapter } from './adapters/open_router.js';
import { SmartChatModelCustomAdapter } from './adapters/_custom.js';
import { SmartChatModelOllamaAdapter } from './adapters/ollama.js';
import { SmartChatModelLmStudioAdapter } from './adapters/lm_studio.js';
import { SmartChatModelGroqAdapter } from './adapters/groq.js';
export {
SmartChatModelAnthropicAdapter,
SmartChatModelOpenaiAdapter,
Expand All @@ -13,11 +15,15 @@ export {
SmartChatModelOpenRouterAdapter,
SmartChatModelCustomAdapter,
SmartChatModelOllamaAdapter,
SmartChatModelLmStudioAdapter,
SmartChatModelGroqAdapter,
SmartChatModelAnthropicAdapter as anthropic,
SmartChatModelOpenaiAdapter as openai,
SmartChatModelGeminiAdapter as gemini,
SmartChatModelCohereAdapter as cohere,
SmartChatModelOpenRouterAdapter as open_router,
SmartChatModelCustomAdapter as custom,
SmartChatModelOllamaAdapter as ollama,
SmartChatModelLmStudioAdapter as lm_studio,
SmartChatModelGroqAdapter as groq,
};
12 changes: 6 additions & 6 deletions smart-chat-model/adapters/_api.js
Original file line number Diff line number Diff line change
Expand Up @@ -473,7 +473,7 @@ export class SmartChatModelRequestAdapter {
_transform_single_message_to_openai(message) {
const transformed = {
role: this._get_openai_role(message.role),
content: this._get_openai_content(message.content),
content: this._get_openai_content(message),
};

if (message.name) transformed.name = message.name;
Expand Down Expand Up @@ -501,9 +501,9 @@ export class SmartChatModelRequestAdapter {
* @returns {string} The transformed content.
* @private
*/
_get_openai_content(content) {
_get_openai_content(message) {
// Override in subclasses if needed
return content;
return message.content;
}

/**
Expand Down Expand Up @@ -721,7 +721,7 @@ export class SmartChatModelResponseAdapter {
_transform_message_to_openai(message={}) {
const transformed = {
role: this._get_openai_role(message.role),
content: this._get_openai_content(message.content),
content: this._get_openai_content(message),
};

if (message.name) transformed.name = message.name;
Expand All @@ -748,9 +748,9 @@ export class SmartChatModelResponseAdapter {
* @returns {string} The transformed content.
* @private
*/
_get_openai_content(content) {
_get_openai_content(message) {
// Override in subclasses if needed
return content;
return message.content;
}

/**
Expand Down
25 changes: 25 additions & 0 deletions smart-chat-model/adapters/_custom.js
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,31 @@ export class SmartChatModelCustomAdapter extends SmartChatModelApiAdapter {

req_adapter = SmartChatModelCustomRequestAdapter;

get custom_protocol() {
return this.adapter_config.protocol || 'http';
}
get custom_hostname(){
return this.adapter_config.hostname || 'localhost';
}
get custom_port(){
return this.adapter_config.port ? `:${this.adapter_config.port}` : '';
}
get custom_path(){
let path = this.adapter_config.path || '';
if(path && !path.startsWith('/')) path = `/${path}`;
return path;
}

get endpoint() {
return [
this.custom_protocol,
'://',
this.custom_hostname,
this.custom_port,
this.custom_path
].join('');
}

get settings_config() {
return {
// LOCAL PLATFORM SETTINGS
Expand Down
139 changes: 139 additions & 0 deletions smart-chat-model/adapters/groq.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
import { SmartChatModelApiAdapter, SmartChatModelRequestAdapter, SmartChatModelResponseAdapter } from './_api.js';

/**
* Adapter for Groq API.
* This adapter assumes the Groq endpoint provides a format similar to OpenAI.
* The main difference from openai.js: When processing assistant messages with array or null content, we merge into a single string.
*/
export class SmartChatModelGroqAdapter extends SmartChatModelApiAdapter {
  static defaults = {
    description: "Groq",
    type: "API",
    endpoint: "https://api.groq.com/openai/v1/chat/completions",
    streaming: true,
    adapter: "Groq",
    models_endpoint: "https://api.groq.com/openai/v1/models",
    default_model: "llama3-8b-8192",
    signup_url: "https://groq.com",
    can_use_tools: true,
  };

  /**
   * Request adapter class
   * @returns {typeof SmartChatModelGroqRequestAdapter}
   */
  get req_adapter() { return SmartChatModelGroqRequestAdapter; }

  /**
   * Response adapter class
   * @returns {typeof SmartChatModelGroqResponseAdapter}
   */
  get res_adapter() { return SmartChatModelGroqResponseAdapter; }

  /**
   * Retrieve the list of models from Groq's API.
   * Cached models from adapter_config are returned unless refresh is requested.
   * On success the parsed models are stored on adapter_settings and the
   * settings UI is re-rendered.
   * @param {boolean} [refresh=false] - Force a fresh fetch, bypassing the cache.
   * @returns {Promise<Object>} A dictionary of models keyed by their id
   */
  async get_models(refresh = false) {
    if (!refresh && this.adapter_config?.models && Object.keys(this.adapter_config.models).length > 0) {
      return this.adapter_config.models;
    }

    const request_params = {
      url: this.models_endpoint,
      method: 'GET',
      headers: {
        'Authorization': `Bearer ${this.api_key}`
      }
    };

    try {
      const resp = await this.http_adapter.request(request_params);
      const data = await resp.json();
      const model_data = this.parse_model_data(data);
      this.adapter_settings.models = model_data;
      this.model.re_render_settings();
      return model_data;
    } catch (error) {
      console.error('Failed to fetch Groq model data:', error);
      return {"_": {id: "Failed to fetch models from Groq"}};
    }
  }

  /**
   * Parse model data from Groq API format to a dictionary keyed by model ID.
   * The API returns a list of model objects like:
   * {
   *   "object": "list",
   *   "data": [ { "id": "...", "object": "model", ... }, ... ]
   * }
   *
   * Each model is converted to:
   * {
   *   model_name: model.id,
   *   id: model.id,
   *   max_input_tokens: model.context_window (falls back to 8192),
   *   description: human-readable owner/context summary,
   *   multimodal: heuristic based on the model id
   * }
   * @param {Object} model_data - Raw response body from the models endpoint.
   * @returns {Object} Models keyed by id, or a "_" placeholder entry on bad input.
   */
  parse_model_data(model_data) {
    if (model_data.object !== 'list' || !Array.isArray(model_data.data)) {
      return {"_": { id: "No models found." }};
    }

    const parsed = {};
    for (const m of model_data.data) {
      parsed[m.id] = {
        model_name: m.id,
        id: m.id,
        // Some entries may omit context_window; default to 8192 tokens.
        max_input_tokens: m.context_window || 8192,
        // Avoid rendering "context: undefined" when context_window is absent.
        description: `Owned by: ${m.owned_by}, context: ${m.context_window ?? 'unknown'}`,
        // Basic heuristic: ids containing 'vision' are treated as multimodal.
        // Adjust as needed based on known capabilities.
        multimodal: m.id.includes('vision'),
      };
    }
    return parsed;
  }

  /**
   * Validate configuration for Groq: a model must be selected and an
   * API key must be present.
   * @returns {Object} { valid: boolean, message: string }
   */
  validate_config() {
    if(!this.adapter_config.model_key) return { valid: false, message: "No model selected." };
    if (!this.api_key) {
      return { valid: false, message: "API key is missing." };
    }
    return { valid: true, message: "Configuration is valid." };
  }
}

/**
* Request adapter for Groq API
* @class SmartChatModelGroqRequestAdapter
* @extends SmartChatModelRequestAdapter
*/
export class SmartChatModelGroqRequestAdapter extends SmartChatModelRequestAdapter {
  /**
   * Normalize message content for Groq's OpenAI-compatible endpoint.
   * For 'assistant' and 'tool' roles, array content parts are merged into a
   * single newline-joined string, and null/undefined content becomes ''
   * (per the file-level note: Groq expects string content for these roles).
   * Other roles pass through unchanged.
   * @param {Object} message - Chat message with role and content.
   * @returns {string|*} Normalized content.
   * @private
   */
  _get_openai_content(message) {
    if(['assistant', 'tool'].includes(message.role)){
      // Assistant tool-call messages may carry null content; send empty string.
      if (message.content == null) return '';
      if(Array.isArray(message.content)) {
        return message.content.map(part => {
          if (typeof part === 'string') return part;
          if (part?.text) return part.text;
          return '';
        }).join('\n');
      }
    }
    return message.content;
  }
}

/**
* Response adapter for Groq API
* @class SmartChatModelGroqResponseAdapter
* @extends SmartChatModelResponseAdapter
*/
export class SmartChatModelGroqResponseAdapter extends SmartChatModelResponseAdapter {
  // Groq responses follow the OpenAI chat-completions shape, so the base
  // response adapter's parsing is reused without modification.
}
155 changes: 155 additions & 0 deletions smart-chat-model/adapters/lm_studio.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,155 @@
import { SmartChatModelApiAdapter, SmartChatModelRequestAdapter, SmartChatModelResponseAdapter } from './_api.js';

/**
* Adapter for LM Studio's OpenAI-compatible API.
* LM Studio provides OpenAI-like endpoints at /v1/*, allowing reuse of OpenAI clients.
* @class SmartChatModelLmStudioAdapter
* @extends SmartChatModelApiAdapter
*
* @property {Object} static defaults
* @property {string} defaults.description - Human-readable description
* @property {string} defaults.type - Adapter type ("API")
* @property {string} defaults.endpoint - LM Studio's OpenAI-compatible chat completions endpoint
* @property {boolean} defaults.streaming - Whether streaming is supported
* @property {string} defaults.adapter - Adapter identifier
* @property {string} defaults.models_endpoint - Endpoint for retrieving models
* @property {string} defaults.default_model - Default model to use
* @property {string} defaults.signup_url - URL with info
*/
export class SmartChatModelLmStudioAdapter extends SmartChatModelApiAdapter {
  static defaults = {
    description: "LM Studio (OpenAI-compatible)",
    type: "API",
    endpoint: "http://localhost:1234/v1/chat/completions",
    streaming: true,
    adapter: "LM_Studio_OpenAI_Compat",
    models_endpoint: "http://localhost:1234/v1/models",
    default_model: "gpt-4o-mini", // Replace with a model listed by LM Studio
    signup_url: "https://lmstudio.ai/docs/api/openai-api",
    can_use_tools: true,
  };

  /** Request adapter class for this platform. */
  get req_adapter() { return SmartChatModelLmStudioRequestAdapter; }

  /** Response adapter class for this platform. */
  get res_adapter() { return SmartChatModelLmStudioResponseAdapter; }

  /**
   * No parameters are required to list models on LM Studio.
   * @returns {boolean} Always true.
   */
  validate_get_models_params() {
    return true;
  }

  /**
   * Parse LM Studio's /v1/models response, which mirrors OpenAI's
   * list format: { "object": "list", "data": [ { "id": ..., ... } ] }.
   * @param {Object} model_data - Raw model data from LM Studio
   * @returns {Object} Map of model objects keyed by model id
   */
  parse_model_data(model_data) {
    const looks_like_list = model_data.object === 'list' && Array.isArray(model_data.data);
    if (!looks_like_list) {
      return { "_": { id: "No models found." } };
    }
    const models = {};
    model_data.data.forEach((entry) => {
      models[entry.id] = {
        id: entry.id,
        model_name: entry.id,
        // No direct context-length info in this response; LM Studio may or
        // may not include it in the model object — left unset here.
        description: `LM Studio model: ${entry.id}`,
        multimodal: false, // LM Studio doesn't mention multimodal support via /v1
      };
    });
    return models;
  }

  /** HTTP verb for the models endpoint. */
  get models_endpoint_method(){
    return 'get';
  }

  /**
   * Estimate token count (no dedicated endpoint exists).
   * Uses the rough heuristic of one token per four characters.
   * @param {string|Object} input
   * @returns {Promise<number>}
   */
  async count_tokens(input) {
    const serialized = typeof input === 'string' ? input : JSON.stringify(input);
    return Math.ceil(serialized.length / 4);
  }

  /**
   * LM Studio requires no API key, so the key check always succeeds.
   * @returns {Promise<boolean>}
   */
  async test_api_key() {
    return true;
  }


  /**
   * Validate configuration: only a selected model is required.
   * @returns {Object} { valid, message }
   */
  validate_config() {
    if (!this.adapter_config.model_key) {
      return { valid: false, message: "No model selected." };
    }
    return { valid: true, message: "Configuration is valid." };
  }
}

/**
* Request adapter for LM Studio OpenAI-compatible API
* Allows forcing tool use by adding a system prompt if tool_choice is set.
* @class SmartChatModelLmStudioRequestAdapter
* @extends SmartChatModelRequestAdapter
*/
export class SmartChatModelLmStudioRequestAdapter extends SmartChatModelRequestAdapter {
  /**
   * Build the platform request from the OpenAI-format request.
   * When a specific tool is requested via tool_choice, an extra text part
   * instructing the model to use that tool is appended to the LAST message
   * (note: not a system message, despite tool_choice semantics elsewhere).
   * @param {boolean} [streaming=false] - Whether to build a streaming request.
   * @returns {Object} Request params with a JSON-serialized body.
   */
  to_platform(streaming = false) {
    const req = this.to_openai(streaming);
    const body = JSON.parse(req.body);

    const forced_tool = this.tool_choice?.function?.name;
    const last_message = body.messages?.[body.messages.length - 1];
    // Guard: nothing to do when no tool is forced or there are no messages
    // (the original indexed messages unconditionally and could throw).
    if (forced_tool && last_message) {
      if (typeof last_message.content === 'string') {
        // Promote plain-string content to the parts-array form.
        last_message.content = [
          {
            type: 'text',
            text: last_message.content
          },
        ];
      } else if (!Array.isArray(last_message.content)) {
        // Content may be null (e.g. a prior assistant tool-call message).
        last_message.content = [];
      }
      last_message.content.push({
        type: 'text',
        text: `Use the "${forced_tool}" tool.`
      });
    }

    req.body = JSON.stringify(body);
    return req;
  }
}

/**
* Response adapter for LM Studio OpenAI-compatible API
* LM Studio returns OpenAI-like responses directly.
* @class SmartChatModelLmStudioResponseAdapter
* @extends SmartChatModelResponseAdapter
*/
export class SmartChatModelLmStudioResponseAdapter extends SmartChatModelResponseAdapter {
  // LM Studio emits OpenAI-shaped chat-completion responses, so the base
  // response adapter's parsing is reused without modification.
}
Loading

0 comments on commit 5f50fde

Please sign in to comment.