diff --git a/libs/langchain-community/src/llms/togetherai.ts b/libs/langchain-community/src/llms/togetherai.ts
index 3d89a724f67f..86d83e31df47 100644
--- a/libs/langchain-community/src/llms/togetherai.ts
+++ b/libs/langchain-community/src/llms/togetherai.ts
@@ -23,6 +23,7 @@ interface TogetherAIInferenceResult {
     top_p: number;
     top_k: number;
     max_tokens: number;
+    stop: string[];
   };
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
   subjobs: Array<any>;
@@ -96,6 +97,10 @@ export interface TogetherAIInputs extends BaseLLMParams {
    * Limit the number of tokens generated.
    */
  maxTokens?: number;
+  /**
+   * A list of tokens at which the generation should stop.
+   */
+  stop?: string[];
 }

 export interface TogetherAICallOptions
@@ -110,6 +115,7 @@
       | "logprobs"
       | "safetyModel"
       | "maxTokens"
+      | "stop"
     > {}

 export class TogetherAI extends LLM {
@@ -137,6 +143,8 @@ export class TogetherAI extends LLM {

   safetyModel?: string;

+  stop?: string[];
+
   private apiKey: string;

   private inferenceUrl = "https://api.together.xyz/inference";
@@ -162,6 +170,7 @@ export class TogetherAI extends LLM {
     this.logprobs = inputs.logprobs;
     this.safetyModel = inputs.safetyModel;
     this.maxTokens = inputs.maxTokens;
+    this.stop = inputs.stop;
   }

   _llmType() {
@@ -188,6 +197,7 @@ export class TogetherAI extends LLM {
       stream_tokens: this?.streaming,
       safety_model: this?.safetyModel ?? options?.safetyModel,
       max_tokens: this?.maxTokens ?? options?.maxTokens,
+      stop: this?.stop ?? options?.stop,
     };
     return body;
   }
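
For reference, a minimal usage sketch of the new `stop` option added by this diff. This is not part of the change itself; the `modelName` field and the environment-based API key are assumed from the rest of this file, and the model id is illustrative:

```typescript
import { TogetherAI } from "@langchain/community/llms/togetherai";

// Sketch only: `modelName` and the TOGETHER_AI_API_KEY env var are assumed
// from the surrounding file; the model id is illustrative.
const model = new TogetherAI({
  modelName: "togethercomputer/llama-2-70b-chat",
  maxTokens: 256,
  // With this change, `stop` is forwarded to the inference request body,
  // so generation halts once any of these strings is produced.
  stop: ["</s>", "\n\nUser:"],
});

const output = await model.invoke("Write a haiku about the sea.");
console.log(output);
```

Note that `stop` can also be passed per-call via `TogetherAICallOptions`; the constructor value takes precedence in `createBody` (`this?.stop ?? options?.stop`), matching how `maxTokens` and `safetyModel` are resolved.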