From 056970ea059f1b3a07d7e33734106f939d60b022 Mon Sep 17 00:00:00 2001 From: gagik Date: Thu, 5 Dec 2024 13:28:54 +0100 Subject: [PATCH] adjust order and fix tests --- src/participant/prompts/promptBase.ts | 6 ++-- .../suite/participant/participant.test.ts | 36 +++++-------------- 2 files changed, 10 insertions(+), 32 deletions(-) diff --git a/src/participant/prompts/promptBase.ts b/src/participant/prompts/promptBase.ts index cf58290fc..19b03f7b7 100644 --- a/src/participant/prompts/promptBase.ts +++ b/src/participant/prompts/promptBase.ts @@ -174,13 +174,11 @@ export abstract class PromptBase { // Go through the history in reverse order to find the last user message. for (let i = history.length - 1; i >= 0; i--) { if (history[i] instanceof vscode.ChatRequestTurn) { + request.prompt = (history[i] as vscode.ChatRequestTurn).prompt; // Rewrite the arguments so that the prompt is the last user message from history args = { ...args, - request: { - ...request, - prompt: (history[i] as vscode.ChatRequestTurn).prompt, - }, + request, }; // Remove the item from the history messages array. diff --git a/src/test/suite/participant/participant.test.ts b/src/test/suite/participant/participant.test.ts index 57cd7eb7a..10d043155 100644 --- a/src/test/suite/participant/participant.test.ts +++ b/src/test/suite/participant/participant.test.ts @@ -232,8 +232,8 @@ suite('Participant Controller Test Suite', function () { chatTokenStub = { onCancellationRequested: sinon.fake(), }; - // Resolve to 0 to prevent undefined being returned - // override to other values to test different count limits. + /** Resolves to 0 by default to prevent undefined being returned. + Resolve to other values to test different count limits. */ countTokensStub = sinon.stub().resolves(0); // The model returned by vscode.lm.selectChatModels is always undefined in tests. sendRequestStub = sinon.stub(); @@ -874,11 +874,6 @@ suite('Participant Controller Test Suite', function () { }; await invokeChatHandler(chatRequestMock); - // +1 call when counting tokens of 3 sample documents - // +1 call when counting tokens in the history. - // +1 to account for zero-based indexing of the offset) - expect(countTokensStub).callCount(callsOffset + 3); - const messages = sendRequestStub.secondCall .args[0] as vscode.LanguageModelChatMessage[]; expect(getMessageContent(messages[1])).to.include( @@ -935,8 +930,9 @@ suite('Participant Controller Test Suite', function () { ]; // This is to offset the previous countTokens calls - // (1 for user prompt and 1 for assistant prompt calculation) - const callsOffset = 2; + // buildMessages gets called twice for namespace so it is adjusted accordingly + // (1 for request prompt and 1 for assistant prompt calculation) + const callsOffset = 4; // Called when including sample documents countTokensStub @@ -946,11 +942,6 @@ suite('Participant Controller Test Suite', function () { .onCall(callsOffset + 1) .resolves(MAX_TOTAL_PROMPT_LENGTH_MOCK); - // Called when calculating the added finalized user prompt - countTokensStub - .onCall(callsOffset + 2) - .resolves(MAX_TOTAL_PROMPT_LENGTH_MOCK); - sampleStub.resolves(sampleDocs); const chatRequestMock = { @@ -960,12 +951,6 @@ suite('Participant Controller Test Suite', function () { }; await invokeChatHandler(chatRequestMock); - // +1 call when counting tokens of 3 sample documents - // +1 call for the retry with 1 sample documents - // +1 call when counting tokens in the history. - // +1 to account for zero-based indexing of the offset) - expect(countTokensStub).callCount(callsOffset + 4); - const messages = sendRequestStub.secondCall .args[0] as vscode.LanguageModelChatMessage[]; expect(getMessageContent(messages[1])).to.include( @@ -996,8 +981,9 @@ suite('Participant Controller Test Suite', function () { test('does not include sample documents when even 1 makes prompt too long', async function () { // This is to offset the previous countTokens calls - // (1 for user prompt and 1 for assistant prompt calculation) - const callsOffset = 2; + // buildMessages gets called twice for namespace so it is adjusted accordingly + // (1 for request prompt and 1 for assistant prompt calculation) + const callsOffset = 4; // Called when including sample documents countTokensStub @@ -1037,12 +1023,6 @@ suite('Participant Controller Test Suite', function () { }; await invokeChatHandler(chatRequestMock); - // +1 call when counting tokens of 3 sample documents - // +1 call for the retry with 1 sample documents - // +1 call when counting tokens in the history. - // +1 to account for zero-based indexing of the offset) - expect(countTokensStub).callCount(callsOffset + 4); - const messages = sendRequestStub.secondCall .args[0] as vscode.LanguageModelChatMessage[]; expect(getMessageContent(messages[1])).to.not.include(