Autocomplete: add request-params tests for the openaicompatible provider (#5616)

- Add request-params tests for all possible configurations of the
`openaicompatible` provider.
- Follow-up to #5604
valerybugakov authored Sep 23, 2024
1 parent 220f821 commit fa96146
Showing 2 changed files with 87 additions and 1 deletion.
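
The tests below configure models through server-side model refs of the form `provider::apiVersion::model`, e.g. `groq::v1::llama-3.1-70b-versatile`. A minimal sketch of how such a ref decomposes into its parts — the `parseModelRef` helper here is hypothetical, for illustration only, and not part of this change:

// Hypothetical helper, not part of this commit: splits a model ref string
// such as 'groq::v1::llama-3.1-70b-versatile' into its three parts.
interface ParsedModelRef {
    providerId: string
    apiVersionId: string
    modelId: string
}

function parseModelRef(ref: string): ParsedModelRef {
    const [providerId, apiVersionId, modelId] = ref.split('::')
    if (!providerId || !apiVersionId || !modelId) {
        throw new Error(`Invalid model ref: ${ref}`)
    }
    return { providerId, apiVersionId, modelId }
}

// parseModelRef('groq::v1::llama-3.1-70b-versatile') returns
// { providerId: 'groq', apiVersionId: 'v1', modelId: 'llama-3.1-70b-versatile' }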
86 changes: 86 additions & 0 deletions vscode/src/completions/providers/openaicompatible.test.ts
@@ -0,0 +1,86 @@
import { Observable } from 'observable-fns'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'

import { featureFlagProvider, modelsService } from '@sourcegraph/cody-shared'

import { mockLocalStorage } from '../../services/LocalStorageProvider'

import {
    type AutocompleteProviderValuesToAssert,
    getAutocompleteProviderFromLocalSettings,
    getAutocompleteProviderFromServerSideModelConfig,
    getAutocompleteProviderFromSiteConfigCodyLLMConfiguration,
    getRequestParamsWithoutMessages,
} from './shared/helpers'

describe('openaicompatible autocomplete provider', () => {
    beforeEach(async () => {
        mockLocalStorage()
        vi.spyOn(featureFlagProvider, 'evaluatedFeatureFlag').mockReturnValue(Observable.of(false))
    })

    afterEach(() => {
        modelsService.reset()
    })

    const valuesToAssert = {
        providerId: 'openaicompatible',
        legacyModel: 'llama-3.1-70b-versatile',
        requestParams: {
            maxTokensToSample: 256,
            temperature: 0.2,
            timeoutMs: 7000,
            topK: 0,
        },
    } satisfies AutocompleteProviderValuesToAssert

    it('throws if used with local-editor-settings', async () => {
        const createCall = getAutocompleteProviderFromLocalSettings({
            providerId: 'openaicompatible',
            legacyModel: 'gpt-4o',
            isDotCom: true,
        })

        await expect(createCall).rejects.toThrowErrorMatchingInlineSnapshot(
            // biome-ignore lint/style/noUnusedTemplateLiteral: snapshot value
            `[Error: Model definition is missing for \`openaicompatible\` provider.]`
        )
    })

    it('[dotcom] server-side-model-config', async () => {
        const provider = await getAutocompleteProviderFromServerSideModelConfig({
            modelRef: 'groq::v1::llama-3.1-70b-versatile',
            isDotCom: true,
        })

        // Switches to the first available model, because `llama-3.1-70b-versatile` is
        // an enterprise-tier model and cannot be used on DotCom.
        expect(provider.id).toBe('anthropic')
        expect(provider.legacyModel).toBe('anthropic/claude-instant-1.2')
    })

    it('[enterprise] server-side-model-config', async () => {
        const provider = await getAutocompleteProviderFromServerSideModelConfig({
            modelRef: 'groq::v1::llama-3.1-70b-versatile',
            isDotCom: false,
        })
        const { providerId, legacyModel, requestParams } = valuesToAssert

        expect(provider.id).toBe(providerId)
        expect(provider.legacyModel).toBe(legacyModel)
        expect(getRequestParamsWithoutMessages(provider)).toMatchObject(requestParams)
    })

    it('throws if used with site-config-cody-llm-configuration', async () => {
        const createCall = getAutocompleteProviderFromSiteConfigCodyLLMConfiguration({
            providerId: 'openaicompatible',
            legacyModel: 'gpt-4o',
            isDotCom: true,
        })

        await expect(createCall).rejects.toThrowErrorMatchingInlineSnapshot(
            // biome-ignore lint/style/noUnusedTemplateLiteral: snapshot value
            `[Error: Model definition is missing for \`openaicompatible\` provider.]`
        )
    })
})
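
For readers unfamiliar with the assertion style above: `toThrowErrorMatchingInlineSnapshot` embeds the expected error text in the test file itself and compares it on every run. A minimal standalone sketch of the pattern — a hypothetical test, not part of this commit:

import { expect, it } from 'vitest'

it('records a rejected promise as an inline error snapshot', async () => {
    const failing = Promise.reject(new Error('Model definition is missing.'))
    // Vitest renders thrown errors as `[Error: <message>]` in the snapshot.
    await expect(failing).rejects.toThrowErrorMatchingInlineSnapshot(
        `[Error: Model definition is missing.]`
    )
})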
2 changes: 1 addition & 1 deletion vscode/src/completions/providers/unstable-openai.test.ts
@@ -13,7 +13,7 @@ import {
     testAutocompleteProvider,
 } from './shared/helpers'
 
-describe('unstable-openai', () => {
+describe('unstable-openai autocomplete provider', () => {
     beforeEach(async () => {
         mockLocalStorage()
         vi.spyOn(featureFlagProvider, 'evaluatedFeatureFlag').mockReturnValue(Observable.of(false))
