Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(node): Add @vercel/ai instrumentation #13892

Merged
merged 11 commits into from
Dec 9, 2024
1 change: 1 addition & 0 deletions dev-packages/node-integration-tests/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@
"@types/mongodb": "^3.6.20",
"@types/mysql": "^2.15.21",
"@types/pg": "^8.6.5",
"ai": "^4.0.6",
"amqplib": "^0.10.4",
"apollo-server": "^3.11.1",
"axios": "^1.7.7",
Expand Down
59 changes: 59 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/ai/scenario.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
// Integration-test scenario for the Vercel AI (`ai` package) instrumentation.
// The logging transport prints captured envelopes to stdout so the test
// runner (test.ts) can assert on the emitted transaction/spans.
const { loggingTransport } = require('@sentry-internal/node-integration-tests');
const Sentry = require('@sentry/node');

Sentry.init({
  debug: true,
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0, // sample every transaction so all spans reach the runner
  transport: loggingTransport,
});

// NOTE(review): `ai` is deliberately required AFTER Sentry.init() — presumably
// so the auto-instrumentation is installed before the module loads. Do not
// hoist these requires above the init call.
const { generateText } = require('ai');
const { MockLanguageModelV1 } = require('ai/test');

/**
 * Builds a mock language model whose `doGenerate` resolves immediately with a
 * fixed token usage (10 prompt / 20 completion) and the given response text.
 *
 * @param {string} responseText - Text the mocked model returns.
 * @returns {MockLanguageModelV1} A deterministic mock model.
 */
function buildMockModel(responseText) {
  return new MockLanguageModelV1({
    doGenerate: async () => ({
      rawCall: { rawPrompt: null, rawSettings: {} },
      finishReason: 'stop',
      usage: { promptTokens: 10, completionTokens: 20 },
      text: responseText,
    }),
  });
}

/**
 * Calls `generateText` three times inside a root `main` span:
 *  1. default telemetry settings,
 *  2. telemetry explicitly enabled (prompts should be attached to the span),
 *  3. telemetry explicitly disabled (no span should be captured).
 */
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    await generateText({
      model: buildMockModel('First span here!'),
      prompt: 'Where is the first span?',
    });

    // This span should have input and output prompts attached because telemetry is explicitly enabled.
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: buildMockModel('Second span here!'),
      prompt: 'Where is the second span?',
    });

    // This span should not be captured because we've disabled telemetry
    await generateText({
      experimental_telemetry: { isEnabled: false },
      model: buildMockModel('Third span here!'),
      prompt: 'Where is the third span?',
    });
  });
}

run();
129 changes: 129 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/ai/test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
import { cleanupChildProcesses, createRunner } from '../../../utils/runner';

describe('ai', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  // Token accounting emitted for every mocked `generateText` call
  // (the scenario's mock model always reports 10 prompt / 20 completion tokens).
  const TOKEN_ATTRIBUTES = {
    'ai.completion_tokens.used': 20,
    'ai.prompt_tokens.used': 10,
    'ai.tokens.used': 30,
    'ai.usage.completionTokens': 20,
    'ai.usage.promptTokens': 10,
  };

  /**
   * Expected `ai.pipeline.generateText` span.
   *
   * @param extraData - Additional data attributes; used to assert the prompt
   *   attributes that appear only when telemetry is explicitly enabled.
   */
  const expectPipelineSpan = (extraData: Record<string, unknown> = {}) =>
    expect.objectContaining({
      data: expect.objectContaining({
        ...TOKEN_ATTRIBUTES,
        'ai.model.id': 'mock-model-id',
        'ai.model.provider': 'mock-provider',
        'ai.model_id': 'mock-model-id',
        'ai.operationId': 'ai.generateText',
        'ai.pipeline.name': 'generateText',
        'ai.response.finishReason': 'stop',
        'ai.settings.maxRetries': 2,
        'ai.settings.maxSteps': 1,
        'ai.streaming': false,
        'operation.name': 'ai.generateText',
        'sentry.op': 'ai.pipeline.generateText',
        'sentry.origin': 'auto.vercelai.otel',
        ...extraData,
      }),
      description: 'generateText',
      op: 'ai.pipeline.generateText',
      origin: 'auto.vercelai.otel',
      status: 'ok',
    });

  /**
   * Expected `ai.run.doGenerate` child span — identical for every call,
   * regardless of the telemetry setting.
   */
  const expectDoGenerateSpan = () =>
    expect.objectContaining({
      data: expect.objectContaining({
        ...TOKEN_ATTRIBUTES,
        'ai.model.id': 'mock-model-id',
        'ai.model.provider': 'mock-provider',
        'ai.model_id': 'mock-model-id',
        'ai.operationId': 'ai.generateText.doGenerate',
        'ai.pipeline.name': 'generateText.doGenerate',
        'ai.response.finishReason': 'stop',
        'ai.response.model': 'mock-model-id',
        'ai.settings.maxRetries': 2,
        'ai.streaming': false,
        'gen_ai.request.model': 'mock-model-id',
        'gen_ai.response.finish_reasons': ['stop'],
        'gen_ai.system': 'mock-provider',
        'gen_ai.usage.input_tokens': 10,
        'gen_ai.usage.output_tokens': 20,
        'operation.name': 'ai.generateText.doGenerate',
        'sentry.op': 'ai.run.doGenerate',
        'sentry.origin': 'auto.vercelai.otel',
      }),
      description: 'generateText.doGenerate',
      op: 'ai.run.doGenerate',
      origin: 'auto.vercelai.otel',
      status: 'ok',
    });

  test('creates ai related spans', done => {
    const EXPECTED_TRANSACTION = {
      transaction: 'main',
      spans: expect.arrayContaining([
        // First call: default telemetry — span captured, no prompts attached.
        expectPipelineSpan(),
        expectDoGenerateSpan(),
        // Second call: telemetry explicitly enabled — prompts ARE attached.
        expectPipelineSpan({
          'ai.input_messages': '{"prompt":"Where is the second span?"}',
          'ai.prompt': '{"prompt":"Where is the second span?"}',
        }),
        expectDoGenerateSpan(),
        // Third call (telemetry disabled) intentionally has no expectation:
        // the scenario asserts it produces no span at all.
      ]),
    };

    createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start(done);
  });
});
3 changes: 3 additions & 0 deletions packages/node/src/integrations/tracing/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import { instrumentNest, nestIntegration } from './nest/nest';
import { instrumentPostgres, postgresIntegration } from './postgres';
import { instrumentRedis, redisIntegration } from './redis';
import { instrumentTedious, tediousIntegration } from './tedious';
import { instrumentVercelAi, vercelAIIntegration } from './vercelai';

/**
* With OTEL, all performance integrations will be added, as OTEL only initializes them when the patched package is actually required.
Expand Down Expand Up @@ -48,6 +49,7 @@ export function getAutoPerformanceIntegrations(): Integration[] {
kafkaIntegration(),
amqplibIntegration(),
lruMemoizerIntegration(),
vercelAIIntegration(),
];
}

Expand Down Expand Up @@ -78,5 +80,6 @@ export function getOpenTelemetryInstrumentationToPreload(): (((options?: any) =>
instrumentTedious,
instrumentGenericPool,
instrumentAmqplib,
instrumentVercelAi,
];
}
Loading
Loading