diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
index e3598fd15d0e..9b3242b996f7 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
+++ b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
@@ -35,14 +35,14 @@ test('should create AI spans with correct attributes', async ({ page }) => {
   expect(firstPipelineSpan?.data?.['ai.model.id']).toBe('mock-model-id');
   expect(firstPipelineSpan?.data?.['ai.model.provider']).toBe('mock-provider');
   expect(firstPipelineSpan?.data?.['ai.prompt']).toContain('Where is the first span?');
-  expect(firstPipelineSpan?.data?.['ai.response.text']).toBe('First span here!');
+  expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!');
   expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
   expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */

   // Second AI call - explicitly enabled telemetry
   const secondPipelineSpan = aiPipelineSpans[0];
   expect(secondPipelineSpan?.data?.['ai.prompt']).toContain('Where is the second span?');
-  expect(secondPipelineSpan?.data?.['ai.response.text']).toContain('Second span here!');
+  expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!');

   // Third AI call - with tool calls
   /* const thirdPipelineSpan = aiPipelineSpans[2];
@@ -51,7 +51,7 @@ test('should create AI spans with correct attributes', async ({ page }) => {
   expect(thirdPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(25); */

   // Tool call span
-  /*  const toolSpan = toolCallSpans[0];
+  /* const toolSpan = toolCallSpans[0];
   expect(toolSpan?.data?.['ai.toolCall.name']).toBe('getWeather');
   expect(toolSpan?.data?.['ai.toolCall.id']).toBe('call-1');
   expect(toolSpan?.data?.['ai.toolCall.args']).toContain('San Francisco');
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs
index b798e21228f5..d69f7dca5feb 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs
@@ -7,5 +7,5 @@ Sentry.init({
   tracesSampleRate: 1.0,
   sendDefaultPii: true,
   transport: loggingTransport,
-  integrations: [Sentry.vercelAIIntegration()],
+  integrations: [Sentry.vercelAIIntegration({ force: true })],
 });
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs
index 5e898ee1949d..e4cd7b9cabd7 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs
@@ -6,5 +6,5 @@ Sentry.init({
   release: '1.0',
   tracesSampleRate: 1.0,
   transport: loggingTransport,
-  integrations: [Sentry.vercelAIIntegration()],
+  integrations: [Sentry.vercelAIIntegration({ force: true })],
 });
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index fdeec051389f..946e2067212b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -73,7 +73,7 @@ describe('Vercel AI integration', () => {
           'ai.pipeline.name': 'generateText',
           'ai.prompt': '{"prompt":"Where is the second span?"}',
           'ai.response.finishReason': 'stop',
-          'ai.response.text': expect.any(String),
+          'gen_ai.response.text': expect.any(String),
           'ai.settings.maxRetries': 2,
           'ai.settings.maxSteps': 1,
           'ai.streaming': false,
@@ -108,10 +108,10 @@ describe('Vercel AI integration', () => {
           'ai.response.finishReason': 'stop',
           'ai.response.model': 'mock-model-id',
           'ai.response.id': expect.any(String),
-          'ai.response.text': expect.any(String),
+          'gen_ai.response.text': expect.any(String),
           'ai.response.timestamp': expect.any(String),
           'ai.prompt.format': expect.any(String),
-          'ai.prompt.messages': expect.any(String),
+          'gen_ai.request.messages': expect.any(String),
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -210,7 +210,7 @@ describe('Vercel AI integration', () => {
           'ai.pipeline.name': 'generateText',
           'ai.prompt': '{"prompt":"Where is the first span?"}',
           'ai.response.finishReason': 'stop',
-          'ai.response.text': 'First span here!',
+          'gen_ai.response.text': 'First span here!',
           'ai.settings.maxRetries': 2,
           'ai.settings.maxSteps': 1,
           'ai.streaming': false,
@@ -236,11 +236,11 @@ describe('Vercel AI integration', () => {
           'ai.operationId': 'ai.generateText.doGenerate',
           'ai.pipeline.name': 'generateText.doGenerate',
           'ai.prompt.format': 'prompt',
-          'ai.prompt.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
+          'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
           'ai.response.finishReason': 'stop',
           'ai.response.id': expect.any(String),
           'ai.response.model': 'mock-model-id',
-          'ai.response.text': 'First span here!',
+          'gen_ai.response.text': 'First span here!',
           'ai.response.timestamp': expect.any(String),
           'ai.settings.maxRetries': 2,
           'ai.streaming': false,
@@ -270,7 +270,7 @@ describe('Vercel AI integration', () => {
           'ai.pipeline.name': 'generateText',
           'ai.prompt': '{"prompt":"Where is the second span?"}',
           'ai.response.finishReason': 'stop',
-          'ai.response.text': expect.any(String),
+          'gen_ai.response.text': expect.any(String),
           'ai.settings.maxRetries': 2,
           'ai.settings.maxSteps': 1,
           'ai.streaming': false,
@@ -305,10 +305,10 @@ describe('Vercel AI integration', () => {
           'ai.response.finishReason': 'stop',
           'ai.response.model': 'mock-model-id',
           'ai.response.id': expect.any(String),
-          'ai.response.text': expect.any(String),
+          'gen_ai.response.text': expect.any(String),
           'ai.response.timestamp': expect.any(String),
           'ai.prompt.format': expect.any(String),
-          'ai.prompt.messages': expect.any(String),
+          'gen_ai.request.messages': expect.any(String),
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -330,8 +330,8 @@ describe('Vercel AI integration', () => {
           'ai.pipeline.name': 'generateText',
           'ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
           'ai.response.finishReason': 'tool-calls',
-          'ai.response.text': 'Tool call completed!',
-          'ai.response.toolCalls': expect.any(String),
+          'gen_ai.response.text': 'Tool call completed!',
+          'gen_ai.response.tool_calls': expect.any(String),
           'ai.settings.maxRetries': 2,
           'ai.settings.maxSteps': 1,
           'ai.streaming': false,
@@ -357,15 +357,15 @@ describe('Vercel AI integration', () => {
           'ai.operationId': 'ai.generateText.doGenerate',
           'ai.pipeline.name': 'generateText.doGenerate',
           'ai.prompt.format': expect.any(String),
-          'ai.prompt.messages': expect.any(String),
+          'gen_ai.request.messages': expect.any(String),
           'ai.prompt.toolChoice': expect.any(String),
-          'ai.prompt.tools': expect.any(Array),
+          'gen_ai.request.available_tools': expect.any(Array),
           'ai.response.finishReason': 'tool-calls',
           'ai.response.id': expect.any(String),
           'ai.response.model': 'mock-model-id',
-          'ai.response.text': 'Tool call completed!',
+          'gen_ai.response.text': 'Tool call completed!',
           'ai.response.timestamp': expect.any(String),
-          'ai.response.toolCalls': expect.any(String),
+          'gen_ai.response.tool_calls': expect.any(String),
           'ai.settings.maxRetries': 2,
           'ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts
index 6a9d7bdb6d53..5c7a0ed5d959 100644
--- a/packages/node/src/integrations/tracing/vercelai/index.ts
+++ b/packages/node/src/integrations/tracing/vercelai/index.ts
@@ -9,6 +9,10 @@ import {
   AI_MODEL_ID_ATTRIBUTE,
   AI_MODEL_PROVIDER_ATTRIBUTE,
   AI_PROMPT_ATTRIBUTE,
+  AI_PROMPT_MESSAGES_ATTRIBUTE,
+  AI_PROMPT_TOOLS_ATTRIBUTE,
+  AI_RESPONSE_TEXT_ATTRIBUTE,
+  AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
   AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE,
   AI_TOOL_CALL_ID_ATTRIBUTE,
   AI_TOOL_CALL_NAME_ATTRIBUTE,
@@ -193,6 +197,24 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {

         attributes['gen_ai.usage.total_tokens'] =
           attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
       }
+
+      // Rename AI SDK attributes to standardized gen_ai attributes
+      if (attributes[AI_PROMPT_MESSAGES_ATTRIBUTE] != undefined) {
+        attributes['gen_ai.request.messages'] = attributes[AI_PROMPT_MESSAGES_ATTRIBUTE];
+        delete attributes[AI_PROMPT_MESSAGES_ATTRIBUTE];
+      }
+      if (attributes[AI_RESPONSE_TEXT_ATTRIBUTE] != undefined) {
+        attributes['gen_ai.response.text'] = attributes[AI_RESPONSE_TEXT_ATTRIBUTE];
+        delete attributes[AI_RESPONSE_TEXT_ATTRIBUTE];
+      }
+      if (attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] != undefined) {
+        attributes['gen_ai.response.tool_calls'] = attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE];
+        delete attributes[AI_RESPONSE_TOOL_CALLS_ATTRIBUTE];
+      }
+      if (attributes[AI_PROMPT_TOOLS_ATTRIBUTE] != undefined) {
+        attributes['gen_ai.request.available_tools'] = attributes[AI_PROMPT_TOOLS_ATTRIBUTE];
+        delete attributes[AI_PROMPT_TOOLS_ATTRIBUTE];
+      }
     }
   }
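
Note: the four rename branches added to `index.ts` all follow the same copy-then-delete pattern, and the old/new key strings are visible in the updated test expectations above. A minimal table-driven sketch of the equivalent logic, for illustration only — `RENAME_MAP` and `renameAttributes` are hypothetical names, not SDK API, and the map keys assume the imported `*_ATTRIBUTE` constants hold the `ai.*` strings the tests show:

```ts
// Sketch only: equivalent, table-driven form of the four rename blocks.
type Attributes = Record<string, unknown>;

const RENAME_MAP: Record<string, string> = {
  'ai.prompt.messages': 'gen_ai.request.messages',
  'ai.response.text': 'gen_ai.response.text',
  'ai.response.toolCalls': 'gen_ai.response.tool_calls',
  'ai.prompt.tools': 'gen_ai.request.available_tools',
};

function renameAttributes(attributes: Attributes): void {
  for (const [oldKey, newKey] of Object.entries(RENAME_MAP)) {
    // Loose != mirrors the integration code: skips both undefined and null.
    if (attributes[oldKey] != undefined) {
      attributes[newKey] = attributes[oldKey];
      delete attributes[oldKey];
    }
  }
}

// Example: a generateText.doGenerate span's attributes before/after.
const attrs: Attributes = { 'ai.response.text': 'First span here!' };
renameAttributes(attrs);
// attrs is now { 'gen_ai.response.text': 'First span here!' }
```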