diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts index fb520e5e09b6..3241adfc161d 100644 --- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts @@ -1,7 +1,6 @@ import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core'; import { afterAll, describe, expect } from 'vitest'; import { - ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE, @@ -84,7 +83,6 @@ describe('Anthropic integration', () => { // Fourth span - models.retrieve expect.objectContaining({ data: expect.objectContaining({ - [ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2024-05-08T05:20:00.000Z', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models', [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', @@ -212,7 +210,6 @@ describe('Anthropic integration', () => { // Fourth - models.retrieve with PII expect.objectContaining({ data: expect.objectContaining({ - [ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2024-05-08T05:20:00.000Z', [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models', [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307', [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts index 5c61ec320c57..c66f5cb65c6b 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts @@ -17,11 +17,6 @@ import { GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, - OPENAI_RESPONSE_ID_ATTRIBUTE, - OPENAI_RESPONSE_MODEL_ATTRIBUTE, - OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE, - OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE, } from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; @@ -99,11 +94,6 @@ describe('OpenAI Tool Calls integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-tools-123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -127,11 +117,6 @@ describe('OpenAI Tool Calls integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-tools-123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:45.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -153,11 +138,6 @@ describe('OpenAI Tool Calls integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, 
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_tools_789', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:32:00.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -181,11 +161,6 @@ describe('OpenAI Tool Calls integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_tools_789', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -217,11 +192,6 @@ describe('OpenAI Tool Calls integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-tools-123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -248,11 +218,6 @@ describe('OpenAI Tool Calls integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-tools-123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:45.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 25, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 15, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -277,11 +242,6 @@ describe('OpenAI Tool Calls integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_tools_789', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:32:00.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -308,11 +268,6 @@ describe('OpenAI Tool Calls integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_tools_789', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 12, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 8, }, description: 'chat gpt-4', op: 'gen_ai.chat', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts index 9e7a1722db11..ae7715e9852c 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts @@ -21,11 +21,6 @@ import { GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, - OPENAI_RESPONSE_ID_ATTRIBUTE, - OPENAI_RESPONSE_MODEL_ATTRIBUTE, - OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE, - 
OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE, } from '../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; @@ -52,11 +47,6 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -77,11 +67,6 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:30.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -118,12 +103,7 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 18, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 12, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -145,12 +125,7 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -199,11 +174,6 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -227,11 +197,6 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:30.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -276,12 +241,7 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, 
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 18, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 12, }), description: 'chat gpt-4', op: 'gen_ai.chat', @@ -307,12 +267,7 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }), description: 'chat gpt-4', op: 'gen_ai.chat', @@ -406,8 +361,6 @@ describe('OpenAI integration', () => { [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -448,8 +401,6 @@ describe('OpenAI integration', () => { [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -483,8 +434,6 @@ describe('OpenAI integration', () => { [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -542,11 +491,6 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, op: 'gen_ai.chat', origin: 'auto.ai.openai', @@ -589,11 +533,6 @@ describe('OpenAI integration', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, op: 'gen_ai.chat', origin: 'auto.ai.openai', diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts index 626e53248e66..b282282305eb 100644 --- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts @@ -20,11 +20,6 @@ import { 
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, - OPENAI_RESPONSE_ID_ATTRIBUTE, - OPENAI_RESPONSE_MODEL_ATTRIBUTE, - OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE, - OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE, } from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; @@ -51,11 +46,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -76,11 +66,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:30.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -117,12 +102,7 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 18, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 12, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -144,12 +124,7 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }, description: 'chat gpt-4', op: 'gen_ai.chat', @@ -196,11 +171,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -224,11 +194,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:30.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 8, - 
[OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 5, }, description: 'chat gpt-3.5-turbo', op: 'gen_ai.chat', @@ -271,12 +236,7 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:40.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 18, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 12, }), description: 'chat gpt-4', op: 'gen_ai.chat', @@ -302,12 +262,7 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4', [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:50.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 10, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 6, }), description: 'chat gpt-4', op: 'gen_ai.chat', @@ -375,8 +330,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -417,8 +370,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -452,8 +403,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10, - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small', - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, description: 'embeddings text-embedding-3-small', op: 'gen_ai.embeddings', @@ -596,11 +545,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, op: 'gen_ai.chat', origin: 'auto.ai.openai', @@ -654,11 +598,6 @@ describe('OpenAI integration (V6)', () => { [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10, [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15, [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25, - [OPENAI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123', - [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo', - [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: '2023-03-01T06:31:28.000Z', - [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: 15, - [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: 10, }, op: 'gen_ai.chat', origin: 'auto.ai.openai', diff --git a/packages/core/src/tracing/ai/gen-ai-attributes.ts b/packages/core/src/tracing/ai/gen-ai-attributes.ts 
index 5c740142b4f0..f5d3a206116e 100644 --- a/packages/core/src/tracing/ai/gen-ai-attributes.ts +++ b/packages/core/src/tracing/ai/gen-ai-attributes.ts @@ -282,41 +282,3 @@ export const GEN_AI_TOOL_OUTPUT_ATTRIBUTE = 'gen_ai.tool.output'; * @see https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-tool-description */ export const GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE = 'gen_ai.tool.description'; - -// ============================================================================= -// OPENAI-SPECIFIC ATTRIBUTES -// ============================================================================= - -/** - * The response ID from OpenAI - */ -export const OPENAI_RESPONSE_ID_ATTRIBUTE = 'openai.response.id'; - -/** - * The response model from OpenAI - */ -export const OPENAI_RESPONSE_MODEL_ATTRIBUTE = 'openai.response.model'; - -/** - * The response timestamp from OpenAI (ISO string) - */ -export const OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE = 'openai.response.timestamp'; - -/** - * The number of completion tokens used - */ -export const OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE = 'openai.usage.completion_tokens'; - -/** - * The number of prompt tokens used - */ -export const OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'openai.usage.prompt_tokens'; - -// ============================================================================= -// ANTHROPIC AI OPERATIONS -// ============================================================================= - -/** - * The response timestamp from Anthropic AI (ISO string) - */ -export const ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE = 'anthropic.response.timestamp'; diff --git a/packages/core/src/tracing/ai/utils.ts b/packages/core/src/tracing/ai/utils.ts index d9628e3c75e2..865f6f9aaf31 100644 --- a/packages/core/src/tracing/ai/utils.ts +++ b/packages/core/src/tracing/ai/utils.ts @@ -3,9 +3,24 @@ */ import { captureException } from '../../exports'; import { getClient } from '../../currentScopes'; -import type { Span } from '../../types-hoist/span'; +import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; +import type { Span, SpanAttributeValue } from '../../types-hoist/span'; import { isThenable } from '../../utils/is'; import { + GEN_AI_CONVERSATION_ID_ATTRIBUTE, + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE, + GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE, + GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_STREAM_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_TOP_K_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, @@ -55,6 +70,152 @@ export function buildMethodPath(currentPath: string, prop: string): string { return currentPath ? `${currentPath}.${prop}` : prop; } +/** + * Extract model from params or context. + * params.model covers OpenAI/Anthropic, context.model/modelVersion covers Google GenAI chat instances. 
+ */
+export function extractModel(params: Record<string, unknown> | undefined, context?: unknown): string {
+  if (params && 'model' in params && typeof params.model === 'string') {
+    return params.model;
+  }
+  // Google GenAI chat instances store the model on the context object
+  if (context && typeof context === 'object') {
+    const ctx = context as Record<string, unknown>;
+    if (typeof ctx.model === 'string') return ctx.model;
+    if (typeof ctx.modelVersion === 'string') return ctx.modelVersion;
+  }
+  return 'unknown';
+}
+
+/**
+ * Set an attribute if the key exists in the source object.
+ */
+function extractIfPresent(
+  attributes: Record<string, SpanAttributeValue>,
+  source: Record<string, unknown>,
+  key: string,
+  attribute: string,
+): void {
+  if (key in source) {
+    attributes[attribute] = source[key] as SpanAttributeValue;
+  }
+}
+
+/**
+ * Extract available tools from request parameters.
+ * Handles OpenAI (params.tools + web_search_options), Anthropic (params.tools),
+ * and Google GenAI (config.tools[].functionDeclarations).
+ */
+function extractTools(params: Record<string, unknown>, config: Record<string, unknown>): string | undefined {
+  // OpenAI: web_search_options are treated as tools
+  const hasWebSearchOptions = params.web_search_options && typeof params.web_search_options === 'object';
+  const webSearchOptions = hasWebSearchOptions
+    ? [{ type: 'web_search_options', ...(params.web_search_options as Record<string, unknown>) }]
+    : [];
+
+  // Google GenAI: tools contain functionDeclarations
+  if ('tools' in config && Array.isArray(config.tools)) {
+    const hasDeclarations = config.tools.some(
+      (tool: unknown) => tool && typeof tool === 'object' && 'functionDeclarations' in (tool as Record<string, unknown>),
+    );
+    if (hasDeclarations) {
+      const declarations = (config.tools as Array<{ functionDeclarations?: unknown[] }>).flatMap(
+        tool => tool.functionDeclarations ?? [],
+      );
+      if (declarations.length > 0) {
+        return JSON.stringify(declarations);
+      }
+      return undefined;
+    }
+  }
+
+  // OpenAI / Anthropic: tools are at the top level
+  const tools = Array.isArray(params.tools) ? params.tools : [];
+  const availableTools = [...tools, ...webSearchOptions];
+
+  if (availableTools.length === 0) {
+    return undefined;
+  }
+
+  return JSON.stringify(availableTools);
+}
+
+/**
+ * Extract conversation ID from request parameters.
+ * Supports OpenAI Conversations API and previous_response_id chaining.
+ */
+function extractConversationId(params: Record<string, unknown>): string | undefined {
+  if ('conversation' in params && typeof params.conversation === 'string') {
+    return params.conversation;
+  }
+  if ('previous_response_id' in params && typeof params.previous_response_id === 'string') {
+    return params.previous_response_id;
+  }
+  return undefined;
+}
+
+/**
+ * Extract request attributes from AI method arguments.
+ * Shared across all AI provider integrations (OpenAI, Anthropic, Google GenAI).
+ */
+export function extractRequestAttributes(
+  system: string,
+  origin: string,
+  operationName: string,
+  args: unknown[],
+  context?: unknown,
+): Record<string, SpanAttributeValue> {
+  const params = args.length > 0 && typeof args[0] === 'object' && args[0] !== null
+    ? (args[0] as Record<string, unknown>)
+    : undefined;
+
+  const attributes: Record<string, SpanAttributeValue> = {
+    [GEN_AI_SYSTEM_ATTRIBUTE]: system,
+    [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationName,
+    [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: origin,
+    [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: extractModel(params, context),
+  };
+
+  if (!params) {
+    return attributes;
+  }
+
+  // Google GenAI nests generation params under config; OpenAI/Anthropic are flat
+  const config = ('config' in params && typeof params.config === 'object' && params.config)
+    ?
(params.config as Record) + : params; + + // Generation parameters — handles both snake_case (OpenAI/Anthropic) and camelCase (Google GenAI) + extractIfPresent(attributes, config, 'temperature', GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE); + extractIfPresent(attributes, config, 'top_p', GEN_AI_REQUEST_TOP_P_ATTRIBUTE); + extractIfPresent(attributes, config, 'topP', GEN_AI_REQUEST_TOP_P_ATTRIBUTE); + extractIfPresent(attributes, config, 'top_k', GEN_AI_REQUEST_TOP_K_ATTRIBUTE); + extractIfPresent(attributes, config, 'topK', GEN_AI_REQUEST_TOP_K_ATTRIBUTE); + extractIfPresent(attributes, config, 'frequency_penalty', GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE); + extractIfPresent(attributes, config, 'frequencyPenalty', GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE); + extractIfPresent(attributes, config, 'presence_penalty', GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE); + extractIfPresent(attributes, config, 'presencePenalty', GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE); + extractIfPresent(attributes, config, 'max_tokens', GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE); + extractIfPresent(attributes, config, 'maxOutputTokens', GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE); + extractIfPresent(attributes, params, 'stream', GEN_AI_REQUEST_STREAM_ATTRIBUTE); + extractIfPresent(attributes, params, 'encoding_format', GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE); + extractIfPresent(attributes, params, 'dimensions', GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE); + + // Tools + const tools = extractTools(params, config); + if (tools) { + attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] = tools; + } + + // Conversation ID (OpenAI) + const conversationId = extractConversationId(params); + if (conversationId) { + attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE] = conversationId; + } + + return attributes; +} + /** * Set token usage attributes * @param span - The span to add attributes to diff --git a/packages/core/src/tracing/anthropic-ai/index.ts b/packages/core/src/tracing/anthropic-ai/index.ts index 226aa8e1d544..1430aea9645b 100644 --- a/packages/core/src/tracing/anthropic-ai/index.ts +++ b/packages/core/src/tracing/anthropic-ai/index.ts @@ -1,29 +1,19 @@ import { captureException } from '../../exports'; -import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; import { SPAN_STATUS_ERROR } from '../../tracing'; import { startSpan, startSpanManual } from '../../tracing/trace'; import type { Span, SpanAttributeValue } from '../../types-hoist/span'; import { - ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE, - GEN_AI_OPERATION_NAME_ATTRIBUTE, GEN_AI_PROMPT_ATTRIBUTE, - GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, - GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, - GEN_AI_REQUEST_STREAM_ATTRIBUTE, - GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, - GEN_AI_REQUEST_TOP_K_ATTRIBUTE, - GEN_AI_REQUEST_TOP_P_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, - GEN_AI_SYSTEM_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import type { InstrumentedMethodEntry } from '../ai/utils'; import { buildMethodPath, + extractRequestAttributes, resolveAIRecordingOptions, setTokenUsageAttributes, wrapPromiseWithMethods, @@ -33,42 +23,6 @@ import { instrumentAsyncIterableStream, instrumentMessageStream } from './stream import type { AnthropicAiOptions, AnthropicAiResponse, AnthropicAiStreamingEvent, ContentBlock } from './types'; import { handleResponseError, messagesFromParams, setMessagesAttribute } from 
'./utils'; -/** - * Extract request attributes from method arguments - */ -function extractRequestAttributes(args: unknown[], methodPath: string, operationName: string): Record { - const attributes: Record = { - [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationName, - [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic', - }; - - if (args.length > 0 && typeof args[0] === 'object' && args[0] !== null) { - const params = args[0] as Record; - if (params.tools && Array.isArray(params.tools)) { - attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] = JSON.stringify(params.tools); - } - - attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = params.model ?? 'unknown'; - if ('temperature' in params) attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE] = params.temperature; - if ('top_p' in params) attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE] = params.top_p; - if ('stream' in params) attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] = params.stream; - if ('top_k' in params) attributes[GEN_AI_REQUEST_TOP_K_ATTRIBUTE] = params.top_k; - if ('frequency_penalty' in params) - attributes[GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE] = params.frequency_penalty; - if ('max_tokens' in params) attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE] = params.max_tokens; - } else { - if (methodPath === 'models.retrieve' || methodPath === 'models.get') { - // models.retrieve(model-id) and models.get(model-id) - attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = args[0]; - } else { - attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = 'unknown'; - } - } - - return attributes; -} - /** * Add private request attributes to spans. * This is only recorded if recordInputs is true. @@ -128,17 +82,6 @@ function addMetadataAttributes(span: Span, response: AnthropicAiResponse): void [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: response.model, }); - if ('created' in response && typeof response.created === 'number') { - span.setAttributes({ - [ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: new Date(response.created * 1000).toISOString(), - }); - } - if ('created_at' in response && typeof response.created_at === 'number') { - span.setAttributes({ - [ANTHROPIC_AI_RESPONSE_TIMESTAMP_ATTRIBUTE]: new Date(response.created_at * 1000).toISOString(), - }); - } - if ('usage' in response && response.usage) { setTokenUsageAttributes( span, @@ -266,7 +209,13 @@ function instrumentMethod( return new Proxy(originalMethod, { apply(target, thisArg, args: T): R | Promise { const operationName = instrumentedMethod.operation; - const requestAttributes = extractRequestAttributes(args, methodPath, operationName); + const requestAttributes = extractRequestAttributes('anthropic', 'auto.ai.anthropic', operationName, args); + + // Anthropic models.retrieve/models.get take model ID as positional string arg + if ((methodPath === 'models.retrieve' || methodPath === 'models.get') && typeof args[0] === 'string') { + requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = args[0]; + } + const model = requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] ?? 'unknown'; const params = typeof args[0] === 'object' ? 
(args[0] as Record) : undefined; diff --git a/packages/core/src/tracing/google-genai/index.ts b/packages/core/src/tracing/google-genai/index.ts index a6ce47eedbd8..bd9a6c285212 100644 --- a/packages/core/src/tracing/google-genai/index.ts +++ b/packages/core/src/tracing/google-genai/index.ts @@ -1,5 +1,4 @@ import { captureException } from '../../exports'; -import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; import { SPAN_STATUS_ERROR } from '../../tracing'; import { startSpan, startSpanManual } from '../../tracing/trace'; import type { Span, SpanAttributeValue } from '../../types-hoist/span'; @@ -7,19 +6,10 @@ import { handleCallbackErrors } from '../../utils/handleCallbackErrors'; import { GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, - GEN_AI_OPERATION_NAME_ATTRIBUTE, - GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, - GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, - GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, - GEN_AI_REQUEST_TOP_K_ATTRIBUTE, - GEN_AI_REQUEST_TOP_P_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, - GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, @@ -27,105 +17,13 @@ import { } from '../ai/gen-ai-attributes'; import { truncateGenAiMessages } from '../ai/messageTruncation'; import type { InstrumentedMethodEntry } from '../ai/utils'; -import { buildMethodPath, extractSystemInstructions, resolveAIRecordingOptions } from '../ai/utils'; +import { buildMethodPath, extractRequestAttributes, extractSystemInstructions, resolveAIRecordingOptions } from '../ai/utils'; import { CHAT_PATH, CHATS_CREATE_METHOD, GOOGLE_GENAI_METHOD_REGISTRY, GOOGLE_GENAI_SYSTEM_NAME } from './constants'; import { instrumentStream } from './streaming'; import type { Candidate, ContentPart, GoogleGenAIOptions, GoogleGenAIResponse } from './types'; import type { ContentListUnion, ContentUnion, Message, PartListUnion } from './utils'; import { contentUnionToMessages } from './utils'; -/** - * Extract model from parameters or chat context object - * For chat instances, the model is available on the chat object as 'model' (older versions) or 'modelVersion' (newer versions) - */ -export function extractModel(params: Record, context?: unknown): string { - if ('model' in params && typeof params.model === 'string') { - return params.model; - } - - // Try to get model from chat context object (chat instance has model property) - if (context && typeof context === 'object') { - const contextObj = context as Record; - - // Check for 'model' property (older versions, and streaming) - if ('model' in contextObj && typeof contextObj.model === 'string') { - return contextObj.model; - } - - // Check for 'modelVersion' property (newer versions) - if ('modelVersion' in contextObj && typeof contextObj.modelVersion === 'string') { - return contextObj.modelVersion; - } - } - - return 'unknown'; -} - -/** - * Extract generation config parameters - */ -function extractConfigAttributes(config: Record): Record { - const attributes: Record = {}; - - if ('temperature' in config && typeof config.temperature === 'number') { - attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE] = config.temperature; - } - if ('topP' in config && typeof config.topP === 'number') { - attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE] = config.topP; - } - 
if ('topK' in config && typeof config.topK === 'number') { - attributes[GEN_AI_REQUEST_TOP_K_ATTRIBUTE] = config.topK; - } - if ('maxOutputTokens' in config && typeof config.maxOutputTokens === 'number') { - attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE] = config.maxOutputTokens; - } - if ('frequencyPenalty' in config && typeof config.frequencyPenalty === 'number') { - attributes[GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE] = config.frequencyPenalty; - } - if ('presencePenalty' in config && typeof config.presencePenalty === 'number') { - attributes[GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE] = config.presencePenalty; - } - - return attributes; -} - -/** - * Extract request attributes from method arguments - * Builds the base attributes for span creation including system info, model, and config - */ -function extractRequestAttributes( - operationName: string, - params?: Record, - context?: unknown, -): Record { - const attributes: Record = { - [GEN_AI_SYSTEM_ATTRIBUTE]: GOOGLE_GENAI_SYSTEM_NAME, - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationName, - [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', - }; - - if (params) { - attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = extractModel(params, context); - - // Extract generation config parameters - if ('config' in params && typeof params.config === 'object' && params.config) { - const config = params.config as Record; - Object.assign(attributes, extractConfigAttributes(config)); - - // Extract available tools from config - if ('tools' in config && Array.isArray(config.tools)) { - const functionDeclarations = config.tools.flatMap( - (tool: { functionDeclarations: unknown[] }) => tool.functionDeclarations, - ); - attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] = JSON.stringify(functionDeclarations); - } - } - } else { - attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = extractModel({}, context); - } - - return attributes; -} /** * Add private request attributes to spans. @@ -257,7 +155,7 @@ function instrumentMethod( apply(target, _, args: T): R | Promise { const operationName = instrumentedMethod.operation; const params = args[0] as Record | undefined; - const requestAttributes = extractRequestAttributes(operationName, params, context); + const requestAttributes = extractRequestAttributes(GOOGLE_GENAI_SYSTEM_NAME, 'auto.ai.google_genai', operationName, args, context); const model = requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] ?? 
'unknown'; // Check if this is a streaming method diff --git a/packages/core/src/tracing/openai/index.ts b/packages/core/src/tracing/openai/index.ts index 0f83bf2cd3eb..6fc3edacaf27 100644 --- a/packages/core/src/tracing/openai/index.ts +++ b/packages/core/src/tracing/openai/index.ts @@ -1,24 +1,18 @@ -import { DEBUG_BUILD } from '../../debug-build'; import { captureException } from '../../exports'; -import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; import { SPAN_STATUS_ERROR } from '../../tracing'; import { startSpan, startSpanManual } from '../../tracing/trace'; import type { Span, SpanAttributeValue } from '../../types-hoist/span'; -import { debug } from '../../utils/debug-logger'; import { GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ATTRIBUTE, GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, - GEN_AI_OPERATION_NAME_ATTRIBUTE, - GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE, - GEN_AI_RESPONSE_TEXT_ATTRIBUTE, - GEN_AI_SYSTEM_ATTRIBUTE, GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import type { InstrumentedMethodEntry } from '../ai/utils'; import { buildMethodPath, + extractRequestAttributes, extractSystemInstructions, getTruncatedJsonString, resolveAIRecordingOptions, @@ -26,94 +20,8 @@ import { } from '../ai/utils'; import { OPENAI_METHOD_REGISTRY } from './constants'; import { instrumentStream } from './streaming'; -import type { ChatCompletionChunk, OpenAiOptions, OpenAiResponse, OpenAIStream, ResponseStreamingEvent } from './types'; -import { - addChatCompletionAttributes, - addConversationAttributes, - addEmbeddingsAttributes, - addResponsesApiAttributes, - extractRequestParameters, - isChatCompletionResponse, - isConversationResponse, - isEmbeddingsResponse, - isResponsesApiResponse, -} from './utils'; - -/** - * Extract available tools from request parameters - */ -function extractAvailableTools(params: Record): string | undefined { - const tools = Array.isArray(params.tools) ? params.tools : []; - const hasWebSearchOptions = params.web_search_options && typeof params.web_search_options === 'object'; - const webSearchOptions = hasWebSearchOptions - ? 
[{ type: 'web_search_options', ...(params.web_search_options as Record) }] - : []; - - const availableTools = [...tools, ...webSearchOptions]; - if (availableTools.length === 0) { - return undefined; - } - - try { - return JSON.stringify(availableTools); - } catch (error) { - DEBUG_BUILD && debug.error('Failed to serialize OpenAI tools:', error); - return undefined; - } -} - -/** - * Extract request attributes from method arguments - */ -function extractRequestAttributes(args: unknown[], operationName: string): Record { - const attributes: Record = { - [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai', - [GEN_AI_OPERATION_NAME_ATTRIBUTE]: operationName, - [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai', - }; - - if (args.length > 0 && typeof args[0] === 'object' && args[0] !== null) { - const params = args[0] as Record; - - const availableTools = extractAvailableTools(params); - if (availableTools) { - attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] = availableTools; - } - - Object.assign(attributes, extractRequestParameters(params)); - } else { - attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = 'unknown'; - } - - return attributes; -} - -/** - * Add response attributes to spans - * This supports Chat Completion, Responses API, Embeddings, and Conversations API responses - */ -function addResponseAttributes(span: Span, result: unknown, recordOutputs?: boolean): void { - if (!result || typeof result !== 'object') return; - - const response = result as OpenAiResponse; - - if (isChatCompletionResponse(response)) { - addChatCompletionAttributes(span, response, recordOutputs); - if (recordOutputs && response.choices?.length) { - const responseTexts = response.choices.map(choice => choice.message?.content || ''); - span.setAttributes({ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: JSON.stringify(responseTexts) }); - } - } else if (isResponsesApiResponse(response)) { - addResponsesApiAttributes(span, response, recordOutputs); - if (recordOutputs && response.output_text) { - span.setAttributes({ [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: response.output_text }); - } - } else if (isEmbeddingsResponse(response)) { - addEmbeddingsAttributes(span, response); - } else if (isConversationResponse(response)) { - addConversationAttributes(span, response); - } -} +import type { ChatCompletionChunk, OpenAiOptions, OpenAIStream, ResponseStreamingEvent } from './types'; +import { addResponseAttributes } from './utils'; // Extract and record AI request inputs, if present. This is intentionally separate from response attributes. 
function addRequestAttributes(span: Span, params: Record, operationName: string): void { @@ -181,7 +89,7 @@ function instrumentMethod( ): (...args: T) => Promise { return function instrumentedCall(...args: T): Promise { const operationName = instrumentedMethod.operation; - const requestAttributes = extractRequestAttributes(args, operationName); + const requestAttributes = extractRequestAttributes('openai', 'auto.ai.openai', operationName, args); const model = (requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] as string) || 'unknown'; const params = args[0] as Record | undefined; diff --git a/packages/core/src/tracing/openai/streaming.ts b/packages/core/src/tracing/openai/streaming.ts index 3a5634df34dd..f9f466228857 100644 --- a/packages/core/src/tracing/openai/streaming.ts +++ b/packages/core/src/tracing/openai/streaming.ts @@ -3,9 +3,14 @@ import { SPAN_STATUS_ERROR } from '../../tracing'; import type { Span } from '../../types-hoist/span'; import { GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, GEN_AI_RESPONSE_STREAMING_ATTRIBUTE, GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; import { RESPONSE_EVENT_TYPES } from './constants'; import type { @@ -15,12 +20,7 @@ import type { ResponseFunctionCall, ResponseStreamingEvent, } from './types'; -import { - isChatCompletionChunk, - isResponsesApiStreamEvent, - setCommonResponseAttributes, - setTokenUsageAttributes, -} from './utils'; +import { isChatCompletionChunk, isResponsesApiStreamEvent } from './utils'; /** * State object used to accumulate information from a stream of OpenAI events/chunks. @@ -36,8 +36,6 @@ interface StreamingState { responseId: string; /** The model name. */ responseModel: string; - /** The timestamp of the response. */ - responseTimestamp: number; /** Number of prompt/input tokens used. */ promptTokens: number | undefined; /** Number of completion/output tokens used. */ @@ -99,7 +97,6 @@ function processChatCompletionToolCalls(toolCalls: ChatCompletionToolCall[], sta function processChatCompletionChunk(chunk: ChatCompletionChunk, state: StreamingState, recordOutputs: boolean): void { state.responseId = chunk.id ?? state.responseId; state.responseModel = chunk.model ?? state.responseModel; - state.responseTimestamp = chunk.created ?? state.responseTimestamp; if (chunk.usage) { // For stream responses, the input tokens remain constant across all events in the stream. @@ -183,7 +180,6 @@ function processResponsesApiEvent( const { response } = event as { response: OpenAIResponseObject }; state.responseId = response.id ?? state.responseId; state.responseModel = response.model ?? state.responseModel; - state.responseTimestamp = response.created_at ?? state.responseTimestamp; if (response.usage) { // For stream responses, the input tokens remain constant across all events in the stream. 
@@ -227,7 +223,6 @@ export async function* instrumentStream( finishReasons: [], responseId: '', responseModel: '', - responseTimestamp: 0, promptTokens: undefined, completionTokens: undefined, totalTokens: undefined, @@ -245,35 +240,31 @@ export async function* instrumentStream( yield event; } } finally { - setCommonResponseAttributes(span, state.responseId, state.responseModel, state.responseTimestamp); - setTokenUsageAttributes(span, state.promptTokens, state.completionTokens, state.totalTokens); - - span.setAttributes({ + const attrs: Record = { + [GEN_AI_RESPONSE_ID_ATTRIBUTE]: state.responseId, + [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: state.responseModel, [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true, - }); + }; + + if (state.promptTokens !== undefined) attrs[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = state.promptTokens; + if (state.completionTokens !== undefined) attrs[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = state.completionTokens; + if (state.totalTokens !== undefined) attrs[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = state.totalTokens; if (state.finishReasons.length) { - span.setAttributes({ - [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify(state.finishReasons), - }); + attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE] = JSON.stringify(state.finishReasons); } if (recordOutputs && state.responseTexts.length) { - span.setAttributes({ - [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: state.responseTexts.join(''), - }); + attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE] = state.responseTexts.join(''); } - // Set tool calls attribute if any were accumulated const chatCompletionToolCallsArray = Object.values(state.chatCompletionToolCalls); const allToolCalls = [...chatCompletionToolCallsArray, ...state.responsesApiToolCalls]; - if (allToolCalls.length > 0) { - span.setAttributes({ - [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: JSON.stringify(allToolCalls), - }); + attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] = JSON.stringify(allToolCalls); } + span.setAttributes(attrs); span.end(); } } diff --git a/packages/core/src/tracing/openai/utils.ts b/packages/core/src/tracing/openai/utils.ts index e7ad110e00bb..287087f8ab24 100644 --- a/packages/core/src/tracing/openai/utils.ts +++ b/packages/core/src/tracing/openai/utils.ts @@ -1,87 +1,16 @@ -import type { Span } from '../../types-hoist/span'; +import type { Span, SpanAttributeValue } from '../../types-hoist/span'; import { GEN_AI_CONVERSATION_ID_ATTRIBUTE, - GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE, - GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE, - GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_MODEL_ATTRIBUTE, - GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, - GEN_AI_REQUEST_STREAM_ATTRIBUTE, - GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, - GEN_AI_REQUEST_TOP_P_ATTRIBUTE, GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, GEN_AI_RESPONSE_ID_ATTRIBUTE, GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, - OPENAI_RESPONSE_ID_ATTRIBUTE, - OPENAI_RESPONSE_MODEL_ATTRIBUTE, - OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE, - OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, - OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE, } from '../ai/gen-ai-attributes'; -import type { - ChatCompletionChunk, - OpenAiChatCompletionObject, - OpenAIConversationObject, - OpenAICreateEmbeddingsObject, - OpenAIResponseObject, - ResponseStreamingEvent, -} from './types'; - -/** - * Check if response is a Chat Completion object - */ -export function isChatCompletionResponse(response: 
unknown): response is OpenAiChatCompletionObject {
-  return (
-    response !== null &&
-    typeof response === 'object' &&
-    'object' in response &&
-    (response as Record<string, unknown>).object === 'chat.completion'
-  );
-}
-
-/**
- * Check if response is a Responses API object
- */
-export function isResponsesApiResponse(response: unknown): response is OpenAIResponseObject {
-  return (
-    response !== null &&
-    typeof response === 'object' &&
-    'object' in response &&
-    (response as Record<string, unknown>).object === 'response'
-  );
-}
-
-/**
- * Check if response is an Embeddings API object
- */
-export function isEmbeddingsResponse(response: unknown): response is OpenAICreateEmbeddingsObject {
-  if (response === null || typeof response !== 'object' || !('object' in response)) {
-    return false;
-  }
-  const responseObject = response as Record<string, unknown>;
-  return (
-    responseObject.object === 'list' &&
-    typeof responseObject.model === 'string' &&
-    responseObject.model.toLowerCase().includes('embedding')
-  );
-}
-
-/**
- * Check if response is a Conversations API object
- * @see https://platform.openai.com/docs/api-reference/conversations
- */
-export function isConversationResponse(response: unknown): response is OpenAIConversationObject {
-  return (
-    response !== null &&
-    typeof response === 'object' &&
-    'object' in response &&
-    (response as Record<string, unknown>).object === 'conversation'
-  );
-}
+import type { ChatCompletionChunk, ResponseStreamingEvent } from './types';
 
 /**
  * Check if streaming event is from the Responses API
@@ -109,213 +38,107 @@ export function isChatCompletionChunk(event: unknown): event is ChatCompletionChunk {
 }
 
 /**
- * Add attributes for Chat Completion responses
+ * Add response attributes to a span using duck-typing.
+ * Works for Chat Completions, Responses API, Embeddings, and Conversations API responses.
  */
-export function addChatCompletionAttributes(
-  span: Span,
-  response: OpenAiChatCompletionObject,
-  recordOutputs?: boolean,
-): void {
-  setCommonResponseAttributes(span, response.id, response.model, response.created);
-  if (response.usage) {
-    setTokenUsageAttributes(
-      span,
-      response.usage.prompt_tokens,
-      response.usage.completion_tokens,
-      response.usage.total_tokens,
-    );
+export function addResponseAttributes(span: Span, result: unknown, recordOutputs?: boolean): void {
+  if (!result || typeof result !== 'object') return;
+
+  const response = result as Record<string, unknown>;
+  const attrs: Record<string, unknown> = {};
+
+  // Response ID
+  if (typeof response.id === 'string') {
+    attrs[GEN_AI_RESPONSE_ID_ATTRIBUTE] = response.id;
   }
+
+  // Response model
+  if (typeof response.model === 'string') {
+    attrs[GEN_AI_RESPONSE_MODEL_ATTRIBUTE] = response.model;
+  }
+
+  // Conversation ID (conversation objects use id as conversation link)
+  if (response.object === 'conversation' && typeof response.id === 'string') {
+    attrs[GEN_AI_CONVERSATION_ID_ATTRIBUTE] = response.id;
+  }
+
+  // Token usage — supports both naming conventions (chat: prompt_tokens/completion_tokens, responses: input_tokens/output_tokens)
+  if (response.usage && typeof response.usage === 'object') {
+    const usage = response.usage as Record<string, unknown>;
+
+    const inputTokens = usage.prompt_tokens ?? usage.input_tokens;
+    if (typeof inputTokens === 'number') {
+      attrs[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = inputTokens;
+    }
+
+    const outputTokens = usage.completion_tokens ?? usage.output_tokens;
+    if (typeof outputTokens === 'number') {
+      attrs[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = outputTokens;
+    }
+
+    if (typeof usage.total_tokens === 'number') {
+      attrs[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = usage.total_tokens;
+    }
+  }
+
+  // Finish reasons from choices (chat completions)
   if (Array.isArray(response.choices)) {
-    const finishReasons = response.choices
+    const choices = response.choices as Array<Record<string, unknown>>;
+    const finishReasons = choices
       .map(choice => choice.finish_reason)
-      .filter((reason): reason is string => reason !== null);
+      .filter((reason): reason is string => typeof reason === 'string');
     if (finishReasons.length > 0) {
-      span.setAttributes({
-        [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify(finishReasons),
-      });
+      attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE] = JSON.stringify(finishReasons);
     }
-    // Extract tool calls from all choices (only if recordOutputs is true)
     if (recordOutputs) {
-      const toolCalls = response.choices
-        .map(choice => choice.message?.tool_calls)
+      // Response text from choices
+      const responseTexts = choices.map(choice => {
+        const message = choice.message as Record<string, unknown> | undefined;
+        return (message?.content as string) || '';
+      });
+      attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE] = JSON.stringify(responseTexts);
+
+      // Tool calls from choices
+      const toolCalls = choices
+        .map(choice => {
+          const message = choice.message as Record<string, unknown> | undefined;
+          return message?.tool_calls;
+        })
        .filter(calls => Array.isArray(calls) && calls.length > 0)
        .flat();
      if (toolCalls.length > 0) {
-        span.setAttributes({
-          [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: JSON.stringify(toolCalls),
-        });
+        attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] = JSON.stringify(toolCalls);
      }
    }
  }
-}
-/**
- * Add attributes for Responses API responses
- */
-export function addResponsesApiAttributes(span: Span, response: OpenAIResponseObject, recordOutputs?: boolean): void {
-  setCommonResponseAttributes(span, response.id, response.model, response.created_at);
-  if (response.status) {
-    span.setAttributes({
-      [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify([response.status]),
-    });
-  }
-  if (response.usage) {
-    setTokenUsageAttributes(
-      span,
-      response.usage.input_tokens,
-      response.usage.output_tokens,
-      response.usage.total_tokens,
-    );
+  // Finish reason from status (responses API)
+  if (typeof response.status === 'string') {
+    // Only set if not already set from choices
+    if (!attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]) {
+      attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE] = JSON.stringify([response.status]);
+    }
  }
 
-  // Extract function calls from output (only if recordOutputs is true)
  if (recordOutputs) {
-    const responseWithOutput = response as OpenAIResponseObject & { output?: unknown[] };
-    if (Array.isArray(responseWithOutput.output) && responseWithOutput.output.length > 0) {
-      // Filter for function_call type objects in the output array
-      const functionCalls = responseWithOutput.output.filter(
-        (item): unknown =>
-          // oxlint-disable-next-line typescript/prefer-optional-chain
-          typeof item === 'object' && item !== null && (item as Record<string, unknown>).type === 'function_call',
-      );
+    // Response text from output_text (responses API)
+    if (typeof response.output_text === 'string' && !attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]) {
+      attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE] = response.output_text;
+    }
+    // Tool calls from output array (responses API)
+    if (Array.isArray(response.output) && response.output.length > 0 && !attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]) {
+      const functionCalls = (response.output as Array<Record<string, unknown>>).filter(
+        item => item?.type === 'function_call',
+      );
      if (functionCalls.length > 0) {
-        span.setAttributes({
-          [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: JSON.stringify(functionCalls),
-        });
+        attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] = JSON.stringify(functionCalls);
      }
    }
  }
-}
-
-/**
- * Add attributes for Embeddings API responses
- */
-export function addEmbeddingsAttributes(span: Span, response: OpenAICreateEmbeddingsObject): void {
-  span.setAttributes({
-    [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: response.model,
-    [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: response.model,
-  });
-
-  if (response.usage) {
-    setTokenUsageAttributes(span, response.usage.prompt_tokens, undefined, response.usage.total_tokens);
-  }
-}
-
-/**
- * Add attributes for Conversations API responses
- * @see https://platform.openai.com/docs/api-reference/conversations
- */
-export function addConversationAttributes(span: Span, response: OpenAIConversationObject): void {
-  const { id, created_at } = response;
-  span.setAttributes({
-    [OPENAI_RESPONSE_ID_ATTRIBUTE]: id,
-    [GEN_AI_RESPONSE_ID_ATTRIBUTE]: id,
-    // The conversation id is used to link messages across API calls
-    [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: id,
-  });
-
-  if (created_at) {
-    span.setAttributes({
-      [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: new Date(created_at * 1000).toISOString(),
-    });
-  }
+  span.setAttributes(attrs);
 }
-/**
- * Set token usage attributes
- * @param span - The span to add attributes to
- * @param promptTokens - The number of prompt tokens
- * @param completionTokens - The number of completion tokens
- * @param totalTokens - The number of total tokens
- */
-export function setTokenUsageAttributes(
-  span: Span,
-  promptTokens?: number,
-  completionTokens?: number,
-  totalTokens?: number,
-): void {
-  if (promptTokens !== undefined) {
-    span.setAttributes({
-      [OPENAI_USAGE_PROMPT_TOKENS_ATTRIBUTE]: promptTokens,
-      [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: promptTokens,
-    });
-  }
-  if (completionTokens !== undefined) {
-    span.setAttributes({
-      [OPENAI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]: completionTokens,
-      [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: completionTokens,
-    });
-  }
-  if (totalTokens !== undefined) {
-    span.setAttributes({
-      [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: totalTokens,
-    });
-  }
-}
-
-/**
- * Set common response attributes
- * @param span - The span to add attributes to
- * @param id - The response id
- * @param model - The response model
- * @param timestamp - The response timestamp
- */
-export function setCommonResponseAttributes(span: Span, id: string, model: string, timestamp: number): void {
-  span.setAttributes({
-    [OPENAI_RESPONSE_ID_ATTRIBUTE]: id,
-    [GEN_AI_RESPONSE_ID_ATTRIBUTE]: id,
-  });
-  span.setAttributes({
-    [OPENAI_RESPONSE_MODEL_ATTRIBUTE]: model,
-    [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: model,
-  });
-  span.setAttributes({
-    [OPENAI_RESPONSE_TIMESTAMP_ATTRIBUTE]: new Date(timestamp * 1000).toISOString(),
-  });
-}
-
-/**
- * Extract conversation ID from request parameters
- * Supports both Conversations API and previous_response_id chaining
- * @see https://platform.openai.com/docs/guides/conversation-state
- */
-function extractConversationId(params: Record<string, unknown>): string | undefined {
-  // Conversations API: conversation parameter (e.g., "conv_...")
-  if ('conversation' in params && typeof params.conversation === 'string') {
-    return params.conversation;
-  }
-  // Responses chaining: previous_response_id links to parent response
-  if ('previous_response_id' in params && typeof params.previous_response_id === 'string') {
-    return params.previous_response_id;
-  }
-  return undefined;
-}
-
-/**
- * Extract request parameters including model settings and conversation context
- */
-export function extractRequestParameters(params: Record<string, unknown>): Record<string, unknown> {
-  const attributes: Record<string, unknown> = {
-    [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: params.model ?? 'unknown',
-  };
-
-  if ('temperature' in params) attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE] = params.temperature;
-  if ('top_p' in params) attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE] = params.top_p;
-  if ('frequency_penalty' in params) attributes[GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE] = params.frequency_penalty;
-  if ('presence_penalty' in params) attributes[GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE] = params.presence_penalty;
-  if ('stream' in params) attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] = params.stream;
-  if ('encoding_format' in params) attributes[GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE] = params.encoding_format;
-  if ('dimensions' in params) attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE] = params.dimensions;
-
-  // Capture conversation ID for linking messages across API calls
-  const conversationId = extractConversationId(params);
-  if (conversationId) {
-    attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE] = conversationId;
-  }
-
-  return attributes;
-}
diff --git a/packages/core/test/lib/utils/openai-utils.test.ts b/packages/core/test/lib/utils/openai-utils.test.ts
index 3f8fd0045f2e..5af1ac635264 100644
--- a/packages/core/test/lib/utils/openai-utils.test.ts
+++ b/packages/core/test/lib/utils/openai-utils.test.ts
@@ -1,12 +1,6 @@
 import { describe, expect, it } from 'vitest';
 import { buildMethodPath } from '../../../src/tracing/ai/utils';
-import {
-  isChatCompletionChunk,
-  isChatCompletionResponse,
-  isConversationResponse,
-  isResponsesApiResponse,
-  isResponsesApiStreamEvent,
-} from '../../../src/tracing/openai/utils';
+import { isChatCompletionChunk, isResponsesApiStreamEvent } from '../../../src/tracing/openai/utils';
 
 describe('openai-utils', () => {
   describe('buildMethodPath', () => {
@@ -17,50 +11,6 @@ describe('openai-utils', () => {
     });
   });
-  describe('isChatCompletionResponse', () => {
-    it('should return true for valid chat completion responses', () => {
-      const validResponse = {
-        object: 'chat.completion',
-        id: 'chatcmpl-123',
-        model: 'gpt-4',
-        choices: [],
-      };
-      expect(isChatCompletionResponse(validResponse)).toBe(true);
-    });
-
-    it('should return false for invalid responses', () => {
-      expect(isChatCompletionResponse(null)).toBe(false);
-      expect(isChatCompletionResponse(undefined)).toBe(false);
-      expect(isChatCompletionResponse('string')).toBe(false);
-      expect(isChatCompletionResponse(123)).toBe(false);
-      expect(isChatCompletionResponse({})).toBe(false);
-      expect(isChatCompletionResponse({ object: 'different' })).toBe(false);
-      expect(isChatCompletionResponse({ object: null })).toBe(false);
-    });
-  });
-
-  describe('isResponsesApiResponse', () => {
-    it('should return true for valid responses API responses', () => {
-      const validResponse = {
-        object: 'response',
-        id: 'resp_123',
-        model: 'gpt-4',
-        choices: [],
-      };
-      expect(isResponsesApiResponse(validResponse)).toBe(true);
-    });
-
-    it('should return false for invalid responses', () => {
-      expect(isResponsesApiResponse(null)).toBe(false);
-      expect(isResponsesApiResponse(undefined)).toBe(false);
-      expect(isResponsesApiResponse('string')).toBe(false);
-      expect(isResponsesApiResponse(123)).toBe(false);
-      expect(isResponsesApiResponse({})).toBe(false);
-      expect(isResponsesApiResponse({ object: 'different' })).toBe(false);
-      expect(isResponsesApiResponse({ object: null })).toBe(false);
-    });
-  });
-
   describe('isResponsesApiStreamEvent', () => {
     it('should return true for valid responses API stream events', () => {
       expect(isResponsesApiStreamEvent({ type: 'response.created' })).toBe(true);
@@ -103,36 +53,4 @@ describe('openai-utils', () => {
       expect(isChatCompletionChunk({ object: null })).toBe(false);
     });
   });
-
-  describe('isConversationResponse', () => {
-    it('should return true for valid conversation responses', () => {
-      const validConversation = {
-        object: 'conversation',
-        id: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
-        created_at: 1704067200,
-      };
-      expect(isConversationResponse(validConversation)).toBe(true);
-    });
-
-    it('should return true for conversation with metadata', () => {
-      const conversationWithMetadata = {
-        object: 'conversation',
-        id: 'conv_123',
-        created_at: 1704067200,
-        metadata: { user_id: 'user_123' },
-      };
-      expect(isConversationResponse(conversationWithMetadata)).toBe(true);
-    });
-
-    it('should return false for invalid responses', () => {
-      expect(isConversationResponse(null)).toBe(false);
-      expect(isConversationResponse(undefined)).toBe(false);
-      expect(isConversationResponse('string')).toBe(false);
-      expect(isConversationResponse(123)).toBe(false);
-      expect(isConversationResponse({})).toBe(false);
-      expect(isConversationResponse({ object: 'thread' })).toBe(false);
-      expect(isConversationResponse({ object: 'response' })).toBe(false);
-      expect(isConversationResponse({ object: null })).toBe(false);
-    });
-  });
 });
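For reference, a minimal sketch of how the consolidated, duck-typed `addResponseAttributes` maps the two payload shapes onto the `gen_ai.*` attributes the tests assert. The `fakeSpan` stand-in, the sample payloads, and the `Span` cast are illustrative assumptions; only the function name, the token-field naming conventions, and the relative import path used by the test file come from the patch itself.

```ts
import type { Span } from '@sentry/core';
// Same relative path the test file above uses for the remaining helpers (path is an assumption outside that directory).
import { addResponseAttributes } from '../../../src/tracing/openai/utils';

// Minimal stand-in for a Sentry Span: it only collects whatever attributes are set on it.
const fakeSpan = {
  attributes: {} as Record<string, unknown>,
  setAttributes(attrs: Record<string, unknown>): void {
    Object.assign(this.attributes, attrs);
  },
};

// Chat Completions shape: usage reported as prompt_tokens / completion_tokens,
// finish reasons and response text read from `choices`.
addResponseAttributes(
  fakeSpan as unknown as Span,
  {
    id: 'chatcmpl-123',
    object: 'chat.completion',
    model: 'gpt-4',
    usage: { prompt_tokens: 15, completion_tokens: 25, total_tokens: 40 },
    choices: [{ finish_reason: 'stop', message: { content: 'Hi!' } }],
  },
  true,
);

// Responses API shape: usage reported as input_tokens / output_tokens,
// finish reason taken from `status`, response text from `output_text`.
addResponseAttributes(
  fakeSpan as unknown as Span,
  {
    id: 'resp_789',
    object: 'response',
    model: 'gpt-4',
    status: 'completed',
    usage: { input_tokens: 8, output_tokens: 12, total_tokens: 20 },
    output_text: 'Hello',
  },
  true,
);

// Both calls populate gen_ai.response.* and gen_ai.usage.* attributes on the stand-in span.
console.log(fakeSpan.attributes);
```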