From 8c3e1dab811a14ae3dfa1fa0b66b28fb747a1199 Mon Sep 17 00:00:00 2001 From: Abhijeet Prasad Date: Tue, 3 Jun 2025 20:49:50 -0400 Subject: [PATCH 1/2] feat(node): Switch to new semantic conventions --- .../tracing/vercelai/attributes.ts | 794 ++++++++++++++++++ .../integrations/tracing/vercelai/index.ts | 44 +- 2 files changed, 824 insertions(+), 14 deletions(-) create mode 100644 packages/node/src/integrations/tracing/vercelai/attributes.ts diff --git a/packages/node/src/integrations/tracing/vercelai/attributes.ts b/packages/node/src/integrations/tracing/vercelai/attributes.ts new file mode 100644 index 000000000000..8d7b6913a636 --- /dev/null +++ b/packages/node/src/integrations/tracing/vercelai/attributes.ts @@ -0,0 +1,794 @@ +/** + * AI SDK Telemetry Attributes + * Based on https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data + */ + +// ============================================================================= +// COMMON ATTRIBUTES +// ============================================================================= + +/** + * Common attribute for operation name across all functions and spans + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data + */ +export const OPERATION_NAME_ATTRIBUTE = 'operation.name'; + +/** + * Common attribute for AI operation ID across all functions and spans + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#collected-data + */ +export const AI_OPERATION_ID_ATTRIBUTE = 'ai.operationId'; + +// ============================================================================= +// SHARED ATTRIBUTES +// ============================================================================= + +/** + * `generateText` function - `ai.generateText` span + * `streamText` function - `ai.streamText` span + * + * The prompt that was used when calling the function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_PROMPT_ATTRIBUTE = 'ai.prompt'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The JSON schema version of the schema that was passed into the function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SCHEMA_ATTRIBUTE = 'ai.schema'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The name of the schema that was passed into the function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SCHEMA_NAME_ATTRIBUTE = 'ai.schema.name'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The description of the schema that was passed into the function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SCHEMA_DESCRIPTION_ATTRIBUTE = 'ai.schema.description'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The object that was generated (stringified JSON) + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see 
https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_RESPONSE_OBJECT_ATTRIBUTE = 'ai.response.object'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The object generation mode, e.g. `json` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SETTINGS_MODE_ATTRIBUTE = 'ai.settings.mode'; + +/** + * `generateObject` function - `ai.generateObject` span + * `streamObject` function - `ai.streamObject` span + * + * The output type that was used, e.g. `object` or `no-schema` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_SETTINGS_OUTPUT_ATTRIBUTE = 'ai.settings.output'; + +/** + * `embed` function - `ai.embed.doEmbed` span + * `embedMany` function - `ai.embedMany` span + * + * The values that were passed into the function (array) + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function + */ +export const AI_VALUES_ATTRIBUTE = 'ai.values'; + +/** + * `embed` function - `ai.embed.doEmbed` span + * `embedMany` function - `ai.embedMany` span + * + * An array of JSON-stringified embeddings + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function + */ +export const AI_EMBEDDINGS_ATTRIBUTE = 'ai.embeddings'; + +// ============================================================================= +// GENERATETEXT FUNCTION - UNIQUE ATTRIBUTES +// ============================================================================= + +/** + * `generateText` function - `ai.generateText` span + * + * The text that was generated + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_RESPONSE_TEXT_ATTRIBUTE = 'ai.response.text'; + +/** + * `generateText` function - `ai.generateText` span + * + * The tool calls that were made as part of the generation (stringified JSON) + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_RESPONSE_TOOL_CALLS_ATTRIBUTE = 'ai.response.toolCalls'; + +/** + * `generateText` function - `ai.generateText` span + * + * The reason why the generation finished + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_RESPONSE_FINISH_REASON_ATTRIBUTE = 'ai.response.finishReason'; + +/** + * `generateText` function - `ai.generateText` span + * + * The maximum number of steps that were set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_SETTINGS_MAX_STEPS_ATTRIBUTE = 'ai.settings.maxSteps'; + +/** + * `generateText` function - `ai.generateText.doGenerate` span + * + * The format of the prompt + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_PROMPT_FORMAT_ATTRIBUTE = 'ai.prompt.format'; + +/** + * `generateText` function - `ai.generateText.doGenerate` span + * + * The messages that were passed into the provider + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_PROMPT_MESSAGES_ATTRIBUTE = 'ai.prompt.messages'; + +/** + * `generateText` function - `ai.generateText.doGenerate` span + * + * Array 
of stringified tool definitions + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_PROMPT_TOOLS_ATTRIBUTE = 'ai.prompt.tools'; + +/** + * `generateText` function - `ai.generateText.doGenerate` span + * + * The stringified tool choice setting (JSON) + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_PROMPT_TOOL_CHOICE_ATTRIBUTE = 'ai.prompt.toolChoice'; + +// ============================================================================= +// STREAMTEXT FUNCTION - UNIQUE ATTRIBUTES +// ============================================================================= + +/** + * `streamText` function - `ai.streamText.doStream` span + * + * The time it took to receive the first chunk in milliseconds + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_RESPONSE_MS_TO_FIRST_CHUNK_ATTRIBUTE = 'ai.response.msToFirstChunk'; + +/** + * `streamText` function - `ai.streamText.doStream` span + * + * The time it took to receive the finish part of the LLM stream in milliseconds + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_RESPONSE_MS_TO_FINISH_ATTRIBUTE = 'ai.response.msToFinish'; + +/** + * `streamText` function - `ai.streamText.doStream` span + * + * The average completion tokens per second + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND_ATTRIBUTE = 'ai.response.avgCompletionTokensPerSecond'; + +// ============================================================================= +// EMBED FUNCTION - UNIQUE ATTRIBUTES +// ============================================================================= + +/** + * `embed` function - `ai.embed` span + * + * The value that was passed into the `embed` function + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + */ +export const AI_VALUE_ATTRIBUTE = 'ai.value'; + +/** + * `embed` function - `ai.embed` span + * + * A JSON-stringified embedding + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + */ +export const AI_EMBEDDING_ATTRIBUTE = 'ai.embedding'; + +// ============================================================================= +// BASIC LLM SPAN INFORMATION +// ============================================================================= + +/** + * Basic LLM span information + * Multiple spans + * + * The functionId that was set through `telemetry.functionId` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const RESOURCE_NAME_ATTRIBUTE = 'resource.name'; + +/** + * Basic LLM span information + * Multiple spans + * + * The id of the model + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_MODEL_ID_ATTRIBUTE = 'ai.model.id'; + +/** + * Basic LLM span information + * Multiple spans + * + * The provider of the model + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_MODEL_PROVIDER_ATTRIBUTE = 'ai.model.provider'; + +/** + * Basic LLM span information + * Multiple spans + * + * The request headers that were passed in through `headers` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_REQUEST_HEADERS_ATTRIBUTE = 'ai.request.headers'; + +/** + * Basic LLM span information + * Multiple spans + * + * The maximum number of retries that were set + * @see 
https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_SETTINGS_MAX_RETRIES_ATTRIBUTE = 'ai.settings.maxRetries'; + +/** + * Basic LLM span information + * Multiple spans + * + * The functionId that was set through `telemetry.functionId` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE = 'ai.telemetry.functionId'; + +/** + * Basic LLM span information + * Multiple spans + * + * The metadata that was passed in through `telemetry.metadata` + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_TELEMETRY_METADATA_ATTRIBUTE = 'ai.telemetry.metadata'; + +/** + * Basic LLM span information + * Multiple spans + * + * The number of completion tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE = 'ai.usage.completionTokens'; + +/** + * Basic LLM span information + * Multiple spans + * + * The number of prompt tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-llm-span-information + */ +export const AI_USAGE_PROMPT_TOKENS_ATTRIBUTE = 'ai.usage.promptTokens'; + +// ============================================================================= +// CALL LLM SPAN INFORMATION +// ============================================================================= + +/** + * Call LLM span information + * Individual LLM call spans + * + * The model that was used to generate the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const AI_RESPONSE_MODEL_ATTRIBUTE = 'ai.response.model'; + +/** + * Call LLM span information + * Individual LLM call spans + * + * The id of the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const AI_RESPONSE_ID_ATTRIBUTE = 'ai.response.id'; + +/** + * Call LLM span information + * Individual LLM call spans + * + * The timestamp of the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const AI_RESPONSE_TIMESTAMP_ATTRIBUTE = 'ai.response.timestamp'; + +// ============================================================================= +// SEMANTIC CONVENTIONS FOR GENAI OPERATIONS +// ============================================================================= + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The provider that was used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_SYSTEM_ATTRIBUTE = 'gen_ai.system'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The model that was requested + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_MODEL_ATTRIBUTE = 'gen_ai.request.model'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The temperature that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE = 'gen_ai.request.temperature'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The maximum number of tokens that were set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const 
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE = 'gen_ai.request.max_tokens'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The frequency penalty that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE = 'gen_ai.request.frequency_penalty'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The presence penalty that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE = 'gen_ai.request.presence_penalty'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The topK parameter value that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_TOP_K_ATTRIBUTE = 'gen_ai.request.top_k'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The topP parameter value that was set + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_TOP_P_ATTRIBUTE = 'gen_ai.request.top_p'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The stop sequences + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE = 'gen_ai.request.stop_sequences'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The finish reasons that were returned by the provider + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE = 'gen_ai.response.finish_reasons'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The model that was used to generate the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_RESPONSE_MODEL_ATTRIBUTE = 'gen_ai.response.model'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The id of the response + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_RESPONSE_ID_ATTRIBUTE = 'gen_ai.response.id'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The number of prompt tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.input_tokens'; + +/** + * Semantic Conventions for GenAI operations + * Individual LLM call spans + * + * The number of completion tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#call-llm-span-information + */ +export const GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE = 'gen_ai.usage.output_tokens'; + +// ============================================================================= +// BASIC EMBEDDING SPAN INFORMATION +// ============================================================================= + +/** + * Basic embedding span information + * Embedding spans + * + * The number of tokens that were used + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#basic-embedding-span-information + */ +export const AI_USAGE_TOKENS_ATTRIBUTE = 'ai.usage.tokens'; + +// 
============================================================================= +// TOOL CALL SPANS +// ============================================================================= + +/** + * Tool call spans + * `ai.toolCall` span + * + * The name of the tool + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_NAME_ATTRIBUTE = 'ai.toolCall.name'; + +/** + * Tool call spans + * `ai.toolCall` span + * + * The id of the tool call + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_ID_ATTRIBUTE = 'ai.toolCall.id'; + +/** + * Tool call spans + * `ai.toolCall` span + * + * The parameters of the tool call + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_ARGS_ATTRIBUTE = 'ai.toolCall.args'; + +/** + * Tool call spans + * `ai.toolCall` span + * + * The result of the tool call + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_RESULT_ATTRIBUTE = 'ai.toolCall.result'; + +// ============================================================================= +// SPAN ATTRIBUTE OBJECTS +// ============================================================================= + +/** + * Attributes collected for `ai.generateText` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_GENERATE_TEXT_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_PROMPT: AI_PROMPT_ATTRIBUTE, + AI_RESPONSE_TEXT: AI_RESPONSE_TEXT_ATTRIBUTE, + AI_RESPONSE_TOOL_CALLS: AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, + AI_RESPONSE_FINISH_REASON: AI_RESPONSE_FINISH_REASON_ATTRIBUTE, + AI_SETTINGS_MAX_STEPS: AI_SETTINGS_MAX_STEPS_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.generateText.doGenerate` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generatetext-function + */ +export const AI_GENERATE_TEXT_DO_GENERATE_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_PROMPT_FORMAT: AI_PROMPT_FORMAT_ATTRIBUTE, + AI_PROMPT_MESSAGES: AI_PROMPT_MESSAGES_ATTRIBUTE, + AI_PROMPT_TOOLS: AI_PROMPT_TOOLS_ATTRIBUTE, + AI_PROMPT_TOOL_CHOICE: AI_PROMPT_TOOL_CHOICE_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + // Call LLM span information + AI_RESPONSE_MODEL: AI_RESPONSE_MODEL_ATTRIBUTE, + AI_RESPONSE_ID: AI_RESPONSE_ID_ATTRIBUTE, + AI_RESPONSE_TIMESTAMP: 
AI_RESPONSE_TIMESTAMP_ATTRIBUTE, + // Semantic Conventions for GenAI operations + GEN_AI_SYSTEM: GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_REQUEST_MODEL: GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE: GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS: GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_FREQUENCY_PENALTY: GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_PRESENCE_PENALTY: GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_TOP_K: GEN_AI_REQUEST_TOP_K_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P: GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_REQUEST_STOP_SEQUENCES: GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS: GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL: GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_ID: GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS: GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS: GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.streamText` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_STREAM_TEXT_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_PROMPT: AI_PROMPT_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.streamText.doStream` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamtext-function + */ +export const AI_STREAM_TEXT_DO_STREAM_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_RESPONSE_MS_TO_FIRST_CHUNK: AI_RESPONSE_MS_TO_FIRST_CHUNK_ATTRIBUTE, + AI_RESPONSE_MS_TO_FINISH: AI_RESPONSE_MS_TO_FINISH_ATTRIBUTE, + AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND: AI_RESPONSE_AVG_COMPLETION_TOKENS_PER_SECOND_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + // Call LLM span information + AI_RESPONSE_MODEL: AI_RESPONSE_MODEL_ATTRIBUTE, + AI_RESPONSE_ID: AI_RESPONSE_ID_ATTRIBUTE, + AI_RESPONSE_TIMESTAMP: AI_RESPONSE_TIMESTAMP_ATTRIBUTE, + // Semantic Conventions for GenAI operations + GEN_AI_SYSTEM: GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_REQUEST_MODEL: GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE: GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS: GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_FREQUENCY_PENALTY: GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_PRESENCE_PENALTY: GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_TOP_K: 
GEN_AI_REQUEST_TOP_K_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P: GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_REQUEST_STOP_SEQUENCES: GEN_AI_REQUEST_STOP_SEQUENCES_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS: GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL: GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_ID: GEN_AI_RESPONSE_ID_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS: GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS: GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.generateObject` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#generateobject-function + */ +export const AI_GENERATE_OBJECT_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_SCHEMA: AI_SCHEMA_ATTRIBUTE, + AI_SCHEMA_NAME: AI_SCHEMA_NAME_ATTRIBUTE, + AI_SCHEMA_DESCRIPTION: AI_SCHEMA_DESCRIPTION_ATTRIBUTE, + AI_RESPONSE_OBJECT: AI_RESPONSE_OBJECT_ATTRIBUTE, + AI_SETTINGS_MODE: AI_SETTINGS_MODE_ATTRIBUTE, + AI_SETTINGS_OUTPUT: AI_SETTINGS_OUTPUT_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.streamObject` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#streamobject-function + */ +export const AI_STREAM_OBJECT_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_SCHEMA: AI_SCHEMA_ATTRIBUTE, + AI_SCHEMA_NAME: AI_SCHEMA_NAME_ATTRIBUTE, + AI_SCHEMA_DESCRIPTION: AI_SCHEMA_DESCRIPTION_ATTRIBUTE, + AI_RESPONSE_OBJECT: AI_RESPONSE_OBJECT_ATTRIBUTE, + AI_SETTINGS_MODE: AI_SETTINGS_MODE_ATTRIBUTE, + AI_SETTINGS_OUTPUT: AI_SETTINGS_OUTPUT_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS: AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS: AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.embed` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + */ +export const AI_EMBED_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_VALUE: AI_VALUE_ATTRIBUTE, + AI_EMBEDDING: AI_EMBEDDING_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + // Basic embedding span information + AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE, +} as const; + +/** + 
* Attributes collected for `ai.embed.doEmbed` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embed-function + */ +export const AI_EMBED_DO_EMBED_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_VALUES: AI_VALUES_ATTRIBUTE, + AI_EMBEDDINGS: AI_EMBEDDINGS_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + // Basic embedding span information + AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.embedMany` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#embedmany-function + */ +export const AI_EMBED_MANY_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_VALUES: AI_VALUES_ATTRIBUTE, + AI_EMBEDDINGS: AI_EMBEDDINGS_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, + // Basic embedding span information + AI_USAGE_TOKENS: AI_USAGE_TOKENS_ATTRIBUTE, +} as const; + +/** + * Attributes collected for `ai.toolCall` span + * @see https://ai-sdk.dev/docs/ai-sdk-core/telemetry#tool-call-spans + */ +export const AI_TOOL_CALL_SPAN_ATTRIBUTES = { + OPERATION_NAME: OPERATION_NAME_ATTRIBUTE, + AI_OPERATION_ID: AI_OPERATION_ID_ATTRIBUTE, + AI_TOOL_CALL_NAME: AI_TOOL_CALL_NAME_ATTRIBUTE, + AI_TOOL_CALL_ID: AI_TOOL_CALL_ID_ATTRIBUTE, + AI_TOOL_CALL_ARGS: AI_TOOL_CALL_ARGS_ATTRIBUTE, + AI_TOOL_CALL_RESULT: AI_TOOL_CALL_RESULT_ATTRIBUTE, + // Basic LLM span information + RESOURCE_NAME: RESOURCE_NAME_ATTRIBUTE, + AI_MODEL_ID: AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER: AI_MODEL_PROVIDER_ATTRIBUTE, + AI_REQUEST_HEADERS: AI_REQUEST_HEADERS_ATTRIBUTE, + AI_SETTINGS_MAX_RETRIES: AI_SETTINGS_MAX_RETRIES_ATTRIBUTE, + AI_TELEMETRY_FUNCTION_ID: AI_TELEMETRY_FUNCTION_ID_ATTRIBUTE, + AI_TELEMETRY_METADATA: AI_TELEMETRY_METADATA_ATTRIBUTE, +} as const; diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts index f68b95f0f815..21ae65b6f71b 100644 --- a/packages/node/src/integrations/tracing/vercelai/index.ts +++ b/packages/node/src/integrations/tracing/vercelai/index.ts @@ -3,6 +3,16 @@ import type { IntegrationFn } from '@sentry/core'; import { defineIntegration, SEMANTIC_ATTRIBUTE_SENTRY_OP, spanToJSON } from '@sentry/core'; import { generateInstrumentOnce } from '../../../otel/instrument'; import { addOriginToSpan } from '../../../utils/addOriginToSpan'; +import { + AI_MODEL_ID_ATTRIBUTE, + AI_MODEL_PROVIDER_ATTRIBUTE, + AI_PROMPT_ATTRIBUTE, + AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE, + AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, + GEN_AI_RESPONSE_MODEL_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, +} from './attributes'; import { SentryVercelAiInstrumentation } from './instrumentation'; const INTEGRATION_NAME = 
'VercelAI'; @@ -27,10 +37,10 @@ const _vercelAIIntegration = (() => { } // The id of the model - const aiModelId = attributes['ai.model.id']; + const aiModelId = attributes[AI_MODEL_ID_ATTRIBUTE]; // the provider of the model - const aiModelProvider = attributes['ai.model.provider']; + const aiModelProvider = attributes[AI_MODEL_PROVIDER_ATTRIBUTE]; // both of these must be defined for the integration to work if (typeof aiModelId !== 'string' || typeof aiModelProvider !== 'string' || !aiModelId || !aiModelProvider) { @@ -114,11 +124,11 @@ const _vercelAIIntegration = (() => { span.setAttribute('ai.pipeline.name', functionId); } - if (attributes['ai.prompt']) { - span.setAttribute('ai.input_messages', attributes['ai.prompt']); + if (attributes[AI_PROMPT_ATTRIBUTE]) { + span.setAttribute('gen_ai.prompt', attributes[AI_PROMPT_ATTRIBUTE]); } - if (attributes['ai.model.id']) { - span.setAttribute('ai.model_id', attributes['ai.model.id']); + if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) { + span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]); } span.setAttribute('ai.streaming', name.includes('stream')); }); @@ -132,18 +142,24 @@ const _vercelAIIntegration = (() => { continue; } - if (attributes['ai.usage.completionTokens'] != undefined) { - attributes['ai.completion_tokens.used'] = attributes['ai.usage.completionTokens']; + if ( + attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] != undefined && + attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] != undefined + ) { + attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]; } - if (attributes['ai.usage.promptTokens'] != undefined) { - attributes['ai.prompt_tokens.used'] = attributes['ai.usage.promptTokens']; + if ( + attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] != undefined && + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] != undefined + ) { + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE]; } if ( - typeof attributes['ai.usage.completionTokens'] == 'number' && - typeof attributes['ai.usage.promptTokens'] == 'number' + typeof attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] == 'number' && + typeof attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] == 'number' ) { - attributes['ai.total_tokens.used'] = - attributes['ai.usage.completionTokens'] + attributes['ai.usage.promptTokens']; + attributes['gen_ai.usage.total_tokens'] = + attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] + attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE]; } } } From 772d3b071633c360d25a3abd071454cff928cb4c Mon Sep 17 00:00:00 2001 From: Abhijeet Prasad Date: Tue, 3 Jun 2025 21:06:11 -0400 Subject: [PATCH 2/2] further refactors --- .../suites/tracing/ai/test.ts | 50 +++++++++---------- .../integrations/tracing/vercelai/index.ts | 20 ++++---- 2 files changed, 33 insertions(+), 37 deletions(-) diff --git a/dev-packages/node-integration-tests/suites/tracing/ai/test.ts b/dev-packages/node-integration-tests/suites/tracing/ai/test.ts index c0a3ccb4a78a..b97c2b688a69 100644 --- a/dev-packages/node-integration-tests/suites/tracing/ai/test.ts +++ b/dev-packages/node-integration-tests/suites/tracing/ai/test.ts @@ -12,20 +12,18 @@ describe('ai', () => { spans: expect.arrayContaining([ expect.objectContaining({ data: expect.objectContaining({ - 'ai.completion_tokens.used': 20, 'ai.model.id': 'mock-model-id', 'ai.model.provider': 'mock-provider', - 'ai.model_id': 'mock-model-id', 'ai.operationId': 'ai.generateText', 'ai.pipeline.name': 
'generateText', - 'ai.prompt_tokens.used': 10, 'ai.response.finishReason': 'stop', 'ai.settings.maxRetries': 2, 'ai.settings.maxSteps': 1, 'ai.streaming': false, - 'ai.total_tokens.used': 30, - 'ai.usage.completionTokens': 20, - 'ai.usage.promptTokens': 10, + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, 'operation.name': 'ai.generateText', 'sentry.op': 'ai.pipeline.generateText', 'sentry.origin': 'auto.vercelai.otel', @@ -47,18 +45,17 @@ describe('ai', () => { 'gen_ai.system': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', 'ai.pipeline.name': 'generateText.doGenerate', - 'ai.model_id': 'mock-model-id', 'ai.streaming': false, 'ai.response.finishReason': 'stop', 'ai.response.model': 'mock-model-id', - 'ai.usage.promptTokens': 10, - 'ai.usage.completionTokens': 20, + 'ai.response.id': expect.any(String), + 'ai.response.timestamp': expect.any(String), 'gen_ai.response.finish_reasons': ['stop'], 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, - 'ai.completion_tokens.used': 20, - 'ai.prompt_tokens.used': 10, - 'ai.total_tokens.used': 30, + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.total_tokens': 30, }), description: 'generateText.doGenerate', op: 'ai.run.doGenerate', @@ -67,22 +64,21 @@ describe('ai', () => { }), expect.objectContaining({ data: expect.objectContaining({ - 'ai.completion_tokens.used': 20, 'ai.model.id': 'mock-model-id', 'ai.model.provider': 'mock-provider', - 'ai.model_id': 'mock-model-id', - 'ai.prompt': '{"prompt":"Where is the second span?"}', 'ai.operationId': 'ai.generateText', 'ai.pipeline.name': 'generateText', - 'ai.prompt_tokens.used': 10, + 'ai.prompt': '{"prompt":"Where is the second span?"}', 'ai.response.finishReason': 'stop', - 'ai.input_messages': '{"prompt":"Where is the second span?"}', + 'ai.response.text': expect.any(String), 'ai.settings.maxRetries': 2, 'ai.settings.maxSteps': 1, 'ai.streaming': false, - 'ai.total_tokens.used': 30, - 'ai.usage.completionTokens': 20, - 'ai.usage.promptTokens': 10, + 'gen_ai.prompt': '{"prompt":"Where is the second span?"}', + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.input_tokens': 10, + 'gen_ai.usage.output_tokens': 20, + 'gen_ai.usage.total_tokens': 30, 'operation.name': 'ai.generateText', 'sentry.op': 'ai.pipeline.generateText', 'sentry.origin': 'auto.vercelai.otel', @@ -104,18 +100,20 @@ describe('ai', () => { 'gen_ai.system': 'mock-provider', 'gen_ai.request.model': 'mock-model-id', 'ai.pipeline.name': 'generateText.doGenerate', - 'ai.model_id': 'mock-model-id', 'ai.streaming': false, 'ai.response.finishReason': 'stop', 'ai.response.model': 'mock-model-id', - 'ai.usage.promptTokens': 10, - 'ai.usage.completionTokens': 20, + 'ai.response.id': expect.any(String), + 'ai.response.text': expect.any(String), + 'ai.response.timestamp': expect.any(String), + 'ai.prompt.format': expect.any(String), + 'ai.prompt.messages': expect.any(String), 'gen_ai.response.finish_reasons': ['stop'], 'gen_ai.usage.input_tokens': 10, 'gen_ai.usage.output_tokens': 20, - 'ai.completion_tokens.used': 20, - 'ai.prompt_tokens.used': 10, - 'ai.total_tokens.used': 30, + 'gen_ai.response.id': expect.any(String), + 'gen_ai.response.model': 'mock-model-id', + 'gen_ai.usage.total_tokens': 30, }), description: 'generateText.doGenerate', op: 'ai.run.doGenerate', diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts 
b/packages/node/src/integrations/tracing/vercelai/index.ts index 21ae65b6f71b..92fe69bea673 100644 --- a/packages/node/src/integrations/tracing/vercelai/index.ts +++ b/packages/node/src/integrations/tracing/vercelai/index.ts @@ -142,24 +142,22 @@ const _vercelAIIntegration = (() => { continue; } - if ( - attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] != undefined && - attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] != undefined - ) { + if (attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] != undefined) { attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE]; } - if ( - attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] != undefined && - attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] != undefined - ) { + if (attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] != undefined) { attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE]; + // eslint-disable-next-line @typescript-eslint/no-dynamic-delete + delete attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE]; } if ( - typeof attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] == 'number' && - typeof attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] == 'number' + typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' && + typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' ) { attributes['gen_ai.usage.total_tokens'] = - attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] + attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE]; + attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]; } } }
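
Note: as a reading aid for the final hunk above, here is a minimal standalone sketch of the token-usage normalization that the second commit lands, written against the constants exported from attributes.ts in the first commit. The function name `normalizeTokenUsage` and the `Attributes` type import are illustrative assumptions, not part of the patch; the body mirrors the committed logic (move legacy `ai.usage.*` counts onto the `gen_ai.usage.*` keys, drop the old keys, then derive a total).

import type { Attributes } from '@opentelemetry/api';
import {
  AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE,
  AI_USAGE_PROMPT_TOKENS_ATTRIBUTE,
  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
} from './attributes';

// Illustrative helper (hypothetical name): normalizes a span's token-usage
// attributes onto the gen_ai.* semantic conventions, as in the second commit.
function normalizeTokenUsage(attributes: Attributes): void {
  // Copy the legacy completion-token count onto the new output-token key,
  // then remove the legacy key so only the new convention remains.
  if (attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE] != undefined) {
    attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE];
    delete attributes[AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE];
  }

  // Same migration for the legacy prompt-token count -> input tokens.
  if (attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE] != undefined) {
    attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE];
    delete attributes[AI_USAGE_PROMPT_TOKENS_ATTRIBUTE];
  }

  // Derive the total from the already-normalized gen_ai.* keys, matching the
  // 'gen_ai.usage.total_tokens' attribute written by the integration.
  if (
    typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' &&
    typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number'
  ) {
    attributes['gen_ai.usage.total_tokens'] =
      attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE];
  }
}

Reading the diff through this lens also explains why commit 2 drops the `&& attributes[GEN_AI_USAGE_*] != undefined` guards from commit 1: the new keys are the migration target, so they should be written whenever the legacy keys are present, not only when both already exist.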