From ada3a9ae32846ff251321244979f8d5bcbf54733 Mon Sep 17 00:00:00 2001
From: Karthik Kalyanaraman
Date: Wed, 22 May 2024 08:08:42 -0700
Subject: [PATCH] Add additional LLM span attributes

---
 docs/attributes-registry/gen-ai.md |  2 ++
 model/registry/gen-ai.yaml         | 12 ++++++++++++
 model/trace/gen-ai.yaml            |  4 ++++
 3 files changed, 18 insertions(+)

diff --git a/docs/attributes-registry/gen-ai.md b/docs/attributes-registry/gen-ai.md
index a58d9c3989..66d20d606e 100644
--- a/docs/attributes-registry/gen-ai.md
+++ b/docs/attributes-registry/gen-ai.md
@@ -18,6 +18,8 @@ This document defines the attributes used to describe telemetry in the context o
 | `gen_ai.request.model` | string | The name of the LLM a request is being made to. | `gpt-4` | ![Experimental](https://img.shields.io/badge/-experimental-blue) |
 | `gen_ai.request.temperature` | double | The temperature setting for the LLM request. | `0.0` | ![Experimental](https://img.shields.io/badge/-experimental-blue) |
 | `gen_ai.request.top_p` | double | The top_p sampling setting for the LLM request. | `1.0` | ![Experimental](https://img.shields.io/badge/-experimental-blue) |
+| `gen_ai.request.stop` | string | Up to 4 sequences where the API will stop generating further tokens. | `"\n"` | ![Experimental](https://img.shields.io/badge/-experimental-blue) |
+| `gen_ai.request.top_k` | double | The top_k sampling setting for the LLM request. | `1.0` | ![Experimental](https://img.shields.io/badge/-experimental-blue) |
 | `gen_ai.response.finish_reasons` | string[] | Array of reasons the model stopped generating tokens, corresponding to each generation received. | `stop` | ![Experimental](https://img.shields.io/badge/-experimental-blue) |
 | `gen_ai.response.id` | string | The unique identifier for the completion. | `chatcmpl-123` | ![Experimental](https://img.shields.io/badge/-experimental-blue) |
 | `gen_ai.response.model` | string | The name of the LLM a response was generated from.
| `gpt-4-0613` | ![Experimental](https://img.shields.io/badge/-experimental-blue) |

diff --git a/model/registry/gen-ai.yaml b/model/registry/gen-ai.yaml
index ab2ea3700c..73abc385e2 100644
--- a/model/registry/gen-ai.yaml
+++ b/model/registry/gen-ai.yaml
@@ -45,6 +45,18 @@ groups:
       brief: The top_p sampling setting for the LLM request.
       examples: [1.0]
       tag: llm-generic-request
+    - id: request.top_k
+      stability: experimental
+      type: double
+      brief: The top_k sampling setting for the LLM request.
+      examples: [1.0]
+      tag: llm-generic-request
+    - id: request.stop
+      stability: experimental
+      type: string
+      brief: Up to 4 sequences where the API will stop generating further tokens.
+      examples: ['\n']
+      tag: llm-generic-request
     - id: response.id
       stability: experimental
       type: string

diff --git a/model/trace/gen-ai.yaml b/model/trace/gen-ai.yaml
index bf1d112e37..ef493b1a99 100644
--- a/model/trace/gen-ai.yaml
+++ b/model/trace/gen-ai.yaml
@@ -23,6 +23,10 @@ groups:
         requirement_level: recommended
       - ref: gen_ai.request.top_p
         requirement_level: recommended
+      - ref: gen_ai.request.top_k
+        requirement_level: recommended
+      - ref: gen_ai.request.stop
+        requirement_level: recommended
       - ref: gen_ai.response.id
         requirement_level: recommended
       - ref: gen_ai.response.model