Skip to content

Commit

Permalink
opentelemetry-instrumentation-openai-v2: coerce openai response_format to semconv format (open-telemetry#3073)
Browse files Browse the repository at this point in the history

* opentelemetry-instrumentation-openai-v2: coerce openai response_format to semconv format

Signed-off-by: Adrian Cole <[email protected]>

* changelog

Signed-off-by: Adrian Cole <[email protected]>

---------

Signed-off-by: Adrian Cole <[email protected]>
  • Loading branch information
codefromthecrypt authored Dec 9, 2024
1 parent 6c92f38 commit 6134d5a
Show file tree
Hide file tree
Showing 6 changed files with 66 additions and 35 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## Unreleased

- Coerce openai response_format to semconv format
([#3073](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3073))
- Add example to `opentelemetry-instrumentation-openai-v2`
([#3006](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3006))
- Support for `AsyncOpenAI/AsyncCompletions` ([#2984](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/2984))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.

from os import environ
from typing import Optional, Union
from typing import Mapping, Optional, Union
from urllib.parse import urlparse

from httpx import URL
Expand Down Expand Up @@ -202,12 +202,23 @@ def get_llm_request_attributes(
GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY: kwargs.get(
"frequency_penalty"
),
GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT: kwargs.get(
"response_format"
),
GenAIAttributes.GEN_AI_OPENAI_REQUEST_SEED: kwargs.get("seed"),
}

if (response_format := kwargs.get("response_format")) is not None:
# response_format may be string or object with a string in the `type` key
if isinstance(response_format, Mapping):
if (
response_format_type := response_format.get("type")
) is not None:
attributes[
GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
] = response_format_type
else:
attributes[
GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
] = response_format

set_server_address_and_port(client_instance, attributes)
service_tier = kwargs.get("service_tier")
attributes[GenAIAttributes.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER] = (
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,9 @@ interactions:
],
"model": "gpt-4o-mini",
"max_tokens": 50,
"response_format": {
"type": "text"
},
"seed": 42,
"stream": false,
"temperature": 0.5,
Expand All @@ -25,7 +28,7 @@ interactions:
connection:
- keep-alive
content-length:
- '183'
- '220'
content-type:
- application/json
host:
Expand All @@ -45,16 +48,16 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.5
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: |-
{
"id": "chatcmpl-ASv9WMTAMZY4O1EImv3csZa6Ch7KI",
"id": "chatcmpl-AbMH3rR6OBMN9hG5w0TRrezuiHLMr",
"object": "chat.completion",
"created": 1731456242,
"created": 1733467121,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
Expand Down Expand Up @@ -84,19 +87,19 @@ interactions:
}
},
"service_tier": "default",
"system_fingerprint": "fp_0ba0d124f1"
"system_fingerprint": "fp_bba3c8e70b"
}
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e1a8088f867e167-MRS
- 8eda4640ead3e535-KUL
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Wed, 13 Nov 2024 00:04:02 GMT
- Fri, 06 Dec 2024 06:38:42 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
Expand All @@ -112,25 +115,25 @@ interactions:
- '825'
openai-organization: test_openai_org_id
openai-processing-ms:
- '488'
- '835'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
- '10000'
x-ratelimit-limit-tokens:
- '150000000'
- '200000'
x-ratelimit-remaining-requests:
- '29999'
- '9999'
x-ratelimit-remaining-tokens:
- '149999943'
- '199943'
x-ratelimit-reset-requests:
- 2ms
- 8.64s
x-ratelimit-reset-tokens:
- 0s
- 16ms
x-request-id:
- req_6df08d6267415e8f5db3628a6757edad
- req_fea877c0a861ff92a6a5217247681f24
status:
code: 200
message: OK
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,9 @@ interactions:
],
"model": "gpt-4o-mini",
"max_tokens": 50,
"response_format": {
"type": "text"
},
"seed": 42,
"stream": false,
"temperature": 0.5,
Expand All @@ -25,13 +28,13 @@ interactions:
connection:
- keep-alive
content-length:
- '183'
- '220'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.54.3
- OpenAI/Python 1.26.0
x-stainless-arch:
- arm64
x-stainless-async:
Expand All @@ -41,22 +44,20 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.54.3
x-stainless-retry-count:
- '0'
- 1.26.0
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.6
- 3.12.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: |-
{
"id": "chatcmpl-ASYMT7913Sp58qhZqQgY7g7Ia2J4M",
"id": "chatcmpl-AbMH70fQA9lMPIClvBPyBSjqJBm9F",
"object": "chat.completion",
"created": 1731368633,
"created": 1733467125,
"model": "gpt-4o-mini-2024-07-18",
"choices": [
{
Expand Down Expand Up @@ -86,19 +87,17 @@ interactions:
}
},
"service_tier": "default",
"system_fingerprint": "fp_0ba0d124f1"
"system_fingerprint": "fp_0705bf87c0"
}
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8e1225a3f8e9ce65-SIN
- 8eda465e8fe9e58c-KUL
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Mon, 11 Nov 2024 23:43:53 GMT
- Fri, 06 Dec 2024 06:38:46 GMT
Server:
- cloudflare
Set-Cookie: test_set_cookie
Expand All @@ -110,11 +109,13 @@ interactions:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
content-length:
- '825'
openai-organization: test_openai_org_id
openai-processing-ms:
- '431'
- '558'
openai-version:
- '2020-10-01'
strict-transport-security:
Expand All @@ -128,11 +129,11 @@ interactions:
x-ratelimit-remaining-tokens:
- '199943'
x-ratelimit-reset-requests:
- 14.746s
- 12.967s
x-ratelimit-reset-tokens:
- 16ms
x-request-id:
- req_81e29a8992ea8001c0240bd990acf0ab
- req_22ff608d47a299f0780f52360631eabb
status:
code: 200
message: OK
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,7 @@ async def test_async_chat_completion_extra_params(
max_tokens=50,
stream=False,
extra_body={"service_tier": "default"},
response_format={"type": "text"},
)

spans = span_exporter.get_finished_spans()
Expand All @@ -173,6 +174,12 @@ async def test_async_chat_completion_extra_params(
spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER]
== "default"
)
assert (
spans[0].attributes[
GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
]
== "text"
)


@pytest.mark.vcr()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -151,6 +151,7 @@ def test_chat_completion_extra_params(
max_tokens=50,
stream=False,
extra_body={"service_tier": "default"},
response_format={"type": "text"},
)

spans = span_exporter.get_finished_spans()
Expand All @@ -166,6 +167,12 @@ def test_chat_completion_extra_params(
spans[0].attributes[GenAIAttributes.GEN_AI_OPENAI_REQUEST_SERVICE_TIER]
== "default"
)
assert (
spans[0].attributes[
GenAIAttributes.GEN_AI_OPENAI_REQUEST_RESPONSE_FORMAT
]
== "text"
)


@pytest.mark.vcr()
Expand Down

0 comments on commit 6134d5a

Please sign in to comment.