Fix OpenAI tests #3738

Open · wants to merge 1 commit into base: potel-base
15 changes: 9 additions & 6 deletions tests/integrations/openai/test_openai.py
@@ -83,8 +83,8 @@ def test_nonstreaming_chat_completion(
     assert span["op"] == "ai.chat_completions.create.openai"

     if send_default_pii and include_prompts:
-        assert "hello" in span["data"]["ai.input_messages"]["content"]
-        assert "the model response" in span["data"]["ai.responses"]["content"]
+        assert '"content": "hello"' in span["data"]["ai.input_messages"]
+        assert '"content": "the model response"' in span["data"]["ai.responses"]
     else:
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]
@@ -125,8 +125,8 @@ async def test_nonstreaming_chat_completion_async(
     assert span["op"] == "ai.chat_completions.create.openai"

     if send_default_pii and include_prompts:
-        assert "hello" in span["data"]["ai.input_messages"]["content"]
-        assert "the model response" in span["data"]["ai.responses"]["content"]
+        assert '"content": "hello"' in span["data"]["ai.input_messages"]
+        assert '"content": "the model response"' in span["data"]["ai.responses"]
     else:
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]
@@ -218,7 +218,7 @@ def test_streaming_chat_completion(
     assert span["op"] == "ai.chat_completions.create.openai"

     if send_default_pii and include_prompts:
-        assert "hello" in span["data"]["ai.input_messages"]["content"]
+        assert '"content": "hello"' in span["data"]["ai.input_messages"]
         assert "hello world" in span["data"]["ai.responses"]
     else:
         assert "ai.input_messages" not in span["data"]
@@ -314,7 +314,7 @@ async def test_streaming_chat_completion_async(
     assert span["op"] == "ai.chat_completions.create.openai"

     if send_default_pii and include_prompts:
-        assert "hello" in span["data"]["ai.input_messages"]["content"]
+        assert '"content": "hello"' in span["data"]["ai.input_messages"]
         assert "hello world" in span["data"]["ai.responses"]
     else:
         assert "ai.input_messages" not in span["data"]
@@ -330,6 +330,7 @@ async def test_streaming_chat_completion_async(
         pass  # if tiktoken is not installed, we can't guarantee token usage will be calculated properly


+@pytest.mark.forked
Member Author: We need to fork the tests that capture exceptions. (Somehow capturing exceptions messes up the next sentry_sdk.init() and the capturing of events/exceptions in the following tests.)
 def test_bad_chat_completion(sentry_init, capture_events):
     sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0)
     events = capture_events()
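
(The forking note above relies on pytest-forked, which runs each marked test in a fresh subprocess so leaked global state cannot affect later tests. A minimal sketch of the pattern follows; the test name and body are hypothetical and not part of this PR.)

    import pytest
    import sentry_sdk

    @pytest.mark.forked  # requires the pytest-forked plugin
    def test_capture_exception_in_isolation():
        # Any client/scope state set up here lives only in the forked child
        # process, so it cannot leak into the next sentry_sdk.init() call
        # made by a later test in the parent process.
        sentry_sdk.init(dsn="")
        sentry_sdk.capture_exception(ValueError("boom"))
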
@@ -460,6 +461,7 @@ async def test_embeddings_create_async(
     assert span["measurements"]["ai_total_tokens_used"]["value"] == 30


+@pytest.mark.forked
 @pytest.mark.parametrize(
     "send_default_pii, include_prompts",
     [(True, True), (True, False), (False, True), (False, False)],
@@ -487,6 +489,7 @@ def test_embeddings_create_raises_error(
     assert event["level"] == "error"


+@pytest.mark.forked
 @pytest.mark.asyncio
 @pytest.mark.parametrize(
     "send_default_pii, include_prompts",