
Python: Add OpenTelemetry to Python SK #6914

Merged 41 commits on Jul 25, 2024
Commits
22204aa loggin (glahaye, Jun 23, 2024)
1156b46 Merge main (glahaye, Jun 23, 2024)
e5ed784 Merge branch 'main' into python_telemetry (glahaye, Jun 24, 2024)
a2c5fd7 Update python/semantic_kernel/connectors/ai/open_ai/services/open_ai_… (glahaye, Jun 24, 2024)
2848051 Addressed PR issues (glahaye, Jun 27, 2024)
fe86266 Merge main (glahaye, Jun 27, 2024)
e9dd311 Merge main (glahaye, Jun 27, 2024)
4f3783d Update poetry info after merge (glahaye, Jun 27, 2024)
e71fab2 Address PR issues (glahaye, Jun 27, 2024)
6bf6406 Fix lint warning (glahaye, Jun 28, 2024)
00269b9 Fix false positive security issue (glahaye, Jun 28, 2024)
1fac03f completion telemetry now in decorator (glahaye, Jul 8, 2024)
cc13f52 Use classvar (glahaye, Jul 8, 2024)
6437b04 Merge main (glahaye, Jul 8, 2024)
c6aa839 Proper merge of poetry lock (glahaye, Jul 8, 2024)
8cc7681 Fix precommit qual issues (glahaye, Jul 8, 2024)
f875eb3 Address PR issues (glahaye, Jul 9, 2024)
157ff32 Address PR comments + add trace_text_completion (glahaye, Jul 10, 2024)
7ac6a2d Merge main (glahaye, Jul 10, 2024)
1d94e81 Adjust poetry lock (glahaye, Jul 10, 2024)
386b121 Fix mypy warnings (glahaye, Jul 10, 2024)
c903017 Sync poetry.lock (glahaye, Jul 11, 2024)
22b6ba0 Merge main (glahaye, Jul 15, 2024)
5c0c031 Merge branch 'main' into python_telemetry (glahaye, Jul 15, 2024)
dd25760 Fix ruff warning after merge (glahaye, Jul 15, 2024)
d95c8d2 Use kwargs for prompt, chat_history and settings (glahaye, Jul 15, 2024)
76739cb Merge branch 'main' into python_telemetry (glahaye, Jul 16, 2024)
e0a2471 Merge main (glahaye, Jul 17, 2024)
a04bbd6 Merge branch 'python_telemetry' of https://github.com/glahaye/semanti… (glahaye, Jul 17, 2024)
17c2c72 Fix poetry.lock after merge from main (glahaye, Jul 17, 2024)
1f163f1 Add unit tests (glahaye, Jul 23, 2024)
6710051 Merge main (glahaye, Jul 23, 2024)
d73ead7 Adapt poetry.lock after merge (glahaye, Jul 23, 2024)
066e370 Merge branch 'main' into python_telemetry (glahaye, Jul 24, 2024)
6ae2ef6 Address PR comments + enable async unit tests (glahaye, Jul 24, 2024)
3e8d5e9 Merge branch 'python_telemetry' of https://github.com/glahaye/semanti… (glahaye, Jul 24, 2024)
c001002 Fix unit tests (glahaye, Jul 24, 2024)
1a73614 Overriding .env file in unit tests (glahaye, Jul 24, 2024)
dabe4c2 Fix unit tests (glahaye, Jul 25, 2024)
32e03bb Merge main (glahaye, Jul 25, 2024)
f2b0f5d Fix poetry.lock after merge (glahaye, Jul 25, 2024)
Files changed
3 changes: 2 additions & 1 deletion python/poetry.lock

Some generated files are not rendered by default.

5 changes: 5 additions & 0 deletions python/pyproject.toml
@@ -26,6 +26,11 @@ openai = ">=1.0"

 # openapi and swagger
 openapi_core = ">=0.18,<0.20"

+# OpenTelemetry
+opentelemetry-api = "^1.24.0"
+opentelemetry-sdk = "^1.24.0"
+
 prance = "^23.6.21.0"

 # templating
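The two packages above pull in only the OpenTelemetry API and SDK; exporting the resulting spans is left to the host application. A minimal sketch of wiring up a tracer provider so the telemetry added in this PR becomes visible (the console exporter and service name are illustrative choices, not part of the change):

    from opentelemetry import trace
    from opentelemetry.sdk.resources import Resource
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

    # Register a global tracer provider before invoking any Semantic Kernel service;
    # spans created through opentelemetry-api are no-ops until a provider is set.
    provider = TracerProvider(resource=Resource.create({"service.name": "sk-demo"}))
    provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
    trace.set_tracer_provider(provider)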
@@ -7,7 +7,7 @@
 from functools import reduce
 from typing import TYPE_CHECKING, Any

-from semantic_kernel.connectors.telemetry import SEMANTIC_KERNEL_USER_AGENT
+from semantic_kernel.utils.telemetry.user_agent import SEMANTIC_KERNEL_USER_AGENT

 if sys.version_info >= (3, 12):
     from typing import override  # pragma: no cover
@@ -20,9 +20,9 @@
 from semantic_kernel.connectors.ai.azure_ai_inference.azure_ai_inference_settings import AzureAIInferenceSettings
 from semantic_kernel.connectors.ai.azure_ai_inference.services.azure_ai_inference_base import AzureAIInferenceBase
 from semantic_kernel.connectors.ai.embeddings.embedding_generator_base import EmbeddingGeneratorBase
-from semantic_kernel.connectors.telemetry import SEMANTIC_KERNEL_USER_AGENT
 from semantic_kernel.exceptions.service_exceptions import ServiceInitializationError
 from semantic_kernel.utils.experimental_decorator import experimental_class
+from semantic_kernel.utils.telemetry.user_agent import SEMANTIC_KERNEL_USER_AGENT

 if TYPE_CHECKING:
     from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
@@ -50,7 +50,7 @@ async def get_chat_message_content(
         Returns:
             A string representing the response from the LLM.
         """
-        results = await self.get_chat_message_contents(chat_history, settings, **kwargs)
+        results = await self.get_chat_message_contents(chat_history=chat_history, settings=settings, **kwargs)
         if results:
             return results[0]
         # this should not happen, should error out before returning an empty list
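Note the switch to keyword arguments here and at the other call sites below: a wrapping decorator such as trace_chat_completion can only recover arguments it can name from kwargs, while positionally passed values land in args at offsets the wrapper cannot rely on. A hypothetical illustration of that pattern (the wrapper name and behavior are ours, not the PR's):

    import functools


    def needs_named_args(func):
        """Sketch: a wrapper only sees arguments that callers pass by keyword."""

        @functools.wraps(func)
        async def wrapper(self, *args, **kwargs):
            chat_history = kwargs.get("chat_history")  # None if passed positionally
            if chat_history is not None:
                print(f"{func.__name__} called with an explicit chat_history")
            return await func(self, *args, **kwargs)

        return wrapper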
@@ -9,10 +9,10 @@

 from semantic_kernel.connectors.ai.open_ai.const import DEFAULT_AZURE_API_VERSION
 from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler, OpenAIModelTypes
-from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent
 from semantic_kernel.const import USER_AGENT
 from semantic_kernel.exceptions import ServiceInitializationError
 from semantic_kernel.kernel_pydantic import HttpsUrl
+from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent

 logger: logging.Logger = logging.getLogger(__name__)
@@ -5,7 +5,7 @@
 import sys
 from collections.abc import AsyncGenerator
 from functools import reduce
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, ClassVar

 if sys.version_info >= (3, 12):
     from typing import override  # pragma: no cover
@@ -38,6 +38,7 @@
 from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
     AutoFunctionInvocationContext,
 )
+from semantic_kernel.utils.telemetry.decorators import trace_chat_completion

 if TYPE_CHECKING:
     from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
@@ -56,6 +57,8 @@ class InvokeTermination(Exception):
 class OpenAIChatCompletionBase(OpenAIHandler, ChatCompletionClientBase):
     """OpenAI Chat completion class."""

+    MODEL_PROVIDER_NAME: ClassVar[str] = "openai"
+
     # region Overriding base class methods
     # most of the methods are overridden from the ChatCompletionClientBase class, otherwise it is mentioned

@@ -64,6 +67,7 @@ def get_prompt_execution_settings_class(self) -> type["PromptExecutionSettings"]
         return OpenAIChatPromptExecutionSettings

     @override
+    @trace_chat_completion(MODEL_PROVIDER_NAME)
     async def get_chat_message_contents(
         self,
         chat_history: ChatHistory,
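The trace_chat_completion decorator itself lives in semantic_kernel/utils/telemetry/decorators.py, which this view does not render. A rough sketch of the shape such a decorator can take, using the gen_ai constants added in const.py below and assuming the service exposes an ai_model_id attribute; the real implementation's argument handling and error recording may differ:

    import functools

    from opentelemetry import trace

    tracer = trace.get_tracer(__name__)


    def trace_chat_completion(model_provider: str):
        """Sketch: wrap a chat-completion method in a span with gen_ai attributes."""

        def decorator(func):
            @functools.wraps(func)
            async def wrapper(self, *args, **kwargs):
                with tracer.start_as_current_span(f"chat.completions {self.ai_model_id}") as span:
                    span.set_attribute("gen_ai.system", model_provider)
                    span.set_attribute("gen_ai.operation.name", "chat.completions")
                    span.set_attribute("gen_ai.request.model", self.ai_model_id)
                    responses = await func(self, *args, **kwargs)
                    if responses:
                        span.set_attribute("gen_ai.response.finish_reason", str(responses[0].finish_reason))
                    return responses

            return wrapper

        return decorator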
@@ -9,9 +9,9 @@

 from semantic_kernel.connectors.ai.open_ai.services.open_ai_handler import OpenAIHandler
 from semantic_kernel.connectors.ai.open_ai.services.open_ai_model_types import OpenAIModelTypes
-from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent
 from semantic_kernel.const import USER_AGENT
 from semantic_kernel.exceptions import ServiceInitializationError
+from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent

 logger: logging.Logger = logging.getLogger(__name__)
@@ -3,7 +3,7 @@
 import logging
 import sys
 from collections.abc import AsyncGenerator
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, ClassVar

 if sys.version_info >= (3, 12):
     from typing import override  # pragma: no cover
@@ -26,6 +26,7 @@
 from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase
 from semantic_kernel.contents.streaming_text_content import StreamingTextContent
 from semantic_kernel.contents.text_content import TextContent
+from semantic_kernel.utils.telemetry.decorators import trace_text_completion

 if TYPE_CHECKING:
     from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
@@ -34,11 +35,14 @@

 class OpenAITextCompletionBase(OpenAIHandler, TextCompletionClientBase):
+    MODEL_PROVIDER_NAME: ClassVar[str] = "openai"
+
     @override
     def get_prompt_execution_settings_class(self) -> type["PromptExecutionSettings"]:
         return OpenAITextPromptExecutionSettings

     @override
+    @trace_text_completion(MODEL_PROVIDER_NAME)
     async def get_text_contents(
         self,
         prompt: str,
@@ -40,7 +40,7 @@ async def get_text_content(self, prompt: str, settings: "PromptExecutionSettings
         Returns:
             TextContent: A string or list of strings representing the response(s) from the LLM.
         """
-        result = await self.get_text_contents(prompt, settings)
+        result = await self.get_text_contents(prompt=prompt, settings=settings)
         if result:
             return result[0]
         # this should not happen, should error out before returning an empty list
@@ -5,9 +5,9 @@
 import aiohttp

 from semantic_kernel.connectors.memory.astradb.utils import AsyncSession
-from semantic_kernel.connectors.telemetry import APP_INFO
 from semantic_kernel.exceptions import ServiceResponseException
 from semantic_kernel.utils.experimental_decorator import experimental_class
+from semantic_kernel.utils.telemetry.user_agent import APP_INFO

 ASTRA_CALLER_IDENTITY: str
 SEMANTIC_KERNEL_VERSION = APP_INFO.get("Semantic-Kernel-Version")
@@ -28,7 +28,7 @@ class OpenAPIFunctionExecutionParameters(KernelBaseModel):

     def model_post_init(self, __context: Any) -> None:
         """Post initialization method for the model."""
-        from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT
+        from semantic_kernel.utils.telemetry.user_agent import HTTP_USER_AGENT

         if self.server_url_override:
             parsed_url = urlparse(self.server_url_override)
@@ -17,10 +17,10 @@
 )
 from semantic_kernel.connectors.openapi_plugin.models.rest_api_operation_payload import RestApiOperationPayload
 from semantic_kernel.connectors.openapi_plugin.models.rest_api_operation_run_options import RestApiOperationRunOptions
-from semantic_kernel.connectors.telemetry import APP_INFO, prepend_semantic_kernel_to_user_agent
 from semantic_kernel.exceptions.function_exceptions import FunctionExecutionException
 from semantic_kernel.functions.kernel_arguments import KernelArguments
 from semantic_kernel.utils.experimental_decorator import experimental_class
+from semantic_kernel.utils.telemetry.user_agent import APP_INFO, prepend_semantic_kernel_to_user_agent

 logger: logging.Logger = logging.getLogger(__name__)
@@ -6,8 +6,8 @@

 from httpx import AsyncClient, HTTPStatusError, RequestError

-from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT
 from semantic_kernel.exceptions import ServiceInvalidRequestError
+from semantic_kernel.utils.telemetry.user_agent import HTTP_USER_AGENT

 logger: logging.Logger = logging.getLogger(__name__)
@@ -10,7 +10,6 @@
 from httpx import AsyncClient, HTTPStatusError
 from pydantic import ValidationError

-from semantic_kernel.connectors.telemetry import HTTP_USER_AGENT, version_info
 from semantic_kernel.const import USER_AGENT
 from semantic_kernel.core_plugins.sessions_python_tool.sessions_python_settings import (
     ACASessionsSettings,
@@ -20,6 +19,7 @@
 from semantic_kernel.exceptions.function_exceptions import FunctionExecutionException, FunctionInitializationError
 from semantic_kernel.functions.kernel_function_decorator import kernel_function
 from semantic_kernel.kernel_pydantic import HttpsUrl, KernelBaseModel
+from semantic_kernel.utils.telemetry.user_agent import HTTP_USER_AGENT, version_info

 logger = logging.getLogger(__name__)
@@ -187,7 +187,8 @@ async def _invoke_internal(self, context: FunctionInvocationContext) -> None:
         if isinstance(prompt_render_result.ai_service, TextCompletionClientBase):
             try:
                 texts = await prompt_render_result.ai_service.get_text_contents(
-                    unescape(prompt_render_result.rendered_prompt), prompt_render_result.execution_settings
+                    prompt=unescape(prompt_render_result.rendered_prompt),
+                    settings=prompt_render_result.execution_settings,
                 )
             except Exception as exc:
                 raise FunctionExecutionException(f"Error occurred while invoking function {self.name}: {exc}") from exc
Empty file.
28 changes: 28 additions & 0 deletions python/semantic_kernel/utils/telemetry/const.py
@@ -0,0 +1,28 @@
# Copyright (c) Microsoft. All rights reserved.
#
# Constants for tracing activities with semantic conventions.

# Activity tags
SYSTEM = "gen_ai.system"
OPERATION = "gen_ai.operation.name"
CHAT_COMPLETION_OPERATION = "chat.completions"
TEXT_COMPLETION_OPERATION = "text.completions"
MODEL = "gen_ai.request.model"
MAX_TOKENS = "gen_ai.request.max_tokens" # nosec
TEMPERATURE = "gen_ai.request.temperature"
TOP_P = "gen_ai.request.top_p"
RESPONSE_ID = "gen_ai.response.id"
FINISH_REASON = "gen_ai.response.finish_reason"
PROMPT_TOKENS = "gen_ai.response.prompt_tokens" # nosec
COMPLETION_TOKENS = "gen_ai.response.completion_tokens" # nosec
ADDRESS = "server.address"
PORT = "server.port"
ERROR_TYPE = "error.type"

# Activity events
PROMPT_EVENT = "gen_ai.content.prompt"
COMPLETION_EVENT = "gen_ai.content.completion"

# Activity event attributes
PROMPT_EVENT_PROMPT = "gen_ai.prompt"
COMPLETION_EVENT_COMPLETION = "gen_ai.completion"
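
For reference, a small sketch of how these constants attach to a span; the span name and model are illustrative, and content events like the one below are typically gated behind an opt-in because they can carry sensitive data:

    from opentelemetry import trace

    from semantic_kernel.utils.telemetry.const import (
        CHAT_COMPLETION_OPERATION,
        COMPLETION_EVENT,
        COMPLETION_EVENT_COMPLETION,
        MODEL,
        OPERATION,
        SYSTEM,
    )

    tracer = trace.get_tracer(__name__)

    with tracer.start_as_current_span(f"{CHAT_COMPLETION_OPERATION} gpt-4") as span:
        span.set_attribute(SYSTEM, "openai")
        span.set_attribute(OPERATION, CHAT_COMPLETION_OPERATION)
        span.set_attribute(MODEL, "gpt-4")
        span.add_event(COMPLETION_EVENT, {COMPLETION_EVENT_COMPLETION: '[{"role": "assistant"}]'})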