Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Release: Dspy Enhancement #359

Merged
merged 7 commits into from
Sep 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions src/examples/dspy_example/QA_basic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

# Pull API keys and langtrace configuration from a local .env file.
load_dotenv()

# Trace only the dspy and anthropic integrations; everything else stays off.
langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# Use Anthropic's Claude as the language model backing dspy.
claude_lm = dspy.Claude()
dspy.settings.configure(lm=claude_lm)

# Declare an inline "question -> answer" signature and run one prediction.
qa_predictor = dspy.Predict("question -> answer")
result = qa_predictor(
    question="who scored the final goal in football world cup finals in 2014?"
)

print(result.answer)
28 changes: 28 additions & 0 deletions src/examples/dspy_example/QA_basic_with_chain_of_thought.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

# Load environment variables (API keys, langtrace settings) from .env.
load_dotenv()

# Keep only the dspy and anthropic instrumentations enabled for tracing.
langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# Configure Anthropic's Claude as dspy's language model.
claude_lm = dspy.Claude()
dspy.settings.configure(lm=claude_lm)


# Signature describing the QA task; the docstring and field descriptions
# are consumed by dspy as prompt instructions.
class BasicQA(dspy.Signature):
    """Given a question, generate the answer."""

    question = dspy.InputField(desc="User's question")
    answer = dspy.OutputField(desc="often between 1 and 5 words")


# ChainOfThought makes the model reason step by step before answering.
cot_predictor = dspy.ChainOfThought(BasicQA)
result = cot_predictor(
    question="Who provided the assist for the final goal in the 2014 FIFA World Cup final?"
)

print(result.answer)
30 changes: 30 additions & 0 deletions src/examples/dspy_example/QA_basic_with_signature.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
import dspy
from langtrace_python_sdk import langtrace
from dotenv import load_dotenv

# Read API keys and langtrace configuration from the local .env file.
load_dotenv()

# Enable tracing for dspy and anthropic only.
langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# Back dspy with Anthropic's Claude model.
claude_lm = dspy.Claude()
dspy.settings.configure(lm=claude_lm)


# Signature for short factoid QA; the docstring and field descriptions are
# used by dspy to build the prompt, so they are part of runtime behavior.
class BasicQA(dspy.Signature):
    """Answer questions with short factoid answers."""

    question = dspy.InputField(
        desc="A question that can be answered with a short factoid answer"
    )
    answer = dspy.OutputField(desc="often between 1 and 5 words")


# Plain Predict (no reasoning trace) over the typed signature.
qa_predictor = dspy.Predict(BasicQA)
result = qa_predictor(
    question="Sarah has 5 apples. She buys 7 more apples from the store. How many apples does Sarah have now?"
)

print(result.answer)
41 changes: 41 additions & 0 deletions src/examples/dspy_example/QA_multi_step_with_chain_of_thought.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import dspy
from langtrace_python_sdk import langtrace, with_langtrace_root_span
from dotenv import load_dotenv

# Load API keys and langtrace configuration from a local .env file.
load_dotenv()

# Trace only the dspy and anthropic instrumentations.
langtrace.init(disable_instrumentations={"all_except": ["dspy", "anthropic"]})

# configure the language model to be used by dspy
llm = dspy.Claude()
dspy.settings.configure(lm=llm)


# create a signature for basic question answering
class BasicQA(dspy.Signature):
    """Given a question, generate the answer."""

    question = dspy.InputField(desc="User's question")
    answer = dspy.OutputField(desc="often between 1 and 5 words")


class DoubleChainOfThought(dspy.Module):
    """Answer a multi-step question with two chained chain-of-thought calls:
    the first produces step-by-step reasoning, the second condenses that
    reasoning into a one-word answer.
    """

    def __init__(self):
        # Fix: dspy.Module subclasses must initialize the base class so its
        # internal bookkeeping (parameter tracking, compiled state) is set up.
        super().__init__()
        self.cot1 = dspy.ChainOfThought("question -> step_by_step_thought")
        self.cot2 = dspy.ChainOfThought("question, thought -> one_word_answer")

    def forward(self, question):
        # Stage 1: free-form reasoning; Stage 2: distill it to one word.
        thought = self.cot1(question=question).step_by_step_thought
        answer = self.cot2(question=question, thought=thought).one_word_answer
        return dspy.Prediction(thought=thought, answer=answer)


@with_langtrace_root_span(name="Double Chain Of thought")
def main():
    multi_step_question = "what is the capital of the birth state of the person who provided the assist for the Mario Gotze's in football world cup in 2014?"
    double_cot = DoubleChainOfThought()
    result = double_cot(question=multi_step_question)
    print(result)


if __name__ == "__main__":
    # Guard so importing this example module does not trigger an LLM call.
    main()
2 changes: 1 addition & 1 deletion src/langtrace_python_sdk/extensions/langtrace_exporter.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
url=f"{self.api_host}",
data=json.dumps(data),
headers=headers,
timeout=20,
timeout=40,
)

if not response.ok:
Expand Down
15 changes: 5 additions & 10 deletions src/langtrace_python_sdk/instrumentation/anthropic/patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,8 @@
limitations under the License.
"""

from typing import Any, Callable, Dict, List, Optional, Iterator, TypedDict, Union
from langtrace.trace_attributes import Event, SpanAttributes, LLMSpanAttributes
from langtrace_python_sdk.utils import set_span_attribute
from langtrace_python_sdk.utils.silently_fail import silently_fail
from typing import Any, Callable, List, Iterator, Union
from langtrace.trace_attributes import SpanAttributes, LLMSpanAttributes
import json

from langtrace_python_sdk.utils.llm import (
Expand All @@ -28,6 +26,7 @@
get_llm_url,
get_span_name,
set_event_completion,
set_span_attributes,
set_usage_attributes,
set_span_attribute,
)
Expand All @@ -39,8 +38,6 @@
StreamingResult,
ResultType,
MessagesCreateKwargs,
ContentItem,
Usage,
)


Expand All @@ -62,22 +59,20 @@ def traced_method(
prompts = [{"role": "system", "content": system}] + kwargs.get(
"messages", []
)
extraAttributes = get_extra_attributes()
span_attributes = {
**get_langtrace_attributes(version, service_provider),
**get_llm_request_attributes(kwargs, prompts=prompts),
**get_llm_url(instance),
SpanAttributes.LLM_PATH: APIS["MESSAGES_CREATE"]["ENDPOINT"],
**extraAttributes, # type: ignore
**get_extra_attributes(),
}

attributes = LLMSpanAttributes(**span_attributes)

span = tracer.start_span(
name=get_span_name(APIS["MESSAGES_CREATE"]["METHOD"]), kind=SpanKind.CLIENT
)
for field, value in attributes.model_dump(by_alias=True).items():
set_span_attribute(span, field, value)
set_span_attributes(span, attributes)
try:
# Attempt to call the original method
result = wrapped(*args, **kwargs)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,9 @@ def instrumentation_dependencies(self):
return ["autogen >= 0.1.0"]

def _instrument(self, **kwargs):
print("Instrumneting autogen")
tracer_provider = kwargs.get("tracer_provider")
tracer = get_tracer(__name__, "", tracer_provider)
version = v("autogen")
# conversable_agent.intiate_chat
# conversable_agent.register_function
# agent.Agent
# AgentCreation
# Tools --> Register_for_llm, register_for_execution, register_for_function
try:
_W(
module="autogen.agentchat.conversable_agent",
Expand Down
103 changes: 63 additions & 40 deletions src/langtrace_python_sdk/instrumentation/dspy/patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,12 @@
from importlib_metadata import version as v
from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
from langtrace_python_sdk.utils import set_span_attribute
from langtrace_python_sdk.utils.llm import (
get_extra_attributes,
get_langtrace_attributes,
get_span_name,
set_span_attributes,
)
from langtrace_python_sdk.utils.silently_fail import silently_fail
from langtrace_python_sdk.constants.instrumentation.common import (
LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
Expand Down Expand Up @@ -39,25 +45,29 @@ def traced_method(wrapped, instance, args, kwargs):
),
}
span_attributes["dspy.optimizer.module.prog"] = json.dumps(prog)
if hasattr(instance, 'metric'):
span_attributes["dspy.optimizer.metric"] = getattr(instance, 'metric').__name__
if hasattr(instance, "metric"):
span_attributes["dspy.optimizer.metric"] = getattr(
instance, "metric"
).__name__
if kwargs.get("trainset") and len(kwargs.get("trainset")) > 0:
span_attributes["dspy.optimizer.trainset"] = str(kwargs.get("trainset"))
config = {}
if hasattr(instance, 'metric_threshold'):
config["metric_threshold"] = getattr(instance, 'metric_threshold')
if hasattr(instance, 'teacher_settings'):
config["teacher_settings"] = getattr(instance, 'teacher_settings')
if hasattr(instance, 'max_bootstrapped_demos'):
config["max_bootstrapped_demos"] = getattr(instance, 'max_bootstrapped_demos')
if hasattr(instance, 'max_labeled_demos'):
config["max_labeled_demos"] = getattr(instance, 'max_labeled_demos')
if hasattr(instance, 'max_rounds'):
config["max_rounds"] = getattr(instance, 'max_rounds')
if hasattr(instance, 'max_steps'):
config["max_errors"] = getattr(instance, 'max_errors')
if hasattr(instance, 'error_count'):
config["error_count"] = getattr(instance, 'error_count')
if hasattr(instance, "metric_threshold"):
config["metric_threshold"] = getattr(instance, "metric_threshold")
if hasattr(instance, "teacher_settings"):
config["teacher_settings"] = getattr(instance, "teacher_settings")
if hasattr(instance, "max_bootstrapped_demos"):
config["max_bootstrapped_demos"] = getattr(
instance, "max_bootstrapped_demos"
)
if hasattr(instance, "max_labeled_demos"):
config["max_labeled_demos"] = getattr(instance, "max_labeled_demos")
if hasattr(instance, "max_rounds"):
config["max_rounds"] = getattr(instance, "max_rounds")
if hasattr(instance, "max_steps"):
config["max_errors"] = getattr(instance, "max_errors")
if hasattr(instance, "error_count"):
config["error_count"] = getattr(instance, "error_count")
if config and len(config) > 0:
span_attributes["dspy.optimizer.config"] = json.dumps(config)

Expand Down Expand Up @@ -96,37 +106,36 @@ def patch_signature(operation_name, version, tracer):
def traced_method(wrapped, instance, args, kwargs):

service_provider = SERVICE_PROVIDERS["DSPY"]
extra_attributes = baggage.get_baggage(LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY)
span_attributes = {
"langtrace.sdk.name": "langtrace-python-sdk",
"langtrace.service.name": service_provider,
"langtrace.service.type": "framework",
"langtrace.service.version": version,
"langtrace.version": v(LANGTRACE_SDK_NAME),
**(extra_attributes if extra_attributes is not None else {}),
**get_langtrace_attributes(
service_provider=service_provider,
version=version,
vendor_type="framework",
),
**get_extra_attributes(),
}

# passed operation name
opname = operation_name
if extra_attributes is not None and "langtrace.span.name" in extra_attributes:
# append the operation name to the span name
opname = f"{operation_name}-{extra_attributes['langtrace.span.name']}"

if instance.__class__.__name__:
span_attributes["dspy.signature.name"] = instance.__class__.__name__
span_attributes["dspy.signature"] = str(instance)
span_attributes["dspy.signature"] = str(instance.signature)

if kwargs and len(kwargs) > 0:
span_attributes["dspy.signature.args"] = str(kwargs)

attributes = FrameworkSpanAttributes(**span_attributes)
with tracer.start_as_current_span(opname, kind=SpanKind.CLIENT) as span:
_set_input_attributes(span, kwargs, attributes)
with tracer.start_as_current_span(
get_span_name(operation_name=operation_name), kind=SpanKind.CLIENT
) as span:
set_span_attributes(span, attributes)

try:
result = wrapped(*args, **kwargs)
if result:
set_span_attribute(span, "dspy.signature.result", str(result))
set_span_attribute(
span,
"dspy.signature.result",
json.dumps(result.toDict()),
)
span.set_status(Status(StatusCode.OK))

span.end()
Expand Down Expand Up @@ -168,27 +177,41 @@ def traced_method(wrapped, instance, args, kwargs):
if hasattr(instance, "devset"):
span_attributes["dspy.evaluate.devset"] = str(getattr(instance, "devset"))
if hasattr(instance, "trainset"):
span_attributes["dspy.evaluate.display"] = str(getattr(instance, "trainset"))
span_attributes["dspy.evaluate.display"] = str(
getattr(instance, "trainset")
)
if hasattr(instance, "num_threads"):
span_attributes["dspy.evaluate.num_threads"] = str(getattr(instance, "num_threads"))
span_attributes["dspy.evaluate.num_threads"] = str(
getattr(instance, "num_threads")
)
if hasattr(instance, "return_outputs"):
span_attributes["dspy.evaluate.return_outputs"] = str(
getattr(instance, "return_outputs")
)
if hasattr(instance, "display_table"):
span_attributes["dspy.evaluate.display_table"] = str(getattr(instance, "display_table"))
span_attributes["dspy.evaluate.display_table"] = str(
getattr(instance, "display_table")
)
if hasattr(instance, "display_progress"):
span_attributes["dspy.evaluate.display_progress"] = str(
getattr(instance, "display_progress")
)
if hasattr(instance, "metric"):
span_attributes["dspy.evaluate.metric"] = getattr(instance, "metric").__name__
span_attributes["dspy.evaluate.metric"] = getattr(
instance, "metric"
).__name__
if hasattr(instance, "error_count"):
span_attributes["dspy.evaluate.error_count"] = str(getattr(instance, "error_count"))
span_attributes["dspy.evaluate.error_count"] = str(
getattr(instance, "error_count")
)
if hasattr(instance, "error_lock"):
span_attributes["dspy.evaluate.error_lock"] = str(getattr(instance, "error_lock"))
span_attributes["dspy.evaluate.error_lock"] = str(
getattr(instance, "error_lock")
)
if hasattr(instance, "max_errors"):
span_attributes["dspy.evaluate.max_errors"] = str(getattr(instance, "max_errors"))
span_attributes["dspy.evaluate.max_errors"] = str(
getattr(instance, "max_errors")
)
if args and len(args) > 0:
span_attributes["dspy.evaluate.args"] = str(args)

Expand Down
7 changes: 4 additions & 3 deletions src/langtrace_python_sdk/utils/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,14 @@
from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
from langtrace_python_sdk.utils import set_span_attribute
from langtrace_python_sdk.types import NOT_GIVEN
from tiktoken import get_encoding
from tiktoken import get_encoding, list_encoding_names

from langtrace_python_sdk.constants.instrumentation.common import (
LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
TIKTOKEN_MODEL_MAPPING,
)
from langtrace_python_sdk.constants.instrumentation.openai import OPENAI_COST_TABLE
from langtrace.trace_attributes import SpanAttributes, Event
from langtrace.trace_attributes import SpanAttributes
from importlib_metadata import version as v
import json
from opentelemetry import baggage
Expand Down Expand Up @@ -142,7 +141,9 @@ def get_llm_request_attributes(kwargs, prompts=None, model=None, operation_name=
SpanAttributes.LLM_FREQUENCY_PENALTY: kwargs.get("frequency_penalty"),
SpanAttributes.LLM_REQUEST_SEED: kwargs.get("seed"),
SpanAttributes.LLM_TOOLS: json.dumps(tools) if tools else None,
SpanAttributes.LLM_TOOL_CHOICE: json.dumps(tool_choice) if tool_choice else None,
SpanAttributes.LLM_TOOL_CHOICE: (
json.dumps(tool_choice) if tool_choice else None
),
SpanAttributes.LLM_REQUEST_LOGPROPS: kwargs.get("logprobs"),
SpanAttributes.LLM_REQUEST_LOGITBIAS: kwargs.get("logit_bias"),
SpanAttributes.LLM_REQUEST_TOP_LOGPROPS: kwargs.get("top_logprobs"),
Expand Down
2 changes: 1 addition & 1 deletion src/langtrace_python_sdk/version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "2.3.18"
__version__ = "2.3.19"
Loading