diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py
index 06923c501b..32581a60db 100644
--- a/sentry_sdk/client.py
+++ b/sentry_sdk/client.py
@@ -357,6 +357,8 @@ def capture_event(
         if not self._should_capture(event, hint, scope):
             return None
 
+        profile = event.pop("profile", None)
+
         event_opt = self._prepare_event(event, hint, scope)
         if event_opt is None:
             return None
@@ -409,14 +411,8 @@ def capture_event(
             envelope = Envelope(headers=headers)
 
             if is_transaction:
-                if "profile" in event_opt:
-                    event_opt["profile"]["environment"] = event_opt.get("environment")
-                    event_opt["profile"]["release"] = event_opt.get("release", "")
-                    event_opt["profile"]["timestamp"] = event_opt.get("timestamp", "")
-                    event_opt["profile"]["transactions"][0]["id"] = event_opt[
-                        "event_id"
-                    ]
-                    envelope.add_profile(event_opt.pop("profile"))
+                if profile is not None:
+                    envelope.add_profile(profile.to_json(event_opt))
                 envelope.add_transaction(event_opt)
             else:
                 envelope.add_event(event_opt)
diff --git a/sentry_sdk/profiler.py b/sentry_sdk/profiler.py
index 45ef706815..86cf1bf91d 100644
--- a/sentry_sdk/profiler.py
+++ b/sentry_sdk/profiler.py
@@ -56,7 +56,7 @@ def setup_profiler(options):
     `buffer_secs` determines the max time a sample will be buffered for
     `frequency` determines the number of samples to take per second (Hz)
     """
-    buffer_secs = 60
+    buffer_secs = 30
     frequency = 101
 
     if not PY33:
@@ -163,6 +163,8 @@ def __init__(self, transaction, hub=None):
         self._start_ns = None  # type: Optional[int]
         self._stop_ns = None  # type: Optional[int]
 
+        transaction._profile = self
+
     def __enter__(self):
         # type: () -> None
         assert _scheduler is not None
@@ -175,23 +177,19 @@ def __exit__(self, ty, value, tb):
         _scheduler.stop_profiling()
         self._stop_ns = nanosecond_time()
 
-        # Now that we've collected all the data, attach it to the
-        # transaction so that it can be sent in the same envelope
-        self.transaction._profile = self.to_json()
-
-    def to_json(self):
-        # type: () -> Dict[str, Any]
+    def to_json(self, event_opt):
+        # type: (Any) -> Dict[str, Any]
         assert _sample_buffer is not None
         assert self._start_ns is not None
         assert self._stop_ns is not None
 
         return {
-            "environment": None,  # Gets added in client.py
+            "environment": event_opt.get("environment"),
             "event_id": uuid.uuid4().hex,
             "platform": "python",
             "profile": _sample_buffer.slice_profile(self._start_ns, self._stop_ns),
-            "release": None,  # Gets added in client.py
-            "timestamp": None,  # Gets added in client.py
+            "release": event_opt.get("release", ""),
+            "timestamp": event_opt["timestamp"],
             "version": "1",
             "device": {
                 "architecture": platform.machine(),
@@ -206,7 +204,7 @@ def to_json(self):
             },
             "transactions": [
                 {
-                    "id": None,  # Gets added in client.py
+                    "id": event_opt["event_id"],
                     "name": self.transaction.name,
                     # we start the transaction before the profile and this is
                     # the transaction start time relative to the profile, so we
@@ -304,7 +302,22 @@ def slice_profile(self, start_ns, stop_ns):
                 sample["stack_id"] = stacks[current_stack]
                 samples.append(sample)
 
-        return {"stacks": stacks_list, "frames": frames_list, "samples": samples}
+        # This collects the thread metadata at the end of a profile. Doing it
+        # this way means that any threads that terminate before the profile ends
+        # will not have any metadata associated with it.
+        thread_metadata = {
+            str(thread.ident): {
+                "name": thread.name,
+            }
+            for thread in threading.enumerate()
+        }
+
+        return {
+            "stacks": stacks_list,
+            "frames": frames_list,
+            "samples": samples,
+            "thread_metadata": thread_metadata,
+        }
 
 
 class _Scheduler(object):
@@ -344,6 +357,7 @@ class _ThreadScheduler(_Scheduler):
     """
 
     mode = "thread"
+    name = None  # type: Optional[str]
 
     def __init__(self, frequency):
         # type: (int) -> None
@@ -368,7 +382,7 @@ def start_profiling(self):
             # make sure the thread is a daemon here otherwise this
             # can keep the application running after other threads
             # have exited
-            thread = threading.Thread(target=self.run, daemon=True)
+            thread = threading.Thread(name=self.name, target=self.run, daemon=True)
             thread.start()
             return True
         return False
@@ -394,6 +408,7 @@ class _SleepScheduler(_ThreadScheduler):
     """
 
     mode = "sleep"
+    name = "sentry.profiler.SleepScheduler"
 
     def run(self):
         # type: () -> None
@@ -424,6 +439,7 @@ class _EventScheduler(_ThreadScheduler):
     """
 
     mode = "event"
+    name = "sentry.profiler.EventScheduler"
 
     def run(self):
         # type: () -> None
diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py
index 3bef18bc35..aacb3a5bb3 100644
--- a/sentry_sdk/tracing.py
+++ b/sentry_sdk/tracing.py
@@ -21,7 +21,8 @@
     from typing import Tuple
     from typing import Iterator
 
-    from sentry_sdk._types import SamplingContext, MeasurementUnit
+    import sentry_sdk.profiler
+    from sentry_sdk._types import Event, SamplingContext, MeasurementUnit
 
 
 # Transaction source
@@ -579,7 +580,7 @@ def __init__(
         self._sentry_tracestate = sentry_tracestate
         self._third_party_tracestate = third_party_tracestate
         self._measurements = {}  # type: Dict[str, Any]
-        self._profile = None  # type: Optional[Dict[str, Any]]
+        self._profile = None  # type: Optional[sentry_sdk.profiler.Profile]
         self._baggage = baggage
         # for profiling, we want to know on which thread a transaction is started
        # to accurately show the active thread in the UI
@@ -675,7 +676,7 @@ def finish(self, hub=None):
             "timestamp": self.timestamp,
             "start_timestamp": self.start_timestamp,
             "spans": finished_spans,
-        }  # type: Event
+        }  # type: Event
 
         if hub.client is not None and self._profile is not None:
             event["profile"] = self._profile
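
For reviewers, a minimal standalone sketch of the thread-metadata collection that the new `slice_profile` return value includes. It is not part of the SDK; the helper name `collect_thread_metadata` is hypothetical and only mirrors the dict comprehension added in the diff above.

import threading


def collect_thread_metadata():
    # type: () -> dict
    # Map each live thread's ident to its name, as the diff does at the end
    # of slice_profile. threading.enumerate() only returns threads that are
    # still alive, so threads that terminated before the profile ended carry
    # no metadata, matching the comment in the diff.
    return {
        str(thread.ident): {"name": thread.name}
        for thread in threading.enumerate()
    }


if __name__ == "__main__":
    # With the profiler running, the sampler thread would show up here under
    # its new name, e.g. "sentry.profiler.SleepScheduler".
    print(collect_thread_metadata())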