feat(uptime): Add function to count number of uptime monitors active in an organization (#74777)

This adds a function to count active uptime monitors for an org. Since
this will be called from high-volume areas, it also adds caching around
the function.

We want the cache to be actively invalidated when the count changes in
the org, so we also listen to `post_save` and `post_delete` signals.
This is a pattern we've used a few times in Sentry, so I also
generalized this cache so that we don't have to write it by hand every
time.
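
For context, a minimal sketch of the manual version of that pattern, i.e. what previously had to be written by hand for each cached query; the receiver and cache key below are illustrative assumptions, not code from the Sentry codebase:

from django.core.cache import cache
from django.db.models.signals import post_delete, post_save

from sentry.uptime.models import ProjectUptimeSubscription


def _monitor_count_cache_key(org_id: int) -> str:
    # Hypothetical cache key; the real helper derives keys from the function name and args.
    return f"uptime:active_monitor_count:{org_id}"


def _invalidate_monitor_count(sender, instance, **kwargs):
    # Drop the cached count whenever a monitor row for this org is created, updated, or deleted.
    cache.delete(_monitor_count_cache_key(instance.project.organization_id))


# Wiring these signals by hand for every cached query is the boilerplate
# that the new `cache_func_for_models` decorator removes.
post_save.connect(_invalidate_monitor_count, sender=ProjectUptimeSubscription, weak=False)
post_delete.connect(_invalidate_monitor_count, sender=ProjectUptimeSubscription, weak=False)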
wedamija authored and Christinarlong committed Jul 26, 2024
1 parent 1668b71 commit 2f7da79
Showing 4 changed files with 199 additions and 0 deletions.
11 changes: 11 additions & 0 deletions src/sentry/uptime/models.py
@@ -10,8 +10,10 @@
from sentry.db.models import DefaultFieldsModel, FlexibleForeignKey, region_silo_model
from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey
from sentry.db.models.manager.base import BaseManager
from sentry.models.organization import Organization
from sentry.remote_subscriptions.models import BaseRemoteSubscription
from sentry.types.actor import Actor
from sentry.utils.function_cache import cache_func_for_models


@region_silo_model
@@ -109,3 +111,12 @@ class Meta:
@property
def owner(self) -> Actor | None:
return Actor.from_id(user_id=self.owner_user_id, team_id=self.owner_team_id)


def get_org_from_uptime_monitor(uptime_monitor: ProjectUptimeSubscription) -> tuple[Organization]:
return (uptime_monitor.project.organization,)


@cache_func_for_models([(ProjectUptimeSubscription, get_org_from_uptime_monitor)])
def get_active_monitor_count_for_org(organization: Organization) -> int:
return ProjectUptimeSubscription.objects.filter(project__organization=organization).count()
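
A quick usage sketch for the new helper; the quota-check call site below is an illustrative assumption, not part of this commit. The first call computes and caches the count, and creating or deleting a ProjectUptimeSubscription recalculates the cached value via the decorator's signal receivers.

from sentry.uptime.models import get_active_monitor_count_for_org


def can_add_uptime_monitor(organization, limit: int = 100) -> bool:
    # Hypothetical quota check; `limit` is an assumed value.
    return get_active_monitor_count_for_org(organization) < limit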
91 changes: 91 additions & 0 deletions src/sentry/utils/function_cache.py
@@ -0,0 +1,91 @@
import uuid
from collections.abc import Callable
from datetime import timedelta
from decimal import Decimal
from functools import partial
from typing import Any, ParamSpec, TypeVar

from django.core.cache import cache
from django.db import models
from django.db.models.signals import post_delete, post_save

from sentry.utils.hashlib import md5_text

P = ParamSpec("P")
R = TypeVar("R")
S = TypeVar("S", bound=models.Model)


def arg_to_hashable(arg: Any):
if isinstance(arg, (int, float, str, Decimal, uuid.UUID)):
return arg
elif isinstance(arg, models.Model):
return f"{arg._meta.label}:{arg.pk}"
else:
raise ValueError(
"Can only cache functions whose parameters can be hashed in a consistent way"
)


def cache_key_for_cached_func(cached_func: Callable[P, R], *args):
base_cache_key = f"query_cache:{md5_text(cached_func.__qualname__).hexdigest()}"
vals_to_hash = [arg_to_hashable(arg) for arg in args]
return f"{base_cache_key}:{md5_text(*vals_to_hash).hexdigest()}"


def clear_cache_for_cached_func(
cached_func: Callable[P, R], arg_getter, recalculate: bool, instance: S, *args, **kwargs
):
args = arg_getter(instance)
cache_key = cache_key_for_cached_func(cached_func, *args)
if recalculate:
cache.set(cache_key, cached_func(*args))
else:
cache.delete(cache_key)


def cache_func_for_models(
cache_invalidators: list[tuple[type[S], Callable[[S], P.args]]],
cache_ttl: None | timedelta = None,
recalculate: bool = True,
):
"""
Decorator that caches the result of a function, and actively invalidates the result when related models are
created/updated/deleted. To use this, decorate a function with this decorator and pass a list of `cache_invalidators`
that tell us how to invalidate the cache.
Each entry in `cache_invalidators` is a tuple of (<Model>, <func>). In more detail:
- Model is the model we'll listen to for updates. When this model fires a `post_save` or `post_delete` signal
we'll invalidate the cache.
- Func is a function that accepts an instance of `Model` and returns a tuple of values that can be used to call
the cached function. These values are used to invalidate the cache.
This only works with functions that are called using args.
If `recalculate` is `True`, we'll re-run the decorated function and overwrite the cached value. If `False`, we'll
just remove the value from the cache.
"""
if cache_ttl is None:
cache_ttl = timedelta(days=7)

def cached_query_func(func_to_cache: Callable[P, R]):
def inner(*args: P.args, **kwargs: P.kwargs) -> R:
if kwargs:
raise ValueError("Can't cache values using kwargs")

cache_key = cache_key_for_cached_func(func_to_cache, *args)
cached_val = cache.get(cache_key, None)
if cached_val is None:
cached_val = func_to_cache(*args)
cache.set(cache_key, cached_val, timeout=cache_ttl.total_seconds())
return cached_val

for model, arg_getter in cache_invalidators:
clear_cache_callable = partial(
clear_cache_for_cached_func, func_to_cache, arg_getter, recalculate
)
post_save.connect(clear_cache_callable, sender=model, weak=False)
post_delete.connect(clear_cache_callable, sender=model, weak=False)

return inner

return cached_query_func
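
To illustrate the contract described in the docstring, here is a minimal usage sketch; `Book` and `book_count_for_author` are hypothetical names, not part of this commit:

from datetime import timedelta

from django.db import models

from sentry.utils.function_cache import cache_func_for_models


class Book(models.Model):
    # Hypothetical model used only for illustration.
    author_id = models.IntegerField()

    class Meta:
        app_label = "fixtures"


def book_to_args(book: Book) -> tuple[int]:
    # Maps a saved/deleted instance to the args the cached function was called with.
    return (book.author_id,)


@cache_func_for_models(
    [(Book, book_to_args)],
    cache_ttl=timedelta(hours=1),
    recalculate=False,  # drop the cached value instead of recomputing it eagerly
)
def book_count_for_author(author_id: int) -> int:
    return Book.objects.filter(author_id=author_id).count()


# book_count_for_author(123) hits the database once, then serves from the cache
# until a Book row with author_id=123 is saved or deleted.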
17 changes: 17 additions & 0 deletions tests/sentry/uptime/test_models.py
@@ -0,0 +1,17 @@
from sentry.testutils.cases import UptimeTestCase
from sentry.uptime.models import get_active_monitor_count_for_org


class GetActiveMonitorCountForOrgTest(UptimeTestCase):
def test(self):
assert get_active_monitor_count_for_org(self.organization) == 0
self.create_project_uptime_subscription()
assert get_active_monitor_count_for_org(self.organization) == 1
other_sub = self.create_uptime_subscription(url="https://santry.io")
self.create_project_uptime_subscription(uptime_subscription=other_sub)
assert get_active_monitor_count_for_org(self.organization) == 2
other_org = self.create_organization()
other_proj = self.create_project(organization=other_org)
self.create_project_uptime_subscription(uptime_subscription=other_sub, project=other_proj)
assert get_active_monitor_count_for_org(self.organization) == 2
assert get_active_monitor_count_for_org(other_org) == 1
80 changes: 80 additions & 0 deletions tests/sentry/utils/test_function_cache.py
@@ -0,0 +1,80 @@
from unittest.mock import create_autospec

from django.db import models

from sentry.backup.scopes import RelocationScope
from sentry.db.models import region_silo_model
from sentry.testutils.cases import TestCase
from sentry.utils.function_cache import cache_func_for_models


@region_silo_model
class CacheModel(models.Model):
__relocation_scope__ = RelocationScope.Excluded
some_field = models.TextField()

class Meta:
app_label = "fixtures"


def count_func(text_search: str):
return CacheModel.objects.filter(some_field=text_search).count()


def arg_extractor(instance: CacheModel):
return (instance.some_field,)


class CacheFuncForModelsTest(TestCase):
def assert_called_with_count(self, mock_test_func, text_search: str, count: int):
assert (
len([ca for ca in mock_test_func.call_args_list if ca.args[0] == text_search]) == count
)

def test(self):
mock_test_func = create_autospec(count_func)
mock_test_func.side_effect = count_func
decorated_test_func = cache_func_for_models([(CacheModel, arg_extractor)])(mock_test_func)
self.assert_called_with_count(mock_test_func, "test", 0)
assert decorated_test_func("test") == 0
self.assert_called_with_count(mock_test_func, "test", 1)
assert decorated_test_func("test") == 0
self.assert_called_with_count(mock_test_func, "test", 1)

CacheModel.objects.create(some_field="test")
# Since we're actively refetching, the count should go to 2 here
self.assert_called_with_count(mock_test_func, "test", 2)
assert decorated_test_func("test") == 1
self.assert_called_with_count(mock_test_func, "test", 2)
CacheModel.objects.create(some_field="test")
self.assert_called_with_count(mock_test_func, "test", 3)
assert decorated_test_func("test") == 2
self.assert_called_with_count(mock_test_func, "test", 3)
CacheModel.objects.create(some_field="another_val")
self.assert_called_with_count(mock_test_func, "test", 3)
assert decorated_test_func("test") == 2

def test_no_recalculate(self):
mock_test_func = create_autospec(count_func)
mock_test_func.side_effect = count_func
decorated_test_func = cache_func_for_models(
[(CacheModel, arg_extractor)], recalculate=False
)(mock_test_func)
self.assert_called_with_count(mock_test_func, "test", 0)
assert decorated_test_func("test") == 0
self.assert_called_with_count(mock_test_func, "test", 1)

CacheModel.objects.create(some_field="test")
# Since we're not actively refetching, the count should remain the same here
self.assert_called_with_count(mock_test_func, "test", 1)
assert decorated_test_func("test") == 1
self.assert_called_with_count(mock_test_func, "test", 2)
CacheModel.objects.create(some_field="test")
self.assert_called_with_count(mock_test_func, "test", 2)
assert decorated_test_func("test") == 2
self.assert_called_with_count(mock_test_func, "test", 3)
CacheModel.objects.create(some_field="another_val")
self.assert_called_with_count(mock_test_func, "test", 3)
assert decorated_test_func("test") == 2
