Commit 7cb8b4b

Allow configuration of Synapse's cache without using synctl or environment variables (#6391)

hawkowl authored May 11, 2020
1 parent a8580c5 commit 7cb8b4b

Showing 32 changed files with 620 additions and 146 deletions.
1 change: 1 addition & 0 deletions changelog.d/6391.feature
@@ -0,0 +1 @@
Synapse's cache factor can now be configured in `homeserver.yaml` by the `caches.global_factor` setting. Additionally, `caches.per_cache_factors` controls the cache factors for individual caches.
43 changes: 39 additions & 4 deletions docs/sample_config.yaml
@@ -603,6 +603,45 @@ acme:



+## Caching ##
+
+# Caching can be configured through the following options.
+#
+# A cache 'factor' is a multiplier that can be applied to each of
+# Synapse's caches in order to increase or decrease the maximum
+# number of entries that can be stored.
+
+# The number of events to cache in memory. Not affected by
+# caches.global_factor.
+#
+#event_cache_size: 10K
+
+caches:
+   # Controls the global cache factor, which is the default cache factor
+   # for all caches if a specific factor for that cache is not otherwise
+   # set.
+   #
+   # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
+   # variable. Setting by environment variable takes priority over
+   # setting through the config file.
+   #
+   # Defaults to 0.5, which will halve the size of all caches.
+   #
+   #global_factor: 1.0
+
+   # A dictionary of cache name to cache factor for that individual
+   # cache. Overrides the global cache factor for a given cache.
+   #
+   # These can also be set through environment variables composed
+   # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
+   # letters and underscores. Setting by environment variable
+   # takes priority over setting through the config file.
+   # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
+   #
+   per_cache_factors:
+     #get_users_who_share_room_with_user: 2.0


## Database ##

# The 'database' setting defines the database that synapse uses to store all of
@@ -646,10 +685,6 @@ database:
  args:
    database: DATADIR/homeserver.db

-# Number of events to cache in memory.
-#
-#event_cache_size: 10K


## Logging ##

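To make the factor arithmetic above concrete, here is a minimal sketch (with an assumed base limit of 10,000 entries; real caches have different base sizes) of how `global_factor` and `per_cache_factors` combine:

```python
# Minimal sketch of how the settings above combine. The base size and the
# resolution helper are illustrative, not Synapse code.
caches_config = {
    "global_factor": 1.0,
    "per_cache_factors": {"get_users_who_share_room_with_user": 2.0},
}

def effective_size(cache_name, base_size=10000):
    # A per-cache factor overrides the global factor for that cache.
    factor = caches_config["per_cache_factors"].get(
        cache_name, caches_config["global_factor"]
    )
    return int(base_size * factor)

print(effective_size("get_users_who_share_room_with_user"))  # 20000
print(effective_size("token_cache"))  # 10000
```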
4 changes: 2 additions & 2 deletions synapse/api/auth.py
@@ -37,7 +37,7 @@
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.events import EventBase
from synapse.types import StateMap, UserID
-from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
+from synapse.util.caches import register_cache
from synapse.util.caches.lrucache import LruCache
from synapse.util.metrics import Measure

@@ -73,7 +73,7 @@ def __init__(self, hs):
        self.store = hs.get_datastore()
        self.state = hs.get_state_handler()

-        self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000)
+        self.token_cache = LruCache(10000)
        register_cache("cache", "token_cache", self.token_cache)

        self._auth_blocking = AuthBlocking(self.hs)
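The explicit `CACHE_SIZE_FACTOR * 10000` multiplication can be dropped because, after this commit, the configured factor is applied by the cache machinery itself once a cache is registered. A toy illustration of that idea (hypothetical class, not Synapse's `LruCache`):

```python
# Hypothetical factor-aware cache: callers pass only the base size, and the
# configured global factor (default 0.5) is applied internally.
DEFAULT_GLOBAL_FACTOR = 0.5

class FactorAwareCache:
    def __init__(self, base_size, factor=DEFAULT_GLOBAL_FACTOR):
        self.max_size = int(base_size * factor)

token_cache = FactorAwareCache(10000)
print(token_cache.max_size)  # 5000 with the default factor
```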
5 changes: 2 additions & 3 deletions synapse/app/homeserver.py
@@ -69,7 +69,6 @@
from synapse.storage import DataStore
from synapse.storage.engines import IncorrectDatabaseSetup
from synapse.storage.prepare_database import UpgradeDatabaseException
-from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.manhole import manhole
from synapse.util.module_loader import load_module
@@ -516,8 +515,8 @@ def phone_stats_home(hs, stats, stats_process=_stats_process):

    daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
    stats["daily_sent_messages"] = daily_sent_messages
-    stats["cache_factor"] = CACHE_SIZE_FACTOR
-    stats["event_cache_size"] = hs.config.event_cache_size
+    stats["cache_factor"] = hs.config.caches.global_factor
+    stats["event_cache_size"] = hs.config.caches.event_cache_size

#
# Performance statistics
164 changes: 164 additions & 0 deletions synapse/config/cache.py
@@ -0,0 +1,164 @@
# -*- coding: utf-8 -*-
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import Callable, Dict

from ._base import Config, ConfigError

# The prefix for all cache factor-related environment variables
_CACHES = {}
_CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"
_DEFAULT_FACTOR_SIZE = 0.5
_DEFAULT_EVENT_CACHE_SIZE = "10K"


class CacheProperties(object):
    def __init__(self):
        # The default factor size for all caches
        self.default_factor_size = float(
            os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
        )
        self.resize_all_caches_func = None


properties = CacheProperties()


def add_resizable_cache(cache_name: str, cache_resize_callback: Callable):
    """Register a cache whose size can dynamically change

    Args:
        cache_name: A reference to the cache
        cache_resize_callback: A callback function that will be run whenever
            the cache needs to be resized
    """
    _CACHES[cache_name.lower()] = cache_resize_callback

    # Ensure all loaded caches are sized appropriately
    #
    # This method should only run once the config has been read,
    # as it uses values read from it
    if properties.resize_all_caches_func:
        properties.resize_all_caches_func()


class CacheConfig(Config):
    section = "caches"
    _environ = os.environ

    @staticmethod
    def reset():
        """Resets the caches to their defaults. Used for tests."""
        properties.default_factor_size = float(
            os.environ.get(_CACHE_PREFIX, _DEFAULT_FACTOR_SIZE)
        )
        properties.resize_all_caches_func = None
        _CACHES.clear()

    def generate_config_section(self, **kwargs):
        return """\
        ## Caching ##

        # Caching can be configured through the following options.
        #
        # A cache 'factor' is a multiplier that can be applied to each of
        # Synapse's caches in order to increase or decrease the maximum
        # number of entries that can be stored.

        # The number of events to cache in memory. Not affected by
        # caches.global_factor.
        #
        #event_cache_size: 10K

        caches:
           # Controls the global cache factor, which is the default cache factor
           # for all caches if a specific factor for that cache is not otherwise
           # set.
           #
           # This can also be set by the "SYNAPSE_CACHE_FACTOR" environment
           # variable. Setting by environment variable takes priority over
           # setting through the config file.
           #
           # Defaults to 0.5, which will halve the size of all caches.
           #
           #global_factor: 1.0

           # A dictionary of cache name to cache factor for that individual
           # cache. Overrides the global cache factor for a given cache.
           #
           # These can also be set through environment variables composed
           # of "SYNAPSE_CACHE_FACTOR_" + the name of the cache in capital
           # letters and underscores. Setting by environment variable
           # takes priority over setting through the config file.
           # Ex. SYNAPSE_CACHE_FACTOR_GET_USERS_WHO_SHARE_ROOM_WITH_USER=2.0
           #
           per_cache_factors:
             #get_users_who_share_room_with_user: 2.0
        """

    def read_config(self, config, **kwargs):
        self.event_cache_size = self.parse_size(
            config.get("event_cache_size", _DEFAULT_EVENT_CACHE_SIZE)
        )
        self.cache_factors = {}  # type: Dict[str, float]

        cache_config = config.get("caches") or {}
        self.global_factor = cache_config.get(
            "global_factor", properties.default_factor_size
        )
        if not isinstance(self.global_factor, (int, float)):
            raise ConfigError("caches.global_factor must be a number.")

        # Set the global one so that it's reflected in new caches
        properties.default_factor_size = self.global_factor

        # Load cache factors from the config
        individual_factors = cache_config.get("per_cache_factors") or {}
        if not isinstance(individual_factors, dict):
            raise ConfigError("caches.per_cache_factors must be a dictionary")

        # Override factors from environment if necessary
        individual_factors.update(
            {
                key[len(_CACHE_PREFIX) + 1 :].lower(): float(val)
                for key, val in self._environ.items()
                if key.startswith(_CACHE_PREFIX + "_")
            }
        )

        for cache, factor in individual_factors.items():
            if not isinstance(factor, (int, float)):
                raise ConfigError(
                    "caches.per_cache_factors.%s must be a number" % (cache.lower(),)
                )
            self.cache_factors[cache.lower()] = factor

        # Resize all caches (if necessary) with the new factors we've loaded
        self.resize_all_caches()

        # Store this function so that it can be called from other classes without
        # needing an instance of Config
        properties.resize_all_caches_func = self.resize_all_caches

    def resize_all_caches(self):
        """Ensure all cache sizes are up to date

        For each cache, run the mapped callback function with either
        a specific cache factor or the default, global one.
        """
        for cache_name, callback in _CACHES.items():
            new_factor = self.cache_factors.get(cache_name, self.global_factor)
            callback(new_factor)
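Putting `add_resizable_cache`, the environment override, and `resize_all_caches` together, here is a self-contained sketch of the lifecycle (the `ToyCache` class and the concrete values are assumptions for illustration, not Synapse code):

```python
import os

_CACHE_PREFIX = "SYNAPSE_CACHE_FACTOR"
_CACHES = {}  # cache name -> resize callback, as in the module above

def add_resizable_cache(cache_name, cache_resize_callback):
    _CACHES[cache_name.lower()] = cache_resize_callback

class ToyCache:
    """Stand-in for a real cache: rescales its capacity when told to."""
    def __init__(self, name, base_size):
        self.base_size = base_size
        self.max_size = base_size
        add_resizable_cache(name, self.set_cache_factor)

    def set_cache_factor(self, factor):
        self.max_size = int(self.base_size * factor)

cache = ToyCache("get_users_who_share_room_with_user", base_size=10000)

# Factors as read_config assembles them: start from per_cache_factors in
# homeserver.yaml, then let SYNAPSE_CACHE_FACTOR_* environment variables win.
factors = {"get_users_who_share_room_with_user": 2.0}
factors.update(
    {
        key[len(_CACHE_PREFIX) + 1 :].lower(): float(val)
        for key, val in os.environ.items()
        if key.startswith(_CACHE_PREFIX + "_")
    }
)

global_factor = 0.5  # the documented default
for name, callback in _CACHES.items():
    callback(factors.get(name, global_factor))

print(cache.max_size)  # 20000, unless an environment variable overrode it
```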
6 changes: 0 additions & 6 deletions synapse/config/database.py
@@ -68,10 +68,6 @@
          name: sqlite3
          args:
            database: %(database_path)s
-        # Number of events to cache in memory.
-        #
-        #event_cache_size: 10K
        """


@@ -116,8 +112,6 @@ def __init__(self, *args, **kwargs):
        self.databases = []

    def read_config(self, config, **kwargs):
-        self.event_cache_size = self.parse_size(config.get("event_cache_size", "10K"))

        # We *experimentally* support specifying multiple databases via the
        # `databases` key. This is a map from a label to database config in the
        # same format as the `database` config option, plus an extra
2 changes: 2 additions & 0 deletions synapse/config/homeserver.py
@@ -17,6 +17,7 @@
from ._base import RootConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
+from .cache import CacheConfig
from .captcha import CaptchaConfig
from .cas import CasConfig
from .consent_config import ConsentConfig
@@ -55,6 +56,7 @@ class HomeServerConfig(RootConfig):
    config_classes = [
        ServerConfig,
        TlsConfig,
+        CacheConfig,
        DatabaseConfig,
        LoggingConfig,
        RatelimitConfig,
6 changes: 4 additions & 2 deletions synapse/http/client.py
@@ -49,7 +49,6 @@
from synapse.logging.context import make_deferred_yieldable
from synapse.logging.opentracing import set_tag, start_active_span, tags
from synapse.util.async_helpers import timeout_deferred
-from synapse.util.caches import CACHE_SIZE_FACTOR

logger = logging.getLogger(__name__)

@@ -241,7 +240,10 @@ def __getattr__(_self, attr):
        # tends to do so in batches, so we need to allow the pool to keep
        # lots of idle connections around.
        pool = HTTPConnectionPool(self.reactor)
-        pool.maxPersistentPerHost = max((100 * CACHE_SIZE_FACTOR, 5))
+        # XXX: The justification for using the cache factor here is that larger instances
+        # will need both more cache and more connections.
+        # Still, this should probably be a separate dial
+        pool.maxPersistentPerHost = max((100 * hs.config.caches.global_factor, 5))
        pool.cachedConnectionTimeout = 2 * 60

        self.agent = ProxyAgent(
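As a quick illustration of the sizing rule above (plain arithmetic, not Synapse code), the pool cap grows linearly with the global cache factor but never drops below 5:

```python
# Connection-pool cap for a few assumed global cache factors.
for global_factor in (0.01, 0.5, 1.0, 20.0):
    print(global_factor, max((100 * global_factor, 5)))
# 0.01 -> 5, 0.5 -> 50.0, 1.0 -> 100.0, 20.0 -> 2000.0
```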
12 changes: 8 additions & 4 deletions synapse/metrics/_exposition.py
@@ -33,6 +33,8 @@

from twisted.web.resource import Resource

+from synapse.util import caches

try:
    from prometheus_client.samples import Sample
except ImportError:
@@ -103,13 +105,15 @@ def nameify_sample(sample):


def generate_latest(registry, emit_help=False):
-    output = []
-
-    for metric in registry.collect():
-
-        if metric.name.startswith("__unused"):
-            continue
+
+    # Trigger the cache metrics to be rescraped, which update the common
+    # metrics but do not produce metrics themselves
+    for collector in caches.collectors_by_name.values():
+        collector.collect()
+
+    output = []
+
+    for metric in registry.collect():
        if not metric.samples:
            # No samples, don't bother.
            continue
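The hunk above implements a refresh-on-scrape pattern: instead of updating derived metrics on every cache access, each cache's collector recomputes them once per scrape before the registry is walked. A condensed sketch of that pattern (toy collector; Synapse's real collectors live in `synapse.util.caches`):

```python
collectors_by_name = {}

class ToyCacheMetricsCollector:
    """Recomputes derived metrics on demand instead of on every cache hit."""
    def __init__(self, name, cache):
        self.cache = cache
        self.current_size = 0
        collectors_by_name[name] = self

    def collect(self):
        # Called once per scrape: take a cheap snapshot of the cache state.
        self.current_size = len(self.cache)

ToyCacheMetricsCollector("token_cache", cache={"a": 1})
for collector in collectors_by_name.values():
    collector.collect()
```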
4 changes: 3 additions & 1 deletion synapse/push/bulk_push_rule_evaluator.py
@@ -51,6 +51,7 @@
"cache",
"push_rules_delta_state_cache_metric",
cache=[], # Meaningless size, as this isn't a cache that stores values
resizable=False,
)


@@ -67,7 +68,8 @@ def __init__(self, hs):
        self.room_push_rule_cache_metrics = register_cache(
            "cache",
            "room_push_rule_cache",
-            cache=[],  # Meaningless size, as this isn't a cache that stores values
+            cache=[],  # Meaningless size, as this isn't a cache that stores values,
+            resizable=False,
        )

    @defer.inlineCallbacks
4 changes: 2 additions & 2 deletions synapse/push/push_rule_evaluator.py
@@ -22,7 +22,7 @@

from synapse.events import EventBase
from synapse.types import UserID
-from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
+from synapse.util.caches import register_cache
from synapse.util.caches.lrucache import LruCache

logger = logging.getLogger(__name__)
@@ -165,7 +165,7 @@ def _get_value(self, dotted_key: str) -> str:


# Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
-regex_cache = LruCache(50000 * CACHE_SIZE_FACTOR)
+regex_cache = LruCache(50000)
register_cache("cache", "regex_push_cache", regex_cache)


3 changes: 1 addition & 2 deletions synapse/replication/slave/storage/client_ips.py
@@ -15,7 +15,6 @@

from synapse.storage.data_stores.main.client_ips import LAST_SEEN_GRANULARITY
from synapse.storage.database import Database
-from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.caches.descriptors import Cache

from ._base import BaseSlavedStore
@@ -26,7 +25,7 @@ def __init__(self, database: Database, db_conn, hs):
        super(SlavedClientIpStore, self).__init__(database, db_conn, hs)

        self.client_ip_last_seen = Cache(
-            name="client_ip_last_seen", keylen=4, max_entries=50000 * CACHE_SIZE_FACTOR
+            name="client_ip_last_seen", keylen=4, max_entries=50000
        )

    def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id):
(Diffs for the remaining 20 changed files are not shown in this view.)