operator: add 'max_crawler_memory' to limit autosizing of crawler pods (#1746)

Adds a `max_crawler_memory` chart setting which, if set, defines the upper memory limit that crawler pods can be auto-resized up to.
If not set, auto-resizing is disabled and pods are always allocated 'crawler_memory' memory.
ikreymer authored Apr 24, 2024
1 parent eeab64a commit ec74eb4
Showing 4 changed files with 29 additions and 0 deletions.
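
In outline, the commit makes the operator read an optional MAX_CRAWLER_MEMORY environment variable (wired through from the chart value), parse it as a Kubernetes quantity, and fall back to `crawler_memory` when unset. A condensed sketch under those assumptions; the `resolve_max_crawler_memory` helper is illustrative, not part of the diff:

```python
import os

from kubernetes.utils import parse_quantity


def resolve_max_crawler_memory(crawler_memory: int) -> int:
    """Illustrative helper: MAX_CRAWLER_MEMORY, if set, becomes the
    auto-resize ceiling; otherwise pods stay at crawler_memory and
    no resizing occurs."""
    max_crawler_memory = os.environ.get("MAX_CRAWLER_MEMORY")
    if max_crawler_memory:
        return int(parse_quantity(max_crawler_memory))
    return crawler_memory
```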
15 changes: 15 additions & 0 deletions backend/btrixcloud/operator/baseoperator.py
@@ -1,6 +1,7 @@
""" Base Operator class for all operators """

import asyncio
import os
from typing import TYPE_CHECKING
from kubernetes.utils import parse_quantity

@@ -28,6 +29,9 @@
class K8sOpAPI(K8sAPI):
    """Additional k8s api for operators"""

    has_pod_metrics: bool
    max_crawler_memory_size: int

    def __init__(self):
        super().__init__()
        self.config_file = "/config/config.yaml"
@@ -38,6 +42,8 @@ def __init__(self):
        # set initial value; compute_crawler_resources() computes the real cap
        self.max_crawler_memory_size = 0

        self.compute_crawler_resources()
        self.compute_profile_resources()

    def compute_crawler_resources(self):
        """compute memory / cpu resources for crawlers"""
        p = self.shared_params
@@ -69,6 +75,15 @@ def compute_crawler_resources(self):
        crawler_memory = int(parse_quantity(p["crawler_memory"]))
        print(f"memory = {crawler_memory}")

        # optional ceiling for auto-resizing; falls back to crawler_memory
        max_crawler_memory_size = 0
        max_crawler_memory = os.environ.get("MAX_CRAWLER_MEMORY")
        if max_crawler_memory:
            max_crawler_memory_size = int(parse_quantity(max_crawler_memory))

        self.max_crawler_memory_size = max_crawler_memory_size or crawler_memory

        print("max crawler memory size", self.max_crawler_memory_size)

        p["crawler_cpu"] = crawler_cpu
        p["crawler_memory"] = crawler_memory

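For reference, `kubernetes.utils.parse_quantity` (already used above for `crawler_memory`) normalizes Kubernetes quantity strings to `decimal.Decimal` byte counts, which is why the computed values can be stored and compared as plain ints; a quick demonstration:

```python
from kubernetes.utils import parse_quantity

# Different quantity suffixes normalize to the same byte count
print(int(parse_quantity("4Gi")))     # 4294967296
print(int(parse_quantity("4096Mi")))  # 4294967296
```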
7 changes: 7 additions & 0 deletions backend/btrixcloud/operator/crawls.py
@@ -1033,6 +1033,13 @@ async def handle_auto_size(self, pod_status: dict[str, PodInfo]) -> None:

        # if pod is using >MEM_SCALE_UP_THRESHOLD of its memory, increase mem
        if mem_usage > MEM_SCALE_UP_THRESHOLD:
            if new_memory > self.k8s.max_crawler_memory_size:
                print(
                    f"Mem {mem_usage}: Not resizing pod {name}: "
                    + f"mem {new_memory} > max allowed {self.k8s.max_crawler_memory_size}"
                )
                return

            pod.newMemory = new_memory
            print(
                f"Mem {mem_usage}: Resizing pod {name} -> mem {pod.newMemory} - Scale Up"
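The resize decision above reduces to a pure predicate: scale up only when memory usage crosses the threshold and the proposed size stays within the ceiling. A minimal sketch with an assumed threshold value (the real `MEM_SCALE_UP_THRESHOLD` constant is defined elsewhere in crawls.py and not shown in this diff):

```python
MEM_SCALE_UP_THRESHOLD = 0.9  # assumed value for illustration


def should_scale_up(mem_usage: float, new_memory: int, max_memory: int) -> bool:
    """Scale up only if usage exceeds the threshold and the proposed
    size does not exceed the configured ceiling."""
    return mem_usage > MEM_SCALE_UP_THRESHOLD and new_memory <= max_memory


# With max_crawler_memory unset, the ceiling equals crawler_memory, so any
# proposed increase is rejected -- effectively disabling auto-resizing.
print(should_scale_up(0.95, 2 * 1024**3, 4 * 1024**3))  # True
print(should_scale_up(0.95, 8 * 1024**3, 4 * 1024**3))  # False
```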
2 changes: 2 additions & 0 deletions chart/templates/configmap.yaml
@@ -56,6 +56,8 @@ data:

  MIN_QA_CRAWLER_IMAGE: "{{ .Values.min_qa_crawler_image }}"

  MAX_CRAWLER_MEMORY: "{{ .Values.max_crawler_memory }}"

---
apiVersion: v1
kind: ConfigMap
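Note that when `max_crawler_memory` is left unset in values.yaml, the template above renders `MAX_CRAWLER_MEMORY: ""`, so the operator sees an empty string rather than a missing variable; the `if max_crawler_memory:` truthiness check in baseoperator.py treats both the same. A small demonstration of that assumption:

```python
import os

# Simulate the env var an unset chart value produces
os.environ["MAX_CRAWLER_MEMORY"] = ""

max_crawler_memory = os.environ.get("MAX_CRAWLER_MEMORY")
print(bool(max_crawler_memory))  # False -> fall back to crawler_memory
```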
5 changes: 5 additions & 0 deletions chart/values.yaml
@@ -249,6 +249,11 @@ crawler_extra_memory_per_browser: 768Mi
# crawler_memory = crawler_memory_base + crawler_extra_memory_per_browser * (crawler_browser_instances - 1)
# crawler_memory:

# max crawler memory: if set, enables auto-resizing of crawler pods up to this size;
# if not set, no auto-resizing is done, and crawls always use 'crawler_memory' memory
# max_crawler_memory:
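
As a worked example of the formula above, assuming a hypothetical `crawler_memory_base: 1024Mi` together with the `crawler_extra_memory_per_browser: 768Mi` shown and 2 browser instances:

```python
from kubernetes.utils import parse_quantity

# 1024Mi base is assumed for illustration; 768Mi per extra browser is shown above
base = int(parse_quantity("1024Mi"))
per_extra = int(parse_quantity("768Mi"))
browsers = 2

crawler_memory = base + per_extra * (browsers - 1)
print(crawler_memory // (1024 * 1024), "Mi")  # 1792 Mi
```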

# optional: defaults to crawler_memory_base and crawler_cpu_base if not set
# profile_browser_memory:
#
