log when we go above maximum number of workers
mrocklin committed May 18, 2018
1 parent 0f14788 commit 0d367a8
Showing 2 changed files with 23 additions and 3 deletions.
7 changes: 5 additions & 2 deletions dask_kubernetes/core.py
@@ -392,8 +392,11 @@ def scale_up(self, n, pods=None, **kwargs):
         --------
         >>> cluster.scale_up(20)  # ask for twenty workers
         """
-        if dask.config.get('kubernetes.count.max') is not None:
-            n = min(n, dask.config.get('kubernetes.count.max'))
+        maximum = dask.config.get('kubernetes.count.max')
+        if maximum is not None and maximum < n:
+            logger.info("Tried to scale beyond maximum number of workers %d > %d",
+                        n, maximum)
+            n = maximum
         pods = pods or self._cleanup_succeeded_pods(self.pods())
         to_create = n - len(pods)
         new_pods = []
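For readers trying out the new behaviour, here is a minimal sketch of how the clamping interacts with user code. It is illustrative only and not part of the commit; the image name and the maximum of 5 are placeholder values, assuming KubeCluster and make_pod_spec as imported in the test file below.

import dask
from dask_kubernetes import KubeCluster, make_pod_spec

# Placeholder pod spec; the image is only an example.
pod_spec = make_pod_spec(image='daskdev/dask:latest')

with dask.config.set(**{'kubernetes.count.max': 5}):
    cluster = KubeCluster(pod_spec)
    # Asking for 20 workers exceeds the configured maximum of 5, so
    # scale_up logs "Tried to scale beyond maximum number of workers 20 > 5"
    # and only 5 worker pods are requested.
    cluster.scale_up(20)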
19 changes: 18 additions & 1 deletion dask_kubernetes/tests/test_core.py
@@ -8,7 +8,7 @@
 import pytest
 from dask_kubernetes import KubeCluster, make_pod_spec
 from dask.distributed import Client, wait
-from distributed.utils_test import loop  # noqa: F401
+from distributed.utils_test import loop, captured_logger  # noqa: F401
 from distributed.utils import tmpfile
 import kubernetes
 from random import random
@@ -456,3 +456,20 @@ def test_escape_username(pod_spec, loop, ns):
             assert '!' not in cluster.name
     finally:
         os.environ['LOGNAME'] = old_logname
+
+
+def test_maximum(cluster):
+    with dask.config.set(**{'kubernetes.count.max': 1}):
+        with captured_logger('dask-kubernetes') as logger:
+            cluster.scale(10)
+
+            start = time()
+            while len(cluster.scheduler.workers) <= 0:
+                sleep(0.1)
+                assert time() < start + 60
+
+            sleep(0.5)
+            assert len(cluster.scheduler.workers) == 1
+
+    result = logger.getvalue()
+    assert "scale beyond maximum number of workers" in result.lower()
