
Issue #18/EP-4049 add tests about resilience
soxofaan committed Oct 25, 2021
1 parent 876b8bf commit a05e524
Showing 3 changed files with 102 additions and 6 deletions.
18 changes: 13 additions & 5 deletions tests/conftest.py
@@ -38,12 +38,9 @@ def multi_backend_connection(backend1, backend2) -> MultiBackendConnection:


@pytest.fixture
def config(backend1, backend2) -> AggregatorConfig:
def base_config() -> AggregatorConfig:
    """Base config for tests (without any configured backends)."""
    conf = AggregatorConfig()
    conf.aggregator_backends = {
        "b1": backend1,
        "b2": backend2,
    }
    # conf.flask_error_handling = False  # Temporary disable flask error handlers to simplify debugging (better stack traces).

    conf.configured_oidc_providers = [
@@ -58,6 +55,17 @@ def config(backend1, backend2) -> AggregatorConfig:
    return conf


@pytest.fixture
def config(base_config, backend1, backend2) -> AggregatorConfig:
    """Config for most tests with two backends."""
    conf = base_config
    conf.aggregator_backends = {
        "b1": backend1,
        "b2": backend2,
    }
    return conf


def get_flask_app(config: AggregatorConfig) -> flask.Flask:
    app = create_app(config=config, auto_logging_setup=False)
    app.config['TESTING'] = True
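
The `base_config`/`config` split lets resilience tests start from a config without any backends and wire up their own, possibly broken, ones. A minimal sketch of that usage (assuming the `get_api100` helper used in tests/test_views.py; the test name is made up and the pattern simply mirrors the TestResilience tests below):

def test_with_unreachable_backend(base_config, backend1, requests_mock):
    # Hypothetical: point "b2" at a backend that only answers with HTTP 500.
    broken_backend = "https://b2.test/v1"
    requests_mock.get(broken_backend + "/", status_code=500)
    base_config.aggregator_backends = {"b1": backend1, "b2": broken_backend}
    # The aggregator should still start up with only the healthy backend available.
    api100 = get_api100(get_flask_app(base_config))
    api100.get("/").assert_status_code(200)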
30 changes: 29 additions & 1 deletion tests/test_utils.py
@@ -1,6 +1,6 @@
import pytest

from openeo_aggregator.utils import TtlCache, CacheMissException, MultiDictGetter, subdict, dict_merge
from openeo_aggregator.utils import TtlCache, CacheMissException, MultiDictGetter, subdict, dict_merge, EventHandler


class FakeClock:
@@ -153,3 +153,31 @@ def test_dict_merge():
    assert dict_merge({"foo": 1}, {"foo": 2}, foo=3) == {"foo": 3}
    assert dict_merge({"foo": 1}, {"foo": 2}, foo=3, bar=4) == {"foo": 3, "bar": 4}
    assert dict_merge({"foo": 1, "meh": 11}, {"foo": 2, "bar": 22}, foo=3, meh=55) == {"foo": 3, "bar": 22, "meh": 55}


class TestEventHandler:

    def test_empty(self):
        handler = EventHandler("event")
        handler.trigger()

    def test_simple(self):
        data = []
        handler = EventHandler("event")
        handler.add(lambda: data.append("foo"))
        assert data == []
        handler.trigger()
        assert data == ["foo"]

    def test_failure(self):
        data = []
        handler = EventHandler("event")
        handler.add(lambda: data.append("foo"))
        handler.add(lambda: data.append(4 / 0))
        handler.add(lambda: data.append("bar"))
        assert data == []
        with pytest.raises(ZeroDivisionError):
            handler.trigger()
        assert data == ["foo"]
        handler.trigger(skip_failures=True)
        assert data == ["foo", "foo", "bar"]
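
These tests pin down the `EventHandler` contract: callbacks run in registration order, a failing callback aborts the trigger by default, and `trigger(skip_failures=True)` swallows failures and keeps going. A minimal sketch consistent with that contract (the real implementation lives in openeo_aggregator.utils and may differ in details such as logging):

import logging

_log = logging.getLogger(__name__)


class EventHandler:
    """Collect callbacks for an event and call them when triggered."""

    def __init__(self, name: str):
        self.name = name
        self.callbacks = []

    def add(self, callback):
        # Register a callback to be called when the event is triggered.
        self.callbacks.append(callback)

    def trigger(self, skip_failures: bool = False):
        # Call all registered callbacks in registration order.
        for callback in self.callbacks:
            try:
                callback()
            except Exception:
                _log.error(f"Failure calling {callback!r} on {self.name!r} event")
                if not skip_failures:
                    raise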
60 changes: 60 additions & 0 deletions tests/test_views.py
@@ -1,8 +1,10 @@
import itertools
import logging
import pytest
import requests

from openeo_aggregator.backend import AggregatorCollectionCatalog
from openeo_aggregator.connection import MultiBackendConnection
from openeo_driver.errors import JobNotFoundException, ProcessGraphMissingException, JobNotFinishedException, \
    ProcessGraphInvalidException
from openeo_driver.testing import ApiTester, TEST_USER_AUTH_HEADER, TEST_USER, TEST_USER_BEARER_TOKEN, DictSubSet
@@ -880,3 +882,61 @@ def test_get_logs_not_found_on_aggregator(self, api100):
        api100.set_auth_bearer_token(token=TEST_USER_BEARER_TOKEN)
        res = api100.get("/jobs/nope-and-nope/logs")
        res.assert_error(404, "JobNotFound", message="The batch job 'nope-and-nope' does not exist.")


class TestResilience:

    def test_startup_during_backend_downtime(self, backend1, base_config, requests_mock, caplog):
        caplog.set_level(logging.WARNING)

        # Backend1 is up
        requests_mock.get(backend1 + "/health", text="OK")

        # Instead of `backend2` from fixture, set up a broken one
        backend2 = "https://b2.test/v1"
        m = requests_mock.get(backend2 + "/", status_code=500)
        base_config.aggregator_backends = {"b1": backend1, "b2": backend2}
        api100 = get_api100(get_flask_app(base_config))

        assert "Failed to create backend 'b2' connection" in caplog.text
        assert m.call_count == 1

        api100.get("/").assert_status_code(200)

        resp = api100.get("/health").assert_status_code(200)
        assert resp.json == {
            "backend_status": {
                "b1": {"status_code": 200, "text": "OK", "response_time": pytest.approx(0.1, abs=0.1)},
            },
            "status_code": 200,
        }

    def test_startup_during_backend_downtime_and_recover(self, backend1, base_config, requests_mock):
        # Set up fake clock
        MultiBackendConnection._clock = itertools.count(1).__next__

        requests_mock.get(backend1 + "/health", text="OK")

        # Instead of `backend2` from fixture, set up a broken one
        backend2 = "https://b2.test/v1"
        m = requests_mock.get(backend2 + "/", status_code=500)
        base_config.aggregator_backends = {"b1": backend1, "b2": backend2}
        api100 = get_api100(get_flask_app(base_config))

        assert api100.get("/health").assert_status_code(200).json["backend_status"] == {
            "b1": {"status_code": 200, "text": "OK", "response_time": pytest.approx(0.1, abs=0.1)},
        }

        # Backend 2 is up again, but the cache is still active
        requests_mock.get(backend2 + "/", json={"api_version": "1.0.0"})
        requests_mock.get(backend2 + "/health", text="ok again")
        assert api100.get("/health").assert_status_code(200).json["backend_status"] == {
            "b1": {"status_code": 200, "text": "OK", "response_time": pytest.approx(0.1, abs=0.1)},
        }

        # Wait a bit so that cache is flushed
        MultiBackendConnection._clock = itertools.count(10 * 60).__next__
        assert api100.get("/health").assert_status_code(200).json["backend_status"] == {
            "b1": {"status_code": 200, "text": "OK", "response_time": pytest.approx(0.1, abs=0.1)},
            "b2": {"status_code": 200, "text": "ok again", "response_time": pytest.approx(0.1, abs=0.1)},
        }
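
Both tests lean on `MultiBackendConnection._clock` being a plain zero-argument callable that returns the current time, so a fake clock built from `itertools.count(...)` can push the connection cache past its TTL without sleeping. A rough sketch of that pattern, with made-up names and an illustrative TTL (the real caching logic inside MultiBackendConnection differs):

import time


class BackendsCache:
    # Illustrative TTL cache with a pluggable clock; not the actual MultiBackendConnection code.

    _clock = time.time  # tests can swap this, e.g. for itertools.count(1).__next__

    def __init__(self, compute, ttl: float = 5 * 60):
        self._compute = compute
        self._ttl = ttl
        self._value = None
        self._expires = 0

    def get(self):
        now = type(self)._clock()
        if now >= self._expires:
            # Expired (or first call): recompute, e.g. re-probe the configured backends.
            self._value = self._compute()
            self._expires = now + self._ttl
        return self._value

With the fake clock starting at 1, the first /health request probes the backends and caches the result; re-pointing `_clock` at `itertools.count(10 * 60).__next__` jumps past the expiry, so the next request re-probes and picks up the recovered "b2".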
