diff --git a/tests/test_coordinated_workers/test_coordinator.py b/tests/test_coordinated_workers/test_coordinator.py index c0d6bfe..2a5f000 100644 --- a/tests/test_coordinated_workers/test_coordinator.py +++ b/tests/test_coordinated_workers/test_coordinator.py @@ -1,9 +1,9 @@ +import dataclasses import json import ops import pytest -from ops import Framework -from scenario import Container, Context, Relation, State +from ops import testing from src.cosl.coordinated_workers.coordinator import ( ClusterRolesConfig, @@ -16,7 +16,7 @@ @pytest.fixture def coordinator_state(): requires_relations = { - endpoint: Relation(endpoint=endpoint, interface=interface["interface"]) + endpoint: testing.Relation(endpoint=endpoint, interface=interface["interface"]) for endpoint, interface in { "my-certificates": {"interface": "certificates"}, "my-logging": {"interface": "loki_push_api"}, @@ -24,7 +24,7 @@ def coordinator_state(): "my-workload-tracing": {"interface": "tracing"}, }.items() } - requires_relations["my-s3"] = Relation( + requires_relations["my-s3"] = testing.Relation( "my-s3", interface="s3", remote_app_data={ @@ -34,35 +34,35 @@ def coordinator_state(): "secret-key": "my-secret-key", }, ) - requires_relations["cluster_worker0"] = Relation( + requires_relations["cluster_worker0"] = testing.Relation( "my-cluster", remote_app_name="worker0", remote_app_data=ClusterRequirerAppData(role="read").dump(), ) - requires_relations["cluster_worker1"] = Relation( + requires_relations["cluster_worker1"] = testing.Relation( "my-cluster", remote_app_name="worker1", remote_app_data=ClusterRequirerAppData(role="write").dump(), ) - requires_relations["cluster_worker2"] = Relation( + requires_relations["cluster_worker2"] = testing.Relation( "my-cluster", remote_app_name="worker2", remote_app_data=ClusterRequirerAppData(role="backend").dump(), ) provides_relations = { - endpoint: Relation(endpoint=endpoint, interface=interface["interface"]) + endpoint: testing.Relation(endpoint=endpoint, interface=interface["interface"]) for endpoint, interface in { "my-dashboards": {"interface": "grafana_dashboard"}, "my-metrics": {"interface": "prometheus_scrape"}, }.items() } - return State( - containers=[ - Container("nginx", can_connect=True), - Container("nginx-prometheus-exporter", can_connect=True), - ], + return testing.State( + containers={ + testing.Container("nginx", can_connect=True), + testing.Container("nginx-prometheus-exporter", can_connect=True), + }, relations=list(requires_relations.values()) + list(provides_relations.values()), ) @@ -90,7 +90,7 @@ class MyCoordinator(ops.CharmBase): }, } - def __init__(self, framework: Framework): + def __init__(self, framework: ops.Framework): super().__init__(framework) # Note: Here it is a good idea not to use context mgr because it is "ops aware" self.coordinator = Coordinator( @@ -133,48 +133,48 @@ def __init__(self, framework: Framework): def test_worker_roles_subset_of_minimal_deployment( - coordinator_state: State, coordinator_charm: ops.CharmBase + coordinator_state: testing.State, coordinator_charm: ops.CharmBase ): # Test that the combination of worker roles is a subset of the minimal deployment roles # GIVEN a coordinator_charm - ctx = Context(coordinator_charm, meta=coordinator_charm.META) + ctx = testing.Context(coordinator_charm, meta=coordinator_charm.META) # AND a coordinator_state defining relations to worker charms with incomplete distributed roles - missing_backend_worker_relation = [ + missing_backend_worker_relation = { relation for relation in 
coordinator_state.relations if relation.remote_app_name != "worker2" - ] + } # WHEN we process any event - with ctx.manager( - "update-status", - state=coordinator_state.replace(relations=missing_backend_worker_relation), + with ctx( + ctx.on.update_status(), + state=dataclasses.replace(coordinator_state, relations=missing_backend_worker_relation), ) as mgr: charm: coordinator_charm = mgr.charm - # THEN the deployment is coherent + # THEN the deployment is not coherent assert not charm.coordinator.is_coherent def test_without_s3_integration_raises_error( - coordinator_state: State, coordinator_charm: ops.CharmBase + coordinator_state: testing.State, coordinator_charm: ops.CharmBase ): # Test that a charm without an s3 integration raises S3NotFoundError # GIVEN a coordinator charm without an s3 integration - ctx = Context(coordinator_charm, meta=coordinator_charm.META) - relations_without_s3 = [ + ctx = testing.Context(coordinator_charm, meta=coordinator_charm.META) + relations_without_s3 = { relation for relation in coordinator_state.relations if relation.endpoint != "my-s3" - ] + } # WHEN we process any event - with ctx.manager( - "update-status", - state=coordinator_state.replace(relations=relations_without_s3), + with ctx( + ctx.on.update_status(), + state=dataclasses.replace(coordinator_state, relations=relations_without_s3), ) as mgr: - # THEN the _s3_config method raises and S3NotFoundError + # THEN the _s3_config method raises an S3NotFoundError with pytest.raises(S3NotFoundError): mgr.charm.coordinator._s3_config @@ -193,7 +193,7 @@ def test_without_s3_integration_raises_error( ), ) def test_s3_integration( - coordinator_state: State, + coordinator_state: testing.State, coordinator_charm: ops.CharmBase, region, endpoint, @@ -206,7 +206,7 @@ def test_s3_integration( # Test that a charm with a s3 integration gives the expected _s3_config # GIVEN a coordinator charm with a s3 integration - ctx = Context(coordinator_charm, meta=coordinator_charm.META) + ctx = testing.Context(coordinator_charm, meta=coordinator_charm.META) s3_relation = coordinator_state.get_relations("my-s3")[0] relations_except_s3 = [ relation for relation in coordinator_state.relations if relation.endpoint != "my-s3" @@ -224,13 +224,14 @@ def test_s3_integration( } # WHEN we process any event - with ctx.manager( - "update-status", - state=coordinator_state.replace( - relations=relations_except_s3 + [s3_relation.replace(remote_app_data=s3_app_data)] + with ctx( + ctx.on.update_status(), + state=dataclasses.replace( + coordinator_state, + relations=relations_except_s3 + + [dataclasses.replace(s3_relation, remote_app_data=s3_app_data)], ), ) as mgr: - # THEN the s3_connection_info method returns the expected data structure coordinator: Coordinator = mgr.charm.coordinator assert coordinator.s3_connection_info.region == region diff --git a/tests/test_coordinated_workers/test_coordinator_status.py b/tests/test_coordinated_workers/test_coordinator_status.py index 459c18f..63232c2 100644 --- a/tests/test_coordinated_workers/test_coordinator_status.py +++ b/tests/test_coordinated_workers/test_coordinator_status.py @@ -1,11 +1,12 @@ +import dataclasses from unittest.mock import MagicMock, PropertyMock, patch import httpx +import ops import pytest import tenacity from lightkube import ApiError -from ops import ActiveStatus, BlockedStatus, CharmBase, Framework, WaitingStatus -from scenario import Container, Context, Relation, State +from ops import testing from cosl.coordinated_workers.coordinator import ClusterRolesConfig, 
Coordinator from cosl.coordinated_workers.interface import ClusterProviderAppData, ClusterRequirerAppData @@ -19,9 +20,8 @@ ) -class MyCoordCharm(CharmBase): - - def __init__(self, framework: Framework): +class MyCoordCharm(ops.CharmBase): + def __init__(self, framework: ops.Framework): super().__init__(framework) self.coordinator = Coordinator( @@ -54,7 +54,7 @@ def coord_charm(): @pytest.fixture def ctx(coord_charm): - return Context( + return testing.Context( coord_charm, meta={ "name": "lilith", @@ -80,7 +80,7 @@ def ctx(coord_charm): @pytest.fixture() def s3(): - return Relation( + return testing.Relation( "s3", remote_app_data={ "access-key": "key", @@ -98,33 +98,40 @@ def worker(): ClusterProviderAppData(worker_config="some: yaml").dump(app_data) remote_app_data = {} ClusterRequirerAppData(role="role").dump(remote_app_data) - return Relation("cluster", local_app_data=app_data, remote_app_data=remote_app_data) + return testing.Relation("cluster", local_app_data=app_data, remote_app_data=remote_app_data) @pytest.fixture() def base_state(s3, worker): - - return State( + return testing.State( leader=True, - containers=[Container("nginx"), Container("nginx-prometheus-exporter")], - relations=[worker, s3], + containers={testing.Container("nginx"), testing.Container("nginx-prometheus-exporter")}, + relations={worker, s3}, ) +def set_containers(state, nginx_can_connect=False, exporter_can_connect=False): + containers = { + testing.Container("nginx", can_connect=nginx_can_connect), + testing.Container("nginx-prometheus-exporter", can_connect=exporter_can_connect), + } + return dataclasses.replace(state, containers=containers) + + @patch( "charms.observability_libs.v0.kubernetes_compute_resources_patch.ResourcePatcher.apply", MagicMock(return_value=None), ) def test_status_check_no_workers(ctx, base_state, s3, caplog): # GIVEN the container cannot connect - state = base_state.with_can_connect("nginx", True) - state = state.replace(relations=[s3]) + state = set_containers(base_state, True, False) + state = dataclasses.replace(state, relations={s3}) # WHEN we run any event - state_out = ctx.run("config_changed", state) + state_out = ctx.run(ctx.on.config_changed(), state) # THEN the charm sets blocked - assert state_out.unit_status == BlockedStatus("[consistency] Missing any worker relation.") + assert state_out.unit_status == ops.BlockedStatus("[consistency] Missing any worker relation.") @patch( @@ -133,29 +140,28 @@ def test_status_check_no_workers(ctx, base_state, s3, caplog): ) def test_status_check_no_s3(ctx, base_state, worker, caplog): # GIVEN the container cannot connect - state = base_state.with_can_connect("nginx", True) - state = state.replace(relations=[worker]) + state = set_containers(base_state, True, False) + state = dataclasses.replace(base_state, relations={worker}) # WHEN we run any event - state_out = ctx.run("config_changed", state) + state_out = ctx.run(ctx.on.config_changed(), state) # THEN the charm sets blocked - assert state_out.unit_status == BlockedStatus("[s3] Missing S3 integration.") + assert state_out.unit_status == ops.BlockedStatus("[s3] Missing S3 integration.") @patch( "charms.observability_libs.v0.kubernetes_compute_resources_patch.KubernetesComputeResourcesPatch.get_status", - MagicMock(return_value=(BlockedStatus(""))), + MagicMock(return_value=(ops.BlockedStatus(""))), ) def test_status_check_k8s_patch_failed(ctx, base_state, caplog): # GIVEN the container can connect - state = base_state.with_can_connect("nginx", True) - state = 
base_state.with_can_connect("nginx-prometheus-exporter", True) + state = set_containers(base_state, True, True) # WHEN we run any event - state_out = ctx.run("update_status", state) + state_out = ctx.run(ctx.on.update_status(), state) - assert state_out.unit_status == BlockedStatus("") + assert state_out.unit_status == ops.BlockedStatus("") @patch("charms.observability_libs.v0.kubernetes_compute_resources_patch.ResourcePatcher") @@ -167,8 +173,7 @@ def test_status_check_k8s_patch_success_after_retries( resource_patcher_mock, ctx, base_state, caplog ): # GIVEN the container can connect - state = base_state.with_can_connect("nginx", True) - state = base_state.with_can_connect("nginx-prometheus-exporter", True) + state = set_containers(base_state, True, True) # Retry on that error response = httpx.Response( @@ -180,14 +185,14 @@ def test_status_check_k8s_patch_success_after_retries( # on collect-unit-status, the request patches are not yet reflected with patch( "cosl.coordinated_workers.coordinator.KubernetesComputeResourcesPatch.get_status", - MagicMock(return_value=WaitingStatus("waiting")), + MagicMock(return_value=ops.WaitingStatus("waiting")), ): - state_intermediate = ctx.run("config_changed", state) - assert state_intermediate.unit_status == WaitingStatus("waiting") + state_intermediate = ctx.run(ctx.on.config_changed(), state) + assert state_intermediate.unit_status == ops.WaitingStatus("waiting") with patch( "cosl.coordinated_workers.coordinator.KubernetesComputeResourcesPatch.get_status", - MagicMock(return_value=ActiveStatus("")), + MagicMock(return_value=ops.ActiveStatus("")), ): - state_out = ctx.run("update_status", state_intermediate) - assert state_out.unit_status == ActiveStatus("Degraded.") + state_out = ctx.run(ctx.on.update_status(), state_intermediate) + assert state_out.unit_status == ops.ActiveStatus("Degraded.") diff --git a/tests/test_coordinated_workers/test_nginx.py b/tests/test_coordinated_workers/test_nginx.py index 1c60208..e3463b2 100644 --- a/tests/test_coordinated_workers/test_nginx.py +++ b/tests/test_coordinated_workers/test_nginx.py @@ -1,9 +1,9 @@ import logging import tempfile +import ops import pytest -from ops import CharmBase -from scenario import Container, Context, ExecOutput, Mount, State +from ops import testing from src.cosl.coordinated_workers.nginx import ( CA_CERT_PATH, @@ -25,7 +25,7 @@ def certificate_mounts(): mounts = {} for cert_path, temp_file in temp_files.items(): - mounts[cert_path] = Mount(cert_path, temp_file.name) + mounts[cert_path] = testing.Mount(location=cert_path, source=temp_file.name) # TODO: Do we need to clean up the temp files since delete=False was set? 
return mounts @@ -33,17 +33,21 @@ def certificate_mounts(): @pytest.fixture def nginx_context(): - return Context(CharmBase, meta={"name": "foo", "containers": {"nginx": {"type": "oci-image"}}}) + return testing.Context( + ops.CharmBase, meta={"name": "foo", "containers": {"nginx": {"type": "oci-image"}}} + ) -def test_certs_on_disk(certificate_mounts: dict, nginx_context: Context): +def test_certs_on_disk(certificate_mounts: dict, nginx_context: testing.Context): # GIVEN any charm with a container ctx = nginx_context # WHEN we process any event - with ctx.manager( - "update-status", - state=State(containers=[Container("nginx", can_connect=True, mounts=certificate_mounts)]), + with ctx( + ctx.on.update_status(), + state=testing.State( + containers={testing.Container("nginx", can_connect=True, mounts=certificate_mounts)} + ), ) as mgr: charm = mgr.charm nginx = Nginx(charm, lambda: "foo_string", None) @@ -52,16 +56,18 @@ def test_certs_on_disk(certificate_mounts: dict, nginx_context: Context): assert nginx.are_certificates_on_disk -def test_certs_deleted(certificate_mounts: dict, nginx_context: Context): +def test_certs_deleted(certificate_mounts: dict, nginx_context: testing.Context): # Test deleting the certificates. # GIVEN any charm with a container ctx = nginx_context # WHEN we process any event - with ctx.manager( - "update-status", - state=State(containers=[Container("nginx", can_connect=True, mounts=certificate_mounts)]), + with ctx( + ctx.on.update_status(), + state=testing.State( + containers={testing.Container("nginx", can_connect=True, mounts=certificate_mounts)} + ), ) as mgr: charm = mgr.charm nginx = Nginx(charm, lambda: "foo_string", None) @@ -73,23 +79,23 @@ def test_certs_deleted(certificate_mounts: dict, nginx_context: Context): assert not nginx.are_certificates_on_disk -def test_reload_calls_nginx_binary_successfully(nginx_context: Context): +def test_reload_calls_nginx_binary_successfully(nginx_context: testing.Context): # Test that the reload method calls the nginx binary without error. # GIVEN any charm with a container ctx = nginx_context # WHEN we process any event - with ctx.manager( - "update-status", - state=State( - containers=[ - Container( + with ctx( + ctx.on.update_status(), + state=testing.State( + containers={ + testing.Container( "nginx", can_connect=True, - exec_mock={("nginx", "-s", "reload"): ExecOutput(return_code=0)}, + execs={testing.Exec(("nginx", "-s", "reload"), return_code=0)}, ) - ] + }, ), ) as mgr: charm = mgr.charm @@ -100,7 +106,7 @@ def test_reload_calls_nginx_binary_successfully(nginx_context: Context): assert nginx.reload() is None -def test_has_config_changed(nginx_context: Context): +def test_has_config_changed(nginx_context: testing.Context): # Test changing the nginx config and catching the change. 
# GIVEN any charm with a container and a nginx config file @@ -111,16 +117,18 @@ def test_has_config_changed(nginx_context: Context): f.write("foo") # WHEN we process any event - with ctx.manager( - "update-status", - state=State( - containers=[ - Container( + with ctx( + ctx.on.update_status(), + state=testing.State( + containers={ + testing.Container( "nginx", can_connect=True, - mounts={"config": Mount(NGINX_CONFIG, test_config.name)}, + mounts={ + "config": testing.Mount(location=NGINX_CONFIG, source=test_config.name) + }, ) - ] + }, ), ) as mgr: charm = mgr.charm diff --git a/tests/test_coordinated_workers/test_worker.py b/tests/test_coordinated_workers/test_worker.py index 7a90adc..105d67a 100644 --- a/tests/test_coordinated_workers/test_worker.py +++ b/tests/test_coordinated_workers/test_worker.py @@ -6,10 +6,7 @@ import ops import pytest import yaml -from ops import Framework -from ops.pebble import Layer, ServiceStatus -from scenario import Container, Context, ExecOutput, Mount, Relation, Secret, State -from scenario.runtime import UncaughtCharmError +from ops import testing from cosl.coordinated_workers.worker import ( CERT_FILE, @@ -23,9 +20,9 @@ class MyCharm(ops.CharmBase): - layer = Layer("") + layer = ops.pebble.Layer("") - def __init__(self, framework: Framework): + def __init__(self, framework: ops.Framework): super().__init__(framework) self.worker = Worker( self, @@ -41,7 +38,7 @@ def test_no_roles_error(): # raises a WorkerError # WHEN you define a charm with no role-x config options - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -53,8 +50,8 @@ def test_no_roles_error(): # IF the charm executes any event # THEN the charm raises an error - with pytest.raises(UncaughtCharmError): - ctx.run("update-status", State(containers=[Container("foo")])) + with pytest.raises(testing.errors.UncaughtCharmError): + ctx.run(ctx.on.update_status(), testing.State(containers={testing.Container("foo")})) @pytest.mark.parametrize( @@ -75,7 +72,7 @@ def test_roles_from_config(roles_active, roles_inactive, expected): # correctly determines which ones are enabled through the Worker # WHEN you define a charm with a few role-x config options - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -91,10 +88,10 @@ def test_roles_from_config(roles_active, roles_inactive, expected): ) # AND the charm runs with a few of those set to true, the rest to false - with ctx.manager( - "update-status", - State( - containers=[Container("foo")], + with ctx( + ctx.on.update_status(), + testing.State( + containers={testing.Container("foo")}, config={ **{f"role-{r}": False for r in roles_inactive}, **{f"role-{r}": True for r in roles_active}, @@ -107,7 +104,7 @@ def test_roles_from_config(roles_active, roles_inactive, expected): def test_worker_restarts_if_some_service_not_up(tmp_path): # GIVEN a worker with some services - MyCharm.layer = Layer( + MyCharm.layer = ops.pebble.Layer( { "services": { "foo": { @@ -132,7 +129,7 @@ def test_worker_restarts_if_some_service_not_up(tmp_path): } } ) - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -145,33 +142,33 @@ def test_worker_restarts_if_some_service_not_up(tmp_path): # but some of the services are down cfg = tmp_path / "cfg.yaml" cfg.write_text("some: yaml") - container = Container( + container = testing.Container( "foo", can_connect=True, - mounts={"local": Mount(CONFIG_FILE, cfg)}, - exec_mock={ - ("update-ca-certificates", "--fresh"): ExecOutput(), - ("/bin/foo", "-version"): 
ExecOutput(stdout="foo"), + mounts={"local": testing.Mount(location=CONFIG_FILE, source=cfg)}, + execs={ + testing.Exec(("update-ca-certificates", "--fresh")), + testing.Exec(("/bin/foo", "-version"), stdout="foo"), }, - service_status={ - "foo": ServiceStatus.INACTIVE, - "bar": ServiceStatus.ACTIVE, - "baz": ServiceStatus.INACTIVE, + service_statuses={ + "foo": ops.pebble.ServiceStatus.INACTIVE, + "bar": ops.pebble.ServiceStatus.ACTIVE, + "baz": ops.pebble.ServiceStatus.INACTIVE, }, ) - state_out = ctx.run(container.pebble_ready_event, State(containers=[container])) + state_out = ctx.run(ctx.on.pebble_ready(container), testing.State(containers={container})) # THEN the charm restarts all the services that are down container_out = state_out.get_container("foo") - service_statuses = container_out.service_status.values() - assert all(svc is ServiceStatus.ACTIVE for svc in service_statuses), [ + service_statuses = container_out.service_statuses.values() + assert all(svc is ops.pebble.ServiceStatus.ACTIVE for svc in service_statuses), [ stat.value for stat in service_statuses ] def test_worker_does_not_restart_external_services(tmp_path): # GIVEN a worker with some services and a layer with some other services - MyCharm.layer = Layer( + MyCharm.layer = ops.pebble.Layer( { "services": { "foo": { @@ -184,7 +181,7 @@ def test_worker_does_not_restart_external_services(tmp_path): } } ) - other_layer = Layer( + other_layer = ops.pebble.Layer( { "services": { "bar": { @@ -203,7 +200,7 @@ def test_worker_does_not_restart_external_services(tmp_path): } ) - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -216,39 +213,39 @@ def test_worker_does_not_restart_external_services(tmp_path): # but some of the services are down cfg = tmp_path / "cfg.yaml" cfg.write_text("some: yaml") - container = Container( + container = testing.Container( "foo", - exec_mock={ - ("update-ca-certificates", "--fresh"): ExecOutput(), - ("/bin/foo", "-version"): ExecOutput(stdout="foo"), + execs={ + testing.Exec(("update-ca-certificates", "--fresh")), + testing.Exec(("/bin/foo", "-version"), stdout="foo"), }, can_connect=True, - mounts={"local": Mount(CONFIG_FILE, cfg)}, + mounts={"local": testing.Mount(location=CONFIG_FILE, source=cfg)}, layers={"foo": MyCharm.layer, "bar": other_layer}, - service_status={ + service_statuses={ # layer foo has some inactive - "foo": ServiceStatus.INACTIVE, + "foo": ops.pebble.ServiceStatus.INACTIVE, # layer bar has some inactive - "bar": ServiceStatus.ACTIVE, - "baz": ServiceStatus.INACTIVE, + "bar": ops.pebble.ServiceStatus.ACTIVE, + "baz": ops.pebble.ServiceStatus.INACTIVE, }, ) - state_out = ctx.run(container.pebble_ready_event, State(containers=[container])) + state_out = ctx.run(ctx.on.pebble_ready(container), testing.State(containers={container})) # THEN the charm restarts all the services that are down container_out = state_out.get_container("foo") - assert container_out.service_status == { + assert container_out.service_statuses == { # layer foo service is now active - "foo": ServiceStatus.ACTIVE, + "foo": ops.pebble.ServiceStatus.ACTIVE, # layer bar services is unchanged - "bar": ServiceStatus.ACTIVE, - "baz": ServiceStatus.INACTIVE, + "bar": ops.pebble.ServiceStatus.ACTIVE, + "baz": ops.pebble.ServiceStatus.INACTIVE, } def test_worker_raises_if_service_restart_fails_for_too_long(tmp_path): # GIVEN a worker with some services - MyCharm.layer = Layer( + MyCharm.layer = ops.pebble.Layer( { "services": { "foo": { @@ -260,7 +257,7 @@ def 
test_worker_raises_if_service_restart_fails_for_too_long(tmp_path): } } ) - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -271,12 +268,12 @@ def test_worker_raises_if_service_restart_fails_for_too_long(tmp_path): ) cfg = tmp_path / "cfg.yaml" cfg.write_text("some: yaml") - container = Container( + container = testing.Container( "foo", can_connect=True, - mounts={"local": Mount(CONFIG_FILE, cfg)}, - service_status={ - "foo": ServiceStatus.INACTIVE, + mounts={"local": testing.Mount(location=CONFIG_FILE, source=cfg)}, + service_statuses={ + "foo": ops.pebble.ServiceStatus.INACTIVE, }, ) @@ -290,7 +287,7 @@ def raise_change_error(*args): # THEN the charm errors out # technically an ops.pebble.ChangeError but the context manager doesn't catch it for some reason stack.enter_context(pytest.raises(Exception)) - ctx.run(container.pebble_ready_event, State(containers=[container])) + ctx.run(ctx.on.pebble_ready(container), testing.State(containers={container})) @pytest.mark.parametrize( @@ -316,7 +313,7 @@ def raise_change_error(*args): ), ) def test_get_remote_write_endpoints(remote_databag, expected): - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -325,17 +322,18 @@ def test_get_remote_write_endpoints(remote_databag, expected): }, config={"options": {"role-all": {"type": "boolean", "default": True}}}, ) - container = Container( + container = testing.Container( "foo", - exec_mock={("update-ca-certificates", "--fresh"): ExecOutput()}, + execs={testing.Exec(("update-ca-certificates", "--fresh"))}, can_connect=True, ) - relation = Relation( + relation = testing.Relation( "cluster", remote_app_data=remote_databag, ) - with ctx.manager( - relation.changed_event, State(containers=[container], relations=[relation]) + with ctx( + ctx.on.relation_changed(relation), + testing.State(containers={container}, relations={relation}), ) as mgr: charm = mgr.charm mgr.run() @@ -347,7 +345,6 @@ def test_config_preprocessor(): new_config = {"modified": "config"} class MyWorker(Worker): - @property def _worker_config(self): # mock config processor that entirely replaces the config with another, @@ -355,9 +352,9 @@ def _worker_config(self): return new_config class MyCharm(ops.CharmBase): - layer = Layer({"services": {"foo": {"command": ["bar"]}}}) + layer = ops.pebble.Layer({"services": {"foo": {"command": ["bar"]}}}) - def __init__(self, framework: Framework): + def __init__(self, framework: ops.Framework): super().__init__(framework) self.worker = MyWorker( self, @@ -366,7 +363,7 @@ def __init__(self, framework: Framework): {"cluster": "cluster"}, ) - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -383,24 +380,24 @@ def __init__(self, framework: Framework): # WHEN the charm writes the config to disk state_out = ctx.run( - "config_changed", - State( + ctx.on.config_changed(), + testing.State( config={"role-all": True}, - containers=[ - Container( + containers={ + testing.Container( "foo", can_connect=True, - exec_mock={("update-ca-certificates", "--fresh"): ExecOutput()}, + execs={testing.Exec(("update-ca-certificates", "--fresh"))}, ) - ], - relations=[ - Relation( + }, + relations={ + testing.Relation( "cluster", remote_app_data={ "worker_config": json.dumps(yaml.safe_dump({"original": "config"})) }, ) - ], + }, ), ) @@ -413,8 +410,7 @@ def __init__(self, framework: Framework): @patch.object(Worker, "_set_pebble_layer", MagicMock(return_value=False)) @patch.object(Worker, "restart") def test_worker_does_not_restart(restart_mock, 
tmp_path): - - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -423,7 +419,7 @@ def test_worker_does_not_restart(restart_mock, tmp_path): }, config={"options": {"role-all": {"type": "boolean", "default": True}}}, ) - relation = Relation( + relation = testing.Relation( "cluster", remote_app_data={ "worker_config": json.dumps("some: yaml"), @@ -431,11 +427,11 @@ def test_worker_does_not_restart(restart_mock, tmp_path): ) # WHEN the charm receives any event and there are no changes to the config or the layer, # but some of the services are down - container = Container( + container = testing.Container( "foo", can_connect=True, ) - ctx.run("update_status", State(containers=[container], relations=[relation])) + ctx.run(ctx.on.update_status(), testing.State(containers={container}, relations={relation})) assert not restart_mock.called @@ -444,8 +440,7 @@ def test_worker_does_not_restart(restart_mock, tmp_path): @patch.object(Worker, "_set_pebble_layer", MagicMock(return_value=False)) @patch.object(Worker, "restart") def test_worker_does_not_restart_on_no_cert_changed(restart_mock, tmp_path): - - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -454,13 +449,18 @@ def test_worker_does_not_restart_on_no_cert_changed(restart_mock, tmp_path): }, config={"options": {"role-all": {"type": "boolean", "default": True}}}, ) - relation = Relation( + secret = testing.Secret( + {"private-key": "private"}, + label="private_id", + owner="app", + ) + relation = testing.Relation( "cluster", remote_app_data={ "worker_config": json.dumps("some: yaml"), "ca_cert": json.dumps("ca"), "server_cert": json.dumps("cert"), - "privkey_secret_id": json.dumps("private_id"), + "privkey_secret_id": json.dumps(secret.id), "s3_tls_ca_chain": json.dumps("s3_ca"), }, ) @@ -476,28 +476,22 @@ def test_worker_does_not_restart_on_no_cert_changed(restart_mock, tmp_path): client_ca.write_text("ca") s3_ca_chain.write_text("s3_ca") - container = Container( + container = testing.Container( "foo", can_connect=True, - exec_mock={("update-ca-certificates", "--fresh"): ExecOutput()}, + execs={testing.Exec(("update-ca-certificates", "--fresh"))}, mounts={ - "cert": Mount(CERT_FILE, cert), - "key": Mount(KEY_FILE, key), - "client_ca": Mount(CLIENT_CA_FILE, client_ca), - "s3_ca_chain": Mount(S3_TLS_CA_CHAIN_FILE, s3_ca_chain), - "root_ca": Mount(root_ca_mocked_path, client_ca), + "cert": testing.Mount(location=CERT_FILE, source=cert), + "key": testing.Mount(location=KEY_FILE, source=key), + "client_ca": testing.Mount(location=CLIENT_CA_FILE, source=client_ca), + "s3_ca_chain": testing.Mount(location=S3_TLS_CA_CHAIN_FILE, source=s3_ca_chain), + "root_ca": testing.Mount(location=root_ca_mocked_path, source=client_ca), }, ) - secret = Secret( - "secret:private_id", - label="private_id", - owner="app", - contents={0: {"private-key": "private"}}, - ) ctx.run( - "update_status", - State(leader=True, containers=[container], relations=[relation], secrets=[secret]), + ctx.on.update_status(), + testing.State(leader=True, containers={container}, relations={relation}, secrets={secret}), ) assert restart_mock.call_count == 0 @@ -507,9 +501,9 @@ def test_worker_does_not_restart_on_no_cert_changed(restart_mock, tmp_path): @patch.object(Worker, "_update_config") def test_worker_no_reconcile_when_patch_not_ready(_update_config_mock): class MyCharmWithResources(ops.CharmBase): - layer = Layer("") + layer = ops.pebble.Layer("") - def __init__(self, framework: Framework): + def __init__(self, framework: ops.Framework): 
super().__init__(framework) self.worker = Worker( self, @@ -521,7 +515,7 @@ def __init__(self, framework: Framework): container_name="charm", ) - ctx = Context( + ctx = testing.Context( MyCharmWithResources, meta={ "name": "foo", @@ -532,8 +526,8 @@ def __init__(self, framework: Framework): ) ctx.run( - "update_status", - State(leader=True, containers=[Container("foo")]), + ctx.on.update_status(), + testing.State(leader=True, containers={testing.Container("foo")}), ) assert not _update_config_mock.called @@ -544,7 +538,7 @@ def __init__(self, framework: Framework): @patch.object(Worker, "restart") def test_worker_certs_update(restart_mock, tmp_path): # GIVEN a worker with no cert files on disk, and a cluster relation giving us some cert data - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -553,13 +547,18 @@ def test_worker_certs_update(restart_mock, tmp_path): }, config={"options": {"role-all": {"type": "boolean", "default": True}}}, ) - relation = Relation( + secret = testing.Secret( + {"private-key": "private"}, + label="private_id", + owner="app", + ) + relation = testing.Relation( "cluster", remote_app_data={ "worker_config": json.dumps("some: yaml"), "ca_cert": json.dumps("ca"), "server_cert": json.dumps("cert"), - "privkey_secret_id": json.dumps("private_id"), + "privkey_secret_id": json.dumps(secret.id), "s3_tls_ca_chain": json.dumps("s3_ca"), }, ) @@ -569,28 +568,22 @@ def test_worker_certs_update(restart_mock, tmp_path): client_ca = tmp_path / "client_ca.cert" s3_ca_chain = tmp_path / "s3_ca_chain.cert" - container = Container( + container = testing.Container( "foo", can_connect=True, - exec_mock={("update-ca-certificates", "--fresh"): ExecOutput()}, + execs={testing.Exec(("update-ca-certificates", "--fresh"))}, mounts={ - "cert": Mount(CERT_FILE, cert), - "key": Mount(KEY_FILE, key), - "client_ca": Mount(CLIENT_CA_FILE, client_ca), - "s3_ca_chain": Mount(S3_TLS_CA_CHAIN_FILE, s3_ca_chain), + "cert": testing.Mount(location=CERT_FILE, source=cert), + "key": testing.Mount(location=KEY_FILE, source=key), + "client_ca": testing.Mount(location=CLIENT_CA_FILE, source=client_ca), + "s3_ca_chain": testing.Mount(location=S3_TLS_CA_CHAIN_FILE, source=s3_ca_chain), }, ) - secret = Secret( - "secret:private_id", - label="private_id", - owner="app", - contents={0: {"private-key": "private"}}, - ) # WHEN the charm receives any event ctx.run( - "update_status", - State(leader=True, containers=[container], relations=[relation], secrets=[secret]), + ctx.on.update_status(), + testing.State(leader=True, containers={container}, relations={relation}, secrets={secret}), ) # THEN the worker writes all tls data to the right locations on the container filesystem @@ -609,7 +602,7 @@ def test_worker_certs_update(restart_mock, tmp_path): @pytest.mark.parametrize("s3_ca_on_disk", (True, False)) def test_worker_certs_update_only_s3(restart_mock, tmp_path, s3_ca_on_disk): # GIVEN a worker with a tls-encrypted s3 bucket - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "foo", @@ -618,7 +611,7 @@ def test_worker_certs_update_only_s3(restart_mock, tmp_path, s3_ca_on_disk): }, config={"options": {"role-all": {"type": "boolean", "default": True}}}, ) - relation = Relation( + relation = testing.Relation( "cluster", remote_app_data={ "worker_config": json.dumps("some: yaml"), @@ -633,22 +626,22 @@ def test_worker_certs_update_only_s3(restart_mock, tmp_path, s3_ca_on_disk): if s3_ca_on_disk: s3_ca_chain.write_text("s3_ca") - container = Container( + container = 
testing.Container( "foo", can_connect=True, - exec_mock={("update-ca-certificates", "--fresh"): ExecOutput()}, + execs={testing.Exec(("update-ca-certificates", "--fresh"))}, mounts={ - "cert": Mount(CERT_FILE, cert), - "key": Mount(KEY_FILE, key), - "client_ca": Mount(CLIENT_CA_FILE, client_ca), - "s3_ca_chain": Mount(S3_TLS_CA_CHAIN_FILE, s3_ca_chain), + "cert": testing.Mount(location=CERT_FILE, source=cert), + "key": testing.Mount(location=KEY_FILE, source=key), + "client_ca": testing.Mount(location=CLIENT_CA_FILE, source=client_ca), + "s3_ca_chain": testing.Mount(location=S3_TLS_CA_CHAIN_FILE, source=s3_ca_chain), }, ) # WHEN the charm receives any event ctx.run( - "update_status", - State(leader=True, containers=[container], relations=[relation]), + ctx.on.update_status(), + testing.State(leader=True, containers={container}, relations={relation}), ) # THEN the worker writes all tls data to the right locations on the container filesystem diff --git a/tests/test_coordinated_workers/test_worker_status.py b/tests/test_coordinated_workers/test_worker_status.py index 1d79d0c..018ab84 100644 --- a/tests/test_coordinated_workers/test_worker_status.py +++ b/tests/test_coordinated_workers/test_worker_status.py @@ -1,12 +1,12 @@ +import dataclasses from contextlib import ExitStack, contextmanager from functools import partial from unittest.mock import MagicMock, patch +import ops import pytest import tenacity -from ops import ActiveStatus, CharmBase, Framework, WaitingStatus -from ops.pebble import Layer -from scenario import Container, Context, ExecOutput, Relation, State +from ops import testing from cosl.coordinated_workers.interface import ClusterProviderAppData from cosl.coordinated_workers.worker import Worker, WorkerError @@ -28,7 +28,7 @@ def _urlopen_patch(url: str, resp: str, tls: bool): @contextmanager -def k8s_patch(status=ActiveStatus(), is_ready=True): +def k8s_patch(status=ops.ActiveStatus(), is_ready=True): with patch("lightkube.core.client.GenericSyncClient"): with patch.multiple( "cosl.coordinated_workers.worker.KubernetesComputeResourcesPatch", @@ -60,13 +60,13 @@ def patch_status_wait(): @pytest.fixture def ctx(tls): - class MyCharm(CharmBase): - def __init__(self, framework: Framework): + class MyCharm(ops.CharmBase): + def __init__(self, framework: ops.Framework): super().__init__(framework) self.worker = Worker( self, "workload", - lambda _: Layer( + lambda _: ops.pebble.Layer( { "summary": "summary", "services": {"service": {"summary": "summary", "override": "replace"}}, @@ -81,7 +81,7 @@ def __init__(self, framework: Framework): def _readiness_check_endpoint(self, _): return f"{'https' if tls else 'http'}://localhost:3200/ready" - return Context( + return testing.Context( MyCharm, meta={ "name": "lilith", @@ -102,12 +102,16 @@ def _readiness_check_endpoint(self, _): def base_state(request): app_data = {} ClusterProviderAppData(worker_config="some: yaml").dump(app_data) - return State( + return testing.State( leader=request.param, - containers=[ - Container("workload", exec_mock={("update-ca-certificates", "--fresh"): ExecOutput()}) - ], - relations=[Relation("cluster", remote_app_data=app_data)], + containers={ + testing.Container( + "workload", + can_connect=True, + execs={testing.Exec(("update-ca-certificates", "--fresh"))}, + ) + }, + relations={testing.Relation("cluster", remote_app_data=app_data)}, ) @@ -137,29 +141,32 @@ def config_on_disk(): @k8s_patch() def test_status_check_no_pebble(ctx, base_state, caplog): # GIVEN the container cannot connect - state = 
base_state.with_can_connect("workload", False) + state = dataclasses.replace( + base_state, containers={testing.Container("workload", can_connect=False)} + ) # WHEN we run any event - state_out = ctx.run("update_status", state) + state_out = ctx.run(ctx.on.update_status(), state) # THEN the charm sets blocked - assert state_out.unit_status == WaitingStatus("Waiting for `workload` container") + assert state_out.unit_status == ops.WaitingStatus("Waiting for `workload` container") # AND THEN the charm logs that the container isn't ready. assert "container cannot connect, skipping update_config." in caplog.messages @k8s_patch() def test_status_check_no_config(ctx, base_state, caplog): - state = base_state.with_can_connect("workload", True) # GIVEN there is no config file on disk # WHEN we run any event with patch( "cosl.coordinated_workers.worker.Worker._running_worker_config", new=lambda _: None ): - state_out = ctx.run("update_status", state) + state_out = ctx.run(ctx.on.update_status(), base_state) # THEN the charm sets blocked - assert state_out.unit_status == WaitingStatus("Waiting for coordinator to publish a config") + assert state_out.unit_status == ops.WaitingStatus( + "Waiting for coordinator to publish a config" + ) # AND THEN the charm logs that the config isn't on disk assert "Config file not on disk. Skipping status check." in caplog.messages @@ -170,12 +177,12 @@ def test_status_check_starting(ctx, base_state, tls): with endpoint_starting(tls): # AND GIVEN that the config is on disk with config_on_disk(): - # AND GIVEN that the container can connect - state = base_state.with_can_connect("workload", True) + # AND GIVEN that the container can connect (default in base_state) + state = base_state # WHEN we run any event - state_out = ctx.run("update_status", state) + state_out = ctx.run(ctx.on.update_status(), state) # THEN the charm sets waiting: Starting... - assert state_out.unit_status == WaitingStatus("Starting...") + assert state_out.unit_status == ops.WaitingStatus("Starting...") @k8s_patch() @@ -185,26 +192,26 @@ def test_status_check_ready(ctx, base_state, tls): # AND GIVEN that the config is on disk with config_on_disk(): # AND GIVEN that the container can connect - state = base_state.with_can_connect("workload", True) + state = base_state # WHEN we run any event - state_out = ctx.run("update_status", state) + state_out = ctx.run(ctx.on.update_status(), state) # THEN the charm sets waiting: Starting... 
- assert state_out.unit_status == ActiveStatus("read,write ready.") + assert state_out.unit_status == ops.ActiveStatus("read,write ready.") def test_status_no_endpoint(ctx, base_state, caplog): # GIVEN a charm doesn't pass an endpoint to Worker - class MyCharm(CharmBase): - def __init__(self, framework: Framework): + class MyCharm(ops.CharmBase): + def __init__(self, framework: ops.Framework): super().__init__(framework) self.worker = Worker( self, "workload", - lambda _: Layer({"services": {"foo": {"command": "foo"}}}), + lambda _: ops.pebble.Layer({"services": {"foo": {"command": "foo"}}}), {"cluster": "cluster"}, ) - ctx = Context( + ctx = testing.Context( MyCharm, meta={ "name": "damian", @@ -220,11 +227,11 @@ def __init__(self, framework: Framework): }, ) # AND GIVEN that the container can connect - state = base_state.with_can_connect("workload", True) + state = base_state # WHEN we run any event - state_out = ctx.run("update_status", state) + state_out = ctx.run(ctx.on.update_status(), state) # THEN the charm sets Active: ready, even though we have no idea whether the endpoint is ready. - assert state_out.unit_status == ActiveStatus("read,write ready.") + assert state_out.unit_status == ops.ActiveStatus("read,write ready.") def test_access_readiness_no_endpoint_raises(): @@ -235,7 +242,7 @@ def test_access_readiness_no_endpoint_raises(): worker = Worker( caller, "workload", - lambda _: Layer({"services": {"foo": {"command": "foo"}}}), + lambda _: ops.pebble.Layer({"services": {"foo": {"command": "foo"}}}), {"cluster": "cluster"}, ) @@ -247,10 +254,9 @@ def test_access_readiness_no_endpoint_raises(): def test_status_check_ready_with_patch(ctx, base_state, tls): with endpoint_ready(tls): with config_on_disk(): - with k8s_patch(status=WaitingStatus("waiting")): - state = base_state.with_can_connect("workload", True) - state_out = ctx.run("config_changed", state) - assert state_out.unit_status == WaitingStatus("waiting") - with k8s_patch(status=ActiveStatus("")): - state_out_out = ctx.run("update_status", state_out) - assert state_out_out.unit_status == ActiveStatus("read,write ready.") + with k8s_patch(status=ops.WaitingStatus("waiting")): + state_out = ctx.run(ctx.on.config_changed(), base_state) + assert state_out.unit_status == ops.WaitingStatus("waiting") + with k8s_patch(status=ops.ActiveStatus("")): + state_out_out = ctx.run(ctx.on.update_status(), state_out) + assert state_out_out.unit_status == ops.ActiveStatus("read,write ready.") diff --git a/tests/test_juju_topology_from_charm.py b/tests/test_juju_topology_from_charm.py index 8caba6b..664d096 100644 --- a/tests/test_juju_topology_from_charm.py +++ b/tests/test_juju_topology_from_charm.py @@ -4,19 +4,17 @@ from collections import OrderedDict import ops -from ops.charm import CharmBase from ops.testing import Harness from cosl.juju_topology import JujuTopology -class JujuTopologyCharm(CharmBase): +class JujuTopologyCharm(ops.CharmBase): pass class TestJujuTopology(unittest.TestCase): def setUp(self): - ops.testing.SIMULATE_CAN_CONNECT = True # type: ignore self.input = OrderedDict( [ ("model", "some-model"), diff --git a/tox.ini b/tox.ini index e5ad566..b73811b 100644 --- a/tox.ini +++ b/tox.ini @@ -58,7 +58,7 @@ commands = [testenv:static] description = Run static analysis checks deps = - ops + ops[testing] PyYAML typing_extensions pyright @@ -74,10 +74,9 @@ deps = fs pytest pytest-cov - ops + ops[testing] PyYAML typing_extensions - ops-scenario<7.0.0 cryptography jsonschema lightkube>=v0.15.4
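
Note on the migration pattern: every test in this patch moves from the standalone `scenario` package to the Scenario 7 API bundled as `ops.testing` (pulled in via the `ops[testing]` extra in tox.ini). The sketch below condenses the recurring changes into one self-contained example; `MyCharm`, the `workload` container and the `cluster` endpoint are placeholder names used only for illustration and are not part of the patch.

    import dataclasses

    import ops
    from ops import testing


    class MyCharm(ops.CharmBase):
        """Placeholder charm; stands in for the real coordinator/worker charms."""


    ctx = testing.Context(
        MyCharm,
        meta={
            "name": "foo",
            "containers": {"workload": {"type": "oci-image"}},
            "requires": {"cluster": {"interface": "cluster"}},
        },
    )

    # State components are frozen dataclasses and are passed as sets, not lists.
    container = testing.Container(
        "workload",
        can_connect=True,
        # Exec mocks replace the old exec_mock/ExecOutput mapping.
        execs={testing.Exec(("update-ca-certificates", "--fresh"), return_code=0)},
    )
    relation = testing.Relation("cluster", remote_app_data={"worker_config": "some: yaml"})
    state = testing.State(leader=True, containers={container}, relations={relation})

    # Events are built from ctx.on instead of being passed as strings.
    state_out = ctx.run(ctx.on.update_status(), state)
    assert state_out.get_container("workload").can_connect

    # State is immutable, so variants are derived with dataclasses.replace
    # rather than the removed State.replace / with_can_connect helpers.
    disconnected = dataclasses.replace(
        state, containers={testing.Container("workload", can_connect=False)}
    )

    # Calling the context replaces ctx.manager when a test needs to inspect
    # charm internals while the hook is running.
    with ctx(ctx.on.config_changed(), state) as mgr:
        charm = mgr.charm
        mgr.run()

The same renames drive the smaller mechanical changes throughout the diff: Mount(location=..., source=...), service_statuses instead of service_status, testing.Secret taking its content dict as the first argument, and UncaughtCharmError now imported from testing.errors.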