Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: istio-pilot's handling of incomplete Gateway Services #263

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 24 additions & 5 deletions charms/istio-pilot/src/charm.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,12 @@ def reconcile(self, event):
except ErrorWithStatus as err:
handled_errors.append(err)

try:
# If Gateway Service is not ready, notify user
self._assert_gateway_service_status()
except ErrorWithStatus as err:
handled_errors.append(err)

try:
self._send_gateway_info()
except ErrorWithStatus as err:
Expand Down Expand Up @@ -360,6 +366,17 @@ def upgrade_charm(self, _):
self.log.info("Upgrade complete.")
self.unit.status = ActiveStatus()

def _assert_gateway_service_status(self):
    """Assert that the ingress gateway Service is ready, raising ErrorWithStatus if not.

    Checks the `_is_gateway_service_up` property (True when the gateway Service exists
    and has an external address). If the Service is missing or has no external IP —
    for example when the cluster has no LoadBalancer provider — raises an
    ErrorWithStatus carrying a WaitingStatus so the charm reports a transient,
    user-visible condition rather than failing hard.

    Raises:
        ErrorWithStatus: with WaitingStatus, when the gateway Service is not ready.
    """
    if not self._is_gateway_service_up:
        raise ErrorWithStatus(
            # Fixed: the opening quote around the service name was never closed.
            f"Gateway Service '{self.model.config['gateway-service-name']}'"
            f" in namespace {self.model.name} is missing or does not have an"
            f" external IP. If this persists, there may be a problem with"
            f" the Service.",
            WaitingStatus,
        )

def _check_leader(self):
"""Check if this unit is a leader."""
if not self.unit.is_leader():
Expand Down Expand Up @@ -647,7 +664,7 @@ def _report_handled_errors(self, errors):
)
self._log_and_set_status(status_to_publish)
for i, error in enumerate(errors):
self.log.info(f"Handled error {i}/{len(errors)}: {error.status}")
self.log.info(f"Handled error {i+1}/{len(errors)}: {error.status}")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

you can use enumerate(errors, start=1)

Suggested change
self.log.info(f"Handled error {i+1}/{len(errors)}: {error.status}")
self.log.info(f"Handled error {i}/{len(errors)}: {error.status}")

else:
self.unit.status = ActiveStatus()

Expand Down Expand Up @@ -822,11 +839,13 @@ def _get_address_from_loadbalancer(svc):
(str): The hostname or IP address of the LoadBalancer service
"""
ingresses = svc.status.loadBalancer.ingress

# May happen due to the Kubernetes cluster not having a LoadBalancer provider
if ingresses is None or len(ingresses) == 0:
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

?

Suggested change
if ingresses is None or len(ingresses) == 0:
if not ingresses:

return None

if len(ingresses) != 1:
if len(ingresses) == 0:
return None
else:
raise ValueError("Unknown situation - LoadBalancer service has more than one ingress")
raise ValueError("Unknown situation - LoadBalancer service has more than one ingress")

ingress = svc.status.loadBalancer.ingress[0]
if getattr(ingress, "hostname", None) is not None:
Expand Down
39 changes: 39 additions & 0 deletions charms/istio-pilot/tests/unit/test_charm.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,9 @@ def all_operator_reconcile_handlers_mocked(mocker):
"_reconcile_ingress_auth": mocker.patch("charm.Operator._reconcile_ingress_auth"),
"_reconcile_gateway": mocker.patch("charm.Operator._reconcile_gateway"),
"_remove_gateway": mocker.patch("charm.Operator._remove_gateway"),
"_assert_gateway_service_status": mocker.patch(
"charm.Operator._assert_gateway_service_status"
),
"_send_gateway_info": mocker.patch("charm.Operator._send_gateway_info"),
"_get_ingress_data": mocker.patch("charm.Operator._get_ingress_data"),
"_reconcile_ingress": mocker.patch("charm.Operator._reconcile_ingress"),
Expand Down Expand Up @@ -259,8 +262,10 @@ def test_ingress_auth_and_gateway(
krh_lightkube_client.reset_mock()

@patch("charm.Operator._handle_istio_pilot_relation")
@patch("charm.Operator._is_gateway_service_up", new_callable=PropertyMock)
def test_ingress_relation(
self,
mocked_is_gateway_service_up,
mocked_handle_istio_pilot_relation,
harness,
mocked_lightkube_client,
Expand All @@ -276,6 +281,7 @@ def test_ingress_relation(

"""
krh_class, krh_lightkube_client = kubernetes_resource_handler_with_client
mocked_is_gateway_service_up.return_value = True

model_name = "my-model"
gateway_name = "my-gateway"
Expand Down Expand Up @@ -450,6 +456,24 @@ def test_gateway_port(self, ssl_crt, ssl_key, expected_port, expected_context, h
gateway_port = harness.charm._gateway_port
assert gateway_port == expected_port

@pytest.mark.parametrize(
    "is_gateway_service_up, expected_context",
    [
        # Service ready: the assertion helper should return without raising.
        (True, does_not_raise()),
        # Service missing / no external IP: helper must raise ErrorWithStatus.
        (False, pytest.raises(ErrorWithStatus)),
    ],
)
@patch("charm.Operator._is_gateway_service_up", new_callable=PropertyMock)
def test_assert_gateway_service_status(
    self, mocked_is_gateway_service_up, is_gateway_service_up, expected_context, harness
):
    """Tests whether _assert_gateway_service_status raises as expected when service is down."""
    harness.begin()
    # Drive the property the helper inspects; the helper itself is exercised for real.
    mocked_is_gateway_service_up.return_value = is_gateway_service_up

    with expected_context:
        harness.charm._assert_gateway_service_status()

@pytest.mark.parametrize(
"lightkube_client_get_side_effect, expected_is_up, context_raised",
[
Expand Down Expand Up @@ -486,6 +510,7 @@ def test_is_gateway_object_up(
("mock_loadbalancer_ip_service", True),
("mock_loadbalancer_hostname_service_not_ready", False),
("mock_loadbalancer_ip_service_not_ready", False),
("mock_loadbalancer_ip_service_missing", False),
],
)
def test_is_gateway_service_up(self, mock_service_fixture, is_gateway_up, harness, request):
Expand Down Expand Up @@ -1285,6 +1310,20 @@ def mock_loadbalancer_ip_service_not_ready():
return mock_nodeport_service


@pytest.fixture()
def mock_loadbalancer_ip_service_missing():
    """This happens if LoadBalancer external IP is <pending> like if there is no provisioner."""
    # A LoadBalancer-type Service whose status has an empty loadBalancer stanza,
    # i.e. no ingress entries were ever provisioned.
    service_manifest = {
        "apiVersion": "v1",
        "kind": "Service",
        "status": {"loadBalancer": {}},
        "spec": {"type": "LoadBalancer", "clusterIP": "10.10.10.10"},
    }
    return codecs.from_dict(service_manifest)


@pytest.fixture()
def mock_loadbalancer_hostname_service_not_ready():
mock_nodeport_service = codecs.from_dict(
Expand Down