diff --git a/anta/tests/bfd.py b/anta/tests/bfd.py
index 382fb5cfb..aea8d07b0 100644
--- a/anta/tests/bfd.py
+++ b/anta/tests/bfd.py
@@ -8,10 +8,11 @@
 # mypy: disable-error-code=attr-defined
 from __future__ import annotations
 
+from datetime import datetime
 from ipaddress import IPv4Address
-from typing import Any, List
+from typing import Any, List, Optional
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 from anta.custom_types import BfdInterval, BfdMultiplier
 from anta.models import AntaCommand, AntaTest
@@ -153,3 +154,82 @@ def test(self) -> None:
             self.result.is_success()
         else:
             self.result.is_failure(f"Following BFD peers are not configured or timers are not correct:\n{failures}")
+
+
+class VerifyBFDPeersHealth(AntaTest):
+    """
+    This class verifies the health of IPv4 BFD peers across all VRFs.
+
+    It checks that no BFD peer is in the down state and that the discriminator value of the remote system is not zero.
+    Optionally, it can also verify that BFD peers have not been down before a specified threshold of hours.
+
+    Expected results:
+        * Success: The test will pass if all IPv4 BFD peers are up, the discriminator value of each remote system is non-zero,
+                   and the last downtime of each peer is above the defined threshold.
+        * Failure: The test will fail if any IPv4 BFD peer is down, the discriminator value of any remote system is zero,
+                   or the last downtime of any peer is below the defined threshold.
+    """
+
+    name = "VerifyBFDPeersHealth"
+    description = "Verifies the health of all IPv4 BFD peers."
+    categories = ["bfd"]
+    # revision 1 as later revision introduces additional nesting for type
+    commands = [AntaCommand(command="show bfd peers", revision=1), AntaCommand(command="show clock")]
+
+    class Input(AntaTest.Input):
+        """
+        This class defines the input parameters of the test case.
+        """
+
+        down_threshold: Optional[int] = Field(default=None, gt=0)
+        """Optional down threshold in hours to check if a BFD peer was down before those hours or not."""
+
+    @AntaTest.anta_test
+    def test(self) -> None:
+        # Initialize failure strings
+        down_failures = []
+        up_failures = []
+
+        # Extract the current timestamp and command output
+        clock_output = self.instance_commands[1].json_output
+        current_timestamp = clock_output["utcTime"]
+        bfd_output = self.instance_commands[0].json_output
+
+        # set the initial result
+        self.result.is_success()
+
+        # Check if any IPv4 BFD peer is configured
+        ipv4_neighbors_exist = any(vrf_data["ipv4Neighbors"] for vrf_data in bfd_output["vrfs"].values())
+        if not ipv4_neighbors_exist:
+            self.result.is_failure("No IPv4 BFD peers are configured for any VRF.")
+            return
+
+        # Iterate over IPv4 BFD peers
+        for vrf, vrf_data in bfd_output["vrfs"].items():
+            for peer, neighbor_data in vrf_data["ipv4Neighbors"].items():
+                for peer_data in neighbor_data["peerStats"].values():
+                    peer_status = peer_data["status"]
+                    remote_disc = peer_data["remoteDisc"]
+                    remote_disc_info = f" with remote disc {remote_disc}" if remote_disc == 0 else ""
+                    last_down = peer_data["lastDown"]
+                    hours_difference = (datetime.fromtimestamp(current_timestamp) - datetime.fromtimestamp(last_down)).total_seconds() / 3600
+
+                    # Check if peer status is not up
+                    if peer_status != "up":
+                        down_failures.append(f"{peer} is {peer_status} in {vrf} VRF{remote_disc_info}.")
+
+                    # Check if the last down is within the threshold
+                    elif self.inputs.down_threshold and hours_difference < self.inputs.down_threshold:
+                        up_failures.append(f"{peer} in {vrf} VRF was down {round(hours_difference)} hours ago{remote_disc_info}.")
+
+                    # Check if remote disc is 0
+                    elif remote_disc == 0:
+                        up_failures.append(f"{peer} in {vrf} VRF has remote disc {remote_disc}.")
+
+        # Check if there are any failures
+        if down_failures:
+            down_failures_str = "\n".join(down_failures)
+            self.result.is_failure(f"Following BFD peers are not up:\n{down_failures_str}")
+        if up_failures:
+            up_failures_str = "\n".join(up_failures)
+            self.result.is_failure(f"\nFollowing BFD peers were down:\n{up_failures_str}")
diff --git a/anta/tests/routing/generic.py b/anta/tests/routing/generic.py
index 45a653a9a..532b4bb75 100644
--- a/anta/tests/routing/generic.py
+++ b/anta/tests/routing/generic.py
@@ -79,31 +79,6 @@ def test(self) -> None:
         self.result.is_failure(f"routing-table has {total_routes} routes and not between min ({self.inputs.minimum}) and maximum ({self.inputs.maximum})")
 
 
-class VerifyBFD(AntaTest):
-    """
-    Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors).
-    """
-
-    name = "VerifyBFD"
-    description = "Verifies there is no BFD peer in down state (all VRF, IPv4 neighbors)."
-    categories = ["bfd"]
-    # revision 1 as later revision introduce additional nesting for type
-    commands = [AntaCommand(command="show bfd peers", revision=1)]
-
-    @AntaTest.anta_test
-    def test(self) -> None:
-        command_output = self.instance_commands[0].json_output
-        self.result.is_success()
-        for _, vrf_data in command_output["vrfs"].items():
-            for _, neighbor_data in vrf_data["ipv4Neighbors"].items():
-                for peer, peer_data in neighbor_data["peerStats"].items():
-                    if (peer_status := peer_data["status"]) != "up":
-                        failure_message = f"bfd state for peer '{peer}' is {peer_status} (expected up)."
-                        if (peer_l3intf := peer_data.get("l3intf")) is not None and peer_l3intf != "":
-                            failure_message += f" Interface: {peer_l3intf}."
-                        self.result.is_failure(failure_message)
-
-
 class VerifyRoutingTableEntry(AntaTest):
     """
     This test verifies that the provided routes are present in the routing table of a specified VRF.
diff --git a/docs/api/tests.bfd.md b/docs/api/tests.bfd.md
new file mode 100644
index 000000000..d28521fbc
--- /dev/null
+++ b/docs/api/tests.bfd.md
@@ -0,0 +1,13 @@
+
+# ANTA catalog for bfd tests
+
+::: anta.tests.bfd
+    options:
+      show_root_heading: false
+      show_root_toc_entry: false
+      merge_init_into_class: false
diff --git a/docs/api/tests.md b/docs/api/tests.md
index 78c6b1395..40c7d8ae9 100644
--- a/docs/api/tests.md
+++ b/docs/api/tests.md
@@ -10,6 +10,7 @@
 This section describes all the available tests provided by ANTA package.
 
 - [AAA](tests.aaa.md)
+- [BFD](tests.bfd.md)
 - [Configuration](tests.configuration.md)
 - [Connectivity](tests.connectivity.md)
 - [Field Notice](tests.field_notices.md)
diff --git a/examples/tests.yaml b/examples/tests.yaml
index a55bb93e7..a07b1f84d 100644
--- a/examples/tests.yaml
+++ b/examples/tests.yaml
@@ -69,6 +69,8 @@ anta.tests.bfd:
           tx_interval: 1200
           rx_interval: 1200
           multiplier: 3
+  - VerifyBFDPeersHealth:
+      down_threshold: 2
 
 anta.tests.configuration:
   - VerifyZeroTouch:
diff --git a/mkdocs.yml b/mkdocs.yml
index 79c21cc06..9dbc60ff5 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -173,6 +173,7 @@ nav:
   - Test Catalog Documentation:
     - Overview: api/tests.md
     - AAA: api/tests.aaa.md
+    - BFD: api/tests.bfd.md
     - Configuration: api/tests.configuration.md
     - Connectivity: api/tests.connectivity.md
     - Field Notices: api/tests.field_notices.md
diff --git a/tests/units/anta_tests/routing/test_generic.py b/tests/units/anta_tests/routing/test_generic.py
index 1a1889a9f..90e70f851 100644
--- a/tests/units/anta_tests/routing/test_generic.py
+++ b/tests/units/anta_tests/routing/test_generic.py
@@ -8,7 +8,7 @@
 
 from typing import Any
 
-from anta.tests.routing.generic import VerifyBFD, VerifyRoutingProtocolModel, VerifyRoutingTableEntry, VerifyRoutingTableSize
+from anta.tests.routing.generic import VerifyRoutingProtocolModel, VerifyRoutingTableEntry, VerifyRoutingTableSize
 from tests.lib.anta import test  # noqa: F401; pylint: disable=W0611
 
 DATA: list[dict[str, Any]] = [
@@ -77,127 +77,6 @@
             "messages": ["Minimum 666 is greater than maximum 42"],
         },
     },
-    {
-        "name": "success-no-peer",
-        "test": VerifyBFD,
-        "eos_data": [{"vrfs": {}}],
-        "inputs": None,
-        "expected": {"result": "success"},
-    },
-    {
-        "name": "success-peers-up",
-        "test": VerifyBFD,
-        "eos_data": [
-            {
-                "vrfs": {
-                    "default": {
-                        "ipv6Neighbors": {},
-                        "ipv4Neighbors": {
-                            "7.7.7.7": {
-                                "peerStats": {
-                                    "": {
-                                        "status": "up",
-                                        "authType": "None",
-                                        "kernelIfIndex": 0,
-                                        "lastDiag": "diagNone",
-                                        "authProfileName": "",
-                                        "lastUp": 1683288421.669188,
-                                        "remoteDisc": 345332116,
-                                        "sessType": "sessionTypeMultihop",
-                                        "localDisc": 1654273918,
-                                        "lastDown": 0.0,
-                                        "l3intf": "",
-                                        "tunnelId": 0,
-                                    }
-                                }
-                            },
-                            "10.3.0.1": {
-                                "peerStats": {
-                                    "Ethernet1": {
-                                        "status": "up",
-                                        "authType": "None",
-                                        "kernelIfIndex": 11,
-                                        "lastDiag": "diagNone",
-                                        "authProfileName": "",
-                                        "lastUp": 1683288900.004889,
-                                        "remoteDisc": 1017672851,
-                                        "sessType": "sessionTypeNormal",
-                                        "localDisc": 4269977256,
-                                        "lastDown": 0.0,
-                                        "l3intf": "Ethernet1",
-                                        "tunnelId": 0,
-                                    }
-                                }
-                            },
-                        },
-                        "ipv4ReflectorNeighbors": {},
-                        "ipv6ReflectorNeighbors": {},
-                        "ipv6InitiatorNeighbors": {},
-                        "ipv4InitiatorNeighbors": {},
-                    }
-                }
-            }
-        ],
-        "inputs": None,
-        "expected": {"result": "success"},
-    },
-    {
-        "name": "failure",
-        "test": VerifyBFD,
-        "eos_data": [
-            {
-                "vrfs": {
-                    "default": {
-                        "ipv6Neighbors": {},
-                        "ipv4Neighbors": {
-                            "7.7.7.7": {
-                                "peerStats": {
-                                    "": {
-                                        "status": "down",
-                                        "authType": "None",
-                                        "kernelIfIndex": 0,
-                                        "lastDiag": "diagNone",
-                                        "authProfileName": "",
-                                        "lastUp": 1683288421.669188,
-                                        "remoteDisc": 345332116,
-                                        "sessType": "sessionTypeMultihop",
-                                        "localDisc": 1654273918,
-                                        "lastDown": 0.0,
-                                        "l3intf": "",
-                                        "tunnelId": 0,
-                                    }
-                                }
-                            },
-                            "10.3.0.1": {
-                                "peerStats": {
-                                    "Ethernet1": {
-                                        "status": "up",
-                                        "authType": "None",
-                                        "kernelIfIndex": 11,
-                                        "lastDiag": "diagNone",
-                                        "authProfileName": "",
-                                        "lastUp": 1683288900.004889,
-                                        "remoteDisc": 1017672851,
-                                        "sessType": "sessionTypeNormal",
-                                        "localDisc": 4269977256,
-                                        "lastDown": 0.0,
-                                        "l3intf": "Ethernet1",
-                                        "tunnelId": 0,
-                                    }
-                                }
-                            },
-                        },
-                        "ipv4ReflectorNeighbors": {},
-                        "ipv6ReflectorNeighbors": {},
-                        "ipv6InitiatorNeighbors": {},
-                        "ipv4InitiatorNeighbors": {},
-                    }
-                }
-            }
-        ],
-        "inputs": None,
-        "expected": {"result": "failure", "messages": ["bfd state for peer '' is down (expected up)."]},
-    },
     {
         "name": "success",
         "test": VerifyRoutingTableEntry,
diff --git a/tests/units/anta_tests/test_bfd.py b/tests/units/anta_tests/test_bfd.py
index ff00199af..67bb0b47a 100644
--- a/tests/units/anta_tests/test_bfd.py
+++ b/tests/units/anta_tests/test_bfd.py
@@ -11,7 +11,7 @@
 
 # pylint: disable=C0413
 # because of the patch above
-from anta.tests.bfd import VerifyBFDPeersIntervals, VerifyBFDSpecificPeers  # noqa: E402
+from anta.tests.bfd import VerifyBFDPeersHealth, VerifyBFDPeersIntervals, VerifyBFDSpecificPeers  # noqa: E402
 from tests.lib.anta import test  # noqa: F401; pylint: disable=W0611
 
 DATA: list[dict[str, Any]] = [
@@ -290,4 +290,234 @@
             ],
         },
     },
+    {
+        "name": "success",
+        "test": VerifyBFDPeersHealth,
+        "eos_data": [
+            {
+                "vrfs": {
+                    "default": {
+                        "ipv4Neighbors": {
+                            "192.0.255.7": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "up",
+                                        "remoteDisc": 3940685114,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "",
+                                    }
+                                }
+                            },
+                        },
+                        "ipv6Neighbors": {},
+                    },
+                    "MGMT": {
+                        "ipv4Neighbors": {
+                            "192.0.255.71": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "up",
+                                        "remoteDisc": 3940685114,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "",
+                                    }
+                                }
+                            },
+                        },
+                        "ipv6Neighbors": {},
+                    },
+                }
+            },
+            {
+                "utcTime": 1703667348.111288,
+            },
+        ],
+        "inputs": {"down_threshold": 2},
+        "expected": {"result": "success"},
+    },
+    {
+        "name": "failure-no-peer",
+        "test": VerifyBFDPeersHealth,
+        "eos_data": [
+            {
+                "vrfs": {
+                    "MGMT": {
+                        "ipv6Neighbors": {},
+                        "ipv4Neighbors": {},
+                    },
+                    "default": {
+                        "ipv6Neighbors": {},
+                        "ipv4Neighbors": {},
+                    },
+                }
+            },
+            {
+                "utcTime": 1703658481.8778424,
+            },
+        ],
+        "inputs": None,
+        "expected": {
+            "result": "failure",
+            "messages": ["No IPv4 BFD peers are configured for any VRF."],
+        },
+    },
+    {
+        "name": "failure-session-down",
+        "test": VerifyBFDPeersHealth,
+        "eos_data": [
+            {
+                "vrfs": {
+                    "default": {
+                        "ipv4Neighbors": {
+                            "192.0.255.7": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "down",
+                                        "remoteDisc": 0,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "",
+                                    }
+                                }
+                            },
+                            "192.0.255.70": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "up",
+                                        "remoteDisc": 3940685114,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "",
+                                    }
+                                }
+                            },
+                        },
+                        "ipv6Neighbors": {},
+                    },
+                    "MGMT": {
+                        "ipv4Neighbors": {
+                            "192.0.255.71": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "down",
+                                        "remoteDisc": 0,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "",
+                                    }
+                                }
+                            },
+                        },
+                        "ipv6Neighbors": {},
+                    },
+                }
+            },
+            {
+                "utcTime": 1703658481.8778424,
+            },
+        ],
+        "inputs": {},
+        "expected": {
+            "result": "failure",
+            "messages": [
+                "Following BFD peers are not up:\n192.0.255.7 is down in default VRF with remote disc 0.\n192.0.255.71 is down in MGMT VRF with remote disc 0."
+            ],
+        },
+    },
+    {
+        "name": "failure-session-up-disc",
+        "test": VerifyBFDPeersHealth,
+        "eos_data": [
+            {
+                "vrfs": {
+                    "default": {
+                        "ipv4Neighbors": {
+                            "192.0.255.7": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "up",
+                                        "remoteDisc": 0,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "Ethernet2",
+                                    }
+                                }
+                            },
+                            "192.0.255.71": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "up",
+                                        "remoteDisc": 0,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "Ethernet2",
+                                    }
+                                }
+                            },
+                        },
+                        "ipv6Neighbors": {},
+                    }
+                }
+            },
+            {
+                "utcTime": 1703658481.8778424,
+            },
+        ],
+        "inputs": {},
+        "expected": {
+            "result": "failure",
+            "messages": ["Following BFD peers were down:\n192.0.255.7 in default VRF has remote disc 0.\n192.0.255.71 in default VRF has remote disc 0."],
+        },
+    },
+    {
+        "name": "failure-last-down",
+        "test": VerifyBFDPeersHealth,
+        "eos_data": [
+            {
+                "vrfs": {
+                    "default": {
+                        "ipv4Neighbors": {
+                            "192.0.255.7": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "up",
+                                        "remoteDisc": 3940685114,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "",
+                                    }
+                                }
+                            },
+                            "192.0.255.71": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "up",
+                                        "remoteDisc": 3940685114,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "",
+                                    }
+                                }
+                            },
+                            "192.0.255.17": {
+                                "peerStats": {
+                                    "": {
+                                        "status": "up",
+                                        "remoteDisc": 3940685114,
+                                        "lastDown": 1703657258.652725,
+                                        "l3intf": "",
+                                    }
+                                }
+                            },
+                        },
+                        "ipv6Neighbors": {},
+                    }
+                }
+            },
+            {
+                "utcTime": 1703667348.111288,
+            },
+        ],
+        "inputs": {"down_threshold": 4},
+        "expected": {
+            "result": "failure",
+            "messages": [
+                "Following BFD peers were down:\n192.0.255.7 in default VRF was down 3 hours ago.\n"
+                "192.0.255.71 in default VRF was down 3 hours ago.\n192.0.255.17 in default VRF was down 3 hours ago."
+            ],
+        },
+    },
 ]