diff --git a/neighsyncd/neighsync.cpp b/neighsyncd/neighsync.cpp
index 054f13a470..3da6a5a84f 100644
--- a/neighsyncd/neighsync.cpp
+++ b/neighsyncd/neighsync.cpp
@@ -17,9 +17,10 @@
 using namespace std;
 using namespace swss;
 
-NeighSync::NeighSync(RedisPipeline *pipelineAppDB, DBConnector *stateDb) :
+NeighSync::NeighSync(RedisPipeline *pipelineAppDB, DBConnector *stateDb, DBConnector *cfgDb) :
     m_neighTable(pipelineAppDB, APP_NEIGH_TABLE_NAME),
-    m_stateNeighRestoreTable(stateDb, STATE_NEIGH_RESTORE_TABLE_NAME)
+    m_stateNeighRestoreTable(stateDb, STATE_NEIGH_RESTORE_TABLE_NAME),
+    m_cfgPeerSwitchTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME)
 {
     m_AppRestartAssist = new AppRestartAssist(pipelineAppDB, "neighsyncd", "swss", DEFAULT_NEIGHSYNC_WARMSTART_TIMER);
     if (m_AppRestartAssist)
@@ -87,14 +88,39 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj)
         return;
     }
 
+    std::vector<std::string> peerSwitchKeys;
     bool delete_key = false;
-    if ((nlmsg_type == RTM_DELNEIGH) || (state == NUD_INCOMPLETE) ||
-        (state == NUD_FAILED))
+    bool use_zero_mac = false;
+    m_cfgPeerSwitchTable.getKeys(peerSwitchKeys);
+    bool is_dualtor = peerSwitchKeys.size() > 0;
+    if (is_dualtor && (state == NUD_INCOMPLETE || state == NUD_FAILED))
+    {
+        SWSS_LOG_INFO("Unable to resolve %s, setting zero MAC", key.c_str());
+        use_zero_mac = true;
+
+        // Unresolved neighbor deletion on dual ToR devices must be handled
+        // separately, otherwise delete_key is never set to true
+        // and neighorch is never able to remove the neighbor
+        if (nlmsg_type == RTM_DELNEIGH)
+        {
+            delete_key = true;
+        }
+    }
+    else if ((nlmsg_type == RTM_DELNEIGH) ||
+             (state == NUD_INCOMPLETE) || (state == NUD_FAILED))
     {
         delete_key = true;
     }
 
-    nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE);
+    if (use_zero_mac)
+    {
+        std::string zero_mac = "00:00:00:00:00:00";
+        strncpy(macStr, zero_mac.c_str(), zero_mac.length());
+    }
+    else
+    {
+        nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE);
+    }
 
     /* Ignore neighbor entries with Broadcast Mac - Trigger for directed broadcast */
     if (!delete_key && (MacAddress(macStr) == MacAddress("ff:ff:ff:ff:ff:ff")))
diff --git a/neighsyncd/neighsync.h b/neighsyncd/neighsync.h
index 387c849f30..b6d431721a 100644
--- a/neighsyncd/neighsync.h
+++ b/neighsyncd/neighsync.h
@@ -23,7 +23,7 @@ class NeighSync : public NetMsg
 public:
     enum { MAX_ADDR_SIZE = 64 };
 
-    NeighSync(RedisPipeline *pipelineAppDB, DBConnector *stateDb);
+    NeighSync(RedisPipeline *pipelineAppDB, DBConnector *stateDb, DBConnector *cfgDb);
     ~NeighSync();
 
     virtual void onMsg(int nlmsg_type, struct nl_object *obj);
@@ -36,7 +36,7 @@ class NeighSync : public NetMsg
     }
 
 private:
-    Table m_stateNeighRestoreTable;
+    Table m_stateNeighRestoreTable, m_cfgPeerSwitchTable;
     ProducerStateTable m_neighTable;
     AppRestartAssist *m_AppRestartAssist;
 };
diff --git a/neighsyncd/neighsyncd.cpp b/neighsyncd/neighsyncd.cpp
index 99e86b2ef9..a0882c28e2 100644
--- a/neighsyncd/neighsyncd.cpp
+++ b/neighsyncd/neighsyncd.cpp
@@ -18,8 +18,9 @@ int main(int argc, char **argv)
     DBConnector appDb("APPL_DB", 0);
     RedisPipeline pipelineAppDB(&appDb);
     DBConnector stateDb("STATE_DB", 0);
+    DBConnector cfgDb("CONFIG_DB", 0);
 
-    NeighSync sync(&pipelineAppDB, &stateDb);
+    NeighSync sync(&pipelineAppDB, &stateDb, &cfgDb);
 
     NetDispatcher::getInstance().registerMessageHandler(RTM_NEWNEIGH, &sync);
     NetDispatcher::getInstance().registerMessageHandler(RTM_DELNEIGH, &sync);
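The neighsyncd change above hinges on one decision: on a dual ToR device (detected by any entry in CONFIG_DB's PEER_SWITCH table), an unresolvable kernel neighbor is published with a zero MAC instead of simply being dropped. A minimal sketch of that decision table, in Python; the state strings stand in for the libnl NUD_* constants, and this is an illustration, not the real implementation:

    # Sketch of onMsg()'s decision logic; the NUD_* strings are stand-ins
    # for the real netlink constants.
    def neigh_action(nlmsg_is_del, state, is_dualtor):
        """Return (delete_key, use_zero_mac) as onMsg() computes them."""
        unresolved = state in ("NUD_INCOMPLETE", "NUD_FAILED")
        if is_dualtor and unresolved:
            # Publish the neighbor with a zero MAC so orchagent can install
            # a tunnel route; deletions must still be honored.
            return (nlmsg_is_del, True)
        return (nlmsg_is_del or unresolved, False)

    assert neigh_action(False, "NUD_FAILED", is_dualtor=True) == (False, True)
    assert neigh_action(False, "NUD_FAILED", is_dualtor=False) == (True, False)
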
diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp
index f2e245f13b..dd3ed79126 100644
--- a/orchagent/muxorch.cpp
+++ b/orchagent/muxorch.cpp
@@ -329,6 +329,7 @@ MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress
 
     /* Set initial state to "standby" */
     stateStandby();
+    state_ = MuxState::MUX_STATE_STANDBY;
 }
 
 bool MuxCable::stateInitActive()
@@ -1025,6 +1026,37 @@ void MuxOrch::updateNeighbor(const NeighborUpdate& update)
         return;
     }
 
+    auto standalone_tunnel_neigh_it = standalone_tunnel_neighbors_.find(update.entry.ip_address);
+    // Handling zero MAC neighbor updates
+    if (!update.mac)
+    {
+        /* For neighbors that were previously resolvable but are now unresolvable,
+         * we expect such neighbor entries to be deleted prior to a zero MAC update
+         * arriving for that same neighbor.
+         */
+        if (update.add)
+        {
+            if (standalone_tunnel_neigh_it == standalone_tunnel_neighbors_.end())
+            {
+                createStandaloneTunnelRoute(update.entry.ip_address);
+            }
+            /* If the MAC address in the neighbor entry is zero but the neighbor IP
+             * is already present in standalone_tunnel_neighbors_, assume we have already
+             * added a tunnel route for it and exit early
+             */
+            return;
+        }
+    }
+
+    /* If the update operation for a neighbor contains a non-zero MAC, we must
+     * make sure to remove any existing tunnel routes to prevent conflicts.
+     * This block also covers the case of neighbor deletion.
+     */
+    if (standalone_tunnel_neigh_it != standalone_tunnel_neighbors_.end())
+    {
+        removeStandaloneTunnelRoute(update.entry.ip_address);
+    }
+
     for (auto it = mux_cable_tb_.begin(); it != mux_cable_tb_.end(); it++)
     {
         MuxCable* ptr = it->second.get();
@@ -1292,6 +1324,27 @@ bool MuxOrch::delOperation(const Request& request)
     return true;
 }
 
+void MuxOrch::createStandaloneTunnelRoute(IpAddress neighborIp)
+{
+    SWSS_LOG_INFO("Creating standalone tunnel route for neighbor %s", neighborIp.to_string().c_str());
+    sai_object_id_t tunnel_nexthop = getNextHopTunnelId(MUX_TUNNEL, mux_peer_switch_);
+    if (tunnel_nexthop == SAI_NULL_OBJECT_ID) {
+        SWSS_LOG_NOTICE("%s nexthop not created yet, ignoring tunnel route creation for %s", MUX_TUNNEL, neighborIp.to_string().c_str());
+        return;
+    }
+    IpPrefix pfx = neighborIp.to_string();
+    create_route(pfx, tunnel_nexthop);
+    standalone_tunnel_neighbors_.insert(neighborIp);
+}
+
+void MuxOrch::removeStandaloneTunnelRoute(IpAddress neighborIp)
+{
+    SWSS_LOG_INFO("Removing standalone tunnel route for neighbor %s", neighborIp.to_string().c_str());
+    IpPrefix pfx = neighborIp.to_string();
+    remove_route(pfx);
+    standalone_tunnel_neighbors_.erase(neighborIp);
+}
+
 MuxCableOrch::MuxCableOrch(DBConnector *db, DBConnector *sdb, const std::string& tableName):
          Orch2(db, tableName, request_),
          app_tunnel_route_table_(db, APP_TUNNEL_ROUTE_TABLE_NAME),
diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h
index fa8b058830..98bc09dbe6 100644
--- a/orchagent/muxorch.h
+++ b/orchagent/muxorch.h
@@ -196,6 +196,13 @@ class MuxOrch : public Orch2, public Observer, public Subject
 
     bool getMuxPort(const MacAddress&, const string&, string&);
 
+    /***
+     * Methods for managing tunnel routes for neighbor IPs not associated
+     * with a specific mux cable
+     ***/
+    void createStandaloneTunnelRoute(IpAddress neighborIp);
+    void removeStandaloneTunnelRoute(IpAddress neighborIp);
+
     IpAddress mux_peer_switch_ = 0x0;
     sai_object_id_t mux_tunnel_id_ = SAI_NULL_OBJECT_ID;
 
@@ -210,6 +217,7 @@ class MuxOrch : public Orch2, public Observer, public Subject
     FdbOrch *fdb_orch_;
 
     MuxCfgRequest request_;
+    std::set<IpAddress> standalone_tunnel_neighbors_;
 };
 
 const request_description_t mux_cable_request_description = {
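The MuxOrch::updateNeighbor() logic above reduces to: a zero-MAC add creates a standalone tunnel route exactly once, and any non-zero-MAC update or deletion tears it down before normal per-cable handling resumes. A plain-Python model of that dispatch, with invented names, for illustration only:

    # Sketch of updateNeighbor()'s zero-MAC handling; not the real orchagent types.
    def on_neighbor_update(add, zero_mac, tracked_ips, ip):
        if zero_mac and add:
            if ip not in tracked_ips:
                tracked_ips.add(ip)          # createStandaloneTunnelRoute(ip)
            return "tunnel-route"            # early return: skip mux cable handling
        if ip in tracked_ips:
            tracked_ips.discard(ip)          # removeStandaloneTunnelRoute(ip)
        return "mux-cable-handling"          # fall through to per-cable update

    tracked = set()
    assert on_neighbor_update(True, True, tracked, "192.168.0.200") == "tunnel-route"
    assert on_neighbor_update(True, False, tracked, "192.168.0.200") == "mux-cable-handling"
    assert not tracked
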
diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp
index 7188b91f49..e3a2003be5 100644
--- a/orchagent/neighorch.cpp
+++ b/orchagent/neighorch.cpp
@@ -547,7 +547,16 @@ void NeighOrch::doTask(Consumer &consumer)
 
             if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end() || m_syncdNeighbors[neighbor_entry].mac != mac_address)
             {
-                if (addNeighbor(neighbor_entry, mac_address))
+                // only for unresolvable neighbors that are new
+                if (!mac_address)
+                {
+                    if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end())
+                    {
+                        addZeroMacTunnelRoute(neighbor_entry, mac_address);
+                    }
+                    it = consumer.m_toSync.erase(it);
+                }
+                else if (addNeighbor(neighbor_entry, mac_address))
                 {
                     it = consumer.m_toSync.erase(it);
                 }
@@ -954,3 +963,12 @@ bool NeighOrch::removeTunnelNextHop(const NextHopKey& nh)
     return true;
 }
 
+void NeighOrch::addZeroMacTunnelRoute(const NeighborEntry& entry, const MacAddress& mac)
+{
+    SWSS_LOG_INFO("Creating tunnel route for neighbor %s", entry.ip_address.to_string().c_str());
+    MuxOrch* mux_orch = gDirectory.get<MuxOrch*>();
+    NeighborUpdate update = {entry, mac, true};
+    mux_orch->update(SUBJECT_TYPE_NEIGH_CHANGE, static_cast<void *>(&update));
+    m_syncdNeighbors[entry] = { mac, false };
+}
+
diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h
index f201677380..c8badcd854 100644
--- a/orchagent/neighorch.h
+++ b/orchagent/neighorch.h
@@ -99,6 +99,8 @@ class NeighOrch : public Orch, public Subject, public Observer
 
     bool resolveNeighborEntry(const NeighborEntry &, const MacAddress &);
     void clearResolvedNeighborEntry(const NeighborEntry &);
+
+    void addZeroMacTunnelRoute(const NeighborEntry &, const MacAddress &);
 };
 
 #endif /* SWSS_NEIGHORCH_H */
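Seen purely from the database side, the neighsyncd -> neighorch -> muxorch chain above is driven by a single APPL_DB entry. A sketch of what neighsyncd effectively publishes for an unresolved neighbor, mirroring the swsscommon calls used by the tests later in this patch; the redis socket path and the Vlan1000/IP values are illustrative assumptions:

    # Roughly the APPL_DB write neighsyncd performs for an unresolved neighbor
    # on a dual ToR device; socket path and key values are examples only.
    from swsscommon import swsscommon

    appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, "/var/run/redis/redis.sock", 0)
    neigh_tbl = swsscommon.ProducerStateTable(appl_db, "NEIGH_TABLE")
    fvs = swsscommon.FieldValuePairs([("neigh", "00:00:00:00:00:00"), ("family", "IPv4")])
    neigh_tbl.set("Vlan1000:192.168.0.200", fvs)
    # neighorch sees the zero MAC and, instead of programming an ASIC neighbor,
    # asks MuxOrch to install a standalone tunnel route to the peer ToR.
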
-@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_lag_manager(request, dvs): request.cls.dvs_lag = dvs_lag.DVSLag(dvs.get_asic_db(), dvs.get_config_db()) -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_vlan_manager(request, dvs): request.cls.dvs_vlan = dvs_vlan.DVSVlan(dvs.get_asic_db(), dvs.get_config_db(), @@ -1617,7 +1617,7 @@ def dvs_vlan_manager(request, dvs): dvs.get_app_db()) -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_mirror_manager(request, dvs): request.cls.dvs_mirror = dvs_mirror.DVSMirror(dvs.get_asic_db(), dvs.get_config_db(), @@ -1626,7 +1626,7 @@ def dvs_mirror_manager(request, dvs): dvs.get_app_db()) -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_policer_manager(request, dvs): request.cls.dvs_policer = dvs_policer.DVSPolicer(dvs.get_asic_db(), dvs.get_config_db()) @@ -1647,7 +1647,7 @@ def remove_dpb_config_file(dvs): dvs.runcmd(cmd) -@pytest.yield_fixture(scope="module") +@pytest.fixture(scope="module") def dpb_setup_fixture(dvs): create_dpb_config_file(dvs) if dvs.vct is None: diff --git a/tests/mux_neigh_miss_tests.py b/tests/mux_neigh_miss_tests.py new file mode 100644 index 0000000000..d8c32a29e0 --- /dev/null +++ b/tests/mux_neigh_miss_tests.py @@ -0,0 +1,243 @@ +""" +Test scenarios and related constants for dualtor neighbor miss. + +Each item in NEIGH_MISS_TESTS is a test case, comprising of a list of steps. +Each step is a dictionary containing the action to be performed during that +step, as well as the expected result. +The expected result itself is another dictionary, containing the following +attributes: + - (bool) EXPECT_ROUTE: if we expect a route entry in ASIC_DB + - (bool) EXPECT_NEIGH: if we expect a neighbor entry in ASIC_DB + - (bool) REAL_MAC: If a real MAC address is expected in the + APPL_DB neighbor table entry, as opposed + to a zero/empty MAC + +All expected result attributes will be verified agains the DVS +after each test step is executed + +Note: EXPECT_ROUTE and EXPECT_NEIGH cannot both be True + +Note: for the purposes of this test, there is a distinction made + between 'server' IPs and 'neighbor' IPs. Server IPs are + IP addresses explicitly configured on a specific mux cable + interface in the MUX_CABLE table in config DB. Neighbor IPs + are any other IPs within the VLAN subnet. 
+ + +""" + +__all__ = [ + 'TEST_ACTION', 'EXPECTED_RESULT', 'ACTIVE', 'STANDBY', 'PING_SERV', 'PING_NEIGH', + 'RESOLVE_ENTRY', 'DELETE_ENTRY', 'EXPECT_ROUTE', 'EXPECT_NEIGH', 'REAL_MAC', + 'INTF', 'IP', 'MAC', 'NEIGH_MISS_TESTS' +] + +TEST_ACTION = 'action' +EXPECTED_RESULT = 'result' + +# Possible test actions +ACTIVE = 'active' # Switch the test interface to active +STANDBY = 'standby' # Switch the test interface to standby +PING_SERV = 'ping_serv' # Ping the server mux cable IP, used to trigger a netlink fail message +PING_NEIGH = 'ping_neigh' # Ping the neighbor IP (not configured on a specific mux cable port) +RESOLVE_ENTRY = 'resolve_entry' # Resolve the test IP neighbor entry in the kernel +DELETE_ENTRY = 'delete_entry' # Delete the test IP neighbor entry from the kernel + +# Test expectations +EXPECT_ROUTE = 'expect_route' +EXPECT_NEIGH = 'expect_neigh' +REAL_MAC = 'real_mac' + +INTF = 'intf' +IP = 'ip' +MAC = 'mac' + +# Note: For most test cases below, after the neighbor entry is deleted, we must +# still set `REAL_MAC` to `True` in the expected result since a prior step in the +# test should have resolved the neighbor entry and confirmed that the APPL_DB +# neighbor entry contained a real MAC address. Thus, when we verify that APPL_DB +# no longer contains a neighbor table entry, we need to check for the real MAC. +# The exception to this is test cases where the neighbor entry is never resolved +# in the kernel. In that case, APPL_DB will never contain the real MAC address. + +STANDBY_MUX_CABLE_TESTS = [ + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] +] + +ACTIVE_MUX_CABLE_TESTS = [ + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: 
DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] +] + +NEIGH_IP_TESTS = [ + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] + +] + +NEIGH_MISS_TESTS = ACTIVE_MUX_CABLE_TESTS + STANDBY_MUX_CABLE_TESTS + NEIGH_IP_TESTS diff --git a/tests/test_acl.py b/tests/test_acl.py index d4b317bf55..fb3ca4ccf4 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -12,7 +12,7 @@ class TestAcl: - @pytest.yield_fixture + @pytest.fixture def l3_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table(L3_TABLE_NAME, L3_TABLE_TYPE, L3_BIND_PORTS) @@ -21,7 +21,7 @@ def l3_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(L3_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def l3v6_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table(L3V6_TABLE_NAME, @@ -32,7 +32,7 @@ def l3v6_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(L3V6_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def setup_teardown_neighbor(self, dvs): try: # NOTE: set_interface_status has a dependency on cdb within dvs, diff --git a/tests/test_acl_egress_table.py b/tests/test_acl_egress_table.py index f2b917ebc6..e6e2a98f10 100644 --- a/tests/test_acl_egress_table.py +++ 
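The scenario tables above are pure data; a driver walks each list in order, performs the action, and verifies the expected result. An illustrative walker (not the actual test harness), where run_action and verify_state stand in for execute_action() and check_neighbor_state() defined in test_mux.py later in this patch:

    # Hypothetical consumer of one NEIGH_MISS_TESTS scenario.
    def run_scenario(scenario, run_action, verify_state):
        for step in scenario:
            run_action(step[TEST_ACTION])
            result = step[EXPECTED_RESULT]
            verify_state(
                expect_route=result[EXPECT_ROUTE],
                expect_neigh=result[EXPECT_NEIGH],
                real_mac=result[REAL_MAC],
            )
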
diff --git a/tests/test_acl.py b/tests/test_acl.py
index d4b317bf55..fb3ca4ccf4 100644
--- a/tests/test_acl.py
+++ b/tests/test_acl.py
@@ -12,7 +12,7 @@
 
 class TestAcl:
-    @pytest.yield_fixture
+    @pytest.fixture
     def l3_acl_table(self, dvs_acl):
         try:
             dvs_acl.create_acl_table(L3_TABLE_NAME, L3_TABLE_TYPE, L3_BIND_PORTS)
@@ -21,7 +21,7 @@ def l3_acl_table(self, dvs_acl):
             dvs_acl.remove_acl_table(L3_TABLE_NAME)
             dvs_acl.verify_acl_table_count(0)
 
-    @pytest.yield_fixture
+    @pytest.fixture
     def l3v6_acl_table(self, dvs_acl):
         try:
             dvs_acl.create_acl_table(L3V6_TABLE_NAME,
@@ -32,7 +32,7 @@ def l3v6_acl_table(self, dvs_acl):
             dvs_acl.remove_acl_table(L3V6_TABLE_NAME)
             dvs_acl.verify_acl_table_count(0)
 
-    @pytest.yield_fixture
+    @pytest.fixture
     def setup_teardown_neighbor(self, dvs):
         try:
             # NOTE: set_interface_status has a dependency on cdb within dvs,
diff --git a/tests/test_acl_egress_table.py b/tests/test_acl_egress_table.py
index f2b917ebc6..e6e2a98f10 100644
--- a/tests/test_acl_egress_table.py
+++ b/tests/test_acl_egress_table.py
@@ -7,7 +7,7 @@
 
 class TestEgressAclTable:
-    @pytest.yield_fixture
+    @pytest.fixture
     def egress_acl_table(self, dvs_acl):
         try:
             dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress")
diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py
index 8562caf15d..661c79bd2b 100644
--- a/tests/test_buffer_dynamic.py
+++ b/tests/test_buffer_dynamic.py
@@ -5,7 +5,7 @@
 from dvslib.dvs_common import PollingConfig
 
 
-@pytest.yield_fixture
+@pytest.fixture
 def dynamic_buffer(dvs):
     buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd)
     yield
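The test_mux.py changes below replace neighbor-entry counting with exact ASIC_DB key matching. For reference, the key that check_neigh_in_asic_db() constructs is a compact JSON object; the OID values here are invented for illustration:

    import json

    # Shape of an ASIC_DB neighbor key as built by check_neigh_in_asic_db();
    # the OIDs are made up.
    neigh_key_map = {
        "ip": "192.168.0.100",
        "rif": "oid:0x6000000000a4b",         # VLAN router interface OID
        "switch_id": "oid:0x21000000000000"   # switch OID
    }
    key = json.dumps(neigh_key_map, sort_keys=True, separators=(',', ':'))
    # -> '{"ip":"192.168.0.100","rif":"oid:0x6000000000a4b","switch_id":"oid:0x21000000000000"}'
    # The full redis key is "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:" + key
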
confdb.create_entry("VLAN_INTERFACE", "Vlan1000", fvs) + confdb.create_entry("VLAN_INTERFACE", self.VLAN_1000, fvs) confdb.create_entry("VLAN_INTERFACE", "Vlan1000|192.168.0.1/24", fvs) confdb.create_entry("VLAN_INTERFACE", "Vlan1000|fc02:1000::1/64", fvs) dvs.runcmd("config interface startup Ethernet0") dvs.runcmd("config interface startup Ethernet4") - + dvs.runcmd("config interface startup Ethernet8") def create_mux_cable(self, confdb): - - fvs = { "server_ipv4":self.SERV1_IPV4+self.IPV4_MASK, "server_ipv6":self.SERV1_IPV6+self.IPV6_MASK } + fvs = {"server_ipv4": self.SERV1_IPV4+self.IPV4_MASK, + "server_ipv6": self.SERV1_IPV6+self.IPV6_MASK} confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet0", fvs) - fvs = { "server_ipv4":self.SERV2_IPV4+self.IPV4_MASK, "server_ipv6":self.SERV2_IPV6+self.IPV6_MASK } + fvs = {"server_ipv4": self.SERV2_IPV4+self.IPV4_MASK, + "server_ipv6": self.SERV2_IPV6+self.IPV6_MASK} confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet4", fvs) + fvs = {"server_ipv4": self.SERV3_IPV4+self.IPV4_MASK, + "server_ipv6": self.SERV3_IPV6+self.IPV6_MASK} + confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet8", fvs) def set_mux_state(self, appdb, ifname, state_change): @@ -84,22 +126,46 @@ def set_mux_state(self, appdb, ifname, state_change): time.sleep(1) + def get_switch_oid(self, asicdb): + # Assumes only one switch is ever present + keys = asicdb.wait_for_n_keys(self.ASIC_SWITCH_TABLE, 1) + return keys[0] + + def get_vlan_rif_oid(self, asicdb): + # create_vlan_interface should be called before this method + # Assumes only one VLAN RIF is present + rifs = asicdb.get_keys(self.ASIC_RIF_TABLE) + + vlan_oid = '' + for rif_key in rifs: + entry = asicdb.get_entry(self.ASIC_RIF_TABLE, rif_key) + if entry[self.SAI_ROUTER_INTERFACE_ATTR_TYPE] == self.SAI_ROUTER_INTERFACE_TYPE_VLAN: + vlan_oid = rif_key + break + + return vlan_oid - def check_neigh_in_asic_db(self, asicdb, ip, expected=1): + def check_neigh_in_asic_db(self, asicdb, ip, expected=True): + rif_oid = self.get_vlan_rif_oid(asicdb) + switch_oid = self.get_switch_oid(asicdb) + neigh_key_map = { + "ip": ip, + "rif": rif_oid, + "switch_id": switch_oid + } + expected_key = json.dumps(neigh_key_map, sort_keys=True, separators=(',', ':')) - nbr = asicdb.wait_for_n_keys(self.ASIC_NEIGH_TABLE, expected) + if expected: + nbr_keys = asicdb.wait_for_matching_keys(self.ASIC_NEIGH_TABLE, [expected_key]) - found = False - for key in nbr: - entry = json.loads(key) - if entry["ip"] == ip: - found = True - entry = key - break + for key in nbr_keys: + if ip in key: + return key - assert found - return entry + else: + asicdb.wait_for_deleted_keys(self.ASIC_NEIGH_TABLE, [expected_key]) + return '' def check_tnl_nexthop_in_asic_db(self, asicdb, expected=1): @@ -114,7 +180,6 @@ def check_tnl_nexthop_in_asic_db(self, asicdb, expected=1): assert tunnel_nh_id - def check_nexthop_in_asic_db(self, asicdb, key, standby=False): fvs = asicdb.get_entry(self.ASIC_ROUTE_TABLE, key) @@ -127,7 +192,6 @@ def check_nexthop_in_asic_db(self, asicdb, key, standby=False): else: assert (nhid != tunnel_nh_id) - def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): fvs = asicdb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", key) @@ -144,21 +208,22 @@ def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): for k in keys: fvs = asicdb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg_id - + # Count the number of Nexthop member pointing to tunnel 
if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] == tunnel_nh_id: - count += 1 + count += 1 assert num_tnl_nh == count - - def add_neighbor(self, dvs, ip, mac, v6=False): - - if v6: + def add_neighbor(self, dvs, ip, mac): + if ip_address(ip).version == 6: dvs.runcmd("ip -6 neigh replace " + ip + " lladdr " + mac + " dev Vlan1000") else: dvs.runcmd("ip -4 neigh replace " + ip + " lladdr " + mac + " dev Vlan1000") + def del_neighbor(self, dvs, ip): + cmd = 'ip neigh del {} dev {}'.format(ip, self.VLAN_1000) + dvs.runcmd(cmd) def add_fdb(self, dvs, port, mac): @@ -180,7 +245,7 @@ def del_fdb(self, dvs, mac): def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): - self.create_vlan_interface(confdb, asicdb, dvs) + self.create_vlan_interface(dvs) self.create_mux_cable(confdb) @@ -188,21 +253,22 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "standby") self.add_neighbor(dvs, self.SERV1_IPV4, "00:00:00:00:00:01") - # Broadcast neigh 192.168.0.255 is default added. Hence +1 for expected number - srv1_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4, 2) + srv1_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4) - self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01", True) - srv1_v6 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6, 3) + self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01") + srv1_v6 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6) existing_keys = asicdb.get_keys(self.ASIC_NEIGH_TABLE) self.add_neighbor(dvs, self.SERV2_IPV4, "00:00:00:00:00:02") - self.add_neighbor(dvs, self.SERV2_IPV6, "00:00:00:00:00:02", True) + self.add_neighbor(dvs, self.SERV2_IPV6, "00:00:00:00:00:02") time.sleep(1) # In standby mode, the entry must not be added to Neigh table but Route asicdb.wait_for_matching_keys(self.ASIC_NEIGH_TABLE, existing_keys) - dvs_route.check_asicdb_route_entries([self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK]) + dvs_route.check_asicdb_route_entries( + [self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK] + ) # The first standby route also creates as tunnel Nexthop self.check_tnl_nexthop_in_asic_db(asicdb, 3) @@ -212,15 +278,18 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_v4) asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_v6) - dvs_route.check_asicdb_route_entries([self.SERV1_IPV4+self.IPV4_MASK, self.SERV1_IPV6+self.IPV6_MASK]) + dvs_route.check_asicdb_route_entries( + [self.SERV1_IPV4+self.IPV4_MASK, self.SERV1_IPV6+self.IPV6_MASK] + ) # Change state to Active. 
This will add Neigh and delete Route self.set_mux_state(appdb, "Ethernet4", "active") - dvs_route.check_asicdb_deleted_route_entries([self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4, 3) - self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6, 3) - + dvs_route.check_asicdb_deleted_route_entries( + [self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK] + ) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6) def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): @@ -233,27 +302,27 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): ip_1 = "fc02:1000::10" ip_2 = "fc02:1000::11" - self.add_neighbor(dvs, ip_1, "00:00:00:00:00:11", True) - self.add_neighbor(dvs, ip_2, "00:00:00:00:00:12", True) + self.add_neighbor(dvs, ip_1, "00:00:00:00:00:11") + self.add_neighbor(dvs, ip_2, "00:00:00:00:00:12") # ip_1 is on Active Mux, hence added to Host table - self.check_neigh_in_asic_db(asicdb, ip_1, 4) + self.check_neigh_in_asic_db(asicdb, ip_1) # ip_2 is on Standby Mux, hence added to Route table dvs_route.check_asicdb_route_entries([ip_2+self.IPV6_MASK]) # Check ip_1 move to standby mux, should be pointing to tunnel - self.add_neighbor(dvs, ip_1, "00:00:00:00:00:12", True) + self.add_neighbor(dvs, ip_1, "00:00:00:00:00:12") # ip_1 moved to standby Mux, hence added to Route table dvs_route.check_asicdb_route_entries([ip_1+self.IPV6_MASK]) # Check ip_2 move to active mux, should be host entry - self.add_neighbor(dvs, ip_2, "00:00:00:00:00:11", True) + self.add_neighbor(dvs, ip_2, "00:00:00:00:00:11") # ip_2 moved to active Mux, hence remove from Route table dvs_route.check_asicdb_deleted_route_entries([ip_2+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, ip_2, 4) + self.check_neigh_in_asic_db(asicdb, ip_2) # Simulate FDB aging out test case ip_3 = "192.168.0.200" @@ -273,13 +342,18 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "active") dvs_route.check_asicdb_deleted_route_entries([ip_3+self.IPV4_MASK]) + self.del_fdb(dvs, "00-00-00-00-00-11") + def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet0", "active") rtprefix = "2.3.4.0/24" - dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route " + rtprefix + " " + self.SERV1_IPV4 + "\"") + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ip route " + rtprefix + + " " + self.SERV1_IPV4 + "\"" + ) pdb = dvs.get_app_db() pdb.wait_for_entry("ROUTE_TABLE", rtprefix) @@ -331,9 +405,14 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): dvs_route.check_asicdb_deleted_route_entries([rtprefix]) ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") - - fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV4 + "," + self.SERV2_IPV4), ("ifname", "Vlan1000,Vlan1000")]) - + + fvs = swsscommon.FieldValuePairs( + [ + ("nexthop", self.SERV1_IPV4 + "," + self.SERV2_IPV4), + ("ifname", "Vlan1000,Vlan1000") + ] + ) + ps.set(rtprefix, fvs) # Check if route was propagated to ASIC DB @@ -370,7 +449,12 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") - fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), ("ifname", "tun0,tun0")]) + fvs = swsscommon.FieldValuePairs( + [ + ("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), + ("ifname", "tun0,tun0") + ] + ) 
ps.set(rtprefix, fvs) @@ -396,6 +480,7 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "standby") self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2) + ps._del(rtprefix) def get_expected_sai_qualifiers(self, portlist, dvs_acl): expected_sai_qualifiers = { @@ -405,12 +490,12 @@ def get_expected_sai_qualifiers(self, portlist, dvs_acl): return expected_sai_qualifiers - - def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): + def create_and_test_acl(self, appdb, dvs_acl): # Start with active, verify NO ACL rules exists self.set_mux_state(appdb, "Ethernet0", "active") self.set_mux_state(appdb, "Ethernet4", "active") + self.set_mux_state(appdb, "Ethernet8", "active") dvs_acl.verify_no_acl_rules() @@ -421,7 +506,7 @@ def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): # Set two mux ports to standby, verify ACL rule with inport bitmap (2 ports) self.set_mux_state(appdb, "Ethernet4", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Set one mux port to active, verify ACL rule with inport bitmap (1 port) @@ -440,7 +525,7 @@ def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): # Verify change while setting unknown from active self.set_mux_state(appdb, "Ethernet4", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) self.set_mux_state(appdb, "Ethernet0", "active") @@ -448,16 +533,15 @@ def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) self.set_mux_state(appdb, "Ethernet0", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Verify no change while setting unknown from standby self.set_mux_state(appdb, "Ethernet0", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0", "Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - - def create_and_test_metrics(self, appdb, statedb, dvs): + def create_and_test_metrics(self, appdb, statedb): # Set to active and test attributes for start and end time self.set_mux_state(appdb, "Ethernet0", "active") @@ -471,7 +555,7 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert fvs != {} start = end = False - for f,v in fvs.items(): + for f, _ in fvs.items(): if f == "orch_switch_active_start": start = True elif f == "orch_switch_active_end": @@ -493,7 +577,7 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert fvs != {} start = end = False - for f,v in fvs.items(): + for f, v in fvs.items(): if f == "orch_switch_standby_start": start = True elif f == "orch_switch_standby_end": @@ -502,26 +586,17 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert start assert end - def check_interface_exists_in_asicdb(self, asicdb, sai_oid): 
asicdb.wait_for_entry(self.ASIC_RIF_TABLE, sai_oid) return True - def check_vr_exists_in_asicdb(self, asicdb, sai_oid): asicdb.wait_for_entry(self.ASIC_VRF_TABLE, sai_oid) return True - - def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): + def create_and_test_peer(self, asicdb): """ Create PEER entry verify all needed enties in ASIC DB exists """ - peer_attrs = { - "address_ipv4": peer_ip - } - - db.create_entry("PEER_SWITCH", peer_name, peer_attrs) - # check asic db table # There will be two tunnels, one P2MP and another P2P tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 2) @@ -546,9 +621,9 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": - assert value == src_ip + assert value == self.SELF_IPV4 elif field == "SAI_TUNNEL_ATTR_ENCAP_DST_IP": - assert value == peer_ip + assert value == self.PEER_IPV4 elif field == "SAI_TUNNEL_ATTR_PEER_MODE": assert value == "SAI_TUNNEL_PEER_MODE_P2P" elif field == "SAI_TUNNEL_ATTR_OVERLAY_INTERFACE": @@ -562,7 +637,6 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): else: assert False, "Field %s is not tested" % field - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips): tunnel_term_entries = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TERM_ENTRIES, len(dst_ips)) @@ -585,21 +659,10 @@ def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid else: assert False, "Field %s is not tested" % field - - def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): + def create_and_test_tunnel(self, db, asicdb, tunnel_name, tunnel_params): """ Create tunnel and verify all needed enties in ASIC DB exists """ - is_symmetric_tunnel = "src_ip" in kwargs; - - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - - fvs = create_fvs(**kwargs) - - ps.set(tunnel_name, fvs) - - # wait till config will be applied - time.sleep(1) + is_symmetric_tunnel = "src_ip" in tunnel_params # check asic db table tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 1) @@ -612,15 +675,15 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel assert len(fvs) == 7 if is_symmetric_tunnel else 6 - expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] - expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] - expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] + expected_ecn_mode = self.ecn_modes_map[tunnel_params["ecn_mode"]] + expected_dscp_mode = self.dscp_modes_map[tunnel_params["dscp_mode"]] + expected_ttl_mode = self.ttl_modes_map[tunnel_params["ttl_mode"]] for field, value in fvs.items(): if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": - assert value == kwargs["src_ip"] + assert value == tunnel_params["src_ip"] elif field == "SAI_TUNNEL_ATTR_DECAP_ECN_MODE": assert value == expected_ecn_mode elif field == "SAI_TUNNEL_ATTR_DECAP_TTL_MODE": @@ -634,8 +697,9 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): else: assert False, "Field %s is not tested" % field - self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(",")) - + self.check_tunnel_termination_entry_exists_in_asicdb( + asicdb, tunnel_sai_obj, 
tunnel_params["dst_ip"].split(",") + ) def remove_and_test_tunnel(self, db, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" @@ -650,7 +714,7 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): status, fvs = tunnel_table.get(tunnel_sai_obj) # get overlay loopback interface oid to check if it is deleted with the tunnel - overlay_infs_id = {f:v for f,v in fvs}["SAI_TUNNEL_ATTR_OVERLAY_INTERFACE"] + overlay_infs_id = {f:v for f, v in fvs}["SAI_TUNNEL_ATTR_OVERLAY_INTERFACE"] ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) ps.set(tunnel_name, create_fvs(), 'DEL') @@ -663,6 +727,22 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_app_table.getKeys()) == 0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + def check_app_db_neigh_table( + self, appdb, intf, neigh_ip, + mac="00:00:00:00:00:00", expect_entry=True + ): + key = "{}:{}".format(intf, neigh_ip) + if isinstance(ip_address(neigh_ip), IPv4Address): + family = 'IPv4' + else: + family = 'IPv6' + + if expect_entry: + appdb.wait_for_matching_keys(self.APP_NEIGH_TABLE, [key]) + appdb.wait_for_field_match(self.APP_NEIGH_TABLE, key, {'family': family}) + appdb.wait_for_field_match(self.APP_NEIGH_TABLE, key, {'neigh': mac}) + else: + appdb.wait_for_deleted_keys(self.APP_NEIGH_TABLE, key) def cleanup_left_over(self, db, asicdb): """ Cleanup APP and ASIC tables """ @@ -679,11 +759,167 @@ def cleanup_left_over(self, db, asicdb): for key in tunnel_app_table.getKeys(): tunnel_table._del(key) + def ping_ip(self, dvs, ip): + dvs.runcmd(self.PING_CMD.format(ip=ip)) + + def check_neighbor_state( + self, dvs, dvs_route, neigh_ip, expect_route=True, + expect_neigh=False, expected_mac='00:00:00:00:00:00' + ): + """ + Checks the status of neighbor entries in APPL and ASIC DB + """ + if expect_route and expect_neigh: + pytest.fail('expect_routes and expect_neigh cannot both be True') + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + prefix = str(ip_network(neigh_ip)) + self.check_app_db_neigh_table( + app_db, self.VLAN_1000, neigh_ip, + mac=expected_mac, expect_entry=expect_route + ) + if expect_route: + self.check_tnl_nexthop_in_asic_db(asic_db) + routes = dvs_route.check_asicdb_route_entries([prefix]) + for route in routes: + self.check_nexthop_in_asic_db(asic_db, route, standby=expect_route) + else: + dvs_route.check_asicdb_deleted_route_entries([prefix]) + self.check_neigh_in_asic_db(asic_db, neigh_ip, expected=expect_neigh) + + def execute_action(self, action, dvs, test_info): + if action in (PING_SERV, PING_NEIGH): + self.ping_ip(dvs, test_info[IP]) + elif action in (ACTIVE, STANDBY): + app_db_connector = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + self.set_mux_state(app_db_connector, test_info[INTF], action) + elif action == RESOLVE_ENTRY: + self.add_neighbor(dvs, test_info[IP], test_info[MAC]) + elif action == DELETE_ENTRY: + self.del_neighbor(dvs, test_info[IP]) + else: + pytest.fail('Invalid test action {}'.format(action)) + + @pytest.fixture(scope='module') + def setup_vlan(self, dvs): + self.create_vlan_interface(dvs) + + @pytest.fixture(scope='module') + def setup_mux_cable(self, dvs): + config_db = dvs.get_config_db() + self.create_mux_cable(config_db) + + @pytest.fixture(scope='module') + def setup_tunnel(self, dvs): + app_db_connector = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + ps = swsscommon.ProducerStateTable(app_db_connector, self.APP_TUNNEL_DECAP_TABLE_NAME) + fvs = 
create_fvs(**self.DEFAULT_TUNNEL_PARAMS) + ps.set(self.MUX_TUNNEL_0, fvs) + + @pytest.fixture + def setup_peer_switch(self, dvs): + config_db = dvs.get_config_db() + config_db.create_entry( + self.CONFIG_PEER_SWITCH, + self.PEER_SWITCH_HOST, + self.DEFAULT_PEER_SWITCH_PARAMS + ) + + @pytest.fixture + def remove_peer_switch(self, dvs): + config_db = dvs.get_config_db() + config_db.delete_entry(self.CONFIG_PEER_SWITCH, self.PEER_SWITCH_HOST) + + @pytest.fixture(params=['IPv4', 'IPv6']) + def ip_version(self, request): + return request.param + + def clear_neighbors(self, dvs): + _, neighs_str = dvs.runcmd('ip neigh show all') + neighs = [entry.split()[0] for entry in neighs_str.split('\n')[:-1]] + + for neigh in neighs: + self.del_neighbor(dvs, neigh) + + @pytest.fixture + def neighbor_cleanup(self, dvs): + """ + Ensures that all kernel neighbors are removed before and after tests + """ + self.clear_neighbors(dvs) + yield + self.clear_neighbors(dvs) + + @pytest.fixture + def server_test_ips(self, ip_version): + if ip_version == 'IPv4': + return [self.SERV1_IPV4, self.SERV2_IPV4, self.SERV3_IPV4] + else: + return [self.SERV1_IPV6, self.SERV2_IPV6, self.SERV3_IPV6] + + @pytest.fixture + def neigh_test_ips(self, ip_version): + if ip_version == 'IPv4': + return [self.NEIGH1_IPV4, self.NEIGH2_IPV4, self.NEIGH3_IPV4] + else: + return [self.NEIGH1_IPV6, self.NEIGH2_IPV6, self.NEIGH3_IPV6] + + @pytest.fixture + def ips_for_test(self, server_test_ips, neigh_test_ips, neigh_miss_test_sequence): + # Assumes that each test sequence has at exactly one of + # PING_NEIGH OR PING_SERV as a step + for step in neigh_miss_test_sequence: + if step[TEST_ACTION] == PING_SERV: + return server_test_ips + if step[TEST_ACTION] == PING_NEIGH: + return neigh_test_ips + + # If we got here, the test sequence did not contain a ping command + pytest.fail('No ping command found in test sequence {}'.format(neigh_miss_test_sequence)) + + @pytest.fixture + def ip_to_intf_map(self, server_test_ips, neigh_test_ips): + map = { + server_test_ips[0]: 'Ethernet0', + server_test_ips[1]: 'Ethernet4', + server_test_ips[2]: 'Ethernet8', + neigh_test_ips[0]: 'Ethernet0', + neigh_test_ips[1]: 'Ethernet4', + neigh_test_ips[2]: 'Ethernet8' + } + return map + + @pytest.fixture( + params=NEIGH_MISS_TESTS, + ids=['->'.join([step[TEST_ACTION] for step in scenario]) + for scenario in NEIGH_MISS_TESTS] + ) + def neigh_miss_test_sequence(self, request): + return request.param + + @pytest.fixture + def intf_fdb_map(self, dvs, setup_vlan): + """ + Note: this fixture invokes the setup_vlan fixture so that + the interfaces are brought up before attempting to access FDB information + """ + state_db = dvs.get_state_db() + keys = state_db.get_keys(self.STATE_FDB_TABLE) + + fdb_map = {} + for key in keys: + entry = state_db.get_entry(self.STATE_FDB_TABLE, key) + mac = key.replace('{}:'.format(self.VLAN_1000), '') + port = entry['port'] + fdb_map[port] = mac + + return fdb_map + class TestMuxTunnel(TestMuxTunnelBase): """ Tests for Mux tunnel creation and removal """ - def test_Tunnel(self, dvs, testlog): + def test_Tunnel(self, dvs, setup_tunnel, testlog): """ test IPv4 Mux tunnel creation """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -692,63 +928,94 @@ def test_Tunnel(self, dvs, testlog): #self.cleanup_left_over(db, asicdb) # create tunnel IPv4 tunnel - self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", - dst_ip="10.1.0.32", dscp_mode="uniform", - ecn_mode="standard", ttl_mode="pipe") + 
self.create_and_test_tunnel(db, asicdb, self.MUX_TUNNEL_0, self.DEFAULT_TUNNEL_PARAMS) - - def test_Peer(self, dvs, testlog): + def test_Peer(self, dvs, setup_peer_switch, testlog): """ test IPv4 Mux tunnel creation """ - db = dvs.get_config_db() asicdb = dvs.get_asic_db() - self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32") - + self.create_and_test_peer(asicdb) def test_Neighbor(self, dvs, dvs_route, testlog): """ test Neighbor entries and mux state change """ confdb = dvs.get_config_db() - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_neighbor(confdb, appdb, asicdb, dvs, dvs_route) - def test_Fdb(self, dvs, dvs_route, testlog): """ test Fdb entries and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_fdb(appdb, asicdb, dvs, dvs_route) - def test_Route(self, dvs, dvs_route, testlog): """ test Route entries and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_route(appdb, asicdb, dvs, dvs_route) - def test_acl(self, dvs, dvs_acl, testlog): """ test acl and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - asicdb = dvs.get_asic_db() + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - self.create_and_test_acl(appdb, asicdb, dvs, dvs_acl) + self.create_and_test_acl(appdb, dvs_acl) def test_mux_metrics(self, dvs, testlog): """ test metrics for mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) statedb = dvs.get_state_db() - self.create_and_test_metrics(appdb, statedb, dvs) + self.create_and_test_metrics(appdb, statedb) + + def test_neighbor_miss( + self, dvs, dvs_route, ips_for_test, neigh_miss_test_sequence, + ip_to_intf_map, intf_fdb_map, neighbor_cleanup, setup_vlan, + setup_mux_cable, setup_tunnel, setup_peer_switch, testlog + ): + ip = ips_for_test[0] + intf = ip_to_intf_map[ip] + mac = intf_fdb_map[intf] + test_info = { + IP: ip, + INTF: intf, + MAC: mac + } + + for step in neigh_miss_test_sequence: + self.execute_action(step[TEST_ACTION], dvs, test_info) + exp_result = step[EXPECTED_RESULT] + self.check_neighbor_state( + dvs, dvs_route, ip, + expect_route=exp_result[EXPECT_ROUTE], + expect_neigh=exp_result[EXPECT_NEIGH], + expected_mac=mac if exp_result[REAL_MAC] else '00:00:00:00:00:00' + ) + + def test_neighbor_miss_no_peer( + self, dvs, dvs_route, setup_vlan, setup_mux_cable, setup_tunnel, + remove_peer_switch, neighbor_cleanup, testlog + ): + """ + test neighbor miss with no peer switch configured + No new entries are expected in APPL_DB or ASIC_DB + """ + test_ips = [self.NEIGH3_IPV4, self.SERV3_IPV4, self.NEIGH1_IPV6, self.SERV1_IPV6] + + for ip in test_ips: + self.ping_ip(dvs, ip) + + for ip in test_ips: + self.check_neighbor_state(dvs, dvs_route, ip, expect_route=False) # Add Dummy always-pass test at end as workaroud diff --git a/tests/test_port_config.py b/tests/test_port_config.py index 6ffa174dcd..1fd9db2f33 100644 --- a/tests/test_port_config.py +++ b/tests/test_port_config.py @@ -6,7 +6,7 @@ from swsscommon import 
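Unrolled by hand, one standby-side scenario from NEIGH_MISS_TESTS exercised by test_neighbor_miss() above looks roughly like this; `t` (a TestMuxTunnelBase instance), `appdb`, `dvs`, and `dvs_route` are assumed to come from the usual fixtures:

    # Manual walk of a standby ping -> resolve -> delete sequence; sketch only.
    t.set_mux_state(appdb, "Ethernet0", "standby")
    t.ping_ip(dvs, t.SERV1_IPV4)                        # kernel neigh miss -> zero MAC
    t.check_neighbor_state(dvs, dvs_route, t.SERV1_IPV4,
                           expect_route=True)           # tunnel route, no ASIC neighbor
    t.add_neighbor(dvs, t.SERV1_IPV4, "00:00:00:00:00:01")
    t.check_neighbor_state(dvs, dvs_route, t.SERV1_IPV4,
                           expect_route=True, expected_mac="00:00:00:00:00:01")
    t.del_neighbor(dvs, t.SERV1_IPV4)                   # back to no entries
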
diff --git a/tests/test_port_config.py b/tests/test_port_config.py
index 6ffa174dcd..1fd9db2f33 100644
--- a/tests/test_port_config.py
+++ b/tests/test_port_config.py
@@ -6,7 +6,7 @@
 from swsscommon import swsscommon
 
 
-@pytest.yield_fixture
+@pytest.fixture
 def port_config(request, dvs):
     file_name = "/usr/share/sonic/hwsku/port_config.ini"
     dvs.runcmd("cp %s %s.bak" % (file_name, file_name))