From e9b05a31789ce7672614807026c7096a4202da32 Mon Sep 17 00:00:00 2001 From: Shi Su <67605788+shi-su@users.noreply.github.com> Date: Tue, 30 Nov 2021 16:00:16 -0800 Subject: [PATCH] [vnetorch] ECMP for vnet tunnel routes with endpoint health monitor (#1955) What I did Add functions to create/remove next hop groups for vnet tunnel routes. Count the reference count of next hop groups to create and remove as needed. Share the counter of next hop groups with routeorch. Adapt route endpoint according to the BFD state of endpoints. Why I did it To add support for overlay ECMP. How I verified it Verify ECMP groups are properly created and removed with the functions. --- orchagent/orchdaemon.cpp | 5 +- orchagent/vnetorch.cpp | 482 ++++++++++++++++++++++++++++++++++++++- orchagent/vnetorch.h | 42 +++- tests/test_vnet.py | 438 +++++++++++++++++++++++++++++++++++ 4 files changed, 949 insertions(+), 18 deletions(-) diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 52beb0fd10d7..14e4d8aa777c 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -122,6 +122,8 @@ bool OrchDaemon::init() TableConnector stateDbFdb(m_stateDb, STATE_FDB_TABLE_NAME); TableConnector stateMclagDbFdb(m_stateDb, STATE_MCLAG_REMOTE_FDB_TABLE_NAME); gFdbOrch = new FdbOrch(m_applDb, app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + TableConnector stateDbBfdSessionTable(m_stateDb, STATE_BFD_SESSION_TABLE_NAME); + gBfdOrch = new BfdOrch(m_applDb, APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); vector vnet_tables = { APP_VNET_RT_TABLE_NAME, @@ -309,9 +311,6 @@ bool OrchDaemon::init() gMacsecOrch = new MACsecOrch(m_applDb, m_stateDb, macsec_app_tables, gPortsOrch); - TableConnector stateDbBfdSessionTable(m_stateDb, STATE_BFD_SESSION_TABLE_NAME); - gBfdOrch = new BfdOrch(m_applDb, APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); - gNhgMapOrch = new NhgMapOrch(m_applDb, APP_FC_TO_NHG_INDEX_MAP_TABLE_NAME); /* diff --git a/orchagent/vnetorch.cpp 
b/orchagent/vnetorch.cpp index 053784e2987e..dc5838d8a512 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -39,6 +39,7 @@ extern NeighOrch *gNeighOrch; extern CrmOrch *gCrmOrch; extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; +extern BfdOrch *gBfdOrch; /* * VRF Modeling and VNetVrf class definitions @@ -558,9 +559,14 @@ static bool del_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx) route_entry.destination = ip_pfx; sai_status_t status = sai_route_api->remove_route_entry(&route_entry); - if (status != SAI_STATUS_SUCCESS) + if (status == SAI_STATUS_ITEM_NOT_FOUND || status == SAI_STATUS_INVALID_PARAMETER) + { + SWSS_LOG_INFO("Unable to remove route since route is already removed"); + return true; + } + else if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("SAI Failed to remove route"); + SWSS_LOG_ERROR("SAI Failed to remove route, rv: %d", status); return false; } @@ -630,12 +636,17 @@ static bool update_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx, sai_obj } VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOrch *vnetOrch) - : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch) + : Orch2(db, tableNames, request_), vnet_orch_(vnetOrch), bfd_session_producer_(db, APP_BFD_SESSION_TABLE_NAME) { SWSS_LOG_ENTER(); handler_map_.insert(handler_pair(APP_VNET_RT_TABLE_NAME, &VNetRouteOrch::handleRoutes)); handler_map_.insert(handler_pair(APP_VNET_RT_TUNNEL_TABLE_NAME, &VNetRouteOrch::handleTunnel)); + + state_db_ = shared_ptr(new DBConnector("STATE_DB", 0)); + state_vnet_rt_tunnel_table_ = unique_ptr(new Table(state_db_.get(), STATE_VNET_RT_TUNNEL_TABLE_NAME)); + + gBfdOrch->attach(this); } bool VNetRouteOrch::hasNextHopGroup(const string& vnet, const NextHopGroupKey& nexthops) @@ -667,6 +678,10 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n for (auto it : next_hop_set) { + if (nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && 
nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) + { + continue; + } sai_object_id_t next_hop_id = vrf_obj->getTunnelNextHop(it); next_hop_ids.push_back(next_hop_id); nhopgroup_members_set[next_hop_id] = it; @@ -797,7 +812,8 @@ bool VNetRouteOrch::removeNextHopGroup(const string& vnet, const NextHopGroupKey template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, - NextHopGroupKey& nexthops, string& op) + NextHopGroupKey& nexthops, string& op, + const map& monitors) { SWSS_LOG_ENTER(); @@ -835,9 +851,9 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP if (op == SET_COMMAND) { sai_object_id_t nh_id; - /* The route is pointing to one single endpoint */ if (!hasNextHopGroup(vnet, nexthops)) { + setEndpointMonitor(vnet, monitors, nexthops); if (nexthops.getSize() == 1) { NextHopKey nexthop(nexthops.to_string(), true); @@ -851,6 +867,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP { if (!addNextHopGroup(vnet, nexthops, vrf_obj)) { + delEndpointMonitor(vnet, nexthops); SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); return false; } @@ -863,13 +880,37 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP { bool route_status = true; - if (it_route == syncd_tunnel_routes_[vnet].end()) + // Remove route if the nexthop group has no active endpoint + if (syncd_nexthop_groups_[vnet][nexthops].active_members.empty()) { - route_status = add_route(vr_id, pfx, nh_id); + if (it_route != syncd_tunnel_routes_[vnet].end()) + { + NextHopGroupKey nhg = it_route->second; + // Remove route when updating from a nhg with active member to another nhg without + if (!syncd_nexthop_groups_[vnet][nhg].active_members.empty()) + { + del_route(vr_id, pfx); + } + } } else { - route_status = update_route(vr_id, pfx, nh_id); + if (it_route == syncd_tunnel_routes_[vnet].end()) + { + route_status = add_route(vr_id, pfx, nh_id); + } + else + { + NextHopGroupKey 
nhg = it_route->second; + if (syncd_nexthop_groups_[vnet][nhg].active_members.empty()) + { + route_status = add_route(vr_id, pfx, nh_id); + } + else + { + route_status = update_route(vr_id, pfx, nh_id); + } + } } if (!route_status) @@ -900,13 +941,22 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopKey nexthop(nhg.to_string(), true); vrf_obj->removeTunnelNextHop(nexthop); } + delEndpointMonitor(vnet, nhg); + } + else + { + syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); } vrf_obj->removeRoute(ipPrefix); } + syncd_nexthop_groups_[vnet][nexthops].tunnel_routes.insert(ipPrefix); + syncd_tunnel_routes_[vnet][ipPrefix] = nexthops; syncd_nexthop_groups_[vnet][nexthops].ref_count++; vrf_obj->addRoute(ipPrefix, nexthops); + + postRouteState(vnet, ipPrefix, nexthops); } else if (op == DEL_COMMAND) { @@ -921,10 +971,14 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP for (auto vr_id : vr_set) { - if (!del_route(vr_id, pfx)) + // If an nhg has no active member, the route should already be removed + if (!syncd_nexthop_groups_[vnet][nhg].active_members.empty()) { - SWSS_LOG_ERROR("Route del failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); - return false; + if (!del_route(vr_id, pfx)) + { + SWSS_LOG_ERROR("Route del failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); + return false; + } } } @@ -940,6 +994,11 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP NextHopKey nexthop(nhg.to_string(), true); vrf_obj->removeTunnelNextHop(nexthop); } + delEndpointMonitor(vnet, nhg); + } + else + { + syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); } syncd_tunnel_routes_[vnet].erase(ipPrefix); @@ -949,6 +1008,84 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP } vrf_obj->removeRoute(ipPrefix); + + removeRouteState(vnet, ipPrefix); + } + + return true; +} + +bool VNetRouteOrch::updateTunnelRoute(const string& vnet, IpPrefix& 
ipPrefix, + NextHopGroupKey& nexthops, string& op) +{ + SWSS_LOG_ENTER(); + + if (!vnet_orch_->isVnetExists(vnet)) + { + SWSS_LOG_WARN("VNET %s doesn't exist for prefix %s, op %s", + vnet.c_str(), ipPrefix.to_string().c_str(), op.c_str()); + return (op == DEL_COMMAND)?true:false; + } + + set vr_set; + auto& peer_list = vnet_orch_->getPeerList(vnet); + + auto l_fn = [&] (const string& vnet) { + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + sai_object_id_t vr_id = vnet_obj->getVRidIngress(); + vr_set.insert(vr_id); + }; + + l_fn(vnet); + for (auto peer : peer_list) + { + if (!vnet_orch_->isVnetExists(peer)) + { + SWSS_LOG_INFO("Peer VNET %s not yet created", peer.c_str()); + return false; + } + l_fn(peer); + } + + sai_ip_prefix_t pfx; + copy(pfx, ipPrefix); + + if (op == SET_COMMAND) + { + sai_object_id_t nh_id = syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; + + for (auto vr_id : vr_set) + { + bool route_status = true; + + route_status = add_route(vr_id, pfx, nh_id); + + if (!route_status) + { + SWSS_LOG_ERROR("Route add failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); + return false; + } + } + } + else if (op == DEL_COMMAND) + { + auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); + if (it_route == syncd_tunnel_routes_[vnet].end()) + { + SWSS_LOG_INFO("Failed to find tunnel route entry, prefix %s\n", + ipPrefix.to_string().c_str()); + return true; + } + NextHopGroupKey nhg = it_route->second; + + for (auto vr_id : vr_set) + { + if (!del_route(vr_id, pfx)) + { + SWSS_LOG_ERROR("Route del failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); + return false; + } + } } return true; @@ -1315,6 +1452,311 @@ void VNetRouteOrch::delRoute(const IpPrefix& ipPrefix) syncd_routes_.erase(route_itr); } +void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr) +{ + SWSS_LOG_ENTER(); + + IpAddress endpoint_addr = endpoint.ip_address; + if 
(nexthop_info_[vnet].find(endpoint_addr) != nexthop_info_[vnet].end()) + { + SWSS_LOG_ERROR("BFD session for endpoint %s already exist", endpoint_addr.to_string().c_str()); + return; + } + + if (bfd_sessions_.find(monitor_addr) == bfd_sessions_.end()) + { + vector data; + string key = "default:default:" + monitor_addr.to_string(); + + auto tun_name = vnet_orch_->getTunnelName(vnet); + VxlanTunnelOrch* vxlan_orch = gDirectory.get(); + auto tunnel_obj = vxlan_orch->getVxlanTunnel(tun_name); + IpAddress src_ip = tunnel_obj->getSrcIP(); + + FieldValueTuple fvTuple("local_addr", src_ip.to_string()); + data.push_back(fvTuple); + + bfd_session_producer_.set(key, data); + + bfd_sessions_[monitor_addr].bfd_state = SAI_BFD_SESSION_STATE_DOWN; + } + + BfdSessionInfo& bfd_info = bfd_sessions_[monitor_addr]; + bfd_info.vnet = vnet; + bfd_info.endpoint = endpoint; + VNetNextHopInfo nexthop_info; + nexthop_info.monitor_addr = monitor_addr; + nexthop_info.bfd_state = bfd_info.bfd_state; + nexthop_info.ref_count = 0; + nexthop_info_[vnet][endpoint_addr] = nexthop_info; +} + +void VNetRouteOrch::removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr) +{ + SWSS_LOG_ENTER(); + + IpAddress endpoint_addr = endpoint.ip_address; + if (nexthop_info_[vnet].find(endpoint_addr) == nexthop_info_[vnet].end()) + { + SWSS_LOG_ERROR("BFD session for endpoint %s does not exist", endpoint_addr.to_string().c_str()); + } + nexthop_info_[vnet].erase(endpoint_addr); + + string key = "default:default:" + monitor_addr.to_string(); + + bfd_session_producer_.del(key); + + bfd_sessions_.erase(monitor_addr); +} + +void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops) +{ + SWSS_LOG_ENTER(); + + for (auto monitor : monitors) + { + NextHopKey nh = monitor.first; + IpAddress monitor_ip = monitor.second; + if (nexthop_info_[vnet].find(nh.ip_address) == nexthop_info_[vnet].end()) + { + createBfdSession(vnet, nh, 
monitor_ip); + } + + nexthop_info_[vnet][nh.ip_address].ref_count++; + } +} + +void VNetRouteOrch::delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops) +{ + SWSS_LOG_ENTER(); + + std::set nhks = nexthops.getNextHops(); + for (auto nhk: nhks) + { + IpAddress ip = nhk.ip_address; + if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { + if (--nexthop_info_[vnet][ip].ref_count == 0) + { + removeBfdSession(vnet, nhk, nexthop_info_[vnet][ip].monitor_addr); + } + } + } +} + +void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops) +{ + const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); + vector fvVector; + + NextHopGroupInfo& nhg_info = syncd_nexthop_groups_[vnet][nexthops]; + string route_state = nhg_info.active_members.empty() ? "inactive" : "active"; + string ep_str = ""; + int idx_ep = 0; + for (auto nh_pair : nhg_info.active_members) + { + NextHopKey nh = nh_pair.first; + ep_str += idx_ep == 0 ? 
nh.ip_address.to_string() : "," + nh.ip_address.to_string(); + idx_ep++; + } + + fvVector.emplace_back("active_endpoints", ep_str); + fvVector.emplace_back("state", route_state); + + state_vnet_rt_tunnel_table_->set(state_db_key, fvVector); +} + +void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) +{ + const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); + state_vnet_rt_tunnel_table_->del(state_db_key); +} + +void VNetRouteOrch::update(SubjectType type, void *cntx) +{ + SWSS_LOG_ENTER(); + + assert(cntx); + + switch(type) { + case SUBJECT_TYPE_BFD_SESSION_STATE_CHANGE: + { + BfdUpdate *update = static_cast(cntx); + updateVnetTunnel(*update); + break; + } + default: + // Received update in which we are not interested + // Ignore it + return; + } +} + +void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) +{ + SWSS_LOG_ENTER(); + + auto key = update.peer; + sai_bfd_session_state_t state = update.state; + + size_t found_vrf = key.find(state_db_key_delimiter); + if (found_vrf == string::npos) + { + SWSS_LOG_ERROR("Failed to parse key %s, no vrf is given", key.c_str()); + return; + } + + size_t found_ifname = key.find(state_db_key_delimiter, found_vrf + 1); + if (found_ifname == string::npos) + { + SWSS_LOG_ERROR("Failed to parse key %s, no ifname is given", key.c_str()); + return; + } + + string vrf_name = key.substr(0, found_vrf); + string alias = key.substr(found_vrf + 1, found_ifname - found_vrf - 1); + IpAddress peer_address(key.substr(found_ifname + 1)); + + if (alias != "default" || vrf_name != "default") + { + return; + } + + auto it_peer = bfd_sessions_.find(peer_address); + + if (it_peer == bfd_sessions_.end()) { + SWSS_LOG_INFO("No endpoint for BFD peer %s", peer_address.to_string().c_str()); + return; + } + + BfdSessionInfo& bfd_info = it_peer->second; + bfd_info.bfd_state = state; + + string vnet = bfd_info.vnet; + NextHopKey endpoint = bfd_info.endpoint; + auto *vrf_obj = 
vnet_orch_->getTypePtr(vnet); + + if (syncd_nexthop_groups_.find(vnet) == syncd_nexthop_groups_.end()) + { + SWSS_LOG_ERROR("Vnet %s not found", vnet.c_str()); + return; + } + + nexthop_info_[vnet][endpoint.ip_address].bfd_state = state; + + for (auto& nhg_info_pair : syncd_nexthop_groups_[vnet]) + { + NextHopGroupKey nexthops = nhg_info_pair.first; + NextHopGroupInfo& nhg_info = nhg_info_pair.second; + + if (!(nexthops.contains(endpoint))) + { + continue; + } + + if (state == SAI_BFD_SESSION_STATE_UP) + { + sai_object_id_t next_hop_group_member_id = SAI_NULL_OBJECT_ID; + if (nexthops.getSize() > 1) + { + // Create a next hop group member + vector nhgm_attrs; + + sai_attribute_t nhgm_attr; + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; + nhgm_attr.value.oid = nhg_info.next_hop_group_id; + nhgm_attrs.push_back(nhgm_attr); + + nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; + nhgm_attr.value.oid = vrf_obj->getTunnelNextHop(endpoint); + nhgm_attrs.push_back(nhgm_attr); + + sai_status_t status = sai_next_hop_group_api->create_next_hop_group_member(&next_hop_group_member_id, + gSwitchId, + (uint32_t)nhgm_attrs.size(), + nhgm_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to add next hop member to group %" PRIx64 ": %d\n", + nhg_info.next_hop_group_id, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + if (handle_status != task_success) + { + continue; + } + } + + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + } + + // Re-create routes when it was temporarily removed + if (nhg_info.active_members.empty()) + { + nhg_info.active_members[endpoint] = next_hop_group_member_id; + if (vnet_orch_->isVnetExecVrf()) + { + for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + { + string op = SET_COMMAND; + updateTunnelRoute(vnet, ip_pfx, nexthops, op); + } + } + } + else + { + nhg_info.active_members[endpoint] = 
next_hop_group_member_id; + } + } + else + { + if (nexthops.getSize() > 1 && nhg_info.active_members.find(endpoint) != nhg_info.active_members.end()) + { + sai_object_id_t nexthop_id = nhg_info.active_members[endpoint]; + sai_status_t status = sai_next_hop_group_api->remove_next_hop_group_member(nexthop_id); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove next hop member %" PRIx64 " from group %" PRIx64 ": %d\n", + nexthop_id, nhg_info.next_hop_group_id, status); + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_NEXT_HOP_GROUP, status); + if (handle_status != task_success) + { + continue; + } + } + + vrf_obj->removeTunnelNextHop(endpoint); + + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + } + + if (nhg_info.active_members.find(endpoint) != nhg_info.active_members.end()) + { + nhg_info.active_members.erase(endpoint); + + // Remove routes when nexthop group has no active endpoint + if (nhg_info.active_members.empty()) + { + if (vnet_orch_->isVnetExecVrf()) + { + for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + { + string op = DEL_COMMAND; + updateTunnelRoute(vnet, ip_pfx, nexthops, op); + } + } + } + } + } + + // Post configured in State DB + for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) + { + postRouteState(vnet, ip_pfx, nexthops); + } + } +} + bool VNetRouteOrch::handleTunnel(const Request& request) { SWSS_LOG_ENTER(); @@ -1322,6 +1764,7 @@ bool VNetRouteOrch::handleTunnel(const Request& request) vector ip_list; vector mac_list; vector vni_list; + vector monitor_list; for (const auto& name: request.getAttrFieldNames()) { @@ -1339,6 +1782,10 @@ bool VNetRouteOrch::handleTunnel(const Request& request) string mac_str = request.getAttrString(name); mac_list = tokenize(mac_str, ','); } + else if (name == "endpoint_monitor") + { + monitor_list = request.getAttrIPList(name); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ 
-1358,6 +1805,12 @@ bool VNetRouteOrch::handleTunnel(const Request& request) return false; } + if (!monitor_list.empty() && monitor_list.size() != ip_list.size()) + { + SWSS_LOG_ERROR("Peer monitor size of %zu does not match endpoint size of %zu", monitor_list.size(), ip_list.size()); + return false; + } + const std::string& vnet_name = request.getKeyString(0); auto ip_pfx = request.getKeyIpPrefix(1); auto op = request.getOperation(); @@ -1366,6 +1819,7 @@ bool VNetRouteOrch::handleTunnel(const Request& request) op.c_str(), ip_pfx.to_string().c_str()); NextHopGroupKey nhg("", true); + map monitors; for (size_t idx_ip = 0; idx_ip < ip_list.size(); idx_ip++) { IpAddress ip = ip_list[idx_ip]; @@ -1387,11 +1841,15 @@ bool VNetRouteOrch::handleTunnel(const Request& request) NextHopKey nh(ip, mac, vni, true); nhg.add(nh); + if (!monitor_list.empty()) + { + monitors[nh] = monitor_list[idx_ip]; + } } if (vnet_orch_->isVnetExecVrf()) { - return doRouteTask(vnet_name, ip_pfx, nhg, op); + return doRouteTask(vnet_name, ip_pfx, nhg, op, monitors); } return true; diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 569a23f2e086..7e493c5f30e5 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -13,6 +13,7 @@ #include "producerstatetable.h" #include "observer.h" #include "nexthopgroupkey.h" +#include "bfdorch.h" #define VNET_BITMAP_SIZE 32 #define VNET_TUNNEL_SIZE 40960 @@ -72,6 +73,7 @@ struct NextHopGroupInfo sai_object_id_t next_hop_group_id; // next hop group id (null for single nexthop) int ref_count; // reference count std::map active_members; // active nexthops and nexthop group member id (null for single nexthop) + std::set tunnel_routes; }; class VNetObject @@ -252,7 +254,7 @@ const request_description_t vnet_route_description = { { "nexthop", REQ_T_STRING }, { "vni", REQ_T_STRING }, { "mac_address", REQ_T_STRING }, - { "endpoint_monitor", REQ_T_STRING }, + { "endpoint_monitor", REQ_T_IP_LIST }, }, { } }; @@ -283,10 +285,26 @@ struct 
VNetNextHopObserverEntry /* NextHopObserverTable: Destination IP address, next hop observer entry */ typedef std::map VNetNextHopObserverTable; +struct VNetNextHopInfo +{ + IpAddress monitor_addr; + sai_bfd_session_state_t bfd_state; + int ref_count; +}; + +struct BfdSessionInfo +{ + sai_bfd_session_state_t bfd_state; + std::string vnet; + NextHopKey endpoint; +}; + typedef std::map VNetNextHopGroupInfoTable; typedef std::map VNetTunnelRouteTable; +typedef std::map BfdSessionTable; +typedef std::map VNetEndpointInfoTable; -class VNetRouteOrch : public Orch2, public Subject +class VNetRouteOrch : public Orch2, public Subject, public Observer { public: VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOrch *); @@ -297,6 +315,8 @@ class VNetRouteOrch : public Orch2, public Subject void attach(Observer* observer, const IpAddress& dstAddr); void detach(Observer* observer, const IpAddress& dstAddr); + void update(SubjectType, void *); + private: virtual bool addOperation(const Request& request); virtual bool delOperation(const Request& request); @@ -312,8 +332,19 @@ class VNetRouteOrch : public Orch2, public Subject bool addNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); bool removeNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); + void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); + void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); + void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops); + void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); + void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops); + void removeRouteState(const string& vnet, IpPrefix& ipPrefix); + + void updateVnetTunnel(const BfdUpdate&); + bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); + template - bool 
doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); + bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, + const std::map& monitors=std::map()); template bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, nextHop& nh, string& op); @@ -326,6 +357,11 @@ class VNetRouteOrch : public Orch2, public Subject VNetNextHopObserverTable next_hop_observers_; std::map syncd_nexthop_groups_; std::map syncd_tunnel_routes_; + BfdSessionTable bfd_sessions_; + std::map nexthop_info_; + ProducerStateTable bfd_session_producer_; + shared_ptr state_db_; + unique_ptr
state_vnet_rt_tunnel_table_; }; class VNetCfgRouteOrch : public Orch diff --git a/tests/test_vnet.py b/tests/test_vnet.py index 595c80a28b03..a41f9ee39f79 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -420,6 +420,69 @@ def check_linux_intf_arp_proxy(dvs, ifname): assert out != "1", "ARP proxy is not enabled for VNET interface in Linux kernel" +def update_bfd_session_state(dvs, addr, state): + bfd_id = get_bfd_session_id(dvs, addr) + assert bfd_id is not None + + bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", + "Down": "SAI_BFD_SESSION_STATE_DOWN", + "Init": "SAI_BFD_SESSION_STATE_INIT", + "Up": "SAI_BFD_SESSION_STATE_UP"} + + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + ntf = swsscommon.NotificationProducer(asic_db, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"bfd_session_id\":\""+bfd_id+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" + ntf.send("bfd_session_state_change", ntf_data, fvp) + + +def get_bfd_session_id(dvs, addr): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION") + entries = set(tbl.getKeys()) + for entry in entries: + status, fvs = tbl.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr: + return entry + + return None + + +def check_del_bfd_session(dvs, addrs): + for addr in addrs: + assert get_bfd_session_id(dvs, addr) is None + + +def check_bfd_session(dvs, addrs): + for addr in addrs: + assert get_bfd_session_id(dvs, addr) is not None + + +def check_state_db_routes(dvs, vnet, prefix, endpoints): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + + status, fvs = tbl.get(vnet + '|' + prefix) + assert status, "Got an error when get a key" + + fvs = dict(fvs) + assert fvs['active_endpoints'] == 
','.join(endpoints) + + if endpoints: + assert fvs['state'] == 'active' + else: + assert fvs['state'] == 'inactive' + + +def check_remove_state_db_routes(dvs, vnet, prefix): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(state_db, "VNET_ROUTE_TUNNEL_TABLE") + keys = tbl.getKeys() + + assert vnet + '|' + prefix not in keys + loopback_id = 0 def_vr_id = 0 switch_mac = None @@ -438,6 +501,7 @@ class VnetVxlanVrfTunnel(object): ASIC_VLAN_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VLAN" ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" + ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" tunnel_map_ids = set() tunnel_map_entry_ids = set() @@ -460,6 +524,7 @@ def fetch_exist_entries(self, dvs): self.routes = get_exist_entries(dvs, self.ASIC_ROUTE_ENTRY) self.nhops = get_exist_entries(dvs, self.ASIC_NEXT_HOP) self.nhgs = get_exist_entries(dvs, self.ASIC_NEXT_HOP_GROUP) + self.bfd_sessions = get_exist_entries(dvs, self.ASIC_BFD_SESSION) global loopback_id, def_vr_id, switch_mac if not loopback_id: @@ -863,6 +928,7 @@ def test_vnet_orch_1(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000', '10.10.10.1') vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.1', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32", ['10.10.10.1']) create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet_2000', 'Vlan100') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2000') @@ -883,6 +949,7 @@ def test_vnet_orch_1(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001', '10.10.10.2', "00:12:34:56:78:9A") vnet_obj.check_vnet_routes(dvs, 'Vnet_2001', '10.10.10.2', tunnel_name, "00:12:34:56:78:9A") + check_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32", ['10.10.10.2']) create_vnet_local_routes(dvs, "100.102.1.0/24", 
'Vnet_2001', 'Ethernet4') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2001') @@ -900,9 +967,11 @@ def test_vnet_orch_1(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet_2001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2001') + check_remove_state_db_routes(dvs, 'Vnet_2001', "100.100.2.1/32") delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet_2000') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') + check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.1/32") delete_phy_interface(dvs, "Ethernet4", "100.102.1.1/24") vnet_obj.check_del_router_interface(dvs, "Ethernet4") @@ -943,18 +1012,22 @@ def test_vnet_orch_2(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1', '100.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32", ['100.1.1.10']) vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1', '100.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '100.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32", ['100.1.1.10']) vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1', '200.200.1.200') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.200', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32", ['200.200.1.200']) vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1', '200.200.1.201') vnet_obj.check_vnet_routes(dvs, 'Vnet_1', '200.200.1.201', tunnel_name) + check_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32", ['200.200.1.201']) create_vnet_local_routes(dvs, "1.1.10.0/24", 'Vnet_1', 'Vlan1001') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_1') @@ -970,10 +1043,12 @@ def test_vnet_orch_2(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2', '100.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) + check_state_db_routes(dvs, 
'Vnet_2', "2.2.2.10/32", ['100.1.1.20']) vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2', '100.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_2', '100.1.1.20', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32", ['100.1.1.20']) create_vnet_local_routes(dvs, "2.2.10.0/24", 'Vnet_2', 'Vlan1002') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_2') @@ -988,21 +1063,27 @@ def test_vnet_orch_2(self, dvs, testlog): delete_vnet_routes(dvs, "2.2.2.11/32", 'Vnet_2') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') + check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.11/32") delete_vnet_routes(dvs, "2.2.2.10/32", 'Vnet_2') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2') + check_remove_state_db_routes(dvs, 'Vnet_2', "2.2.2.10/32") delete_vnet_routes(dvs, "1.1.1.14/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.14/32") delete_vnet_routes(dvs, "1.1.1.12/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.12/32") delete_vnet_routes(dvs, "1.1.1.11/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.11/32") delete_vnet_routes(dvs, "1.1.1.10/32", 'Vnet_1') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_1') + check_remove_state_db_routes(dvs, 'Vnet_1', "1.1.1.10/32") delete_vlan_interface(dvs, "Vlan1002", "2.2.10.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan1002") @@ -1049,10 +1130,12 @@ def test_vnet_orch_3(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10', '50.1.1.10') vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '50.1.1.10', tunnel_name) + check_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32", ['50.1.1.10']) vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20', '80.1.1.20') vnet_obj.check_vnet_routes(dvs, 'Vnet_10', '80.1.1.20', tunnel_name) + check_state_db_routes(dvs, 
'Vnet_20', "8.8.8.10/32", ['80.1.1.20']) create_vnet_local_routes(dvs, "5.5.10.0/24", 'Vnet_10', 'Vlan2001') vnet_obj.check_vnet_local_routes(dvs, 'Vnet_10') @@ -1070,9 +1153,11 @@ def test_vnet_orch_3(self, dvs, testlog): delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet_10') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_10') + check_remove_state_db_routes(dvs, 'Vnet_10', "5.5.5.10/32") delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet_20') vnet_obj.check_del_vnet_routes(dvs, 'Vnet_20') + check_remove_state_db_routes(dvs, 'Vnet_20', "8.8.8.10/32") delete_vlan_interface(dvs, "Vlan2001", "5.5.10.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan2001") @@ -1112,9 +1197,11 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) + check_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) create_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000') vnet_obj.check_vnet_routes(dvs, 'Vnet3001', '2000:1000:2000:3000:4000:5000:6000:7000', tunnel_name) + check_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32", ['2000:1000:2000:3000:4000:5000:6000:7000']) create_vnet_local_routes(dvs, "100.100.3.0/24", 'Vnet3001', 'Vlan300') vnet_obj.check_vnet_local_routes(dvs, 'Vnet3001') @@ -1134,6 +1221,7 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet3002', 'fd:2::34', "00:12:34:56:78:9A") vnet_obj.check_vnet_routes(dvs, 'Vnet3002', 'fd:2::34', tunnel_name, "00:12:34:56:78:9A") + check_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/32", ['fd:2::34']) create_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002', 'Ethernet60') vnet_obj.check_vnet_local_routes(dvs, 'Vnet3002') @@ -1151,17 +1239,21 @@ def test_vnet_orch_4(self, dvs, testlog): create_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003', 
'fd:2::35') vnet_obj.check_vnet_routes(dvs, 'Vnet3004', 'fd:2::35', tunnel_name) + check_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32", ['fd:2::35']) create_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004', 'fd:2::36') vnet_obj.check_vnet_routes(dvs, 'Vnet3003', 'fd:2::36', tunnel_name) + check_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32", ['fd:2::36']) # Clean-up and verify remove flows delete_vnet_routes(dvs, "5.5.5.10/32", 'Vnet3003') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3003') + check_remove_state_db_routes(dvs, 'Vnet3003', "5.5.5.10/32") delete_vnet_routes(dvs, "8.8.8.10/32", 'Vnet3004') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3004') + check_remove_state_db_routes(dvs, 'Vnet3004', "8.8.8.10/32") delete_vnet_entry(dvs, 'Vnet3003') vnet_obj.check_del_vnet_entry(dvs, 'Vnet3003') @@ -1171,6 +1263,7 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.2.1/24", 'Vnet3002') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3002') + check_remove_state_db_routes(dvs, 'Vnet3002', "100.100.2.1/24") delete_vnet_local_routes(dvs, "100.102.1.0/24", 'Vnet3002') vnet_obj.check_del_vnet_local_routes(dvs, 'Vnet3002') @@ -1189,9 +1282,11 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet3001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') + check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.1/32") delete_vnet_routes(dvs, "100.100.1.2/32", 'Vnet3001') vnet_obj.check_del_vnet_routes(dvs, 'Vnet3001') + check_remove_state_db_routes(dvs, 'Vnet3001', "100.100.1.2/32") delete_vlan_interface(dvs, "Vlan300", "100.100.3.1/24") vnet_obj.check_del_router_interface(dvs, "Vlan300") @@ -1259,10 +1354,12 @@ def test_vnet_orch_7(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3') route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", 
['7.0.0.1', '7.0.0.2', '7.0.0.3']) # Set the tunnel route to another nexthop group set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1271,12 +1368,14 @@ def test_vnet_orch_7(self, dvs, testlog): # Create another tunnel route to the same set of endpoints create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7', '7.0.0.1,7.0.0.2,7.0.0.3,7.0.0.4') route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet7', ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4'], tunnel_name) + check_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32", ['7.0.0.1', '7.0.0.2', '7.0.0.3', '7.0.0.4']) assert nhg2_1 == nhg1_2 # Remove one of the tunnel routes delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet7') vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet7', "100.100.1.1/32") # Check the nexthop group still exists vnet_obj.fetch_exist_entries(dvs) @@ -1285,6 +1384,7 @@ def test_vnet_orch_7(self, dvs, testlog): # Remove the other tunnel route delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet7') vnet_obj.check_del_vnet_routes(dvs, 'Vnet7', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet7', "100.100.2.1/32") # Check the nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1315,10 +1415,12 @@ def test_vnet_orch_8(self, dvs, testlog): vnet_obj.fetch_exist_entries(dvs) create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) # Set the tunnel 
route to another nexthop group set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1327,18 +1429,21 @@ def test_vnet_orch_8(self, dvs, testlog): # Create another tunnel route to the same set of endpoints create_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) + check_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) assert nhg2_1 == nhg1_2 # Create another tunnel route with ipv4 prefix to the same set of endpoints create_vnet_routes(dvs, "8.0.0.0/24", 'Vnet8', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') route3, nhg3_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet8', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name) + check_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) assert nhg3_1 == nhg1_2 # Remove one of the tunnel routes delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet8') vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:10::32/128") # Check the nexthop group still exists vnet_obj.fetch_exist_entries(dvs) @@ -1347,10 +1452,12 @@ def test_vnet_orch_8(self, dvs, testlog): # Remove tunnel route 2 delete_vnet_routes(dvs, "fd:8:20::32/128", 'Vnet8') vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["fd:8:20::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet8', "fd:8:20::32/128") # Remove tunnel route 3 delete_vnet_routes(dvs, "8.0.0.0/24", 
'Vnet8') vnet_obj.check_del_vnet_routes(dvs, 'Vnet8', ["8.0.0.0/24"]) + check_remove_state_db_routes(dvs, 'Vnet8', "8.0.0.0/24") # Check the nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -1358,6 +1465,337 @@ def test_vnet_orch_8(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet8') vnet_obj.check_del_vnet_entry(dvs, 'Vnet8') + + + ''' + Test 9 - Test for vnet tunnel routes with ECMP nexthop group with endpoint health monitor + ''' + def test_vnet_orch_9(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_9' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, 'Vnet9', tunnel_name, '10009', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet9') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet9', '10009') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", 
['9.0.0.1', '9.0.0.3']) + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.5', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1']) + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '9.1.0.5', 'Up') + time.sleep(2) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.1', '9.0.0.5']) + + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1']) + + # Set the route1 to a new group + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9', '9.0.0.1,9.0.0.2,9.0.0.3,9.0.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4') + update_bfd_session_state(dvs, '9.1.0.4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.4']) + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.1', '9.0.0.2', '9.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.4']) + + # Set all endpoint to down state + 
update_bfd_session_state(dvs, '9.1.0.1', 'Down') + update_bfd_session_state(dvs, '9.1.0.2', 'Down') + update_bfd_session_state(dvs, '9.1.0.3', 'Down') + update_bfd_session_state(dvs, '9.1.0.4', 'Down') + time.sleep(2) + + # Confirm the tunnel route is updated in ASIC + vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet9', ['9.0.0.5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32", ['9.0.0.5']) + check_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32", []) + + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet9') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet9', "100.100.2.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['9.1.0.5']) + check_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet9') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet9', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet9', "100.100.1.1/32") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3', '9.1.0.4', '9.1.0.5']) + + delete_vnet_entry(dvs, 'Vnet9') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet9') + + + ''' + Test 10 - Test for ipv6 vnet tunnel routes with ECMP nexthop group with endpoint health monitor + ''' + def test_vnet_orch_10(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_10' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + 
create_vnet_entry(dvs, 'Vnet10', tunnel_name, '10010', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet10') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet10', '10010') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + + # Route should be properly configured when all bfd session states go up + update_bfd_session_state(dvs, 'fd:10:2::1', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') + update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3']) + + # Remove endpoint from group if it goes down + update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + time.sleep(2) + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::3']) + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::5', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::5') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1']) + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, 'fd:10:2::5', 'Up') + time.sleep(2) + route2, 
nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::1', 'fd:10:1::5']) + + # Update BFD state and check route nexthop + update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Up') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2']) + + # Set the route to a new group + set_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10', 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4') + update_bfd_session_state(dvs, 'fd:10:2::4', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::4']) + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + + # Set BFD session state for a down endpoint to up + update_bfd_session_state(dvs, 'fd:10:2::3', 'Up') + time.sleep(2) + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4'], tunnel_name, route_ids=route1, nhg=nhg1_2) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", ['fd:10:1::1', 'fd:10:1::2', 'fd:10:1::3', 'fd:10:1::4']) + + # Set all endpoint to down state + update_bfd_session_state(dvs, 'fd:10:2::1', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::2', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::3', 'Down') + update_bfd_session_state(dvs, 'fd:10:2::4', 'Down') + time.sleep(2) + + # Confirm the tunnel route is updated in ASIC + 
vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet10', ['fd:10:1::5'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128", ['fd:10:1::5']) + check_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128", []) + + # Remove tunnel route2 + delete_vnet_routes(dvs, "fd:10:20::1/128", 'Vnet10') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:20::1/128"]) + check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:20::1/128") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['fd:10:2::5']) + check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['fd:10:2::5']) + check_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "fd:10:10::1/128", 'Vnet10') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet10', ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, 'Vnet10', "fd:10:10::1/128") + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['fd:10:2::1', 'fd:10:2::2', 'fd:10:2::3', 'fd:10:2::4', 'fd:10:2::5']) + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + + delete_vnet_entry(dvs, 'Vnet10') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet10') + + + ''' + Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor + ''' + def test_vnet_orch_11(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_11' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 
'11.11.11.11') + create_vnet_entry(dvs, 'Vnet11', tunnel_name, '100011', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet11') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet11', '100011') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '11.11.11.11') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.1', ep_monitor='11.1.0.1') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + + # Route should be properly configured when bfd session state goes up + update_bfd_session_state(dvs, '11.1.0.1', 'Up') + time.sleep(2) + vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.1', tunnel_name) + check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", ['11.0.0.1']) + + # Create another tunnel route with endpoint group overlapped with route1 + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11', '11.0.0.1,11.0.0.2', ep_monitor='11.1.0.1,11.1.0.2') + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1'], tunnel_name) + check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1']) + + # Create a third tunnel route with another endpoint + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') + + # Update BFD session state and verify route change + update_bfd_session_state(dvs, '11.1.0.2', 'Up') + time.sleep(2) + vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) + route2, nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.1', '11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) + check_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.1', '11.0.0.2']) + + update_bfd_session_state(dvs, '11.1.0.1', 'Down') + time.sleep(2) + route2, 
nhg2_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet11', ['11.0.0.2'], tunnel_name, route_ids=route2, nhg=nhg2_1) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32", ['11.0.0.2']) + check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", []) + + # Set the route1 to a new endpoint + vnet_obj.fetch_exist_entries(dvs) + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11', '11.0.0.2', ep_monitor='11.1.0.2') + vnet_obj.check_vnet_routes(dvs, 'Vnet11', '11.0.0.2', tunnel_name) + check_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32", ['11.0.0.2']) + + # Remove tunnel route2 + delete_vnet_routes(dvs, "100.100.2.1/32", 'Vnet11') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.2.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet11', "100.100.2.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg2_1 not in vnet_obj.nhgs + + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['11.1.0.1']) + check_bfd_session(dvs, ['11.1.0.2']) + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet11') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet11', "100.100.1.1/32") + + # Remove tunnel route 3 + delete_vnet_routes(dvs, "100.100.3.1/32", 'Vnet11') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet11', ["100.100.3.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet11', "100.100.3.1/32") + + # Confirm the BFD sessions are removed + check_del_bfd_session(dvs, ['11.1.0.1', '11.1.0.2']) + + delete_vnet_entry(dvs, 'Vnet11') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet11') + # Add Dummy always-pass test at end as workaroud