From f5c5cc50df4cab45101b4aa3236a4562eb469eed Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Fri, 20 May 2022 17:04:14 -0700 Subject: [PATCH 01/64] [vstest]: Increase PollingConfig default timeout (#2285) *Increase the default timeout for PollingConfig to 20 seconds Signed-off-by: Lawrence Lee --- tests/dvslib/dvs_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/dvslib/dvs_common.py b/tests/dvslib/dvs_common.py index b2a09d5da7..0d81b4cf2e 100644 --- a/tests/dvslib/dvs_common.py +++ b/tests/dvslib/dvs_common.py @@ -17,7 +17,7 @@ class PollingConfig: """ polling_interval: float = 0.01 - timeout: float = 5.00 + timeout: float = 20.00 strict: bool = True def iterations(self) -> int: From 40316f7189486f2541ca3e163f746946a62f55a1 Mon Sep 17 00:00:00 2001 From: mohan-selvaraj <54177569+mohan-selvaraj@users.noreply.github.com> Date: Tue, 24 May 2022 22:44:40 +0530 Subject: [PATCH 02/64] Broadcast Unknown-multicast and Unknown-unicast Storm-control (#1306) * Handle BUM Storm-control CONFIG_DB update. * Segregate POLICER table and PORT_STORM_CONTROL table handling * Broadcast, Unknown-multicast and Unknown-unicast storm-control on Ethernet interfaces. 
--- orchagent/orchdaemon.cpp | 13 +- orchagent/policerorch.cpp | 281 ++++++++++++++++++++++++++- orchagent/policerorch.h | 6 +- tests/mock_tests/aclorch_ut.cpp | 7 +- tests/mock_tests/copporch_ut.cpp | 6 +- tests/test_storm_control.py | 316 +++++++++++++++++++++++++++++++ 6 files changed, 621 insertions(+), 8 deletions(-) create mode 100644 tests/test_storm_control.py diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 147c87459c..d4b40844a8 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -45,6 +45,7 @@ QosOrch *gQosOrch; SwitchOrch *gSwitchOrch; Directory gDirectory; NatOrch *gNatOrch; +PolicerOrch *gPolicerOrch; MlagOrch *gMlagOrch; IsoGrpOrch *gIsoGrpOrch; MACsecOrch *gMacsecOrch; @@ -242,11 +243,17 @@ bool OrchDaemon::init() }; gBufferOrch = new BufferOrch(m_applDb, m_configDb, m_stateDb, buffer_tables); - PolicerOrch *policer_orch = new PolicerOrch(m_configDb, "POLICER"); + vector policer_tables = { + TableConnector(m_configDb, CFG_POLICER_TABLE_NAME), + TableConnector(m_configDb, CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + + TableConnector stateDbStorm(m_stateDb, "BUM_STORM_CAPABILITY"); + gPolicerOrch = new PolicerOrch(policer_tables, gPortsOrch); TableConnector stateDbMirrorSession(m_stateDb, STATE_MIRROR_SESSION_TABLE_NAME); TableConnector confDbMirrorSession(m_configDb, CFG_MIRROR_SESSION_TABLE_NAME); - gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, policer_orch); + gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, gPolicerOrch); TableConnector confDbAclTable(m_configDb, CFG_ACL_TABLE_TABLE_NAME); TableConnector confDbAclTableType(m_configDb, CFG_ACL_TABLE_TYPE_TABLE_NAME); @@ -339,7 +346,7 @@ bool OrchDaemon::init() * when iterating ConsumerMap. This is ensured implicitly by the order of keys in ordered map. 
* For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, mux_orch, mux_cb_orch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, mux_orch, mux_cb_orch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, gPolicerOrch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) diff --git a/orchagent/policerorch.cpp b/orchagent/policerorch.cpp index c4528f6330..68dfffe898 100644 --- a/orchagent/policerorch.cpp +++ b/orchagent/policerorch.cpp @@ -8,10 +8,13 @@ using namespace std; using namespace swss; extern sai_policer_api_t* sai_policer_api; +extern sai_port_api_t *sai_port_api; extern sai_object_id_t gSwitchId; extern PortsOrch* gPortsOrch; +#define ETHERNET_PREFIX "Ethernet" + static const string meter_type_field = "METER_TYPE"; static const string mode_field = "MODE"; static const string color_source_field = "COLOR_SOURCE"; @@ -23,6 +26,11 @@ static const string green_packet_action_field = "GREEN_PACKET_ACTION"; static const string red_packet_action_field = "RED_PACKET_ACTION"; static const string yellow_packet_action_field = "YELLOW_PACKET_ACTION"; +static const string storm_control_kbps = "KBPS"; +static const string storm_broadcast = "broadcast"; +static const string storm_unknown_unicast = "unknown-unicast"; +static const string storm_unknown_mcast = "unknown-multicast"; + static const map meter_type_map = { {"PACKETS", SAI_METER_TYPE_PACKETS}, {"BYTES", SAI_METER_TYPE_BYTES} @@ 
-105,15 +113,268 @@ bool PolicerOrch::decreaseRefCount(const string &name) return true; } -PolicerOrch::PolicerOrch(DBConnector* db, string tableName) : - Orch(db, tableName) +PolicerOrch::PolicerOrch(vector &tableNames, PortsOrch *portOrch) : Orch(tableNames), m_portsOrch(portOrch) { SWSS_LOG_ENTER(); } +task_process_status PolicerOrch::handlePortStormControlTable(swss::KeyOpFieldsValuesTuple tuple) +{ + auto key = kfvKey(tuple); + auto op = kfvOp(tuple); + string storm_key = key; + auto tokens = tokenize(storm_key, config_db_key_delimiter); + auto interface_name = tokens[0]; + auto storm_type = tokens[1]; + Port port; + + /*Only proceed for Ethernet interfaces*/ + if (strncmp(interface_name.c_str(), ETHERNET_PREFIX, strlen(ETHERNET_PREFIX))) + { + SWSS_LOG_ERROR("%s: Unsupported / Invalid interface %s", + storm_type.c_str(), interface_name.c_str()); + return task_process_status::task_success; + } + if (!gPortsOrch->getPort(interface_name, port)) + { + SWSS_LOG_ERROR("Failed to apply storm-control %s to port %s. Port not found", + storm_type.c_str(), interface_name.c_str()); + /*continue here as there can be more interfaces*/ + return task_process_status::task_success; + } + /*Policer Name: __*/ + const auto storm_policer_name = "_"+interface_name+"_"+storm_type; + + if (op == SET_COMMAND) + { + // Mark the operation as an 'update', if the policer exists. 
+ bool update = m_syncdPolicers.find(storm_policer_name) != m_syncdPolicers.end(); + vector attrs; + bool cir = false; + sai_attribute_t attr; + + /*Meter type hardcoded to BYTES*/ + attr.id = SAI_POLICER_ATTR_METER_TYPE; + attr.value.s32 = (sai_meter_type_t) meter_type_map.at("BYTES"); + attrs.push_back(attr); + + /*Policer mode hardcoded to STORM_CONTROL*/ + attr.id = SAI_POLICER_ATTR_MODE; + attr.value.s32 = (sai_policer_mode_t) policer_mode_map.at("STORM_CONTROL"); + attrs.push_back(attr); + + /*Red Packet Action hardcoded to DROP*/ + attr.id = SAI_POLICER_ATTR_RED_PACKET_ACTION; + attr.value.s32 = packet_action_map.at("DROP"); + attrs.push_back(attr); + + for (auto i = kfvFieldsValues(tuple).begin(); + i != kfvFieldsValues(tuple).end(); ++i) + { + auto field = to_upper(fvField(*i)); + auto value = to_upper(fvValue(*i)); + + /*BPS value is used as CIR*/ + if (field == storm_control_kbps) + { + attr.id = SAI_POLICER_ATTR_CIR; + /*convert kbps to bps*/ + attr.value.u64 = (stoul(value)*1000/8); + cir = true; + attrs.push_back(attr); + SWSS_LOG_DEBUG("CIR %s",value.c_str()); + } + else + { + SWSS_LOG_ERROR("Unknown storm control attribute %s specified", + field.c_str()); + continue; + } + } + /*CIR is mandatory parameter*/ + if (!cir) + { + SWSS_LOG_ERROR("Failed to create storm control policer %s,\ + missing mandatory fields", storm_policer_name.c_str()); + return task_process_status::task_failed; + } + + /*Enabling storm-control on port*/ + sai_attribute_t port_attr; + if (storm_type == storm_broadcast) + { + port_attr.id = SAI_PORT_ATTR_BROADCAST_STORM_CONTROL_POLICER_ID; + } + else if (storm_type == storm_unknown_unicast) + { + port_attr.id = SAI_PORT_ATTR_FLOOD_STORM_CONTROL_POLICER_ID; + } + else if (storm_type == storm_unknown_mcast) + { + port_attr.id = SAI_PORT_ATTR_MULTICAST_STORM_CONTROL_POLICER_ID; + } + else + { + SWSS_LOG_ERROR("Unknown storm_type %s", storm_type.c_str()); + return task_process_status::task_failed; + } + + sai_object_id_t policer_id; 
+ // Create a new policer + if (!update) + { + sai_status_t status = sai_policer_api->create_policer( + &policer_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create policer %s, rv:%d", + storm_policer_name.c_str(), status); + if (handleSaiCreateStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + } + + SWSS_LOG_DEBUG("Created storm-control policer %s", storm_policer_name.c_str()); + m_syncdPolicers[storm_policer_name] = policer_id; + m_policerRefCounts[storm_policer_name] = 0; + } + // Update an existing policer + else + { + policer_id = m_syncdPolicers[storm_policer_name]; + + // The update operation has limitations that it could only update + // the rate and the size accordingly. + // STORM_CONTROL: CIR, CBS + for (auto & attr: attrs) + { + if (attr.id != SAI_POLICER_ATTR_CIR) + { + continue; + } + + sai_status_t status = sai_policer_api->set_policer_attribute( + policer_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update policer %s attribute, rv:%d", + storm_policer_name.c_str(), status); + if (handleSaiSetStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + + } + } + } + policer_id = m_syncdPolicers[storm_policer_name]; + + if (update) + { + SWSS_LOG_NOTICE("update storm-control policer %s", storm_policer_name.c_str()); + port_attr.value.oid = SAI_NULL_OBJECT_ID; + /*Remove and re-apply policer*/ + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &port_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove storm-control %s from port %s, rv:%d", + storm_type.c_str(), interface_name.c_str(), status); + if (handleSaiSetStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + } + } + port_attr.value.oid = policer_id; + + sai_status_t status = 
sai_port_api->set_port_attribute(port.m_port_id, &port_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to apply storm-control %s to port %s, rv:%d", + storm_type.c_str(), interface_name.c_str(),status); + + /*TODO: Do the below policer cleanup in an API*/ + /*Remove the already created policer*/ + if (SAI_STATUS_SUCCESS != sai_policer_api->remove_policer( + m_syncdPolicers[storm_policer_name])) + { + SWSS_LOG_ERROR("Failed to remove policer %s, rv:%d", + storm_policer_name.c_str(), status); + /*TODO: Just doing a syslog. */ + } + + SWSS_LOG_NOTICE("Removed policer %s as set_port_attribute for %s failed", + storm_policer_name.c_str(),interface_name.c_str()); + m_syncdPolicers.erase(storm_policer_name); + m_policerRefCounts.erase(storm_policer_name); + + return task_process_status::task_need_retry; + } + } + else if (op == DEL_COMMAND) + { + if (m_syncdPolicers.find(storm_policer_name) == m_syncdPolicers.end()) + { + SWSS_LOG_ERROR("Policer %s not configured", storm_policer_name.c_str()); + return task_process_status::task_success; + } + + sai_attribute_t port_attr; + if (storm_type == storm_broadcast) + { + port_attr.id = SAI_PORT_ATTR_BROADCAST_STORM_CONTROL_POLICER_ID; + } + else if (storm_type == storm_unknown_unicast) + { + port_attr.id = SAI_PORT_ATTR_FLOOD_STORM_CONTROL_POLICER_ID; + } + else if (storm_type == storm_unknown_mcast) + { + port_attr.id = SAI_PORT_ATTR_MULTICAST_STORM_CONTROL_POLICER_ID; + } + else + { + SWSS_LOG_ERROR("Unknown storm_type %s", storm_type.c_str()); + return task_process_status::task_failed; + } + + port_attr.value.oid = SAI_NULL_OBJECT_ID; + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &port_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove storm-control %s from port %s, rv:%d", + storm_type.c_str(), interface_name.c_str(), status); + if (handleSaiRemoveStatus(SAI_API_POLICER, status) == task_need_retry) + { + return 
task_process_status::task_need_retry; + } + } + + status = sai_policer_api->remove_policer( + m_syncdPolicers[storm_policer_name]); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove policer %s, rv:%d", + storm_policer_name.c_str(), status); + if (handleSaiRemoveStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + } + + SWSS_LOG_NOTICE("Removed policer %s", storm_policer_name.c_str()); + m_syncdPolicers.erase(storm_policer_name); + m_policerRefCounts.erase(storm_policer_name); + } + return task_process_status::task_success; +} + void PolicerOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); + task_process_status storm_status = task_success; if (!gPortsOrch->allPortsReady()) { @@ -127,7 +388,23 @@ void PolicerOrch::doTask(Consumer &consumer) auto key = kfvKey(tuple); auto op = kfvOp(tuple); + auto table_name = consumer.getTableName(); + // Special handling for storm-control configuration. + if (table_name == CFG_PORT_STORM_CONTROL_TABLE_NAME) + { + storm_status = handlePortStormControlTable(tuple); + if ((storm_status == task_process_status::task_success) || + (storm_status == task_process_status::task_failed)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + continue; + } if (op == SET_COMMAND) { // Mark the operation as an 'update', if the policer exists. 
diff --git a/orchagent/policerorch.h b/orchagent/policerorch.h index d735da03b7..9814179958 100644 --- a/orchagent/policerorch.h +++ b/orchagent/policerorch.h @@ -14,16 +14,20 @@ typedef map PolicerRefCountTable; class PolicerOrch : public Orch { public: - PolicerOrch(DBConnector* db, string tableName); + PolicerOrch(vector &tableNames, PortsOrch *portOrch); bool policerExists(const string &name); bool getPolicerOid(const string &name, sai_object_id_t &oid); bool increaseRefCount(const string &name); bool decreaseRefCount(const string &name); + task_process_status handlePortStormControlTable(swss::KeyOpFieldsValuesTuple tuple); private: + PortsOrch *m_portsOrch; virtual void doTask(Consumer& consumer); PolicerTable m_syncdPolicers; PolicerRefCountTable m_policerRefCounts; }; + + diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 6f58b12a3b..0d81c93f69 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -426,7 +426,12 @@ namespace aclorch_test }; gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); - PolicerOrch *policer_orch = new PolicerOrch(m_config_db.get(), "POLICER"); + vector policer_tables = { + TableConnector(m_config_db.get(), CFG_POLICER_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + TableConnector stateDbStorm(m_state_db.get(), "BUM_STORM_CAPABILITY"); + PolicerOrch *policer_orch = new PolicerOrch(policer_tables, gPortsOrch); TableConnector stateDbMirrorSession(m_state_db.get(), STATE_MIRROR_SESSION_TABLE_NAME); TableConnector confDbMirrorSession(m_config_db.get(), CFG_MIRROR_SESSION_TABLE_NAME); diff --git a/tests/mock_tests/copporch_ut.cpp b/tests/mock_tests/copporch_ut.cpp index 36ba71bc67..fa7c360f01 100644 --- a/tests/mock_tests/copporch_ut.cpp +++ b/tests/mock_tests/copporch_ut.cpp @@ -203,7 +203,11 @@ namespace copporch_test // PolicerOrch // - auto policerOrch = 
new PolicerOrch(this->configDb.get(), CFG_POLICER_TABLE_NAME); + vector policer_tables = { + TableConnector(this->configDb.get(), CFG_POLICER_TABLE_NAME), + TableConnector(this->configDb.get(), CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + auto policerOrch = new PolicerOrch(policer_tables, gPortsOrch); gDirectory.set(policerOrch); resourcesList.push_back(policerOrch); diff --git a/tests/test_storm_control.py b/tests/test_storm_control.py new file mode 100644 index 0000000000..76deef9268 --- /dev/null +++ b/tests/test_storm_control.py @@ -0,0 +1,316 @@ +from swsscommon import swsscommon +import os +import sys +import time +import json +from distutils.version import StrictVersion +import pytest + +class TestStormControl(object): + def setup_db(self,dvs): + self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + dvs.runcmd(['sh', '-c', "echo 0 > /var/log/syslog"]) + + def create_port_channel(self, dvs, lag_name): + dvs.runcmd("config portchannel add " + lag_name) + time.sleep(1) + + def delete_port_channel(self, dvs, lag_name): + dvs.runcmd("config portchannel del " + lag_name) + time.sleep(1) + + def add_port_channel_member(self, dvs, lag_name, member): + dvs.runcmd("config portchannel member add "+ lag_name + " "+ member) + time.sleep(1) + + def remove_port_channel_member(self, dvs, lag_name, member): + dvs.runcmd("config portchannel member del "+ lag_name + " "+ member) + time.sleep(1) + + def create_vlan(self, dvs, vlan): + dvs.runcmd("config vlan add " + vlan) + time.sleep(1) + + def delete_vlan(self, dvs, vlan): + dvs.runcmd("config vlan del " + vlan) + time.sleep(1) + + def add_vlan_member(self, dvs, vlan, interface): + dvs.runcmd("config vlan member add " + vlan + " " + interface) + time.sleep(1) + + def remove_vlan_member(self, dvs, vlan, interface): + dvs.runcmd("config vlan member del " + 
vlan + " " + interface) + time.sleep(1) + + def add_storm_session(self, if_name, storm_type, kbps_value): + tbl = swsscommon.Table(self.cdb, "PORT_STORM_CONTROL") + fvs = swsscommon.FieldValuePairs([("kbps", str(kbps_value))]) + key = if_name + "|" + storm_type + tbl.set(key,fvs) + time.sleep(1) + + def delete_storm_session(self, if_name, storm_type): + tbl = swsscommon.Table(self.cdb, "PORT_STORM_CONTROL") + key = if_name + "|" + storm_type + tbl._del(key) + time.sleep(1) + + def test_bcast_storm(self,dvs,testlog): + self.setup_db(dvs) + + if_name = "Ethernet0" + storm_type = "broadcast" + #User input is Kbps + #Orchagent converts the value to CIR as below and programs the ASIC DB + #kbps_value * 1000 / 8 + kbps_value = 1000000 + self.add_storm_control_on_interface(dvs,if_name,storm_type,kbps_value) + self.del_storm_control(dvs,if_name,storm_type) + + def del_storm_control(self, dvs, if_name, storm_type): + self.setup_db(dvs) + port_oid = dvs.asicdb.portnamemap[if_name] + atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + status, fvs = atbl.get(dvs.asicdb.portnamemap[if_name]) + assert status == True + + storm_type_port_attr = self.get_port_attr_for_storm_type(storm_type) + + policer_oid = 0 + for fv in fvs: + if fv[0] == storm_type_port_attr: + policer_oid = fv[1] + + self.delete_storm_session(if_name, storm_type) + tbl = swsscommon.Table(self.cdb, "PORT_STORM_CONTROL") + (status,fvs) = tbl.get(if_name+"|"+storm_type) + assert status == False + + atbl = swsscommon.Table(self.adb,"ASIC_STATE:SAI_OBJECT_TYPE_PORT") + status, fvs = atbl.get(dvs.asicdb.portnamemap[if_name]) + assert status == True + + for fv in fvs: + if fv[0] == storm_type_port_attr: + assert fv[1] == "oid:0x0" + + if policer_oid != 0: + atbl = swsscommon.Table(self.adb,"ASIC_STATE:SAI_OBJECT_TYPE_POLICER") + status, fvs = atbl.get(policer_oid) + assert status == False + + def test_uucast_storm(self,dvs,testlog): + self.setup_db(dvs) + + if_name = "Ethernet0" + storm_type = 
"unknown-unicast" + #User input is Kbps + #Orchagent converts the value to CIR as below and programs the ASIC DB + #kbps_value * 1000 / 8 + kbps_value = 1000000 + + self.add_storm_control_on_interface(dvs,if_name,storm_type,kbps_value) + self.del_storm_control(dvs,if_name,storm_type) + + def test_umcast_storm(self,dvs,testlog): + self.setup_db(dvs) + + if_name = "Ethernet0" + storm_type = "unknown-multicast" + #User input is Kbps + #Orchagent converts the value to CIR as below and programs the ASIC DB + #kbps_value * 1000 / 8 + kbps_value = 1000000 + + self.add_storm_control_on_interface(dvs,if_name,storm_type,kbps_value) + self.del_storm_control(dvs,if_name,storm_type) + + def get_port_attr_for_storm_type(self,storm_type): + port_attr = "" + if storm_type == "broadcast": + port_attr = "SAI_PORT_ATTR_BROADCAST_STORM_CONTROL_POLICER_ID" + elif storm_type == "unknown-unicast": + port_attr = "SAI_PORT_ATTR_FLOOD_STORM_CONTROL_POLICER_ID" + elif storm_type == "unknown-multicast": + port_attr = "SAI_PORT_ATTR_MULTICAST_STORM_CONTROL_POLICER_ID" + + return port_attr + + def check_storm_control_on_interface(self,dvs,if_name,storm_type,kbps_value): + print ("interface {} storm_type {} kbps {}".format(if_name,storm_type, kbps_value)) + tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + (status,fvs) = tbl.get(if_name+"|"+storm_type) + + assert status == True + assert len(fvs) > 0 + + port_oid = dvs.asicdb.portnamemap[if_name] + + atbl = swsscommon.Table(self.adb,"ASIC_STATE:SAI_OBJECT_TYPE_PORT") + status, fvs = atbl.get(dvs.asicdb.portnamemap[if_name]) + assert status == True + + policer_oid = 0 + + storm_type_port_attr = self.get_port_attr_for_storm_type(storm_type) + + for fv in fvs: + if fv[0] == storm_type_port_attr: + assert fv[1] != "oid:0x0" + policer_oid = fv[1] + + if policer_oid != 0: + atbl = swsscommon.Table(self.adb,"ASIC_STATE:SAI_OBJECT_TYPE_POLICER") + status, fvs = atbl.get(policer_oid) + assert status == True + + bps = 0 + + for fv in fvs: + if fv[0] 
== "SAI_POLICER_ATTR_CIR": + bps = fv[1] + + #Retrieved value of bps from ASIC_DB is converted back to user input kbps + kbps = int(int(bps) / int(1000) * 8) + print ("Kbps value {}".format(kbps)) + + assert str(kbps) == str(kbps_value) + + + def add_storm_control_on_interface(self,dvs,if_name,storm_type,kbps_value): + print ("interface {} storm_type {} kbps {}".format(if_name,storm_type,kbps_value)) + self.add_storm_session(if_name, storm_type, kbps_value) + self.check_storm_control_on_interface(dvs,if_name,storm_type,kbps_value) + + def test_add_storm_all_interfaces(self,dvs,testlog): + self.setup_db(dvs) + + tbl = swsscommon.Table(self.cdb,"PORT") + for key in tbl.getKeys(): + self.add_storm_control_on_interface(dvs,key,"broadcast",1000000) + self.add_storm_control_on_interface(dvs,key,"unknown-unicast",2000000) + self.add_storm_control_on_interface(dvs,key,"unknown-multicast",3000000) + self.del_storm_control(dvs,key,"broadcast") + self.del_storm_control(dvs,key,"unknown-unicast") + self.del_storm_control(dvs,key,"unknown-multicast") + + def test_warm_restart_all_interfaces(self,dvs,testlog): + self.setup_db(dvs) + + tbl = swsscommon.Table(self.cdb,"PORT") + for key in tbl.getKeys(): + self.add_storm_control_on_interface(dvs,key,"broadcast",1000000) + self.add_storm_control_on_interface(dvs,key,"unknown-unicast",2000000) + self.add_storm_control_on_interface(dvs,key,"unknown-multicast",3000000) + #dvs.runcmd("config save -y") + # enable warm restart + dvs.warm_restart_swss("true") + + # freeze orchagent for warm restart + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + assert result == "RESTARTCHECK succeeded\n" + time.sleep(2) + + dvs.stop_swss() + time.sleep(10) + dvs.start_swss() + time.sleep(10) + + for key in tbl.getKeys(): + self.check_storm_control_on_interface(dvs,key,"broadcast",1000000) + self.check_storm_control_on_interface(dvs,key,"unknown-unicast",2000000) + 
self.check_storm_control_on_interface(dvs,key,"unknown-multicast",3000000) + self.del_storm_control(dvs,key,"broadcast") + self.del_storm_control(dvs,key,"unknown-unicast") + self.del_storm_control(dvs,key,"unknown-multicast") + # disable warm restart + dvs.warm_restart_swss("false") + + def test_add_storm_lag_interface(self,dvs,testlog): + self.setup_db(dvs) + lag_name = "PortChannel10" + member_interface = "Ethernet0" + kbps_value = 1000000 + storm_list = ["broadcast","unknown-unicast","unknown-multicast"] + kbps_value_list = [1000000,2000000,3000000] + + #Create LAG interface and add member + self.create_port_channel(dvs,lag_name) + self.add_port_channel_member(dvs,lag_name,member_interface) + + #click CLI verification + #for storm_type in storm_list: + # dvs.runcmd("config interface storm-control add "+lag_name+" "+storm_type+" "+str(kbps_value)) + # tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + # (status,fvs) = tbl.get(lag_name+"|"+storm_type) + # assert status == False + # assert len(fvs) == 0 + + #Orchagent verification + storm_list_db = ["broadcast","unknown-unicast","unknown-multicast"] + for storm_type,kbps_value in zip(storm_list_db,kbps_value_list): + #Cleanup syslog + dvs.runcmd(['sh', '-c', "echo 0 > /var/log/syslog"]) + time.sleep(1) + print ("storm type: {} kbps value: {}".format(storm_type,kbps_value)) + #Add storm entry to config DB directly + self.add_storm_session(lag_name,storm_type,kbps_value) + tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + (status,fvs) = tbl.get(lag_name+"|"+storm_type) + assert status == True + assert len(fvs) > 0 + time.sleep(1) + #grep for error message in syslog + (exitcode,num) = dvs.runcmd(['sh', '-c', 'cat /var/log/syslog | grep -i "handlePortStormControlTable: {}: Unsupported / Invalid interface PortChannel10"'.format(storm_type)]) + time.sleep(1) + assert exitcode == 0 + self.delete_storm_session(lag_name, storm_type) + self.remove_port_channel_member(dvs,lag_name,member_interface) + 
self.delete_port_channel(dvs,lag_name) + + def test_add_storm_vlan_interface(self,dvs,testlog): + self.setup_db(dvs) + vlan_id = 99 + member_interface = "Ethernet4" + kbps_value = 1000000 + storm_list = ["broadcast","unknown-unicast","unknown-multicast"] + kbps_value_list = [1000000,2000000,3000000] + vlan_name = "Vlan"+str(vlan_id) + + #Create VLAN interface and add member + self.create_vlan(dvs,str(vlan_id)) + self.add_vlan_member(dvs,str(vlan_id),member_interface) + + #click CLI verification + #for storm_type in storm_list: + # dvs.runcmd("config interface storm-control add Vlan"+str(vlan_id)+" "+storm_type+" "+str(kbps_value)) + # tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + # (status,fvs) = tbl.get("Vlan"+str(vlan_id)+"|"+storm_type) + # assert status == False + # assert len(fvs) == 0 + + #Orchagent verification + storm_list_db = ["broadcast","unknown-unicast","unknown-multicast"] + for storm_type,kbps_value in zip(storm_list_db,kbps_value_list): + #Cleanup syslog + dvs.runcmd(['sh', '-c', "echo 0 > /var/log/syslog"]) + time.sleep(1) + print ("storm type: {} kbps value: {}".format(storm_type,kbps_value)) + #Add storm entry to config DB directly + self.add_storm_session(vlan_name,storm_type,kbps_value) + tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + (status,fvs) = tbl.get(vlan_name+"|"+storm_type) + assert status == True + assert len(fvs) > 0 + time.sleep(1) + #grep for error message in syslog + (exitcode,num) = dvs.runcmd(['sh', '-c', 'cat /var/log/syslog | grep -i "handlePortStormControlTable: {}: Unsupported / Invalid interface {}"'.format(storm_type,vlan_name)]) + time.sleep(1) + assert exitcode == 0 + self.delete_storm_session(vlan_name, storm_type) + self.remove_vlan_member(dvs,str(vlan_id),member_interface) + self.delete_vlan(dvs,str(vlan_id)) From 7fc0f738408e8bf896d494f940670e795349e7ec Mon Sep 17 00:00:00 2001 From: Lior Avramov <73036155+liorghub@users.noreply.github.com> Date: Wed, 25 May 2022 11:17:50 +0300 Subject: [PATCH 
03/64] Update netlink messages handler (#2233) - What I did Ignore netlink DELLINK messages if port has master, this is applicable to the case where port was part of VLAN bridge or LAG. - Why I did it Netlink messages handler in portsyncd was ignoring all messages that had master. Therefore we ignored messages on interfaces that belong to LAG (not only interfaces belong to bridge as intended). The result was "netdev_oper_status" down in PORT_TABLE in state DB for port which is part of LAG although it is actually up. - How I verified it Check "netdev_oper_status" in PORT_TABLE in state DB for port which is part of LAG. --- portsyncd/linksync.cpp | 7 +++-- tests/test_portchannel.py | 57 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 4 deletions(-) diff --git a/portsyncd/linksync.cpp b/portsyncd/linksync.cpp index d3beeee632..fc28411613 100644 --- a/portsyncd/linksync.cpp +++ b/portsyncd/linksync.cpp @@ -205,10 +205,9 @@ void LinkSync::onMsg(int nlmsg_type, struct nl_object *obj) return; } - /* If netlink for this port has master, we ignore that for now - * This could be the case where the port was removed from VLAN bridge - */ - if (master) + /* Ignore DELLINK message if port has master, this is applicable to + * the case where port was part of VLAN bridge or LAG */ + if (master && nlmsg_type == RTM_DELLINK) { return; } diff --git a/tests/test_portchannel.py b/tests/test_portchannel.py index ee612ec46d..3e24b6a340 100644 --- a/tests/test_portchannel.py +++ b/tests/test_portchannel.py @@ -382,6 +382,63 @@ def test_Portchannel_tpid(self, dvs, testlog): tbl._del("PortChannel0002") time.sleep(1) + def test_portchannel_member_netdev_oper_status(self, dvs, testlog): + config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + # create port-channel + tbl = 
swsscommon.Table(config_db, "PORTCHANNEL") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "up")]) + tbl.set("PortChannel111", fvs) + + # set port-channel oper status + tbl = swsscommon.ProducerStateTable(app_db, "LAG_TABLE") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "up")]) + tbl.set("PortChannel111", fvs) + + # add members to port-channel + tbl = swsscommon.Table(config_db, "PORTCHANNEL_MEMBER") + fvs = swsscommon.FieldValuePairs([("NULL", "NULL")]) + tbl.set("PortChannel111|Ethernet0", fvs) + tbl.set("PortChannel111|Ethernet4", fvs) + + # wait for port-channel netdev creation + time.sleep(1) + + # set netdev oper status + (exitcode, _) = dvs.runcmd("ip link set up dev Ethernet0") + assert exitcode == 0, "ip link set failed" + + (exitcode, _) = dvs.runcmd("ip link set up dev Ethernet4") + assert exitcode == 0, "ip link set failed" + + (exitcode, _) = dvs.runcmd("ip link set dev PortChannel111 carrier on") + assert exitcode == 0, "ip link set failed" + + # verify port-channel members netdev oper status + tbl = swsscommon.Table(state_db, "PORT_TABLE") + status, fvs = tbl.get("Ethernet0") + assert status is True + fvs = dict(fvs) + assert fvs['netdev_oper_status'] == 'up' + + status, fvs = tbl.get("Ethernet4") + assert status is True + fvs = dict(fvs) + assert fvs['netdev_oper_status'] == 'up' + + # remove port-channel members + tbl = swsscommon.Table(config_db, "PORTCHANNEL_MEMBER") + tbl._del("PortChannel111|Ethernet0") + tbl._del("PortChannel111|Ethernet4") + + # remove port-channel + tbl = swsscommon.Table(config_db, "PORTCHANNEL") + tbl._del("PortChannel111") + + # wait for port-channel deletion + time.sleep(1) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying From 0e5e7ba5cb5581c1ea4d470216cf3aadcd19fb94 Mon Sep 17 00:00:00 2001 From: Nikola Dancejic 
<26731235+Ndancejic@users.noreply.github.com> Date: Wed, 25 May 2022 12:11:11 -0700 Subject: [PATCH 04/64] [muxorch] Handling optional attributes in muxorch (#2288) What I did: Added soc_ipv4 and cable_type as optional attributes in muxorch. Why I did it: cable_type field in MUX_CABLE table was throwing parse errors How I did it: adding soc_ipv4 and cable_type wo mux_cfg_request_description Signed-off-by: Nikola Dancejic --- orchagent/muxorch.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 6e4f70408c..bf230a6d71 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -131,6 +131,8 @@ const request_description_t mux_cfg_request_description = { { "server_ipv4", REQ_T_IP_PREFIX }, { "server_ipv6", REQ_T_IP_PREFIX }, { "address_ipv4", REQ_T_IP }, + { "soc_ipv4", REQ_T_IP_PREFIX }, + { "cable_type", REQ_T_STRING }, }, { } }; From 2f1324473001e9fd26c2ed7bbb92fb387732a0d0 Mon Sep 17 00:00:00 2001 From: Shilong Liu Date: Thu, 26 May 2022 11:28:31 +0800 Subject: [PATCH 05/64] [ci] Improve azp trigger settings to automaticlly support new release branch. (#2289) Signed-off-by: Shilong Liu --- azure-pipelines.yml | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 06cd17686e..9bc567b67d 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -3,10 +3,29 @@ # Add steps that publish test results, save build artifacts, deploy, and more: # https://docs.microsoft.com/azure/devops/pipelines/apps/c-cpp/gcc +pr: +- master +- 202??? +- 201??? + trigger: + batch: true + branches: + include: + - master + - 202??? + - 201??? + +# this part need to be set in UI +schedules: +- cron: "0 0 * * 6" + displayName: Weekly build branches: include: - - "*" + - master + - 202??? + - 201??? 
+ always: true stages: - stage: Build From d7b5ff79907b22ed13a120d599eaf7c1ad8b6837 Mon Sep 17 00:00:00 2001 From: Shilong Liu Date: Thu, 26 May 2022 18:03:05 +0800 Subject: [PATCH 06/64] [ci] Use correct branch when downloading artifact. (#2292) --- .azure-pipelines/build-docker-sonic-vs-template.yml | 7 +++---- .azure-pipelines/build-template.yml | 8 ++++---- .azure-pipelines/test-docker-sonic-vs-template.yml | 2 +- azure-pipelines.yml | 7 +++++++ 4 files changed, 15 insertions(+), 9 deletions(-) diff --git a/.azure-pipelines/build-docker-sonic-vs-template.yml b/.azure-pipelines/build-docker-sonic-vs-template.yml index 97e8afb394..b0a6562d3a 100644 --- a/.azure-pipelines/build-docker-sonic-vs-template.yml +++ b/.azure-pipelines/build-docker-sonic-vs-template.yml @@ -39,7 +39,7 @@ jobs: pipeline: 9 artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 inputs: @@ -48,7 +48,7 @@ jobs: pipeline: 12 artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' displayName: "Download sonic sairedis deb packages" - task: DownloadPipelineArtifact@2 inputs: @@ -61,13 +61,12 @@ jobs: pipeline: 1 artifact: sonic-buildimage.vs runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' displayName: "Download sonic buildimage" - script: | echo $(Build.DefinitionName).$(Build.BuildNumber) docker load < ../target/docker-sonic-vs.gz - mkdir -p .azure-pipelines/docker-sonic-vs/debs cp -v ../*.deb .azure-pipelines/docker-sonic-vs/debs diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index 632bdb3107..f6690731b2 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -54,7 
+54,7 @@ jobs: pool: ${{ if ne(parameters.pool, 'default') }}: name: ${{ parameters.pool }} - ${{ if eq(parameters.pool, 'default') }}: + ${{ else }}: vmImage: 'ubuntu-20.04' container: @@ -82,7 +82,7 @@ jobs: pipeline: 9 artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' path: '$(Build.SourcesDirectory)/${{ parameters.swss_common_artifact_name }}' displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 @@ -92,7 +92,7 @@ jobs: pipeline: 12 artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' path: '$(Build.SourcesDirectory)/${{ parameters.sairedis_artifact_name }}' displayName: "Download sonic sairedis deb packages" - task: DownloadPipelineArtifact@2 @@ -104,7 +104,7 @@ jobs: pipeline: ${{ parameters.buildimage_pipeline }} artifact: ${{ parameters.buildimage_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}' displayName: "Download sonic buildimage deb packages" - script: | diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 7b1b3c4163..2ba42d458b 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -40,7 +40,7 @@ jobs: pipeline: 9 artifact: sonic-swss-common.amd64.ubuntu20_04 runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' displayName: "Download sonic swss common deb packages" - script: | diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 9bc567b67d..8dda0580f6 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,6 +27,13 @@ schedules: - 201??? 
always: true +variables: + - name: BUILD_BRANCH + ${{ if eq(variables['Build.Reason'], 'PullRequest') }}: + value: $(System.PullRequest.TargetBranch) + ${{ else }}: + value: $(Build.SourceBranchName) + stages: - stage: Build From 583236f8a9b0a1885fd24330548c743b7f2c3a00 Mon Sep 17 00:00:00 2001 From: Yilan Ji Date: Thu, 26 May 2022 16:54:24 -0700 Subject: [PATCH 07/64] [P4Orch] Lazy UDF match creation to avoid failure during warm reboot (#2282) * [P4Orch] Lazy UDF match creation. --- orchagent/p4orch/acl_table_manager.cpp | 18 ++++++++++-------- orchagent/p4orch/tests/acl_manager_test.cpp | 20 ++++++++++++++++++-- orchagent/p4orch/tests/wcmp_manager_test.cpp | 11 ----------- tests/p4rt/test_l3.py | 12 ++++++------ tests/p4rt/test_p4rt_acl.py | 11 ++++++++++- 5 files changed, 44 insertions(+), 28 deletions(-) diff --git a/orchagent/p4orch/acl_table_manager.cpp b/orchagent/p4orch/acl_table_manager.cpp index 456c2f04d2..312f54c51c 100644 --- a/orchagent/p4orch/acl_table_manager.cpp +++ b/orchagent/p4orch/acl_table_manager.cpp @@ -36,16 +36,15 @@ AclTableManager::AclTableManager(P4OidMapper *p4oidMapper, ResponsePublisherInte SWSS_LOG_ENTER(); assert(p4oidMapper != nullptr); - // Create the default UDF match - auto status = createDefaultUdfMatch(); - if (!status.ok()) - { - SWSS_LOG_ERROR("Failed to create ACL UDF default match : %s", status.message().c_str()); - } } AclTableManager::~AclTableManager() { + sai_object_id_t udf_match_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_MATCH, P4_UDF_MATCH_DEFAULT, &udf_match_oid)) + { + return; + } auto status = removeDefaultUdfMatch(); if (!status.ok()) { @@ -465,8 +464,11 @@ ReturnCode AclTableManager::createUdf(const P4UdfField &udf_field) sai_object_id_t udf_match_oid; if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_MATCH, P4_UDF_MATCH_DEFAULT, &udf_match_oid)) { - return ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "UDF default match " << QuotedVar(P4_UDF_MATCH_DEFAULT) << " does not exist"; + // Create the 
default UDF match + LOG_AND_RETURN_IF_ERROR(createDefaultUdfMatch() + << "Failed to create ACL UDF default match " + << QuotedVar(P4_UDF_MATCH_DEFAULT)); + m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_MATCH, P4_UDF_MATCH_DEFAULT, &udf_match_oid); } std::vector udf_attrs; sai_attribute_t udf_attr; diff --git a/orchagent/p4orch/tests/acl_manager_test.cpp b/orchagent/p4orch/tests/acl_manager_test.cpp index 64ba37e5a3..55abf24606 100644 --- a/orchagent/p4orch/tests/acl_manager_test.cpp +++ b/orchagent/p4orch/tests/acl_manager_test.cpp @@ -834,8 +834,6 @@ class AclManagerTest : public ::testing::Test Truly(std::bind(MatchSaiSwitchAttrByAclStage, SAI_SWITCH_ATTR_PRE_INGRESS_ACL, kAclGroupLookupOid, std::placeholders::_1)))) .WillRepeatedly(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); std::vector p4_tables; gP4Orch = new P4Orch(gAppDb, p4_tables, gVrfOrch, copp_orch_); acl_table_manager_ = gP4Orch->getAclTableManager(); @@ -860,6 +858,8 @@ class AclManagerTest : public ::testing::Test .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .Times(3) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); @@ -1156,6 +1156,8 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenCapabilityExceeds) auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + 
.WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); @@ -1170,6 +1172,8 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenFailedToCreateTableGroupMe auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); @@ -1187,6 +1191,8 @@ TEST_F(AclManagerTest, CreateIngressPuntTableRaisesCriticalStateWhenAclTableReco auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); @@ -1205,6 +1211,8 @@ TEST_F(AclManagerTest, CreateIngressPuntTableRaisesCriticalStateWhenUdfGroupReco auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + 
.WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); @@ -1223,6 +1231,8 @@ TEST_F(AclManagerTest, CreateIngressPuntTableRaisesCriticalStateWhenUdfRecoveryF auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); @@ -1244,6 +1254,8 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenFailedToCreateUdf) AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); // Fail to create the first UDF, and success to remove the first UDF // group + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_udf_, remove_udf_group(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); @@ -2099,6 +2111,8 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, 
create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); @@ -4055,6 +4069,8 @@ TEST_F(AclManagerTest, DoAclCounterStatsTaskSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); diff --git a/orchagent/p4orch/tests/wcmp_manager_test.cpp b/orchagent/p4orch/tests/wcmp_manager_test.cpp index 73cf34be25..b2cc68aaaa 100644 --- a/orchagent/p4orch/tests/wcmp_manager_test.cpp +++ b/orchagent/p4orch/tests/wcmp_manager_test.cpp @@ -12,7 +12,6 @@ #include "mock_sai_next_hop_group.h" #include "mock_sai_serialize.h" #include "mock_sai_switch.h" -#include "mock_sai_udf.h" #include "p4oidmapper.h" #include "p4orch.h" #include "p4orch/p4orch_util.h" @@ -31,7 +30,6 @@ extern sai_object_id_t gSwitchId; extern sai_next_hop_group_api_t *sai_next_hop_group_api; extern sai_hostif_api_t *sai_hostif_api; extern sai_switch_api_t *sai_switch_api; -extern sai_udf_api_t *sai_udf_api; extern sai_object_id_t gSwitchId; extern sai_acl_api_t *sai_acl_api; @@ -68,7 +66,6 @@ const std::string kWcmpGroupKey1 = KeyGenerator::generateWcmpGroupKey(kWcmpGroup const std::string kNexthopKey1 = 
KeyGenerator::generateNextHopKey(kNexthopId1); const std::string kNexthopKey2 = KeyGenerator::generateNextHopKey(kNexthopId2); const std::string kNexthopKey3 = KeyGenerator::generateNextHopKey(kNexthopId3); -constexpr sai_object_id_t kUdfMatchOid1 = 5001; // Matches the next hop group type sai_attribute_t argument. bool MatchSaiNextHopGroupAttribute(const sai_attribute_t *attr) @@ -154,7 +151,6 @@ class WcmpManagerTest : public ::testing::Test EXPECT_CALL(mock_sai_switch_, set_switch_attribute(Eq(gSwitchId), _)) .WillRepeatedly(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, remove_acl_table_group(_)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_udf_, remove_udf_match(_)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); delete gP4Orch; delete copp_orch_; } @@ -167,7 +163,6 @@ class WcmpManagerTest : public ::testing::Test mock_sai_hostif = &mock_sai_hostif_; mock_sai_serialize = &mock_sai_serialize_; mock_sai_acl = &mock_sai_acl_; - mock_sai_udf = &mock_sai_udf_; sai_next_hop_group_api->create_next_hop_group = create_next_hop_group; sai_next_hop_group_api->remove_next_hop_group = remove_next_hop_group; @@ -181,8 +176,6 @@ class WcmpManagerTest : public ::testing::Test sai_switch_api->set_switch_attribute = mock_set_switch_attribute; sai_acl_api->create_acl_table_group = create_acl_table_group; sai_acl_api->remove_acl_table_group = remove_acl_table_group; - sai_udf_api->create_udf_match = create_udf_match; - sai_udf_api->remove_udf_match = remove_udf_match; } void setUpP4Orch() @@ -194,9 +187,6 @@ class WcmpManagerTest : public ::testing::Test copp_orch_ = new CoppOrch(gAppDb, APP_COPP_TABLE_NAME); // init P4 orch - EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); - std::vector p4_tables; gP4Orch = new P4Orch(gAppDb, p4_tables, gVrfOrch, copp_orch_); } @@ -301,7 +291,6 @@ class WcmpManagerTest : public ::testing::Test StrictMock mock_sai_hostif_; 
StrictMock mock_sai_serialize_; StrictMock mock_sai_acl_; - StrictMock mock_sai_udf_; P4OidMapper *p4_oid_mapper_; WcmpManager *wcmp_group_manager_; CoppOrch *copp_orch_; diff --git a/tests/p4rt/test_l3.py b/tests/p4rt/test_l3.py index 4156576bc2..bbe7d07653 100644 --- a/tests/p4rt/test_l3.py +++ b/tests/p4rt/test_l3.py @@ -262,7 +262,7 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) # Query application database for route entries. @@ -651,7 +651,7 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Verify that P4RT key to OID count is same as original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) # Query application database for route entries. @@ -1148,7 +1148,7 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) def test_PruneNextHopOnWarmBoot(self, dvs, testlog): @@ -1386,7 +1386,7 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): @@ -1620,7 +1620,7 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. 
status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): @@ -1841,5 +1841,5 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) diff --git a/tests/p4rt/test_p4rt_acl.py b/tests/p4rt/test_p4rt_acl.py index 89015fc9d5..52989e5b72 100644 --- a/tests/p4rt/test_p4rt_acl.py +++ b/tests/p4rt/test_p4rt_acl.py @@ -257,8 +257,17 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) + asic_udf_matches = util.get_keys( + self._p4rt_udf_match_obj.asic_db, self._p4rt_udf_match_obj.ASIC_DB_TBL_NAME + ) + # query ASIC database for default UDF wildcard match - udf_match_asic_db_key = original_asic_udf_matches[0] + udf_match_asic_db_keys = [ + key for key in asic_udf_matches if key not in original_asic_udf_matches + ] + + assert len(udf_match_asic_db_keys) == 1 + udf_match_asic_db_key = udf_match_asic_db_keys[0] (status, fvs) = util.get_key( self._p4rt_udf_match_obj.asic_db, From 4d6fa42ab97c236d54ce4d36e884ec3e224e65c2 Mon Sep 17 00:00:00 2001 From: Shilong Liu Date: Fri, 27 May 2022 12:28:52 +0800 Subject: [PATCH 08/64] [ci] Change artifact reference pipeline to common lib pipeline. (#2294) * [ci] Change artifact reference pipeline to common lib pipeline. 
--- .../build-docker-sonic-vs-template.yml | 24 +++-- .azure-pipelines/build-template.yml | 95 ++++++++----------- .../test-docker-sonic-vs-template.yml | 18 ++-- azure-pipelines.yml | 9 +- 4 files changed, 68 insertions(+), 78 deletions(-) diff --git a/.azure-pipelines/build-docker-sonic-vs-template.yml b/.azure-pipelines/build-docker-sonic-vs-template.yml index b0a6562d3a..ff0ff6c0cb 100644 --- a/.azure-pipelines/build-docker-sonic-vs-template.yml +++ b/.azure-pipelines/build-docker-sonic-vs-template.yml @@ -36,40 +36,47 @@ jobs: inputs: source: specific project: build - pipeline: 9 + pipeline: Azure.sonic-swss-common artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 12 + pipeline: Azure.sonic-sairedis artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download displayName: "Download sonic sairedis deb packages" - task: DownloadPipelineArtifact@2 inputs: artifact: ${{ parameters.swss_artifact_name }} - displayName: "Download sonic swss artifact" + path: $(Build.ArtifactStagingDirectory)/download + displayName: "Download pre-stage built ${{ parameters.swss_artifact_name }}" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 1 + pipeline: Azure.sonic-buildimage.official.vs artifact: sonic-buildimage.vs runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(BUILD_BRANCH)' - displayName: "Download sonic buildimage" + path: $(Build.ArtifactStagingDirectory)/download + patterns: '**/target/docker-sonic-vs.gz' + displayName: "Download sonic-buildimage docker-sonic-vs" - script: | + set -ex echo $(Build.DefinitionName).$(Build.BuildNumber) - docker load < 
../target/docker-sonic-vs.gz + docker load < $(Build.ArtifactStagingDirectory)/download/target/docker-sonic-vs.gz + mkdir -p .azure-pipelines/docker-sonic-vs/debs - cp -v ../*.deb .azure-pipelines/docker-sonic-vs/debs + cp -v $(Build.ArtifactStagingDirectory)/download/*.deb .azure-pipelines/docker-sonic-vs/debs pushd .azure-pipelines @@ -78,7 +85,8 @@ jobs: popd docker save docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) | gzip -c > $(Build.ArtifactStagingDirectory)/docker-sonic-vs.gz - + rm -rf $(Build.ArtifactStagingDirectory)/download + displayName: "Build docker-sonic-vs" - publish: $(Build.ArtifactStagingDirectory)/ artifact: ${{ parameters.artifact_name }} displayName: "Archive sonic docker vs image" diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index f6690731b2..f8040bca56 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -23,12 +23,6 @@ parameters: - name: sonic_slave type: string -- name: buildimage_artifact_name - type: string - -- name: buildimage_pipeline - type: number - - name: sairedis_artifact_name type: string @@ -46,6 +40,9 @@ parameters: type: boolean default: false +- name: common_lib_artifact_name + type: string + jobs: - job: displayName: ${{ parameters.arch }} @@ -79,77 +76,63 @@ jobs: inputs: source: specific project: build - pipeline: 9 + pipeline: Azure.sonic-swss-common artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(BUILD_BRANCH)' - path: '$(Build.SourcesDirectory)/${{ parameters.swss_common_artifact_name }}' + path: $(Build.ArtifactStagingDirectory)/download + patterns: | + libswsscommon_1.0.0_${{ parameters.arch }}.deb + libswsscommon-dev_1.0.0_${{ parameters.arch }}.deb displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 12 + pipeline: Azure.sonic-sairedis artifact: ${{ 
parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(BUILD_BRANCH)' - path: '$(Build.SourcesDirectory)/${{ parameters.sairedis_artifact_name }}' + path: $(Build.ArtifactStagingDirectory)/download + patterns: | + libsaivs_*.deb + libsaivs-dev_*.deb + libsairedis_*.deb + libsairedis-dev_*.deb + libsaimetadata_*.deb + libsaimetadata-dev_*.deb + syncd-vs_*.deb displayName: "Download sonic sairedis deb packages" - task: DownloadPipelineArtifact@2 - ${{ if eq(parameters.buildimage_pipeline, 141) }}: - continueOnError: True inputs: source: specific project: build - pipeline: ${{ parameters.buildimage_pipeline }} - artifact: ${{ parameters.buildimage_artifact_name }} + pipeline: Azure.sonic-buildimage.common_libs + artifact: ${{ parameters.common_lib_artifact_name }} runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(BUILD_BRANCH)' - path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}' - displayName: "Download sonic buildimage deb packages" - - script: | - buildimage_artifact_downloaded=n - [ -d "$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}/target" ] && buildimage_artifact_downloaded=y - echo "buildimage_artifact_downloaded=$buildimage_artifact_downloaded" - echo "##vso[task.setvariable variable=buildimage_artifact_downloaded]$buildimage_artifact_downloaded" - condition: eq(${{ parameters.buildimage_pipeline }}, 141) - displayName: "Check if sonic buildimage deb packages downloaded" - - task: DownloadPipelineArtifact@2 - condition: and(eq(variables.buildimage_artifact_downloaded, 'n'), eq(${{ parameters.buildimage_pipeline }}, 141)) - inputs: - source: specific - project: build - pipeline: ${{ parameters.buildimage_pipeline }} - artifact: 'sonic-buildimage.marvell-armhf1' - runVersion: specific - runId: 80637 - path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}' - displayName: "Download sonic buildimage deb packages from 80637" + path: 
$(Build.ArtifactStagingDirectory)/download + patterns: | + target/debs/buster/libnl-3-200_*.deb + target/debs/buster/libnl-3-dev_*.deb + target/debs/buster/libnl-genl-3-200_*.deb + target/debs/buster/libnl-genl-3-dev_*.deb + target/debs/buster/libnl-route-3-200_*.deb + target/debs/buster/libnl-route-3-dev_*.deb + target/debs/buster/libnl-nf-3-200_*.deb + target/debs/buster/libnl-nf-3-dev_*.deb + displayName: "Download common libs" - script: | - cd $(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }} - sudo dpkg -i target/debs/buster/libnl-3-200_*.deb - sudo dpkg -i target/debs/buster/libnl-3-dev_*.deb - sudo dpkg -i target/debs/buster/libnl-genl-3-200_*.deb - sudo dpkg -i target/debs/buster/libnl-genl-3-dev_*.deb - sudo dpkg -i target/debs/buster/libnl-route-3-200_*.deb - sudo dpkg -i target/debs/buster/libnl-route-3-dev_*.deb - sudo dpkg -i target/debs/buster/libnl-nf-3-200_*.deb - sudo dpkg -i target/debs/buster/libnl-nf-3-dev_*.deb - cd $(Build.SourcesDirectory)/${{ parameters.swss_common_artifact_name }} - sudo dpkg -i libswsscommon_1.0.0_${{ parameters.arch }}.deb - sudo dpkg -i libswsscommon-dev_1.0.0_${{ parameters.arch }}.deb - cd $(Build.SourcesDirectory)/${{ parameters.sairedis_artifact_name }} - sudo dpkg -i libsaivs_*.deb - sudo dpkg -i libsaivs-dev_*.deb - sudo dpkg -i libsairedis_*.deb - sudo dpkg -i libsairedis-dev_*.deb - sudo dpkg -i libsaimetadata_*.deb - sudo dpkg -i libsaimetadata-dev_*.deb - sudo dpkg -i syncd-vs_*.deb - workingDirectory: $(Pipeline.Workspace) + set -ex + cd download + sudo dpkg -i $(find target/debs/buster -type f) + sudo dpkg -i $(ls *.deb) + cd .. 
+ rm -rf download + workingDirectory: $(Build.ArtifactStagingDirectory) displayName: "Install libnl3, sonic swss common and sairedis" - script: | - set -x + set -ex tar czf pytest.tgz tests cp -r pytest.tgz $(Build.ArtifactStagingDirectory)/ if [ '${{ parameters.archive_gcov }}' == True ]; then diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 2ba42d458b..fc1527f72c 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -31,25 +31,26 @@ jobs: - task: DownloadPipelineArtifact@2 inputs: artifact: docker-sonic-vs - displayName: "Download docker sonic vs image" - + path: $(Build.ArtifactStagingDirectory)/download + displayName: "Download pre-stage built docker-sonic-vs" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 9 + pipeline: Azure.sonic-swss-common artifact: sonic-swss-common.amd64.ubuntu20_04 runVersion: 'latestFromBranch' runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download displayName: "Download sonic swss common deb packages" - script: | - set -x + set -ex sudo .azure-pipelines/build_and_install_module.sh sudo apt-get install -y libhiredis0.14 - sudo dpkg -i --force-confask,confnew ../libswsscommon_1.0.0_amd64.deb || apt-get install -f - sudo dpkg -i ../python3-swsscommon_1.0.0_amd64.deb + sudo dpkg -i --force-confask,confnew $(Build.ArtifactStagingDirectory)/download/libswsscommon_1.0.0_amd64.deb || apt-get install -f + sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/python3-swsscommon_1.0.0_amd64.deb # install packages for vs test sudo apt-get install -y net-tools bridge-utils vlan @@ -58,8 +59,8 @@ jobs: displayName: "Install dependencies" - script: | - set -x - sudo docker load -i ../docker-sonic-vs.gz + set -ex + sudo docker load -i $(Build.ArtifactStagingDirectory)/download/docker-sonic-vs.gz docker ps ip netns list uname -a @@ 
-72,6 +73,7 @@ jobs: else sudo py.test -v --force-flaky --junitxml=tr.xml --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) fi + rm -rf $(Build.ArtifactStagingDirectory)/download displayName: "Run vs tests" - task: PublishTestResults@2 diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8dda0580f6..166905654e 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -42,8 +42,7 @@ stages: parameters: arch: amd64 sonic_slave: sonic-slave-buster - buildimage_artifact_name: sonic-buildimage.vs - buildimage_pipeline: 142 + common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common sairedis_artifact_name: sonic-sairedis artifact_name: sonic-swss @@ -60,8 +59,7 @@ stages: timeout: 240 pool: sonicbld-armhf sonic_slave: sonic-slave-buster-armhf - buildimage_artifact_name: sonic-buildimage.marvell-armhf - buildimage_pipeline: 141 + common_lib_artifact_name: common-lib.armhf swss_common_artifact_name: sonic-swss-common.armhf sairedis_artifact_name: sonic-sairedis.armhf artifact_name: sonic-swss.armhf @@ -73,9 +71,8 @@ stages: timeout: 240 pool: sonicbld-arm64 sonic_slave: sonic-slave-buster-arm64 + common_lib_artifact_name: common-lib.arm64 swss_common_artifact_name: sonic-swss-common.arm64 - buildimage_artifact_name: sonic-buildimage.centec-arm64 - buildimage_pipeline: 140 sairedis_artifact_name: sonic-sairedis.arm64 artifact_name: sonic-swss.arm64 archive_gcov: false From 910bfd4d17782a059daf2d81deb87673ae6ca58e Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Sat, 28 May 2022 03:04:14 +0800 Subject: [PATCH 09/64] [ACL] Add default action_list for default ACL table type (#2298) What I did This PR is derived from #2205 Fix Azure/sonic-buildimage#10425 We were seeing ACL table creation failure on some platform because action_list is mandatory, while the action_list is not provided by aclorch. 
Apr 1 01:24:11.702608 str2-7050cx3-acs-03 ERR swss#orchagent: :- validate: Action list for table DATAACL is mandatory Apr 1 01:24:11.702608 str2-7050cx3-acs-03 ERR swss#orchagent: :- doAclTableTask: Failed to create ACL table DATAACL, invalid configuration Apr 1 01:24:11.702741 str2-7050cx3-acs-03 ERR swss#orchagent: :- validate: Action list for table EVERFLOW is mandatory Apr 1 01:24:11.702741 str2-7050cx3-acs-03 ERR swss#orchagent: :- doAclTableTask: Failed to create ACL table EVERFLOW, invalid configuration Apr 1 01:24:11.702926 str2-7050cx3-acs-03 ERR swss#orchagent: :- validate: Action list for table EVERFLOWV6 is mandatory Apr 1 01:24:11.702926 str2-7050cx3-acs-03 ERR swss#orchagent: :- doAclTableTask: Failed to create ACL table EVERFLOWV6, invalid configuration This PR fixed the issue by adding default action_list to the default ACL table type if not present. Why I did it Fix the ACL table creation issue. How I verified it Verified by running test_acl and test_everflow on Broadcom TD3 platform Signed-off-by: bingwang Co-authored-by: syuan --- orchagent/aclorch.cpp | 223 ++++++++++++++++++++++++++++++++ orchagent/aclorch.h | 8 ++ tests/mock_tests/aclorch_ut.cpp | 92 +++++++++++++ 3 files changed, 323 insertions(+) diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index e371ecd980..73aa02dac9 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -153,6 +153,176 @@ static const acl_capabilities_t defaultAclActionsSupported = } }; +static acl_table_action_list_lookup_t defaultAclActionList = +{ + { + // L3 + TABLE_TYPE_L3, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + } + } + }, + { + // L3V6 + TABLE_TYPE_L3V6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + }, + { + ACL_STAGE_EGRESS, + { + 
SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + } + } + }, + { + // MIRROR + TABLE_TYPE_MIRROR, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_EGRESS + } + } + } + }, + { + // MIRRORV6 + TABLE_TYPE_MIRRORV6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_EGRESS + } + } + } + }, + { + // MIRROR_DSCP + TABLE_TYPE_MIRROR_DSCP, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_EGRESS + } + } + } + }, + { + // TABLE_TYPE_PFCWD + TABLE_TYPE_PFCWD, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + } + } + }, + { + // MCLAG + TABLE_TYPE_MCLAG, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + } + } + }, + { + // MUX + TABLE_TYPE_MUX, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + } + } + }, + { + // DROP + TABLE_TYPE_DROP, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + } + } + } +}; + static acl_ip_type_lookup_t aclIpTypeLookup = { { IP_TYPE_ANY, SAI_ACL_IP_TYPE_ANY }, @@ -301,6 +471,12 @@ const set& AclTableType::getActions() const return m_aclAcitons; } +bool AclTableType::addAction(sai_acl_action_type_t action) +{ + m_aclAcitons.insert(action); + return true; +} + AclTableTypeBuilder& AclTableTypeBuilder::withName(string name) { m_tableType.m_name = name; @@ -1808,6 +1984,51 @@ AclTable::AclTable(AclOrch *pAclOrch) noexcept : m_pAclOrch(pAclOrch) } +bool AclTable::addMandatoryActions() +{ + 
SWSS_LOG_ENTER(); + + if (stage == ACL_STAGE_UNKNOWN) + { + return false; + } + + if (!m_pAclOrch->isAclActionListMandatoryOnTableCreation(stage)) + { + // No op if action list is not mandatory on table creation. + return true; + } + if (!type.getActions().empty()) + { + // No change if action_list is provided + return true; + } + + sai_acl_action_type_t acl_action = SAI_ACL_ACTION_TYPE_COUNTER; + if (m_pAclOrch->isAclActionSupported(stage, acl_action)) + { + SWSS_LOG_INFO("Add counter acl action"); + type.addAction(acl_action); + } + + if (defaultAclActionList.count(type.getName()) != 0) + { + // Add the default action list + for (auto action : defaultAclActionList[type.getName()][stage]) + { + if (m_pAclOrch->isAclActionSupported(stage, acl_action)) + { + SWSS_LOG_INFO("Added default action for table type %s stage %s", + type.getName().c_str(), + ((stage == ACL_STAGE_INGRESS)? "INGRESS":"EGRESS")); + type.addAction(action); + } + } + } + + return true; +} + bool AclTable::validateAddType(const AclTableType &tableType) { SWSS_LOG_ENTER(); @@ -3949,6 +4170,8 @@ void AclOrch::doAclTableTask(Consumer &consumer) newTable.validateAddType(*tableType); + newTable.addMandatoryActions(); + // validate and create/update ACL Table if (bAllAttributesOk && newTable.validate()) { diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index 710720a5c1..02631d934e 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -113,6 +113,9 @@ typedef tuple acl_range_properties_t; typedef map acl_capabilities_t; typedef map> acl_action_enum_values_capabilities_t; +typedef map > acl_stage_action_list_t; +typedef map acl_table_action_list_lookup_t; + class AclRule; class AclTableMatchInterface @@ -156,6 +159,8 @@ class AclTableType const set& getRangeTypes() const; const set& getActions() const; + bool addAction(sai_acl_action_type_t action); + private: friend class AclTableTypeBuilder; @@ -387,6 +392,9 @@ class AclTable bool validate(); bool create(); + // Add actions to ACL table 
if mandatory action list is required on table creation. + bool addMandatoryActions(); + // validate AclRule match attribute against rule and table configuration bool validateAclRuleMatch(sai_acl_entry_attr_t matchId, const AclRule& rule) const; // validate AclRule action attribute against rule and table configuration diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 0d81c93f69..9886e5d8ff 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -1755,4 +1755,96 @@ namespace aclorch_test // try to delete non existing acl rule ASSERT_TRUE(orch->m_aclOrch->removeAclRule(tableId, ruleId)); } + + sai_switch_api_t *old_sai_switch_api; + + // The following function is used to override SAI API get_switch_attribute to request passing + // mandatory ACL actions to SAI when creating mirror ACL table. + sai_status_t getSwitchAttribute(_In_ sai_object_id_t switch_id,_In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1) + { + switch(attr_list[0].id) + { + case SAI_SWITCH_ATTR_MAX_ACL_ACTION_COUNT: + attr_list[0].value.u32 = 2; + return SAI_STATUS_SUCCESS; + case SAI_SWITCH_ATTR_ACL_STAGE_INGRESS: + case SAI_SWITCH_ATTR_ACL_STAGE_EGRESS: + attr_list[0].value.aclcapability.action_list.count = 2; + attr_list[0].value.aclcapability.action_list.list[0]= SAI_ACL_ACTION_TYPE_COUNTER; + attr_list[0].value.aclcapability.action_list.list[1]= + attr_list[0].id == SAI_SWITCH_ATTR_ACL_STAGE_INGRESS ? + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS : SAI_ACL_ACTION_TYPE_MIRROR_EGRESS; + attr_list[0].value.aclcapability.is_action_list_mandatory = true; + return SAI_STATUS_SUCCESS; + } + } + return old_sai_switch_api->get_switch_attribute(switch_id, attr_count, attr_list); + } + + TEST_F(AclOrchTest, AclTableCreationWithMandatoryActions) + { + // Override SAI API get_switch_attribute to request passing mandatory ACL actions to SAI + // when creating mirror ACL table. 
+ old_sai_switch_api = sai_switch_api; + sai_switch_api_t new_sai_switch_api = *sai_switch_api; + sai_switch_api = &new_sai_switch_api; + sai_switch_api->get_switch_attribute = getSwitchAttribute; + + // Set platform env to enable support of MIRRORV6 ACL table. + bool unset_platform_env = false; + if (!getenv("platform")) + { + setenv("platform", VS_PLATFORM_SUBSTRING, 0); + unset_platform_env = true; + } + + auto orch = createAclOrch(); + + for (const auto &acl_table_type : { TABLE_TYPE_MIRROR, TABLE_TYPE_MIRRORV6, TABLE_TYPE_MIRROR_DSCP }) + { + for (const auto &acl_table_stage : { STAGE_INGRESS, STAGE_EGRESS }) + { + // Create ACL table. + string acl_table_id = "mirror_acl_table"; + auto kvfAclTable = deque( + { { acl_table_id, + SET_COMMAND, + { { ACL_TABLE_DESCRIPTION, acl_table_type }, + { ACL_TABLE_TYPE, acl_table_type }, + { ACL_TABLE_STAGE, acl_table_stage }, + { ACL_TABLE_PORTS, "1,2" } } } }); + orch->doAclTableTask(kvfAclTable); + auto acl_table = orch->getAclTable(acl_table_id); + ASSERT_NE(acl_table, nullptr); + + // Verify mandatory ACL actions have been added. + auto acl_actions = acl_table->type.getActions(); + ASSERT_NE(acl_actions.find(SAI_ACL_ACTION_TYPE_COUNTER), acl_actions.end()); + sai_acl_action_type_t action = strcmp(acl_table_stage, STAGE_INGRESS) == 0 ? + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS : SAI_ACL_ACTION_TYPE_MIRROR_EGRESS; + ASSERT_NE(acl_actions.find(action), acl_actions.end()); + + // Delete ACL table. + kvfAclTable = deque( + { { acl_table_id, + DEL_COMMAND, + {} } }); + orch->doAclTableTask(kvfAclTable); + acl_table = orch->getAclTable(acl_table_id); + ASSERT_EQ(acl_table, nullptr); + } + } + + // Unset platform env. + if (unset_platform_env) + { + unsetenv("platform"); + } + + // Restore sai_switch_api.
+ sai_switch_api = old_sai_switch_api; + } } // namespace nsAclOrchTest From c73cf1021b9803d72a7a9eb2d3a2aba38217ea29 Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Sat, 28 May 2022 08:03:40 +0800 Subject: [PATCH 10/64] Support mock_test infra for dynamic buffer manager and fix issues found during mock test (#2234) * Support mock_test infra for dynamic buffer manager and fix issues found during mock test Signed-off-by: Stephen Sun --- cfgmgr/buffermgrdyn.cpp | 148 ++++- cfgmgr/buffermgrdyn.h | 3 +- tests/mock_tests/Makefile.am | 6 +- tests/mock_tests/buffermgrdyn_ut.cpp | 902 +++++++++++++++++++++++++++ tests/test_buffer_dynamic.py | 5 +- 5 files changed, 1031 insertions(+), 33 deletions(-) create mode 100644 tests/mock_tests/buffermgrdyn_ut.cpp diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index b3ce88c6f3..1c5b99a6f8 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -111,8 +111,11 @@ BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBC } catch (...) { - SWSS_LOG_ERROR("Lua scripts for buffer calculation were not loaded successfully, buffermgrd won't start"); - return; + if (platform != "mock_test") + { + SWSS_LOG_ERROR("Lua scripts for buffer calculation were not loaded successfully, buffermgrd won't start"); + return; + } } // Init timer @@ -718,7 +721,13 @@ void BufferMgrDynamic::recalculateSharedBufferPool() // - In case the shared headroom pool size is statically configured, as it is programmed to APPL_DB during buffer pool handling, // - any change from lua plugin will be ignored. 
// - will handle ingress_lossless_pool in the way all other pools are handled in this case - auto &pool = m_bufferPoolLookup[poolName]; + const auto &poolRef = m_bufferPoolLookup.find(poolName); + if (poolRef == m_bufferPoolLookup.end()) + { + SWSS_LOG_WARN("Unconfigured buffer pool %s got from lua plugin", poolName.c_str()); + continue; + } + auto &pool = poolRef->second; auto &poolSizeStr = pairs[1]; auto old_xoff = pool.xoff; bool xoff_updated = false; @@ -875,10 +884,8 @@ void BufferMgrDynamic::updateBufferProfileToDb(const string &name, const buffer_ } vector fvVector; - string mode = getPgPoolMode(); - // profile threshold field name - mode += "_th"; + const string &&mode = profile.threshold_mode.empty() ? getPgPoolMode() + "_th" : profile.threshold_mode; if (profile.lossless) { @@ -959,7 +966,7 @@ task_process_status BufferMgrDynamic::allocateProfile(const string &speed, const string mode = getPgPoolMode(); if (mode.empty()) { - SWSS_LOG_NOTICE("BUFFER_PROFILE %s cannot be created because the buffer pool isn't ready", profile_name.c_str()); + SWSS_LOG_INFO("BUFFER_PROFILE %s cannot be created because the buffer pool isn't ready", profile_name.c_str()); return task_process_status::task_need_retry; } @@ -1430,9 +1437,10 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons return task_process_status::task_success; } - if (!m_bufferPoolReady) + if (!m_bufferPoolReady || m_defaultThreshold.empty()) { - SWSS_LOG_INFO("Nothing to be done since the buffer pool is not ready"); + SWSS_LOG_INFO("Nothing to be done since either the buffer pool or default threshold is not ready"); + m_bufferObjectsPending = true; return task_process_status::task_success; } @@ -1454,6 +1462,12 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons if (portPg.dynamic_calculated) { + if (portInfo.state != PORT_READY) + { + SWSS_LOG_INFO("Nothing to be done for %s since port is not ready", key.c_str()); + continue; + } + string 
threshold; // Calculate new headroom size if (portPg.static_configured) @@ -1892,10 +1906,16 @@ task_process_status BufferMgrDynamic::handleBufferMaxParam(KeyOpFieldsValuesTupl task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFieldsValuesTuple &tuple) { string op = kfvOp(tuple); - string newRatio = "0"; + string newRatio = ""; if (op == SET_COMMAND) { + if (m_bufferPoolLookup.find(INGRESS_LOSSLESS_PG_POOL_NAME) == m_bufferPoolLookup.end()) + { + SWSS_LOG_INFO("%s has not been configured, need to retry", INGRESS_LOSSLESS_PG_POOL_NAME); + return task_process_status::task_need_retry; + } + for (auto i : kfvFieldsValues(tuple)) { if (fvField(i) == "default_dynamic_th") @@ -1910,6 +1930,10 @@ task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFiel } } } + else if (op == DEL_COMMAND) + { + newRatio = ""; + } else { SWSS_LOG_ERROR("Unsupported command %s received for DEFAULT_LOSSLESS_BUFFER_PARAMETER table", op.c_str()); @@ -2398,6 +2422,10 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues // For set command: // 1. Create the corresponding table entries in APPL_DB // 2. Record the table in the internal cache m_bufferProfileLookup + + // If the profile did not exist, it will be created in the next line by the [] operator with incomplete data. 
+ // In case the flow does not finish successfully, the incomplete profile should be removed + bool needRemoveOnFailure = (m_bufferProfileLookup.find(profileName) == m_bufferProfileLookup.end()); buffer_profile_t &profileApp = m_bufferProfileLookup[profileName]; profileApp.static_configured = true; @@ -2418,24 +2446,44 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues if (!value.empty()) { auto &poolName = value; - if (poolName.empty()) - { - SWSS_LOG_ERROR("BUFFER_PROFILE: Invalid format of reference to pool: %s", value.c_str()); - return task_process_status::task_invalid_entry; - } - auto poolRef = m_bufferPoolLookup.find(poolName); if (poolRef == m_bufferPoolLookup.end()) { - SWSS_LOG_WARN("Pool %s hasn't been configured yet, need retry", poolName.c_str()); + SWSS_LOG_INFO("Pool %s hasn't been configured yet, need retry", poolName.c_str()); + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } return task_process_status::task_need_retry; } profileApp.pool_name = poolName; profileApp.direction = poolRef->second.direction; + auto threshold_mode = poolRef->second.mode + "_th"; + if (profileApp.threshold_mode.empty()) + { + profileApp.threshold_mode = threshold_mode; + } + else if (profileApp.threshold_mode != threshold_mode) + { + SWSS_LOG_ERROR("Buffer profile %s's mode %s doesn't match with buffer pool %s whose mode is %s", + profileName.c_str(), + profileApp.threshold_mode.c_str(), + poolName.c_str(), + threshold_mode.c_str()); + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } + return task_process_status::task_failed; + } } else { SWSS_LOG_ERROR("Pool for BUFFER_PROFILE %s hasn't been specified", field.c_str()); + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } return task_process_status::task_failed; } } @@ -2456,12 +2504,25 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues { profileApp.size = value; } - else if 
(field == buffer_dynamic_th_field_name) - { - profileApp.threshold = value; - } - else if (field == buffer_static_th_field_name) + else if (field == buffer_dynamic_th_field_name || field == buffer_static_th_field_name) { + if (profileApp.threshold_mode.empty()) + { + profileApp.threshold_mode = field; + } + else if (profileApp.threshold_mode != field) + { + SWSS_LOG_ERROR("Buffer profile %s's mode %s doesn't align with buffer pool %s whose mode is %s", + profileName.c_str(), + field.c_str(), + profileApp.pool_name.c_str(), + profileApp.threshold_mode.c_str()); + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } + return task_process_status::task_failed; + } profileApp.threshold = value; } else if (field == buffer_headroom_type_field_name) @@ -2484,7 +2545,11 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues if (profileApp.direction != BUFFER_INGRESS) { SWSS_LOG_ERROR("BUFFER_PROFILE %s is ingress but referencing an egress pool %s", profileName.c_str(), profileApp.pool_name.c_str()); - return task_process_status::task_success; + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } + return task_process_status::task_failed; } if (profileApp.dynamic_calculated) @@ -2752,6 +2817,9 @@ void BufferMgrDynamic::handleDelSingleBufferObjectOnAdminDownPort(buffer_directi task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &key, const string &port, const KeyOpFieldsValuesTuple &tuple) { string op = kfvOp(tuple); + // If the buffer PG did not exist, it will be created in the next line by the [] operator with incomplete data. 
+ // In case the flow does not finish successfully, the incomplete profile should be removed + bool needRemoveOnFailure = (m_portPgLookup[port].find(key) == m_portPgLookup[port].end()); buffer_pg_t &bufferPg = m_portPgLookup[port][key]; port_info_t &portInfo = m_portInfoLookup[port]; @@ -2787,6 +2855,10 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke if (profileName.empty()) { SWSS_LOG_ERROR("BUFFER_PG: Invalid format of reference to profile: %s", value.c_str()); + if (needRemoveOnFailure) + { + m_portPgLookup[port].erase(key); + } return task_process_status::task_invalid_entry; } @@ -2795,13 +2867,25 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke { // In this case, we shouldn't set the dynamic calculated flag to true // It will be updated when its profile configured. - bufferPg.dynamic_calculated = false; - SWSS_LOG_WARN("Profile %s hasn't been configured yet, skip", profileName.c_str()); + if (needRemoveOnFailure) + { + m_portPgLookup[port].erase(key); + } + SWSS_LOG_INFO("Profile %s hasn't been configured yet, skip", profileName.c_str()); return task_process_status::task_need_retry; } else { buffer_profile_t &profileRef = searchRef->second; + if (profileRef.direction == BUFFER_EGRESS) + { + if (needRemoveOnFailure) + { + m_portPgLookup[port].erase(key); + } + SWSS_LOG_ERROR("Egress buffer profile configured on PG %s", key.c_str()); + return task_process_status::task_failed; + } bufferPg.dynamic_calculated = profileRef.dynamic_calculated; bufferPg.configured_profile_name = profileName; bufferPg.lossless = profileRef.lossless; @@ -2813,6 +2897,10 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke if (field != buffer_profile_field_name) { SWSS_LOG_ERROR("BUFFER_PG: Invalid field %s", field.c_str()); + if (needRemoveOnFailure) + { + m_portPgLookup[port].erase(key); + } return task_process_status::task_invalid_entry; } @@ -2896,6 +2984,7 @@ task_process_status 
BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + m_portPgLookup[port].erase(key); return task_process_status::task_invalid_entry; } @@ -2911,7 +3000,7 @@ task_process_status BufferMgrDynamic::checkBufferProfileDirection(const string & auto profileSearchRef = m_bufferProfileLookup.find(profileName); if (profileSearchRef == m_bufferProfileLookup.end()) { - SWSS_LOG_NOTICE("Profile %s doesn't exist, need retry", profileName.c_str()); + SWSS_LOG_INFO("Profile %s doesn't exist, need retry", profileName.c_str()); return task_process_status::task_need_retry; } @@ -2983,6 +3072,8 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string } SWSS_LOG_INFO("Removing entry %s from APPL_DB", key.c_str()); m_portQueueLookup[port].erase(queues); + if (m_portQueueLookup[port].empty()) + m_portQueueLookup.erase(port); if (PORT_ADMIN_DOWN == portInfo.state) { handleDelSingleBufferObjectOnAdminDownPort(BUFFER_QUEUE, port, key, portInfo); @@ -3189,7 +3280,8 @@ void BufferMgrDynamic::doTask(Consumer &consumer) { case task_process_status::task_failed: SWSS_LOG_ERROR("Failed to process table update"); - return; + it = consumer.m_toSync.erase(it); + break; case task_process_status::task_need_retry: SWSS_LOG_INFO("Unable to process table update. 
Will retry..."); it++; @@ -3238,7 +3330,7 @@ void BufferMgrDynamic::doTask(Consumer &consumer) */ void BufferMgrDynamic::handlePendingBufferObjects() { - if (m_bufferPoolReady) + if (m_bufferPoolReady && !m_defaultThreshold.empty()) { if (!m_pendingApplyZeroProfilePorts.empty()) { diff --git a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h index ef1e4f567f..cb94227522 100644 --- a/cfgmgr/buffermgrdyn.h +++ b/cfgmgr/buffermgrdyn.h @@ -71,6 +71,7 @@ typedef struct { std::string xon_offset; std::string xoff; std::string threshold; + std::string threshold_mode; std::string pool_name; // port_pgs - stores pgs referencing this profile // An element will be added or removed when a PG added or removed @@ -177,7 +178,7 @@ class BufferMgrDynamic : public Orch std::string m_configuredSharedHeadroomPoolSize; - std::shared_ptr m_applDb = nullptr; + DBConnector *m_applDb = nullptr; SelectableTimer *m_buffermgrPeriodtimer = nullptr; // Fields for zero pool and profiles diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 2a6dade254..54fb4003a2 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -2,7 +2,7 @@ FLEX_CTR_DIR = $(top_srcdir)/orchagent/flex_counter DEBUG_CTR_DIR = $(top_srcdir)/orchagent/debug_counter P4_ORCH_DIR = $(top_srcdir)/orchagent/p4orch -INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib +INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I $(top_srcdir)/cfgmgr CFLAGS_SAI = -I /usr/include/sai @@ -26,6 +26,7 @@ tests_SOURCES = aclorch_ut.cpp \ routeorch_ut.cpp \ qosorch_ut.cpp \ bufferorch_ut.cpp \ + buffermgrdyn_ut.cpp \ fdborch/flush_syncd_notif_ut.cpp \ copporch_ut.cpp \ saispy_ut.cpp \ @@ -95,7 +96,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/lagid.cpp \ $(top_srcdir)/orchagent/bfdorch.cpp \ $(top_srcdir)/orchagent/srv6orch.cpp \ - $(top_srcdir)/orchagent/nvgreorch.cpp + $(top_srcdir)/orchagent/nvgreorch.cpp \ + 
$(top_srcdir)/cfgmgr/buffermgrdyn.cpp tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp $(FLEX_CTR_DIR)/flowcounterrouteorch.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp diff --git a/tests/mock_tests/buffermgrdyn_ut.cpp b/tests/mock_tests/buffermgrdyn_ut.cpp new file mode 100644 index 0000000000..b64a367c79 --- /dev/null +++ b/tests/mock_tests/buffermgrdyn_ut.cpp @@ -0,0 +1,902 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#define private public +#include "buffermgrdyn.h" +#undef private +#include "warm_restart.h" + +extern string gMySwitchType; + + +namespace buffermgrdyn_test +{ + using namespace std; + + shared_ptr m_app_db = make_shared("APPL_DB", 0); + shared_ptr m_config_db = make_shared("CONFIG_DB", 0); + shared_ptr m_state_db = make_shared("STATE_DB", 0); + + BufferMgrDynamic *m_dynamicBuffer; + SelectableTimer m_selectableTable(timespec({ .tv_sec = BUFFERMGR_TIMER_PERIOD, .tv_nsec = 0 }), 0); + Table portTable(m_config_db.get(), CFG_PORT_TABLE_NAME); + Table cableLengthTable(m_config_db.get(), CFG_PORT_CABLE_LEN_TABLE_NAME); + Table bufferPoolTable(m_config_db.get(), CFG_BUFFER_POOL_TABLE_NAME); + Table bufferProfileTable(m_config_db.get(), CFG_BUFFER_PROFILE_TABLE_NAME); + Table bufferPgTable(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME); + Table bufferQueueTable(m_config_db.get(), CFG_BUFFER_QUEUE_TABLE_NAME); + Table bufferIngProfileListTable(m_config_db.get(), CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + Table bufferEgrProfileListTable(m_config_db.get(), CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + Table defaultLosslessParameterTable(m_config_db.get(), CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER); + Table 
appPortTable(m_app_db.get(), APP_PORT_TABLE_NAME); + Table appBufferPoolTable(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table appBufferProfileTable(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table appBufferPgTable(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table appBufferQueueTable(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + Table appBufferIngProfileListTable(m_app_db.get(), APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + Table appBufferEgrProfileListTable(m_app_db.get(), APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + Table bufferMaxParamTable(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE); + Table statePortTable(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table stateBufferTable(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE); + + map> zeroProfileMap; + vector zeroProfile; + + struct BufferMgrDynTest : public ::testing::Test + { + map> testBufferProfile; + map> testBufferPool; + + void SetUpReclaimingBuffer() + { + zeroProfileMap["ingress_zero_pool"] = { + {"mode", "static"}, + {"type", "ingress"}, + {"size", "0"} + }; + zeroProfileMap["ingress_lossy_pg_zero_profile"] = { + {"pool", "ingress_zero_pool"}, + {"size", "0"}, + {"static_th", "0"} + }; + zeroProfileMap["ingress_lossless_zero_profile"] = { + {"pool", "ingress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + zeroProfileMap["egress_lossy_zero_profile"] = { + {"pool", "egress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + zeroProfileMap["egress_lossless_zero_profile"] = { + {"pool", "egress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + + zeroProfile = { + { + "BUFFER_POOL_TABLE:ingress_zero_pool", + "SET", + zeroProfileMap["ingress_zero_pool"] + }, + { + "BUFFER_PROFILE_TABLE:ingress_lossy_pg_zero_profile", + "SET", + zeroProfileMap["ingress_lossy_pg_zero_profile"] + }, + { + "BUFFER_PROFILE_TABLE:ingress_lossless_zero_profile", + "SET", + zeroProfileMap["ingress_lossless_zero_profile"] + }, + { + 
"BUFFER_PROFILE_TABLE:egress_lossy_zero_profile", + "SET", + zeroProfileMap["egress_lossy_zero_profile"] + }, + { + "BUFFER_PROFILE_TABLE:egress_lossless_zero_profile", + "SET", + zeroProfileMap["egress_lossless_zero_profile"] + }, + { + "control_fields", + "SET", + { + {"pgs_to_apply_zero_profile", "0"}, + {"ingress_zero_profile", "ingress_lossy_pg_zero_profile"} + } + } + }; + } + + BufferMgrDynTest() + { + testBufferPool["ingress_lossless_pool"] = { + {"mode", "dynamic"}, + {"type", "ingress"}, + {"size", "1024000"} + }; + testBufferPool["egress_lossless_pool"] = { + {"mode", "dynamic"}, + {"type", "egress"}, + {"size", "1024000"} + }; + testBufferPool["egress_lossy_pool"] = { + {"mode", "dynamic"}, + {"type", "egress"}, + {"size", "1024000"} + }; + + testBufferProfile["ingress_lossless_profile"] = { + {"dynamic_th", "7"}, + {"pool", "ingress_lossless_pool"}, + {"size", "0"} + }; + testBufferProfile["egress_lossless_profile"] = { + {"dynamic_th", "7"}, + {"pool", "egress_lossless_pool"}, + {"size", "0"} + }; + testBufferProfile["egress_lossy_profile"] = { + {"dynamic_th", "3"}, + {"pool", "egress_lossy_pool"}, + {"size", "0"} + }; + } + + void SetUp() override + { + setenv("ASIC_VENDOR", "mock_test", 1); + + testing_db::reset(); + + WarmStart::initialize("buffermgrd", "swss"); + WarmStart::checkWarmStart("buffermgrd", "swss"); + } + + void StartBufferManager(shared_ptr> zero_profile=nullptr) + { + // Init switch and create dependencies + vector buffer_table_connectors = { + TableConnector(m_config_db.get(), CFG_PORT_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_PORT_CABLE_LEN_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_POOL_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PROFILE_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_QUEUE_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME), + 
TableConnector(m_config_db.get(), CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME), + TableConnector(m_config_db.get(), CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER), + TableConnector(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE), + TableConnector(m_state_db.get(), STATE_PORT_TABLE_NAME) + }; + + m_dynamicBuffer = new BufferMgrDynamic(m_config_db.get(), m_state_db.get(), m_app_db.get(), buffer_table_connectors, nullptr, zero_profile); + } + + void InitPort(const string &port="Ethernet0", const string &admin_status="up") + { + portTable.set(port, + { + {"speed", "100000"}, + {"mtu", "9100"}, + {"admin_status", admin_status} + }); + m_dynamicBuffer->addExistingData(&portTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void SetPortInitDone() + { + appPortTable.set("PortInitDone", + { + {"lanes", "0"} + }); + m_dynamicBuffer->addExistingData(&appPortTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitMmuSize() + { + bufferMaxParamTable.set("global", + { + {"mmu_size", "1024000"} + }); + if (m_dynamicBuffer) + m_dynamicBuffer->addExistingData(&bufferMaxParamTable); + } + + void InitDefaultLosslessParameter(const string &over_subscribe_ratio="") + { + if (over_subscribe_ratio.empty()) + { + defaultLosslessParameterTable.set("AZURE", + { + {"default_dynamic_th", "0"} + }); + } + else + { + defaultLosslessParameterTable.set("AZURE", + { + {"default_dynamic_th", "0"}, + {"over_subscribe_ratio", over_subscribe_ratio} + }); + } + if (m_dynamicBuffer) + { + m_dynamicBuffer->addExistingData(&defaultLosslessParameterTable); + static_cast(m_dynamicBuffer)->doTask(); + } + } + + void InitBufferPool() + { + for(auto &i: testBufferPool) + { + bufferPoolTable.set(i.first, i.second); + } + + m_dynamicBuffer->addExistingData(&bufferPoolTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void ClearBufferPool(const string &skippedPool="", const string &clearPool="") + { + std::deque entries; + for (auto &i: testBufferPool) + { + if (skippedPool == i.first) + 
continue; + if (!clearPool.empty() && clearPool != i.first) + continue; + entries.push_back({i.first, "DEL", {}}); + } + + auto consumer = dynamic_cast(m_dynamicBuffer->getExecutor(CFG_BUFFER_POOL_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitDefaultBufferProfile() + { + for (auto &i: testBufferProfile) + { + bufferProfileTable.set(i.first, i.second); + } + + m_dynamicBuffer->addExistingData(&bufferProfileTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void ClearBufferProfile() + { + std::deque entries; + for (auto &i: testBufferProfile) + entries.push_back({i.first, "DEL", {}}); + + auto consumer = dynamic_cast(m_dynamicBuffer->getExecutor(CFG_BUFFER_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitBufferPg(const string &key, const string &profile="NULL") + { + bufferPgTable.set(key, + { + {"profile", profile} + }); + m_dynamicBuffer->addExistingData(&bufferPgTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void ClearBufferObject(const string &key, const string &tableName) + { + std::deque entries; + entries.push_back({key, "DEL", {}}); + + auto consumer = dynamic_cast(m_dynamicBuffer->getExecutor(tableName)); + consumer->addToSync(entries); + static_cast(m_dynamicBuffer)->doTask(); + + Table tableObject(m_config_db.get(), tableName); + tableObject.del(key); + } + + void InitBufferQueue(const string &key, const string &profile) + { + bufferQueueTable.set(key, + { + {"profile", profile} + }); + m_dynamicBuffer->addExistingData(&bufferQueueTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitBufferProfileList(const string &ports, const string &profileList, Table &appDb) + { + appDb.set(ports, + { + {"profile_list", profileList} + }); + m_dynamicBuffer->addExistingData(&appDb); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitCableLength(const string &port, const string &length) + { + 
cableLengthTable.set("AZURE", + { + {port, length} + }); + m_dynamicBuffer->addExistingData(&cableLengthTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void HandleTable(Table &table) + { + m_dynamicBuffer->addExistingData(&table); + static_cast(m_dynamicBuffer)->doTask(); + } + + void CheckPool(buffer_pool_t &pool, const vector &tuples) + { + for (auto i : tuples) + { + if (fvField(i) == buffer_pool_type_field_name) + { + if (fvValue(i) == buffer_value_ingress) + ASSERT_EQ(pool.direction, BUFFER_INGRESS); + else + ASSERT_EQ(pool.direction, BUFFER_EGRESS); + } + else if (fvField(i) == buffer_pool_mode_field_name) + { + ASSERT_EQ(pool.mode, fvValue(i)); + } + else if (fvField(i) == buffer_size_field_name) + { + ASSERT_TRUE(!pool.dynamic_size); + ASSERT_EQ("1024000", fvValue(i)); + } + } + } + + void CheckProfile(buffer_profile_t &profile, const vector &tuples) + { + for (auto i : tuples) + { + if (fvField(i) == buffer_pool_field_name) + { + ASSERT_EQ(profile.pool_name, fvValue(i)); + if (strstr(profile.pool_name.c_str(), "ingress") != nullptr) + ASSERT_EQ(profile.direction, BUFFER_INGRESS); + else + ASSERT_EQ(profile.direction, BUFFER_EGRESS); + } + else if (fvField(i) == buffer_dynamic_th_field_name) + { + ASSERT_EQ(profile.threshold_mode, buffer_dynamic_th_field_name); + ASSERT_EQ(profile.threshold, fvValue(i)); + } + else if (fvField(i) == buffer_size_field_name) + { + ASSERT_EQ(profile.size, fvValue(i)); + } + } + } + + void CheckPg(const string &port, const string &key, const string &expectedProfile="") + { + vector fieldValues; + + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port][key].dynamic_calculated); + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port][key].lossless); + + auto existInDb = (!expectedProfile.empty()); + ASSERT_EQ(appBufferPgTable.get(key, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup[port][key].running_profile_name, expectedProfile); + ASSERT_EQ(fvField(fieldValues[0]), "profile"); + 
ASSERT_EQ(fvValue(fieldValues[0]), expectedProfile); + } + } + + void CheckQueue(const string &port, const string &key, const string &expectedProfile, bool existInDb) + { + vector fieldValues; + + ASSERT_EQ(m_dynamicBuffer->m_portQueueLookup[port][key].running_profile_name, expectedProfile); + ASSERT_EQ(appBufferQueueTable.get(key, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(fvField(fieldValues[0]), "profile"); + ASSERT_EQ(fvValue(fieldValues[0]), expectedProfile); + } + } + + void CheckProfileList(const string &port, bool ingress, const string &profileList, bool existInDb=true) + { + vector fieldValues; + + auto direction = ingress ? BUFFER_INGRESS : BUFFER_EGRESS; + ASSERT_EQ(m_dynamicBuffer->m_portProfileListLookups[direction][port], profileList); + + auto &appDb = ingress ? appBufferIngProfileListTable : appBufferEgrProfileListTable; + + ASSERT_EQ(appDb.get(port, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(fieldValues.size(), 1); + ASSERT_EQ(fvField(fieldValues[0]), "profile_list"); + ASSERT_EQ(fvValue(fieldValues[0]), profileList); + } + } + + void CheckIfVectorsMatch(const vector &vec1, const vector &vec2) + { + ASSERT_EQ(vec1.size(), vec2.size()); + for (auto &i : vec1) + { + bool found = false; + for (auto &j : vec2) + { + if (i == j) + { + found = true; + break; + } + } + ASSERT_TRUE(found); + } + } + + void TearDown() override + { + delete m_dynamicBuffer; + m_dynamicBuffer = nullptr; + + unsetenv("ASIC_VENDOR"); + } + }; + + /* + * Dependencies + * 1. Buffer manager reads default lossless parameter and maximum mmu size at the beginning + * 2. Maximum mmu size will be pushed ahead of PortInitDone + * 3. Buffer pools can be ready at any time after PortInitDone + * 4. Buffer tables can be applied in any order + * 5. Port and buffer PG can be applied in any order + * 6. Sequence after config qos clear + */ + + /* + * Normal starting flow + * 1. Start buffer manager with default lossless parameter and maximum mmu size + * 2. 
PortInitDone + * 3. Cable length and port configuration + * 4. Buffer tables: BUFFER_POOL/BUFFER_PROFILE/BUFFER_PG + * 5. Queue and buffer profile lists with/without port created + */ + TEST_F(BufferMgrDynTest, BufferMgrTestNormalFlows) + { + vector fieldValues; + vector keys; + + // Prepare information that will be read at the beginning + InitDefaultLosslessParameter(); + InitMmuSize(); + + StartBufferManager(); + + InitPort(); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_INITIALIZING); + + SetPortInitDone(); + // Timer will be called + m_dynamicBuffer->doTask(m_selectableTable); + + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + for (auto i : testBufferPool) + { + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], testBufferPool[i.first]); + fieldValues.clear(); + appBufferPoolTable.get(i.first, fieldValues); + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], fieldValues); + } + + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + for (auto i : testBufferProfile) + { + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); + fieldValues.clear(); + appBufferProfileTable.get(i.first, fieldValues); + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], fieldValues); + } + + InitCableLength("Ethernet0", "5m"); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_READY); + + InitBufferPg("Ethernet0|3-4"); + + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + auto &portPgMap = m_dynamicBuffer->m_bufferProfileLookup[expectedProfile].port_pgs; + ASSERT_EQ(portPgMap.size(), 1); + ASSERT_TRUE(portPgMap.find("Ethernet0:3-4") != portPgMap.end()); + 
+ // Multiple port key + InitBufferPg("Ethernet2,Ethernet4|3-4"); + + CheckPg("Ethernet2", "Ethernet2:3-4"); + CheckPg("Ethernet4", "Ethernet4:3-4"); + + // Buffer queue, ingress and egress profile list table + InitPort("Ethernet2"); + InitPort("Ethernet4"); + + InitBufferQueue("Ethernet2,Ethernet4,Ethernet6|3-4", "egress_lossless_profile"); + CheckQueue("Ethernet2", "Ethernet2:3-4", "egress_lossless_profile", true); + CheckQueue("Ethernet4", "Ethernet4:3-4", "egress_lossless_profile", true); + + InitBufferProfileList("Ethernet2,Ethernet4,Ethernet6", "ingress_lossless_profile", bufferIngProfileListTable); + CheckProfileList("Ethernet2", true, "ingress_lossless_profile"); + CheckProfileList("Ethernet4", true, "ingress_lossless_profile"); + + InitBufferProfileList("Ethernet2,Ethernet4,Ethernet6", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + CheckProfileList("Ethernet2", false, "egress_lossless_profile,egress_lossy_profile"); + CheckProfileList("Ethernet4", false, "egress_lossless_profile,egress_lossy_profile"); + + // Check whether queue, profile lists have been applied after port created + InitPort("Ethernet6"); + CheckQueue("Ethernet6", "Ethernet6:3-4", "egress_lossless_profile", true); + CheckProfileList("Ethernet6", true, "ingress_lossless_profile"); + CheckProfileList("Ethernet6", false, "egress_lossless_profile,egress_lossy_profile"); + } + + /* + * Verify a buffer pool will not be created without corresponding item in BUFFER_POOL + * otherwise it interferes starting flow + * 1. Configure oversubscribe ratio + * 2. 
Check whether ingress_lossless_pool is created + */ + TEST_F(BufferMgrDynTest, BufferMgrTestNoPoolCreatedWithoutDb) + { + StartBufferManager(); + + InitMmuSize(); + InitDefaultLosslessParameter("0"); + InitPort("Ethernet0"); + + static_cast(m_dynamicBuffer)->doTask(); + m_dynamicBuffer->doTask(m_selectableTable); + + ASSERT_TRUE(m_dynamicBuffer->m_bufferPoolLookup.empty()); + + InitBufferPool(); + static_cast(m_dynamicBuffer)->doTask(); + + ASSERT_FALSE(m_dynamicBuffer->m_bufferPoolLookup.empty()); + } + + /* + * Sad flows test. Order is reversed in the following cases: + * - The buffer table creating. The tables referencing other tables are created first + * - Buffer manager starts with neither default lossless parameter nor maximum mmu size available + * + * 1. Start buffer manager without default lossless parameter and maximum mmu size + * 2. Buffer tables are applied in order: + * - Port configuration + * - BUFFER_QUEUE/buffer profile list + * - BUFFER_PG/BUFFER_PROFILE/BUFFER_POOL + * - PortInitDone + * 3. Cable length + * 4. 
Create a buffer profile with wrong threshold mode or direction + * and verify it will not be propagated to SAI + */ + TEST_F(BufferMgrDynTest, BufferMgrTestSadFlows) + { + vector ts; + vector fieldValues; + vector keys; + + StartBufferManager(); + + static_cast(m_dynamicBuffer)->doTask(); + + InitPort(); + + InitBufferPg("Ethernet0|3-4"); + // No item generated in BUFFER_PG_TABLE + CheckPg("Ethernet0", "Ethernet0:3-4"); + + InitBufferQueue("Ethernet0|3-4", "egress_lossless_profile"); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup["Ethernet0"]["Ethernet0:3-4"].running_profile_name.empty()); + + InitBufferProfileList("Ethernet0", "ingress_lossless_profile", bufferIngProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet0"].empty()); + + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_EGRESS]["Ethernet0"].empty()); + + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 0); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 0); + + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + for (auto i : testBufferProfile) + { + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); + fieldValues.clear(); + appBufferProfileTable.get(i.first, fieldValues); + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], fieldValues); + } + for (auto i : testBufferPool) + { + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], testBufferPool[i.first]); + fieldValues.clear(); + appBufferPoolTable.get(i.first, fieldValues); + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], fieldValues); + } + + 
ASSERT_EQ(m_dynamicBuffer->m_portPgLookup.size(), 1); + static_cast(m_dynamicBuffer)->doTask(); + CheckProfileList("Ethernet0", true, "ingress_lossless_profile", false); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", false); + + // All default buffer profiles should be generated and pushed into BUFFER_PROFILE_TABLE + static_cast(m_dynamicBuffer)->doTask(); + + InitMmuSize(); + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + InitDefaultLosslessParameter(); + m_dynamicBuffer->doTask(m_selectableTable); + + CheckPg("Ethernet0", "Ethernet0:3-4"); + InitCableLength("Ethernet0", "5m"); + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + CheckQueue("Ethernet0", "Ethernet0:3-4", "egress_lossless_profile", true); + + CheckProfileList("Ethernet0", true, "ingress_lossless_profile", true); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", true); + + InitPort("Ethernet4"); + InitPort("Ethernet6"); + InitBufferQueue("Ethernet6|0-2", "egress_lossy_profile"); + InitBufferProfileList("Ethernet6", "ingress_lossless_profile", bufferIngProfileListTable); + + // Buffer queue/PG/profile lists with wrong direction should not overwrite the existing ones + vector ingressProfiles = {"egress_lossy_profile", "ingress_profile", ""}; + vector portsToTest = {"Ethernet0", "Ethernet4"}; + for (auto port : portsToTest) + { + for (auto ingressProfile : ingressProfiles) + { + InitBufferPg(port + "|3-4", ingressProfile); + if (port == "Ethernet0") + { + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup["Ethernet0"]["Ethernet0:3-4"].running_profile_name, expectedProfile); + ASSERT_TRUE(appBufferPgTable.get("Ethernet0:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", expectedProfile}}); + } + else + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port].find(port + ":3-4") == m_dynamicBuffer->m_portPgLookup[port].end()); + 
ASSERT_FALSE(appBufferPgTable.get(port + ":3-4", fieldValues)); + } + } + } + + InitBufferQueue("Ethernet4|0-2", "ingress_lossless_profile"); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup["Ethernet4"]["Ethernet0:0-2"].running_profile_name.empty()); + ASSERT_FALSE(appBufferQueueTable.get("Ethernet4:0-2", fieldValues)); + // No pending notifications + ts.clear(); + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + InitBufferQueue("Ethernet6|0-2", "ingress_lossless_profile"); + ASSERT_EQ(m_dynamicBuffer->m_portQueueLookup["Ethernet6"]["Ethernet6:0-2"].running_profile_name, "egress_lossy_profile"); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet6:0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_profile"}}); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + // Wrong direction + InitBufferProfileList("Ethernet4", "egress_lossless_profile", bufferIngProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet4"].empty()); + ASSERT_FALSE(appBufferIngProfileListTable.get("Ethernet4", fieldValues)); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + InitBufferProfileList("Ethernet6", "egress_lossless_profile", bufferIngProfileListTable); + ASSERT_EQ(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet6"], "ingress_lossless_profile"); + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet6", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_profile"}}); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + // Profile with wrong mode should not override the existing entries + vector wrong_profile_names = {"ingress_lossless_profile", "wrong_param_profile"}; + vector> wrong_profile_patterns = { + // wrong threshold mode + { + {"pool", "ingress_lossless_pool"}, + {"static_th", "100"}, 
+ {"size", "0"} + }, + // unconfigured pool + { + {"pool", "ingress_pool"}, + {"dynamic_th", "0"}, + {"size", "0"} + } + }; + auto expected_pending_tasks = 0; + for (auto wrong_profile_name : wrong_profile_names) + { + bool exist = (testBufferProfile.find(wrong_profile_name) != testBufferProfile.end()); + for (auto wrong_profile_pattern : wrong_profile_patterns) + { + bufferProfileTable.set(wrong_profile_name, wrong_profile_pattern); + m_dynamicBuffer->addExistingData(&bufferProfileTable); + static_cast(m_dynamicBuffer)->doTask(); + if (exist) + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[wrong_profile_name], testBufferProfile[wrong_profile_name]); + else + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.find(wrong_profile_name), m_dynamicBuffer->m_bufferProfileLookup.end()); + ASSERT_EQ(appBufferProfileTable.get(wrong_profile_name, fieldValues), exist); + // No pending notifications + ts.clear(); + m_dynamicBuffer->dumpPendingTasks(ts); + if (get<1>(wrong_profile_pattern[0]) == "ingress_pool") + expected_pending_tasks++; + ASSERT_EQ(ts.size(), expected_pending_tasks); + } + } + } + + /* + * Port configuration flow + * Port table items are received in different order + */ + TEST_F(BufferMgrDynTest, BufferMgrTestPortConfigFlow) + { + // Prepare information that will be read at the beginning + StartBufferManager(); + + /* + * Speed, admin up, cable length + */ + portTable.set("Ethernet0", + { + {"speed", "100000"} + }); + HandleTable(portTable); + ASSERT_TRUE(m_dynamicBuffer->m_portInfoLookup.find("Ethernet0") != m_dynamicBuffer->m_portInfoLookup.end()); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_ADMIN_DOWN); + + portTable.set("Ethernet0", + { + {"speed", "100000"}, + {"admin_status", "up"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_INITIALIZING); + + cableLengthTable.set("AZURE", + { + {"Ethernet0", "5m"} + }); + HandleTable(cableLengthTable); + 
ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_READY); + + /* + * Speed, admin down, cable length, admin up + */ + portTable.set("Ethernet4", + { + {"speed", "100000"}, + {"admin_status", "down"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_ADMIN_DOWN); + cableLengthTable.set("AZURE", + { + {"Ethernet4", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_ADMIN_DOWN); + portTable.set("Ethernet4", + { + {"speed", "100000"}, + {"admin_status", "up"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_READY); + + /* + * Auto-negotiation: supported speeds received after port table + */ + portTable.set("Ethernet8", + { + {"speed", "100000"}, + {"admin_status", "up"}, + {"autoneg", "on"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_INITIALIZING); + ASSERT_TRUE(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].effective_speed.empty()); + + cableLengthTable.set("AZURE", + { + {"Ethernet8", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_INITIALIZING); + + statePortTable.set("Ethernet8", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + HandleTable(statePortTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].effective_speed, "100000"); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_READY); + + /* + * Auto-negotiation: supported speeds received before port table + */ + statePortTable.set("Ethernet12", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + HandleTable(statePortTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].supported_speeds, "100000,50000,40000,25000,10000,1000"); + + portTable.set("Ethernet12", + { + {"speed", "100000"}, + {"admin_status", "up"}, 
+ {"autoneg", "on"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].state, PORT_INITIALIZING); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].effective_speed, "100000"); + + cableLengthTable.set("AZURE", + { + {"Ethernet12", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].state, PORT_READY); + } +} diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 69c577bd26..f0a57899e0 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -11,7 +11,6 @@ def dynamic_buffer(dvs): yield buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) - @pytest.mark.usefixtures("dynamic_buffer") class TestBufferMgrDyn(object): DEFAULT_POLLING_CONFIG = PollingConfig(polling_interval=0.01, timeout=60, strict=True) @@ -129,16 +128,18 @@ def check_new_profile_in_asic_db(self, dvs, profile): if fvs.get('dynamic_th'): sai_threshold_value = fvs['dynamic_th'] sai_threshold_mode = 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC' + sai_threshold_name = 'SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH' else: sai_threshold_value = fvs['static_th'] sai_threshold_mode = 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC' + sai_threshold_name = 'SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH' self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE", self.newProfileInAsicDb, {'SAI_BUFFER_PROFILE_ATTR_XON_TH': fvs['xon'], 'SAI_BUFFER_PROFILE_ATTR_XOFF_TH': fvs['xoff'], 'SAI_BUFFER_PROFILE_ATTR_RESERVED_BUFFER_SIZE': fvs['size'], 'SAI_BUFFER_PROFILE_ATTR_POOL_ID': self.ingress_lossless_pool_oid, 'SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE': sai_threshold_mode, - 'SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH': sai_threshold_value}, + sai_threshold_name: sai_threshold_value}, self.DEFAULT_POLLING_CONFIG) def make_lossless_profile_name(self, speed, cable_length, mtu = None, dynamic_th = None): From 9999dae0186d4371d37272f270dec7983910354c Mon Sep 
17 00:00:00 2001 From: Junhua Zhai Date: Sat, 28 May 2022 11:43:57 +0800 Subject: [PATCH 11/64] [counter] Support gearbox counters (#2218) 1/ Enable gearbox port counter collection in GB_COUNTERS_DB 2/ Enable gearbox macsec counter collection in GB_COUNTERS_DB --- .../flex_counter/flex_counter_manager.cpp | 20 ++- orchagent/flex_counter/flex_counter_manager.h | 8 ++ orchagent/flexcounterorch.cpp | 6 + orchagent/macsecorch.cpp | 60 +++++++-- orchagent/macsecorch.h | 11 ++ orchagent/port.h | 1 + orchagent/portsorch.cpp | 122 +++++++++++++++++- orchagent/portsorch.h | 10 ++ tests/mock_tests/database_config.json | 15 +++ tests/test_gearbox.py | 63 +++++++-- 10 files changed, 287 insertions(+), 29 deletions(-) diff --git a/orchagent/flex_counter/flex_counter_manager.cpp b/orchagent/flex_counter/flex_counter_manager.cpp index 3e61289acd..ecccf415b2 100644 --- a/orchagent/flex_counter/flex_counter_manager.cpp +++ b/orchagent/flex_counter/flex_counter_manager.cpp @@ -89,14 +89,28 @@ FlexCounterManager::FlexCounterManager( const uint polling_interval, const bool enabled, FieldValueTuple fv_plugin) : + FlexCounterManager("FLEX_COUNTER_DB", group_name, stats_mode, + polling_interval, enabled, fv_plugin) +{ +} + +FlexCounterManager::FlexCounterManager( + const string& db_name, + const string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + FieldValueTuple fv_plugin) : group_name(group_name), stats_mode(stats_mode), polling_interval(polling_interval), enabled(enabled), fv_plugin(fv_plugin), - flex_counter_db(new DBConnector("FLEX_COUNTER_DB", 0)), - flex_counter_group_table(new ProducerTable(flex_counter_db.get(), FLEX_COUNTER_GROUP_TABLE)), - flex_counter_table(new ProducerTable(flex_counter_db.get(), FLEX_COUNTER_TABLE)) + flex_counter_db(new DBConnector(db_name, 0)), + flex_counter_group_table(new ProducerTable(flex_counter_db.get(), + FLEX_COUNTER_GROUP_TABLE)), + flex_counter_table(new ProducerTable(flex_counter_db.get(), + 
FLEX_COUNTER_TABLE)) { SWSS_LOG_ENTER(); diff --git a/orchagent/flex_counter/flex_counter_manager.h b/orchagent/flex_counter/flex_counter_manager.h index 6a997f28f7..38bf829058 100644 --- a/orchagent/flex_counter/flex_counter_manager.h +++ b/orchagent/flex_counter/flex_counter_manager.h @@ -52,6 +52,14 @@ class FlexCounterManager FlexCounterManager() {} + FlexCounterManager( + const std::string& db_name, + const std::string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + swss::FieldValueTuple fv_plugin = std::make_pair("","")); + FlexCounterManager(const FlexCounterManager&) = delete; FlexCounterManager& operator=(const FlexCounterManager&) = delete; virtual ~FlexCounterManager(); diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index a3770b76cb..29563d90a5 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -196,6 +196,12 @@ void FlexCounterOrch::doTask(Consumer &consumer) vector fieldValues; fieldValues.emplace_back(FLEX_COUNTER_STATUS_FIELD, value); m_flexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + + // Update FLEX_COUNTER_STATUS for gearbox port + if (key == PORT_KEY && gPortsOrch && gPortsOrch->isGearboxEnabled()) + { + gPortsOrch->setGearboxFlexCounterStatus(value == "enable"); + } } else if(field == FLEX_COUNTER_DELAY_STATUS_FIELD) { diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index 20b6057733..70721979d2 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -621,6 +621,21 @@ MACsecOrch::MACsecOrch( StatsMode::READ, MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), m_macsec_flow_stat_manager( + COUNTERS_MACSEC_FLOW_GROUP, + StatsMode::READ, + MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + m_gb_macsec_sa_attr_manager( + "GB_FLEX_COUNTER_DB", + COUNTERS_MACSEC_SA_ATTR_GROUP, + StatsMode::READ, + MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + m_gb_macsec_sa_stat_manager( + 
"GB_FLEX_COUNTER_DB", + COUNTERS_MACSEC_SA_GROUP, + StatsMode::READ, + MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + m_gb_macsec_flow_stat_manager( + "GB_FLEX_COUNTER_DB", COUNTERS_MACSEC_FLOW_GROUP, StatsMode::READ, MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true) @@ -2122,17 +2137,17 @@ task_process_status MACsecOrch::createMACsecSA( sc->m_sa_ids.erase(an); }); - installCounter(CounterType::MACSEC_SA_ATTR, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_attrs); + installCounter(ctx, CounterType::MACSEC_SA_ATTR, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_attrs); std::vector fvVector; fvVector.emplace_back("state", "ok"); if (direction == SAI_MACSEC_DIRECTION_EGRESS) { - installCounter(CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_egress_stats); + installCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_egress_stats); m_state_macsec_egress_sa.set(swss::join('|', port_name, sci, an), fvVector); } else { - installCounter(CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_ingress_stats); + installCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_ingress_stats); m_state_macsec_ingress_sa.set(swss::join('|', port_name, sci, an), fvVector); } @@ -2167,8 +2182,8 @@ task_process_status MACsecOrch::deleteMACsecSA( auto result = task_success; - uninstallCounter(CounterType::MACSEC_SA_ATTR, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]); - uninstallCounter(CounterType::MACSEC_SA, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]); + uninstallCounter(ctx, CounterType::MACSEC_SA_ATTR, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]); + uninstallCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]); if (!deleteMACsecSA(ctx.get_macsec_sc()->m_sa_ids[an])) { SWSS_LOG_WARN("Cannot delete the MACsec SA %s.", port_sci_an.c_str()); @@ -2293,7 +2308,29 @@ bool 
MACsecOrch::deleteMACsecSA(sai_object_id_t sa_id) return true; } +FlexCounterManager& MACsecOrch::MACsecSaStatManager(MACsecOrchContext &ctx) +{ + if (ctx.get_gearbox_phy() != nullptr) + return m_gb_macsec_sa_stat_manager; + return m_macsec_sa_stat_manager; +} + +FlexCounterManager& MACsecOrch::MACsecSaAttrStatManager(MACsecOrchContext &ctx) +{ + if (ctx.get_gearbox_phy() != nullptr) + return m_gb_macsec_sa_attr_manager; + return m_macsec_sa_attr_manager; +} + +FlexCounterManager& MACsecOrch::MACsecFlowStatManager(MACsecOrchContext &ctx) +{ + if (ctx.get_gearbox_phy() != nullptr) + return m_gb_macsec_flow_stat_manager; + return m_macsec_flow_stat_manager; +} + void MACsecOrch::installCounter( + MACsecOrchContext &ctx, CounterType counter_type, sai_macsec_direction_t direction, const std::string &obj_name, @@ -2312,12 +2349,12 @@ void MACsecOrch::installCounter( switch(counter_type) { case CounterType::MACSEC_SA_ATTR: - m_macsec_sa_attr_manager.setCounterIdList(obj_id, counter_type, counter_stats); + MACsecSaAttrStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); m_macsec_counters_map.set("", fields); break; case CounterType::MACSEC_SA: - m_macsec_sa_stat_manager.setCounterIdList(obj_id, counter_type, counter_stats); + MACsecSaStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); if (direction == SAI_MACSEC_DIRECTION_EGRESS) { m_macsec_sa_tx_counters_map.set("", fields); @@ -2329,7 +2366,7 @@ void MACsecOrch::installCounter( break; case CounterType::MACSEC_FLOW: - m_macsec_flow_stat_manager.setCounterIdList(obj_id, counter_type, counter_stats); + MACsecFlowStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); break; default: @@ -2340,6 +2377,7 @@ void MACsecOrch::installCounter( } void MACsecOrch::uninstallCounter( + MACsecOrchContext &ctx, CounterType counter_type, sai_macsec_direction_t direction, const std::string &obj_name, @@ -2348,12 +2386,12 @@ void MACsecOrch::uninstallCounter( switch(counter_type) { 
case CounterType::MACSEC_SA_ATTR: - m_macsec_sa_attr_manager.clearCounterIdList(obj_id); + MACsecSaAttrStatManager(ctx).clearCounterIdList(obj_id); m_counter_db.hdel(COUNTERS_MACSEC_NAME_MAP, obj_name); break; case CounterType::MACSEC_SA: - m_macsec_sa_stat_manager.clearCounterIdList(obj_id); + MACsecSaStatManager(ctx).clearCounterIdList(obj_id); if (direction == SAI_MACSEC_DIRECTION_EGRESS) { m_counter_db.hdel(COUNTERS_MACSEC_SA_TX_NAME_MAP, obj_name); @@ -2365,7 +2403,7 @@ void MACsecOrch::uninstallCounter( break; case CounterType::MACSEC_FLOW: - m_macsec_flow_stat_manager.clearCounterIdList(obj_id); + MACsecFlowStatManager(ctx).clearCounterIdList(obj_id); break; default: diff --git a/orchagent/macsecorch.h b/orchagent/macsecorch.h index b59984a3a6..2472d8c0ef 100644 --- a/orchagent/macsecorch.h +++ b/orchagent/macsecorch.h @@ -72,6 +72,10 @@ class MACsecOrch : public Orch FlexCounterManager m_macsec_sa_stat_manager; FlexCounterManager m_macsec_flow_stat_manager; + FlexCounterManager m_gb_macsec_sa_attr_manager; + FlexCounterManager m_gb_macsec_sa_stat_manager; + FlexCounterManager m_gb_macsec_flow_stat_manager; + struct MACsecACLTable { sai_object_id_t m_table_id; @@ -209,17 +213,24 @@ class MACsecOrch : public Orch /* Counter */ void installCounter( + MACsecOrchContext &ctx, CounterType counter_type, sai_macsec_direction_t direction, const std::string &obj_name, sai_object_id_t obj_id, const std::vector &stats); void uninstallCounter( + MACsecOrchContext &ctx, CounterType counter_type, sai_macsec_direction_t direction, const std::string &obj_name, sai_object_id_t obj_id); + /* Flex Counter Manager */ + FlexCounterManager& MACsecSaStatManager(MACsecOrchContext &ctx); + FlexCounterManager& MACsecSaAttrStatManager(MACsecOrchContext &ctx); + FlexCounterManager& MACsecFlowStatManager(MACsecOrchContext &ctx); + /* MACsec ACL */ bool initMACsecACLTable( MACsecACLTable &acl_table, diff --git a/orchagent/port.h b/orchagent/port.h index db9f2b7bff..fe366630ac 100644 --- 
a/orchagent/port.h +++ b/orchagent/port.h @@ -171,6 +171,7 @@ class Port SystemLagInfo m_system_lag_info; sai_object_id_t m_switch_id = 0; + sai_object_id_t m_system_side_id = 0; sai_object_id_t m_line_side_id = 0; bool m_fec_cfg = false; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 5a6ba61e5c..8c3ad481a3 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -245,6 +245,24 @@ const vector port_stat_ids = SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS }; +const vector gbport_stat_ids = +{ + SAI_PORT_STAT_IF_IN_OCTETS, + SAI_PORT_STAT_IF_OUT_OCTETS, + SAI_PORT_STAT_IF_IN_DISCARDS, + SAI_PORT_STAT_IF_OUT_DISCARDS, + SAI_PORT_STAT_IF_IN_ERRORS, + SAI_PORT_STAT_IF_OUT_ERRORS, + SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS, + SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS, + SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS, + SAI_PORT_STAT_ETHER_STATS_JABBERS, + SAI_PORT_STAT_ETHER_STATS_FRAGMENTS, + SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES, + SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES, + SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS +}; + const vector port_buffer_drop_stat_ids = { SAI_PORT_STAT_IN_DROPPED_PKTS, @@ -305,6 +323,9 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vectorgetPortCountersState()) { auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); - port_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_counter_stats); + port_stat_manager.setCounterIdList(p.m_port_id, + CounterType::PORT, port_counter_stats); + auto gbport_counter_stats = generateCounterStats(GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP); + if (p.m_system_side_id) + gb_port_stat_manager.setCounterIdList(p.m_system_side_id, + CounterType::PORT, gbport_counter_stats); + if (p.m_line_side_id) + gb_port_stat_manager.setCounterIdList(p.m_line_side_id, + CounterType::PORT, gbport_counter_stats); } if (flex_counters_orch->getPortBufferDropCountersState()) { @@ -5690,6 +5724,7 @@ void PortsOrch::generatePortCounterMap() } auto 
port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); + auto gbport_counter_stats = generateCounterStats(GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP); for (const auto& it: m_portList) { // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports. @@ -5697,7 +5732,14 @@ void PortsOrch::generatePortCounterMap() { continue; } - port_stat_manager.setCounterIdList(it.second.m_port_id, CounterType::PORT, port_counter_stats); + port_stat_manager.setCounterIdList(it.second.m_port_id, + CounterType::PORT, port_counter_stats); + if (it.second.m_system_side_id) + gb_port_stat_manager.setCounterIdList(it.second.m_system_side_id, + CounterType::PORT, gbport_counter_stats); + if (it.second.m_line_side_id) + gb_port_stat_manager.setCounterIdList(it.second.m_line_side_id, + CounterType::PORT, gbport_counter_stats); } m_isPortCounterMapGenerated = true; @@ -5803,6 +5845,7 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) if (port.m_type == Port::PHY) { updateDbPortOperStatus(port, status); + updateGearboxPortOperStatus(port); } port.m_oper_status = status; @@ -6285,6 +6328,9 @@ void PortsOrch::initGearbox() SWSS_LOG_NOTICE("BOX: m_gearboxInterfaceMap size = %d.", (int) m_gearboxInterfaceMap.size()); SWSS_LOG_NOTICE("BOX: m_gearboxLaneMap size = %d.", (int) m_gearboxLaneMap.size()); SWSS_LOG_NOTICE("BOX: m_gearboxPortMap size = %d.", (int) m_gearboxPortMap.size()); + + m_gb_counter_db = shared_ptr(new DBConnector("GB_COUNTERS_DB", 0)); + m_gbcounterTable = unique_ptr(new Table(m_gb_counter_db.get(), COUNTERS_PORT_NAME_MAP)); } } @@ -6383,6 +6429,7 @@ bool PortsOrch::initGearboxPort(Port &port) } SWSS_LOG_NOTICE("BOX: Created Gearbox system-side port 0x%" PRIx64 " for alias:%s index:%d", systemPort, port.m_alias.c_str(), port.m_index); + port.m_system_side_id = systemPort; /* Create LINE-SIDE port */ attrs.clear(); @@ -6495,6 +6542,15 @@ bool 
PortsOrch::initGearboxPort(Port &port) SWSS_LOG_NOTICE("BOX: Connected Gearbox ports; system-side:0x%" PRIx64 " to line-side:0x%" PRIx64, systemPort, linePort); m_gearboxPortListLaneMap[port.m_port_id] = make_tuple(systemPort, linePort); port.m_line_side_id = linePort; + + /* Add gearbox system/line port name map to counter table */ + FieldValueTuple tuple(port.m_alias + "_system", sai_serialize_object_id(systemPort)); + vector fields; + fields.push_back(tuple); + m_gbcounterTable->set("", fields); + + fields[0] = FieldValueTuple(port.m_alias + "_line", sai_serialize_object_id(linePort)); + m_gbcounterTable->set("", fields); } } @@ -6920,6 +6976,13 @@ std::unordered_set PortsOrch::generateCounterStats(const string& ty counter_stats.emplace(sai_serialize_port_stat(it)); } } + else if (type == GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP) + { + for (const auto& it: gbport_stat_ids) + { + counter_stats.emplace(sai_serialize_port_stat(it)); + } + } else if (type == PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP) { for (const auto& it: port_buffer_drop_stat_ids) @@ -6930,6 +6993,61 @@ std::unordered_set PortsOrch::generateCounterStats(const string& ty return counter_stats; } +void PortsOrch::setGearboxFlexCounterStatus(bool enabled) +{ + if (enabled) + { + gb_port_stat_manager.enableFlexCounterGroup(); + } + else + { + gb_port_stat_manager.disableFlexCounterGroup(); + } +} + +void PortsOrch::updateGearboxPortOperStatus(const Port& port) +{ + if (!isGearboxEnabled()) + return; + + SWSS_LOG_NOTICE("BOX: port %s, system_side_id:0x%" PRIx64 "line_side_id:0x%" PRIx64, + port.m_alias.c_str(), port.m_system_side_id, port.m_line_side_id); + + if (!port.m_system_side_id || !port.m_line_side_id) + return; + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_OPER_STATUS; + sai_status_t ret = sai_port_api->get_port_attribute(port.m_system_side_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("BOX: Failed to get system_oper_status for %s", port.m_alias.c_str()); + } + else + 
{ + sai_port_oper_status_t oper = static_cast(attr.value.u32); + vector tuples; + FieldValueTuple tuple("system_oper_status", oper_status_strings.at(oper)); + tuples.push_back(tuple); + m_portTable->set(port.m_alias, tuples); + } + + attr.id = SAI_PORT_ATTR_OPER_STATUS; + ret = sai_port_api->get_port_attribute(port.m_line_side_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("BOX: Failed to get line_oper_status for %s", port.m_alias.c_str()); + } + else + { + sai_port_oper_status_t oper = static_cast(attr.value.u32); + vector tuples; + FieldValueTuple tuple("line_oper_status", oper_status_strings.at(oper)); + tuples.push_back(tuple); + m_portTable->set(port.m_alias, tuples); + } +} + bool PortsOrch::decrFdbCount(const std::string& alias, int count) { auto itr = m_portList.find(alias); diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 2848cdcb91..ab35277d80 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -20,6 +20,7 @@ #define VLAN_TAG_LEN 4 #define PORT_STAT_COUNTER_FLEX_COUNTER_GROUP "PORT_STAT_COUNTER" #define PORT_RATE_COUNTER_FLEX_COUNTER_GROUP "PORT_RATE_COUNTER" +#define GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP "GBPORT_STAT_COUNTER" #define PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP "PORT_BUFFER_DROP_STAT" #define QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP "QUEUE_STAT_COUNTER" #define QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP "QUEUE_WATERMARK_STAT_COUNTER" @@ -80,6 +81,7 @@ class PortsOrch : public Orch, public Subject bool allPortsReady(); bool isInitDone(); bool isConfigDone(); + bool isGearboxEnabled(); bool isPortAdminUp(const string &alias); map& getAllPorts(); @@ -168,7 +170,11 @@ class PortsOrch : public Orch, public Subject bool getPortOperStatus(const Port& port, sai_port_oper_status_t& status) const; + void setGearboxFlexCounterStatus(bool enabled); + void updateGearboxPortOperStatus(const Port& port); + bool decrFdbCount(const string& alias, int count); + private: unique_ptr
m_counterTable; unique_ptr
m_counterLagTable; @@ -199,6 +205,10 @@ class PortsOrch : public Orch, public Subject FlexCounterManager port_buffer_drop_stat_manager; FlexCounterManager queue_stat_manager; + FlexCounterManager gb_port_stat_manager; + shared_ptr m_gb_counter_db; + unique_ptr
m_gbcounterTable; + std::map m_portSupportedSpeeds; bool m_initDone = false; diff --git a/tests/mock_tests/database_config.json b/tests/mock_tests/database_config.json index 8301848683..68f850481d 100644 --- a/tests/mock_tests/database_config.json +++ b/tests/mock_tests/database_config.json @@ -57,6 +57,21 @@ "separator": "|", "instance" : "redis" }, + "GB_ASIC_DB" : { + "id" : 9, + "separator": ":", + "instance" : "redis" + }, + "GB_COUNTERS_DB" : { + "id" : 10, + "separator": ":", + "instance" : "redis" + }, + "GB_FLEX_COUNTER_DB" : { + "id" : 11, + "separator": ":", + "instance" : "redis" + }, "CHASSIS_APP_DB" : { "id" : 12, "separator": "|", diff --git a/tests/test_gearbox.py b/tests/test_gearbox.py index 00a87c2f96..7d5b568661 100644 --- a/tests/test_gearbox.py +++ b/tests/test_gearbox.py @@ -49,20 +49,20 @@ def __init__(self, dvs): for i in [x for x in intf_table.getKeys() if sr not in x]: (status, fvs) = intf_table.get(i) assert status == True - self.interfaces[i] = {"attrs" : dict(fvs)} + self.interfaces[i] = dict(fvs) - def SanityCheck(self, dvs, testlog): + def SanityCheck(self, testlog): """ Verify data integrity of Gearbox objects in APPL_DB """ for i in self.interfaces: - phy_id = self.interfaces[i]["attrs"]["phy_id"] + phy_id = self.interfaces[i]["phy_id"] assert phy_id in self.phys - assert self.interfaces[i]["attrs"]["index"] in self.phys[phy_id]["ports"] + assert self.interfaces[i]["index"] in self.phys[phy_id]["ports"] - for lane in self.interfaces[i]["attrs"]["system_lanes"].split(','): + for lane in self.interfaces[i]["system_lanes"].split(','): assert lane in self.phys[phy_id]["lanes"] - for lane in self.interfaces[i]["attrs"]["line_lanes"].split(','): + for lane in self.interfaces[i]["line_lanes"].split(','): assert lane in self.phys[phy_id]["lanes"] class GBAsic(DVSDatabase): @@ -85,9 +85,9 @@ def __init__(self, db_id: int, connector: str, gearbox: Gearbox): for i in self.gearbox.interfaces: intf = self.gearbox.interfaces[i] - if 
intf["attrs"]["system_lanes"] == system_lanes: - assert intf["attrs"]["line_lanes"] == line_lanes - self.ports[intf["attrs"]["index"]] = (system_port_oid, line_port_oid) + if intf["system_lanes"] == system_lanes: + assert intf["line_lanes"] == line_lanes + self.ports[intf["index"]] = (system_port_oid, line_port_oid) assert len(self.ports) == len(self.gearbox.interfaces) @@ -112,13 +112,50 @@ def _verify_db_contents(): init_polling_config = PollingConfig(2, 30, strict=True) wait_for_result(_verify_db_contents, init_polling_config) +@pytest.fixture(scope="module") +def gearbox(dvs): + return Gearbox(dvs) + +@pytest.fixture(scope="module") +def gbasic(dvs, gearbox): + return GBAsic(swsscommon.GB_ASIC_DB, dvs.redis_sock, gearbox) + +@pytest.fixture(scope="module") +def enable_port_counter(dvs): + flex_counter_table = swsscommon.Table(dvs.get_config_db().db_connection, + "FLEX_COUNTER_TABLE") + + # Enable port counter + flex_counter_table.hset("PORT", "FLEX_COUNTER_STATUS", "enable") + yield + # Disable port counter + flex_counter_table.hdel("PORT", "FLEX_COUNTER_STATUS") class TestGearbox(object): - def test_GearboxSanity(self, dvs, testlog): - Gearbox(dvs).SanityCheck(dvs, testlog) + def test_GearboxSanity(self, gearbox, testlog): + gearbox.SanityCheck(testlog) + + def test_GearboxCounter(self, dvs, gbasic, enable_port_counter, testlog): + counters_db = DVSDatabase(swsscommon.COUNTERS_DB, dvs.redis_sock) + gb_counters_db = DVSDatabase(swsscommon.GB_COUNTERS_DB, dvs.redis_sock) + + intf = gbasic.gearbox.interfaces["0"] + port_oid = counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")[intf["name"]] + system_port_oid, line_port_oid = gbasic.ports["0"] + + fvs = gb_counters_db.wait_for_entry("COUNTERS", system_port_oid) + assert fvs.get("SAI_PORT_STAT_IF_OUT_ERRORS") + + fvs = gb_counters_db.wait_for_entry("COUNTERS", line_port_oid) + assert fvs.get("SAI_PORT_STAT_IF_IN_ERRORS") + + fvs = counters_db.wait_for_entry("COUNTERS", port_oid) + assert 
fvs.get("SAI_PORT_STAT_IF_IN_ERRORS") + + fvs = counters_db.wait_for_entry("COUNTERS", port_oid) + assert fvs.get("SAI_PORT_STAT_IF_IN_ERRORS") - def test_GbAsicFEC(self, dvs, testlog): - gbasic = GBAsic(swsscommon.GB_ASIC_DB, dvs.redis_sock, Gearbox(dvs)) + def test_GbAsicFEC(self, gbasic, testlog): # set fec rs on port 0 of phy 1 fvs = swsscommon.FieldValuePairs([("system_fec","rs")]) From eba212d9cffa034c8e0fcef6e275fef6cc700604 Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Tue, 31 May 2022 09:46:43 +0300 Subject: [PATCH 12/64] [Counters] Improve performance by polling only configured ports buffer queue/pg counters (#2143) - What I did Currently in SONiC all ports queue and pg counters are created by default with the max possible amount of counters. This feature changes this behavior to poll only configured counters provided by the config DB BUFFER_PG and BUFFER_QUEUE tables. If no tables are present in the DB, no counters will be created for ports. Filter the unwanted queues/pgs returned by SAI API calls and skip the creation of these queue/pg counters. Also allow creating/removing counters at runtime if buffer PG/Queue is configured or removed. - Why I did it Improve performance by filtering unconfigured queue/pg counters on init. - How I verified it Check after enabling the counters, if configured counters are created in Counters DB according to the configurations. Add/Remove buffer PG/Queue configurations and observe the corresponding counters created/removed accordingly. New UT added to verify this flow.
Signed-off-by: Shlomi Bitton --- orchagent/bufferorch.cpp | 32 +++- orchagent/flexcounterorch.cpp | 181 ++++++++++++++++++++- orchagent/flexcounterorch.h | 32 ++++ orchagent/portsorch.cpp | 262 +++++++++++++++++++++++------- orchagent/portsorch.h | 20 ++- tests/mock_tests/portsorch_ut.cpp | 13 +- tests/mock_tests/routeorch_ut.cpp | 6 +- tests/test_buffer_traditional.py | 18 +- tests/test_flex_counters.py | 122 +++++++------- tests/test_pg_drop_counter.py | 64 +------- tests/test_watermark.py | 29 ++-- 11 files changed, 556 insertions(+), 223 deletions(-) diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index f9b91e7a16..b9fbd096b4 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -1,5 +1,6 @@ #include "tokenize.h" #include "bufferorch.h" +#include "directory.h" #include "logger.h" #include "sai_serialize.h" #include "warm_restart.h" @@ -16,6 +17,7 @@ extern sai_switch_api_t *sai_switch_api; extern sai_buffer_api_t *sai_buffer_api; extern PortsOrch *gPortsOrch; +extern Directory gDirectory; extern sai_object_id_t gSwitchId; #define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" @@ -948,6 +950,20 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return handle_status; } } + // create/remove a port queue counter for the queue buffer + else + { + auto flexCounterOrch = gDirectory.get(); + auto queues = tokens[1]; + if (op == SET_COMMAND && flexCounterOrch->getQueueCountersState()) + { + gPortsOrch->createPortBufferQueueCounters(port, queues); + } + else if (op == DEL_COMMAND && flexCounterOrch->getQueueCountersState()) + { + gPortsOrch->removePortBufferQueueCounters(port, queues); + } + } } } } @@ -1007,7 +1023,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup if (op == SET_COMMAND) { ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, - buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + 
buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { @@ -1087,6 +1103,20 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return handle_status; } } + // create or remove a port PG counter for the PG buffer + else + { + auto flexCounterOrch = gDirectory.get(); + auto pgs = tokens[1]; + if (op == SET_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) + { + gPortsOrch->createPortBufferPgCounters(port, pgs); + } + else if (op == DEL_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) + { + gPortsOrch->removePortBufferPgCounters(port, pgs); + } + } } } } diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index 29563d90a5..f16312f750 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -10,6 +10,7 @@ #include "debugcounterorch.h" #include "directory.h" #include "copporch.h" +#include #include "routeorch.h" #include "flowcounterrouteorch.h" @@ -58,6 +59,8 @@ unordered_map flexCounterGroupMap = FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector &tableNames): Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), + m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), + m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)) { @@ -144,11 +147,13 @@ void FlexCounterOrch::doTask(Consumer &consumer) } else if(key == QUEUE_KEY) { - gPortsOrch->generateQueueMap(); + gPortsOrch->generateQueueMap(getQueueConfigurations()); + m_queue_enabled = true; } else if(key == PG_WATERMARK_KEY) { - gPortsOrch->generatePriorityGroupMap(); + gPortsOrch->generatePriorityGroupMap(getPgConfigurations()); + m_pg_watermark_enabled = true; } } if(gIntfsOrch && (key == RIF_KEY) && (value == "enable")) @@ -230,6 +235,16 
@@ bool FlexCounterOrch::getPortBufferDropCountersState() const return m_port_buffer_drop_counter_enabled; } +bool FlexCounterOrch::getPgWatermarkCountersState() const +{ + return m_pg_watermark_enabled; +} + +bool FlexCounterOrch::getQueueCountersState() const +{ + return m_queue_enabled; +} + bool FlexCounterOrch::bake() { /* @@ -271,3 +286,165 @@ bool FlexCounterOrch::bake() Consumer* consumer = dynamic_cast(getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); return consumer->addToSync(entries); } + +map FlexCounterOrch::getQueueConfigurations() +{ + SWSS_LOG_ENTER(); + + map queuesStateVector; + std::vector portQueueKeys; + m_bufferQueueConfigTable.getKeys(portQueueKeys); + + for (const auto& portQueueKey : portQueueKeys) + { + auto toks = tokenize(portQueueKey, '|'); + if (toks.size() != 2) + { + SWSS_LOG_ERROR("Invalid BUFFER_QUEUE key: [%s]", portQueueKey.c_str()); + continue; + } + + auto configPortNames = tokenize(toks[0], ','); + auto configPortQueues = toks[1]; + toks = tokenize(configPortQueues, '-'); + + for (const auto& configPortName : configPortNames) + { + uint32_t maxQueueNumber = gPortsOrch->getNumberOfPortSupportedQueueCounters(configPortName); + uint32_t maxQueueIndex = maxQueueNumber - 1; + uint32_t minQueueIndex = 0; + + if (!queuesStateVector.count(configPortName)) + { + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(configPortName, flexCounterQueueState)); + } + + try { + auto startIndex = to_uint(toks[0], minQueueIndex, maxQueueIndex); + if (toks.size() > 1) + { + auto endIndex = to_uint(toks[1], minQueueIndex, maxQueueIndex); + queuesStateVector.at(configPortName).enableQueueCounters(startIndex, endIndex); + } + else + { + queuesStateVector.at(configPortName).enableQueueCounter(startIndex); + } + } catch (std::invalid_argument const& e) { + SWSS_LOG_ERROR("Invalid queue index [%s] for port [%s]", configPortQueues.c_str(), configPortName.c_str()); + continue; + } + } + } + + return 
queuesStateVector; +} + +map FlexCounterOrch::getPgConfigurations() +{ + SWSS_LOG_ENTER(); + + map pgsStateVector; + std::vector portPgKeys; + m_bufferPgConfigTable.getKeys(portPgKeys); + + for (const auto& portPgKey : portPgKeys) + { + auto toks = tokenize(portPgKey, '|'); + if (toks.size() != 2) + { + SWSS_LOG_ERROR("Invalid BUFFER_PG key: [%s]", portPgKey.c_str()); + continue; + } + + auto configPortNames = tokenize(toks[0], ','); + auto configPortPgs = toks[1]; + toks = tokenize(configPortPgs, '-'); + + for (const auto& configPortName : configPortNames) + { + uint32_t maxPgNumber = gPortsOrch->getNumberOfPortSupportedPgCounters(configPortName); + uint32_t maxPgIndex = maxPgNumber - 1; + uint32_t minPgIndex = 0; + + if (!pgsStateVector.count(configPortName)) + { + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(configPortName, flexCounterPgState)); + } + + try { + auto startIndex = to_uint(toks[0], minPgIndex, maxPgIndex); + if (toks.size() > 1) + { + auto endIndex = to_uint(toks[1], minPgIndex, maxPgIndex); + pgsStateVector.at(configPortName).enablePgCounters(startIndex, endIndex); + } + else + { + pgsStateVector.at(configPortName).enablePgCounter(startIndex); + } + } catch (std::invalid_argument const& e) { + SWSS_LOG_ERROR("Invalid pg index [%s] for port [%s]", configPortPgs.c_str(), configPortName.c_str()); + continue; + } + } + } + + return pgsStateVector; +} + +FlexCounterQueueStates::FlexCounterQueueStates(uint32_t maxQueueNumber) +{ + SWSS_LOG_ENTER(); + m_queueStates.resize(maxQueueNumber, false); +} + +bool FlexCounterQueueStates::isQueueCounterEnabled(uint32_t index) const +{ + SWSS_LOG_ENTER(); + return m_queueStates[index]; +} + +void FlexCounterQueueStates::enableQueueCounters(uint32_t startIndex, uint32_t endIndex) +{ + SWSS_LOG_ENTER(); + for (uint32_t queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + enableQueueCounter(queueIndex); + } +} + +void 
FlexCounterQueueStates::enableQueueCounter(uint32_t queueIndex) +{ + SWSS_LOG_ENTER(); + m_queueStates[queueIndex] = true; +} + +FlexCounterPgStates::FlexCounterPgStates(uint32_t maxPgNumber) +{ + SWSS_LOG_ENTER(); + m_pgStates.resize(maxPgNumber, false); +} + +bool FlexCounterPgStates::isPgCounterEnabled(uint32_t index) const +{ + SWSS_LOG_ENTER(); + return m_pgStates[index]; +} + +void FlexCounterPgStates::enablePgCounters(uint32_t startIndex, uint32_t endIndex) +{ + SWSS_LOG_ENTER(); + for (uint32_t pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + enablePgCounter(pgIndex); + } +} + +void FlexCounterPgStates::enablePgCounter(uint32_t pgIndex) +{ + SWSS_LOG_ENTER(); + m_pgStates[pgIndex] = true; +} diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index 4f9734c0e2..a8106720da 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -10,6 +10,30 @@ extern "C" { #include "sai.h" } +class FlexCounterQueueStates +{ +public: + FlexCounterQueueStates(uint32_t maxQueueNumber); + bool isQueueCounterEnabled(uint32_t index) const; + void enableQueueCounters(uint32_t startIndex, uint32_t endIndex); + void enableQueueCounter(uint32_t queueIndex); + +private: + std::vector m_queueStates{}; +}; + +class FlexCounterPgStates +{ +public: + FlexCounterPgStates(uint32_t maxPgNumber); + bool isPgCounterEnabled(uint32_t index) const; + void enablePgCounters(uint32_t startIndex, uint32_t endIndex); + void enablePgCounter(uint32_t pgIndex); + +private: + std::vector m_pgStates{}; +}; + class FlexCounterOrch: public Orch { public: @@ -18,6 +42,10 @@ class FlexCounterOrch: public Orch virtual ~FlexCounterOrch(void); bool getPortCountersState() const; bool getPortBufferDropCountersState() const; + bool getPgWatermarkCountersState() const; + bool getQueueCountersState() const; + map getQueueConfigurations(); + map getPgConfigurations(); bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;} bool 
getRouteFlowCountersState() const {return m_route_flow_counter_enabled;} bool bake() override; @@ -27,9 +55,13 @@ class FlexCounterOrch: public Orch std::shared_ptr m_flexCounterGroupTable = nullptr; bool m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; + bool m_pg_watermark_enabled = false; + bool m_queue_enabled = false; bool m_hostif_trap_counter_enabled = false; bool m_route_flow_counter_enabled = false; Table m_flexCounterConfigTable; + Table m_bufferQueueConfigTable; + Table m_bufferPgConfigTable; }; #endif diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 8c3ad481a3..6700031bd2 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -2457,18 +2457,6 @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde port_buffer_drop_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_buffer_drop_stats); } - /* when a port is added and priority group map counter is enabled --> we need to add pg counter for it */ - if (m_isPriorityGroupMapGenerated) - { - generatePriorityGroupMapPerPort(p); - } - - /* when a port is added and queue map counter is enabled --> we need to add queue map counter for it */ - if (m_isQueueMapGenerated) - { - generateQueueMapPerPort(p); - } - PortUpdate update = { p, true }; notify(SUBJECT_TYPE_PORT_CHANGE, static_cast(&update)); @@ -2521,18 +2509,6 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) port_buffer_drop_stat_manager.clearCounterIdList(p.m_port_id); } - /* remove pg port counters */ - if (m_isPriorityGroupMapGenerated) - { - removePriorityGroupMapPerPort(p); - } - - /* remove queue port counters */ - if (m_isQueueMapGenerated) - { - removeQueueMapPerPort(p); - } - /* remove port name map from counter table */ m_counterTable->hdel("", alias); @@ -5498,7 +5474,7 @@ bool PortsOrch::removeTunnel(Port tunnel) return true; } -void PortsOrch::generateQueueMap() +void PortsOrch::generateQueueMap(map queuesStateVector) 
{ if (m_isQueueMapGenerated) { @@ -5509,53 +5485,87 @@ void PortsOrch::generateQueueMap() { if (it.second.m_type == Port::PHY) { - generateQueueMapPerPort(it.second); + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias)); } } m_isQueueMapGenerated = true; } -void PortsOrch::removeQueueMapPerPort(const Port& port) +void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState) { - /* Remove the Queue map in the Counter DB */ + /* Create the Queue map in the Counter DB */ + /* Add stat counters to flex_counter */ + vector queueVector; + vector queuePortVector; + vector queueIndexVector; + vector queueTypeVector; for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) { std::ostringstream name; name << port.m_alias << ":" << queueIndex; - std::unordered_set counter_stats; const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - m_queueTable->hdel("",name.str()); - m_queuePortTable->hdel("",id); - string queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { - m_queueTypeTable->hdel("",id); - m_queueIndexTable->hdel("",id); + if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } + queueTypeVector.emplace_back(id, queueType); + queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } + queueVector.emplace_back(name.str(), id); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + + // Install a flex counter for this queue to track stats + std::unordered_set counter_stats; for (const auto& it: queue_stat_ids) { counter_stats.emplace(sai_serialize_queue_stat(it)); } - 
queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); - /* remove watermark queue counters */ + /* add watermark queue counters */ string key = getQueueWatermarkFlexCounterTableKey(id); - m_flexCounterTable->del(key); + string delimiter(""); + std::ostringstream counters_stream; + for (const auto& it: queueWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_queue_stat(it); + delimiter = comma; + } + + vector fieldValues; + fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); + + m_flexCounterTable->set(key, fieldValues); } - CounterCheckOrch::getInstance().removePort(port); + m_queueTable->set("", queueVector); + m_queuePortTable->set("", queuePortVector); + m_queueIndexTable->set("", queueIndexVector); + m_queueTypeTable->set("", queueTypeVector); + + CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generateQueueMapPerPort(const Port& port) +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) { + SWSS_LOG_ENTER(); + /* Create the Queue map in the Counter DB */ /* Add stat counters to flex_counter */ vector queueVector; @@ -5563,16 +5573,21 @@ void PortsOrch::generateQueueMapPerPort(const Port& port) vector queueIndexVector; vector queueTypeVector; - for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + auto toks = tokenize(queues, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) { std::ostringstream name; name << port.m_alias << ":" << queueIndex; const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - queueVector.emplace_back(name.str(), id); - queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); - string queueType; uint8_t queueRealIndex = 
0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) @@ -5581,6 +5596,9 @@ void PortsOrch::generateQueueMapPerPort(const Port& port) queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } + queueVector.emplace_back(name.str(), id); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + // Install a flex counter for this queue to track stats std::unordered_set counter_stats; for (const auto& it: queue_stat_ids) @@ -5614,7 +5632,42 @@ void PortsOrch::generateQueueMapPerPort(const Port& port) CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generatePriorityGroupMap() +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +{ + SWSS_LOG_ENTER(); + + /* Remove the Queues maps in the Counter DB */ + /* Remove stat counters from flex_counter DB */ + auto toks = tokenize(queues, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << queueIndex; + const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + + /* Remove watermark queue counters */ + string key = getQueueWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + + // Remove the flex counter for this queue + queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + + // Remove the queue counter from counters DB maps + m_queueTable->hdel("", name.str()); + m_queuePortTable->hdel("", id); + m_queueIndexTable->hdel("", id); + m_queueTypeTable->hdel("", id); + } +} + +void PortsOrch::generatePriorityGroupMap(map pgsStateVector) { if (m_isPriorityGroupMapGenerated) { @@ -5625,48 +5678,100 @@ void PortsOrch::generatePriorityGroupMap() { if (it.second.m_type == Port::PHY) { - generatePriorityGroupMapPerPort(it.second); + if 
(!pgsStateVector.count(it.second.m_alias)) + { + auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); + } + generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias)); } } m_isPriorityGroupMapGenerated = true; } -void PortsOrch::removePriorityGroupMapPerPort(const Port& port) +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) { - /* Remove the PG map in the Counter DB */ + /* Create the PG map in the Counter DB */ + /* Add stat counters to flex_counter */ + vector pgVector; + vector pgPortVector; + vector pgIndexVector; for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) { + if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) + { + continue; + } std::ostringstream name; name << port.m_alias << ":" << pgIndex; const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + pgVector.emplace_back(name.str(), id); + pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + pgIndexVector.emplace_back(id, to_string(pgIndex)); + string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - m_pgTable->hdel("",name.str()); - m_pgPortTable->hdel("",id); - m_pgIndexTable->hdel("",id); + std::string delimiter = ""; + std::ostringstream counters_stream; + /* Add watermark counters to flex_counter */ + for (const auto& it: ingressPriorityGroupWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + delimiter = comma; + } - m_flexCounterTable->del(key); + vector fieldValues; + fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); + delimiter = ""; + std::ostringstream ingress_pg_drop_packets_counters_stream; key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* remove dropped 
packets counters to flex_counter */ - m_flexCounterTable->del(key); + /* Add dropped packets counters to flex_counter */ + for (const auto& it: ingressPriorityGroupDropStatIds) + { + ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + if (delimiter.empty()) + { + delimiter = comma; + } + } + fieldValues.clear(); + fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); } - CounterCheckOrch::getInstance().removePort(port); + m_pgTable->set("", pgVector); + m_pgPortTable->set("", pgPortVector); + m_pgIndexTable->set("", pgIndexVector); + + CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) +void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) { + SWSS_LOG_ENTER(); + /* Create the PG map in the Counter DB */ /* Add stat counters to flex_counter */ vector pgVector; vector pgPortVector; vector pgIndexVector; - for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) { std::ostringstream name; name << port.m_alias << ":" << pgIndex; @@ -5716,6 +5821,41 @@ void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) CounterCheckOrch::getInstance().addPort(port); } +void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) +{ + SWSS_LOG_ENTER(); + + /* Remove the Pgs maps in the Counter DB */ + /* Remove stat counters from flex_counter DB */ + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + 
std::ostringstream name; + name << port.m_alias << ":" << pgIndex; + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + /* Remove dropped packets counters from flex_counter */ + string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); + m_flexCounterTable->del(key); + + /* Remove watermark counters from flex_counter */ + key = getPriorityGroupWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + + // Remove the pg counter from counters DB maps + m_pgTable->hdel("", name.str()); + m_pgPortTable->hdel("", id); + m_pgIndexTable->hdel("", id); + } +} + void PortsOrch::generatePortCounterMap() { if (m_isPortCounterMapGenerated) @@ -5766,6 +5906,16 @@ void PortsOrch::generatePortBufferDropCounterMap() m_isPortBufferDropCounterMapGenerated = true; } +uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port) +{ + return static_cast(m_portList[port].m_priority_group_ids.size()); +} + +uint32_t PortsOrch::getNumberOfPortSupportedQueueCounters(string port) +{ + return static_cast(m_portList[port].m_queue_ids.size()); +} + void PortsOrch::doTask(NotificationConsumer &consumer) { SWSS_LOG_ENTER(); diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index ab35277d80..6291231ae7 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -128,9 +128,17 @@ class PortsOrch : public Orch, public Subject bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask); bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask); + + void generateQueueMap(map queuesStateVector); + uint32_t getNumberOfPortSupportedQueueCounters(string port); + void createPortBufferQueueCounters(const Port &port, string queues); + void removePortBufferQueueCounters(const Port &port, string queues); + + void generatePriorityGroupMap(map pgsStateVector); + uint32_t getNumberOfPortSupportedPgCounters(string port); + void createPortBufferPgCounters(const Port &port, string pgs); + void 
removePortBufferPgCounters(const Port& port, string pgs); - void generateQueueMap(); - void generatePriorityGroupMap(); void generatePortCounterMap(); void generatePortBufferDropCounterMap(); @@ -325,13 +333,9 @@ class PortsOrch : public Orch, public Subject bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); bool m_isQueueMapGenerated = false; - void generateQueueMapPerPort(const Port& port); - void removeQueueMapPerPort(const Port& port); - + void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState); bool m_isPriorityGroupMapGenerated = false; - void generatePriorityGroupMapPerPort(const Port& port); - void removePriorityGroupMapPerPort(const Port& port); - + void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState); bool m_isPortCounterMapGenerated = false; bool m_isPortBufferDropCounterMapGenerated = false; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 28df6610fd..7d867396d2 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -163,13 +163,14 @@ namespace portsorch_test ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, APP_BUFFER_PROFILE_TABLE_NAME, APP_BUFFER_QUEUE_TABLE_NAME, @@ -862,7 +863,7 @@ namespace portsorch_test * updated to DB. 
*/ TEST_F(PortsOrchTest, PortOperStatusIsUpAndOperSpeedIsZero) - { + { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); // Get SAI default ports to populate DB @@ -887,7 +888,7 @@ namespace portsorch_test Port port; gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); - + // save original api since we will spy auto orig_port_api = sai_port_api; sai_port_api = new sai_port_api_t(); @@ -905,14 +906,14 @@ namespace portsorch_test // Return 0 for port operational speed attrs[0].value.u32 = 0; } - + return (sai_status_t)SAI_STATUS_SUCCESS; } ); auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); auto consumer = exec->getNotificationConsumer(); - + // mock a redis reply for notification, it notifies that Ehernet0 is going to up mockReply = (redisReply *)calloc(sizeof(redisReply), 1); mockReply->type = REDIS_REPLY_ARRAY; @@ -934,7 +935,7 @@ namespace portsorch_test // trigger the notification consumer->readData(); gPortsOrch->doTask(*consumer); - mockReply = nullptr; + mockReply = nullptr; gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 66df4bfbcc..2c1c4b8535 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -176,15 +176,15 @@ namespace routeorch_test { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } }; + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - ASSERT_EQ(gPortsOrch, nullptr); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - static const vector route_pattern_tables = { 
CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, }; diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 3d2285fd7b..071217b4e3 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -77,16 +77,15 @@ def get_pg_name_map(self): @pytest.fixture def setup_teardown_test(self, dvs): - try: - self.setup_db(dvs) - self.set_port_qos_table(self.INTF, '2,3,4,6') - pg_name_map = self.get_pg_name_map() - yield pg_name_map - finally: - self.teardown() + self.setup_db(dvs) + self.set_port_qos_table(self.INTF, '2,3,4,6') + time.sleep(2) + + yield + + self.teardown() def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): - self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None try: @@ -112,6 +111,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_before_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.pg_name_map = self.get_pg_name_map() self.orig_profiles = self.get_asic_buf_profile() # check if the lossless profile for the test speed is already present @@ -174,7 +174,6 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # To verify the BUFFER_PG is not hardcoded to 3,4 # buffermgrd will read 'pfc_enable' entry and apply lossless profile to that queue def test_buffer_pg_update(self, dvs, setup_teardown_test): - self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None test_speed = None @@ -203,6 +202,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_for_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.pg_name_map = self.get_pg_name_map() self.orig_profiles = 
self.get_asic_buf_profile() # get the orig buf profiles attached to the pgs diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index 76a1a535f9..f5a0b146b2 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -7,8 +7,6 @@ ROUTE_TO_PATTERN_MAP = "COUNTERS_ROUTE_TO_PATTERN_MAP" NUMBER_OF_RETRIES = 10 CPU_PORT_OID = "0x0" -PORT = "Ethernet0" -PORT_MAP = "COUNTERS_PORT_NAME_MAP" counter_group_meta = { 'port_counter': { @@ -73,7 +71,6 @@ } } -@pytest.mark.usefixtures('dvs_port_manager') class TestFlexCounters(object): def setup_dbs(self, dvs): @@ -133,6 +130,18 @@ def wait_for_interval_set(self, group, interval): assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) + def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): + for retry in range(NUMBER_OF_RETRIES): + counter_oid = self.counters_db.db_connection.hget(map, port + ':' + index) + if (isSet and counter_oid): + return counter_oid + elif (not isSet and not counter_oid): + return None + else: + time.sleep(1) + + assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index) + def verify_no_flex_counters_tables(self, counter_stat): counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" @@ -692,64 +701,53 @@ def remove_ip_address(self, interface, ip): def set_admin_status(self, interface, status): self.config_db.update_entry("PORT", interface, {"admin_status": status}) - - def test_add_remove_ports(self, dvs): + + def test_create_remove_buffer_pg_counter(self, dvs): + """ + Test steps: + 1. Enable PG flex counters. + 2. Configure new buffer prioriy group for a port + 3. Verify counter is automatically created + 4. 
Remove the new buffer prioriy group for the port + 5. Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ self.setup_dbs(dvs) - - # set flex counter - counter_key = counter_group_meta['queue_counter']['key'] - counter_stat = counter_group_meta['queue_counter']['group_name'] - counter_map = counter_group_meta['queue_counter']['name_map'] - self.set_flex_counter_group_status(counter_key, counter_map) + meta_data = counter_group_meta['pg_watermark_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + + def test_create_remove_buffer_queue_counter(self, dvs): + """ + Test steps: + 1. Enable Queue flex counters. + 2. Configure new buffer queue for a port + 3. Verify counter is automatically created + 4. Remove the new buffer queue for the port + 5. 
Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ + self.setup_dbs(dvs) + meta_data = counter_group_meta['queue_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) - # receive port info - fvs = self.config_db.get_entry("PORT", PORT) - assert len(fvs) > 0 - - # save all the oids of the pg drop counters - oid_list = [] - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - for key, oid in counters_queue_map.items(): - if PORT in key: - oid_list.append(oid) - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 1 - oid_list_len = len(oid_list) - - # get port oid - port_oid = self.counters_db.get_entry(PORT_MAP, "")[PORT] - - # remove port and verify that it was removed properly - self.dvs_port.remove_port(PORT) - dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) - - # verify counters were removed from flex counter table - for oid in oid_list: - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 0 - - # verify that port counter maps were removed from counters db - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - for key in counters_queue_map.keys(): - if PORT in key: - assert False - - # add port and wait until the port is added on asic db - num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) - - self.config_db.create_entry("PORT", PORT, fvs) - - dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) - 
dvs.get_counters_db().wait_for_fields("COUNTERS_QUEUE_NAME_MAP", "", ["%s:0"%(PORT)]) - - # verify queue counters were added - oid_list = [] - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - - for key, oid in counters_queue_map.items(): - if PORT in key: - oid_list.append(oid) - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 1 - # the number of the oids needs to be the same as the original number of oids (before removing a port and adding) - assert oid_list_len == len(oid_list) + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) diff --git a/tests/test_pg_drop_counter.py b/tests/test_pg_drop_counter.py index b3682881de..6d97af5f5c 100644 --- a/tests/test_pg_drop_counter.py +++ b/tests/test_pg_drop_counter.py @@ -2,16 +2,12 @@ import re import time import json -import pytest import redis from swsscommon import swsscommon pg_drop_attr = "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS" -PORT = "Ethernet0" - -@pytest.mark.usefixtures('dvs_port_manager') class TestPGDropCounter(object): DEFAULT_POLL_INTERVAL = 10 pgs = {} @@ -61,14 +57,11 @@ def verify_value(self, dvs, obj_ids, entry_name, expected_value): assert found, "entry name %s not found" % (entry_name) def set_up_flex_counter(self): - pg_stats_entry = {"PG_COUNTER_ID_LIST": "{}".format(pg_drop_attr)} - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP_STAT_COUNTER:{}".format(pg), pg_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} - self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP", fc_status_enable) self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) + # Wait for DB's to populate by orchagent + time.sleep(2) def clear_flex_counter(self): for pg in 
self.pgs: @@ -79,10 +72,12 @@ def clear_flex_counter(self): def test_pg_drop_counters(self, dvs): self.setup_dbs(dvs) - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") - try: - self.set_up_flex_counter() + self.set_up_flex_counter() + # Get all configured counters OID's + self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + try: self.populate_asic(dvs, "0") time.sleep(self.DEFAULT_POLL_INTERVAL) self.verify_value(dvs, self.pgs, pg_drop_attr, "0") @@ -97,48 +92,3 @@ def test_pg_drop_counters(self, dvs): finally: self.clear_flex_counter() - def test_pg_drop_counter_port_add_remove(self, dvs): - self.setup_dbs(dvs) - - try: - # configure pg drop flex counter - self.set_up_flex_counter() - - # receive port info - fvs = self.config_db.get_entry("PORT", PORT) - assert len(fvs) > 0 - - # save all the oids of the pg drop counters - oid_list = [] - for priority in range(0,7): - oid_list.append(dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)]) - # verify that counters exists on flex counter - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid_list[-1]) - assert len(fields) == 1 - - # remove port - port_oid = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")[PORT] - self.dvs_port.remove_port(PORT) - dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) - - # verify counters were removed from flex counter table - for oid in oid_list: - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) - assert len(fields) == 0 - - # add port and wait until the port is added on asic db - num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) - self.config_db.create_entry("PORT", PORT, fvs) - dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 
1) - dvs.get_counters_db().wait_for_fields("COUNTERS_PG_NAME_MAP", "", ["%s:0"%(PORT)]) - - # verify counter was added - for priority in range(0,7): - oid = dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)] - - # verify that counters exists on flex counter - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) - assert len(fields) == 1 - - finally: - self.clear_flex_counter() diff --git a/tests/test_watermark.py b/tests/test_watermark.py index 23efedcb42..a8cee70aa1 100644 --- a/tests/test_watermark.py +++ b/tests/test_watermark.py @@ -104,22 +104,8 @@ def verify_value(self, dvs, obj_ids, table_name, watermark_name, expected_value) assert found, "no such watermark found" def set_up_flex_counter(self, dvs): - for q in self.qs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "QUEUE_WATERMARK_STAT_COUNTER:{}".format(q), - WmFCEntry.queue_stats_entry) - - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "PG_WATERMARK_STAT_COUNTER:{}".format(pg), - WmFCEntry.pg_stats_entry) - - for buffer in self.buffers: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(buffer), - WmFCEntry.buffer_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} + self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) @@ -130,7 +116,8 @@ def set_up_flex_counter(self, dvs): "BUFFER_POOL_WATERMARK", fc_status_enable) - self.populate_asic_all(dvs, "0") + # Wait for DB's to populate by orchagent + time.sleep(2) def clear_flex_counter(self, dvs): for q in self.qs: @@ -150,10 +137,14 @@ def clear_flex_counter(self, dvs): self.config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK") def set_up(self, dvs): - self.qs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_QUEUE") - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") + self.pgs = 
self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + self.qs = self.counters_db.db_connection.hgetall("COUNTERS_QUEUE_NAME_MAP").values() + assert self.qs is not None and len(self.pgs) > 0 self.buffers = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_POOL") + self.populate_asic_all(dvs, "0") + db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(db, "COUNTERS_QUEUE_TYPE_MAP") @@ -180,9 +171,9 @@ def clear_watermark(self, dvs, data): def test_telemetry_period(self, dvs): self.setup_dbs(dvs) + self.set_up_flex_counter(dvs) self.set_up(dvs) try: - self.set_up_flex_counter(dvs) self.enable_unittests(dvs, "true") self.populate_asic_all(dvs, "100") From 4944f0f9ba2cfe9129a4fc579dd1d456fa61339a Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Fri, 3 Jun 2022 00:58:43 +0800 Subject: [PATCH 13/64] Revert "[portsorch]: Prevent LAG member configuration when port has active ACL binding (#2165)" (#2306) This reverts commit 390cae1faae93293a825b9008c20259b4d09d5cc. 
--- orchagent/portsorch.cpp | 11 ----- tests/test_acl_portchannel.py | 78 ----------------------------------- 2 files changed, 89 deletions(-) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 6700031bd2..dc1be1b3b6 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -3933,17 +3933,6 @@ void PortsOrch::doLagMemberTask(Consumer &consumer) continue; } - if (!port.m_ingress_acl_tables_uset.empty() || !port.m_egress_acl_tables_uset.empty()) - { - SWSS_LOG_ERROR( - "Failed to add member %s to LAG %s: ingress/egress ACL configuration is present", - port.m_alias.c_str(), - lag.m_alias.c_str() - ); - it = consumer.m_toSync.erase(it); - continue; - } - if (!addLagMember(lag, port, (status == "enabled"))) { it++; diff --git a/tests/test_acl_portchannel.py b/tests/test_acl_portchannel.py index 210c4f18d8..b912cbea2f 100644 --- a/tests/test_acl_portchannel.py +++ b/tests/test_acl_portchannel.py @@ -1,87 +1,9 @@ import time import pytest -import logging from swsscommon import swsscommon -logging.basicConfig(level=logging.INFO) -acllogger = logging.getLogger(__name__) - - -@pytest.fixture(autouse=True, scope="class") -def dvs_api(request, dvs_acl): - # Fixtures are created when first requested by a test, and are destroyed based on their scope - if request.cls is None: - yield - return - acllogger.info("Initialize DVS API: ACL") - request.cls.dvs_acl = dvs_acl - yield - acllogger.info("Deinitialize DVS API: ACL") - del request.cls.dvs_acl - - -@pytest.mark.usefixtures("dvs_lag_manager") -class TestAclInterfaceBinding: - @pytest.mark.parametrize("stage", ["ingress", "egress"]) - def test_AclTablePortChannelMemberBinding(self, testlog, stage): - """Verify that LAG member creation is prohibited when ACL binding is configured - - The test flow: - 1. Create ACL table and bind Ethernet124 - 2. Verify ACL table has been successfully added - 3. Create LAG - 4. Verify LAG has been successfully added - 5. Create LAG member Ethernet120 - 6. 
Verify LAG member has been successfully added - 7. Create LAG member Ethernet124 - 8. Verify LAG member hasn't been added because of active ACL binding - - Args: - testlog: test start/end log record injector - stage: ACL table stage (e.g., ingress/egress) - """ - try: - acllogger.info("Create ACL table: acl_table") - self.dvs_acl.create_acl_table( - table_name="acl_table", - table_type="L3", - ports=["Ethernet124"], - stage=stage - ) - self.dvs_acl.verify_acl_table_count(1) - - acllogger.info("Create LAG: PortChannel0001") - self.dvs_lag.create_port_channel("0001") - self.dvs_lag.get_and_verify_port_channel(1) - - acllogger.info("Create LAG member: Ethernet120") - self.dvs_lag.create_port_channel_member("0001", "Ethernet120") - self.dvs_lag.get_and_verify_port_channel_members(1) - - acllogger.info("Create LAG member: Ethernet124") - self.dvs_lag.create_port_channel_member("0001", "Ethernet124") - acllogger.info("Verify LAG member hasn't been created: Ethernet124") - self.dvs_lag.get_and_verify_port_channel_members(1) - finally: - acllogger.info("Remove LAG member: Ethernet124") - self.dvs_lag.remove_port_channel_member("0001", "Ethernet124") - self.dvs_lag.get_and_verify_port_channel_members(1) - - acllogger.info("Remove LAG member: Ethernet120") - self.dvs_lag.remove_port_channel_member("0001", "Ethernet120") - self.dvs_lag.get_and_verify_port_channel_members(0) - - acllogger.info("Remove LAG: PortChannel0001") - self.dvs_lag.remove_port_channel("0001") - self.dvs_lag.get_and_verify_port_channel(0) - - acllogger.info("Remove ACL table: acl_table") - self.dvs_acl.remove_acl_table("acl_table") - self.dvs_acl.verify_acl_table_count(0) - - class TestPortChannelAcl(object): def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) From a0c3238bacd36ca84f38e573472e0c3f1c4748b5 Mon Sep 17 00:00:00 2001 From: Junhua Zhai Date: Fri, 3 Jun 2022 10:50:30 +0800 Subject: [PATCH 14/64] Add port counter sanity check (#2300) What I did Fix issue 
Azure/sonic-buildimage#10850 partially by adding sanity check in port_rates.lua. If the must-have counters of one port are not able to get, skip its rate computation. Why I did it It avoids port_rates.lua execution exits abnormally. --- orchagent/port_rates.lua | 76 +++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/orchagent/port_rates.lua b/orchagent/port_rates.lua index 1d3d3f24f1..c29977d153 100644 --- a/orchagent/port_rates.lua +++ b/orchagent/port_rates.lua @@ -29,28 +29,33 @@ logit(alpha) logit(one_minus_alpha) logit(delta) -local n = table.getn(KEYS) -for i = 1, n do - local state_table = rates_table_name .. ':' .. KEYS[i] .. ':' .. 'PORT' +local function compute_rate(port) + local state_table = rates_table_name .. ':' .. port .. ':' .. 'PORT' local initialized = redis.call('HGET', state_table, 'INIT_DONE') logit(initialized) -- Get new COUNTERS values - local in_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_UCAST_PKTS') - local in_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS') - local out_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS') - local out_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS') - local in_octets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_OCTETS') - local out_octets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_OCTETS') + local in_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_UCAST_PKTS') + local in_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS') + local out_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. 
port, 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS') + local out_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS') + local in_octets = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_OCTETS') + local out_octets = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_OCTETS') + + if not in_ucast_pkts or not in_non_ucast_pkts or not out_ucast_pkts or + not out_non_ucast_pkts or not in_octets or not out_octets then + logit("Not found some counters on " .. port) + return + end if initialized == 'DONE' or initialized == 'COUNTERS_LAST' then -- Get old COUNTERS values - local in_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_UCAST_PKTS_last') - local in_non_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS_last') - local out_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS_last') - local out_non_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last') - local in_octets_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_OCTETS_last') - local out_octets_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_OCTETS_last') + local in_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_UCAST_PKTS_last') + local in_non_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS_last') + local out_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS_last') + local out_non_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last') + local in_octets_last = redis.call('HGET', rates_table_name .. ':' .. 
port, 'SAI_PORT_STAT_IF_IN_OCTETS_last') + local out_octets_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_OCTETS_last') -- Calculate new rates values local rx_bps_new = (in_octets - in_octets_last) / delta * 1000 @@ -60,22 +65,22 @@ for i = 1, n do if initialized == "DONE" then -- Get old rates values - local rx_bps_old = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'RX_BPS') - local rx_pps_old = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'RX_PPS') - local tx_bps_old = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'TX_BPS') - local tx_pps_old = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'TX_PPS') + local rx_bps_old = redis.call('HGET', rates_table_name .. ':' .. port, 'RX_BPS') + local rx_pps_old = redis.call('HGET', rates_table_name .. ':' .. port, 'RX_PPS') + local tx_bps_old = redis.call('HGET', rates_table_name .. ':' .. port, 'TX_BPS') + local tx_pps_old = redis.call('HGET', rates_table_name .. ':' .. port, 'TX_PPS') -- Smooth the rates values and store them in DB - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'RX_BPS', alpha*rx_bps_new + one_minus_alpha*rx_bps_old) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'RX_PPS', alpha*rx_pps_new + one_minus_alpha*rx_pps_old) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'TX_BPS', alpha*tx_bps_new + one_minus_alpha*tx_bps_old) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'TX_PPS', alpha*tx_pps_new + one_minus_alpha*tx_pps_old) + redis.call('HSET', rates_table_name .. ':' .. port, 'RX_BPS', alpha*rx_bps_new + one_minus_alpha*rx_bps_old) + redis.call('HSET', rates_table_name .. ':' .. port, 'RX_PPS', alpha*rx_pps_new + one_minus_alpha*rx_pps_old) + redis.call('HSET', rates_table_name .. ':' .. port, 'TX_BPS', alpha*tx_bps_new + one_minus_alpha*tx_bps_old) + redis.call('HSET', rates_table_name .. ':' .. 
port, 'TX_PPS', alpha*tx_pps_new + one_minus_alpha*tx_pps_old) else -- Store unsmoothed initial rates values in DB - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'RX_BPS', rx_bps_new) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'RX_PPS', rx_pps_new) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'TX_BPS', tx_bps_new) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'TX_PPS', tx_pps_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'RX_BPS', rx_bps_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'RX_PPS', rx_pps_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'TX_BPS', tx_bps_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'TX_PPS', tx_pps_new) redis.call('HSET', state_table, 'INIT_DONE', 'DONE') end else @@ -83,12 +88,17 @@ for i = 1, n do end -- Set old COUNTERS values - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_UCAST_PKTS_last', in_ucast_pkts) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS_last', in_non_ucast_pkts) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS_last', out_ucast_pkts) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last', out_non_ucast_pkts) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_OCTETS_last', in_octets) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_OCTETS_last', out_octets) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_UCAST_PKTS_last', in_ucast_pkts) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS_last', in_non_ucast_pkts) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS_last', out_ucast_pkts) + redis.call('HSET', rates_table_name .. ':' .. 
port, 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last', out_non_ucast_pkts) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_OCTETS_last', in_octets) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_OCTETS_last', out_octets) +end + +local n = table.getn(KEYS) +for i = 1, n do + compute_rate(KEYS[i]) end return logtable From 05d19eab55f5e0bfce6fc957fde582410ff27a09 Mon Sep 17 00:00:00 2001 From: Qi Luo Date: Thu, 2 Jun 2022 20:58:34 -0700 Subject: [PATCH 15/64] Purge package sonic-db-cli which depends on libswsscommon (#2308) **What I did** Purge package sonic-db-cli which depends on libswsscommon **Why I did it** Since sonic-db-cli depends on libswsscommon, we could not simply only purge libswsscommon, so we purge both together. --- .azure-pipelines/docker-sonic-vs/Dockerfile | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/.azure-pipelines/docker-sonic-vs/Dockerfile b/.azure-pipelines/docker-sonic-vs/Dockerfile index f288c8fdaa..33cdb7e8dc 100644 --- a/.azure-pipelines/docker-sonic-vs/Dockerfile +++ b/.azure-pipelines/docker-sonic-vs/Dockerfile @@ -4,14 +4,7 @@ ARG docker_container_name ADD ["debs", "/debs"] -RUN dpkg --purge python-swsscommon -RUN dpkg --purge python3-swsscommon -RUN dpkg --purge swss -RUN dpkg --purge libsairedis -RUN dpkg --purge libswsscommon -RUN dpkg --purge libsaimetadata -RUN dpkg --purge libsaivs -RUN dpkg --purge syncd-vs +RUN dpkg --purge python-swsscommon python3-swsscommon swss libsairedis sonic-db-cli libswsscommon libsaimetadata libsaivs syncd-vs RUN dpkg -i /debs/libswsscommon_1.0.0_amd64.deb RUN dpkg -i /debs/python-swsscommon_1.0.0_amd64.deb From 2ff763fc355c8f28d88e226be809aaa06d4bd724 Mon Sep 17 00:00:00 2001 From: Vaibhav Hemant Dixit Date: Wed, 8 Jun 2022 07:52:42 -0700 Subject: [PATCH 16/64] Fix test_warm_reboot issues blocking PR merge (#2309) * Two fixes: sleep after stop and check values in routes --- tests/conftest.py | 3 ++- 
tests/test_warm_reboot.py | 28 +++++++++++++++++++--------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index e2e3bbcf77..efe6c85225 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -658,6 +658,7 @@ def stop_swss(self): for pname in self.swssd: cmd += "supervisorctl stop {}; ".format(pname) self.runcmd(['sh', '-c', cmd]) + time.sleep(5) # deps: warm_reboot def start_zebra(self): @@ -669,7 +670,7 @@ def start_zebra(self): # deps: warm_reboot def stop_zebra(self): self.runcmd(['sh', '-c', 'pkill -9 zebra']) - time.sleep(1) + time.sleep(5) # deps: warm_reboot def start_fpmsyncd(self): diff --git a/tests/test_warm_reboot.py b/tests/test_warm_reboot.py index cf525a64f3..9447dc55ed 100644 --- a/tests/test_warm_reboot.py +++ b/tests/test_warm_reboot.py @@ -118,12 +118,14 @@ def how_many_entries_exist(db, table): def stop_neighsyncd(dvs): dvs.runcmd(['sh', '-c', 'pkill -x neighsyncd']) + time.sleep(1) def start_neighsyncd(dvs): dvs.runcmd(['sh', '-c', 'supervisorctl start neighsyncd']) def stop_restore_neighbors(dvs): dvs.runcmd(['sh', '-c', 'pkill -x restore_neighbors']) + time.sleep(1) def start_restore_neighbors(dvs): dvs.runcmd(['sh', '-c', 'supervisorctl start restore_neighbors']) @@ -307,6 +309,7 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): # restart portsyncd dvs.runcmd(['sh', '-c', 'pkill -x portsyncd']) + time.sleep(1) pubsub = dvs.SubscribeAsicDbObject("SAI_OBJECT_TYPE") dvs.runcmd(['sh', '-c', 'supervisorctl start portsyncd']) @@ -343,7 +346,6 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): intf_tbl._del("Ethernet20") time.sleep(2) - def test_VlanMgrdWarmRestart(self, dvs, testlog): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) @@ -427,6 +429,7 @@ def test_VlanMgrdWarmRestart(self, dvs, testlog): restore_count = swss_get_RestoreCount(dvs, state_db) dvs.runcmd(['sh', '-c', 'pkill -x vlanmgrd']) + time.sleep(1) pubsub = 
dvs.SubscribeAsicDbObject("SAI_OBJECT_TYPE") @@ -1075,7 +1078,6 @@ def test_swss_port_state_syncup(self, dvs, testlog): # ################################################################################ - def test_routing_WarmRestart(self, dvs, testlog): appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -1261,7 +1263,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.100.0/24" - assert rt_val == {"ifname": "Ethernet0", "nexthop": "111.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0" + assert rt_val.get("nexthop") == "111.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1333,7 +1336,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.200.0/24" - assert rt_val == {"ifname": "Ethernet0,Ethernet4,Ethernet8", "nexthop": "111.0.0.2,122.0.0.2,133.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0,Ethernet4,Ethernet8" + assert rt_val.get("nexthop") == "111.0.0.2,122.0.0.2,133.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1406,7 +1410,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.1.3" - assert rt_val == {"ifname": "Ethernet0,Ethernet4,Ethernet8", "nexthop": "111.0.0.2,122.0.0.2,133.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0,Ethernet4,Ethernet8" + assert rt_val.get("nexthop") == "111.0.0.2,122.0.0.2,133.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1444,7 +1449,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = 
json.loads(addobjs[0]['vals']) assert rt_key == "192.168.1.3" - assert rt_val == {"ifname": "Ethernet0,Ethernet4", "nexthop": "111.0.0.2,122.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0,Ethernet4" + assert rt_val.get("nexthop") == "111.0.0.2,122.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1481,7 +1487,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "fc00:4:4::1" - assert rt_val == {"ifname": "Ethernet0", "nexthop": "1110::2"} + assert rt_val.get("ifname") == "Ethernet0" + assert rt_val.get("nexthop") == "1110::2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1579,7 +1586,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.100.0/24" - assert rt_val == {"ifname": "Ethernet0", "nexthop": "111.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0" + assert rt_val.get("nexthop") == "111.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1691,7 +1699,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.100.0/24" - assert rt_val == {"ifname": "Ethernet4", "nexthop": "122.0.0.2"} + assert rt_val.get("ifname") == "Ethernet4" + assert rt_val.get("nexthop") == "122.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -2172,6 +2181,7 @@ def test_VrfMgrdWarmRestart(self, dvs, testlog): (exitcode, vrf_before) = dvs.runcmd(['sh', '-c', "ip link show | grep Vrf"]) dvs.runcmd(['sh', '-c', 'pkill -x vrfmgrd']) + time.sleep(1) pubsub = 
dvs.SubscribeAsicDbObject("SAI_OBJECT_TYPE") From ad8f5e43951d6ab8cb2e777f45f4da5a9245a982 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Wed, 8 Jun 2022 15:06:18 -0700 Subject: [PATCH 17/64] Revert "[Counters] Improve performance by polling only configured ports buffer queue/pg counters (#2143)" (#2315) This reverts commit eba212d9cffa034c8e0fcef6e275fef6cc700604. --- orchagent/bufferorch.cpp | 32 +--- orchagent/flexcounterorch.cpp | 181 +-------------------- orchagent/flexcounterorch.h | 32 ---- orchagent/portsorch.cpp | 262 +++++++----------------------- orchagent/portsorch.h | 20 +-- tests/mock_tests/portsorch_ut.cpp | 13 +- tests/mock_tests/routeorch_ut.cpp | 6 +- tests/test_buffer_traditional.py | 18 +- tests/test_flex_counters.py | 122 +++++++------- tests/test_pg_drop_counter.py | 64 +++++++- tests/test_watermark.py | 29 ++-- 11 files changed, 223 insertions(+), 556 deletions(-) diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index b9fbd096b4..f9b91e7a16 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -1,6 +1,5 @@ #include "tokenize.h" #include "bufferorch.h" -#include "directory.h" #include "logger.h" #include "sai_serialize.h" #include "warm_restart.h" @@ -17,7 +16,6 @@ extern sai_switch_api_t *sai_switch_api; extern sai_buffer_api_t *sai_buffer_api; extern PortsOrch *gPortsOrch; -extern Directory gDirectory; extern sai_object_id_t gSwitchId; #define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" @@ -950,20 +948,6 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return handle_status; } } - // create/remove a port queue counter for the queue buffer - else - { - auto flexCounterOrch = gDirectory.get(); - auto queues = tokens[1]; - if (op == SET_COMMAND && flexCounterOrch->getQueueCountersState()) - { - gPortsOrch->createPortBufferQueueCounters(port, queues); - } - else if (op == DEL_COMMAND && flexCounterOrch->getQueueCountersState()) - { - 
gPortsOrch->removePortBufferQueueCounters(port, queues); - } - } } } } @@ -1023,7 +1007,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup if (op == SET_COMMAND) { ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, - buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { @@ -1103,20 +1087,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return handle_status; } } - // create or remove a port PG counter for the PG buffer - else - { - auto flexCounterOrch = gDirectory.get(); - auto pgs = tokens[1]; - if (op == SET_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) - { - gPortsOrch->createPortBufferPgCounters(port, pgs); - } - else if (op == DEL_COMMAND && flexCounterOrch->getPgWatermarkCountersState()) - { - gPortsOrch->removePortBufferPgCounters(port, pgs); - } - } } } } diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index f16312f750..29563d90a5 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -10,7 +10,6 @@ #include "debugcounterorch.h" #include "directory.h" #include "copporch.h" -#include #include "routeorch.h" #include "flowcounterrouteorch.h" @@ -59,8 +58,6 @@ unordered_map flexCounterGroupMap = FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector &tableNames): Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), - m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), - m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)) { @@ -147,13 +144,11 @@ void FlexCounterOrch::doTask(Consumer &consumer) } else if(key == QUEUE_KEY) { - 
gPortsOrch->generateQueueMap(getQueueConfigurations()); - m_queue_enabled = true; + gPortsOrch->generateQueueMap(); } else if(key == PG_WATERMARK_KEY) { - gPortsOrch->generatePriorityGroupMap(getPgConfigurations()); - m_pg_watermark_enabled = true; + gPortsOrch->generatePriorityGroupMap(); } } if(gIntfsOrch && (key == RIF_KEY) && (value == "enable")) @@ -235,16 +230,6 @@ bool FlexCounterOrch::getPortBufferDropCountersState() const return m_port_buffer_drop_counter_enabled; } -bool FlexCounterOrch::getPgWatermarkCountersState() const -{ - return m_pg_watermark_enabled; -} - -bool FlexCounterOrch::getQueueCountersState() const -{ - return m_queue_enabled; -} - bool FlexCounterOrch::bake() { /* @@ -286,165 +271,3 @@ bool FlexCounterOrch::bake() Consumer* consumer = dynamic_cast(getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); return consumer->addToSync(entries); } - -map FlexCounterOrch::getQueueConfigurations() -{ - SWSS_LOG_ENTER(); - - map queuesStateVector; - std::vector portQueueKeys; - m_bufferQueueConfigTable.getKeys(portQueueKeys); - - for (const auto& portQueueKey : portQueueKeys) - { - auto toks = tokenize(portQueueKey, '|'); - if (toks.size() != 2) - { - SWSS_LOG_ERROR("Invalid BUFFER_QUEUE key: [%s]", portQueueKey.c_str()); - continue; - } - - auto configPortNames = tokenize(toks[0], ','); - auto configPortQueues = toks[1]; - toks = tokenize(configPortQueues, '-'); - - for (const auto& configPortName : configPortNames) - { - uint32_t maxQueueNumber = gPortsOrch->getNumberOfPortSupportedQueueCounters(configPortName); - uint32_t maxQueueIndex = maxQueueNumber - 1; - uint32_t minQueueIndex = 0; - - if (!queuesStateVector.count(configPortName)) - { - FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); - queuesStateVector.insert(make_pair(configPortName, flexCounterQueueState)); - } - - try { - auto startIndex = to_uint(toks[0], minQueueIndex, maxQueueIndex); - if (toks.size() > 1) - { - auto endIndex = to_uint(toks[1], minQueueIndex, maxQueueIndex); - 
queuesStateVector.at(configPortName).enableQueueCounters(startIndex, endIndex); - } - else - { - queuesStateVector.at(configPortName).enableQueueCounter(startIndex); - } - } catch (std::invalid_argument const& e) { - SWSS_LOG_ERROR("Invalid queue index [%s] for port [%s]", configPortQueues.c_str(), configPortName.c_str()); - continue; - } - } - } - - return queuesStateVector; -} - -map FlexCounterOrch::getPgConfigurations() -{ - SWSS_LOG_ENTER(); - - map pgsStateVector; - std::vector portPgKeys; - m_bufferPgConfigTable.getKeys(portPgKeys); - - for (const auto& portPgKey : portPgKeys) - { - auto toks = tokenize(portPgKey, '|'); - if (toks.size() != 2) - { - SWSS_LOG_ERROR("Invalid BUFFER_PG key: [%s]", portPgKey.c_str()); - continue; - } - - auto configPortNames = tokenize(toks[0], ','); - auto configPortPgs = toks[1]; - toks = tokenize(configPortPgs, '-'); - - for (const auto& configPortName : configPortNames) - { - uint32_t maxPgNumber = gPortsOrch->getNumberOfPortSupportedPgCounters(configPortName); - uint32_t maxPgIndex = maxPgNumber - 1; - uint32_t minPgIndex = 0; - - if (!pgsStateVector.count(configPortName)) - { - FlexCounterPgStates flexCounterPgState(maxPgNumber); - pgsStateVector.insert(make_pair(configPortName, flexCounterPgState)); - } - - try { - auto startIndex = to_uint(toks[0], minPgIndex, maxPgIndex); - if (toks.size() > 1) - { - auto endIndex = to_uint(toks[1], minPgIndex, maxPgIndex); - pgsStateVector.at(configPortName).enablePgCounters(startIndex, endIndex); - } - else - { - pgsStateVector.at(configPortName).enablePgCounter(startIndex); - } - } catch (std::invalid_argument const& e) { - SWSS_LOG_ERROR("Invalid pg index [%s] for port [%s]", configPortPgs.c_str(), configPortName.c_str()); - continue; - } - } - } - - return pgsStateVector; -} - -FlexCounterQueueStates::FlexCounterQueueStates(uint32_t maxQueueNumber) -{ - SWSS_LOG_ENTER(); - m_queueStates.resize(maxQueueNumber, false); -} - -bool FlexCounterQueueStates::isQueueCounterEnabled(uint32_t 
index) const -{ - SWSS_LOG_ENTER(); - return m_queueStates[index]; -} - -void FlexCounterQueueStates::enableQueueCounters(uint32_t startIndex, uint32_t endIndex) -{ - SWSS_LOG_ENTER(); - for (uint32_t queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) - { - enableQueueCounter(queueIndex); - } -} - -void FlexCounterQueueStates::enableQueueCounter(uint32_t queueIndex) -{ - SWSS_LOG_ENTER(); - m_queueStates[queueIndex] = true; -} - -FlexCounterPgStates::FlexCounterPgStates(uint32_t maxPgNumber) -{ - SWSS_LOG_ENTER(); - m_pgStates.resize(maxPgNumber, false); -} - -bool FlexCounterPgStates::isPgCounterEnabled(uint32_t index) const -{ - SWSS_LOG_ENTER(); - return m_pgStates[index]; -} - -void FlexCounterPgStates::enablePgCounters(uint32_t startIndex, uint32_t endIndex) -{ - SWSS_LOG_ENTER(); - for (uint32_t pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) - { - enablePgCounter(pgIndex); - } -} - -void FlexCounterPgStates::enablePgCounter(uint32_t pgIndex) -{ - SWSS_LOG_ENTER(); - m_pgStates[pgIndex] = true; -} diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index a8106720da..4f9734c0e2 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -10,30 +10,6 @@ extern "C" { #include "sai.h" } -class FlexCounterQueueStates -{ -public: - FlexCounterQueueStates(uint32_t maxQueueNumber); - bool isQueueCounterEnabled(uint32_t index) const; - void enableQueueCounters(uint32_t startIndex, uint32_t endIndex); - void enableQueueCounter(uint32_t queueIndex); - -private: - std::vector m_queueStates{}; -}; - -class FlexCounterPgStates -{ -public: - FlexCounterPgStates(uint32_t maxPgNumber); - bool isPgCounterEnabled(uint32_t index) const; - void enablePgCounters(uint32_t startIndex, uint32_t endIndex); - void enablePgCounter(uint32_t pgIndex); - -private: - std::vector m_pgStates{}; -}; - class FlexCounterOrch: public Orch { public: @@ -42,10 +18,6 @@ class FlexCounterOrch: public Orch virtual ~FlexCounterOrch(void); bool 
getPortCountersState() const; bool getPortBufferDropCountersState() const; - bool getPgWatermarkCountersState() const; - bool getQueueCountersState() const; - map getQueueConfigurations(); - map getPgConfigurations(); bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;} bool getRouteFlowCountersState() const {return m_route_flow_counter_enabled;} bool bake() override; @@ -55,13 +27,9 @@ class FlexCounterOrch: public Orch std::shared_ptr m_flexCounterGroupTable = nullptr; bool m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; - bool m_pg_watermark_enabled = false; - bool m_queue_enabled = false; bool m_hostif_trap_counter_enabled = false; bool m_route_flow_counter_enabled = false; Table m_flexCounterConfigTable; - Table m_bufferQueueConfigTable; - Table m_bufferPgConfigTable; }; #endif diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index dc1be1b3b6..6d06c6318f 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -2457,6 +2457,18 @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde port_buffer_drop_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_buffer_drop_stats); } + /* when a port is added and priority group map counter is enabled --> we need to add pg counter for it */ + if (m_isPriorityGroupMapGenerated) + { + generatePriorityGroupMapPerPort(p); + } + + /* when a port is added and queue map counter is enabled --> we need to add queue map counter for it */ + if (m_isQueueMapGenerated) + { + generateQueueMapPerPort(p); + } + PortUpdate update = { p, true }; notify(SUBJECT_TYPE_PORT_CHANGE, static_cast(&update)); @@ -2509,6 +2521,18 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) port_buffer_drop_stat_manager.clearCounterIdList(p.m_port_id); } + /* remove pg port counters */ + if (m_isPriorityGroupMapGenerated) + { + removePriorityGroupMapPerPort(p); + } + + /* remove queue port counters */ + if 
(m_isQueueMapGenerated) + { + removeQueueMapPerPort(p); + } + /* remove port name map from counter table */ m_counterTable->hdel("", alias); @@ -5463,7 +5487,7 @@ bool PortsOrch::removeTunnel(Port tunnel) return true; } -void PortsOrch::generateQueueMap(map queuesStateVector) +void PortsOrch::generateQueueMap() { if (m_isQueueMapGenerated) { @@ -5474,87 +5498,53 @@ void PortsOrch::generateQueueMap(map queuesState { if (it.second.m_type == Port::PHY) { - if (!queuesStateVector.count(it.second.m_alias)) - { - auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); - FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); - queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); - } - generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias)); + generateQueueMapPerPort(it.second); } } m_isQueueMapGenerated = true; } -void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState) +void PortsOrch::removeQueueMapPerPort(const Port& port) { - /* Create the Queue map in the Counter DB */ - /* Add stat counters to flex_counter */ - vector queueVector; - vector queuePortVector; - vector queueIndexVector; - vector queueTypeVector; + /* Remove the Queue map in the Counter DB */ for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) { std::ostringstream name; name << port.m_alias << ":" << queueIndex; + std::unordered_set counter_stats; const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + m_queueTable->hdel("",name.str()); + m_queuePortTable->hdel("",id); + string queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { - if (!queuesState.isQueueCounterEnabled(queueRealIndex)) - { - continue; - } - queueTypeVector.emplace_back(id, queueType); - queueIndexVector.emplace_back(id, to_string(queueRealIndex)); + m_queueTypeTable->hdel("",id); + 
m_queueIndexTable->hdel("",id); } - queueVector.emplace_back(name.str(), id); - queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); - - // Install a flex counter for this queue to track stats - std::unordered_set counter_stats; for (const auto& it: queue_stat_ids) { counter_stats.emplace(sai_serialize_queue_stat(it)); } - queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); - /* add watermark queue counters */ + /* remove watermark queue counters */ string key = getQueueWatermarkFlexCounterTableKey(id); - string delimiter(""); - std::ostringstream counters_stream; - for (const auto& it: queueWatermarkStatIds) - { - counters_stream << delimiter << sai_serialize_queue_stat(it); - delimiter = comma; - } - - vector fieldValues; - fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); - - m_flexCounterTable->set(key, fieldValues); + m_flexCounterTable->del(key); } - m_queueTable->set("", queueVector); - m_queuePortTable->set("", queuePortVector); - m_queueIndexTable->set("", queueIndexVector); - m_queueTypeTable->set("", queueTypeVector); - - CounterCheckOrch::getInstance().addPort(port); + CounterCheckOrch::getInstance().removePort(port); } -void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) +void PortsOrch::generateQueueMapPerPort(const Port& port) { - SWSS_LOG_ENTER(); - /* Create the Queue map in the Counter DB */ /* Add stat counters to flex_counter */ vector queueVector; @@ -5562,21 +5552,16 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) vector queueIndexVector; vector queueTypeVector; - auto toks = tokenize(queues, '-'); - auto startIndex = to_uint(toks[0]); - auto endIndex = startIndex; - if (toks.size() > 1) - { - endIndex = to_uint(toks[1]); - } - - for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + for (size_t 
queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) { std::ostringstream name; name << port.m_alias << ":" << queueIndex; const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + queueVector.emplace_back(name.str(), id); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + string queueType; uint8_t queueRealIndex = 0; if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) @@ -5585,9 +5570,6 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } - queueVector.emplace_back(name.str(), id); - queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); - // Install a flex counter for this queue to track stats std::unordered_set counter_stats; for (const auto& it: queue_stat_ids) @@ -5621,42 +5603,7 @@ void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) -{ - SWSS_LOG_ENTER(); - - /* Remove the Queues maps in the Counter DB */ - /* Remove stat counters from flex_counter DB */ - auto toks = tokenize(queues, '-'); - auto startIndex = to_uint(toks[0]); - auto endIndex = startIndex; - if (toks.size() > 1) - { - endIndex = to_uint(toks[1]); - } - - for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) - { - std::ostringstream name; - name << port.m_alias << ":" << queueIndex; - const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); - - /* Remove watermark queue counters */ - string key = getQueueWatermarkFlexCounterTableKey(id); - m_flexCounterTable->del(key); - - // Remove the flex counter for this queue - queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); - - // Remove the queue counter from counters DB maps - m_queueTable->hdel("", name.str()); - m_queuePortTable->hdel("", id); - 
m_queueIndexTable->hdel("", id); - m_queueTypeTable->hdel("", id); - } -} - -void PortsOrch::generatePriorityGroupMap(map pgsStateVector) +void PortsOrch::generatePriorityGroupMap() { if (m_isPriorityGroupMapGenerated) { @@ -5667,100 +5614,48 @@ void PortsOrch::generatePriorityGroupMap(map pgsSta { if (it.second.m_type == Port::PHY) { - if (!pgsStateVector.count(it.second.m_alias)) - { - auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); - FlexCounterPgStates flexCounterPgState(maxPgNumber); - pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); - } - generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias)); + generatePriorityGroupMapPerPort(it.second); } } m_isPriorityGroupMapGenerated = true; } -void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) +void PortsOrch::removePriorityGroupMapPerPort(const Port& port) { - /* Create the PG map in the Counter DB */ - /* Add stat counters to flex_counter */ - vector pgVector; - vector pgPortVector; - vector pgIndexVector; + /* Remove the PG map in the Counter DB */ for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) { - if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) - { - continue; - } std::ostringstream name; name << port.m_alias << ":" << pgIndex; const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - - pgVector.emplace_back(name.str(), id); - pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); - pgIndexVector.emplace_back(id, to_string(pgIndex)); - string key = getPriorityGroupWatermarkFlexCounterTableKey(id); - std::string delimiter = ""; - std::ostringstream counters_stream; - /* Add watermark counters to flex_counter */ - for (const auto& it: ingressPriorityGroupWatermarkStatIds) - { - counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - delimiter = comma; - } + m_pgTable->hdel("",name.str()); + 
m_pgPortTable->hdel("",id); + m_pgIndexTable->hdel("",id); - vector fieldValues; - fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); + m_flexCounterTable->del(key); - delimiter = ""; - std::ostringstream ingress_pg_drop_packets_counters_stream; key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* Add dropped packets counters to flex_counter */ - for (const auto& it: ingressPriorityGroupDropStatIds) - { - ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - if (delimiter.empty()) - { - delimiter = comma; - } - } - fieldValues.clear(); - fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); + /* remove dropped packets counters to flex_counter */ + m_flexCounterTable->del(key); } - m_pgTable->set("", pgVector); - m_pgPortTable->set("", pgPortVector); - m_pgIndexTable->set("", pgIndexVector); - - CounterCheckOrch::getInstance().addPort(port); + CounterCheckOrch::getInstance().removePort(port); } -void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) { - SWSS_LOG_ENTER(); - /* Create the PG map in the Counter DB */ /* Add stat counters to flex_counter */ vector pgVector; vector pgPortVector; vector pgIndexVector; - auto toks = tokenize(pgs, '-'); - auto startIndex = to_uint(toks[0]); - auto endIndex = startIndex; - if (toks.size() > 1) - { - endIndex = to_uint(toks[1]); - } - - for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) { std::ostringstream name; name << port.m_alias << ":" << pgIndex; @@ -5810,41 +5705,6 @@ void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::removePortBufferPgCounters(const 
Port& port, string pgs) -{ - SWSS_LOG_ENTER(); - - /* Remove the Pgs maps in the Counter DB */ - /* Remove stat counters from flex_counter DB */ - auto toks = tokenize(pgs, '-'); - auto startIndex = to_uint(toks[0]); - auto endIndex = startIndex; - if (toks.size() > 1) - { - endIndex = to_uint(toks[1]); - } - - for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) - { - std::ostringstream name; - name << port.m_alias << ":" << pgIndex; - const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); - - /* Remove dropped packets counters from flex_counter */ - string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - m_flexCounterTable->del(key); - - /* Remove watermark counters from flex_counter */ - key = getPriorityGroupWatermarkFlexCounterTableKey(id); - m_flexCounterTable->del(key); - - // Remove the pg counter from counters DB maps - m_pgTable->hdel("", name.str()); - m_pgPortTable->hdel("", id); - m_pgIndexTable->hdel("", id); - } -} - void PortsOrch::generatePortCounterMap() { if (m_isPortCounterMapGenerated) @@ -5895,16 +5755,6 @@ void PortsOrch::generatePortBufferDropCounterMap() m_isPortBufferDropCounterMapGenerated = true; } -uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port) -{ - return static_cast(m_portList[port].m_priority_group_ids.size()); -} - -uint32_t PortsOrch::getNumberOfPortSupportedQueueCounters(string port) -{ - return static_cast(m_portList[port].m_queue_ids.size()); -} - void PortsOrch::doTask(NotificationConsumer &consumer) { SWSS_LOG_ENTER(); diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 6291231ae7..ab35277d80 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -128,17 +128,9 @@ class PortsOrch : public Orch, public Subject bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask); bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask); - - void generateQueueMap(map queuesStateVector); - uint32_t 
getNumberOfPortSupportedQueueCounters(string port); - void createPortBufferQueueCounters(const Port &port, string queues); - void removePortBufferQueueCounters(const Port &port, string queues); - - void generatePriorityGroupMap(map pgsStateVector); - uint32_t getNumberOfPortSupportedPgCounters(string port); - void createPortBufferPgCounters(const Port &port, string pgs); - void removePortBufferPgCounters(const Port& port, string pgs); + void generateQueueMap(); + void generatePriorityGroupMap(); void generatePortCounterMap(); void generatePortBufferDropCounterMap(); @@ -333,9 +325,13 @@ class PortsOrch : public Orch, public Subject bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); bool m_isQueueMapGenerated = false; - void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState); + void generateQueueMapPerPort(const Port& port); + void removeQueueMapPerPort(const Port& port); + bool m_isPriorityGroupMapGenerated = false; - void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState); + void generatePriorityGroupMapPerPort(const Port& port); + void removePriorityGroupMapPerPort(const Port& port); + bool m_isPortCounterMapGenerated = false; bool m_isPortBufferDropCounterMapGenerated = false; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 7d867396d2..28df6610fd 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -163,14 +163,13 @@ namespace portsorch_test ASSERT_EQ(gPortsOrch, nullptr); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); vector buffer_tables = { 
APP_BUFFER_POOL_TABLE_NAME, APP_BUFFER_PROFILE_TABLE_NAME, APP_BUFFER_QUEUE_TABLE_NAME, @@ -863,7 +862,7 @@ namespace portsorch_test * updated to DB. */ TEST_F(PortsOrchTest, PortOperStatusIsUpAndOperSpeedIsZero) - { + { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); // Get SAI default ports to populate DB @@ -888,7 +887,7 @@ namespace portsorch_test Port port; gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); - + // save original api since we will spy auto orig_port_api = sai_port_api; sai_port_api = new sai_port_api_t(); @@ -906,14 +905,14 @@ namespace portsorch_test // Return 0 for port operational speed attrs[0].value.u32 = 0; } - + return (sai_status_t)SAI_STATUS_SUCCESS; } ); auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); auto consumer = exec->getNotificationConsumer(); - + // mock a redis reply for notification, it notifies that Ehernet0 is going to up mockReply = (redisReply *)calloc(sizeof(redisReply), 1); mockReply->type = REDIS_REPLY_ARRAY; @@ -935,7 +934,7 @@ namespace portsorch_test // trigger the notification consumer->readData(); gPortsOrch->doTask(*consumer); - mockReply = nullptr; + mockReply = nullptr; gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 2c1c4b8535..66df4bfbcc 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -176,15 +176,15 @@ namespace routeorch_test { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } }; - ASSERT_EQ(gPortsOrch, nullptr); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch 
= new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + static const vector route_pattern_tables = { CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, }; diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 071217b4e3..3d2285fd7b 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -77,15 +77,16 @@ def get_pg_name_map(self): @pytest.fixture def setup_teardown_test(self, dvs): - self.setup_db(dvs) - self.set_port_qos_table(self.INTF, '2,3,4,6') - time.sleep(2) - - yield - - self.teardown() + try: + self.setup_db(dvs) + self.set_port_qos_table(self.INTF, '2,3,4,6') + pg_name_map = self.get_pg_name_map() + yield pg_name_map + finally: + self.teardown() def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): + self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None try: @@ -111,7 +112,6 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_before_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) - self.pg_name_map = self.get_pg_name_map() self.orig_profiles = self.get_asic_buf_profile() # check if the lossless profile for the test speed is already present @@ -174,6 +174,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # To verify the BUFFER_PG is not hardcoded to 3,4 # buffermgrd will read 'pfc_enable' entry and apply lossless profile to that queue def test_buffer_pg_update(self, dvs, setup_teardown_test): + self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None test_speed = None @@ -202,7 +203,6 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_for_test) 
self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) - self.pg_name_map = self.get_pg_name_map() self.orig_profiles = self.get_asic_buf_profile() # get the orig buf profiles attached to the pgs diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index f5a0b146b2..76a1a535f9 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -7,6 +7,8 @@ ROUTE_TO_PATTERN_MAP = "COUNTERS_ROUTE_TO_PATTERN_MAP" NUMBER_OF_RETRIES = 10 CPU_PORT_OID = "0x0" +PORT = "Ethernet0" +PORT_MAP = "COUNTERS_PORT_NAME_MAP" counter_group_meta = { 'port_counter': { @@ -71,6 +73,7 @@ } } +@pytest.mark.usefixtures('dvs_port_manager') class TestFlexCounters(object): def setup_dbs(self, dvs): @@ -130,18 +133,6 @@ def wait_for_interval_set(self, group, interval): assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) - def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): - for retry in range(NUMBER_OF_RETRIES): - counter_oid = self.counters_db.db_connection.hget(map, port + ':' + index) - if (isSet and counter_oid): - return counter_oid - elif (not isSet and not counter_oid): - return None - else: - time.sleep(1) - - assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index) - def verify_no_flex_counters_tables(self, counter_stat): counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" @@ -701,53 +692,64 @@ def remove_ip_address(self, interface, ip): def set_admin_status(self, interface, status): self.config_db.update_entry("PORT", interface, {"admin_status": status}) - - def test_create_remove_buffer_pg_counter(self, dvs): - """ - Test steps: - 1. Enable PG flex counters. - 2. 
Configure new buffer prioriy group for a port - 3. Verify counter is automatically created - 4. Remove the new buffer prioriy group for the port - 5. Verify counter is automatically removed - - Args: - dvs (object): virtual switch object - """ - self.setup_dbs(dvs) - meta_data = counter_group_meta['pg_watermark_counter'] - - self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) - - self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) - counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', True) - self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) - - self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') - self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False) - self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) - - def test_create_remove_buffer_queue_counter(self, dvs): - """ - Test steps: - 1. Enable Queue flex counters. - 2. Configure new buffer queue for a port - 3. Verify counter is automatically created - 4. Remove the new buffer queue for the port - 5. 
Verify counter is automatically removed - - Args: - dvs (object): virtual switch object - """ + + def test_add_remove_ports(self, dvs): self.setup_dbs(dvs) - meta_data = counter_group_meta['queue_counter'] - - self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) - - self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) - counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) - self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + # set flex counter + counter_key = counter_group_meta['queue_counter']['key'] + counter_stat = counter_group_meta['queue_counter']['group_name'] + counter_map = counter_group_meta['queue_counter']['name_map'] + self.set_flex_counter_group_status(counter_key, counter_map) - self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') - self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) - self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + # receive port info + fvs = self.config_db.get_entry("PORT", PORT) + assert len(fvs) > 0 + + # save all the oids of the pg drop counters + oid_list = [] + counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") + for key, oid in counters_queue_map.items(): + if PORT in key: + oid_list.append(oid) + fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) + assert len(fields) == 1 + oid_list_len = len(oid_list) + + # get port oid + port_oid = self.counters_db.get_entry(PORT_MAP, "")[PORT] + + # remove port and verify that it was removed properly + self.dvs_port.remove_port(PORT) + dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) + + # verify counters were removed from flex counter table + for oid in oid_list: + fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) + assert len(fields) == 0 + + # verify 
that port counter maps were removed from counters db + counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") + for key in counters_queue_map.keys(): + if PORT in key: + assert False + + # add port and wait until the port is added on asic db + num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + + self.config_db.create_entry("PORT", PORT, fvs) + + dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) + dvs.get_counters_db().wait_for_fields("COUNTERS_QUEUE_NAME_MAP", "", ["%s:0"%(PORT)]) + + # verify queue counters were added + oid_list = [] + counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") + + for key, oid in counters_queue_map.items(): + if PORT in key: + oid_list.append(oid) + fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) + assert len(fields) == 1 + # the number of the oids needs to be the same as the original number of oids (before removing a port and adding) + assert oid_list_len == len(oid_list) diff --git a/tests/test_pg_drop_counter.py b/tests/test_pg_drop_counter.py index 6d97af5f5c..b3682881de 100644 --- a/tests/test_pg_drop_counter.py +++ b/tests/test_pg_drop_counter.py @@ -2,12 +2,16 @@ import re import time import json +import pytest import redis from swsscommon import swsscommon pg_drop_attr = "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS" +PORT = "Ethernet0" + +@pytest.mark.usefixtures('dvs_port_manager') class TestPGDropCounter(object): DEFAULT_POLL_INTERVAL = 10 pgs = {} @@ -57,11 +61,14 @@ def verify_value(self, dvs, obj_ids, entry_name, expected_value): assert found, "entry name %s not found" % (entry_name) def set_up_flex_counter(self): + pg_stats_entry = {"PG_COUNTER_ID_LIST": "{}".format(pg_drop_attr)} + for pg in self.pgs: + self.flex_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP_STAT_COUNTER:{}".format(pg), pg_stats_entry) + fc_status_enable = 
{"FLEX_COUNTER_STATUS": "enable"} + self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP", fc_status_enable) self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) - # Wait for DB's to populate by orchagent - time.sleep(2) def clear_flex_counter(self): for pg in self.pgs: @@ -72,12 +79,10 @@ def clear_flex_counter(self): def test_pg_drop_counters(self, dvs): self.setup_dbs(dvs) - self.set_up_flex_counter() - # Get all configured counters OID's - self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() - assert self.pgs is not None and len(self.pgs) > 0 - + self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") try: + self.set_up_flex_counter() + self.populate_asic(dvs, "0") time.sleep(self.DEFAULT_POLL_INTERVAL) self.verify_value(dvs, self.pgs, pg_drop_attr, "0") @@ -92,3 +97,48 @@ def test_pg_drop_counters(self, dvs): finally: self.clear_flex_counter() + def test_pg_drop_counter_port_add_remove(self, dvs): + self.setup_dbs(dvs) + + try: + # configure pg drop flex counter + self.set_up_flex_counter() + + # receive port info + fvs = self.config_db.get_entry("PORT", PORT) + assert len(fvs) > 0 + + # save all the oids of the pg drop counters + oid_list = [] + for priority in range(0,7): + oid_list.append(dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)]) + # verify that counters exists on flex counter + fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid_list[-1]) + assert len(fields) == 1 + + # remove port + port_oid = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")[PORT] + self.dvs_port.remove_port(PORT) + dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) + + # verify counters were removed from flex counter table + for oid in oid_list: + fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) + assert len(fields) == 0 + + # 
add port and wait until the port is added on asic db + num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + self.config_db.create_entry("PORT", PORT, fvs) + dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) + dvs.get_counters_db().wait_for_fields("COUNTERS_PG_NAME_MAP", "", ["%s:0"%(PORT)]) + + # verify counter was added + for priority in range(0,7): + oid = dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)] + + # verify that counters exists on flex counter + fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) + assert len(fields) == 1 + + finally: + self.clear_flex_counter() diff --git a/tests/test_watermark.py b/tests/test_watermark.py index a8cee70aa1..23efedcb42 100644 --- a/tests/test_watermark.py +++ b/tests/test_watermark.py @@ -104,8 +104,22 @@ def verify_value(self, dvs, obj_ids, table_name, watermark_name, expected_value) assert found, "no such watermark found" def set_up_flex_counter(self, dvs): - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} + for q in self.qs: + self.flex_db.create_entry("FLEX_COUNTER_TABLE", + "QUEUE_WATERMARK_STAT_COUNTER:{}".format(q), + WmFCEntry.queue_stats_entry) + + for pg in self.pgs: + self.flex_db.create_entry("FLEX_COUNTER_TABLE", + "PG_WATERMARK_STAT_COUNTER:{}".format(pg), + WmFCEntry.pg_stats_entry) + + for buffer in self.buffers: + self.flex_db.create_entry("FLEX_COUNTER_TABLE", + "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(buffer), + WmFCEntry.buffer_stats_entry) + fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) @@ -116,8 +130,7 @@ def set_up_flex_counter(self, dvs): "BUFFER_POOL_WATERMARK", fc_status_enable) - # Wait for DB's to populate by orchagent - time.sleep(2) + self.populate_asic_all(dvs, "0") def clear_flex_counter(self, dvs): for q in self.qs: @@ 
-137,14 +150,10 @@ def clear_flex_counter(self, dvs): self.config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK") def set_up(self, dvs): - self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() - assert self.pgs is not None and len(self.pgs) > 0 - self.qs = self.counters_db.db_connection.hgetall("COUNTERS_QUEUE_NAME_MAP").values() - assert self.qs is not None and len(self.pgs) > 0 + self.qs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_QUEUE") + self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") self.buffers = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_POOL") - self.populate_asic_all(dvs, "0") - db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(db, "COUNTERS_QUEUE_TYPE_MAP") @@ -171,9 +180,9 @@ def clear_watermark(self, dvs, data): def test_telemetry_period(self, dvs): self.setup_dbs(dvs) - self.set_up_flex_counter(dvs) self.set_up(dvs) try: + self.set_up_flex_counter(dvs) self.enable_unittests(dvs, "true") self.populate_asic_all(dvs, "100") From a3f4fbb446c932c210f6c12a5ff09affccb0c6d7 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Thu, 9 Jun 2022 19:51:21 +0800 Subject: [PATCH 18/64] Combine PGs in buffermgrd (#2281) * Combine PG3 and PG4 to PG3-4 Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 32 +++++++++++++++++++++++++++----- tests/test_buffer_traditional.py | 16 ++++++++-------- 2 files changed, 35 insertions(+), 13 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 5c7d6ae9e6..c81980e363 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -11,6 +11,7 @@ #include "exec.h" #include "shellcmd.h" #include "warm_restart.h" +#include "converter.h" using namespace std; using namespace swss; @@ -175,10 +176,27 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) string profile_ref = buffer_profile_key; vector lossless_pgs = 
tokenize(pfc_enable, ','); + // Convert to bitmap + unsigned long lossless_pg_id = 0; + for (auto pg : lossless_pgs) + { + try + { + uint8_t cur_pg = to_uint(pg); + lossless_pg_id |= (1< lossless_pg_combinations = generateIdListFromMap(lossless_pg_id, sizeof(lossless_pg_id)); if (m_portStatusLookup[port] == "down" && m_platform == "mellanox") { - for (auto lossless_pg : lossless_pgs) + for (auto lossless_pg : lossless_pg_combinations) { // Remove the entry in BUFFER_PG table if any vector fvVectorPg; @@ -251,23 +269,27 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port) SWSS_LOG_NOTICE("Reusing existing profile '%s'", buffer_profile_key.c_str()); } - for (auto lossless_pg : lossless_pgs) + for (auto lossless_pg : lossless_pg_combinations) { vector fvVectorPg; string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); - + bool profile_existing = false; /* Check if PG Mapping is already then log message and return. 
*/ for (auto& prop : fvVectorPg) { if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop))) { SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str()); - continue; + profile_existing = true; + break; } } - + if (profile_existing) + { + continue; + } fvVectorPg.clear(); fvVectorPg.push_back(make_pair("profile", profile_ref)); diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 3d2285fd7b..31d1afbbd8 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -79,14 +79,12 @@ def get_pg_name_map(self): def setup_teardown_test(self, dvs): try: self.setup_db(dvs) - self.set_port_qos_table(self.INTF, '2,3,4,6') - pg_name_map = self.get_pg_name_map() - yield pg_name_map + self.set_port_qos_table(self.INTF, '3,4') + self.lossless_pg_combinations = ['3-4'] finally: self.teardown() def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): - self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None try: @@ -112,6 +110,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_before_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.pg_name_map = self.get_pg_name_map() self.orig_profiles = self.get_asic_buf_profile() # check if the lossless profile for the test speed is already present @@ -136,7 +135,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) # buffer pgs should still point to the original buffer profile - for pg in self.lossless_pgs: + for pg in self.lossless_pg_combinations: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) fvs = dict() for pg in self.pg_name_map: @@ -174,7 +173,6 @@ 
def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # To verify the BUFFER_PG is not hardcoded to 3,4 # buffermgrd will read 'pfc_enable' entry and apply lossless profile to that queue def test_buffer_pg_update(self, dvs, setup_teardown_test): - self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None test_speed = None @@ -203,6 +201,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_for_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.pg_name_map = self.get_pg_name_map() self.orig_profiles = self.get_asic_buf_profile() # get the orig buf profiles attached to the pgs @@ -221,7 +220,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) # Verify BUFFER_PG is updated - for pg in self.lossless_pgs: + for pg in self.lossless_pg_combinations: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": new_lossless_profile}) fvs_negative = {} @@ -232,9 +231,10 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): # Add pfc_enable field for extra port self.set_port_qos_table(extra_port, '2,3,4,6') + self.lossless_pg_combinations = ['2-4', '6'] time.sleep(1) # Verify BUFFER_PG is updated when pfc_enable is available - for pg in self.lossless_pgs: + for pg in self.lossless_pg_combinations: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", extra_port + ":" + pg, {"profile": new_lossless_profile}) finally: if orig_cable_len: From b12af413e9a66094101f3c57aa4bc62001d520e1 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Thu, 9 Jun 2022 18:50:02 -0700 Subject: [PATCH 19/64] [fpmsyncd] don't manipulate route weight (#2320) Return the next hop weight obtained from kernel as-is. 
Sample next hop weight: admin@svcstr-7050-acs-4:~$ ip route show 193.11.248.128/25 193.11.248.128/25 nhid 1452 proto bgp src 10.1.0.33 metric 20 nexthop via 10.0.1.61 dev PortChannel103 weight 1 nexthop via 10.0.1.63 dev PortChannel104 weight 1 nexthop via 10.0.1.59 dev PortChannel102 weight 1 nexthop via 10.0.1.57 dev PortChannel101 weight 1 Signed-off-by: Ying Xie --- fpmsyncd/routesync.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fpmsyncd/routesync.cpp b/fpmsyncd/routesync.cpp index 6a128a0784..ab5868cdcf 100644 --- a/fpmsyncd/routesync.cpp +++ b/fpmsyncd/routesync.cpp @@ -1208,7 +1208,7 @@ string RouteSync::getNextHopWt(struct rtnl_route *route_obj) uint8_t weight = rtnl_route_nh_get_weight(nexthop); if (weight) { - result += to_string(weight + 1); + result += to_string(weight); } else { From bf4d89082ad413ef71eb40a8ff2f279b87f1b002 Mon Sep 17 00:00:00 2001 From: Myron Sosyak Date: Mon, 13 Jun 2022 08:58:45 -0700 Subject: [PATCH 20/64] Fix key generation in removeDecapTunnel (#2322) *After the latest changes in this PR #2190 an issue was introduced. When the tunnel was deleted the TunnelTermEntries were deleted from ASIC but not from the OA cache. Due to that then the same tunnel is created the TunnelTermEntries are not created as OA has it in local cache. 
Signed-off-by: Myron Sosyak --- orchagent/tunneldecaporch.cpp | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 91744f3323..e84ba315c4 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -67,7 +67,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { tunnel_id = tunnelTable[key].tunnel_id; } - + if (op == SET_COMMAND) { @@ -240,11 +240,11 @@ void TunnelDecapOrch::doTask(Consumer& consumer) ++it; continue; } - + //create new tunnel if it doesn't exists already if (valid && !exists) { - + if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, dscp_to_tc_map_id, tc_to_pg_map_id)) { @@ -427,7 +427,7 @@ bool TunnelDecapOrch::addDecapTunnel( attr.value.oid = tc_to_pg_map_id; tunnel_attrs.push_back(attr); } - + // write attributes to ASIC_DB sai_object_id_t tunnel_id; status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); @@ -669,8 +669,8 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, sai_object_id_t value, sa { // TC remapping. 
attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; - attr.value.oid = value; - + attr.value.oid = value; + } else if (field == decap_tc_to_pg_field_name) { @@ -763,7 +763,16 @@ bool TunnelDecapOrch::removeDecapTunnel(string table_name, string key) for (auto it = tunnel_info->tunnel_term_info.begin(); it != tunnel_info->tunnel_term_info.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - string term_key = tunnel_entry_info.src_ip + '-' + tunnel_entry_info.dst_ip; + string term_key; + swss::IpAddress src_ip(tunnel_entry_info.src_ip); + if (!src_ip.isZero()) + { + term_key = src_ip.to_string() + '-' + tunnel_entry_info.dst_ip; + } + else + { + term_key = tunnel_entry_info.dst_ip; + } if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, term_key)) { return false; From 3c3bb17d6494a65bbec6723a8eaee2042c938ea2 Mon Sep 17 00:00:00 2001 From: Alexander Allen Date: Tue, 14 Jun 2022 14:43:26 -0400 Subject: [PATCH 21/64] [crmorch] Prevent exceededLogCounter from resetting when low and high values are equal (#2327) * [crmorch] Prevent exceededLogCounter from resetting when low and high values are equal --- orchagent/crmorch.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orchagent/crmorch.cpp b/orchagent/crmorch.cpp index 65dad8e98c..f5e864a357 100644 --- a/orchagent/crmorch.cpp +++ b/orchagent/crmorch.cpp @@ -768,7 +768,7 @@ void CrmOrch::checkCrmThresholds() res.exceededLogCounter++; } - else if ((utilization <= res.lowThreshold) && (res.exceededLogCounter > 0)) + else if ((utilization <= res.lowThreshold) && (res.exceededLogCounter > 0) && (res.highThreshold != res.lowThreshold)) { SWSS_LOG_WARN("%s THRESHOLD_CLEAR for %s %u%% Used count %u free count %u", res.name.c_str(), threshType.c_str(), percentageUtil, cnt.usedCounter, cnt.availableCounter); From 43743483da4331e657f2ab8b2279cdd098f5d2c5 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Wed, 15 Jun 2022 15:41:36 +0800 Subject: 
[PATCH 22/64] Apply `DSCP_TO_TC_MAP` from `PORT_QOS_MAP|global` to switch level (#2314) * Apply DSCP_TO_TC_MAP|AZURE to switch level Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 8 ++ orchagent/qosorch.cpp | 156 +++++++++++++++++++++----------- orchagent/qosorch.h | 9 +- tests/mock_tests/qosorch_ut.cpp | 41 +++++++-- tests/test_qos_map.py | 70 ++++++++++++++ 5 files changed, 214 insertions(+), 70 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index c81980e363..d8faa1033b 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -16,6 +16,8 @@ using namespace std; using namespace swss; +#define PORT_NAME_GLOBAL "global" + BufferMgr::BufferMgr(DBConnector *cfgDb, DBConnector *applDb, string pg_lookup_file, const vector &tableNames) : Orch(cfgDb, tableNames), m_cfgPortTable(cfgDb, CFG_PORT_TABLE_NAME), @@ -413,6 +415,12 @@ void BufferMgr::doPortQosTableTask(Consumer &consumer) { KeyOpFieldsValuesTuple tuple = it->second; string port_name = kfvKey(tuple); + if (port_name == PORT_NAME_GLOBAL) + { + // Ignore the entry for global level + it = consumer.m_toSync.erase(it); + continue; + } string op = kfvOp(tuple); if (op == SET_COMMAND) { diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 5cb9d8ad2e..274003bd72 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -104,6 +104,8 @@ map qos_to_ref_table_map = { #define DSCP_MAX_VAL 63 #define EXP_MAX_VAL 7 +#define PORT_NAME_GLOBAL "global" + task_process_status QosMapHandler::processWorkItem(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); @@ -236,37 +238,6 @@ bool DscpToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple & return true; } -void DscpToTcMapHandler::applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t map_id) -{ - SWSS_LOG_ENTER(); - bool rv = true; - - /* Query DSCP_TO_TC QoS map at switch capability */ - rv = gSwitchOrch->querySwitchDscpToTcCapability(SAI_OBJECT_TYPE_SWITCH, 
SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP); - if (rv == false) - { - SWSS_LOG_ERROR("Switch level DSCP to TC QoS map configuration is not supported"); - return; - } - - /* Apply DSCP_TO_TC QoS map at switch */ - sai_attribute_t attr; - attr.id = attr_id; - attr.value.oid = map_id; - - sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to apply DSCP_TO_TC QoS map to switch rv:%d", status); - return; - } - - if (map_id != gQosOrch->m_globalDscpToTcMap) - gQosOrch->m_globalDscpToTcMap = map_id; - - SWSS_LOG_NOTICE("Applied DSCP_TO_TC QoS map to switch successfully"); -} - sai_object_id_t DscpToTcMapHandler::addQosItem(const vector &attributes) { SWSS_LOG_ENTER(); @@ -292,8 +263,6 @@ sai_object_id_t DscpToTcMapHandler::addQosItem(const vector &at } SWSS_LOG_DEBUG("created QosMap object:%" PRIx64, sai_object); - applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, sai_object); - return sai_object; } @@ -301,27 +270,6 @@ bool DscpToTcMapHandler::removeQosItem(sai_object_id_t sai_object) { SWSS_LOG_ENTER(); - if (sai_object == gQosOrch->m_globalDscpToTcMap) - { - // The current global dscp to tc map is about to be removed. 
- // Find another one to set to the switch or NULL in case this is the last one - const auto &dscpToTcObjects = (*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]); - bool found = false; - for (const auto &ref : dscpToTcObjects) - { - if (ref.second.m_saiObjectId == sai_object) - continue; - SWSS_LOG_NOTICE("Current global dscp_to_tc map is about to be removed, set it to %s %" PRIx64, ref.first.c_str(), ref.second.m_saiObjectId); - applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, ref.second.m_saiObjectId); - found = true; - break; - } - if (!found) - { - applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, SAI_NULL_OBJECT_ID); - } - } - SWSS_LOG_DEBUG("Removing DscpToTcMap object:%" PRIx64, sai_object); sai_status_t sai_status = sai_qos_map_api->remove_qos_map(sai_object); if (SAI_STATUS_SUCCESS != sai_status) @@ -1717,12 +1665,112 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsVal return task_process_status::task_success; } +bool QosOrch::applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t map_id) +{ + SWSS_LOG_ENTER(); + + /* Query DSCP_TO_TC QoS map at switch capability */ + bool rv = gSwitchOrch->querySwitchDscpToTcCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP); + if (rv == false) + { + SWSS_LOG_ERROR("Switch level DSCP to TC QoS map configuration is not supported"); + return true; + } + + /* Apply DSCP_TO_TC QoS map at switch */ + sai_attribute_t attr; + attr.id = attr_id; + attr.value.oid = map_id; + + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to apply DSCP_TO_TC QoS map to switch rv:%d", status); + return false; + } + + SWSS_LOG_NOTICE("Applied DSCP_TO_TC QoS map to switch successfully"); + return true; +} + +task_process_status QosOrch::handleGlobalQosMap(const string &OP, KeyOpFieldsValuesTuple &tuple) +{ + SWSS_LOG_ENTER(); + + task_process_status task_status = 
task_process_status::task_success; + + if (OP == DEL_COMMAND) + { + string referenced_obj; + if (!doesObjectExist(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, PORT_NAME_GLOBAL, dscp_to_tc_field_name, referenced_obj)) + { + return task_status; + } + // Set SAI_NULL_OBJECT_ID to switch level if PORT_QOS_MAP|global is removed + if (applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, SAI_NULL_OBJECT_ID)) + { + removeObject(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, PORT_NAME_GLOBAL); + task_status = task_process_status::task_success; + SWSS_LOG_INFO("Global QoS map type %s is removed", dscp_to_tc_field_name.c_str()); + } + else + { + task_status = task_process_status::task_failed; + SWSS_LOG_WARN("Failed to remove switch level QoS map type %s", dscp_to_tc_field_name.c_str()); + } + return task_status; + } + + for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++) + { + string map_type_name = fvField(*it); + string map_name = fvValue(*it); + if (map_type_name != dscp_to_tc_field_name) + { + SWSS_LOG_WARN("Qos map type %s is not supported at global level", map_type_name.c_str()); + continue; + } + + if (qos_to_attr_map.find(map_type_name) != qos_to_attr_map.end()) + { + sai_object_id_t id; + string object_name; + ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); + + if (status != ref_resolve_status::success) + { + SWSS_LOG_INFO("Global QoS map %s is not yet created", map_name.c_str()); + task_status = task_process_status::task_need_retry; + } + + if (applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, id)) + { + setObjectReference(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, PORT_NAME_GLOBAL, map_type_name, object_name); + task_status = task_process_status::task_success; + SWSS_LOG_INFO("Applied QoS map type %s name %s to switch level", map_type_name.c_str(), object_name.c_str()); + } + else + { + task_status = 
task_process_status::task_failed; + SWSS_LOG_INFO("Failed to apply QoS map type %s name %s to switch level", map_type_name.c_str(), object_name.c_str()); + } + } + } + return task_status; +} + task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); string key = kfvKey(tuple); string op = kfvOp(tuple); + + if (key == PORT_NAME_GLOBAL) + { + return handleGlobalQosMap(op, tuple); + } + vector port_names = tokenize(key, list_item_delimiter); if (op == DEL_COMMAND) diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index 2b348cc030..f8b97cd381 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -78,8 +78,6 @@ class DscpToTcMapHandler : public QosMapHandler bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) override; sai_object_id_t addQosItem(const vector &attributes) override; bool removeQosItem(sai_object_id_t sai_object); -protected: - void applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t sai_dscp_to_tc_map); }; class MplsTcToTcMapHandler : public QosMapHandler @@ -195,11 +193,13 @@ class QosOrch : public Orch task_process_status handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); task_process_status handleTcToDscpTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleGlobalQosMap(const string &op, KeyOpFieldsValuesTuple &tuple); + sai_object_id_t getSchedulerGroup(const Port &port, const sai_object_id_t queue_id); bool applySchedulerToQueueSchedulerGroup(Port &port, size_t queue_ind, sai_object_id_t scheduler_profile_id); bool applyWredProfileToQueue(Port &port, size_t queue_ind, sai_object_id_t sai_wred_profile); - + bool applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t sai_dscp_to_tc_map); private: qos_table_handler_map m_qos_handler_map; @@ -212,9 +212,6 @@ class QosOrch : public Orch std::unordered_map m_scheduler_group_port_info; - // SAI OID of the global dscp to tc 
map - sai_object_id_t m_globalDscpToTcMap; - friend QosMapHandler; friend DscpToTcMapHandler; }; diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp index a2152f8d7c..7461a29e3c 100644 --- a/tests/mock_tests/qosorch_ut.cpp +++ b/tests/mock_tests/qosorch_ut.cpp @@ -723,8 +723,6 @@ namespace qosorch_test static_cast(gQosOrch)->doTask(); ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); - // Global dscp to tc map should not be cleared - ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, switch_dscp_to_tc_map_id); // Make sure other dependencies are not touched CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); @@ -931,12 +929,9 @@ namespace qosorch_test TEST_F(QosOrchTest, QosOrchTestGlobalDscpToTcMap) { - // Make sure dscp to tc map is correct - ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); - // Create a new dscp to tc map std::deque entries; - entries.push_back({"AZURE_1", "SET", + entries.push_back({"AZURE", "SET", { {"1", "0"}, {"0", "1"} @@ -945,16 +940,42 @@ namespace qosorch_test auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); consumer->addToSync(entries); entries.clear(); - // Drain DSCP_TO_TC_MAP table + + entries.push_back({"global", "SET", + { + {"dscp_to_tc_map", "AZURE"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + // Drain DSCP_TO_TC_MAP and PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is applied to switch + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + + // Remove global DSCP_TO_TC_MAP 
+ entries.push_back({"global", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain PORT_QOS_TABLE table static_cast(gQosOrch)->doTask(); - ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, switch_dscp_to_tc_map_id); + // Check switch_level dscp_to_tc_map is set to NULL + ASSERT_EQ(SAI_NULL_OBJECT_ID, switch_dscp_to_tc_map_id); - entries.push_back({"AZURE_1", "DEL", {}}); + entries.push_back({"AZURE", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); consumer->addToSync(entries); entries.clear(); + + auto current_sai_remove_qos_map_count = sai_remove_qos_map_count; // Drain DSCP_TO_TC_MAP table static_cast(gQosOrch)->doTask(); - ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + // Check DSCP_TO_TC_MAP|AZURE is removed, and the switch_level dscp_to_tc_map is set to NULL + ASSERT_EQ(current_sai_remove_qos_map_count + 1, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + } TEST_F(QosOrchTest, QosOrchTestRetryFirstItem) diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 301bd3c6d6..6b236c4fb2 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -370,6 +370,76 @@ def test_port_mpls_tc(self, dvs): port_cnt = len(swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys()) assert port_cnt == cnt +class TestDscpToTcMap(object): + ASIC_QOS_MAP_STR = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" + ASIC_PORT_STR = "ASIC_STATE:SAI_OBJECT_TYPE_PORT" + ASIC_SWITCH_STR = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" + + def init_test(self, dvs): + dvs.setup_db() + self.asic_db = dvs.get_asic_db() + self.config_db = dvs.get_config_db() + self.asic_qos_map_ids = self.asic_db.get_keys(self.ASIC_QOS_MAP_STR) + self.asic_qos_map_count = 
len(self.asic_qos_map_ids) + self.dscp_to_tc_table = swsscommon.Table(self.config_db.db_connection, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME) + self.port_qos_table = swsscommon.Table(self.config_db.db_connection, swsscommon.CFG_PORT_QOS_MAP_TABLE_NAME) + + def get_qos_id(self): + diff = set(self.asic_db.get_keys(self.ASIC_QOS_MAP_STR)) - set(self.asic_qos_map_ids) + assert len(diff) <= 1 + return None if len(diff) == 0 else diff.pop() + + def test_dscp_to_tc_map_applied_to_switch(self, dvs): + self.init_test(dvs) + dscp_to_tc_map_id = None + created_new_map = False + try: + existing_map = self.dscp_to_tc_table.getKeys() + if "AZURE" not in existing_map: + # Create a DSCP_TO_TC map + dscp_to_tc_map = [(str(i), str(i)) for i in range(0, 63)] + self.dscp_to_tc_table.set("AZURE", swsscommon.FieldValuePairs(dscp_to_tc_map)) + + self.asic_db.wait_for_n_keys(self.ASIC_QOS_MAP_STR, self.asic_qos_map_count + 1) + + # Get the DSCP_TO_TC map ID + dscp_to_tc_map_id = self.get_qos_id() + assert(dscp_to_tc_map_id is not None) + + # Assert the expected values + fvs = self.asic_db.get_entry(self.ASIC_QOS_MAP_STR, dscp_to_tc_map_id) + assert(fvs.get("SAI_QOS_MAP_ATTR_TYPE") == "SAI_QOS_MAP_TYPE_DSCP_TO_TC") + created_new_map = True + else: + for id in self.asic_qos_map_ids: + fvs = self.asic_db.get_entry(self.ASIC_QOS_MAP_STR, id) + if fvs.get("SAI_QOS_MAP_ATTR_TYPE") == "SAI_QOS_MAP_TYPE_DSCP_TO_TC": + dscp_to_tc_map_id = id + break + switch_oid = dvs.getSwitchOid() + # Check switch level DSCP_TO_TC_MAP doesn't before PORT_QOS_MAP|global is created + fvs = self.asic_db.get_entry(self.ASIC_SWITCH_STR, switch_oid) + assert("SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP" not in fvs) + + # Insert switch level map entry + self.port_qos_table.set("global", [("dscp_to_tc_map", "AZURE")]) + time.sleep(1) + + # Check the switch level DSCP_TO_TC_MAP is applied + fvs = self.asic_db.get_entry(self.ASIC_SWITCH_STR, switch_oid) + assert(fvs.get("SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP") == dscp_to_tc_map_id) + 
+ # Remove the global level DSCP_TO_TC_MAP + self.port_qos_table._del("global") + time.sleep(1) + + # Check the global level DSCP_TO_TC_MAP is set to SAI_ + fvs = self.asic_db.get_entry(self.ASIC_SWITCH_STR, switch_oid) + assert(fvs.get("SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP") == "oid:0x0") + finally: + if created_new_map: + self.dscp_to_tc_table._del("AZURE") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying From 9da86f37c9e6eb4c6e126d191aff0758d1d11e55 Mon Sep 17 00:00:00 2001 From: Shilong Liu Date: Thu, 16 Jun 2022 11:16:40 +0800 Subject: [PATCH 23/64] [ci] Don't publish gcov artifact when test failed. (#2331) * [ci] Fix test stage retry failure issue. --- .azure-pipelines/test-docker-sonic-vs-template.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index fc1527f72c..83fd36dc09 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -97,7 +97,7 @@ jobs: - publish: $(Build.ArtifactStagingDirectory)/gcov_tmp artifact: ${{ parameters.gcov_artifact_name }} displayName: "Publish gcov output" - condition: eq('${{ parameters.archive_gcov }}', true) + condition: and(succeeded(), eq('${{ parameters.archive_gcov }}', true)) - publish: $(Build.ArtifactStagingDirectory)/ artifact: ${{ parameters.log_artifact_name }}@$(System.JobAttempt) From 59f77eae01ee2b906244c1323014be0b1c649b40 Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Thu, 16 Jun 2022 15:31:26 -0700 Subject: [PATCH 24/64] [intfmgr]: Set proxy_arp kernel param (#2334) *[intfmgr]: Set proxy_arp kernel param Signed-off-by: Lawrence Lee --- cfgmgr/intfmgr.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/cfgmgr/intfmgr.cpp b/cfgmgr/intfmgr.cpp index 91ed762ef9..952ca9ef55 100644 --- a/cfgmgr/intfmgr.cpp 
+++ b/cfgmgr/intfmgr.cpp @@ -566,15 +566,15 @@ bool IntfMgr::setIntfProxyArp(const string &alias, const string &proxy_arp) { stringstream cmd; string res; - string proxy_arp_pvlan; + string proxy_arp_status; if (proxy_arp == "enabled") { - proxy_arp_pvlan = "1"; + proxy_arp_status = "1"; } else if (proxy_arp == "disabled") { - proxy_arp_pvlan = "0"; + proxy_arp_status = "0"; } else { @@ -582,7 +582,13 @@ bool IntfMgr::setIntfProxyArp(const string &alias, const string &proxy_arp) return false; } - cmd << ECHO_CMD << " " << proxy_arp_pvlan << " > /proc/sys/net/ipv4/conf/" << alias << "/proxy_arp_pvlan"; + cmd << ECHO_CMD << " " << proxy_arp_status << " > /proc/sys/net/ipv4/conf/" << alias << "/proxy_arp_pvlan"; + EXEC_WITH_ERROR_THROW(cmd.str(), res); + + cmd.clear(); + cmd.str(std::string()); + + cmd << ECHO_CMD << " " << proxy_arp_status << " > /proc/sys/net/ipv4/conf/" << alias << "/proxy_arp"; EXEC_WITH_ERROR_THROW(cmd.str(), res); SWSS_LOG_INFO("Proxy ARP set to \"%s\" on interface \"%s\"", proxy_arp.c_str(), alias.c_str()); From 700492f3e19aa143ce5dbdcbbd166b857ddfc66d Mon Sep 17 00:00:00 2001 From: Mickey Spiegel Date: Thu, 16 Jun 2022 17:51:22 -0700 Subject: [PATCH 25/64] [aclorch] Fix and simplify DTel watchlist tables and entries (#2155) * Fix DTel acl rule creation The significant rewrite of aclorch when adding ACL_TABLE_TYPE configuration caused a bug that prevents configuration of any DTel rules. This is due to use of an incorrect set of enum mappings while determining which type of AclRule to create. 
--- orchagent/aclorch.cpp | 133 +++++++++++------------------------------- orchagent/aclorch.h | 15 +---- orchagent/acltable.h | 1 - tests/test_dtel.py | 106 ++++++++++++++++++++++++++++++++- 4 files changed, 140 insertions(+), 115 deletions(-) diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index 73aa02dac9..24166a9c54 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -1455,23 +1455,14 @@ shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOr { return make_shared(acl, rule, table); } - else if (aclDTelFlowOpTypeLookup.find(action) != aclDTelFlowOpTypeLookup.cend()) + else if (aclDTelActionLookup.find(action) != aclDTelActionLookup.cend()) { if (!dtel) { throw runtime_error("DTel feature is not enabled. Watchlists cannot be configured"); } - if (action == ACTION_DTEL_DROP_REPORT_ENABLE || - action == ACTION_DTEL_TAIL_DROP_REPORT_ENABLE || - action == ACTION_DTEL_REPORT_ALL_PACKETS) - { - return make_shared(acl, dtel, rule, table); - } - else - { - return make_shared(acl, dtel, rule, table); - } + return make_shared(acl, dtel, rule, table); } } @@ -2447,13 +2438,13 @@ bool AclTable::clear() return true; } -AclRuleDTelFlowWatchListEntry::AclRuleDTelFlowWatchListEntry(AclOrch *aclOrch, DTelOrch *dtel, string rule, string table) : +AclRuleDTelWatchListEntry::AclRuleDTelWatchListEntry(AclOrch *aclOrch, DTelOrch *dtel, string rule, string table) : AclRule(aclOrch, rule, table), m_pDTelOrch(dtel) { } -bool AclRuleDTelFlowWatchListEntry::validateAddAction(string attr_name, string attr_val) +bool AclRuleDTelWatchListEntry::validateAddAction(string attr_name, string attr_val) { SWSS_LOG_ENTER(); @@ -2535,7 +2526,7 @@ bool AclRuleDTelFlowWatchListEntry::validateAddAction(string attr_name, string a return setAction(aclDTelActionLookup[attr_name], actionData); } -bool AclRuleDTelFlowWatchListEntry::validate() +bool AclRuleDTelWatchListEntry::validate() { SWSS_LOG_ENTER(); @@ -2552,19 +2543,19 @@ bool 
AclRuleDTelFlowWatchListEntry::validate() return true; } -bool AclRuleDTelFlowWatchListEntry::createRule() +bool AclRuleDTelWatchListEntry::createRule() { SWSS_LOG_ENTER(); return activate(); } -bool AclRuleDTelFlowWatchListEntry::removeRule() +bool AclRuleDTelWatchListEntry::removeRule() { return deactivate(); } -bool AclRuleDTelFlowWatchListEntry::activate() +bool AclRuleDTelWatchListEntry::activate() { SWSS_LOG_ENTER(); @@ -2581,7 +2572,7 @@ bool AclRuleDTelFlowWatchListEntry::activate() return AclRule::createRule(); } -bool AclRuleDTelFlowWatchListEntry::deactivate() +bool AclRuleDTelWatchListEntry::deactivate() { SWSS_LOG_ENTER(); @@ -2612,7 +2603,7 @@ bool AclRuleDTelFlowWatchListEntry::deactivate() return true; } -void AclRuleDTelFlowWatchListEntry::onUpdate(SubjectType type, void *cntx) +void AclRuleDTelWatchListEntry::onUpdate(SubjectType type, void *cntx) { sai_acl_action_data_t actionData; sai_object_id_t session_oid = SAI_NULL_OBJECT_ID; @@ -2673,72 +2664,19 @@ void AclRuleDTelFlowWatchListEntry::onUpdate(SubjectType type, void *cntx) } } -bool AclRuleDTelFlowWatchListEntry::update(const AclRule& rule) +bool AclRuleDTelWatchListEntry::update(const AclRule& rule) { - auto dtelDropWathcListRule = dynamic_cast(&rule); - if (!dtelDropWathcListRule) + auto dtelWatchListRule = dynamic_cast(&rule); + if (!dtelWatchListRule) { - SWSS_LOG_ERROR("Cannot update DTEL flow watch list rule with a rule of a different type"); + SWSS_LOG_ERROR("Cannot update DTEL watch list rule with a rule of a different type"); return false; } - SWSS_LOG_ERROR("Updating DTEL flow watch list rule is currently not implemented"); + SWSS_LOG_ERROR("Updating DTEL watch list rule is currently not implemented"); return false; } -AclRuleDTelDropWatchListEntry::AclRuleDTelDropWatchListEntry(AclOrch *aclOrch, DTelOrch *dtel, string rule, string table) : - AclRule(aclOrch, rule, table), - m_pDTelOrch(dtel) -{ -} - -bool AclRuleDTelDropWatchListEntry::validateAddAction(string attr_name, string 
attr_val) -{ - SWSS_LOG_ENTER(); - - if (!m_pDTelOrch) - { - return false; - } - - sai_acl_action_data_t actionData; - string attr_value = to_upper(attr_val); - - if (attr_name != ACTION_DTEL_DROP_REPORT_ENABLE && - attr_name != ACTION_DTEL_TAIL_DROP_REPORT_ENABLE && - attr_name != ACTION_DTEL_REPORT_ALL_PACKETS) - { - return false; - } - - actionData.parameter.booldata = (attr_value == DTEL_ENABLED) ? true : false; - actionData.enable = (attr_value == DTEL_ENABLED) ? true : false; - - return setAction(aclDTelActionLookup[attr_name], actionData); -} - -bool AclRuleDTelDropWatchListEntry::validate() -{ - SWSS_LOG_ENTER(); - - if (!m_pDTelOrch) - { - return false; - } - - if ((m_rangeConfig.empty() && m_matches.empty()) || m_actions.size() == 0) - { - return false; - } - - return true; -} - -void AclRuleDTelDropWatchListEntry::onUpdate(SubjectType, void *) -{ - // Do nothing -} - AclRange::AclRange(sai_acl_range_type_t type, sai_object_id_t oid, int min, int max): m_oid(oid), m_refCnt(0), m_min(min), m_max(max), m_type(type) { @@ -4619,11 +4557,10 @@ void AclOrch::createDTelWatchListTables() AclTableTypeBuilder builder; - AclTable flowWLTable(this, TABLE_TYPE_DTEL_FLOW_WATCHLIST); - AclTable dropWLTable(this, TABLE_TYPE_DTEL_DROP_WATCHLIST); + AclTable dtelWLTable(this, TABLE_TYPE_DTEL_FLOW_WATCHLIST); - flowWLTable.validateAddStage(ACL_STAGE_INGRESS); - flowWLTable.validateAddType(builder + dtelWLTable.validateAddStage(ACL_STAGE_INGRESS); + dtelWLTable.validateAddType(builder .withBindPointType(SAI_ACL_BIND_POINT_TYPE_SWITCH) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) @@ -4635,31 +4572,28 @@ void AclOrch::createDTelWatchListTables() .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_INNER_ETHER_TYPE)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_INNER_SRC_IP)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_INNER_DST_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_OUTER_VLAN_ID)) + 
.withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DSCP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMP_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMP_CODE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_CODE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER)) .withAction(SAI_ACL_ACTION_TYPE_ACL_DTEL_FLOW_OP) .withAction(SAI_ACL_ACTION_TYPE_DTEL_INT_SESSION) - .withAction(SAI_ACL_ACTION_TYPE_DTEL_REPORT_ALL_PACKETS) - .withAction(SAI_ACL_ACTION_TYPE_DTEL_FLOW_SAMPLE_PERCENT) - .build() - ); - flowWLTable.setDescription("Dataplane Telemetry Flow Watchlist table"); - - dropWLTable.validateAddStage(ACL_STAGE_INGRESS); - dropWLTable.validateAddType(builder - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_SWITCH) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IP)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL)) .withAction(SAI_ACL_ACTION_TYPE_DTEL_DROP_REPORT_ENABLE) .withAction(SAI_ACL_ACTION_TYPE_DTEL_TAIL_DROP_REPORT_ENABLE) + .withAction(SAI_ACL_ACTION_TYPE_DTEL_REPORT_ALL_PACKETS) + .withAction(SAI_ACL_ACTION_TYPE_DTEL_FLOW_SAMPLE_PERCENT) .build() ); - dropWLTable.setDescription("Dataplane Telemetry Drop Watchlist table"); + dtelWLTable.setDescription("Dataplane Telemetry Watchlist table"); - addAclTable(flowWLTable); - addAclTable(dropWLTable); + addAclTable(dtelWLTable); } void AclOrch::deleteDTelWatchListTables() @@ -4667,7 +4601,6 @@ void 
AclOrch::deleteDTelWatchListTables() SWSS_LOG_ENTER(); removeAclTable(TABLE_TYPE_DTEL_FLOW_WATCHLIST); - removeAclTable(TABLE_TYPE_DTEL_DROP_WATCHLIST); } void AclOrch::registerFlexCounter(const AclRule& rule) diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index 02631d934e..ee17ba4a1f 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -339,10 +339,10 @@ class AclRuleMirror: public AclRule MirrorOrch *m_pMirrorOrch {nullptr}; }; -class AclRuleDTelFlowWatchListEntry: public AclRule +class AclRuleDTelWatchListEntry: public AclRule { public: - AclRuleDTelFlowWatchListEntry(AclOrch *m_pAclOrch, DTelOrch *m_pDTelOrch, string rule, string table); + AclRuleDTelWatchListEntry(AclOrch *m_pAclOrch, DTelOrch *m_pDTelOrch, string rule, string table); bool validateAddAction(string attr_name, string attr_value); bool validate(); bool createRule(); @@ -360,17 +360,6 @@ class AclRuleDTelFlowWatchListEntry: public AclRule bool INT_session_valid; }; -class AclRuleDTelDropWatchListEntry: public AclRule -{ -public: - AclRuleDTelDropWatchListEntry(AclOrch *m_pAclOrch, DTelOrch *m_pDTelOrch, string rule, string table); - bool validateAddAction(string attr_name, string attr_value); - bool validate(); - void onUpdate(SubjectType, void *) override; -protected: - DTelOrch *m_pDTelOrch; -}; - class AclTable { public: diff --git a/orchagent/acltable.h b/orchagent/acltable.h index 3ec7f1a757..2d91a84b98 100644 --- a/orchagent/acltable.h +++ b/orchagent/acltable.h @@ -31,7 +31,6 @@ extern "C" { #define TABLE_TYPE_PFCWD "PFCWD" #define TABLE_TYPE_CTRLPLANE "CTRLPLANE" #define TABLE_TYPE_DTEL_FLOW_WATCHLIST "DTEL_FLOW_WATCHLIST" -#define TABLE_TYPE_DTEL_DROP_WATCHLIST "DTEL_DROP_WATCHLIST" #define TABLE_TYPE_MCLAG "MCLAG" #define TABLE_TYPE_MUX "MUX" #define TABLE_TYPE_DROP "DROP" diff --git a/tests/test_dtel.py b/tests/test_dtel.py index b45ba13972..c8e86d6b7d 100644 --- a/tests/test_dtel.py +++ b/tests/test_dtel.py @@ -211,7 +211,111 @@ def test_DtelQueueReportAttribs(self, 
dvs, testlog): assert False tbl._del("Ethernet0|0") - + + def test_DtelFlowWatchlist(self, dvs, testlog): + self.db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.table = "DTEL_FLOW_WATCHLIST" + + fields_1=[("PRIORITY", "30"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("FLOW_OP", "POSTCARD"), + ("REPORT_ALL_PACKETS", "FALSE"), + ("DROP_REPORT_ENABLE", "TRUE"), + ("TAIL_DROP_REPORT_ENABLE", "TRUE")] + fields_2=[("PRIORITY", "40"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("FLOW_OP", "POSTCARD"), + ("REPORT_ALL_PACKETS", "TRUE"), + ("DROP_REPORT_ENABLE", "FALSE"), + ("TAIL_DROP_REPORT_ENABLE", "FALSE")] + fields_3=[("PRIORITY", "50"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("FLOW_OP", "POSTCARD"), + ("REPORT_ALL_PACKETS", "TRUE")] + fields_4=[("PRIORITY", "60"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("REPORT_ALL_PACKETS", "TRUE"), + ("DROP_REPORT_ENABLE", "TRUE"), + ("TAIL_DROP_REPORT_ENABLE", "TRUE")] + fields_5=[("PRIORITY", "70"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("FLOW_OP", "NOP"), + ("REPORT_ALL_PACKETS", "FALSE"), + ("DROP_REPORT_ENABLE", "TRUE"), + ("TAIL_DROP_REPORT_ENABLE", "TRUE")] + listfield = [fields_1, fields_2, fields_3, fields_4, fields_5] + + for field in listfield: + k = listfield.index(field) + rule = "RULE-" + str(k) + self._create_dtel_acl_rule(self.table, rule, field) + self._check_dtel_acl_rule(dvs, rule) + self._remove_dtel_acl_rule(self.table, rule) + + def _create_dtel_acl_rule(self, table, rule, field): + tbl = swsscommon.Table(self.db, "ACL_RULE") + fvs = swsscommon.FieldValuePairs(field) + tbl.set(table + "|" + rule, fvs) + time.sleep(1) + + def _remove_dtel_acl_rule(self, table, rule): + tbl = swsscommon.Table(self.db, "ACL_RULE") + tbl._del(table + "|" + rule) + time.sleep(1) + + def _check_dtel_acl_rule(self, dvs, rule): + time.sleep(1) + atbl = swsscommon.Table(self.adb, 
"ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY") + keys = atbl.getKeys() + acl_entry = [k for k in keys if k not in dvs.asicdb.default_acl_entries] + assert len(acl_entry) != 0 + (status, fvs) = atbl.get(acl_entry[0]) + value = dict(fvs) + assert status + + if rule == "RULE-0": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "30" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_ACL_DTEL_FLOW_OP"] == "SAI_ACL_DTEL_FLOW_OP_POSTCARD" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "disabled" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_DROP_REPORT_ENABLE"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_TAIL_DROP_REPORT_ENABLE"] == "true" + elif rule == "RULE-1": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "40" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_ACL_DTEL_FLOW_OP"] == "SAI_ACL_DTEL_FLOW_OP_POSTCARD" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_DROP_REPORT_ENABLE"] == "disabled" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_TAIL_DROP_REPORT_ENABLE"] == "disabled" + elif rule == "RULE-2": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "50" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_ACL_DTEL_FLOW_OP"] == "SAI_ACL_DTEL_FLOW_OP_POSTCARD" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "true" + elif rule == "RULE-3": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "60" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert 
value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_DROP_REPORT_ENABLE"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_TAIL_DROP_REPORT_ENABLE"] == "true" + elif rule == "RULE-4": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "70" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_ACL_DTEL_FLOW_OP"] == "SAI_ACL_DTEL_FLOW_OP_NOP" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "disabled" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_DROP_REPORT_ENABLE"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_TAIL_DROP_REPORT_ENABLE"] == "true" def test_DtelEventAttribs(self, dvs, testlog): From 1bb5070775f7f6f47a06cc00c7396679b840642d Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Sat, 18 Jun 2022 00:15:24 +0800 Subject: [PATCH 26/64] Enhance mock test for dynamic buffer manager for port removing and qos reload flows (#2262) What I did Enhance the mock test of the dynamic buffer manager in port remove and config qos clear flow and fix bugs during mock test implementation Implement mock method ProduceStateTable::del Signed-off-by: Stephen Sun stephens@nvidia.com How I verified it Run regression test, mock test, vs test, and manual test. Details if related 1. Support mock test for dynamic buffer manager config qos clear and reclaiming buffer Remove port 2. Handle port remove/create flow Cache cable length for a port Try reclaiming unused buffer when maximum buffer parameters are received for a port whose state is ADMIN_DOWN and m_bufferCompletelyInitialized is true 3. Handle config qos clear If all buffer pools are removed when m_bufferPoolReady is true, remove all zero pools and profiles. 
Reload zero profiles and pools if they have not been loaded when reclaiming buffer --- cfgmgr/buffermgrdyn.cpp | 114 +++++++ cfgmgr/buffermgrdyn.h | 2 + tests/mock_tests/buffermgrdyn_ut.cpp | 486 +++++++++++++++++++++++++++ tests/mock_tests/mock_table.cpp | 8 + 4 files changed, 610 insertions(+) diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index 1c5b99a6f8..5017ad9d1b 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -1862,6 +1862,14 @@ task_process_status BufferMgrDynamic::handleBufferMaxParam(KeyOpFieldsValuesTupl SWSS_LOG_INFO("BUFFER_MAX_PARAM: Got port %s's max priority group %s", key.c_str(), value.c_str()); portInfo.maximum_buffer_objects[BUFFER_PG] = (sai_uint32_t)pgCount; + + if (m_bufferCompletelyInitialized && portInfo.state == PORT_ADMIN_DOWN) + { + // This is mostly for the case where the port is created only-the-fly + // The maximum buffer parameters can be received after buffer items + reclaimReservedBufferForPort(key, m_portPgLookup, BUFFER_PG); + SWSS_LOG_NOTICE("Admin-down port %s is handled after maximum buffer parameter has been received", key.c_str()); + } } else if (fvField(i) == "max_queues") { @@ -1875,6 +1883,14 @@ task_process_status BufferMgrDynamic::handleBufferMaxParam(KeyOpFieldsValuesTupl SWSS_LOG_INFO("BUFFER_MAX_PARAM: Got port %s's max queue %s", key.c_str(), value.c_str()); portInfo.maximum_buffer_objects[BUFFER_QUEUE] = (sai_uint32_t)queueCount; + + if (m_bufferCompletelyInitialized && portInfo.state == PORT_ADMIN_DOWN) + { + // This is mostly for the case where the port is created only-the-fly + // The maximum buffer parameters can be received after buffer items + reclaimReservedBufferForPort(key, m_portQueueLookup, BUFFER_QUEUE); + SWSS_LOG_NOTICE("Admin-down port %s is handled after maximum buffer parameter has been received", key.c_str()); + } } } } @@ -1961,6 +1977,7 @@ task_process_status BufferMgrDynamic::handleCableLenTable(KeyOpFieldsValuesTuple int failed_item_count = 0; if (op == 
SET_COMMAND) { + m_cableLengths.clear(); for (auto i : kfvFieldsValues(tuple)) { // receive and cache cable length table @@ -1975,6 +1992,8 @@ task_process_status BufferMgrDynamic::handleCableLenTable(KeyOpFieldsValuesTuple port.c_str(), portInfo.effective_speed.c_str(), portInfo.cable_length.c_str(), portInfo.gearbox_model.c_str()); + m_cableLengths[port] = cable_length; + if (portInfo.cable_length == cable_length) { continue; @@ -2183,6 +2202,11 @@ task_process_status BufferMgrDynamic::handlePortTable(KeyOpFieldsValuesTuple &tu string &mtu = portInfo.mtu; string &effective_speed = portInfo.effective_speed; + if (cable_length.empty() && !m_cableLengths[port].empty()) + { + cable_length = m_cableLengths[port]; + } + bool need_refresh_all_buffer_objects = false, need_handle_admin_down = false, was_admin_down = false; if (effective_speed_updated || mtu_updated) @@ -2304,6 +2328,28 @@ task_process_status BufferMgrDynamic::handlePortTable(KeyOpFieldsValuesTuple &tu task_status = refreshPgsForPort(port, portInfo.effective_speed, portInfo.cable_length, portInfo.mtu); } } + else if (op == DEL_COMMAND) + { + cleanUpItemsForReclaimingBuffer(port); + if ((m_portPgLookup.find(port) != m_portPgLookup.end() + && !m_portPgLookup[port].empty()) + || (m_portQueueLookup.find(port) != m_portQueueLookup.end() + && !m_portQueueLookup[port].empty()) + || (m_portProfileListLookups[BUFFER_INGRESS].find(port) != m_portProfileListLookups[BUFFER_INGRESS].end() + && !m_portProfileListLookups[BUFFER_INGRESS][port].empty()) + || (m_portProfileListLookups[BUFFER_EGRESS].find(port) != m_portProfileListLookups[BUFFER_EGRESS].end() + && !m_portProfileListLookups[BUFFER_EGRESS][port].empty())) + { + SWSS_LOG_INFO("Port %s can't be removed before buffer items have been removed", port.c_str()); + return task_process_status::task_need_retry; + } + m_portPgLookup.erase(port); + m_portQueueLookup.erase(port); + m_portProfileListLookups[BUFFER_INGRESS].erase(port); + 
m_portProfileListLookups[BUFFER_EGRESS].erase(port); + m_portInfoLookup.erase(port); + SWSS_LOG_NOTICE("Port %s is removed", port.c_str()); + } return task_status; } @@ -2401,6 +2447,28 @@ task_process_status BufferMgrDynamic::handleBufferPoolTable(KeyOpFieldsValuesTup m_applBufferPoolTable.del(pool); m_stateBufferPoolTable.del(pool); m_bufferPoolLookup.erase(pool); + if (pool == INGRESS_LOSSLESS_PG_POOL_NAME) + { + m_configuredSharedHeadroomPoolSize.clear(); + } + + if (m_bufferPoolReady && m_bufferPoolLookup.empty()) + { + for(auto &port : m_adminDownPorts) + { + cleanUpItemsForReclaimingBuffer(port); + } + + // Zero profiles must be unloaded once all pools have been uploaded + // This can be resulted from "config qos reload" + // Any zero profile left can leads to buffer pool not able to be cleared + unloadZeroPoolAndProfiles(); + + m_bufferPoolReady = false; + m_bufferCompletelyInitialized = false; + + m_pendingApplyZeroProfilePorts = m_adminDownPorts; + } } else { @@ -2634,6 +2702,12 @@ void BufferMgrDynamic::handleSetSingleBufferObjectOnAdminDownPort(buffer_directi { if (idsToZero.empty()) { + // Happens only after "config qos reload" + if (!m_zeroProfilesLoaded) + { + loadZeroPoolAndProfiles(); + } + // If initialization finished, no extra handle required. // Check whether the key overlaps with supported but not configured map auto const &idsToAdd = parseObjectNameFromKey(key, 1); @@ -2749,6 +2823,14 @@ void BufferMgrDynamic::handleDelSingleBufferObjectOnAdminDownPort(buffer_directi if (idsToZero.empty()) { + if (!m_bufferPoolReady) + { + // Reclaiming buffer has not started yet so just remove it. 
+ // Do not add it to "supported but not configured" set + updateBufferObjectToDb(key, "", false, direction); + return; + } + // For admin down ports, if zero profiles have been applied to all configured items // do NOT remove it otherwise SDK default value will be set for the items // Move the key to supported_but_not_configured_items so that the slice of items @@ -3125,6 +3207,22 @@ task_process_status BufferMgrDynamic::handleSingleBufferPortProfileListEntry(con // For admin-down ports, zero profile list has been applied on the port when it entered admin-down state updateBufferObjectListToDb(key, profileListLookup[port], dir); } + else + { + const auto &profileList = m_portProfileListLookups[dir][port]; + if (!profileList.empty()) + { + // Happens only after "config qos reload" + if (!m_zeroProfilesLoaded) + { + loadZeroPoolAndProfiles(); + } + vector fvVector; + const string &zeroProfileNameList = constructZeroProfileListFromNormalProfileList(profileList, port); + fvVector.emplace_back(buffer_profile_list_field_name, zeroProfileNameList); + m_applBufferProfileListTables[dir].set(port, fvVector); + } + } } else if (op == DEL_COMMAND) { @@ -3462,9 +3560,25 @@ void BufferMgrDynamic::handlePendingBufferObjects() } } +void BufferMgrDynamic::cleanUpItemsForReclaimingBuffer(const string &port) +{ + // Clean up zero buffers when the buffer pools or a port has been removed + if (!m_bufferObjectIdsToZero[BUFFER_PG].empty()) + { + updateBufferObjectToDb(port + delimiter + m_bufferObjectIdsToZero[BUFFER_PG], "", false, BUFFER_PG); + } + if (!m_bufferObjectIdsToZero[BUFFER_QUEUE].empty()) + { + updateBufferObjectToDb(port + delimiter + m_bufferObjectIdsToZero[BUFFER_QUEUE], "", false, BUFFER_QUEUE); + } + removeSupportedButNotConfiguredItemsOnPort(m_portInfoLookup[port], port); +} + void BufferMgrDynamic::doTask(SelectableTimer &timer) { checkSharedBufferPoolSize(true); if (!m_bufferCompletelyInitialized) + { handlePendingBufferObjects(); + } } diff --git 
a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h index cb94227522..11b55d7667 100644 --- a/cfgmgr/buffermgrdyn.h +++ b/cfgmgr/buffermgrdyn.h @@ -196,6 +196,7 @@ class BufferMgrDynamic : public Orch // key: port name // updated only when a port's speed and cable length updated port_info_lookup_t m_portInfoLookup; + std::map m_cableLengths; std::set m_adminDownPorts; std::set m_pendingApplyZeroProfilePorts; std::set m_pendingSupportedButNotConfiguredPorts[BUFFER_DIR_MAX]; @@ -302,6 +303,7 @@ class BufferMgrDynamic : public Orch void handleSetSingleBufferObjectOnAdminDownPort(buffer_direction_t direction, const std::string &port, const std::string &key, const std::string &profile); void handleDelSingleBufferObjectOnAdminDownPort(buffer_direction_t direction, const std::string &port, const std::string &key, port_info_t &portInfo); bool isReadyToReclaimBufferOnPort(const std::string &port); + void cleanUpItemsForReclaimingBuffer(const std::string &port); // Main flows template task_process_status reclaimReservedBufferForPort(const std::string &port, T &obj, buffer_direction_t dir); diff --git a/tests/mock_tests/buffermgrdyn_ut.cpp b/tests/mock_tests/buffermgrdyn_ut.cpp index b64a367c79..9dd17a5da8 100644 --- a/tests/mock_tests/buffermgrdyn_ut.cpp +++ b/tests/mock_tests/buffermgrdyn_ut.cpp @@ -785,6 +785,492 @@ namespace buffermgrdyn_test } } + /* + * Clear qos with reclaiming buffer + * + * To test clear qos flow with reclaiming buffer. + * 1. Init buffer manager as normal + * 2. Configure buffer for 2 ports with admin status being up and down respectively + * 3. Clear qos + * 4. Check whether all the buffer items have been removed + * 5. 
Repeat the flow from step 2 for two extra times: + * - Check whether buffer manager works correctly after clear qos + * - STATE_DB.BUFFER_MAX_PARAM is received before and after buffer items received + */ + TEST_F(BufferMgrDynTest, BufferMgrTestClearQosReclaimingBuffer) + { + vector fieldValues; + vector keys; + vector skippedPools = {"", "ingress_lossless_pool", ""}; + int round = 0; + + SetUpReclaimingBuffer(); + shared_ptr> zero_profile = make_shared>(zeroProfile); + + InitDefaultLosslessParameter(); + InitMmuSize(); + + StartBufferManager(zero_profile); + + statePortTable.set("Ethernet0", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + InitPort("Ethernet0", "down"); + InitPort("Ethernet4", "down"); + InitPort("Ethernet6", "down"); + InitPort("Ethernet8", "down"); + vector adminDownPorts = {"Ethernet0", "Ethernet4", "Ethernet6"}; + vector ports = {"Ethernet0", "Ethernet2", "Ethernet4", "Ethernet6"}; + InitPort("Ethernet2"); + InitCableLength("Ethernet2", "5m"); + auto expectedProfile = "pg_lossless_100000_5m_profile"; + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_ADMIN_DOWN); + + SetPortInitDone(); + for(auto &skippedPool : skippedPools) + { + // Call timer + m_dynamicBuffer->doTask(m_selectableTable); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + for (auto i : testBufferPool) + { + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], testBufferPool[i.first]); + fieldValues.clear(); + appBufferPoolTable.get(i.first, fieldValues); + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], fieldValues); + } + + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + for (auto i : testBufferProfile) + { + 
CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); + fieldValues.clear(); + appBufferProfileTable.get(i.first, fieldValues); + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], fieldValues); + } + + for (auto &adminDownPort : adminDownPorts) + { + InitBufferPg(adminDownPort + "|3-4", "NULL"); + InitBufferQueue(adminDownPort + "|3-4", "egress_lossless_profile"); + InitBufferQueue(adminDownPort + "|0-2", "egress_lossy_profile"); + InitBufferQueue(adminDownPort + "|5-6", "egress_lossy_profile"); + } + InitBufferPg("Ethernet0|0", "ingress_lossy_profile"); + InitBufferPg("Ethernet0|3-4"); + InitBufferProfileList("Ethernet0", "ingress_lossless_profile", bufferIngProfileListTable); + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + // Init buffer items for a normal port and check APPL_DB + InitBufferQueue("Ethernet2|3-4", "egress_lossless_profile"); + InitBufferQueue("Ethernet2|0-2", "egress_lossy_profile"); + InitBufferPg("Ethernet2|3-4"); + InitBufferProfileList("Ethernet2", "ingress_lossless_profile", bufferIngProfileListTable); + InitBufferProfileList("Ethernet2", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + fieldValues.clear(); + ASSERT_TRUE(appBufferPgTable.get("Ethernet2:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", expectedProfile}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet2:0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet2:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossless_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_profile"}}); + fieldValues.clear(); + 
ASSERT_TRUE(appBufferEgrProfileListTable.get("Ethernet2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "egress_lossless_profile,egress_lossy_profile"}}); + + // Buffer pools ready but the port is not ready to be reclaimed + m_dynamicBuffer->doTask(m_selectableTable); + + // Push maximum buffer parameters for the port in order to make it ready to reclaim + if (round == 0) + { + // To simulate different sequences + // The 1st round: STATE_DB.PORT_TABLE is updated after buffer items ready + // The 2nd, 3rd rounds: before + + for (auto &adminDownPort : adminDownPorts) + { + stateBufferTable.set(adminDownPort, + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + } + stateBufferTable.set("Ethernet8", + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + m_dynamicBuffer->addExistingData(&stateBufferTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + m_dynamicBuffer->doTask(m_selectableTable); + + // Check whether zero profiles and pool have been applied + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 4); + for (auto key : keys) + { + if (testBufferPool.find(key) == testBufferPool.end()) + { + fieldValues.clear(); + appBufferPoolTable.get(key, fieldValues); + CheckIfVectorsMatch(fieldValues, zeroProfileMap[key]); + } + } + + appBufferProfileTable.getKeys(keys); + for (auto key : keys) + { + if (testBufferProfile.find(key) == testBufferProfile.end()) + { + fieldValues.clear(); + appBufferProfileTable.get(key, fieldValues); + if (zeroProfileMap.find(key) == zeroProfileMap.end()) + CheckIfVectorsMatch(fieldValues, + { + {"xon", ""}, // Due to the limitation of mock lua scricpt call, + {"xoff", ""}, // we can not calculate the number + {"size", ""}, // so expected value is the empty string + {"pool", "ingress_lossless_pool"}, + {"dynamic_th", "0"} + }); + else + CheckIfVectorsMatch(fieldValues, zeroProfileMap[key]); + } + } + + for (auto &adminDownPort : adminDownPorts) + { + fieldValues.clear(); + 
ASSERT_TRUE(appBufferPgTable.get("Ethernet0:0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "ingress_lossy_pg_zero_profile"}}); + ASSERT_FALSE(appBufferPgTable.get("Ethernet0:3-4", fieldValues)); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get(adminDownPort + ":0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get(adminDownPort + ":3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossless_zero_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get(adminDownPort + ":5-6", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + fieldValues.clear(); + } + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_zero_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferEgrProfileListTable.get("Ethernet0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "egress_lossless_zero_profile,egress_lossy_zero_profile"}}); + + // Configured but not applied items. 
There is an extra delay + m_dynamicBuffer->m_waitApplyAdditionalZeroProfiles = 0; + m_dynamicBuffer->doTask(m_selectableTable); + for (auto &adminDownPort : adminDownPorts) + { + ASSERT_TRUE(appBufferQueueTable.get(adminDownPort + ":7-15", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + fieldValues.clear(); + } + + if (round == 0) + { + ASSERT_TRUE(appBufferQueueTable.get("Ethernet8:0-15", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferPgTable.get("Ethernet8:0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "ingress_lossy_pg_zero_profile"}}); + fieldValues.clear(); + ClearBufferObject("Ethernet8", CFG_PORT_TABLE_NAME); + ASSERT_FALSE(appBufferPgTable.get("Ethernet8:0", fieldValues)); + ASSERT_FALSE(appBufferQueueTable.get("Ethernet8:0-15", fieldValues)); + } + + ClearBufferObject("Ethernet0|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet4|5-6", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet4|0-2", CFG_BUFFER_QUEUE_TABLE_NAME); + // Clear all qos tables + ClearBufferPool(skippedPool); + ClearBufferProfile(); + ClearBufferObject("Ethernet0|0", CFG_BUFFER_PG_TABLE_NAME); + for (auto &adminDownPort : adminDownPorts) + { + ClearBufferObject(adminDownPort + "|3-4", CFG_BUFFER_PG_TABLE_NAME); + } + ClearBufferObject("Ethernet2|3-4", CFG_BUFFER_PG_TABLE_NAME); + ClearBufferObject("Ethernet0|0-2", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet2|0-2", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet2|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet0|5-6", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet4|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet6|0-2", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet6|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet6|5-6", CFG_BUFFER_QUEUE_TABLE_NAME); 
+ for (auto &port : ports) + { + ClearBufferObject(port, CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + ClearBufferObject(port, CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + } + + // Run timer + m_dynamicBuffer->doTask(m_selectableTable); + + if (!skippedPool.empty()) + { + // Clear the pool that was skipped in the previous step + // This is to simulate the case where all the pools are not removed in one-shot + ClearBufferPool("", skippedPool); + m_dynamicBuffer->doTask(m_selectableTable); + } + + // All internal data and APPL_DB has been cleared + ASSERT_TRUE((appBufferPgTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferQueueTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferProfileTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferPoolTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferIngProfileListTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferEgrProfileListTable.getKeys(keys), keys.empty())); + ASSERT_TRUE(m_dynamicBuffer->m_bufferPoolLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_EGRESS].empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS].empty()); + + round++; + } + } + + + /* + * Clear qos with reclaiming buffer sad flows + * Reclaiming buffer should be triggered via any single buffer item + */ + TEST_F(BufferMgrDynTest, BufferMgrTestReclaimingBufferSadFlows) + { + vector fieldValues; + vector keys; + vector> bufferItems; + + bufferItems.emplace_back(bufferPgTable, "Ethernet0:0", "ingress_lossy_profile", appBufferPgTable, "profile", "ingress_lossy_pg_zero_profile"); + bufferItems.emplace_back(bufferPgTable, "Ethernet0:3-4", "NULL", appBufferPgTable, "", ""); + bufferItems.emplace_back(bufferQueueTable, "Ethernet0:0-2", "egress_lossy_profile", appBufferQueueTable, 
"profile", "egress_lossy_zero_profile"); + bufferItems.emplace_back(bufferQueueTable, "Ethernet0:3-4", "egress_lossless_profile", appBufferQueueTable, "profile", "egress_lossless_zero_profile"); + bufferItems.emplace_back(bufferIngProfileListTable, "Ethernet0", "ingress_lossless_profile", appBufferIngProfileListTable, "profile_list", "ingress_lossless_zero_profile"); + bufferItems.emplace_back(bufferEgrProfileListTable, "Ethernet0", "egress_lossless_profile,egress_lossy_profile", appBufferEgrProfileListTable, "profile_list", "egress_lossless_zero_profile,egress_lossy_zero_profile"); + + SetUpReclaimingBuffer(); + shared_ptr> zero_profile = make_shared>(zeroProfile); + + InitDefaultLosslessParameter(); + InitMmuSize(); + + StartBufferManager(zero_profile); + + stateBufferTable.set("Ethernet0", + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + m_dynamicBuffer->addExistingData(&stateBufferTable); + static_cast(m_dynamicBuffer)->doTask(); + + InitPort("Ethernet0", "down"); + + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_ADMIN_DOWN); + + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + // After "config qos clear" the zero buffer profiles are unloaded + m_dynamicBuffer->unloadZeroPoolAndProfiles(); + + // Starts with empty buffer tables + for(auto &bufferItem : bufferItems) + { + auto &cfgTable = get<0>(bufferItem); + auto &key = get<1>(bufferItem); + auto &profile = get<2>(bufferItem); + auto &appTable = get<3>(bufferItem); + auto &fieldName = get<4>(bufferItem); + auto &expectedProfile = get<5>(bufferItem); + + cfgTable.set(key, + { + {fieldName, profile} + }); + m_dynamicBuffer->addExistingData(&cfgTable); + static_cast(m_dynamicBuffer)->doTask(); + + ASSERT_FALSE(m_dynamicBuffer->m_bufferCompletelyInitialized); + ASSERT_FALSE(m_dynamicBuffer->m_zeroProfilesLoaded); + ASSERT_TRUE(m_dynamicBuffer->m_portInitDone); + ASSERT_TRUE(m_dynamicBuffer->m_pendingApplyZeroProfilePorts.find("Ethernet0") != 
m_dynamicBuffer->m_pendingApplyZeroProfilePorts.end()); + + InitBufferPool(); + InitDefaultBufferProfile(); + + m_dynamicBuffer->doTask(m_selectableTable); + + // Another doTask to ensure all the dependent tables have been drained + // after buffer pools and profiles have been drained + static_cast(m_dynamicBuffer)->doTask(); + + if (expectedProfile.empty()) + { + ASSERT_FALSE(appTable.get(key, fieldValues)); + } + else + { + ASSERT_TRUE(appTable.get(key, fieldValues)); + CheckIfVectorsMatch(fieldValues, {{fieldName, expectedProfile}}); + } + + m_dynamicBuffer->m_waitApplyAdditionalZeroProfiles = 0; + m_dynamicBuffer->doTask(m_selectableTable); + + ASSERT_TRUE(m_dynamicBuffer->m_pendingApplyZeroProfilePorts.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_bufferCompletelyInitialized); + + // Simulate clear qos + ClearBufferPool(); + ClearBufferProfile(); + + // Call timer + m_dynamicBuffer->doTask(m_selectableTable); + } + } + + /* + * Port removing flow + */ + TEST_F(BufferMgrDynTest, BufferMgrTestRemovePort) + { + vector fieldValues; + vector keys; + vector statuses = {"up", "down"}; + + // Prepare information that will be read at the beginning + InitDefaultLosslessParameter(); + InitMmuSize(); + + shared_ptr> zero_profile = make_shared>(zeroProfile); + StartBufferManager(zero_profile); + + SetPortInitDone(); + // Timer will be called + m_dynamicBuffer->doTask(m_selectableTable); + + InitBufferPool(); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + m_dynamicBuffer->m_waitApplyAdditionalZeroProfiles = 0; + InitCableLength("Ethernet0", "5m"); + + for(auto status : statuses) + { + bool admin_up = (status == "up"); + + InitPort("Ethernet0", status); + ASSERT_TRUE(m_dynamicBuffer->m_portInfoLookup.find("Ethernet0") != 
m_dynamicBuffer->m_portInfoLookup.end()); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, admin_up ? PORT_READY : PORT_ADMIN_DOWN); + + // Init port buffer items + InitBufferQueue("Ethernet0|3-4", "egress_lossless_profile"); + InitBufferProfileList("Ethernet0", "ingress_lossless_profile", bufferIngProfileListTable); + InitBufferPg("Ethernet0|3-4"); + if (admin_up) + { + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + CheckQueue("Ethernet0", "Ethernet0:3-4", "egress_lossless_profile", true); + CheckProfileList("Ethernet0", true, "ingress_lossless_profile"); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile"); + } + else + { + InitBufferPg("Ethernet0|0", "ingress_lossy_profile"); + + stateBufferTable.set("Ethernet0", + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + m_dynamicBuffer->addExistingData(&stateBufferTable); + static_cast(m_dynamicBuffer)->doTask(); + + // Make sure profile list is applied after maximum buffer parameter table + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + fieldValues.clear(); + ASSERT_TRUE(appBufferPgTable.get("Ethernet0:0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "ingress_lossy_pg_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet0:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossless_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet0:0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet0:5-15", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", 
"egress_lossy_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferEgrProfileListTable.get("Ethernet0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "egress_lossless_zero_profile,egress_lossy_zero_profile"}}); + + ClearBufferObject("Ethernet0|0", CFG_BUFFER_PG_TABLE_NAME); + } + + // Remove port + ClearBufferObject("Ethernet0", CFG_PORT_TABLE_NAME); + ASSERT_FALSE(m_dynamicBuffer->m_portPgLookup.empty()); + ClearBufferObject("Ethernet0", CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + ClearBufferObject("Ethernet0", CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + ClearBufferObject("Ethernet0|3-4", CFG_BUFFER_PG_TABLE_NAME); + ClearBufferObject("Ethernet0|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + static_cast(m_dynamicBuffer)->doTask(); + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS].empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_EGRESS].empty()); + ASSERT_TRUE((appBufferPgTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferQueueTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferIngProfileListTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferEgrProfileListTable.getKeys(keys), keys.empty())); + } + } + /* * Port configuration flow * Port table items are received in different order diff --git a/tests/mock_tests/mock_table.cpp b/tests/mock_tests/mock_table.cpp index 0af0cb372f..cd2ffbaa96 100644 --- a/tests/mock_tests/mock_table.cpp +++ b/tests/mock_tests/mock_table.cpp @@ -114,4 +114,12 @@ namespace swss iter->second.swap(new_values); } } + + void ProducerStateTable::del(const std::string &key, + const std::string &op, + const std::string &prefix) + { + auto &table = 
gDB[m_pipe->getDbId()][getTableName()]; + table.erase(key); + } } From 0c789e64ac67a6b7b0da658f41dc9446fc209e80 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Wed, 22 Jun 2022 13:35:09 +0800 Subject: [PATCH 27/64] Fix qos map test in vs test (#2343) Signed-off-by: bingwang --- tests/test_qos_map.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 6b236c4fb2..39c6c717ca 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -417,9 +417,6 @@ def test_dscp_to_tc_map_applied_to_switch(self, dvs): dscp_to_tc_map_id = id break switch_oid = dvs.getSwitchOid() - # Check switch level DSCP_TO_TC_MAP doesn't before PORT_QOS_MAP|global is created - fvs = self.asic_db.get_entry(self.ASIC_SWITCH_STR, switch_oid) - assert("SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP" not in fvs) # Insert switch level map entry self.port_qos_table.set("global", [("dscp_to_tc_map", "AZURE")]) From d82874d860c91385154032204fb03bdd097877e8 Mon Sep 17 00:00:00 2001 From: anilkpan <64167306+anilkpan@users.noreply.github.com> Date: Wed, 22 Jun 2022 10:14:07 -0700 Subject: [PATCH 28/64] Fix for "orchagent crashed when trying to delete fdb static entry with swssconfig #11046" (#2332) * Fix updated to not flush static mac --- orchagent/fdborch.cpp | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index d4e0d8ffad..6788e6fb91 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -899,7 +899,12 @@ void FdbOrch::doTask(NotificationConsumer& consumer) { if (op == "ALL") { - status = sai_fdb_api->flush_fdb_entries(gSwitchId, 0, NULL); + vector attrs; + sai_attribute_t attr; + attr.id = SAI_FDB_FLUSH_ATTR_ENTRY_TYPE; + attr.value.s32 = SAI_FDB_FLUSH_ENTRY_TYPE_DYNAMIC; + attrs.push_back(attr); + status = sai_fdb_api->flush_fdb_entries(gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { 
SWSS_LOG_ERROR("Flush fdb failed, return code %x", status); @@ -1056,6 +1061,11 @@ void FdbOrch::flushFDBEntries(sai_object_id_t bridge_port_oid, attr.value.oid = vlan_oid; attrs.push_back(attr); } + + /* do not flush static mac */ + attr.id = SAI_FDB_FLUSH_ATTR_ENTRY_TYPE; + attr.value.s32 = SAI_FDB_FLUSH_ENTRY_TYPE_DYNAMIC; + attrs.push_back(attr); SWSS_LOG_INFO("Flushing FDB bridge_port_oid: 0x%" PRIx64 ", and bvid_oid:0x%" PRIx64 ".", bridge_port_oid, vlan_oid); From efb4530e4ec34d9629e676d6242b4821d2a8a163 Mon Sep 17 00:00:00 2001 From: Link Chiang Date: Thu, 23 Jun 2022 01:55:55 +0800 Subject: [PATCH 29/64] [orchagent, DTel]: report session support to set user vrf (#2326) [Dtel] Fix set user vrf in DTel report session and confirm the DTel report sent out via the user vrf --- orchagent/dtelorch.cpp | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/orchagent/dtelorch.cpp b/orchagent/dtelorch.cpp index 378a225e37..084d078452 100644 --- a/orchagent/dtelorch.cpp +++ b/orchagent/dtelorch.cpp @@ -5,6 +5,8 @@ #include "converter.h" #include "ipprefix.h" #include "swssnet.h" +#include "directory.h" +#include "vrforch.h" using namespace std; using namespace swss; @@ -13,6 +15,7 @@ extern sai_switch_api_t* sai_switch_api; extern sai_dtel_api_t* sai_dtel_api; extern sai_object_id_t gVirtualRouterId; extern sai_object_id_t gSwitchId; +extern Directory gDirectory; dtelEventLookup_t dTelEventLookup = { @@ -1152,9 +1155,14 @@ void DTelOrch::doDtelReportSessionTableTask(Consumer &consumer) } else if (fvField(i) == VRF) { - rs_attr.id = SAI_DTEL_REPORT_SESSION_ATTR_VIRTUAL_ROUTER_ID; - /* TODO: find a way to convert vrf to oid */ + string vrf_name = fvValue(i); rs_attr.value.oid = gVirtualRouterId; + if (vrf_name != "default") + { + VRFOrch* vrf_orch = gDirectory.get(); + rs_attr.value.oid = vrf_orch->getVRFid(vrf_name); + } + rs_attr.id = SAI_DTEL_REPORT_SESSION_ATTR_VIRTUAL_ROUTER_ID; report_session_attr.push_back(rs_attr); } else if (fvField(i) 
== TRUNCATE_SIZE) From 6e0fc85daeb572311258f52885f86f3528a00f66 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Thu, 23 Jun 2022 04:07:40 +0800 Subject: [PATCH 30/64] [ACL] Support stage particular match fields (#2341) What I did This PR is to fix ACL table creation failure for certain types. We saw PFCWD table failed to be created at EGRESS stage. The error logs are Jun 21 07:00:03.409283 str2-7050cx3-acs-08 ERR syncd#syncd: [none] SAI_API_ACL:_brcm_sai_create_acl_table:11205 field group config create failed with error Feature unavailable (0xfffffff0). Jun 21 07:00:03.409738 str2-7050cx3-acs-08 ERR syncd#syncd: [none] SAI_API_ACL:brcm_sai_create_acl_table:298 create table entry failed with error -2. Jun 21 07:00:03.409738 str2-7050cx3-acs-08 ERR syncd#syncd: :- sendApiResponse: api SAI_COMMON_API_CREATE failed in syncd mode: SAI_STATUS_NOT_SUPPORTED Jun 21 07:00:03.409780 str2-7050cx3-acs-08 ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_ACL_BIND_POINT_TYPE_LIST: 1:SAI_ACL_BIND_POINT_TYPE_PORT Jun 21 07:00:03.409820 str2-7050cx3-acs-08 ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS: true Jun 21 07:00:03.409820 str2-7050cx3-acs-08 ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_FIELD_TC: true Jun 21 07:00:03.410144 str2-7050cx3-acs-08 ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST: 2:SAI_ACL_ACTION_TYPE_PACKET_ACTION,SAI_ACL_ACTION_TYPE_COUNTER Jun 21 07:00:03.410144 str2-7050cx3-acs-08 ERR syncd#syncd: :- processQuadEvent: attr: SAI_ACL_TABLE_ATTR_ACL_STAGE: SAI_ACL_STAGE_EGRESS Jun 21 07:00:03.410144 str2-7050cx3-acs-08 ERR swss#orchagent: :- create: create status: SAI_STATUS_NOT_SUPPORTED Jun 21 07:00:03.410144 str2-7050cx3-acs-08 ERR swss#orchagent: :- addAclTable: Failed to create ACL table pfcwd_egress The root cause for the issue is SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS is not supported at EGRESS stage. 
This PR addressed the issue by adding match field according to the stage. For ACL type TABLE_TYPE_PFCWD and TABLE_TYPE_DROP at INGRESS stage, the match field SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS is added, while for EGRESS the field is not added. Why I did it To fix ACL table creation issue. How I verified it Verified by vstest test_acl.py::TestAcl::test_AclTableMandatoryMatchFields[ingress] PASSED [ 87%] test_acl.py::TestAcl::test_AclTableMandatoryMatchFields[egress] PASSED [ 90%] Verified by building a new image and run on a TD3 device. Signed-off-by: bingwang --- orchagent/aclorch.cpp | 70 +++++++++++++++++++++++++++++++++++++++++-- orchagent/aclorch.h | 7 +++++ tests/test_acl.py | 31 +++++++++++++++++-- 3 files changed, 104 insertions(+), 4 deletions(-) diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index 24166a9c54..aa577110ec 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -323,6 +323,38 @@ static acl_table_action_list_lookup_t defaultAclActionList = } }; +// The match fields for certain ACL table type are not exactly the same between INGRESS and EGRESS. +// For example, we can only match IN_PORT for PFCWD table type at INGRESS. 
+// Hence we need to specify stage particular matching fields in stageMandatoryMatchFields +static acl_table_match_field_lookup_t stageMandatoryMatchFields = +{ + { + // TABLE_TYPE_PFCWD + TABLE_TYPE_PFCWD, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS + } + } + } + }, + { + // TABLE_TYPE_DROP + TABLE_TYPE_DROP, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS + } + } + } + } + +}; + static acl_ip_type_lookup_t aclIpTypeLookup = { { IP_TYPE_ANY, SAI_ACL_IP_TYPE_ANY }, @@ -477,6 +509,12 @@ bool AclTableType::addAction(sai_acl_action_type_t action) return true; } +bool AclTableType::addMatch(shared_ptr match) +{ + m_matches.emplace(match->getId(), match); + return true; +} + AclTableTypeBuilder& AclTableTypeBuilder::withName(string name) { m_tableType.m_name = name; @@ -2020,6 +2058,34 @@ bool AclTable::addMandatoryActions() return true; } +bool AclTable::addStageMandatoryMatchFields() +{ + SWSS_LOG_ENTER(); + + if (stage == ACL_STAGE_UNKNOWN) + { + return false; + } + + if (stageMandatoryMatchFields.count(type.getName()) != 0) + { + auto &fields_for_stage = stageMandatoryMatchFields[type.getName()]; + if (fields_for_stage.count(stage) != 0) + { + // Add the stage particular matching fields + for (auto match : fields_for_stage[stage]) + { + type.addMatch(make_shared(match)); + SWSS_LOG_INFO("Added mandatory match field %s for table type %s stage %d", + sai_serialize_enum(match, &sai_metadata_enum_sai_acl_table_attr_t).c_str(), + type.getName().c_str(), stage); + } + } + } + + return true; +} + bool AclTable::validateAddType(const AclTableType &tableType) { SWSS_LOG_ENTER(); @@ -2983,7 +3049,6 @@ void AclOrch::initDefaultTableTypes() builder.withName(TABLE_TYPE_PFCWD) .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TC)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS)) .build() ); @@ -2991,7 +3056,6 @@ void AclOrch::initDefaultTableTypes() 
builder.withName(TABLE_TYPE_DROP) .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TC)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS)) .build() ); @@ -4108,6 +4172,8 @@ void AclOrch::doAclTableTask(Consumer &consumer) newTable.validateAddType(*tableType); + newTable.addStageMandatoryMatchFields(); + newTable.addMandatoryActions(); // validate and create/update ACL Table diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index ee17ba4a1f..ce3e9e5d63 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -116,6 +116,9 @@ typedef map> acl_action_enum_values_capabili typedef map > acl_stage_action_list_t; typedef map acl_table_action_list_lookup_t; +typedef map > acl_stage_match_field_t; +typedef map acl_table_match_field_lookup_t; + class AclRule; class AclTableMatchInterface @@ -160,6 +163,7 @@ class AclTableType const set& getActions() const; bool addAction(sai_acl_action_type_t action); + bool addMatch(shared_ptr match); private: friend class AclTableTypeBuilder; @@ -384,6 +388,9 @@ class AclTable // Add actions to ACL table if mandatory action list is required on table creation. 
bool addMandatoryActions(); + // Add stage mandatory matching fields to ACL table + bool addStageMandatoryMatchFields(); + // validate AclRule match attribute against rule and table configuration bool validateAclRuleMatch(sai_acl_entry_attr_t matchId, const AclRule& rule) const; // validate AclRule action attribute against rule and table configuration diff --git a/tests/test_acl.py b/tests/test_acl.py index c246eefe53..5c542193f7 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -1,4 +1,5 @@ import pytest +from requests import request L3_TABLE_TYPE = "L3" L3_TABLE_NAME = "L3_TEST" @@ -20,6 +21,9 @@ MIRROR_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] MIRROR_RULE_NAME = "MIRROR_TEST_RULE" +PFCWD_TABLE_TYPE = "PFCWD" +PFCWD_TABLE_NAME = "PFCWD_TEST" +PFCWD_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] class TestAcl: @pytest.yield_fixture def l3_acl_table(self, dvs_acl): @@ -59,6 +63,15 @@ def mirror_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(MIRROR_TABLE_NAME) dvs_acl.verify_acl_table_count(0) + @pytest.fixture(params=['ingress', 'egress']) + def pfcwd_acl_table(self, dvs_acl, request): + try: + dvs_acl.create_acl_table(PFCWD_TABLE_NAME, PFCWD_TABLE_TYPE, PFCWD_BIND_PORTS, request.param) + yield dvs_acl.get_acl_table_ids(1)[0], request.param + finally: + dvs_acl.remove_acl_table(PFCWD_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + @pytest.yield_fixture def setup_teardown_neighbor(self, dvs): try: @@ -548,8 +561,22 @@ def test_AclRuleRedirect(self, dvs, dvs_acl, l3_acl_table, setup_teardown_neighb dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) dvs_acl.verify_no_acl_rules() - - + + def test_AclTableMandatoryMatchFields(self, dvs, pfcwd_acl_table): + """ + The test case is to verify stage particular matching fields is applied + """ + table_oid, stage = pfcwd_acl_table + match_in_ports = False + entry = dvs.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE", table_oid) + for k, v in 
entry.items(): + if k == "SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS" and v == "true": + match_in_ports = True + + if stage == "ingress": + assert match_in_ports + else: + assert not match_in_ports class TestAclCrmUtilization: @pytest.fixture(scope="class", autouse=True) def configure_crm_polling_interval_for_test(self, dvs): From ec57bf101651b838156780b93d3300bf16bb7f74 Mon Sep 17 00:00:00 2001 From: Junhua Zhai Date: Thu, 23 Jun 2022 06:33:14 +0000 Subject: [PATCH 31/64] [macsec] Update macsec flex counter (#2338) * Default macsec poll interval 10s, except of xpn1s * Correct COUNTERS_MACSEC_NAME_MAP entry in GB_COUNTERS_DB for gearbox macsec * Support macsec flex couner config * Correct port flex counter config for gearbox * Add IN_UCAST_PKTS/IN_NON_UCAST_PKTS/OUT_UCAST_PKTS/OUT_NON_UCAST_PKTS in gearbox port counter list --- orchagent/flexcounterorch.cpp | 23 +++++++++++++++---- orchagent/flexcounterorch.h | 2 ++ orchagent/macsecorch.cpp | 42 ++++++++++++++++------------------- orchagent/macsecorch.h | 12 ++++++---- orchagent/portsorch.cpp | 34 +++++++++------------------- orchagent/portsorch.h | 4 +--- 6 files changed, 59 insertions(+), 58 deletions(-) diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index 29563d90a5..ffaac6daaf 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -11,6 +11,7 @@ #include "directory.h" #include "copporch.h" #include "routeorch.h" +#include "macsecorch.h" #include "flowcounterrouteorch.h" extern sai_port_api_t *sai_port_api; @@ -52,6 +53,9 @@ unordered_map flexCounterGroupMap = {"TUNNEL", TUNNEL_STAT_COUNTER_FLEX_COUNTER_GROUP}, {FLOW_CNT_TRAP_KEY, HOSTIF_TRAP_COUNTER_FLEX_COUNTER_GROUP}, {FLOW_CNT_ROUTE_KEY, ROUTE_FLOW_COUNTER_FLEX_COUNTER_GROUP}, + {"MACSEC_SA", COUNTERS_MACSEC_SA_GROUP}, + {"MACSEC_SA_ATTR", COUNTERS_MACSEC_SA_ATTR_GROUP}, + {"MACSEC_FLOW", COUNTERS_MACSEC_FLOW_GROUP}, }; @@ -59,7 +63,9 @@ FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector &tableNames): 
Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), - m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)) + m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), + m_gbflexCounterDb(new DBConnector("GB_FLEX_COUNTER_DB", 0)), + m_gbflexCounterGroupTable(new ProducerTable(m_gbflexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)) { SWSS_LOG_ENTER(); } @@ -119,6 +125,13 @@ void FlexCounterOrch::doTask(Consumer &consumer) vector fieldValues; fieldValues.emplace_back(POLL_INTERVAL_FIELD, value); m_flexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + if (gPortsOrch && gPortsOrch->isGearboxEnabled()) + { + if (key == PORT_KEY || key.rfind("MACSEC", 0) == 0) + { + m_gbflexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + } + } } else if(field == FLEX_COUNTER_STATUS_FIELD) { @@ -197,10 +210,12 @@ void FlexCounterOrch::doTask(Consumer &consumer) fieldValues.emplace_back(FLEX_COUNTER_STATUS_FIELD, value); m_flexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); - // Update FLEX_COUNTER_STATUS for gearbox port - if (key == PORT_KEY && gPortsOrch && gPortsOrch->isGearboxEnabled()) + if (gPortsOrch && gPortsOrch->isGearboxEnabled()) { - gPortsOrch->setGearboxFlexCounterStatus(value == "enable"); + if (key == PORT_KEY || key.rfind("MACSEC", 0) == 0) + { + m_gbflexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + } } } else if(field == FLEX_COUNTER_DELAY_STATUS_FIELD) diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index 4f9734c0e2..c00a435b68 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -25,6 +25,8 @@ class FlexCounterOrch: public Orch private: std::shared_ptr m_flexCounterDb = nullptr; std::shared_ptr m_flexCounterGroupTable = nullptr; + std::shared_ptr m_gbflexCounterDb = nullptr; + shared_ptr 
m_gbflexCounterGroupTable = nullptr; bool m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; bool m_hostif_trap_counter_enabled = false; diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index 70721979d2..ac56fcbd5d 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -22,10 +22,8 @@ #define AVAILABLE_ACL_PRIORITIES_LIMITATION (32) #define EAPOL_ETHER_TYPE (0x888e) #define PAUSE_ETHER_TYPE (0x8808) -#define MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS (1000) -#define COUNTERS_MACSEC_SA_ATTR_GROUP "COUNTERS_MACSEC_SA_ATTR" -#define COUNTERS_MACSEC_SA_GROUP "COUNTERS_MACSEC_SA" -#define COUNTERS_MACSEC_FLOW_GROUP "COUNTERS_MACSEC_FLOW" +#define MACSEC_STAT_XPN_POLLING_INTERVAL_MS (1000) +#define MACSEC_STAT_POLLING_INTERVAL_MS (10000) #define PFC_MODE_BYPASS "bypass" #define PFC_MODE_ENCRYPT "encrypt" #define PFC_MODE_STRICT_ENCRYPT "strict_encrypt" @@ -608,37 +606,35 @@ MACsecOrch::MACsecOrch( m_applPortTable(app_db, APP_PORT_TABLE_NAME), m_counter_db("COUNTERS_DB", 0), m_macsec_counters_map(&m_counter_db, COUNTERS_MACSEC_NAME_MAP), - m_macsec_flow_tx_counters_map(&m_counter_db, COUNTERS_MACSEC_FLOW_TX_NAME_MAP), - m_macsec_flow_rx_counters_map(&m_counter_db, COUNTERS_MACSEC_FLOW_RX_NAME_MAP), - m_macsec_sa_tx_counters_map(&m_counter_db, COUNTERS_MACSEC_SA_TX_NAME_MAP), - m_macsec_sa_rx_counters_map(&m_counter_db, COUNTERS_MACSEC_SA_RX_NAME_MAP), + m_gb_counter_db("GB_COUNTERS_DB", 0), + m_gb_macsec_counters_map(&m_gb_counter_db, COUNTERS_MACSEC_NAME_MAP), m_macsec_sa_attr_manager( COUNTERS_MACSEC_SA_ATTR_GROUP, StatsMode::READ, - MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + MACSEC_STAT_XPN_POLLING_INTERVAL_MS, true), m_macsec_sa_stat_manager( COUNTERS_MACSEC_SA_GROUP, StatsMode::READ, - MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + MACSEC_STAT_POLLING_INTERVAL_MS, true), m_macsec_flow_stat_manager( COUNTERS_MACSEC_FLOW_GROUP, StatsMode::READ, - 
MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + MACSEC_STAT_POLLING_INTERVAL_MS, true), m_gb_macsec_sa_attr_manager( "GB_FLEX_COUNTER_DB", COUNTERS_MACSEC_SA_ATTR_GROUP, StatsMode::READ, - MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + MACSEC_STAT_XPN_POLLING_INTERVAL_MS, true), m_gb_macsec_sa_stat_manager( "GB_FLEX_COUNTER_DB", COUNTERS_MACSEC_SA_GROUP, StatsMode::READ, - MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + MACSEC_STAT_POLLING_INTERVAL_MS, true), m_gb_macsec_flow_stat_manager( "GB_FLEX_COUNTER_DB", COUNTERS_MACSEC_FLOW_GROUP, StatsMode::READ, - MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true) + MACSEC_STAT_POLLING_INTERVAL_MS, true) { SWSS_LOG_ENTER(); } @@ -2329,6 +2325,13 @@ FlexCounterManager& MACsecOrch::MACsecFlowStatManager(MACsecOrchContext &ctx) return m_macsec_flow_stat_manager; } +Table& MACsecOrch::MACsecCountersMap(MACsecOrchContext &ctx) +{ + if (ctx.get_gearbox_phy() != nullptr) + return m_gb_macsec_counters_map; + return m_macsec_counters_map; +} + void MACsecOrch::installCounter( MACsecOrchContext &ctx, CounterType counter_type, @@ -2350,19 +2353,12 @@ void MACsecOrch::installCounter( { case CounterType::MACSEC_SA_ATTR: MACsecSaAttrStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); - m_macsec_counters_map.set("", fields); + MACsecCountersMap(ctx).set("", fields); break; case CounterType::MACSEC_SA: MACsecSaStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); - if (direction == SAI_MACSEC_DIRECTION_EGRESS) - { - m_macsec_sa_tx_counters_map.set("", fields); - } - else - { - m_macsec_sa_rx_counters_map.set("", fields); - } + MACsecCountersMap(ctx).set("", fields); break; case CounterType::MACSEC_FLOW: diff --git a/orchagent/macsecorch.h b/orchagent/macsecorch.h index 2472d8c0ef..9c6e2be636 100644 --- a/orchagent/macsecorch.h +++ b/orchagent/macsecorch.h @@ -16,6 +16,10 @@ using namespace swss; +#define COUNTERS_MACSEC_SA_ATTR_GROUP "COUNTERS_MACSEC_SA_ATTR" +#define 
COUNTERS_MACSEC_SA_GROUP "COUNTERS_MACSEC_SA" +#define COUNTERS_MACSEC_FLOW_GROUP "COUNTERS_MACSEC_FLOW" + // AN is a 2 bit number, it can only be 0, 1, 2 or 3 #define MAX_SA_NUMBER (3) @@ -63,10 +67,8 @@ class MACsecOrch : public Orch DBConnector m_counter_db; Table m_macsec_counters_map; - Table m_macsec_flow_tx_counters_map; - Table m_macsec_flow_rx_counters_map; - Table m_macsec_sa_tx_counters_map; - Table m_macsec_sa_rx_counters_map; + DBConnector m_gb_counter_db; + Table m_gb_macsec_counters_map; Table m_applPortTable; FlexCounterManager m_macsec_sa_attr_manager; FlexCounterManager m_macsec_sa_stat_manager; @@ -226,6 +228,8 @@ class MACsecOrch : public Orch const std::string &obj_name, sai_object_id_t obj_id); + Table& MACsecCountersMap(MACsecOrchContext &ctx); + /* Flex Counter Manager */ FlexCounterManager& MACsecSaStatManager(MACsecOrchContext &ctx); FlexCounterManager& MACsecSaAttrStatManager(MACsecOrchContext &ctx); diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 6d06c6318f..7b90254287 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -248,7 +248,11 @@ const vector port_stat_ids = const vector gbport_stat_ids = { SAI_PORT_STAT_IF_IN_OCTETS, + SAI_PORT_STAT_IF_IN_UCAST_PKTS, + SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS, SAI_PORT_STAT_IF_OUT_OCTETS, + SAI_PORT_STAT_IF_OUT_UCAST_PKTS, + SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS, SAI_PORT_STAT_IF_IN_DISCARDS, SAI_PORT_STAT_IF_OUT_DISCARDS, SAI_PORT_STAT_IF_IN_ERRORS, @@ -324,7 +328,7 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vectordel(key); } -std::unordered_set PortsOrch::generateCounterStats(const string& type) +std::unordered_set PortsOrch::generateCounterStats(const string& type, bool gearbox) { std::unordered_set counter_stats; if (type == PORT_STAT_COUNTER_FLEX_COUNTER_GROUP) { - for (const auto& it: port_stat_ids) - { - counter_stats.emplace(sai_serialize_port_stat(it)); - } - } - else if (type == GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP) - { - for (const 
auto& it: gbport_stat_ids) + auto& stat_ids = gearbox ? gbport_stat_ids : port_stat_ids; + for (const auto& it: stat_ids) { counter_stats.emplace(sai_serialize_port_stat(it)); } @@ -6982,18 +6980,6 @@ std::unordered_set PortsOrch::generateCounterStats(const string& ty return counter_stats; } -void PortsOrch::setGearboxFlexCounterStatus(bool enabled) -{ - if (enabled) - { - gb_port_stat_manager.enableFlexCounterGroup(); - } - else - { - gb_port_stat_manager.disableFlexCounterGroup(); - } -} - void PortsOrch::updateGearboxPortOperStatus(const Port& port) { if (!isGearboxEnabled()) diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index ab35277d80..c820d6969d 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -20,7 +20,6 @@ #define VLAN_TAG_LEN 4 #define PORT_STAT_COUNTER_FLEX_COUNTER_GROUP "PORT_STAT_COUNTER" #define PORT_RATE_COUNTER_FLEX_COUNTER_GROUP "PORT_RATE_COUNTER" -#define GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP "GBPORT_STAT_COUNTER" #define PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP "PORT_BUFFER_DROP_STAT" #define QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP "QUEUE_STAT_COUNTER" #define QUEUE_WATERMARK_STAT_COUNTER_FLEX_COUNTER_GROUP "QUEUE_WATERMARK_STAT_COUNTER" @@ -170,7 +169,6 @@ class PortsOrch : public Orch, public Subject bool getPortOperStatus(const Port& port, sai_port_oper_status_t& status) const; - void setGearboxFlexCounterStatus(bool enabled); void updateGearboxPortOperStatus(const Port& port); bool decrFdbCount(const string& alias, int count); @@ -376,7 +374,7 @@ class PortsOrch : public Orch, public Subject void voqSyncDelLagMember(Port &lag, Port &port); unique_ptr m_lagIdAllocator; - std::unordered_set generateCounterStats(const string& type); + std::unordered_set generateCounterStats(const string& type, bool gearbox = false); }; #endif /* SWSS_PORTSORCH_H */ From f88f992219ed1f162886b016738be48d4364e793 Mon Sep 17 00:00:00 2001 From: Andriy Yurkiv <70649192+ayurkiv-nvda@users.noreply.github.com> Date: Fri, 24 Jun 2022 
08:28:36 +0300 Subject: [PATCH 32/64] [mock_tests] Add Sflow Orch UTs (#2295) - What I did Added Sflow gtest - Why I did it Improve sflow orcagent coverage - How I verified it ayurkiv@487e531606e9:/sonic/src/sonic-swss/tests/mock_tests$ ./tests --gtest_filter=SflowOrchTest* Running main() from /build/googletest-YnT0O3/googletest-1.10.0.20201025/googletest/src/gtest_main.cc Note: Google Test filter = SflowOrchTest* [==========] Running 2 tests from 1 test suite. [----------] Global test environment set-up. [----------] 2 tests from SflowOrchTest [ RUN ] SflowOrchTest.SflowEnableDisable [ OK ] SflowOrchTest.SflowEnableDisable (46 ms) [ RUN ] SflowOrchTest.SflowCreateDelete [ OK ] SflowOrchTest.SflowCreateDelete (46 ms) [----------] 2 tests from SflowOrchTest (92 ms total) [----------] Global test environment tear-down [==========] 2 tests from 1 test suite ran. (93 ms total) [ PASSED ] 2 tests. Signed-off-by: Andriy Yurkiv --- tests/mock_tests/Makefile.am | 1 + tests/mock_tests/mock_orchagent_main.h | 1 + tests/mock_tests/portal.h | 19 ++ tests/mock_tests/sfloworh_ut.cpp | 372 +++++++++++++++++++++++++ tests/mock_tests/ut_saihelper.cpp | 1 + 5 files changed, 394 insertions(+) create mode 100644 tests/mock_tests/sfloworh_ut.cpp diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 54fb4003a2..553cd18bfe 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -31,6 +31,7 @@ tests_SOURCES = aclorch_ut.cpp \ copporch_ut.cpp \ saispy_ut.cpp \ consumer_ut.cpp \ + sfloworh_ut.cpp \ ut_saihelper.cpp \ mock_orchagent_main.cpp \ mock_dbconnector.cpp \ diff --git a/tests/mock_tests/mock_orchagent_main.h b/tests/mock_tests/mock_orchagent_main.h index 57df931cb5..f41c5b29a5 100644 --- a/tests/mock_tests/mock_orchagent_main.h +++ b/tests/mock_tests/mock_orchagent_main.h @@ -82,3 +82,4 @@ extern sai_queue_api_t *sai_queue_api; extern sai_udf_api_t* sai_udf_api; extern sai_mpls_api_t* sai_mpls_api; extern sai_counter_api_t* 
sai_counter_api; +extern sai_samplepacket_api_t *sai_samplepacket_api; diff --git a/tests/mock_tests/portal.h b/tests/mock_tests/portal.h index 94b2051211..8f0c4ab2db 100644 --- a/tests/mock_tests/portal.h +++ b/tests/mock_tests/portal.h @@ -6,6 +6,7 @@ #include "aclorch.h" #include "crmorch.h" #include "copporch.h" +#include "sfloworch.h" #include "directory.h" #undef protected @@ -82,6 +83,24 @@ struct Portal } }; + struct SflowOrchInternal + { + static bool getSflowStatusEnable(SflowOrch &obj) + { + return obj.m_sflowStatus; + } + + static SflowRateSampleMap getSflowSampleMap(SflowOrch &obj) + { + return obj.m_sflowRateSampleMap; + } + + static SflowPortInfoMap getSflowPortInfoMap(SflowOrch &obj) + { + return obj.m_sflowPortInfoMap; + } + }; + struct DirectoryInternal { template diff --git a/tests/mock_tests/sfloworh_ut.cpp b/tests/mock_tests/sfloworh_ut.cpp new file mode 100644 index 0000000000..d3d4d0defa --- /dev/null +++ b/tests/mock_tests/sfloworh_ut.cpp @@ -0,0 +1,372 @@ +#include +#include +#include +#include +#include + +#include "ut_helper.h" +#include "mock_orchagent_main.h" + +using namespace swss; + +namespace sflow_test +{ + class MockSflowOrch final + { + public: + MockSflowOrch() + { + this->appDb = std::make_shared("APPL_DB", 0); + std::vector sflow_tables = { + APP_SFLOW_TABLE_NAME, + APP_SFLOW_SESSION_TABLE_NAME, + APP_SFLOW_SAMPLE_RATE_TABLE_NAME + }; + sflowOrch = std::make_shared(this->appDb.get(), sflow_tables); + } + ~MockSflowOrch() = default; + + void doSflowTableTask(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_SFLOW_TABLE_NAME, 1, 1), + this->sflowOrch.get(), APP_SFLOW_TABLE_NAME + )); + + consumer->addToSync(entries); + static_cast(this->sflowOrch.get())->doTask(*consumer); + } + + void doSflowSessionTableTask(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = 
std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_SFLOW_SESSION_TABLE_NAME, 1, 1), + this->sflowOrch.get(), APP_SFLOW_SESSION_TABLE_NAME + )); + + consumer->addToSync(entries); + static_cast(this->sflowOrch.get())->doTask(*consumer); + } + + void doSflowSampleTableTask(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_SFLOW_SAMPLE_RATE_TABLE_NAME, 1, 1), + this->sflowOrch.get(), APP_SFLOW_SAMPLE_RATE_TABLE_NAME + )); + + consumer->addToSync(entries); + static_cast(this->sflowOrch.get())->doTask(*consumer); + } + + SflowOrch& get() + { + return *sflowOrch; + } + + private: + std::shared_ptr sflowOrch; + std::shared_ptr appDb; + }; + + class SflowOrchTest : public ::testing::Test + { + public: + SflowOrchTest() + { + this->initDb(); + } + virtual ~SflowOrchTest() = default; + + void SetUp() override + { + this->initSaiApi(); + this->initSwitch(); + this->initOrch(); + this->initPorts(); + } + + void TearDown() override + { + this->deinitOrch(); + this->deinitSwitch(); + this->deinitSaiApi(); + } + + private: + void initSaiApi() + { + std::map profileMap = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + auto status = ut_helper::initSaiApi(profileMap); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void deinitSaiApi() + { + auto status = ut_helper::uninitSaiApi(); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void initSwitch() + { + sai_status_t status; + sai_attribute_t attr; + + // Create switch + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, 
SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get switch default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + } + + void deinitSwitch() + { + // Remove switch + auto status = sai_switch_api->remove_switch(gSwitchId); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gSwitchId = SAI_NULL_OBJECT_ID; + gVirtualRouterId = SAI_NULL_OBJECT_ID; + } + + void initOrch() + { + // + // SwitchOrch + // + + TableConnector switchCapTableStateDb(this->stateDb.get(), "SWITCH_CAPABILITY"); + TableConnector asicSensorsTableCfgDb(this->configDb.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector switchTableAppDb(this->appDb.get(), APP_SWITCH_TABLE_NAME); + + std::vector switchTableList = { + asicSensorsTableCfgDb, + switchTableAppDb + }; + + gSwitchOrch = new SwitchOrch(this->appDb.get(), switchTableList, switchCapTableStateDb); + gDirectory.set(gSwitchOrch); + resourcesList.push_back(gSwitchOrch); + + // + // PortsOrch + // + + const int portsorchBasePri = 40; + + std::vector portTableList = { + { APP_PORT_TABLE_NAME, portsorchBasePri + 5 }, + { APP_VLAN_TABLE_NAME, portsorchBasePri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorchBasePri }, + { APP_LAG_TABLE_NAME, portsorchBasePri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorchBasePri } + }; + + gPortsOrch = new PortsOrch(this->appDb.get(), this->stateDb.get(), portTableList, this->chassisAppDb.get()); + gDirectory.set(gPortsOrch); + resourcesList.push_back(gPortsOrch); + + // + // QosOrch + // + + std::vector qosTableList = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + 
CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(this->configDb.get(), qosTableList); + gDirectory.set(gQosOrch); + resourcesList.push_back(gQosOrch); + + // + // BufferOrch + // + + std::vector bufferTableList = { + APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME + }; + gBufferOrch = new BufferOrch(this->appDb.get(), this->configDb.get(), this->stateDb.get(), bufferTableList); + gDirectory.set(gBufferOrch); + resourcesList.push_back(gBufferOrch); + + // + // FlexCounterOrch + // + + std::vector flexCounterTableList = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + auto flexCounterOrch = new FlexCounterOrch(this->configDb.get(), flexCounterTableList); + gDirectory.set(flexCounterOrch); + resourcesList.push_back(flexCounterOrch); + } + + void deinitOrch() + { + std::reverse(this->resourcesList.begin(), this->resourcesList.end()); + for (auto &it : this->resourcesList) + { + delete it; + } + + gSwitchOrch = nullptr; + gPortsOrch = nullptr; + gQosOrch = nullptr; + gBufferOrch = nullptr; + + Portal::DirectoryInternal::clear(gDirectory); + EXPECT_TRUE(Portal::DirectoryInternal::empty(gDirectory)); + } + + void initPorts() + { + auto portTable = Table(this->appDb.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + // Set PortInitDone + portTable.set("PortInitDone", { { "lanes", "0" } }); + 
gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + } + + void initDb() + { + this->appDb = std::make_shared("APPL_DB", 0); + this->configDb = std::make_shared("CONFIG_DB", 0); + this->stateDb = std::make_shared("STATE_DB", 0); + this->chassisAppDb = std::make_shared("CHASSIS_APP_DB", 0); + } + + std::shared_ptr appDb; + std::shared_ptr configDb; + std::shared_ptr stateDb; + std::shared_ptr chassisAppDb; + + std::vector resourcesList; + }; + + /* Test enabling/disabling SFLOW */ + TEST_F(SflowOrchTest, SflowEnableDisable) + { + MockSflowOrch mock_orch; + { + auto table1 = deque( + { + { + "global", + SET_COMMAND, + { + {"admin_state", "down"} + } + } + }); + mock_orch.doSflowTableTask(table1); + + ASSERT_FALSE(Portal::SflowOrchInternal::getSflowStatusEnable(mock_orch.get())); + } + { + auto table2 = deque( + { + { + "global", + SET_COMMAND, + { + {"admin_state", "up"} + } + } + }); + mock_orch.doSflowTableTask(table2); + + ASSERT_TRUE(Portal::SflowOrchInternal::getSflowStatusEnable(mock_orch.get())); + } + } + + /* Test create/delete SFLOW */ + TEST_F(SflowOrchTest, SflowCreateDelete) + { + MockSflowOrch mock_orch; + { + auto table3 = deque( + { + { + "global", + SET_COMMAND, + { + {"admin_state", "up"}, + } + } + }); + mock_orch.doSflowTableTask(table3); + ASSERT_TRUE(Portal::SflowOrchInternal::getSflowStatusEnable(mock_orch.get())); + } + { + auto table4 = deque( + { + { + "global", + DEL_COMMAND, + { + {"admin_state", "up"}, + } + } + }); + mock_orch.doSflowTableTask(table4); + ASSERT_FALSE(Portal::SflowOrchInternal::getSflowStatusEnable(mock_orch.get())); + } + } +} + + diff --git a/tests/mock_tests/ut_saihelper.cpp b/tests/mock_tests/ut_saihelper.cpp index 80a2d6ee38..40594cc32c 100644 --- a/tests/mock_tests/ut_saihelper.cpp +++ b/tests/mock_tests/ut_saihelper.cpp @@ -66,6 +66,7 @@ namespace ut_helper sai_api_query(SAI_API_SWITCH, (void **)&sai_switch_api); sai_api_query(SAI_API_BRIDGE, (void **)&sai_bridge_api); 
sai_api_query(SAI_API_VIRTUAL_ROUTER, (void **)&sai_virtual_router_api); + sai_api_query(SAI_API_SAMPLEPACKET, (void **)&sai_samplepacket_api); sai_api_query(SAI_API_PORT, (void **)&sai_port_api); sai_api_query(SAI_API_LAG, (void **)&sai_lag_api); sai_api_query(SAI_API_VLAN, (void **)&sai_vlan_api); From 1ed0b4bedae40ca45627f33949219b7cc912217d Mon Sep 17 00:00:00 2001 From: Junhua Zhai Date: Fri, 24 Jun 2022 10:37:56 +0000 Subject: [PATCH 33/64] [macsec] Refactor the logic of macsec name map (#2348) * Add/remove macsec name map w/o gearbox correctly * Add macsec counter unit test --- orchagent/macsecorch.cpp | 17 ++----------- tests/test_macsec.py | 53 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 15 deletions(-) diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index ac56fcbd5d..dc2c9d7b43 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -2340,10 +2340,6 @@ void MACsecOrch::installCounter( sai_object_id_t obj_id, const std::vector &stats) { - FieldValueTuple tuple(obj_name, sai_serialize_object_id(obj_id)); - vector fields; - fields.push_back(tuple); - std::unordered_set counter_stats; for (const auto &stat : stats) { @@ -2353,12 +2349,11 @@ void MACsecOrch::installCounter( { case CounterType::MACSEC_SA_ATTR: MACsecSaAttrStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); - MACsecCountersMap(ctx).set("", fields); break; case CounterType::MACSEC_SA: MACsecSaStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); - MACsecCountersMap(ctx).set("", fields); + MACsecCountersMap(ctx).hset("", obj_name, sai_serialize_object_id(obj_id)); break; case CounterType::MACSEC_FLOW: @@ -2383,19 +2378,11 @@ void MACsecOrch::uninstallCounter( { case CounterType::MACSEC_SA_ATTR: MACsecSaAttrStatManager(ctx).clearCounterIdList(obj_id); - m_counter_db.hdel(COUNTERS_MACSEC_NAME_MAP, obj_name); break; case CounterType::MACSEC_SA: MACsecSaStatManager(ctx).clearCounterIdList(obj_id); - 
if (direction == SAI_MACSEC_DIRECTION_EGRESS) - { - m_counter_db.hdel(COUNTERS_MACSEC_SA_TX_NAME_MAP, obj_name); - } - else - { - m_counter_db.hdel(COUNTERS_MACSEC_SA_RX_NAME_MAP, obj_name); - } + MACsecCountersMap(ctx).hdel("", obj_name); break; case CounterType::MACSEC_FLOW: diff --git a/tests/test_macsec.py b/tests/test_macsec.py index 61c90d84a8..f2f8e8843e 100644 --- a/tests/test_macsec.py +++ b/tests/test_macsec.py @@ -1,9 +1,11 @@ from swsscommon import swsscommon +from swsscommon.swsscommon import CounterTable, MacsecCounter import conftest import functools import typing import re +import time def to_string(value): @@ -389,6 +391,21 @@ def get_macsec_sa( print(info.group(0)) return info.group(0) + @macsec_sa() + def get_macsec_xpn_counter( + self, + sai: str) -> int: + counter_table = CounterTable(self.dvs.get_counters_db().db_connection) + for i in range(3): + r, value = counter_table.hget( + MacsecCounter(), + sai, + "SAI_MACSEC_SA_ATTR_CURRENT_XPN") + if r: return int(value) + time.sleep(1) # wait a moment for polling counter + + return None + class TestMACsec(object): def init_macsec( @@ -658,6 +675,18 @@ def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog): peer_mac_address, macsec_port_identifier, 0)) + assert( + inspector.get_macsec_xpn_counter( + port_name, + local_mac_address, + macsec_port_identifier, + 0) == packet_number) + assert( + inspector.get_macsec_xpn_counter( + port_name, + peer_mac_address, + macsec_port_identifier, + 0) == packet_number) self.rekey_macsec( wpa, port_name, @@ -683,6 +712,18 @@ def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog): peer_mac_address, macsec_port_identifier, 1)) + assert( + inspector.get_macsec_xpn_counter( + port_name, + local_mac_address, + macsec_port_identifier, + 1) == packet_number) + assert( + inspector.get_macsec_xpn_counter( + port_name, + peer_mac_address, + macsec_port_identifier, + 1) == packet_number) assert( not inspector.get_macsec_sa( 
macsec_port, @@ -695,6 +736,18 @@ def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog): peer_mac_address, macsec_port_identifier, 0)) + assert( + not inspector.get_macsec_xpn_counter( + port_name, + local_mac_address, + macsec_port_identifier, + 0) == packet_number) + assert( + not inspector.get_macsec_xpn_counter( + port_name, + peer_mac_address, + macsec_port_identifier, + 0) == packet_number) # Exit MACsec port self.deinit_macsec( wpa, From 1b8bd94ef7e44f6089fc12efd456a3caa7aafd3f Mon Sep 17 00:00:00 2001 From: Ravindranath C K Date: Fri, 24 Jun 2022 20:20:52 +0530 Subject: [PATCH 34/64] Create ACL table fails due to incorrect check for supported ACL actions #11235 (#2351) Signed-off-by: rck-innovium --- orchagent/aclorch.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index aa577110ec..ddeca7adf4 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -153,7 +153,7 @@ static const acl_capabilities_t defaultAclActionsSupported = } }; -static acl_table_action_list_lookup_t defaultAclActionList = +static acl_table_action_list_lookup_t defaultAclActionList = { { // L3 @@ -326,7 +326,7 @@ static acl_table_action_list_lookup_t defaultAclActionList = // The match fields for certain ACL table type are not exactly the same between INGRESS and EGRESS. // For example, we can only match IN_PORT for PFCWD table type at INGRESS. 
// Hence we need to specify stage particular matching fields in stageMandatoryMatchFields -static acl_table_match_field_lookup_t stageMandatoryMatchFields = +static acl_table_match_field_lookup_t stageMandatoryMatchFields = { { // TABLE_TYPE_PFCWD @@ -2045,7 +2045,7 @@ bool AclTable::addMandatoryActions() // Add the default action list for (auto action : defaultAclActionList[type.getName()][stage]) { - if (m_pAclOrch->isAclActionSupported(stage, acl_action)) + if (m_pAclOrch->isAclActionSupported(stage, action)) { SWSS_LOG_INFO("Added default action for table type %s stage %s", type.getName().c_str(), From 84e9b07a175decc5afcd0e6aa4004365c9cfbd68 Mon Sep 17 00:00:00 2001 From: Yakiv Huryk <62013282+Yakiv-Huryk@users.noreply.github.com> Date: Fri, 24 Jun 2022 20:06:12 +0300 Subject: [PATCH 35/64] [fdborch] fix heap-use-after-free in clearFdbEntry() (#2353) - What I did using a copy of FDBEntry fields (stored in FDBUpdate) instead of a reference since the reference gets invalidated in the storeFdbEntryState() simplified clearFdbEntry() interface - Why I did it To fix the memory usage issue The issue is that the SWSS_LOG_INFO() uses the mac&, port_alias&, and bv_id& which are invalidated in the storeFdbEntryState(). 
- How I verified it Run the tests that were used to find the issues and checked the ASAN report Signed-off-by: Yakiv Huryk --- orchagent/fdborch.cpp | 21 +++++++++------------ orchagent/fdborch.h | 2 +- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index 6788e6fb91..a401148836 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -177,31 +177,28 @@ bool FdbOrch::storeFdbEntryState(const FdbUpdate& update) /* clears stateDb and decrements corresponding internal fdb counters */ -void FdbOrch::clearFdbEntry(const MacAddress& mac, - const sai_object_id_t& bv_id, - const string& port_alias) +void FdbOrch::clearFdbEntry(const FdbEntry& entry) { FdbUpdate update; - update.entry.mac = mac; - update.entry.bv_id = bv_id; + update.entry = entry; update.add = false; /* Fetch Vlan and decrement the counter */ Port temp_vlan; - if (m_portsOrch->getPort(bv_id, temp_vlan)) + if (m_portsOrch->getPort(entry.bv_id, temp_vlan)) { m_portsOrch->decrFdbCount(temp_vlan.m_alias, 1); } /* Decrement port fdb_counter */ - m_portsOrch->decrFdbCount(port_alias, 1); + m_portsOrch->decrFdbCount(entry.port_name, 1); /* Remove the FdbEntry from the internal cache, update state DB and CRM counter */ storeFdbEntryState(update); notify(SUBJECT_TYPE_FDB_CHANGE, &update); SWSS_LOG_INFO("FdbEntry removed from internal cache, MAC: %s , port: %s, BVID: 0x%" PRIx64, - mac.to_string().c_str(), port_alias.c_str(), bv_id); + update.entry.mac.to_string().c_str(), update.entry.port_name.c_str(), update.entry.bv_id); } /* @@ -224,7 +221,7 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, auto curr = itr++; if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac)) { - clearFdbEntry(curr->first.mac, curr->first.bv_id, curr->first.port_name); + clearFdbEntry(curr->first); } } } @@ -238,7 +235,7 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, { if (curr->second.type != "static" 
&& (curr->first.mac == mac || mac == flush_mac)) { - clearFdbEntry(curr->first.mac, curr->first.bv_id, curr->first.port_name); + clearFdbEntry(curr->first); } } } @@ -253,7 +250,7 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, { if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac)) { - clearFdbEntry(curr->first.mac, curr->first.bv_id, curr->first.port_name); + clearFdbEntry(curr->first); } } } @@ -268,7 +265,7 @@ void FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, { if (curr->second.type != "static" && (curr->first.mac == mac || mac == flush_mac)) { - clearFdbEntry(curr->first.mac, curr->first.bv_id, curr->first.port_name); + clearFdbEntry(curr->first); } } } diff --git a/orchagent/fdborch.h b/orchagent/fdborch.h index 3e53dcd394..949ffbf289 100644 --- a/orchagent/fdborch.h +++ b/orchagent/fdborch.h @@ -123,7 +123,7 @@ class FdbOrch: public Orch, public Subject, public Observer bool storeFdbEntryState(const FdbUpdate& update); void notifyTunnelOrch(Port& port); - void clearFdbEntry(const MacAddress&, const sai_object_id_t&, const string&); + void clearFdbEntry(const FdbEntry&); void handleSyncdFlushNotif(const sai_object_id_t&, const sai_object_id_t&, const MacAddress& ); }; From 37349cfc85d3c6f0b16f4bc086179af632415cff Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Sat, 25 Jun 2022 01:10:39 +0800 Subject: [PATCH 36/64] [swssconfig] Optimize performance of swssconfig (#2336) - What I did Optimize swssconfig: 1. Use unix socket 2. Cache producer table to avoid create it for same table name - Why I did it We found that generating large scale static routes via swssconfig is very slow. - How I verified it After the optimization, generating 100K routes via swssconfig take 2 seconds, however, before the optimization it takes > 60 seconds. 
--- swssconfig/swssconfig.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/swssconfig/swssconfig.cpp b/swssconfig/swssconfig.cpp index a41ef0ecdd..e61d038381 100644 --- a/swssconfig/swssconfig.cpp +++ b/swssconfig/swssconfig.cpp @@ -41,7 +41,10 @@ void dump_db_item(KeyOpFieldsValuesTuple &db_item) bool write_db_data(vector &db_items) { - DBConnector db("APPL_DB", 0, true); + DBConnector db("APPL_DB", 0, false); + RedisPipeline pipeline(&db); // dtor of RedisPipeline will automatically flush data + unordered_map table_map; + for (auto &db_item : db_items) { dump_db_item(db_item); @@ -55,18 +58,19 @@ bool write_db_data(vector &db_items) } string table_name = key.substr(0, pos); string key_name = key.substr(pos + 1); - ProducerStateTable producer(&db, table_name); + auto ret = table_map.emplace(std::piecewise_construct, std::forward_as_tuple(table_name), std::forward_as_tuple(&pipeline, table_name, true)); if (kfvOp(db_item) == SET_COMMAND) - producer.set(key_name, kfvFieldsValues(db_item), SET_COMMAND); + ret.first->second.set(key_name, kfvFieldsValues(db_item), SET_COMMAND); else if (kfvOp(db_item) == DEL_COMMAND) - producer.del(key_name, DEL_COMMAND); + ret.first->second.del(key_name, DEL_COMMAND); else { SWSS_LOG_ERROR("Invalid operation: %s\n", kfvOp(db_item).c_str()); return false; } } + return true; } From 93af69c5cb9a78465604974b59d1b1ed709d2b10 Mon Sep 17 00:00:00 2001 From: Vivek R Date: Fri, 24 Jun 2022 11:00:51 -0700 Subject: [PATCH 37/64] [PFC_WD] Avoid applying ZeroBuffer Profiles to ingress PG when a PFC storm is detected (#2304) What I did Avoid dropping traffic that is ingressing the port/pg that is in storm. 
The code changes in this PR avoid creating the ingress zero pool and profile and does not attach any zero profile to the ingress pg when pfcwd is triggered Revert changes related to #1480 where the retry mechanism was added to BufferOrch which caches the task retries and while the PG is locked by PfcWdZeroBufferHandler. Revert changes related to #2164 in PfcWdZeroBufferHandler & ZeroBufferProfile & BufferOrch. Updated UT's accordingly How I verified it UT's. Ran the sonic-mgmt test with these changes Azure/sonic-mgmt#5665 and verified if they've passed. Signed-off-by: Vivek Reddy Karri --- orchagent/bufferorch.cpp | 177 ++--------------- orchagent/bufferorch.h | 12 -- orchagent/pfcactionhandler.cpp | 180 ++++------------- orchagent/pfcactionhandler.h | 26 +-- orchagent/port.h | 8 +- orchagent/portsorch.cpp | 2 - tests/mock_tests/portsorch_ut.cpp | 320 ++---------------------------- 7 files changed, 93 insertions(+), 632 deletions(-) diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index f9b91e7a16..bfb5978067 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -48,11 +48,7 @@ BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *st m_flexCounterTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_TABLE)), m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), m_countersDb(new DBConnector("COUNTERS_DB", 0)), - m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE), - m_ingressZeroBufferPool(SAI_NULL_OBJECT_ID), - m_egressZeroBufferPool(SAI_NULL_OBJECT_ID), - m_ingressZeroPoolRefCount(0), - m_egressZeroPoolRefCount(0) + m_stateBufferMaximumValueTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE) { SWSS_LOG_ENTER(); initTableHandlers(); @@ -314,65 +310,6 @@ const object_reference_map &BufferOrch::getBufferPoolNameOidMap(void) return *m_buffer_type_maps[APP_BUFFER_POOL_TABLE_NAME]; } -void BufferOrch::lockZeroBufferPool(bool ingress) -{ - if 
(ingress) - m_ingressZeroPoolRefCount++; - else - m_egressZeroPoolRefCount++; -} - -void BufferOrch::unlockZeroBufferPool(bool ingress) -{ - sai_object_id_t pool = SAI_NULL_OBJECT_ID; - if (ingress) - { - if (--m_ingressZeroPoolRefCount <= 0) - { - pool = m_ingressZeroBufferPool; - m_ingressZeroBufferPool = SAI_NULL_OBJECT_ID; - } - } - else - { - if (--m_egressZeroPoolRefCount <= 0) - { - pool = m_egressZeroBufferPool; - m_egressZeroBufferPool = SAI_NULL_OBJECT_ID; - } - } - - if (pool != SAI_NULL_OBJECT_ID) - { - auto sai_status = sai_buffer_api->remove_buffer_pool(pool); - if (SAI_STATUS_SUCCESS != sai_status) - { - SWSS_LOG_ERROR("Failed to remove buffer pool, rv:%d", sai_status); - task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) - { - return; - } - } - else - { - SWSS_LOG_NOTICE("Zero buffer pool has been successfully removed"); - } - } -} - -void BufferOrch::setZeroBufferPool(bool ingress, sai_object_id_t pool) -{ - if (ingress) - { - m_ingressZeroBufferPool = pool; - } - else - { - m_egressZeroBufferPool = pool; - } -} - task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); @@ -381,8 +318,6 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) string map_type_name = APP_BUFFER_POOL_TABLE_NAME; string object_name = kfvKey(tuple); string op = kfvOp(tuple); - sai_buffer_pool_type_t pool_direction = SAI_BUFFER_POOL_TYPE_INGRESS; - bool creating_zero_pool = false; SWSS_LOG_DEBUG("object name:%s", object_name.c_str()); if (m_buffer_type_maps[map_type_name]->find(object_name) != m_buffer_type_maps[map_type_name]->end()) @@ -396,16 +331,6 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) } } SWSS_LOG_DEBUG("processing command:%s", op.c_str()); - if (object_name == "ingress_zero_pool") - { - creating_zero_pool = true; - pool_direction = 
SAI_BUFFER_POOL_TYPE_INGRESS; - } - else if (object_name == "egress_zero_pool") - { - creating_zero_pool = true; - pool_direction = SAI_BUFFER_POOL_TYPE_EGRESS; - } if (op == SET_COMMAND) { @@ -453,11 +378,6 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_invalid_entry; } attr.id = SAI_BUFFER_POOL_ATTR_TYPE; - if (creating_zero_pool && pool_direction != static_cast(attr.value.u32)) - { - SWSS_LOG_ERROR("Wrong pool direction for pool %s", object_name.c_str()); - return task_process_status::task_invalid_entry; - } attribs.push_back(attr); } else if (field == buffer_pool_mode_field_name) @@ -523,54 +443,20 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) } else { - if (creating_zero_pool) - { - if (pool_direction == SAI_BUFFER_POOL_TYPE_INGRESS) - { - sai_object = m_ingressZeroBufferPool; - } - else if (pool_direction == SAI_BUFFER_POOL_TYPE_EGRESS) - { - sai_object = m_egressZeroBufferPool; - } - } - - if (SAI_NULL_OBJECT_ID == sai_object) - { - sai_status = sai_buffer_api->create_buffer_pool(&sai_object, gSwitchId, (uint32_t)attribs.size(), attribs.data()); - if (SAI_STATUS_SUCCESS != sai_status) - { - SWSS_LOG_ERROR("Failed to create buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); - task_process_status handle_status = handleSaiCreateStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) - { - return handle_status; - } - } - - SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); - } - else - { - SWSS_LOG_NOTICE("No need to create buffer pool %s since it has been created", object_name.c_str()); - } - - if (creating_zero_pool) + sai_status = sai_buffer_api->create_buffer_pool(&sai_object, gSwitchId, (uint32_t)attribs.size(), attribs.data()); + if (SAI_STATUS_SUCCESS != sai_status) { - if (pool_direction == SAI_BUFFER_POOL_TYPE_INGRESS) 
- { - m_ingressZeroPoolRefCount++; - m_ingressZeroBufferPool = sai_object; - } - else if (pool_direction == SAI_BUFFER_POOL_TYPE_EGRESS) + SWSS_LOG_ERROR("Failed to create buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) { - m_egressZeroPoolRefCount++; - m_egressZeroBufferPool = sai_object; + return handle_status; } } (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId = sai_object; (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = false; + SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); // Here we take the PFC watchdog approach to update the COUNTERS_DB metadata (e.g., PFC_WD_DETECTION_TIME per queue) // at initialization (creation and registration phase) // Specifically, we push the buffer pool name to oid mapping upon the creation of the oid @@ -593,39 +479,17 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) if (SAI_NULL_OBJECT_ID != sai_object) { clearBufferPoolWatermarkCounterIdList(sai_object); - bool remove = true; - if (sai_object == m_ingressZeroBufferPool) - { - if (--m_ingressZeroPoolRefCount > 0) - remove = false; - else - m_ingressZeroBufferPool = SAI_NULL_OBJECT_ID; - } - else if (sai_object == m_egressZeroBufferPool) - { - if (--m_egressZeroPoolRefCount > 0) - remove = false; - else - m_egressZeroBufferPool = SAI_NULL_OBJECT_ID; - } - if (remove) + sai_status = sai_buffer_api->remove_buffer_pool(sai_object); + if (SAI_STATUS_SUCCESS != sai_status) { - sai_status = sai_buffer_api->remove_buffer_pool(sai_object); - if (SAI_STATUS_SUCCESS != sai_status) + SWSS_LOG_ERROR("Failed to remove buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); + task_process_status handle_status = 
handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); + if (handle_status != task_process_status::task_success) { - SWSS_LOG_ERROR("Failed to remove buffer pool %s with type %s, rv:%d", object_name.c_str(), map_type_name.c_str(), sai_status); - task_process_status handle_status = handleSaiRemoveStatus(SAI_API_BUFFER, sai_status); - if (handle_status != task_process_status::task_success) - { - return handle_status; - } + return handle_status; } - SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); - } - else - { - SWSS_LOG_NOTICE("Will not remove buffer pool %s since it is still referenced", object_name.c_str()); } + SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); } auto it_to_delete = (m_buffer_type_maps[map_type_name])->find(object_name); (m_buffer_type_maps[map_type_name])->erase(it_to_delete); @@ -1049,7 +913,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup for (string port_name : port_names) { Port port; - bool portUpdated = false; SWSS_LOG_DEBUG("processing port:%s", port_name.c_str()); if (!gPortsOrch->getPort(port_name, port)) { @@ -1064,12 +927,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup SWSS_LOG_ERROR("Invalid pg index specified:%zd", ind); return task_process_status::task_invalid_entry; } - if (port.m_priority_group_lock[ind]) - { - SWSS_LOG_WARN("Priority group %zd on port %s is locked, pending profile 0x%" PRIx64 " until unlocked", ind, port_name.c_str(), sai_buffer_profile); - portUpdated = true; - port.m_priority_group_pending_profile[ind] = sai_buffer_profile; - } else { if (need_update_sai) @@ -1090,10 +947,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup } } } - if (portUpdated) - { - gPortsOrch->setPort(port_name, port); - } } if (m_ready_list.find(key) != m_ready_list.end()) diff --git a/orchagent/bufferorch.h b/orchagent/bufferorch.h 
index 24af140b4a..59428509b5 100644 --- a/orchagent/bufferorch.h +++ b/orchagent/bufferorch.h @@ -37,14 +37,6 @@ class BufferOrch : public Orch static type_map m_buffer_type_maps; void generateBufferPoolWatermarkCounterIdList(void); const object_reference_map &getBufferPoolNameOidMap(void); - sai_object_id_t getZeroBufferPool(bool ingress) - { - return ingress ? m_ingressZeroBufferPool : m_egressZeroBufferPool; - } - - void lockZeroBufferPool(bool ingress); - void unlockZeroBufferPool(bool ingress); - void setZeroBufferPool(bool direction, sai_object_id_t pool); private: typedef task_process_status (BufferOrch::*buffer_table_handler)(KeyOpFieldsValuesTuple &tuple); @@ -80,10 +72,6 @@ class BufferOrch : public Orch bool m_isBufferPoolWatermarkCounterIdListGenerated = false; - sai_object_id_t m_ingressZeroBufferPool; - sai_object_id_t m_egressZeroBufferPool; - int m_ingressZeroPoolRefCount; - int m_egressZeroPoolRefCount; }; #endif /* SWSS_BUFFORCH_H */ diff --git a/orchagent/pfcactionhandler.cpp b/orchagent/pfcactionhandler.cpp index 6fb497812d..f7dc20ef26 100644 --- a/orchagent/pfcactionhandler.cpp +++ b/orchagent/pfcactionhandler.cpp @@ -3,7 +3,6 @@ #include "logger.h" #include "sai_serialize.h" #include "portsorch.h" -#include "bufferorch.h" #include #include @@ -27,7 +26,6 @@ extern sai_object_id_t gSwitchId; extern PortsOrch *gPortsOrch; extern AclOrch * gAclOrch; -extern BufferOrch *gBufferOrch; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; extern sai_buffer_api_t *sai_buffer_api; @@ -567,7 +565,7 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port, return; } - setPriorityGroupAndQueueLockFlag(portInstance, true); + setQueueLockFlag(portInstance, true); sai_attribute_t attr; attr.id = SAI_QUEUE_ATTR_BUFFER_PROFILE_ID; @@ -583,7 +581,7 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port, sai_object_id_t oldQueueProfileId = attr.value.oid; attr.id = SAI_QUEUE_ATTR_BUFFER_PROFILE_ID; - 
attr.value.oid = ZeroBufferProfile::getZeroBufferProfile(false); + attr.value.oid = ZeroBufferProfile::getZeroBufferProfile(); // Set our zero buffer profile status = sai_queue_api->set_queue_attribute(queue, &attr); @@ -595,35 +593,6 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port, // Save original buffer profile m_originalQueueBufferProfile = oldQueueProfileId; - - // Get PG - sai_object_id_t pg = portInstance.m_priority_group_ids[static_cast (queueId)]; - - attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE; - - // Get PG's buffer profile - status = sai_buffer_api->get_ingress_priority_group_attribute(pg, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get buffer profile ID on PG 0x%" PRIx64 ": %d", pg, status); - return; - } - - // Set zero profile to PG - sai_object_id_t oldPgProfileId = attr.value.oid; - - attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE; - attr.value.oid = ZeroBufferProfile::getZeroBufferProfile(true); - - status = sai_buffer_api->set_ingress_priority_group_attribute(pg, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to set buffer profile ID on pg 0x%" PRIx64 ": %d", pg, status); - return; - } - - // Save original buffer profile - m_originalPgBufferProfile = oldPgProfileId; } PfcWdZeroBufferHandler::~PfcWdZeroBufferHandler(void) @@ -649,41 +618,12 @@ PfcWdZeroBufferHandler::~PfcWdZeroBufferHandler(void) return; } - auto idx = size_t(getQueueId()); - sai_object_id_t pg = portInstance.m_priority_group_ids[idx]; - sai_object_id_t pending_profile_id = portInstance.m_priority_group_pending_profile[idx]; - - attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE; - - if (pending_profile_id != SAI_NULL_OBJECT_ID) - { - attr.value.oid = pending_profile_id; - SWSS_LOG_NOTICE("Priority group %zd on port %s has been restored to pending profile 0x%" PRIx64, - idx, portInstance.m_alias.c_str(), pending_profile_id); - 
portInstance.m_priority_group_pending_profile[idx] = SAI_NULL_OBJECT_ID; - } - else - { - attr.value.oid = m_originalPgBufferProfile; - SWSS_LOG_NOTICE("Priority group %zd on port %s has been restored to original profile 0x%" PRIx64, - idx, portInstance.m_alias.c_str(), m_originalPgBufferProfile); - } - - // Set our zero buffer profile - status = sai_buffer_api->set_ingress_priority_group_attribute(pg, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to set buffer profile ID on queue 0x%" PRIx64 ": %d", getQueue(), status); - return; - } - - setPriorityGroupAndQueueLockFlag(portInstance, false); + setQueueLockFlag(portInstance, false); } -void PfcWdZeroBufferHandler::setPriorityGroupAndQueueLockFlag(Port& port, bool isLocked) const +void PfcWdZeroBufferHandler::setQueueLockFlag(Port& port, bool isLocked) const { - // set lock bits on PG and queue - port.m_priority_group_lock[static_cast(getQueueId())] = isLocked; + // set lock bits on queue for (size_t i = 0; i < port.m_queue_ids.size(); ++i) { if (port.m_queue_ids[i] == getQueue()) @@ -703,9 +643,8 @@ PfcWdZeroBufferHandler::ZeroBufferProfile::~ZeroBufferProfile(void) { SWSS_LOG_ENTER(); - // Destroy ingress and egress profiles and pools - destroyZeroBufferProfile(true); - destroyZeroBufferProfile(false); + // Destroy egress profiles and pools + destroyZeroBufferProfile(); } PfcWdZeroBufferHandler::ZeroBufferProfile &PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance(void) @@ -717,38 +656,19 @@ PfcWdZeroBufferHandler::ZeroBufferProfile &PfcWdZeroBufferHandler::ZeroBufferPro return instance; } -sai_object_id_t& PfcWdZeroBufferHandler::ZeroBufferProfile::getPool(bool ingress) -{ - // If there is a cached zero buffer pool, just use it - // else fetch zero buffer pool from buffer orch - // If there is one, use it and increase the reference number. - // otherwise, just return NULL OID - // PfcWdZeroBufferHandler will create it later and notify buffer orch later - auto &poolId = ingress ? 
m_zeroIngressBufferPool : m_zeroEgressBufferPool; - if (poolId == SAI_NULL_OBJECT_ID) - { - poolId = gBufferOrch->getZeroBufferPool(ingress); - if (poolId != SAI_NULL_OBJECT_ID) - { - gBufferOrch->lockZeroBufferPool(ingress); - } - } - return poolId; -} - -sai_object_id_t PfcWdZeroBufferHandler::ZeroBufferProfile::getZeroBufferProfile(bool ingress) +sai_object_id_t PfcWdZeroBufferHandler::ZeroBufferProfile::getZeroBufferProfile() { SWSS_LOG_ENTER(); - if (getInstance().getProfile(ingress) == SAI_NULL_OBJECT_ID) + if (getInstance().getProfile() == SAI_NULL_OBJECT_ID) { - getInstance().createZeroBufferProfile(ingress); + getInstance().createZeroBufferProfile(); } - return getInstance().getProfile(ingress); + return getInstance().getProfile(); } -void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ingress) +void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile() { SWSS_LOG_ENTER(); @@ -756,60 +676,51 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing vector attribs; sai_status_t status; - auto &poolId = getPool(ingress); - - if (SAI_NULL_OBJECT_ID == poolId) - { - // Create zero pool - attr.id = SAI_BUFFER_POOL_ATTR_SIZE; - attr.value.u64 = 0; - attribs.push_back(attr); - - attr.id = SAI_BUFFER_POOL_ATTR_TYPE; - attr.value.u32 = ingress ? 
SAI_BUFFER_POOL_TYPE_INGRESS : SAI_BUFFER_POOL_TYPE_EGRESS; - attribs.push_back(attr); + // Create zero pool + attr.id = SAI_BUFFER_POOL_ATTR_SIZE; + attr.value.u64 = 0; + attribs.push_back(attr); - attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE; - attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_STATIC; - attribs.push_back(attr); + attr.id = SAI_BUFFER_POOL_ATTR_TYPE; + attr.value.u32 = SAI_BUFFER_POOL_TYPE_EGRESS; + attribs.push_back(attr); - status = sai_buffer_api->create_buffer_pool( - &poolId, - gSwitchId, - static_cast(attribs.size()), - attribs.data()); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status); - return; - } + attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE; + attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_DYNAMIC; + attribs.push_back(attr); - // Pass the ownership to BufferOrch - gBufferOrch->setZeroBufferPool(ingress, poolId); - gBufferOrch->lockZeroBufferPool(ingress); + status = sai_buffer_api->create_buffer_pool( + &getPool(), + gSwitchId, + static_cast(attribs.size()), + attribs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status); + return; } // Create zero profile attribs.clear(); attr.id = SAI_BUFFER_PROFILE_ATTR_POOL_ID; - attr.value.oid = getPool(ingress); + attr.value.oid = getPool(); attribs.push_back(attr); attr.id = SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE; - attr.value.u32 = SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC; + attr.value.u32 = SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC; attribs.push_back(attr); attr.id = SAI_BUFFER_PROFILE_ATTR_BUFFER_SIZE; attr.value.u64 = 0; attribs.push_back(attr); - attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH; - attr.value.s8 = 0; + attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH; + attr.value.s8 = -8; attribs.push_back(attr); status = sai_buffer_api->create_buffer_profile( - &getProfile(ingress), + &getProfile(), gSwitchId, 
static_cast(attribs.size()), attribs.data()); @@ -820,23 +731,20 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing } } -void PfcWdZeroBufferHandler::ZeroBufferProfile::destroyZeroBufferProfile(bool ingress) +void PfcWdZeroBufferHandler::ZeroBufferProfile::destroyZeroBufferProfile() { SWSS_LOG_ENTER(); - if (getProfile(ingress) != SAI_NULL_OBJECT_ID) + sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile()); + if (status != SAI_STATUS_SUCCESS) { - sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile(ingress)); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status); - return; - } + SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status); + return; } - auto &pool = ingress ? m_zeroIngressBufferPool : m_zeroEgressBufferPool; - if (pool != SAI_NULL_OBJECT_ID) + status = sai_buffer_api->remove_buffer_pool(getPool()); + if (status != SAI_STATUS_SUCCESS) { - gBufferOrch->unlockZeroBufferPool(ingress); + SWSS_LOG_ERROR("Failed to remove static zero buffer pool for PFC WD: %d", status); } } diff --git a/orchagent/pfcactionhandler.h b/orchagent/pfcactionhandler.h index 22908fbe08..d32df433f7 100644 --- a/orchagent/pfcactionhandler.h +++ b/orchagent/pfcactionhandler.h @@ -125,39 +125,39 @@ class PfcWdZeroBufferHandler: public PfcWdLossyHandler private: /* - * Sets lock bits on port's priority group and queue - * to protect them from being changed by other Orch's - */ - void setPriorityGroupAndQueueLockFlag(Port& port, bool isLocked) const; + * Sets lock bits on port's queue + * to protect it from being changed by other Orch's + */ + void setQueueLockFlag(Port& port, bool isLocked) const; // Singletone class for keeping shared data - zero buffer profiles class ZeroBufferProfile { public: ~ZeroBufferProfile(void); - static sai_object_id_t getZeroBufferProfile(bool ingress); + static sai_object_id_t 
getZeroBufferProfile(); private: ZeroBufferProfile(void); static ZeroBufferProfile &getInstance(void); - void createZeroBufferProfile(bool ingress); - void destroyZeroBufferProfile(bool ingress); + void createZeroBufferProfile(); + void destroyZeroBufferProfile(); - sai_object_id_t& getProfile(bool ingress) + sai_object_id_t& getProfile() { - return ingress ? m_zeroIngressBufferProfile : m_zeroEgressBufferProfile; + return m_zeroEgressBufferProfile; } - sai_object_id_t& getPool(bool ingress); + sai_object_id_t& getPool() + { + return m_zeroEgressBufferPool; + } - sai_object_id_t m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID; sai_object_id_t m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID; - sai_object_id_t m_zeroIngressBufferProfile = SAI_NULL_OBJECT_ID; sai_object_id_t m_zeroEgressBufferProfile = SAI_NULL_OBJECT_ID; }; sai_object_id_t m_originalQueueBufferProfile = SAI_NULL_OBJECT_ID; - sai_object_id_t m_originalPgBufferProfile = SAI_NULL_OBJECT_ID; }; // PFC queue that implements drop action by draining queue via SAI diff --git a/orchagent/port.h b/orchagent/port.h index fe366630ac..2850cfc154 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -153,15 +153,13 @@ class Port bool m_mpls = false; /* - * Following two bit vectors are used to lock - * the PG/queue from being changed in BufferOrch. + * Following bit vector is used to lock + * the queue from being changed in BufferOrch. * The use case scenario is when PfcWdZeroBufferHandler - * sets zero buffer profile it should protect PG/queue + * sets zero buffer profile it should protect queue * from being overwritten in BufferOrch. 
*/ std::vector m_queue_lock; - std::vector m_priority_group_lock; - std::vector m_priority_group_pending_profile; std::unordered_set m_ingress_acl_tables_uset; std::unordered_set m_egress_acl_tables_uset; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 7b90254287..2b816d71d2 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -4162,8 +4162,6 @@ void PortsOrch::initializePriorityGroups(Port &port) SWSS_LOG_INFO("Get %d priority groups for port %s", attr.value.u32, port.m_alias.c_str()); port.m_priority_group_ids.resize(attr.value.u32); - port.m_priority_group_lock.resize(attr.value.u32); - port.m_priority_group_pending_profile.resize(attr.value.u32); if (attr.value.u32 == 0) { diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 28df6610fd..78c633d4a1 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -20,105 +20,6 @@ namespace portsorch_test using namespace std; - sai_queue_api_t ut_sai_queue_api; - sai_queue_api_t *pold_sai_queue_api; - sai_buffer_api_t ut_sai_buffer_api; - sai_buffer_api_t *pold_sai_buffer_api; - - string _ut_stub_queue_key; - sai_status_t _ut_stub_sai_get_queue_attribute( - _In_ sai_object_id_t queue_id, - _In_ uint32_t attr_count, - _Inout_ sai_attribute_t *attr_list) - { - if (attr_count == 1 && attr_list[0].id == SAI_QUEUE_ATTR_BUFFER_PROFILE_ID) - { - auto &typemapQueue = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_QUEUE_TABLE_NAME]); - auto &profileName = typemapQueue["Ethernet0:3-4"].m_objsReferencingByMe["profile"]; - auto profileNameVec = tokenize(profileName, ':'); - auto &typemapProfile = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PROFILE_TABLE_NAME]); - attr_list[0].value.oid = typemapProfile[profileNameVec[1]].m_saiObjectId; - return SAI_STATUS_SUCCESS; - } - else - { - return pold_sai_queue_api->get_queue_attribute(queue_id, attr_count, attr_list); - } - } - - sai_status_t 
_ut_stub_sai_get_ingress_priority_group_attribute( - _In_ sai_object_id_t ingress_priority_group_id, - _In_ uint32_t attr_count, - _Inout_ sai_attribute_t *attr_list) - { - if (attr_count == 1 && attr_list[0].id == SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE) - { - auto &typemapPg = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PG_TABLE_NAME]); - auto &profileName = typemapPg["Ethernet0:3-4"].m_objsReferencingByMe["profile"]; - auto profileNameVec = tokenize(profileName, ':'); - auto &typemapProfile = (*gBufferOrch->m_buffer_type_maps[APP_BUFFER_PROFILE_TABLE_NAME]); - attr_list[0].value.oid = typemapProfile[profileNameVec[1]].m_saiObjectId; - return SAI_STATUS_SUCCESS; - } - else - { - return pold_sai_buffer_api->get_ingress_priority_group_attribute(ingress_priority_group_id, attr_count, attr_list); - } - } - - int _sai_create_buffer_pool_count = 0; - sai_status_t _ut_stub_sai_create_buffer_pool( - _Out_ sai_object_id_t *buffer_pool_id, - _In_ sai_object_id_t switch_id, - _In_ uint32_t attr_count, - _In_ const sai_attribute_t *attr_list) - { - auto status = pold_sai_buffer_api->create_buffer_pool(buffer_pool_id, switch_id, attr_count, attr_list); - if (SAI_STATUS_SUCCESS == status) - _sai_create_buffer_pool_count++; - return status; - } - - int _sai_remove_buffer_pool_count = 0; - sai_status_t _ut_stub_sai_remove_buffer_pool( - _In_ sai_object_id_t buffer_pool_id) - { - auto status = pold_sai_buffer_api->remove_buffer_pool(buffer_pool_id); - if (SAI_STATUS_SUCCESS == status) - _sai_remove_buffer_pool_count++; - return status; - } - - void _hook_sai_buffer_and_queue_api() - { - ut_sai_buffer_api = *sai_buffer_api; - pold_sai_buffer_api = sai_buffer_api; - ut_sai_buffer_api.create_buffer_pool = _ut_stub_sai_create_buffer_pool; - ut_sai_buffer_api.remove_buffer_pool = _ut_stub_sai_remove_buffer_pool; - ut_sai_buffer_api.get_ingress_priority_group_attribute = _ut_stub_sai_get_ingress_priority_group_attribute; - sai_buffer_api = &ut_sai_buffer_api; - - 
ut_sai_queue_api = *sai_queue_api; - pold_sai_queue_api = sai_queue_api; - ut_sai_queue_api.get_queue_attribute = _ut_stub_sai_get_queue_attribute; - sai_queue_api = &ut_sai_queue_api; - } - - void _unhook_sai_buffer_and_queue_api() - { - sai_buffer_api = pold_sai_buffer_api; - sai_queue_api = pold_sai_queue_api; - } - - void clear_pfcwd_zero_buffer_handler() - { - auto &zeroProfile = PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance(); - zeroProfile.m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID; - zeroProfile.m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID; - zeroProfile.m_zeroIngressBufferProfile = SAI_NULL_OBJECT_ID; - zeroProfile.m_zeroEgressBufferProfile = SAI_NULL_OBJECT_ID; - } - struct PortsOrchTest : public ::testing::Test { shared_ptr m_app_db; @@ -460,9 +361,8 @@ namespace portsorch_test ASSERT_TRUE(ts.empty()); } - TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortPgAndQueue) + TEST_F(PortsOrchTest, PfcZeroBufferHandler) { - _hook_sai_buffer_and_queue_api(); Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); @@ -506,28 +406,26 @@ namespace portsorch_test Port port; gPortsOrch->getPort("Ethernet0", port); + auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); + auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); + // Create test buffer pool poolTable.set( - "ingress_pool", + "egress_pool", { - { "type", "ingress" }, + { "type", "egress" }, { "mode", "dynamic" }, { "size", "4200000" }, }); poolTable.set( - "egress_pool", + "ingress_pool", { - { "type", "egress" }, + { "type", "ingress" }, { "mode", "dynamic" }, { "size", "4200000" }, }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "ingress_pool" }, - { "xon", "14832" }, - { "xoff", "14832" }, - { "size", "35000" }, - { "dynamic_th", "0" } }); profileTable.set("ingress_profile", { { "pool", "ingress_pool" }, { "xon", "14832" }, { "xoff", "14832" }, @@ -537,7 +435,7 @@ namespace portsorch_test { "size", "0" }, { "dynamic_th", "0" } }); - // Apply profile on PGs 3-4 all ports + // Apply profile on Queue and PGs 3-4 all ports for (const auto &it : ports) { std::ostringstream oss; @@ -550,210 +448,28 @@ namespace portsorch_test gBufferOrch->addExistingData(&profileTable); gBufferOrch->addExistingData(&queueTable); - // process pool, profile and PGs + // process pool, profile and Q's static_cast(gBufferOrch)->doTask(); - auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); - auto current_create_buffer_pool_count = _sai_create_buffer_pool_count; - auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); - - current_create_buffer_pool_count += 2; - ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); - ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(true) == gBufferOrch->m_ingressZeroBufferPool); - ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(false) == gBufferOrch->m_egressZeroBufferPool); - ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); - ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 1); + auto queueConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME)); + queueConsumer->dumpPendingTasks(ts); + ASSERT_FALSE(ts.empty()); // Queue is skipped + ts.clear(); - std::deque entries; - entries.push_back({"Ethernet0:3-4", "SET", {{ "profile", "test_profile"}}}); auto pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); - pgConsumer->addToSync(entries); - entries.clear(); - static_cast(gBufferOrch)->doTask(); - - // Port should have been updated by BufferOrch->doTask - gPortsOrch->getPort("Ethernet0", port); - auto profile_id = (*BufferOrch::m_buffer_type_maps["BUFFER_PROFILE_TABLE"])[string("test_profile")].m_saiObjectId; - ASSERT_TRUE(profile_id != SAI_NULL_OBJECT_ID); - ASSERT_TRUE(port.m_priority_group_pending_profile[3] == profile_id); - ASSERT_TRUE(port.m_priority_group_pending_profile[4] == SAI_NULL_OBJECT_ID); - - pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); pgConsumer->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); // PG is stored in m_priority_group_pending_profile + ASSERT_TRUE(ts.empty()); // PG Notification is not skipped ts.clear(); - // Create a zero buffer pool after PFC storm - entries.push_back({"ingress_zero_pool", "SET", {{ "type", "ingress" }, - { "mode", "static" }, - { 
"size", "0" }}}); - auto poolConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); - poolConsumer->addToSync(entries); - entries.clear(); - static_cast(gBufferOrch)->doTask(); - // Reference increased - ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 2); - // Didn't create buffer pool again - ASSERT_TRUE(_sai_create_buffer_pool_count == current_create_buffer_pool_count); - - entries.push_back({"ingress_zero_pool", "DEL", {}}); - poolConsumer->addToSync(entries); - entries.clear(); - auto current_remove_buffer_pool_count = _sai_remove_buffer_pool_count; - static_cast(gBufferOrch)->doTask(); - ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); - ASSERT_TRUE(_sai_remove_buffer_pool_count == current_remove_buffer_pool_count); - // release zero buffer drop handler dropHandler.reset(); - // re-fetch the port - gPortsOrch->getPort("Ethernet0", port); - - // pending profile should be cleared - ASSERT_TRUE(port.m_priority_group_pending_profile[3] == SAI_NULL_OBJECT_ID); - ASSERT_TRUE(port.m_priority_group_pending_profile[4] == SAI_NULL_OBJECT_ID); - - // process PGs + // process queue static_cast(gBufferOrch)->doTask(); - pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); - pgConsumer->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); // PG should be processed now + queueConsumer->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); // queue should be processed now ts.clear(); - clear_pfcwd_zero_buffer_handler(); - _unhook_sai_buffer_and_queue_api(); - } - - TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortWithZeroPoolCreated) - { - _hook_sai_buffer_and_queue_api(); - Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); - Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); - Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); - Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); - Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); - 
- // Get SAI default ports to populate DB - auto ports = ut_helper::getInitialSaiPorts(); - - // Populate port table with SAI ports - for (const auto &it : ports) - { - portTable.set(it.first, it.second); - } - - // Set PortConfigDone, PortInitDone - portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); - portTable.set("PortInitDone", { { "lanes", "0" } }); - - // refill consumer - gPortsOrch->addExistingData(&portTable); - - // Apply configuration : - // create ports - - static_cast(gPortsOrch)->doTask(); - - // Apply configuration - // ports - static_cast(gPortsOrch)->doTask(); - - ASSERT_TRUE(gPortsOrch->allPortsReady()); - - // No more tasks - vector ts; - gPortsOrch->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); - ts.clear(); - - // Simulate storm drop handler started on Ethernet0 TC 3 - Port port; - gPortsOrch->getPort("Ethernet0", port); - - // Create test buffer pool - poolTable.set("ingress_pool", - { - { "type", "ingress" }, - { "mode", "dynamic" }, - { "size", "4200000" }, - }); - poolTable.set("egress_pool", - { - { "type", "egress" }, - { "mode", "dynamic" }, - { "size", "4200000" }, - }); - poolTable.set("ingress_zero_pool", - { - { "type", "ingress" }, - { "mode", "static" }, - { "size", "0" } - }); - auto poolConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); - - // Create test buffer profile - profileTable.set("ingress_profile", { { "pool", "ingress_pool" }, - { "xon", "14832" }, - { "xoff", "14832" }, - { "size", "35000" }, - { "dynamic_th", "0" } }); - profileTable.set("egress_profile", { { "pool", "egress_pool" }, - { "size", "0" }, - { "dynamic_th", "0" } }); - - // Apply profile on PGs 3-4 all ports - for (const auto &it : ports) - { - std::ostringstream oss; - oss << it.first << ":3-4"; - pgTable.set(oss.str(), { { "profile", "ingress_profile" } }); - queueTable.set(oss.str(), { {"profile", "egress_profile" } }); - } - - gBufferOrch->addExistingData(&poolTable); - 
gBufferOrch->addExistingData(&profileTable); - gBufferOrch->addExistingData(&pgTable); - gBufferOrch->addExistingData(&queueTable); - - auto current_create_buffer_pool_count = _sai_create_buffer_pool_count + 3; // call SAI API create_buffer_pool for each pool - ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 0); - ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 0); - ASSERT_TRUE(gBufferOrch->m_ingressZeroBufferPool == SAI_NULL_OBJECT_ID); - ASSERT_TRUE(gBufferOrch->m_egressZeroBufferPool == SAI_NULL_OBJECT_ID); - - // process pool, profile and PGs - static_cast(gBufferOrch)->doTask(); - - ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); - ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); - ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 0); - ASSERT_TRUE(gBufferOrch->m_ingressZeroBufferPool != SAI_NULL_OBJECT_ID); - ASSERT_TRUE(gBufferOrch->m_egressZeroBufferPool == SAI_NULL_OBJECT_ID); - - auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); - auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); - - current_create_buffer_pool_count++; // Increased for egress zero pool - ASSERT_TRUE(current_create_buffer_pool_count == _sai_create_buffer_pool_count); - ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(true) == gBufferOrch->m_ingressZeroBufferPool); - ASSERT_TRUE(PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance().getPool(false) == gBufferOrch->m_egressZeroBufferPool); - ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 2); - ASSERT_TRUE(gBufferOrch->m_egressZeroPoolRefCount == 1); - - std::deque entries; - entries.push_back({"ingress_zero_pool", "DEL", {}}); - poolConsumer->addToSync(entries); - entries.clear(); - auto current_remove_buffer_pool_count = _sai_remove_buffer_pool_count; - static_cast(gBufferOrch)->doTask(); - ASSERT_TRUE(gBufferOrch->m_ingressZeroPoolRefCount == 1); - ASSERT_TRUE(_sai_remove_buffer_pool_count == current_remove_buffer_pool_count); - - // release zero buffer drop handler - dropHandler.reset(); - clear_pfcwd_zero_buffer_handler(); - _unhook_sai_buffer_and_queue_api(); } /* This test checks that a LAG member validation happens on orchagent level From fe875fdbbf24214b4a0bba48087fd5a2f311174e Mon Sep 17 00:00:00 2001 From: svshah-intel <102195908+svshah-intel@users.noreply.github.com> Date: Mon, 27 Jun 2022 08:04:05 -0700 Subject: [PATCH 38/64] [orchagent]: srv6orch support for uSID (#2335) * [orchagent]: srv6orch support for uSID --- orchagent/srv6orch.cpp | 92 ++++++++++++++++++------------------------ tests/test_srv6.py | 19 ++++++++- 2 files changed, 58 insertions(+), 53 deletions(-) diff --git a/orchagent/srv6orch.cpp b/orchagent/srv6orch.cpp index 5081e06b6f..3d81163b2a 100644 --- a/orchagent/srv6orch.cpp +++ b/orchagent/srv6orch.cpp @@ -20,6 +20,38 @@ extern sai_next_hop_api_t* sai_next_hop_api; extern RouteOrch *gRouteOrch; extern CrmOrch *gCrmOrch; +const map 
end_behavior_map = +{ + {"end", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_E}, + {"end.x", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X}, + {"end.t", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_T}, + {"end.dx6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6}, + {"end.dx4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4}, + {"end.dt4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4}, + {"end.dt6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6}, + {"end.dt46", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46}, + {"end.b6.encaps", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS}, + {"end.b6.encaps.red", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED}, + {"end.b6.insert", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT}, + {"end.b6.insert.red", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED}, + {"udx6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6}, + {"udx4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4}, + {"udt6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6}, + {"udt4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4}, + {"udt46", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46}, + {"un", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UN}, + {"ua", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA} +}; + +const map end_flavor_map = +{ + {"end", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD}, + {"end.x", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD}, + {"end.t", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD}, + {"un", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD}, + {"ua", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD} +}; + void Srv6Orch::srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert) { if (insert) @@ -372,62 +404,18 @@ bool Srv6Orch::mySidExists(string my_sid_string) bool Srv6Orch::sidEntryEndpointBehavior(string action, sai_my_sid_entry_endpoint_behavior_t &end_behavior, sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor) { - if (action == "end") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_E; - end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD; - } - else if (action == 
"end.x") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X; - end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD; - } - else if (action == "end.t") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_T; - end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD; - } - else if (action == "end.dx6") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6; - } - else if (action == "end.dx4") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4; - } - else if (action == "end.dt4") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4; - } - else if (action == "end.dt6") + if (end_behavior_map.find(action) == end_behavior_map.end()) { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6; - } - else if (action == "end.dt46") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46; - } - else if (action == "end.b6.encaps") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS; - } - else if (action == "end.b6.encaps.red") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED; - } - else if (action == "end.b6.insert") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT; - } - else if (action == "end.b6.insert.red") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED; + SWSS_LOG_ERROR("Invalid endpoint behavior function"); + return false; } - else + end_behavior = end_behavior_map.at(action); + + if (end_flavor_map.find(action) != end_flavor_map.end()) { - SWSS_LOG_ERROR("Invalid endpoing behavior function"); - return false; + end_flavor = end_flavor_map.at(action); } + return true; } diff --git a/tests/test_srv6.py b/tests/test_srv6.py index 0d134acc2b..dddb10153b 100644 --- a/tests/test_srv6.py +++ b/tests/test_srv6.py @@ -56,6 +56,7 @@ def test_mysid(self, dvs, testlog): # create MySID entries mysid1='16:8:8:8:baba:2001:10::' mysid2='16:8:8:8:baba:2001:20::' + mysid3='16:8:8:8:fcbb:bb01:800::' # create MySID END fvs = 
swsscommon.FieldValuePairs([('action', 'end')]) @@ -90,14 +91,30 @@ def test_mysid(self, dvs, testlog): elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46" + # create MySID uN + fvs = swsscommon.FieldValuePairs([('action', 'un')]) + key = self.create_mysid(mysid3, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fcbb:bb01:800::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UN" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + # delete MySID self.remove_mysid(mysid1) self.remove_mysid(mysid2) + self.remove_mysid(mysid3) # remove vrf self.remove_vrf("VrfDt46") - class TestSrv6(object): def setup_db(self, dvs): self.pdb = dvs.get_app_db() From 5043701cc87efc3cb642762b13217729b576cf59 Mon Sep 17 00:00:00 2001 From: Lior Avramov <73036155+liorghub@users.noreply.github.com> Date: Mon, 27 Jun 2022 18:05:24 +0300 Subject: [PATCH 39/64] Add support for IP interface loopback action (#2307) * Add IP interface loopback action support Co-authored-by: liora --- cfgmgr/intfmgr.cpp | 12 ++++++ orchagent/intfsorch.cpp | 85 ++++++++++++++++++++++++++++++++++-- orchagent/intfsorch.h | 6 ++- orchagent/port.h | 1 - tests/test_interface.py | 96 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 193 insertions(+), 7 deletions(-) diff --git a/cfgmgr/intfmgr.cpp b/cfgmgr/intfmgr.cpp index 952ca9ef55..3651a55150 100644 --- a/cfgmgr/intfmgr.cpp +++ b/cfgmgr/intfmgr.cpp @@ -728,6 +728,7 @@ bool IntfMgr::doIntfGeneralTask(const vector& keys, string grat_arp = ""; string mpls = ""; string ipv6_link_local_mode = ""; + string loopback_action = ""; for (auto idx : data) { 
@@ -770,6 +771,10 @@ bool IntfMgr::doIntfGeneralTask(const vector& keys, { vlanId = value; } + else if (field == "loopback_action") + { + loopback_action = value; + } } if (op == SET_COMMAND) @@ -811,6 +816,13 @@ bool IntfMgr::doIntfGeneralTask(const vector& keys, data.push_back(fvTuple); } + /* Set loopback action */ + if (!loopback_action.empty()) + { + FieldValueTuple fvTuple("loopback_action", loopback_action); + data.push_back(fvTuple); + } + /* Set mpls */ if (!setIntfMpls(alias, mpls)) { diff --git a/orchagent/intfsorch.cpp b/orchagent/intfsorch.cpp index 9bc36f7bb6..3b83b0d906 100644 --- a/orchagent/intfsorch.cpp +++ b/orchagent/intfsorch.cpp @@ -416,6 +416,37 @@ bool IntfsOrch::setIntfProxyArp(const string &alias, const string &proxy_arp) return true; } +bool IntfsOrch::setIntfLoopbackAction(const Port &port, string actionStr) +{ + sai_attribute_t attr; + sai_packet_action_t action; + + if (!getSaiLoopbackAction(actionStr, action)) + { + return false; + } + + attr.id = SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION; + attr.value.s32 = action; + + sai_status_t status = sai_router_intfs_api->set_router_interface_attribute(port.m_rif_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Loopback action [%s] set failed, interface [%s], rc [%d]", + actionStr.c_str(), port.m_alias.c_str(), status); + + task_process_status handle_status = handleSaiSetStatus(SAI_API_ROUTER_INTERFACE, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + SWSS_LOG_NOTICE("Loopback action [%s] set success, interface [%s]", + actionStr.c_str(), port.m_alias.c_str()); + return true; +} + set IntfsOrch:: getSubnetRoutes() { SWSS_LOG_ENTER(); @@ -433,7 +464,9 @@ set IntfsOrch:: getSubnetRoutes() return subnet_routes; } -bool IntfsOrch::setIntf(const string& alias, sai_object_id_t vrf_id, const IpPrefix *ip_prefix, const bool adminUp, const uint32_t mtu) +bool IntfsOrch::setIntf(const string& alias, 
sai_object_id_t vrf_id, const IpPrefix *ip_prefix, + const bool adminUp, const uint32_t mtu, string loopbackAction) + { SWSS_LOG_ENTER(); @@ -443,7 +476,7 @@ bool IntfsOrch::setIntf(const string& alias, sai_object_id_t vrf_id, const IpPre auto it_intfs = m_syncdIntfses.find(alias); if (it_intfs == m_syncdIntfses.end()) { - if (!ip_prefix && addRouterIntfs(vrf_id, port)) + if (!ip_prefix && addRouterIntfs(vrf_id, port, loopbackAction)) { gPortsOrch->increasePortRefCount(alias); IntfsEntry intfs_entry; @@ -665,6 +698,7 @@ void IntfsOrch::doTask(Consumer &consumer) string inband_type = ""; bool mpls = false; string vlan = ""; + string loopbackAction = ""; for (auto idx : data) { @@ -757,6 +791,10 @@ void IntfsOrch::doTask(Consumer &consumer) { vlan = value; } + else if (field == "loopback_action") + { + loopbackAction = value; + } } if (alias == "eth0" || alias == "docker0") @@ -874,7 +912,8 @@ void IntfsOrch::doTask(Consumer &consumer) { adminUp = port.m_admin_state_up; } - if (!setIntf(alias, vrf_id, ip_prefix_in_key ? &ip_prefix : nullptr, adminUp, mtu)) + + if (!setIntf(alias, vrf_id, ip_prefix_in_key ? 
&ip_prefix : nullptr, adminUp, mtu, loopbackAction)) { it++; continue; @@ -906,6 +945,12 @@ void IntfsOrch::doTask(Consumer &consumer) setRouterIntfsMpls(port); gPortsOrch->setPort(alias, port); } + + /* Set loopback action */ + if (!loopbackAction.empty()) + { + setIntfLoopbackAction(port, loopbackAction); + } } } @@ -1047,7 +1092,28 @@ void IntfsOrch::doTask(Consumer &consumer) } } -bool IntfsOrch::addRouterIntfs(sai_object_id_t vrf_id, Port &port) +bool IntfsOrch::getSaiLoopbackAction(const string &actionStr, sai_packet_action_t &action) +{ + const unordered_map loopbackActionMap = + { + {"drop", SAI_PACKET_ACTION_DROP}, + {"forward", SAI_PACKET_ACTION_FORWARD}, + }; + + auto it = loopbackActionMap.find(actionStr); + if (it != loopbackActionMap.end()) + { + action = loopbackActionMap.at(actionStr); + return true; + } + else + { + SWSS_LOG_WARN("Unsupported loopback action [%s]", actionStr.c_str()); + return false; + } +} + +bool IntfsOrch::addRouterIntfs(sai_object_id_t vrf_id, Port &port, string loopbackActionStr) { SWSS_LOG_ENTER(); @@ -1067,6 +1133,17 @@ bool IntfsOrch::addRouterIntfs(sai_object_id_t vrf_id, Port &port) attr.value.oid = vrf_id; attrs.push_back(attr); + if (!loopbackActionStr.empty()) + { + sai_packet_action_t loopbackAction; + if (getSaiLoopbackAction(loopbackActionStr, loopbackAction)) + { + attr.id = SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION; + attr.value.s32 = loopbackAction; + attrs.push_back(attr); + } + } + attr.id = SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS; if (port.m_mac) { diff --git a/orchagent/intfsorch.h b/orchagent/intfsorch.h index 341675bac1..77c8efe752 100644 --- a/orchagent/intfsorch.h +++ b/orchagent/intfsorch.h @@ -54,7 +54,9 @@ class IntfsOrch : public Orch void addRifToFlexCounter(const string&, const string&, const string&); void removeRifFromFlexCounter(const string&, const string&); - bool setIntf(const string& alias, sai_object_id_t vrf_id = gVirtualRouterId, const IpPrefix *ip_prefix = nullptr, const bool 
adminUp = true, const uint32_t mtu = 0); + bool setIntfLoopbackAction(const Port &port, string actionStr); + bool getSaiLoopbackAction(const string &actionStr, sai_packet_action_t &action); + bool setIntf(const string& alias, sai_object_id_t vrf_id = gVirtualRouterId, const IpPrefix *ip_prefix = nullptr, const bool adminUp = true, const uint32_t mtu = 0, string loopbackAction = ""); bool removeIntf(const string& alias, sai_object_id_t vrf_id = gVirtualRouterId, const IpPrefix *ip_prefix = nullptr); void addIp2MeRoute(sai_object_id_t vrf_id, const IpPrefix &ip_prefix); @@ -95,7 +97,7 @@ class IntfsOrch : public Orch std::string getRifRateInitTableKey(std::string s); void cleanUpRifFromCounterDb(const string &id, const string &name); - bool addRouterIntfs(sai_object_id_t vrf_id, Port &port); + bool addRouterIntfs(sai_object_id_t vrf_id, Port &port, string loopbackAction); bool removeRouterIntfs(Port &port); void addDirectedBroadcast(const Port &port, const IpPrefix &ip_prefix); diff --git a/orchagent/port.h b/orchagent/port.h index 2850cfc154..a561a221cf 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -151,7 +151,6 @@ class Port sai_port_interface_type_t m_interface_type; std::vector m_adv_interface_types; bool m_mpls = false; - /* * Following bit vector is used to lock * the queue from being changed in BufferOrch. diff --git a/tests/test_interface.py b/tests/test_interface.py index a57970b1e5..98f1527152 100644 --- a/tests/test_interface.py +++ b/tests/test_interface.py @@ -4,6 +4,8 @@ from swsscommon import swsscommon +VLAN_SUB_INTERFACE_SEPARATOR = '.' 
+ class TestRouterInterface(object): def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) @@ -2193,6 +2195,100 @@ def test_VLanInterfaceIpv6LinkLocalOnly(self, dvs, testlog): # one loopback router interface assert len(intf_entries) == 1 + def set_loopback_action(self, interface, action): + if interface.startswith("PortChannel"): + tbl_name = "PORTCHANNEL_INTERFACE" + elif interface.startswith("Vlan"): + tbl_name = "VLAN_INTERFACE" + else: + sub_intf_sep_idx = interface.find(VLAN_SUB_INTERFACE_SEPARATOR) + if sub_intf_sep_idx != -1: + tbl_name = "VLAN_SUB_INTERFACE" + else: + tbl_name = "INTERFACE" + + fvs = swsscommon.FieldValuePairs([("loopback_action", action)]) + tbl = swsscommon.Table(self.cdb, tbl_name) + tbl.set(interface, fvs) + time.sleep(1) + + def loopback_action_test(self, iface, action): + # create interface + self.create_l3_intf(iface, "") + + # set interface loopback action in config db + self.set_loopback_action(iface, action) + + # check application database + tbl = swsscommon.Table(self.pdb, "INTF_TABLE") + (status, fvs) = tbl.get(iface) + assert status == True + + action_found = False + for fv in fvs: + if fv[0] == "loopback_action": + action_found = True + assert fv[1] == action + assert action_found == True + + # check asic db + tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") + intf_entries = tbl.getKeys() + + action_map = {"drop": "SAI_PACKET_ACTION_DROP", "forward": "SAI_PACKET_ACTION_FORWARD"} + action_found = False + for key in intf_entries: + (status, fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION": + action_found = True + assert fv[1] == action_map[action] + assert action_found == True + + # remove interface + self.remove_l3_intf(iface) + + def test_interfaceLoopbackActionDrop(self, dvs, testlog): + self.setup_db(dvs) + self.loopback_action_test("Ethernet8", "drop") + + def 
test_interfaceLoopbackActionForward(self, dvs, testlog): + self.setup_db(dvs) + self.loopback_action_test("Ethernet8", "forward") + + def test_subInterfaceLoopbackActionDrop(self, dvs, testlog): + self.setup_db(dvs) + self.loopback_action_test("Ethernet8.1", "drop") + + def test_subInterfaceLoopbackActionForward(self, dvs, testlog): + self.setup_db(dvs) + self.loopback_action_test("Ethernet8.1", "forward") + + def test_vlanInterfaceLoopbackActionDrop(self, dvs, testlog): + self.setup_db(dvs) + self.create_vlan("10") + self.loopback_action_test("Vlan10", "drop") + self.remove_vlan("10") + + def test_vlanInterfaceLoopbackActionForward(self, dvs, testlog): + self.setup_db(dvs) + self.create_vlan("20") + self.loopback_action_test("Vlan20", "forward") + self.remove_vlan("20") + + def test_portChannelInterfaceLoopbackActionDrop(self, dvs, testlog): + self.setup_db(dvs) + self.create_port_channel("PortChannel009") + self.loopback_action_test("PortChannel009", "drop") + self.remove_port_channel("PortChannel009") + + def test_portChannelInterfaceLoopbackActionForward(self, dvs, testlog): + self.setup_db(dvs) + self.create_port_channel("PortChannel010") + self.loopback_action_test("PortChannel010", "forward") + self.remove_port_channel("PortChannel010") # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying From 979d871bbb3dfdb2176a8a73ca49ff8caf25594a Mon Sep 17 00:00:00 2001 From: Yakiv Huryk <62013282+Yakiv-Huryk@users.noreply.github.com> Date: Tue, 28 Jun 2022 18:51:54 +0300 Subject: [PATCH 40/64] [asan] suppress the static variable leaks (#2354) Currently, ASAN sometimes reports the BufferOrch::m_buffer_type_maps and QosOrch::m_qos_maps as leaked. However, their lifetime is the lifetime of a process so they are not really 'leaked'. This also adds a simple way to add more suppressions later if required. 
Example of ASAN report: Direct leak of 48 byte(s) in 1 object(s) allocated from: #0 0x7f96aa952d30 in operator new(unsigned long) (/usr/lib/x86_64-linux-gnu/libasan.so.5+0xead30) #1 0x55ca1da9f789 in __static_initialization_and_destruction_0 /__w/2/s/orchagent/bufferorch.cpp:39 #2 0x55ca1daa02af in _GLOBAL__sub_I_bufferorch.cpp /__w/2/s/orchagent/bufferorch.cpp:1321 #3 0x55ca1e2a9cd4 (/usr/bin/orchagent+0xe89cd4) Direct leak of 48 byte(s) in 1 object(s) allocated from: #0 0x7f96aa952d30 in operator new(unsigned long) (/usr/lib/x86_64-linux-gnu/libasan.so.5+0xead30) #1 0x55ca1da6d2da in __static_initialization_and_destruction_0 /__w/2/s/orchagent/qosorch.cpp:80 #2 0x55ca1da6ecf2 in _GLOBAL__sub_I_qosorch.cpp /__w/2/s/orchagent/qosorch.cpp:2000 #3 0x55ca1e2a9cd4 (/usr/bin/orchagent+0xe89cd4) - What I did Added an lsan suppression config with static variable leak suppression - Why I did it To suppress ASAN false positives - How I verified it Run a test that produces the static variable leaks report and checked that report has these leaks suppressed. Signed-off-by: Yakiv Huryk --- lib/asan.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/asan.cpp b/lib/asan.cpp index 053d1bd3c3..1f7d074e68 100644 --- a/lib/asan.cpp +++ b/lib/asan.cpp @@ -4,6 +4,12 @@ #include +extern "C" { + const char* __lsan_default_suppressions() { + return "leak:__static_initialization_and_destruction_0\n"; + } +} + static void swss_asan_sigterm_handler(int signo) { SWSS_LOG_ENTER(); From 1aaccd6ca01a3051ed2757cb2e9695ae71d80f0d Mon Sep 17 00:00:00 2001 From: Yakiv Huryk <62013282+Yakiv-Huryk@users.noreply.github.com> Date: Tue, 28 Jun 2022 18:52:56 +0300 Subject: [PATCH 41/64] [tests] [asan] add graceful stop flag (#2347) - What I did Added a new flag to DVS tests - Why I did it Currently, when running the tests with ASAN-enabled image, leak reports are not generated. The reason is that dvs.destroy() (via 'ctn.remove(force=True)') uses SIGKILL to stop the container. 
To address this, a new flag is added. When the new flag is set, the swss processes are gracefully stopped (via SIGTERM). So ASAN reports can be generated as a result of DVS tests run - How I verified it Run the tests with --graceful-stop, observe that swss processes are stopped via SIGTERM Signed-off-by: Yakiv Huryk --- tests/conftest.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index efe6c85225..6e6939d41c 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -89,6 +89,11 @@ def pytest_addoption(parser): default="traditional", help="Buffer model") + parser.addoption("--graceful-stop", + action="store_true", + default=False, + help="Stop swss before stopping a conatainer") + def random_string(size=4, chars=string.ascii_uppercase + string.digits): return "".join(random.choice(chars) for x in range(size)) @@ -1730,6 +1735,8 @@ def manage_dvs(request) -> str: max_cpu = request.config.getoption("--max_cpu") buffer_model = request.config.getoption("--buffer_model") force_recreate = request.config.getoption("--force-recreate-dvs") + graceful_stop = request.config.getoption("--graceful-stop") + dvs = None curr_dvs_env = [] # lgtm[py/unused-local-variable] @@ -1778,6 +1785,8 @@ def update_dvs(log_path, new_dvs_env=[]): yield update_dvs + if graceful_stop: + dvs.stop_swss() dvs.get_logs() dvs.destroy() From a8e238a08070a03066632df10344ba8d7fe49ddc Mon Sep 17 00:00:00 2001 From: Yakiv Huryk <62013282+Yakiv-Huryk@users.noreply.github.com> Date: Wed, 29 Jun 2022 19:41:58 +0300 Subject: [PATCH 42/64] [vnetorch] [vxlanorch] fix a set of memory usage issues (#2352) * [vnetorch] fix use-after-free in removeBfdSession() * using a copy of monitor ip instead of a reference since the reference gets invalidated after the endpoint is erased Signed-off-by: Yakiv Huryk --- orchagent/vnetorch.cpp | 3 ++- orchagent/vxlanorch.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/orchagent/vnetorch.cpp 
b/orchagent/vnetorch.cpp index 45ba120ee6..4640d68853 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -1602,7 +1602,8 @@ void VNetRouteOrch::delEndpointMonitor(const string& vnet, NextHopGroupKey& next if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { if (--nexthop_info_[vnet][ip].ref_count == 0) { - removeBfdSession(vnet, nhk, nexthop_info_[vnet][ip].monitor_addr); + IpAddress monitor_addr = nexthop_info_[vnet][ip].monitor_addr; + removeBfdSession(vnet, nhk, monitor_addr); } } } diff --git a/orchagent/vxlanorch.cpp b/orchagent/vxlanorch.cpp index 8fce069631..7850727bd2 100644 --- a/orchagent/vxlanorch.cpp +++ b/orchagent/vxlanorch.cpp @@ -1110,13 +1110,14 @@ void VxlanTunnel::updateRemoteEndPointIpRef(const std::string remote_vtep, bool it->second.ip_refcnt++; } SWSS_LOG_DEBUG("Incrementing remote end point %s reference to %d", remote_vtep.c_str(), - it->second.ip_refcnt); + tnl_users_[remote_vtep].ip_refcnt); } else { if (it == tnl_users_.end()) { SWSS_LOG_ERROR("Cannot decrement ref. End point not referenced %s", remote_vtep.c_str()); + return; } it->second.ip_refcnt--; From d6215421ecae4302c7d505ff5276a87464f8169d Mon Sep 17 00:00:00 2001 From: Ze Gan Date: Wed, 6 Jul 2022 12:11:51 +0800 Subject: [PATCH 43/64] [teammgr]: Waiting MACsec ready before doLagMemberTask (#2286) Signed-off-by: Ze Gan ganze718@gmail.com, Judy Joseph jujoseph@microsoft.com What I did If a member of portchannel has macsec profile attached in config, enable MACsec on the port before it's been added as a member of portchannel. Why I did it Due to some hardware limitation, cannot enable MACsec on a member of portchannel. So we enable the macsec on interface first and then add it as part of portchannel. Note: This is a work around which will be removed when h/w supports it future releases. 
The approach taken in this PR is In the teamdMgr when an interface is added as part of the LAG, we wait for the macsecPort creation done in SAI and Ingress SA creation complete (if macsec is enabled on the interface) The above takes care of config reload, reboot scenarios where we cannot guarantee the sequence of macsec attach to interface, add interface as part of portchannel. If we do a manual removal of port from portchannel, or remove macsec config from the interface, Please follow these steps First remove the portchannel member out of portchannel Remove the macsec profile attached to interface. How I verified it Verified with config reload, reboot with the macsec profile attached to portchannel member interfaces. Verified case when SAK rekey is enabled on macsec on portchannel members Verified case when member interface link flaps --- cfgmgr/teammgr.cpp | 61 +++++++++++++++++++++++++++++- cfgmgr/teammgr.h | 3 ++ tests/test_macsec.py | 88 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 150 insertions(+), 2 deletions(-) diff --git a/cfgmgr/teammgr.cpp b/cfgmgr/teammgr.cpp index ad8572e07b..31f911741c 100644 --- a/cfgmgr/teammgr.cpp +++ b/cfgmgr/teammgr.cpp @@ -33,7 +33,8 @@ TeamMgr::TeamMgr(DBConnector *confDb, DBConnector *applDb, DBConnector *statDb, m_appPortTable(applDb, APP_PORT_TABLE_NAME), m_appLagTable(applDb, APP_LAG_TABLE_NAME), m_statePortTable(statDb, STATE_PORT_TABLE_NAME), - m_stateLagTable(statDb, STATE_LAG_TABLE_NAME) + m_stateLagTable(statDb, STATE_LAG_TABLE_NAME), + m_stateMACsecIngressSATable(statDb, STATE_MACSEC_INGRESS_SA_TABLE_NAME) { SWSS_LOG_ENTER(); @@ -98,6 +99,51 @@ bool TeamMgr::isLagStateOk(const string &alias) return true; } +bool TeamMgr::isMACsecAttached(const std::string &port) +{ + SWSS_LOG_ENTER(); + + vector temp; + + if (!m_cfgPortTable.get(port, temp)) + { + SWSS_LOG_INFO("Port %s is not ready", port.c_str()); + return false; + } + + auto macsec_opt = swss::fvsGetValue(temp, "macsec", true); + if (!macsec_opt 
|| macsec_opt->empty()) + { + SWSS_LOG_INFO("MACsec isn't setted on the port %s", port.c_str()); + return false; + } + + return true; +} + +bool TeamMgr::isMACsecIngressSAOk(const std::string &port) +{ + SWSS_LOG_ENTER(); + + vector keys; + m_stateMACsecIngressSATable.getKeys(keys); + + for (auto key: keys) + { + auto tokens = tokenize(key, state_db_key_delimiter); + auto interface = tokens[0]; + + if (port == interface) + { + SWSS_LOG_NOTICE(" MACsec is ready on the port %s", port.c_str()); + return true; + } + } + + SWSS_LOG_INFO("MACsec is NOT ready on the port %s", port.c_str()); + return false; +} + void TeamMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -309,7 +355,11 @@ void TeamMgr::doLagMemberTask(Consumer &consumer) it++; continue; } - + if (isMACsecAttached(member) && !isMACsecIngressSAOk(member)) + { + it++; + continue; + } if (addLagMember(lag, member) == task_need_retry) { it++; @@ -400,6 +450,13 @@ void TeamMgr::doPortUpdateTask(Consumer &consumer) string lag; if (findPortMaster(lag, alias)) { + if (isMACsecAttached(alias) && !isMACsecIngressSAOk(alias)) + { + it++; + SWSS_LOG_INFO("MACsec is NOT ready on the port %s", alias.c_str()); + continue; + } + if (addLagMember(lag, alias) == task_need_retry) { it++; diff --git a/cfgmgr/teammgr.h b/cfgmgr/teammgr.h index c1b5d525c0..db87fdd1f4 100644 --- a/cfgmgr/teammgr.h +++ b/cfgmgr/teammgr.h @@ -27,6 +27,7 @@ class TeamMgr : public Orch Table m_cfgLagMemberTable; Table m_statePortTable; Table m_stateLagTable; + Table m_stateMACsecIngressSATable; ProducerStateTable m_appPortTable; ProducerStateTable m_appLagTable; @@ -55,6 +56,8 @@ class TeamMgr : public Orch bool checkPortIffUp(const std::string &); bool isPortStateOk(const std::string&); bool isLagStateOk(const std::string&); + bool isMACsecAttached(const std::string &); + bool isMACsecIngressSAOk(const std::string &); uint16_t generateLacpKey(const std::string&); }; diff --git a/tests/test_macsec.py b/tests/test_macsec.py index 
f2f8e8843e..9dc5a4ed53 100644 --- a/tests/test_macsec.py +++ b/tests/test_macsec.py @@ -2,6 +2,7 @@ from swsscommon.swsscommon import CounterTable, MacsecCounter import conftest +import time import functools import typing import re @@ -89,6 +90,12 @@ def convert_key(self, key: str): StateDBTable.SEPARATOR)) +class ConfigTable(Table): + + def __init__(self, dvs: conftest.DockerVirtualSwitch, table_name: str): + super(ConfigTable, self).__init__(dvs.get_config_db(), table_name) + + def gen_sci(macsec_system_identifier: str, macsec_port_identifier: int) -> str: macsec_system_identifier = macsec_system_identifier.translate( str.maketrans("", "", ":.-")) @@ -808,6 +815,87 @@ def test_macsec_attribute_change(self, dvs: conftest.DockerVirtualSwitch, testlo macsec_port_identifier, 0) + def test_macsec_with_portchannel(self, dvs: conftest.DockerVirtualSwitch, testlog): + + # Set MACsec enabled on Ethernet0 + ConfigTable(dvs, "PORT")["Ethernet0"] = {"macsec" : "test"} + StateDBTable(dvs, "FEATURE")["macsec"] = {"state": "enabled"} + + # Setup Port-channel + ConfigTable(dvs, "PORTCHANNEL")["PortChannel001"] = {"admin": "up", "mtu": "9100", "oper_status": "up"} + time.sleep(1) + + # create port channel member + ConfigTable(dvs, "PORTCHANNEL_MEMBER")["PortChannel001|Ethernet0"] = {"NULL": "NULL"} + ConfigTable(dvs, "PORTCHANNEL_INTERFACE")["PortChannel001"] = {"NULL": "NULL"} + ConfigTable(dvs, "PORTCHANNEL_INTERFACE")["PortChannel001|40.0.0.0/31"] = {"NULL": "NULL"} + time.sleep(3) + + # Check Portchannel member in ASIC db that shouldn't been created before MACsec enabled + lagmtbl = swsscommon.Table(swsscommon.DBConnector(1, dvs.redis_sock, 0), "ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER") + lagms = lagmtbl.getKeys() + assert len(lagms) == 0 + + # Create MACsec session + port_name = "Ethernet0" + local_mac_address = "00-15-5D-78-FF-C1" + peer_mac_address = "00-15-5D-78-FF-C2" + macsec_port_identifier = 1 + macsec_port = "macsec_eth1" + sak = "0" * 32 + auth_key = "0" * 32 + 
packet_number = 1 + ssci = 1 + salt = "0" * 24 + + wpa = WPASupplicantMock(dvs) + inspector = MACsecInspector(dvs) + + self.init_macsec( + wpa, + port_name, + local_mac_address, + macsec_port_identifier) + self.establish_macsec( + wpa, + port_name, + local_mac_address, + peer_mac_address, + macsec_port_identifier, + 0, + sak, + packet_number, + auth_key, + ssci, + salt) + time.sleep(3) + + # Check Portchannel member in ASIC db that should been created after MACsec enabled + lagmtbl = swsscommon.Table(swsscommon.DBConnector(1, dvs.redis_sock, 0), "ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER") + lagms = lagmtbl.getKeys() + assert len(lagms) == 1 + + self.deinit_macsec( + wpa, + inspector, + port_name, + macsec_port, + local_mac_address, + peer_mac_address, + macsec_port_identifier, + 0) + + # remove port channel member + del ConfigTable(dvs, "PORTCHANNEL_INTERFACE")["PortChannel001"] + del ConfigTable(dvs, "PORTCHANNEL_INTERFACE")["PortChannel001|40.0.0.0/31"] + del ConfigTable(dvs, "PORTCHANNEL_MEMBER")["PortChannel001|Ethernet0"] + + # remove port channel + del ConfigTable(dvs, "PORTCHANNEL")["PortChannel001"] + + # Clear MACsec enabled on Ethernet0 + ConfigTable(dvs, "PORT")["Ethernet0"] = {"macsec" : ""} + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down From 71752450e5d2c3c2f126aabddfe65b1d14344490 Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Wed, 6 Jul 2022 19:51:25 -0700 Subject: [PATCH 44/64] [VS Test] Skip failing subport tests (#2370) * Skip failing subport tests --- tests/test_sub_port_intf.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/test_sub_port_intf.py b/tests/test_sub_port_intf.py index 748e680e2a..3a1bad68fe 100644 --- a/tests/test_sub_port_intf.py +++ b/tests/test_sub_port_intf.py @@ -1,5 +1,6 @@ import json import time +import pytest from dvslib.dvs_common import wait_for_result from swsscommon import swsscommon @@ -581,6 +582,7 @@ def 
_test_sub_port_intf_creation(self, dvs, sub_port_intf_name, vrf_name=None): self.remove_lag(parent_port) self.check_lag_removal(parent_port_oid) + @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_creation(self, dvs): self.connect_dbs(dvs) @@ -667,6 +669,7 @@ def _test_sub_port_intf_add_ip_addrs(self, dvs, sub_port_intf_name, vrf_name=Non self.remove_lag(parent_port) self.asic_db.wait_for_n_keys(ASIC_LAG_TABLE, 0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_add_ip_addrs(self, dvs): self.connect_dbs(dvs) @@ -742,6 +745,7 @@ def _test_sub_port_intf_appl_db_proc_seq(self, dvs, sub_port_intf_name, admin_up self.remove_lag(parent_port) self.check_lag_removal(parent_port_oid) + @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_appl_db_proc_seq(self, dvs): self.connect_dbs(dvs) @@ -866,6 +870,7 @@ def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_n self.remove_lag(parent_port) self.asic_db.wait_for_n_keys(ASIC_LAG_TABLE, 0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_admin_status_change(self, dvs): self.connect_dbs(dvs) @@ -950,6 +955,7 @@ def _test_sub_port_intf_remove_ip_addrs(self, dvs, sub_port_intf_name, vrf_name= self.remove_lag(parent_port) self.asic_db.wait_for_n_keys(ASIC_LAG_TABLE, 0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_remove_ip_addrs(self, dvs): self.connect_dbs(dvs) @@ -1141,6 +1147,7 @@ def _test_sub_port_intf_removal(self, dvs, sub_port_intf_name, removal_seq_test= self.remove_lag(parent_port) self.check_lag_removal(parent_port_oid) + @pytest.mark.skip(reason="Failing. 
Under investigation") def test_sub_port_intf_removal(self, dvs): self.connect_dbs(dvs) @@ -1216,6 +1223,7 @@ def _test_sub_port_intf_mtu(self, dvs, sub_port_intf_name, vrf_name=None): self.remove_lag(parent_port) self.asic_db.wait_for_n_keys(ASIC_LAG_TABLE, 0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_mtu(self, dvs): self.connect_dbs(dvs) @@ -1444,6 +1452,7 @@ def _test_sub_port_intf_nhg_accel(self, dvs, sub_port_intf_name, nhop_num=3, cre parent_port_idx += (4 if parent_port_prefix == ETHERNET_PREFIX else 1) + @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_nhg_accel(self, dvs): self.connect_dbs(dvs) @@ -1584,6 +1593,7 @@ def _test_sub_port_intf_oper_down_with_pending_neigh_route_tasks(self, dvs, sub_ parent_port_idx += (4 if parent_port_prefix == ETHERNET_PREFIX else 1) + @pytest.mark.skip(reason="Failing. Under investigation") def test_sub_port_intf_oper_down_with_pending_neigh_route_tasks(self, dvs): self.connect_dbs(dvs) From 7126857c1e387784bd04b9f7502018b2a4973d85 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Fri, 8 Jul 2022 00:02:35 +0800 Subject: [PATCH 45/64] Port configuration incremental update support (#2305) *portsyncd no longer handle port configuration change and put it to APP DB *Implement incremental configuration change in portmgr *Adjust portsorch to meet incremental configuration change requirement --- cfgmgr/portmgr.cpp | 109 +++++++++----------- cfgmgr/portmgr.h | 3 +- orchagent/port.h | 10 +- orchagent/portsorch.cpp | 7 +- portsyncd/portsyncd.cpp | 54 ---------- tests/mock_tests/Makefile.am | 3 + tests/mock_tests/mock_shell_command.cpp | 15 +++ tests/mock_tests/portmgr_ut.cpp | 126 ++++++++++++++++++++++++ 8 files changed, 205 insertions(+), 122 deletions(-) create mode 100644 tests/mock_tests/mock_shell_command.cpp create mode 100644 tests/mock_tests/portmgr_ut.cpp diff --git a/cfgmgr/portmgr.cpp 
b/cfgmgr/portmgr.cpp index b385a5096a..38c0418a7a 100644 --- a/cfgmgr/portmgr.cpp +++ b/cfgmgr/portmgr.cpp @@ -31,29 +31,9 @@ bool PortMgr::setPortMtu(const string &alias, const string &mtu) // Set the port MTU in application database to update both // the port MTU and possibly the port based router interface MTU - vector fvs; - FieldValueTuple fv("mtu", mtu); - fvs.push_back(fv); - m_appPortTable.set(alias, fvs); - - return true; + return writeConfigToAppDb(alias, "mtu", mtu); } -bool PortMgr::setPortTpid(const string &alias, const string &tpid) -{ - stringstream cmd; - string res; - - // Set the port TPID in application database to update port TPID - vector fvs; - FieldValueTuple fv("tpid", tpid); - fvs.push_back(fv); - m_appPortTable.set(alias, fvs); - - return true; -} - - bool PortMgr::setPortAdminStatus(const string &alias, const bool up) { stringstream cmd; @@ -63,23 +43,7 @@ bool PortMgr::setPortAdminStatus(const string &alias, const bool up) cmd << IP_CMD << " link set dev " << shellquote(alias) << (up ? " up" : " down"); EXEC_WITH_ERROR_THROW(cmd.str(), res); - vector fvs; - FieldValueTuple fv("admin_status", (up ? "up" : "down")); - fvs.push_back(fv); - m_appPortTable.set(alias, fvs); - - return true; -} - -bool PortMgr::setPortLearnMode(const string &alias, const string &learn_mode) -{ - // Set the port MAC learn mode in application database - vector fvs; - FieldValueTuple fv("learn_mode", learn_mode); - fvs.push_back(fv); - m_appPortTable.set(alias, fvs); - - return true; + return writeConfigToAppDb(alias, "admin_status", (up ? "up" : "down")); } bool PortMgr::isPortStateOk(const string &alias) @@ -117,14 +81,14 @@ void PortMgr::doTask(Consumer &consumer) if (op == SET_COMMAND) { - if (!isPortStateOk(alias)) - { - SWSS_LOG_INFO("Port %s is not ready, pending...", alias.c_str()); - it++; - continue; - } + /* portOk=true indicates that the port has been created in kernel. + * We should not call any ip command if portOk=false. 
However, it is + * valid to put port configuration to APP DB which will trigger port creation in kernel. + */ + bool portOk = isPortStateOk(alias); - string admin_status, mtu, learn_mode, tpid; + string admin_status, mtu; + std::vector field_values; bool configured = (m_portList.find(alias) != m_portList.end()); @@ -138,6 +102,11 @@ void PortMgr::doTask(Consumer &consumer) m_portList.insert(alias); } + else if (!portOk) + { + it++; + continue; + } for (auto i : kfvFieldsValues(t)) { @@ -149,38 +118,42 @@ void PortMgr::doTask(Consumer &consumer) { admin_status = fvValue(i); } - else if (fvField(i) == "learn_mode") - { - learn_mode = fvValue(i); - } - else if (fvField(i) == "tpid") + else { - tpid = fvValue(i); + field_values.emplace_back(i); } } - if (!mtu.empty()) + for (auto &entry : field_values) { - setPortMtu(alias, mtu); - SWSS_LOG_NOTICE("Configure %s MTU to %s", alias.c_str(), mtu.c_str()); + writeConfigToAppDb(alias, fvField(entry), fvValue(entry)); + SWSS_LOG_NOTICE("Configure %s %s to %s", alias.c_str(), fvField(entry).c_str(), fvValue(entry).c_str()); } - if (!admin_status.empty()) + if (!portOk) { - setPortAdminStatus(alias, admin_status == "up"); - SWSS_LOG_NOTICE("Configure %s admin status to %s", alias.c_str(), admin_status.c_str()); + SWSS_LOG_INFO("Port %s is not ready, pending...", alias.c_str()); + + writeConfigToAppDb(alias, "mtu", mtu); + writeConfigToAppDb(alias, "admin_status", admin_status); + field_values.clear(); + field_values.emplace_back("mtu", mtu); + field_values.emplace_back("admin_status", admin_status); + it->second = KeyOpFieldsValuesTuple{alias, SET_COMMAND, field_values}; + it++; + continue; } - if (!learn_mode.empty()) + if (!mtu.empty()) { - setPortLearnMode(alias, learn_mode); - SWSS_LOG_NOTICE("Configure %s MAC learn mode to %s", alias.c_str(), learn_mode.c_str()); + setPortMtu(alias, mtu); + SWSS_LOG_NOTICE("Configure %s MTU to %s", alias.c_str(), mtu.c_str()); } - if (!tpid.empty()) + if (!admin_status.empty()) { - 
setPortTpid(alias, tpid); - SWSS_LOG_NOTICE("Configure %s TPID to %s", alias.c_str(), tpid.c_str()); + setPortAdminStatus(alias, admin_status == "up"); + SWSS_LOG_NOTICE("Configure %s admin status to %s", alias.c_str(), admin_status.c_str()); } } else if (op == DEL_COMMAND) @@ -193,3 +166,13 @@ void PortMgr::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); } } + +bool PortMgr::writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value) +{ + vector fvs; + FieldValueTuple fv(field, value); + fvs.push_back(fv); + m_appPortTable.set(alias, fvs); + + return true; +} diff --git a/cfgmgr/portmgr.h b/cfgmgr/portmgr.h index 809cd1c004..dde346bfe1 100644 --- a/cfgmgr/portmgr.h +++ b/cfgmgr/portmgr.h @@ -29,10 +29,9 @@ class PortMgr : public Orch std::set m_portList; void doTask(Consumer &consumer); + bool writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value); bool setPortMtu(const std::string &alias, const std::string &mtu); - bool setPortTpid(const std::string &alias, const std::string &tpid); bool setPortAdminStatus(const std::string &alias, const bool up); - bool setPortLearnMode(const std::string &alias, const std::string &learn_mode); bool isPortStateOk(const std::string &alias); }; diff --git a/orchagent/port.h b/orchagent/port.h index a561a221cf..e5ba8134f5 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -87,6 +87,12 @@ class Port UNKNOWN } ; + enum AutoNegMode { + AUTONEG_NOT_SET = -1, + AUTONEG_OFF = 0, + AUTONEG_ON = 1 + }; + Port() {}; Port(std::string alias, Type type) : m_alias(alias), m_type(type) {}; @@ -112,7 +118,7 @@ class Port uint32_t m_mtu = DEFAULT_MTU; uint32_t m_speed = 0; // Mbps std::string m_learn_mode = "hardware"; - int m_autoneg = -1; // -1 means not set, 0 = disabled, 1 = enabled + AutoNegMode m_autoneg = Port::AutoNegMode::AUTONEG_NOT_SET; bool m_admin_state_up = false; bool m_init = false; bool m_l3_vni = false; @@ -148,7 +154,7 @@ class Port 
uint32_t m_up_member_count = 0; uint32_t m_maximum_headroom = 0; std::vector m_adv_speeds; - sai_port_interface_type_t m_interface_type; + sai_port_interface_type_t m_interface_type = SAI_PORT_INTERFACE_TYPE_NONE; std::vector m_adv_interface_types; bool m_mpls = false; /* diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 2b816d71d2..687d1e915a 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -2966,6 +2966,11 @@ void PortsOrch::doPortTask(Consumer &consumer) } else { + if (admin_status.empty()) + { + admin_status = p.m_admin_state_up ? "up" : "down"; + } + if (!an_str.empty()) { if (autoneg_mode_map.find(an_str) == autoneg_mode_map.end()) @@ -3008,7 +3013,7 @@ void PortsOrch::doPortTask(Consumer &consumer) continue; } SWSS_LOG_NOTICE("Set port %s AutoNeg from %d to %d", alias.c_str(), p.m_autoneg, an); - p.m_autoneg = an; + p.m_autoneg = static_cast(an); m_portList[alias] = p; } } diff --git a/portsyncd/portsyncd.cpp b/portsyncd/portsyncd.cpp index 37e0c4232f..8243f94f8b 100644 --- a/portsyncd/portsyncd.cpp +++ b/portsyncd/portsyncd.cpp @@ -45,14 +45,12 @@ void usage() void handlePortConfigFile(ProducerStateTable &p, string file, bool warm); void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, bool warm); void handleVlanIntfFile(string file); -void handlePortConfig(ProducerStateTable &p, map &port_cfg_map); void checkPortInitDone(DBConnector *appl_db); int main(int argc, char **argv) { Logger::linkToDbNative("portsyncd"); int opt; - map port_cfg_map; while ((opt = getopt(argc, argv, "v:h")) != -1 ) { @@ -71,7 +69,6 @@ int main(int argc, char **argv) DBConnector appl_db("APPL_DB", 0); DBConnector state_db("STATE_DB", 0); ProducerStateTable p(&appl_db, APP_PORT_TABLE_NAME); - SubscriberStateTable portCfg(&cfgDb, CFG_PORT_TABLE_NAME); WarmStart::initialize("portsyncd", "swss"); WarmStart::checkWarmStart("portsyncd", "swss"); @@ -93,7 +90,6 @@ int main(int argc, char **argv) 
NetDispatcher::getInstance().registerMessageHandler(RTM_DELLINK, &sync); s.addSelectable(&netlink); - s.addSelectable(&portCfg); while (true) { @@ -135,28 +131,6 @@ int main(int argc, char **argv) g_init = true; } - if (!port_cfg_map.empty()) - { - handlePortConfig(p, port_cfg_map); - } - } - else if (temps == (Selectable *)&portCfg) - { - std::deque entries; - portCfg.pops(entries); - - for (auto entry: entries) - { - string key = kfvKey(entry); - - if (port_cfg_map.find(key) != port_cfg_map.end()) - { - /* For now we simply drop previous pending port config */ - port_cfg_map.erase(key); - } - port_cfg_map[key] = entry; - } - handlePortConfig(p, port_cfg_map); } else { @@ -225,31 +199,3 @@ void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, boo } } - -void handlePortConfig(ProducerStateTable &p, map &port_cfg_map) -{ - auto it = port_cfg_map.begin(); - while (it != port_cfg_map.end()) - { - KeyOpFieldsValuesTuple entry = it->second; - string key = kfvKey(entry); - string op = kfvOp(entry); - auto values = kfvFieldsValues(entry); - - /* only push down port config when port is not in hostif create pending state */ - if (g_portSet.find(key) == g_portSet.end()) - { - /* No support for port delete yet */ - if (op == SET_COMMAND) - { - p.set(key, values); - } - - it = port_cfg_map.erase(it); - } - else - { - it++; - } - } -} diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 553cd18bfe..dedd4445f7 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -39,7 +39,9 @@ tests_SOURCES = aclorch_ut.cpp \ mock_table.cpp \ mock_hiredis.cpp \ mock_redisreply.cpp \ + mock_shell_command.cpp \ bulker_ut.cpp \ + portmgr_ut.cpp \ fake_response_publisher.cpp \ swssnet_ut.cpp \ flowcounterrouteorch_ut.cpp \ @@ -98,6 +100,7 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/bfdorch.cpp \ $(top_srcdir)/orchagent/srv6orch.cpp \ $(top_srcdir)/orchagent/nvgreorch.cpp \ + $(top_srcdir)/cfgmgr/portmgr.cpp \ 
$(top_srcdir)/cfgmgr/buffermgrdyn.cpp tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp $(FLEX_CTR_DIR)/flowcounterrouteorch.cpp diff --git a/tests/mock_tests/mock_shell_command.cpp b/tests/mock_tests/mock_shell_command.cpp new file mode 100644 index 0000000000..f3ccfbfe5e --- /dev/null +++ b/tests/mock_tests/mock_shell_command.cpp @@ -0,0 +1,15 @@ +#include +#include + +int mockCmdReturn = 0; +std::string mockCmdStdcout = ""; +std::vector mockCallArgs; + +namespace swss { + int exec(const std::string &cmd, std::string &stdout) + { + mockCallArgs.push_back(cmd); + stdout = mockCmdStdcout; + return mockCmdReturn; + } +} diff --git a/tests/mock_tests/portmgr_ut.cpp b/tests/mock_tests/portmgr_ut.cpp new file mode 100644 index 0000000000..27dc61e03e --- /dev/null +++ b/tests/mock_tests/portmgr_ut.cpp @@ -0,0 +1,126 @@ +#include "portmgr.h" +#include "gtest/gtest.h" +#include "mock_table.h" +#include "redisutility.h" + +extern std::vector mockCallArgs; + +namespace portmgr_ut +{ + using namespace swss; + using namespace std; + + struct PortMgrTest : public ::testing::Test + { + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_portMgr; + PortMgrTest() + { + m_app_db = make_shared( + "APPL_DB", 0); + m_config_db = make_shared( + "CONFIG_DB", 0); + m_state_db = make_shared( + "STATE_DB", 0); + } + + virtual void SetUp() override + { + ::testing_db::reset(); + vector cfg_port_tables = { + CFG_PORT_TABLE_NAME, + }; + m_portMgr.reset(new PortMgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_port_tables)); + } + }; + + TEST_F(PortMgrTest, DoTask) + { + Table state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table app_port_table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table cfg_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + + // Port is not ready, verify that doTask does not handle port configuration + + 
cfg_port_table.set("Ethernet0", { + {"speed", "100000"}, + {"index", "1"} + }); + mockCallArgs.clear(); + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + ASSERT_TRUE(mockCallArgs.empty()); + std::vector values; + app_port_table.get("Ethernet0", values); + auto value_opt = swss::fvsGetValue(values, "mtu", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ(DEFAULT_MTU_STR, value_opt.get()); + value_opt = swss::fvsGetValue(values, "admin_status", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ(DEFAULT_ADMIN_STATUS_STR, value_opt.get()); + value_opt = swss::fvsGetValue(values, "speed", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("100000", value_opt.get()); + value_opt = swss::fvsGetValue(values, "index", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("1", value_opt.get()); + + // Set port state to ok, verify that doTask handle port configuration + state_port_table.set("Ethernet0", { + {"state", "ok"} + }); + m_portMgr->doTask(); + ASSERT_EQ(size_t(2), mockCallArgs.size()); + ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" mtu \"9100\"", mockCallArgs[0]); + ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" down", mockCallArgs[1]); + + // Set port admin_status, verify that it could override the default value + cfg_port_table.set("Ethernet0", { + {"admin_status", "up"} + }); + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + app_port_table.get("Ethernet0", values); + value_opt = swss::fvsGetValue(values, "admin_status", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("up", value_opt.get()); + } + + TEST_F(PortMgrTest, ConfigureDuringRetry) + { + Table state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table app_port_table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table cfg_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + + cfg_port_table.set("Ethernet0", { + {"speed", "100000"}, + {"index", "1"} + }); + + mockCallArgs.clear(); + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + ASSERT_TRUE(mockCallArgs.empty()); + 
+ cfg_port_table.set("Ethernet0", { + {"speed", "50000"}, + {"index", "1"}, + {"mtu", "1518"}, + {"admin_status", "up"} + }); + + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + ASSERT_TRUE(mockCallArgs.empty()); + + state_port_table.set("Ethernet0", { + {"state", "ok"} + }); + m_portMgr->doTask(); + ASSERT_EQ(size_t(2), mockCallArgs.size()); + ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" mtu \"1518\"", mockCallArgs[0]); + ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" up", mockCallArgs[1]); + } +} From f565f7d129c4cdfbcfb00cb681194bee32ab971f Mon Sep 17 00:00:00 2001 From: Qi Luo Date: Thu, 7 Jul 2022 15:44:51 -0700 Subject: [PATCH 46/64] Fix: missing sonic-db-cli in docker-sonic-vs image (#2357) *Fix: missing sonic-db-cli in docker-sonic-vs image --- .azure-pipelines/docker-sonic-vs/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/.azure-pipelines/docker-sonic-vs/Dockerfile b/.azure-pipelines/docker-sonic-vs/Dockerfile index 33cdb7e8dc..935dec1386 100644 --- a/.azure-pipelines/docker-sonic-vs/Dockerfile +++ b/.azure-pipelines/docker-sonic-vs/Dockerfile @@ -9,6 +9,7 @@ RUN dpkg --purge python-swsscommon python3-swsscommon swss libsairedis sonic-db- RUN dpkg -i /debs/libswsscommon_1.0.0_amd64.deb RUN dpkg -i /debs/python-swsscommon_1.0.0_amd64.deb RUN dpkg -i /debs/python3-swsscommon_1.0.0_amd64.deb +RUN dpkg -i /debs/sonic-db-cli_1.0.0_amd64.deb RUN dpkg -i /debs/libsaimetadata_1.0.0_amd64.deb RUN dpkg -i /debs/libsairedis_1.0.0_amd64.deb From fe11cc7cf871816e7f12e0aca4282cee9de18b26 Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Fri, 8 Jul 2022 12:14:14 +0800 Subject: [PATCH 47/64] Fix mux_acl_rule adding issue (#2356) What I did This PR is to fix the issue of adding mux_acl_rule into IngressTableDrop. 
The error log is Jun 25 08:02:37.159020 svcstr-7050-acs-4 ERR swss#orchagent: :- validateAclRuleMatch: Match SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS in rule mux_acl_rule is not supported by table IngressTableDrop PR #2341 added support for different matching field in different stage (INGRESS/EGRESS). For example, SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS is only supported at INGRESS stage. However, PR #2341 only handled one path for creating ACL table, that is by CONFIG_DB entry. There is a case that addAclTable is directly called from other orch, such as MuxOrch. In that case, the stage dependent matching field is not added. As a result, we will see the above error logs. To address the issue, I moved the call of addStageMandatoryMatchFields from doAclTableTask to addAclTable to ensure addStageMandatoryMatchFields is always called. Please note that addMandatoryActions is called from both doAclTableTask and addAclTable to ensure the validation of ACL table is passing. Why I did it To fix ACL rule issue for mux. How I verified it Verified by running test_pfcwd Verified by checking syslog Signed-off-by: bingwang --- orchagent/aclorch.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index ddeca7adf4..331a11dbda 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -3694,7 +3694,14 @@ bool AclOrch::addAclTable(AclTable &newTable) return true; } } - + // Update matching field according to ACL stage + newTable.addStageMandatoryMatchFields(); + + // Add mandatory ACL action if not present + // We need to call addMandatoryActions here because addAclTable is directly called in other orchs. 
+ // The action_list is already added if the ACL table creation is triggered by CONFIGDD, but calling addMandatoryActions + // twice will make no effect + newTable.addMandatoryActions(); if (createBindAclTable(newTable, table_oid)) { m_AclTables[table_oid] = newTable; @@ -4171,11 +4178,8 @@ void AclOrch::doAclTableTask(Consumer &consumer) } newTable.validateAddType(*tableType); - - newTable.addStageMandatoryMatchFields(); - + // Add mandatory ACL action if not present newTable.addMandatoryActions(); - // validate and create/update ACL Table if (bAllAttributesOk && newTable.validate()) { From bf91a49657d8afdb30039bfa2123170d5f1fc30f Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Fri, 8 Jul 2022 15:21:44 -0700 Subject: [PATCH 48/64] Add BGP profile to Vnet routes (#2337) *Add BGP profile support to Vnet routes --- orchagent/vnetorch.cpp | 58 ++++++++++++++++++++++++++++++++++++------ orchagent/vnetorch.h | 13 +++++++--- tests/test_vnet.py | 31 ++++++++++++++-------- 3 files changed, 80 insertions(+), 22 deletions(-) diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index 4640d68853..a3acf10e0e 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -172,6 +172,28 @@ bool VNetVrfObject::addRoute(IpPrefix& ipPrefix, NextHopGroupKey& nexthops) return true; } +void VNetVrfObject::addProfile(IpPrefix& ipPrefix, string& profile) +{ + profile_[ipPrefix] = profile; +} + +void VNetVrfObject::removeProfile(IpPrefix& ipPrefix) +{ + if (profile_.find(ipPrefix) != profile_.end()) + { + profile_.erase(ipPrefix); + } +} + +string VNetVrfObject::getProfile(IpPrefix& ipPrefix) +{ + if (profile_.find(ipPrefix) != profile_.end()) + { + return profile_[ipPrefix]; + } + return string(); +} + void VNetVrfObject::increaseNextHopRefCount(const nextHop& nh) { /* Return when there is no next hop (dropped) */ @@ -872,7 +894,7 @@ bool VNetRouteOrch::removeNextHopGroup(const string& vnet, const NextHopGroupKey template<> bool VNetRouteOrch::doRouteTask(const string& 
vnet, IpPrefix& ipPrefix, - NextHopGroupKey& nexthops, string& op, + NextHopGroupKey& nexthops, string& op, string& profile, const map& monitors) { SWSS_LOG_ENTER(); @@ -1011,6 +1033,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); } vrf_obj->removeRoute(ipPrefix); + vrf_obj->removeProfile(ipPrefix); } syncd_nexthop_groups_[vnet][nexthops].tunnel_routes.insert(ipPrefix); @@ -1019,7 +1042,12 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP syncd_nexthop_groups_[vnet][nexthops].ref_count++; vrf_obj->addRoute(ipPrefix, nexthops); - postRouteState(vnet, ipPrefix, nexthops); + if (!profile.empty()) + { + vrf_obj->addProfile(ipPrefix, profile); + } + + postRouteState(vnet, ipPrefix, nexthops, profile); } else if (op == DEL_COMMAND) { @@ -1071,6 +1099,7 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP } vrf_obj->removeRoute(ipPrefix); + vrf_obj->removeProfile(ipPrefix); removeRouteState(vnet, ipPrefix); } @@ -1609,7 +1638,7 @@ void VNetRouteOrch::delEndpointMonitor(const string& vnet, NextHopGroupKey& next } } -void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops) +void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile) { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); vector fvVector; @@ -1634,7 +1663,7 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH { if (route_state == "active") { - addRouteAdvertisement(ipPrefix); + addRouteAdvertisement(ipPrefix, profile); } else { @@ -1650,11 +1679,18 @@ void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) removeRouteAdvertisement(ipPrefix); } -void VNetRouteOrch::addRouteAdvertisement(IpPrefix& ipPrefix) +void VNetRouteOrch::addRouteAdvertisement(IpPrefix& ipPrefix, string& profile) { const string key = 
ipPrefix.to_string(); vector fvs; - fvs.push_back(FieldValueTuple("", "")); + if (profile.empty()) + { + fvs.push_back(FieldValueTuple("", "")); + } + else + { + fvs.push_back(FieldValueTuple("profile", profile)); + } state_vnet_rt_adv_table_->set(key, fvs); } @@ -1865,7 +1901,8 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) // Post configured in State DB for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) { - postRouteState(vnet, ip_pfx, nexthops); + string profile = vrf_obj->getProfile(ip_pfx); + postRouteState(vnet, ip_pfx, nexthops, profile); } } } @@ -1878,6 +1915,7 @@ bool VNetRouteOrch::handleTunnel(const Request& request) vector mac_list; vector vni_list; vector monitor_list; + string profile = ""; for (const auto& name: request.getAttrFieldNames()) { @@ -1899,6 +1937,10 @@ bool VNetRouteOrch::handleTunnel(const Request& request) { monitor_list = request.getAttrIPList(name); } + else if (name == "profile") + { + profile = request.getAttrString(name); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -1962,7 +2004,7 @@ bool VNetRouteOrch::handleTunnel(const Request& request) if (vnet_orch_->isVnetExecVrf()) { - return doRouteTask(vnet_name, ip_pfx, nhg, op, monitors); + return doRouteTask(vnet_name, ip_pfx, nhg, op, profile, monitors); } return true; diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 26e0733337..4f63764a0e 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -139,6 +139,7 @@ struct nextHop typedef std::map TunnelRoutes; typedef std::map RouteMap; +typedef std::map ProfileMap; class VNetVrfObject : public VNetObject { @@ -181,6 +182,10 @@ class VNetVrfObject : public VNetObject bool addRoute(IpPrefix& ipPrefix, nextHop& nh); bool removeRoute(IpPrefix& ipPrefix); + void addProfile(IpPrefix& ipPrefix, string& profile); + void removeProfile(IpPrefix& ipPrefix); + string getProfile(IpPrefix& ipPrefix); + size_t getRouteCount() const; bool getRouteNextHop(IpPrefix& 
ipPrefix, nextHop& nh); bool hasRoute(IpPrefix& ipPrefix); @@ -201,6 +206,7 @@ class VNetVrfObject : public VNetObject TunnelRoutes tunnels_; RouteMap routes_; + ProfileMap profile_; }; typedef std::unique_ptr VNetObject_T; @@ -275,6 +281,7 @@ const request_description_t vnet_route_description = { { "vni", REQ_T_STRING }, { "mac_address", REQ_T_STRING }, { "endpoint_monitor", REQ_T_IP_LIST }, + { "profile", REQ_T_STRING }, }, { } }; @@ -356,16 +363,16 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops); void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); - void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops); + void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile); void removeRouteState(const string& vnet, IpPrefix& ipPrefix); - void addRouteAdvertisement(IpPrefix& ipPrefix); + void addRouteAdvertisement(IpPrefix& ipPrefix, string& profile); void removeRouteAdvertisement(IpPrefix& ipPrefix); void updateVnetTunnel(const BfdUpdate&); bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); template - bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, + bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, const std::map& monitors=std::map()); template diff --git a/tests/test_vnet.py b/tests/test_vnet.py index 60a2ed8c33..0dec1f7446 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -140,11 +140,11 @@ def delete_vnet_local_routes(dvs, prefix, vnet_name): time.sleep(2) -def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor=""): - set_vnet_routes(dvs, prefix, vnet_name, endpoint, 
mac=mac, vni=vni, ep_monitor=ep_monitor) +def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile=""): + set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile) -def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor=""): +def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile=""): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) attrs = [ @@ -160,6 +160,9 @@ def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor= if ep_monitor: attrs.append(('endpoint_monitor', ep_monitor)) + if profile: + attrs.append(('profile', profile)) + tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") fvs = swsscommon.FieldValuePairs(attrs) tbl.set("%s|%s" % (vnet_name, prefix), fvs) @@ -490,13 +493,19 @@ def check_remove_state_db_routes(dvs, vnet, prefix): assert vnet + '|' + prefix not in keys -def check_routes_advertisement(dvs, prefix): +def check_routes_advertisement(dvs, prefix, profile=""): state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") keys = tbl.getKeys() assert prefix in keys + if profile: + status, fvs = tbl.get(prefix) + assert status, "Got an error when get a key" + fvs = dict(fvs) + assert fvs['profile'] == profile + def check_remove_routes_advertisement(dvs, prefix): state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) @@ -2011,7 +2020,7 @@ def test_vnet_orch_12(self, dvs, testlog): vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3') + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3', profile="test_profile") # default bfd status is 
down, route should not be programmed in this status vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) @@ -2025,14 +2034,14 @@ def test_vnet_orch_12(self, dvs, testlog): time.sleep(2) route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.3'], tunnel_name) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.3']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") # Remove endpoint from group if it goes down update_bfd_session_state(dvs, '12.1.0.2', 'Down') time.sleep(2) route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.3']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) @@ -2054,15 +2063,15 @@ def test_vnet_orch_12(self, dvs, testlog): route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") # Set the route1 to a new group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4') + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4', profile="test_profile2") update_bfd_session_state(dvs, '12.1.0.4', 'Up') time.sleep(2) route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.4'], tunnel_name, route_ids=route1) 
check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.4']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile2") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -2073,7 +2082,7 @@ def test_vnet_orch_12(self, dvs, testlog): time.sleep(2) route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.4']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile2") # Set all endpoint to down state update_bfd_session_state(dvs, '12.1.0.1', 'Down') From 0c3c9c671f6e53d6c1940f907be47983d3c1b0f4 Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Mon, 11 Jul 2022 09:59:24 +0800 Subject: [PATCH 49/64] Replace swsssdk with swsscommon (#2368) **What I did** Replace py-swsssdk with sonic-swss-common **Why I did it** To deprecate py-swsssdk, need replace it from code. **How I verified it** Pass all UT. 
**Details if related** --- fpmsyncd/bgp_eoiu_marker.py | 5 ++--- neighsyncd/restore_neighbors.py | 7 +++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/fpmsyncd/bgp_eoiu_marker.py b/fpmsyncd/bgp_eoiu_marker.py index 83051d7878..d7f144e4bf 100644 --- a/fpmsyncd/bgp_eoiu_marker.py +++ b/fpmsyncd/bgp_eoiu_marker.py @@ -17,7 +17,6 @@ """ import sys -import swsssdk import time import syslog import traceback @@ -80,7 +79,7 @@ def init_peers_eor_status(self): # Only two families: 'ipv4' and 'ipv6' # state is "unknown" / "reached" / "consumed" def set_bgp_eoiu_marker(self, family, state): - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) key = "BGP_STATE_TABLE|%s|eoiu" % family db.set(db.STATE_DB, key, 'state', state) @@ -90,7 +89,7 @@ def set_bgp_eoiu_marker(self, family, state): return def clean_bgp_eoiu_marker(self): - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) db.delete(db.STATE_DB, "BGP_STATE_TABLE|IPv4|eoiu") db.delete(db.STATE_DB, "BGP_STATE_TABLE|IPv6|eoiu") diff --git a/neighsyncd/restore_neighbors.py b/neighsyncd/restore_neighbors.py index fac7b1f2df..a02e5434fc 100755 --- a/neighsyncd/restore_neighbors.py +++ b/neighsyncd/restore_neighbors.py @@ -13,7 +13,6 @@ """ import sys -import swsssdk import netifaces import time from pyroute2 import IPRoute, NetlinkError @@ -117,7 +116,7 @@ def is_intf_up(intf, db): # 2, need check interface state twice due to the split map def read_neigh_table_to_maps(): - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.APPL_DB, False) intf_neigh_map = {} @@ -207,7 +206,7 @@ def build_arp_ns_pkt(family, smac, src_ip, dst_ip): # Set the statedb "NEIGH_RESTORE_TABLE|Flags", so neighsyncd can start reconciliation def set_statedb_neigh_restore_done(): - db = 
swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) db.set(db.STATE_DB, 'NEIGH_RESTORE_TABLE|Flags', 'restored', 'true') db.close(db.STATE_DB) @@ -228,7 +227,7 @@ def restore_update_kernel_neighbors(intf_neigh_map, timeout=DEF_TIME_OUT): ipclass = IPRoute() start_time = time.monotonic() is_intf_up.counter = 0 - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) while (time.monotonic() - start_time) < timeout: for intf, family_neigh_map in list(intf_neigh_map.items()): From 47f616246fb57401107b8cbcaa94d4effe1b1354 Mon Sep 17 00:00:00 2001 From: mint570 <70396898+mint570@users.noreply.github.com> Date: Tue, 12 Jul 2022 11:37:23 -0700 Subject: [PATCH 50/64] Run individule pytests instead of running all pytests at once. (#2350) *Run pytests one test at a time. --- .../test-docker-sonic-vs-template.yml | 23 ++++++++++++++----- tests/gcov_support.sh | 6 ----- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 83fd36dc09..ae2df1528d 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -69,16 +69,27 @@ jobs: pushd tests if [ '${{ parameters.archive_gcov }}' == True ]; then - sudo py.test -v --force-flaky --junitxml=tr.xml --keeptb --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) + all_tests=$(ls test_*.py) + all_tests="${all_tests} p4rt" + for test in ${all_tests}; do + test_name=$(echo "${test}" | cut -d "." 
-f 1) + sudo py.test -v --force-flaky --junitxml="${test_name}_tr.xml" --keeptb --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) ${test} + container_count=$(docker ps -q -a | wc -l) + if [ ${container_count} -gt 0 ]; then + ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) + docker stop $(docker ps -q -a) + docker rm $(docker ps -q -a) + fi + done else - sudo py.test -v --force-flaky --junitxml=tr.xml --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) + sudo py.test -v --force-flaky --junitxml=tests_tr.xml --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) fi rm -rf $(Build.ArtifactStagingDirectory)/download displayName: "Run vs tests" - task: PublishTestResults@2 inputs: - testResultsFiles: '**/tr.xml' + testResultsFiles: '**/*_tr.xml' testRunTitle: vstest condition: always() @@ -87,9 +98,9 @@ jobs: if [ '${{ parameters.archive_gcov }}' == True ]; then sudo apt-get install -y lcov - ./tests/gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) - docker stop $(docker ps -q -a) - docker rm $(docker ps -q -a) + cd $(Build.ArtifactStagingDirectory)/gcov_tmp/ + tar -zcvf sonic-gcov.tar.gz sonic-gcov/ + rm -rf sonic-gcov fi displayName: "Collect logs" condition: always() diff --git a/tests/gcov_support.sh b/tests/gcov_support.sh index d96ee1c250..1395f09149 100755 --- a/tests/gcov_support.sh +++ b/tests/gcov_support.sh @@ -197,12 +197,6 @@ gcov_set_environment() echo "cat list" cat ${CONTAINER_LIST} - - cd ${build_dir}/gcov_tmp/ - tar -zcvf sonic-gcov.tar.gz sonic-gcov/ - rm -rf sonic-gcov - cd ../../ - rm ${CONTAINER_LIST} } gcov_merge_info() From 3fd812d29ad51b5754ac3aaa0df803b66ea57740 Mon Sep 17 00:00:00 2001 From: jaganbal-a <97986478+jaganbal-a@users.noreply.github.com> Date: Wed, 13 Jul 2022 16:42:40 -0400 Subject: [PATCH 51/64] Orchagent changes for synchronizing npu/phy device Tx in the data path before enabling transceiver Tx. 
(#2277) * Signed-off-by: Jaganathan Anbalagan orchagent changes for https://github.com/sonic-net/SONiC/pull/916 * Signed-off-by: Jaganathan Anbalagan Addressing PR comment * Signed-off-by: Jaganathan Anbalagan Addressing PR comments-cosmetic * Signed-off-by: Jaganathan Anbalagan fixed typo * Signed-off-by: Jaganathan Anbalagan VS test code and addressing PR comment * Signed-off-by: Jaganathan Anbalagan set host_tx_ready to false if gbsyncd SAI API fails. Co-authored-by: Jaganathan Anbalagan --- orchagent/portsorch.cpp | 65 ++++++++++++++++++++++++++++++++++---- orchagent/portsorch.h | 1 + tests/test_admin_status.py | 29 +++++++++++++++++ 3 files changed, 89 insertions(+), 6 deletions(-) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 687d1e915a..181e7d4e4b 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -1032,6 +1032,38 @@ void PortsOrch::getCpuPort(Port &port) port = m_cpuPort; } +/* + * Create host_tx_ready field in PORT_TABLE of STATE-DB + * and set the field to false by default for the + * front port. 
+ */ +void PortsOrch::initHostTxReadyState(Port &port) +{ + SWSS_LOG_ENTER(); + + vector tuples; + bool exist = m_portStateTable.get(port.m_alias, tuples); + string hostTxReady; + + if (exist) + { + for (auto i : tuples) + { + if (fvField(i) == "host_tx_ready") + { + hostTxReady = fvValue(i); + } + } + } + + if (hostTxReady.empty()) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + SWSS_LOG_INFO("initalize hostTxReady %s with status %s", + port.m_alias.c_str(), hostTxReady.c_str()); + } +} + bool PortsOrch::setPortAdminStatus(Port &port, bool state) { SWSS_LOG_ENTER(); @@ -1040,11 +1072,20 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) attr.id = SAI_PORT_ATTR_ADMIN_STATE; attr.value.booldata = state; + /* Update the host_tx_ready to false before setting admin_state, when admin state is false */ + if (!state) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + SWSS_LOG_INFO("Set admin status DOWN host_tx_ready to false to port pid:%" PRIx64, + port.m_port_id); + } + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set admin status %s to port pid:%" PRIx64, state ? "UP" : "DOWN", port.m_port_id); + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1052,10 +1093,19 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) } } - SWSS_LOG_INFO("Set admin status %s to port pid:%" PRIx64, - state ? 
"UP" : "DOWN", port.m_port_id); - - setGearboxPortsAttr(port, SAI_PORT_ATTR_ADMIN_STATE, &state); + bool gbstatus = setGearboxPortsAttr(port, SAI_PORT_ATTR_ADMIN_STATE, &state); + if (gbstatus != true) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + } + + /* Update the state table for host_tx_ready*/ + if (state && (gbstatus == true) && (status == SAI_STATUS_SUCCESS) ) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "true"); + SWSS_LOG_INFO("Set admin status UP host_tx_ready to true to port pid:%" PRIx64, + port.m_port_id); + } return true; } @@ -1940,7 +1990,7 @@ void PortsOrch::initPortSupportedSpeeds(const std::string& alias, sai_object_id_ */ bool PortsOrch::setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value) { - bool status; + bool status = false; status = setGearboxPortAttr(port, PHY_PORT_TYPE, id, value); @@ -3367,7 +3417,10 @@ void PortsOrch::doPortTask(Consumer &consumer) } } - + + /* create host_tx_ready field in state-db */ + initHostTxReadyState(p); + /* Last step set port admin status */ if (!admin_status.empty() && (p.m_admin_state_up != (admin_status == "up"))) { diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index c820d6969d..0fd3552e19 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -95,6 +95,7 @@ class PortsOrch : public Orch, public Subject bool getPortByBridgePortId(sai_object_id_t bridge_port_id, Port &port); void setPort(string alias, Port port); void getCpuPort(Port &port); + void initHostTxReadyState(Port &port); bool getInbandPort(Port &port); bool getVlanByVlanId(sai_vlan_id_t vlan_id, Port &vlan); diff --git a/tests/test_admin_status.py b/tests/test_admin_status.py index 15724a7c02..1b99bf37c7 100644 --- a/tests/test_admin_status.py +++ b/tests/test_admin_status.py @@ -9,6 +9,7 @@ def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) self.cdb = swsscommon.DBConnector(4, 
dvs.redis_sock, 0) + self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) def set_admin_status(self, port, admin_status): assert admin_status == "up" or admin_status == "down" @@ -52,6 +53,16 @@ def check_admin_status(self, dvs, port, admin_status): if fv[0] == "SAI_PORT_ATTR_ADMIN_STATE": assert fv[1] == "true" if admin_status == "up" else "false" + def check_host_tx_ready_status(self, dvs, port, admin_status): + assert admin_status == "up" or admin_status == "down" + ptbl = swsscommon.Table(self.sdb, "PORT_TABLE") + (status, fvs) = ptbl.get(port) + assert status == True + assert "host_tx_ready" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "host_tx_ready": + assert fv[1] == "true" if admin_status == "up" else "false" + def test_PortChannelMemberAdminStatus(self, dvs, testlog): self.setup_db(dvs) @@ -79,6 +90,24 @@ def test_PortChannelMemberAdminStatus(self, dvs, testlog): # remove port channel self.remove_port_channel(dvs, "PortChannel6") + def test_PortHostTxReadiness(self, dvs, testlog): + self.setup_db(dvs) + + # configure admin status to interface + self.set_admin_status("Ethernet0", "up") + self.set_admin_status("Ethernet4", "down") + self.set_admin_status("Ethernet8", "up") + + # check ASIC port database + self.check_admin_status(dvs, "Ethernet0", "up") + self.check_admin_status(dvs, "Ethernet4", "down") + self.check_admin_status(dvs, "Ethernet8", "up") + + # check host readiness status in PORT TABLE of STATE-DB + self.check_host_tx_ready_status(dvs, "Ethernet0", "up") + self.check_host_tx_ready_status(dvs, "Ethernet4", "down") + self.check_host_tx_ready_status(dvs, "Ethernet8", "up") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying From 24a079742b8bdd75c30cf91e64dc8ed4f7536b80 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Thu, 14 Jul 2022 21:22:14 +0800 Subject: [PATCH 52/64] [muxorch] Always use direct link 
for SoC IPs (#2369) What I did For SoC IPs of ports in active-active cable type, if mux is toggled to standby, still use direct link instead of changing next-hop to the tunnel. Why I did it In an active-active dualtor setup, changing the nexthop of route to SoC IP to the tunnel will have the following problem: If lower ToR is already standby, and somehow the upper ToR decides to toggle itself to standby, the toggle will fail: linkmgrd decide to toggle to standby --> muxorch disables the SoC IP neighbor and change the route next-hop to the tunnel --> ycabled could not setup gRPC connection. How I verified it Add unittest and verify the change on active-active testbeds. --- orchagent/muxorch.cpp | 29 ++++++++++++++++++++++++++--- orchagent/muxorch.h | 5 ++++- tests/test_mux.py | 23 +++++++++++++++++------ 3 files changed, 47 insertions(+), 10 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index fb45e0132d..c9d4158040 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -351,8 +351,8 @@ static bool remove_nh_tunnel(sai_object_id_t nh_id, IpAddress& ipAddr) return true; } -MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip) - :mux_name_(name), srv_ip4_(srv_ip4), srv_ip6_(srv_ip6), peer_ip4_(peer_ip) +MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip, std::set skip_neighbors) + :mux_name_(name), srv_ip4_(srv_ip4), srv_ip6_(srv_ip6), peer_ip4_(peer_ip), skip_neighbors_(skip_neighbors) { mux_orch_ = gDirectory.get(); mux_cb_orch_ = gDirectory.get(); @@ -534,6 +534,11 @@ bool MuxCable::nbrHandler(bool enable, bool update_rt) void MuxCable::updateNeighbor(NextHopKey nh, bool add) { sai_object_id_t tnh = mux_orch_->getNextHopTunnelId(MUX_TUNNEL, peer_ip4_); + if (add && skip_neighbors_.find(nh.ip_address) != skip_neighbors_.end()) + { + SWSS_LOG_INFO("Skip update neighbor %s on %s", nh.ip_address.to_string().c_str(), nh.alias.c_str()); + return; + } 
nbr_handler_->update(nh, tnh, add, state_); if (add) { @@ -1208,9 +1213,27 @@ bool MuxOrch::handleMuxCfg(const Request& request) auto srv_ip = request.getAttrIpPrefix("server_ipv4"); auto srv_ip6 = request.getAttrIpPrefix("server_ipv6"); + std::set skip_neighbors; + const auto& port_name = request.getKeyString(0); auto op = request.getOperation(); + for (const auto &name : request.getAttrFieldNames()) + { + if (name == "soc_ipv4") + { + auto soc_ip = request.getAttrIpPrefix("soc_ipv4"); + SWSS_LOG_NOTICE("%s: %s was added to ignored neighbor list", port_name.c_str(), soc_ip.getIp().to_string().c_str()); + skip_neighbors.insert(soc_ip.getIp()); + } + else if (name == "soc_ipv6") + { + auto soc_ip6 = request.getAttrIpPrefix("soc_ipv6"); + SWSS_LOG_NOTICE("%s: %s was added to ignored neighbor list", port_name.c_str(), soc_ip6.getIp().to_string().c_str()); + skip_neighbors.insert(soc_ip6.getIp()); + } + } + if (op == SET_COMMAND) { if(isMuxExists(port_name)) @@ -1226,7 +1249,7 @@ bool MuxOrch::handleMuxCfg(const Request& request) } mux_cable_tb_[port_name] = std::make_unique - (MuxCable(port_name, srv_ip, srv_ip6, mux_peer_switch_)); + (MuxCable(port_name, srv_ip, srv_ip6, mux_peer_switch_, skip_neighbors)); SWSS_LOG_NOTICE("Mux entry for port '%s' was added", port_name.c_str()); } diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index bf230a6d71..011d61b924 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -76,7 +76,7 @@ class MuxNbrHandler class MuxCable { public: - MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip); + MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip, std::set skip_neighbors); bool isActive() const { @@ -115,6 +115,8 @@ class MuxCable IpPrefix srv_ip4_, srv_ip6_; IpAddress peer_ip4_; + std::set skip_neighbors_; + MuxOrch *mux_orch_; MuxCableOrch *mux_cb_orch_; MuxStateOrch *mux_state_orch_; @@ -132,6 +134,7 @@ const request_description_t mux_cfg_request_description = { { 
"server_ipv6", REQ_T_IP_PREFIX }, { "address_ipv4", REQ_T_IP }, { "soc_ipv4", REQ_T_IP_PREFIX }, + { "soc_ipv6", REQ_T_IP_PREFIX }, { "cable_type", REQ_T_STRING }, }, { } diff --git a/tests/test_mux.py b/tests/test_mux.py index 8ba4932ea9..3913f77d2b 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -27,6 +27,7 @@ class TestMuxTunnelBase(object): SERV1_IPV4 = "192.168.0.100" SERV1_IPV6 = "fc02:1000::100" + SERV1_SOC_IPV4 = "192.168.0.102" SERV2_IPV4 = "192.168.0.101" SERV2_IPV6 = "fc02:1000::101" IPV4_MASK = "/32" @@ -74,7 +75,12 @@ def create_vlan_interface(self, confdb, asicdb, dvs): def create_mux_cable(self, confdb): - fvs = { "server_ipv4":self.SERV1_IPV4+self.IPV4_MASK, "server_ipv6":self.SERV1_IPV6+self.IPV6_MASK } + fvs = { + "server_ipv4":self.SERV1_IPV4 + self.IPV4_MASK, + "server_ipv6":self.SERV1_IPV6 + self.IPV6_MASK, + "soc_ipv4": self.SERV1_SOC_IPV4 + self.IPV4_MASK, + "cable_type": "active-active" + } confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet0", fvs) fvs = { "server_ipv4":self.SERV2_IPV4+self.IPV4_MASK, "server_ipv6":self.SERV2_IPV6+self.IPV6_MASK } @@ -201,6 +207,9 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01", True) srv1_v6 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6, 3) + self.add_neighbor(dvs, self.SERV1_SOC_IPV4, "00:00:00:00:00:01") + self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4, 4) + existing_keys = asicdb.get_keys(self.ASIC_NEIGH_TABLE) self.add_neighbor(dvs, self.SERV2_IPV4, "00:00:00:00:00:02") @@ -212,7 +221,7 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): dvs_route.check_asicdb_route_entries([self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK]) # The first standby route also creates as tunnel Nexthop - self.check_tnl_nexthop_in_asic_db(asicdb, 3) + self.check_tnl_nexthop_in_asic_db(asicdb, 4) # Change state to Standby. 
This will delete Neigh and add Route self.set_mux_state(appdb, "Ethernet0", "standby") @@ -220,13 +229,15 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_v4) asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_v6) dvs_route.check_asicdb_route_entries([self.SERV1_IPV4+self.IPV4_MASK, self.SERV1_IPV6+self.IPV6_MASK]) + self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4, 2) + dvs_route.check_asicdb_deleted_route_entries([self.SERV1_SOC_IPV4+self.IPV4_MASK]) # Change state to Active. This will add Neigh and delete Route self.set_mux_state(appdb, "Ethernet4", "active") dvs_route.check_asicdb_deleted_route_entries([self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4, 3) - self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6, 3) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4, 4) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6, 4) def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): @@ -244,7 +255,7 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): self.add_neighbor(dvs, ip_2, "00:00:00:00:00:12", True) # ip_1 is on Active Mux, hence added to Host table - self.check_neigh_in_asic_db(asicdb, ip_1, 4) + self.check_neigh_in_asic_db(asicdb, ip_1, 5) # ip_2 is on Standby Mux, hence added to Route table dvs_route.check_asicdb_route_entries([ip_2+self.IPV6_MASK]) @@ -260,7 +271,7 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): # ip_2 moved to active Mux, hence remove from Route table dvs_route.check_asicdb_deleted_route_entries([ip_2+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, ip_2, 4) + self.check_neigh_in_asic_db(asicdb, ip_2, 5) # Simulate FDB aging out test case ip_3 = "192.168.0.200" From 9d3a5c50a387f5545899c5604c9964e6b177d76c Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Fri, 15 Jul 2022 13:31:35 -0700 Subject: [PATCH 53/64] 
[vxlan]Fixing L2MC vlan member caching issue (#2378) * [vxlan]Fixing L2MC vlan member caching issue --- orchagent/portsorch.cpp | 83 +++++++++++++++++++++++++++++----- tests/test_evpn_tunnel_p2mp.py | 11 +++++ 2 files changed, 82 insertions(+), 12 deletions(-) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 181e7d4e4b..03e103e694 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -4832,7 +4832,11 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) { SWSS_LOG_ERROR("Failed to set l2mc flood type combined " " to vlan %hu for unknown unicast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.uuc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED; } @@ -4847,7 +4851,12 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) { SWSS_LOG_ERROR("Failed to set l2mc flood type combined " " to vlan %hu for broadcast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.bc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED; } @@ -4858,7 +4867,12 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create l2mc flood group"); - return false; + task_process_status handle_status = handleSaiCreateStatus(SAI_API_L2MC_GROUP, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } if (vlan.m_vlan_info.uuc_flood_type == SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED) @@ -4872,7 +4886,12 @@ bool 
PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) SWSS_LOG_ERROR("Failed to set l2mc group %" PRIx64 " to vlan %hu for unknown unicast flooding", l2mc_group_id, vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } } if (vlan.m_vlan_info.bc_flood_type == SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED) @@ -4886,7 +4905,12 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) SWSS_LOG_ERROR("Failed to set l2mc group %" PRIx64 " to vlan %hu for broadcast flooding", l2mc_group_id, vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } } vlan.m_vlan_info.l2mc_group_id = l2mc_group_id; @@ -4926,7 +4950,12 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) { SWSS_LOG_ERROR("Failed to create l2mc group member for adding tunnel %s to vlan %hu", end_point_ip.c_str(), vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiCreateStatus(SAI_API_L2MC_GROUP, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.l2mc_members[end_point_ip] = l2mc_group_member; m_portList[vlan.m_alias] = vlan; @@ -4953,7 +4982,11 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip { SWSS_LOG_ERROR("Failed to remove end point ip %s from vlan %hu", end_point_ip.c_str(), vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_L2MC_GROUP, status); + if (handle_status != task_success) + { + return 
parseHandleSaiStatusFailure(handle_status); + } } decreaseBridgePortRefCount(port); vlan.m_vlan_info.l2mc_members.erase(end_point_ip); @@ -4973,7 +5006,12 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip SWSS_LOG_ERROR("Failed to set null l2mc group " " to vlan %hu for unknown unicast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } attr.id = SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_CONTROL_TYPE; attr.value.s32 = SAI_VLAN_FLOOD_CONTROL_TYPE_ALL; @@ -4983,7 +5021,12 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip SWSS_LOG_ERROR("Failed to set flood control type all" " to vlan %hu for unknown unicast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.uuc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_ALL; } @@ -4998,7 +5041,12 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip SWSS_LOG_ERROR("Failed to set null l2mc group " " to vlan %hu for broadcast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } attr.id = SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE; attr.value.s32 = SAI_VLAN_FLOOD_CONTROL_TYPE_ALL; @@ -5008,7 +5056,12 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip SWSS_LOG_ERROR("Failed to set flood control type all" " to vlan %hu for broadcast flooding", 
vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.bc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_ALL; } @@ -5016,10 +5069,16 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to remove l2mc group %" PRIx64, l2mc_group_id); - return false; + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_L2MC_GROUP, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.l2mc_group_id = SAI_NULL_OBJECT_ID; } + m_portList[vlan.m_alias] = vlan; return true; } diff --git a/tests/test_evpn_tunnel_p2mp.py b/tests/test_evpn_tunnel_p2mp.py index 1783980b73..f2b3e62cea 100644 --- a/tests/test_evpn_tunnel_p2mp.py +++ b/tests/test_evpn_tunnel_p2mp.py @@ -102,6 +102,17 @@ def test_vlan_extension(self, dvs, testlog): vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') + print("Testing remote endpoint again to 8.8.8.8") + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') + print("Testing remote endpoint creation to 8.8.8.8") + + print("Testing VLAN 100 extension to 8.8.8.8") + vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') + + print("Testing Last Vlan removal and remote endpoint delete for 8.8.8.8") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') + vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') vxlan_obj.remove_vxlan_tunnel_map(dvs, 
tunnel_name, map_name_2, '1002', 'Vlan102') From 90459953c8206381d4b6ebf137e6bbd74eb212f8 Mon Sep 17 00:00:00 2001 From: andywongarista <78833093+andywongarista@users.noreply.github.com> Date: Sun, 17 Jul 2022 19:58:48 -0700 Subject: [PATCH 54/64] [orchagent]: Enhance initSaiPhyApi (#2367) * Add support for generic hwinfo string in gearbox_config.json The SAI_SWITCH_ATTR_SWITCH_HARDWARE_INFO formatting is vendor specific. * Remove the formating check that assumes its of the mdio sysfs format * Note the the count remains without including the NULL termintor, which is not compliant with the SAI header definintion that indicates a NULL terminated string. Signed-off-by: aaronp@arista.com * Add support to allow Firmware Major Version to return unsupported" Some external phys do not support Firmware upgrades and therefore do not have a firmware version. The SAI_SWITCH_ATTR_FIRMWARE_MAJOR_VERSION may return SAI_STATUS_ATTR_NOT_SUPPORTED which needs to be gracefully supported and allow the phy to be created. * Allow SAI_STATUS_NOT_SUPPORTED return value and set version to empty string. 
Signed-off-by: Aaron Payment * Address review comments * Address review comments, fix hwinfo Co-authored-by: Aaron Payment --- orchagent/saihelper.cpp | 46 ++++++++++++++++++----------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index 3b409f7217..ee6ce2b802 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -359,9 +359,6 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) sai_status_t status; char fwPath[PATH_MAX]; char hwinfo[HWINFO_MAX_SIZE + 1]; - char hwinfoIntf[IFNAMSIZ + 1]; - unsigned int hwinfoPhyid; - int ret; SWSS_LOG_ENTER(); @@ -377,22 +374,15 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) attr.value.u32 = 0; attrs.push_back(attr); - ret = sscanf(phy->hwinfo.c_str(), "%" STR(IFNAMSIZ) "[^/]/%u", hwinfoIntf, &hwinfoPhyid); - if (ret != 2) { - SWSS_LOG_ERROR("BOX: hardware info doesn't match the 'interface_name/phyid' " - "format"); - return SAI_STATUS_FAILURE; + if( phy->hwinfo.length() > HWINFO_MAX_SIZE ) { + SWSS_LOG_ERROR( "hwinfo string attribute is too long." 
); + return SAI_STATUS_FAILURE; } - - if (hwinfoPhyid > std::numeric_limits::max()) { - SWSS_LOG_ERROR("BOX: phyid is bigger than maximum limit"); - return SAI_STATUS_FAILURE; - } - - strcpy(hwinfo, phy->hwinfo.c_str()); + memset(hwinfo, 0, HWINFO_MAX_SIZE + 1); + strncpy(hwinfo, phy->hwinfo.c_str(), phy->hwinfo.length()); attr.id = SAI_SWITCH_ATTR_SWITCH_HARDWARE_INFO; - attr.value.s8list.count = (uint32_t) phy->hwinfo.length(); + attr.value.s8list.count = (uint32_t) phy->hwinfo.length() + 1; attr.value.s8list.list = (int8_t *) hwinfo; attrs.push_back(attr); @@ -452,17 +442,21 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) phy->phy_oid = sai_serialize_object_id(phyOid); - attr.id = SAI_SWITCH_ATTR_FIRMWARE_MAJOR_VERSION; - status = sai_switch_api->get_switch_attribute(phyOid, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("BOX: Failed to get firmware major version:%d rtn:%d", phy->phy_id, status); - return status; - } - else + if (phy->firmware.length() != 0) { - phy->firmware_major_version = string(attr.value.chardata); + attr.id = SAI_SWITCH_ATTR_FIRMWARE_MAJOR_VERSION; + status = sai_switch_api->get_switch_attribute(phyOid, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("BOX: Failed to get firmware major version for hwinfo:%s, phy:%d, rtn:%d", + phy->hwinfo.c_str(), phy->phy_id, status); + return status; + } + else + { + phy->firmware_major_version = string(attr.value.chardata); + } } - return status; } + From 419ab1baaa95c2dd4c637f17212649944c961b2c Mon Sep 17 00:00:00 2001 From: Ze Gan Date: Tue, 19 Jul 2022 15:42:34 +0800 Subject: [PATCH 55/64] [macsecmgr]: Fix cleanup macsec objs if container stop (#2376) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit What I did Introduce SIGTERM handlers in macsecmgrd. When the macsecmgrd exit with signal SIGTERM, all existing MACsec objs will clean up. 
Adjust the cleanup order to follow the wpa_supplicant did (Remove Ingress objs firstly and Egress objs then). Why I did it When “docker stop”, macsecmgrd need also to cleanup all exiting MACsec objs. How I verified it Try "sudo config feature state macsec disabled`, the MACsec objs were removed. Signed-off-by: Ze Gan --- cfgmgr/macsecmgrd.cpp | 26 +++++++++++++++++++++++++- orchagent/macsecorch.cpp | 12 ++++++------ 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/cfgmgr/macsecmgrd.cpp b/cfgmgr/macsecmgrd.cpp index 913c0ac4ee..ff7bda9087 100644 --- a/cfgmgr/macsecmgrd.cpp +++ b/cfgmgr/macsecmgrd.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -45,6 +46,20 @@ string gResponsePublisherRecordFile; /* Global database mutex */ mutex gDbMutex; +static bool received_sigterm = false; +static struct sigaction old_sigaction; + +static void sig_handler(int signo) +{ + SWSS_LOG_ENTER(); + + if (old_sigaction.sa_handler != SIG_IGN && old_sigaction.sa_handler != SIG_DFL) { + old_sigaction.sa_handler(signo); + } + + received_sigterm = true; + return; +} int main(int argc, char **argv) { @@ -54,6 +69,15 @@ int main(int argc, char **argv) Logger::linkToDbNative("macsecmgrd"); SWSS_LOG_NOTICE("--- Starting macsecmgrd ---"); + /* Register the signal handler for SIGTERM */ + struct sigaction sigact = {}; + sigact.sa_handler = sig_handler; + if (sigaction(SIGTERM, &sigact, &old_sigaction)) + { + SWSS_LOG_ERROR("failed to setup SIGTERM action handler"); + exit(EXIT_FAILURE); + } + swss::DBConnector cfgDb("CONFIG_DB", 0); swss::DBConnector stateDb("STATE_DB", 0); @@ -73,7 +97,7 @@ int main(int argc, char **argv) } SWSS_LOG_NOTICE("starting main loop"); - while (true) + while (!received_sigterm) { Selectable *sel; int ret; diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index dc2c9d7b43..67ba904043 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -1482,22 +1482,22 @@ bool MACsecOrch::deleteMACsecPort( bool result = 
true; - auto sc = macsec_port.m_egress_scs.begin(); - while (sc != macsec_port.m_egress_scs.end()) + auto sc = macsec_port.m_ingress_scs.begin(); + while (sc != macsec_port.m_ingress_scs.end()) { const std::string port_sci = swss::join(':', port_name, MACsecSCI(sc->first)); sc ++; - if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_EGRESS) != task_success) + if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_INGRESS) != task_success) { result &= false; } } - sc = macsec_port.m_ingress_scs.begin(); - while (sc != macsec_port.m_ingress_scs.end()) + sc = macsec_port.m_egress_scs.begin(); + while (sc != macsec_port.m_egress_scs.end()) { const std::string port_sci = swss::join(':', port_name, MACsecSCI(sc->first)); sc ++; - if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_INGRESS) != task_success) + if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_EGRESS) != task_success) { result &= false; } From 33c420d5e05ee83b52cc2543bba93e6ae949a686 Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Thu, 21 Jul 2022 01:44:48 +0800 Subject: [PATCH 56/64] [Buffer Orch] Support removing buffer port profile list (#2371) Signed-off-by: Stephen Sun What I did Support removing an entry from BUFFER_PORT_INGRESS/EGRESS_PROFILE_LIST table. To remove it, set the list as empty. 
Add mock test to verify the change How I verified it Mock test and manual test --- orchagent/bufferorch.cpp | 94 ++++++++++++------- tests/mock_tests/bufferorch_ut.cpp | 145 ++++++++++++++++++++++++++++- 2 files changed, 205 insertions(+), 34 deletions(-) diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index bfb5978067..51b1ac5330 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -987,28 +987,43 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue vector port_names = tokenize(key, list_item_delimiter); vector profile_list; + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST; - string profile_name_list; - ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, - buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, - profile_list, profile_name_list); - if (ref_resolve_status::success != resolve_status) + if (op == SET_COMMAND) { - if(ref_resolve_status::not_resolved == resolve_status) + string profile_name_list; + ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, + buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, + profile_list, profile_name_list); + if (ref_resolve_status::success != resolve_status) { - SWSS_LOG_INFO("Missing or invalid ingress buffer profile reference specified for:%s", key.c_str()); - return task_process_status::task_need_retry; + if(ref_resolve_status::not_resolved == resolve_status) + { + SWSS_LOG_INFO("Missing or invalid ingress buffer profile reference specified for:%s", key.c_str()); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Failed resolving ingress buffer profile reference specified for:%s", key.c_str()); + return task_process_status::task_failed; } - SWSS_LOG_ERROR("Failed resolving ingress buffer profile reference specified for:%s", key.c_str()); - return 
task_process_status::task_failed; - } - setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); + setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); + + attr.value.objlist.count = (uint32_t)profile_list.size(); + attr.value.objlist.list = profile_list.data(); + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_NOTICE("%s has been removed from BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE", key.c_str()); + removeObject(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key); + attr.value.objlist.count = 0; + attr.value.objlist.list = profile_list.data(); + } + else + { + SWSS_LOG_ERROR("Unknown command %s when handling BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE key %s", op.c_str(), key.c_str()); + } - sai_attribute_t attr; - attr.id = SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST; - attr.value.objlist.count = (uint32_t)profile_list.size(); - attr.value.objlist.list = profile_list.data(); for (string port_name : port_names) { if (!gPortsOrch->getPort(port_name, port)) @@ -1043,28 +1058,43 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues SWSS_LOG_DEBUG("processing:%s", key.c_str()); vector port_names = tokenize(key, list_item_delimiter); vector profile_list; + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_QOS_EGRESS_BUFFER_PROFILE_LIST; - string profile_name_list; - ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, - buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, - profile_list, profile_name_list); - if (ref_resolve_status::success != resolve_status) + if (op == SET_COMMAND) { - if(ref_resolve_status::not_resolved == resolve_status) + string profile_name_list; + ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, + 
buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, + profile_list, profile_name_list); + if (ref_resolve_status::success != resolve_status) { - SWSS_LOG_INFO("Missing or invalid egress buffer profile reference specified for:%s", key.c_str()); - return task_process_status::task_need_retry; + if(ref_resolve_status::not_resolved == resolve_status) + { + SWSS_LOG_INFO("Missing or invalid egress buffer profile reference specified for:%s", key.c_str()); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Failed resolving egress buffer profile reference specified for:%s", key.c_str()); + return task_process_status::task_failed; } - SWSS_LOG_ERROR("Failed resolving egress buffer profile reference specified for:%s", key.c_str()); - return task_process_status::task_failed; - } - setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); + setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); + + attr.value.objlist.count = (uint32_t)profile_list.size(); + attr.value.objlist.list = profile_list.data(); + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_NOTICE("%s has been removed from BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE", key.c_str()); + removeObject(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key); + attr.value.objlist.count = 0; + attr.value.objlist.list = profile_list.data(); + } + else + { + SWSS_LOG_ERROR("Unknown command %s when handling BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE key %s", op.c_str(), key.c_str()); + } - sai_attribute_t attr; - attr.id = SAI_PORT_ATTR_QOS_EGRESS_BUFFER_PROFILE_LIST; - attr.value.objlist.count = (uint32_t)profile_list.size(); - attr.value.objlist.list = profile_list.data(); for (string port_name : port_names) { if (!gPortsOrch->getPort(port_name, port)) diff --git a/tests/mock_tests/bufferorch_ut.cpp 
b/tests/mock_tests/bufferorch_ut.cpp index 845fe77d68..86e3ef8aa7 100644 --- a/tests/mock_tests/bufferorch_ut.cpp +++ b/tests/mock_tests/bufferorch_ut.cpp @@ -15,11 +15,52 @@ namespace bufferorch_test { using namespace std; + sai_port_api_t ut_sai_port_api; + sai_port_api_t *pold_sai_port_api; + shared_ptr m_app_db; shared_ptr m_config_db; shared_ptr m_state_db; shared_ptr m_chassis_app_db; + uint32_t _ut_stub_expected_profile_count; + uint32_t _ut_stub_port_profile_list_add_count; + uint32_t _ut_stub_port_profile_list_del_count; + sai_port_attr_t _ut_stub_expected_profile_list_type; + sai_status_t _ut_stub_sai_set_port_attribute( + _In_ sai_object_id_t port_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == _ut_stub_expected_profile_list_type) + { + if (_ut_stub_expected_profile_count == attr[0].value.objlist.count) + { + if (_ut_stub_expected_profile_count != 0) + { + _ut_stub_port_profile_list_add_count++; + } + else + { + _ut_stub_port_profile_list_del_count++; + } + } + } + return pold_sai_port_api->set_port_attribute(port_id, attr); + } + + void _hook_sai_port_api() + { + ut_sai_port_api = *sai_port_api; + pold_sai_port_api = sai_port_api; + ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute; + sai_port_api = &ut_sai_port_api; + } + + void _unhook_sai_port_api() + { + sai_port_api = pold_sai_port_api; + } + struct BufferOrchTest : public ::testing::Test { BufferOrchTest() @@ -211,13 +252,13 @@ namespace bufferorch_test { {"size", "1024000"}, {"mode", "dynamic"}, - {"type", "egress"} + {"type", "ingress"} }); bufferPoolTable.set("ingress_lossy_pool", { {"size", "1024000"}, {"mode", "dynamic"}, - {"type", "egress"} + {"type", "ingress"} }); bufferProfileTable.set("ingress_lossless_profile", { @@ -346,6 +387,7 @@ namespace bufferorch_test TEST_F(BufferOrchTest, BufferOrchTestReferencingObjRemoveThenAdd) { + _hook_sai_port_api(); vector ts; std::deque entries; Table bufferProfileListTable = Table(m_app_db.get(), 
APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); @@ -354,7 +396,11 @@ namespace bufferorch_test {"profile_list", "ingress_lossy_profile,ingress_lossless_profile"} }); gBufferOrch->addExistingData(&bufferProfileListTable); + auto sai_port_profile_list_create_count = _ut_stub_port_profile_list_add_count; + _ut_stub_expected_profile_count = 2; + _ut_stub_expected_profile_list_type = SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST; static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); CheckDependency(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile,ingress_lossless_profile"); @@ -405,5 +451,100 @@ namespace bufferorch_test ASSERT_EQ(ts[2], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|DEL"); ASSERT_EQ(ts[3], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|SET|pool:ingress_lossy_pool|size:0|dynamic_th:0"); ts.clear(); + + // Remove ingress port profile list + entries.push_back({"Ethernet0", "DEL", {}}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE table + _ut_stub_expected_profile_count = 0; + auto sai_port_profile_list_remove_count = _ut_stub_port_profile_list_del_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_remove_count, _ut_stub_port_profile_list_del_count); + // Drain BUFFER_PROFILE_TABLE del operation + static_cast(gBufferOrch)->doTask(); + // Drain BUFFER_POOL_TABLE del operation + static_cast(gBufferOrch)->doTask(); + // Drain the rest create operations + static_cast(gBufferOrch)->doTask(); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + // As an side-effect, all pending notifications should be drained + ASSERT_TRUE(ts.empty()); + + // To satisfy the coverage requirement + bufferProfileListTable.set("Ethernet0", + { + {"profile_list", 
"ingress_no_exist_profile"} + }); + gBufferOrch->addExistingData(&bufferProfileListTable); + static_cast(gBufferOrch)->doTask(); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts[0], "BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE:Ethernet0|SET|profile_list:ingress_no_exist_profile"); + ts.clear(); + + _unhook_sai_port_api(); + } + + TEST_F(BufferOrchTest, BufferOrchTestCreateAndRemoveEgressProfileList) + { + _hook_sai_port_api(); + vector ts; + std::deque entries; + Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table bufferProfileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table bufferProfileListTable = Table(m_app_db.get(), APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + + // To satisfy the coverage requirement + bufferProfileListTable.set("Ethernet0", + { + {"profile_list", "egress_lossless_profile"} + }); + + gBufferOrch->addExistingData(&bufferProfileListTable); + static_cast(gBufferOrch)->doTask(); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts[0], "BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE:Ethernet0|SET|profile_list:egress_lossless_profile"); + ts.clear(); + + bufferPoolTable.set("egress_lossless_pool", + { + {"size", "1024000"}, + {"mode", "dynamic"}, + {"type", "egress"} + }); + bufferProfileTable.set("egress_lossless_profile", + { + {"pool", "egress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }); + + gBufferOrch->addExistingData(&bufferPoolTable); + gBufferOrch->addExistingData(&bufferProfileTable); + + auto sai_port_profile_list_create_count = _ut_stub_port_profile_list_add_count; + _ut_stub_expected_profile_count = 1; + _ut_stub_expected_profile_list_type = SAI_PORT_ATTR_QOS_EGRESS_BUFFER_PROFILE_LIST; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); + CheckDependency(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list", + APP_BUFFER_PROFILE_TABLE_NAME, 
"egress_lossless_profile"); + + // Remove egress port profile list + entries.push_back({"Ethernet0", "DEL", {}}); + auto consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE table + _ut_stub_expected_profile_count = 0; + auto sai_port_profile_list_remove_count = _ut_stub_port_profile_list_del_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_remove_count, _ut_stub_port_profile_list_del_count); + + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); } } From e9984d8aa70318ab5b259cfbff214469a9aa84a7 Mon Sep 17 00:00:00 2001 From: mint570 <70396898+mint570@users.noreply.github.com> Date: Thu, 21 Jul 2022 18:29:13 -0700 Subject: [PATCH 57/64] Enable p4orch unit test for test workflow. (#2375) * Enable p4orch unit test for test workflow. * Add unit test coverage into coverage report. * Remove unit test files from coverage report. 
--- .azure-pipelines/gcov.yml | 2 +- orchagent/Makefile.am | 6 +- .../flex_counter/flex_counter_manager.cpp | 5 +- orchagent/flexcounterorch.h | 2 +- orchagent/p4orch/tests/Makefile.am | 9 +- .../p4orch/tests/fake_flexcounterorch.cpp | 30 +++ .../tests/fake_flowcounterrouteorch.cpp | 178 ++++++++++++++++++ orchagent/p4orch/tests/fake_portorch.cpp | 7 +- orchagent/p4orch/tests/test_main.cpp | 16 ++ tests/gcov_support.sh | 73 +++---- 10 files changed, 277 insertions(+), 51 deletions(-) create mode 100644 orchagent/p4orch/tests/fake_flexcounterorch.cpp create mode 100644 orchagent/p4orch/tests/fake_flowcounterrouteorch.cpp diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index 1bee32a03e..0bd769222d 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -14,7 +14,7 @@ parameters: - name: timeout type: number - default: 180 + default: 240 - name: sonic_slave type: string diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index 9524a61a19..5755b1446e 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -6,6 +6,10 @@ INCLUDES = -I $(top_srcdir)/lib \ -I pbh \ -I nhg +if GCOV_ENABLED +SUBDIRS = p4orch/tests +endif + CFLAGS_SAI = -I /usr/include/sai swssdir = $(datadir)/swss @@ -18,7 +22,7 @@ dist_swss_DATA = \ pfc_detect_barefoot.lua \ pfc_detect_nephos.lua \ pfc_detect_cisco-8000.lua \ - pfc_detect_vs.lua \ + pfc_detect_vs.lua \ pfc_restore.lua \ pfc_restore_cisco-8000.lua \ port_rates.lua \ diff --git a/orchagent/flex_counter/flex_counter_manager.cpp b/orchagent/flex_counter/flex_counter_manager.cpp index ecccf415b2..95fb28171d 100644 --- a/orchagent/flex_counter/flex_counter_manager.cpp +++ b/orchagent/flex_counter/flex_counter_manager.cpp @@ -128,7 +128,10 @@ FlexCounterManager::~FlexCounterManager() flex_counter_table->del(getFlexCounterTableKey(group_name, counter)); } - flex_counter_group_table->del(group_name); + if (flex_counter_group_table != nullptr) + { + flex_counter_group_table->del(group_name); + } 
SWSS_LOG_DEBUG("Deleted flex counter group '%s'.", group_name.c_str()); } diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index c00a435b68..132bfa3b5e 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -26,7 +26,7 @@ class FlexCounterOrch: public Orch std::shared_ptr m_flexCounterDb = nullptr; std::shared_ptr m_flexCounterGroupTable = nullptr; std::shared_ptr m_gbflexCounterDb = nullptr; - shared_ptr m_gbflexCounterGroupTable = nullptr; + std::shared_ptr m_gbflexCounterGroupTable = nullptr; bool m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; bool m_hostif_trap_counter_enabled = false; diff --git a/orchagent/p4orch/tests/Makefile.am b/orchagent/p4orch/tests/Makefile.am index 489acd8f99..daeb484136 100644 --- a/orchagent/p4orch/tests/Makefile.am +++ b/orchagent/p4orch/tests/Makefile.am @@ -4,7 +4,9 @@ INCLUDES = -I $(top_srcdir) -I $(ORCHAGENT_DIR) -I $(P4ORCH_DIR) -I $(top_srcdir CFLAGS_SAI = -I /usr/include/sai -bin_PROGRAMS = p4orch_tests p4orch_tests_asan p4orch_tests_tsan p4orch_tests_usan +TESTS = p4orch_tests p4orch_tests_asan p4orch_tests_tsan p4orch_tests_usan + +noinst_PROGRAMS = p4orch_tests p4orch_tests_asan p4orch_tests_tsan p4orch_tests_usan if DEBUG DBGFLAGS = -ggdb -DDEBUG @@ -27,6 +29,7 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(ORCHAGENT_DIR)/switchorch.cpp \ $(ORCHAGENT_DIR)/request_parser.cpp \ $(ORCHAGENT_DIR)/flex_counter/flex_counter_manager.cpp \ + $(ORCHAGENT_DIR)/flex_counter/flow_counter_handler.cpp \ $(P4ORCH_DIR)/p4oidmapper.cpp \ $(P4ORCH_DIR)/p4orch.cpp \ $(P4ORCH_DIR)/p4orch_util.cpp \ @@ -39,9 +42,11 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(P4ORCH_DIR)/acl_rule_manager.cpp \ $(P4ORCH_DIR)/wcmp_manager.cpp \ $(P4ORCH_DIR)/mirror_session_manager.cpp \ - $(top_srcdir)/tests/mock_tests/fake_response_publisher.cpp \ + $(top_srcdir)/tests/mock_tests/fake_response_publisher.cpp \ fake_portorch.cpp \ fake_crmorch.cpp \ + 
fake_flexcounterorch.cpp \ + fake_flowcounterrouteorch.cpp \ fake_dbconnector.cpp \ fake_producertable.cpp \ fake_consumerstatetable.cpp \ diff --git a/orchagent/p4orch/tests/fake_flexcounterorch.cpp b/orchagent/p4orch/tests/fake_flexcounterorch.cpp new file mode 100644 index 0000000000..a98795bd77 --- /dev/null +++ b/orchagent/p4orch/tests/fake_flexcounterorch.cpp @@ -0,0 +1,30 @@ +#include "copporch.h" +#include "flexcounterorch.h" + +FlexCounterOrch::FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames) + : Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME) +{ +} + +FlexCounterOrch::~FlexCounterOrch(void) +{ +} + +void FlexCounterOrch::doTask(Consumer &consumer) +{ +} + +bool FlexCounterOrch::getPortCountersState() const +{ + return true; +} + +bool FlexCounterOrch::getPortBufferDropCountersState() const +{ + return true; +} + +bool FlexCounterOrch::bake() +{ + return true; +} \ No newline at end of file diff --git a/orchagent/p4orch/tests/fake_flowcounterrouteorch.cpp b/orchagent/p4orch/tests/fake_flowcounterrouteorch.cpp new file mode 100644 index 0000000000..08caf52fe6 --- /dev/null +++ b/orchagent/p4orch/tests/fake_flowcounterrouteorch.cpp @@ -0,0 +1,178 @@ +#include "copporch.h" +#include "flowcounterrouteorch.h" + +extern size_t gMaxBulkSize; +extern sai_route_api_t *sai_route_api; + +#define ROUTE_FLOW_COUNTER_POLLING_INTERVAL_MS 10000 + +FlowCounterRouteOrch::FlowCounterRouteOrch(swss::DBConnector *db, const std::vector &tableNames) + : Orch(db, tableNames), mRouteFlowCounterMgr(ROUTE_FLOW_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, + ROUTE_FLOW_COUNTER_POLLING_INTERVAL_MS, false), + gRouteBulker(sai_route_api, gMaxBulkSize) +{ +} + +FlowCounterRouteOrch::~FlowCounterRouteOrch(void) +{ +} + +void FlowCounterRouteOrch::generateRouteFlowStats() +{ +} + +void FlowCounterRouteOrch::clearRouteFlowStats() +{ +} + +void FlowCounterRouteOrch::addRoutePattern(const std::string &pattern, size_t) +{ +} + +void 
FlowCounterRouteOrch::removeRoutePattern(const std::string &pattern) +{ +} + +void FlowCounterRouteOrch::onAddMiscRouteEntry(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, bool add_to_cache) +{ +} + +void FlowCounterRouteOrch::onAddMiscRouteEntry(sai_object_id_t vrf_id, const sai_ip_prefix_t &ip_pfx, bool add_to_cache) +{ +} + +void FlowCounterRouteOrch::onRemoveMiscRouteEntry(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, + bool remove_from_cache) +{ +} + +void FlowCounterRouteOrch::onRemoveMiscRouteEntry(sai_object_id_t vrf_id, const sai_ip_prefix_t &ip_pfx, + bool remove_from_cache) +{ +} + +void FlowCounterRouteOrch::onAddVR(sai_object_id_t vrf_id) +{ +} + +void FlowCounterRouteOrch::onRemoveVR(sai_object_id_t vrf_id) +{ +} + +void FlowCounterRouteOrch::handleRouteAdd(sai_object_id_t vrf_id, const IpPrefix &ip_prefix) +{ +} + +void FlowCounterRouteOrch::handleRouteRemove(sai_object_id_t vrf_id, const IpPrefix &ip_prefix) +{ +} + +void FlowCounterRouteOrch::processRouteFlowCounterBinding() +{ +} + +void FlowCounterRouteOrch::doTask(Consumer &consumer) +{ +} + +void FlowCounterRouteOrch::doTask(SelectableTimer &timer) +{ +} + +void FlowCounterRouteOrch::initRouteFlowCounterCapability() +{ +} + +void FlowCounterRouteOrch::removeRoutePattern(const RoutePattern &route_pattern) +{ +} + +void FlowCounterRouteOrch::removeRouteFlowCounterFromDB(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, + sai_object_id_t counter_oid) +{ +} + +bool FlowCounterRouteOrch::bindFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, + const IpPrefix &ip_prefix) +{ + return true; +} + +void FlowCounterRouteOrch::unbindFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, + const IpPrefix &ip_prefix, sai_object_id_t counter_oid) +{ +} + +void FlowCounterRouteOrch::pendingUpdateFlexDb(const RoutePattern &route_pattern, const IpPrefix &ip_prefix, + sai_object_id_t counter_oid) +{ +} + +void FlowCounterRouteOrch::updateRouterFlowCounterCache(const 
RoutePattern &route_pattern, const IpPrefix &ip_prefix, + sai_object_id_t counter_oid, RouterFlowCounterCache &cache) +{ +} + +bool FlowCounterRouteOrch::validateRoutePattern(const RoutePattern &route_pattern) const +{ + return true; +} + +void FlowCounterRouteOrch::onRoutePatternMaxMatchCountChange(RoutePattern &route_pattern, size_t new_max_match_count) +{ +} + +bool FlowCounterRouteOrch::isRouteAlreadyBound(const RoutePattern &route_pattern, const IpPrefix &ip_prefix) const +{ + return true; +} + +void FlowCounterRouteOrch::createRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t currentBoundCount) +{ +} + +bool FlowCounterRouteOrch::removeRouteFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, + const IpPrefix &ip_prefix) +{ + return true; +} + +void FlowCounterRouteOrch::createRouteFlowCounterFromVnetRoutes(const RoutePattern &route_pattern, + size_t ¤t_bound_count) +{ +} + +void FlowCounterRouteOrch::reapRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t currentBoundCount) +{ +} + +bool FlowCounterRouteOrch::isRouteFlowCounterEnabled() const +{ + return true; +} + +void FlowCounterRouteOrch::getRouteFlowCounterNameMapKey(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, + std::string &key) +{ +} + +size_t FlowCounterRouteOrch::getRouteFlowCounterSizeByPattern(const RoutePattern &route_pattern) const +{ + return 0; +} + +bool FlowCounterRouteOrch::parseRouteKeyForRoutePattern(const std::string &key, char sep, sai_object_id_t &vrf_id, + IpPrefix &ip_prefix, std::string &vrf_name) +{ + return true; +} + +bool FlowCounterRouteOrch::getVrfIdByVnetName(const std::string &vnet_name, sai_object_id_t &vrf_id) +{ + return true; +} + +bool FlowCounterRouteOrch::getVnetNameByVrfId(sai_object_id_t vrf_id, std::string &vnet_name) +{ + return true; +} diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index aaf766e1aa..51ff450312 100644 --- 
a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -206,7 +206,8 @@ bool PortsOrch::removeAclTableGroup(const Port &p) return true; } -bool PortsOrch::addSubPort(Port &port, const string &alias, const bool &adminUp, const uint32_t &mtu) +bool PortsOrch::addSubPort(Port &port, const string &alias, const string &vlan, const bool &adminUp, + const uint32_t &mtu) { return true; } @@ -400,7 +401,7 @@ void PortsOrch::initializePriorityGroups(Port &port) { } -void PortsOrch::initializePortMaximumHeadroom(Port &port) +void PortsOrch::initializePortBufferMaximumParameters(Port &port) { } @@ -685,7 +686,7 @@ void PortsOrch::voqSyncDelLagMember(Port &lag, Port &port) { } -std::unordered_set PortsOrch::generateCounterStats(const string &type) +std::unordered_set PortsOrch::generateCounterStats(const string &type, bool gearbox) { return {}; } \ No newline at end of file diff --git a/orchagent/p4orch/tests/test_main.cpp b/orchagent/p4orch/tests/test_main.cpp index 23cf37d8e1..203344e434 100644 --- a/orchagent/p4orch/tests/test_main.cpp +++ b/orchagent/p4orch/tests/test_main.cpp @@ -11,6 +11,7 @@ extern "C" #include "crmorch.h" #include "dbconnector.h" #include "directory.h" +#include "flowcounterrouteorch.h" #include "mock_sai_virtual_router.h" #include "p4orch.h" #include "portsorch.h" @@ -37,6 +38,8 @@ sai_object_id_t gUnderlayIfId; #define DEFAULT_BATCH_SIZE 128 int gBatchSize = DEFAULT_BATCH_SIZE; +#define DEFAULT_MAX_BULK_SIZE 1000 +size_t gMaxBulkSize = DEFAULT_MAX_BULK_SIZE; bool gSairedisRecord = true; bool gSwssRecord = true; bool gLogRotate = false; @@ -50,6 +53,7 @@ PortsOrch *gPortsOrch; CrmOrch *gCrmOrch; P4Orch *gP4Orch; VRFOrch *gVrfOrch; +FlowCounterRouteOrch *gFlowCounterRouteOrch; SwitchOrch *gSwitchOrch; Directory gDirectory; ofstream gRecordOfs; @@ -73,6 +77,8 @@ sai_switch_api_t *sai_switch_api; sai_mirror_api_t *sai_mirror_api; sai_udf_api_t *sai_udf_api; sai_tunnel_api_t *sai_tunnel_api; +sai_my_mac_api_t 
*sai_my_mac_api; +sai_counter_api_t *sai_counter_api; namespace { @@ -162,6 +168,9 @@ int main(int argc, char *argv[]) sai_switch_api_t switch_api; sai_mirror_api_t mirror_api; sai_udf_api_t udf_api; + sai_my_mac_api_t my_mac_api; + sai_tunnel_api_t tunnel_api; + sai_counter_api_t counter_api; sai_router_intfs_api = &router_intfs_api; sai_neighbor_api = &neighbor_api; sai_next_hop_api = &next_hop_api; @@ -174,6 +183,9 @@ int main(int argc, char *argv[]) sai_switch_api = &switch_api; sai_mirror_api = &mirror_api; sai_udf_api = &udf_api; + sai_my_mac_api = &my_mac_api; + sai_tunnel_api = &tunnel_api; + sai_counter_api = &counter_api; swss::DBConnector appl_db("APPL_DB", 0); swss::DBConnector state_db("STATE_DB", 0); @@ -193,6 +205,10 @@ int main(int argc, char *argv[]) gVrfOrch = &vrf_orch; gDirectory.set(static_cast(&vrf_orch)); + FlowCounterRouteOrch flow_counter_route_orch(gConfigDb, std::vector{}); + gFlowCounterRouteOrch = &flow_counter_route_orch; + gDirectory.set(static_cast(&flow_counter_route_orch)); + // Setup ports for all tests. SetupPorts(); AddVrf(); diff --git a/tests/gcov_support.sh b/tests/gcov_support.sh index 1395f09149..b9d334bd16 100755 --- a/tests/gcov_support.sh +++ b/tests/gcov_support.sh @@ -65,16 +65,6 @@ list_lcov_path() echo "Start searching .gcda files..." exec 4>$TMP_FILE find_gcda_file=`find ${gcda_dir} -name *.gcda` - echo "Start rm unused gcno files for speed up" - find_gcno_file=`find ${gcda_dir} -name *.gcno` - for line in ${find_gcno_file} - do - temp_gcda=${line/.gcno/$gcdastr} - if [ ! 
-f ${temp_gcda} ]; then - rm ${line} - fi - done - echo ${find_gcda_file} RESULT=${find_gcda_file} echo "$RESULT" >&4 @@ -93,8 +83,7 @@ lcov_genhtml_report() do local fullpath=$line local infoname=${INFO_FILE_PREFIX}${fullpath##*/}.info - htmldirname=${HTML_FILE_PREFIX}${fullpath##*/} - + echo ${fullpath} pushd ${fullpath} @@ -102,7 +91,7 @@ lcov_genhtml_report() echo "gcda count: $GCDA_COUNT" if [ $GCDA_COUNT -ge 1 ]; then echo "Executing lcov -c -d . -o ${infoname}" - lcov -c -d . -o ${infoname} + lcov -c -d . -o ${infoname} &>/dev/null if [ "$?" != "0" ]; then echo "lcov fail!" rm ${infoname} @@ -112,12 +101,6 @@ lcov_genhtml_report() done < ${gcda_file_range}/gcda_dir_list.txt } -rm_unused_gcno() -{ - cur_dir = $1/ - -} - # generate html reports for all eligible submodules lcov_genhtml_all() { @@ -143,6 +126,11 @@ lcov_merge_all() done < infolist lcov --extract total.info '*sonic-gcov/*' -o total.info + + # Remove unit test files. + lcov -o total.info -r total.info "*sonic-gcov/common_work/gcov/orchagent/p4orch/tests/*" + lcov -o total.info -r total.info "*sonic-gcov/common_work/gcov/tests/*" + cp $1/lcov_cobertura.py $1/common_work/gcov/ python $1/common_work/gcov/lcov_cobertura.py total.info -o coverage.xml @@ -214,7 +202,8 @@ gcov_support_generate_report() mkdir -p gcov_output/info #for same code path - mkdir -p common_work + mkdir -p common_work/gcov + tar -zxvf swss.tar.gz -C common_work/gcov cat container_dir_list while read line @@ -223,7 +212,6 @@ gcov_support_generate_report() echo ${container_id} cp -rf ${container_id}/* common_work - tar -zxvf swss.tar.gz -C common_work/gcov cd common_work/gcov/ find -name gcda*.tar.gz > tmp_gcda.txt while read LINE ; do @@ -233,13 +221,16 @@ gcov_support_generate_report() done < tmp_gcda.txt rm tmp_gcda.txt - find -name gcno*.tar.gz > tmp_gcno.txt - while read LINE ; do - echo ${LINE} - echo ${LINE%%.*} - tar -zxvf ${LINE} - done < tmp_gcno.txt - rm tmp_gcno.txt + gcno_count=`find -name "*.gcno" | wc -l` + if [ 
${gcno_count} -lt 1 ]; then + find -name gcno*.tar.gz > tmp_gcno.txt + while read LINE ; do + echo ${LINE} + echo ${LINE%%.*} + tar -zxvf ${LINE} + done < tmp_gcno.txt + rm tmp_gcno.txt + fi cd - ls -lh common_work/* @@ -248,22 +239,20 @@ gcov_support_generate_report() echo "###lcov operation fail.." return 0 fi - cd common_work - find . -name "*.gcda" -o -name "*.gcno" -o -name "*.gz" -o -name "*.cpp" -o -name "*.h"| xargs rm -rf - cd ../ - cp -rf common_work/* ${container_id}/* - cd ${container_id} - find . -name "*.gcda" -o -name "*.gcno" -o -name "*.gz" -o -name "*.cpp" -o -name "*.h"| xargs rm -rf - cd ../ - - rm -rf common_work/* - - cp -rf ${container_id} gcov_output/ + mkdir -p gcov_output/${container_id} + cp -rf common_work/* gcov_output/${container_id}/* + pushd gcov_output/${container_id} + find . -name "*.gcda" -o -name "*.gcno" -o -name "*.gz" -o -name "*.cpp" -o -name "*.h" | xargs rm -rf + popd + pushd common_work + find . -name "*.gcda" -o -name "*.gz" -o -name "*.info" | xargs rm -rf + popd done < container_dir_list # generate report with code - mkdir -p common_work/gcov - tar -zxvf swss.tar.gz -C common_work/gcov + pushd common_work/gcov + find . -name "*.gcno" | xargs rm -rf + popd echo "### Make info generating completed !!" } @@ -339,7 +328,7 @@ gcov_support_collect_gcno() echo " === Start collecting .gcno files... === " submodule_name=$1 exec 3>$GCNO_LIST_FILE - find_command=`find -name *.gcno` + find_command=`find -name "*.gcno" -o -name "*.gcda"` echo "${find_command}" if [ -z "${find_command}" ]; then echo "### Error! no gcno files found!" From 4a6f940d976b693c379ce845f8bc35ccf978d388 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Fri, 22 Jul 2022 17:35:29 -0700 Subject: [PATCH 58/64] [EVPN]Fix missing Vlan member update notification in P2MP scenario (#2388) *Fixed the missing vlan member update notification when the Vlan member add is from remote endpoint in P2MP scenario. 
--- orchagent/portsorch.cpp | 3 +++ tests/test_evpn_fdb_p2mp.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 03e103e694..e9a3afdc1e 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -4960,6 +4960,9 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) vlan.m_vlan_info.l2mc_members[end_point_ip] = l2mc_group_member; m_portList[vlan.m_alias] = vlan; increaseBridgePortRefCount(port); + + VlanMemberUpdate update = { vlan, port, true }; + notify(SUBJECT_TYPE_VLAN_MEMBER_CHANGE, static_cast(&update)); return true; } diff --git a/tests/test_evpn_fdb_p2mp.py b/tests/test_evpn_fdb_p2mp.py index 8c1cfbf1d6..5aa407966c 100644 --- a/tests/test_evpn_fdb_p2mp.py +++ b/tests/test_evpn_fdb_p2mp.py @@ -371,6 +371,40 @@ def test_evpnFdbP2MP(dvs, testlog): assert mac1_found, str(extra) print("FDB Vlan3:52-54-00-25-06-E9:Ethernet0 is created in STATE-DB") + #UT-10 Evpn Mac add from remote when tunnels are not created + mac = "52:54:00:25:06:E1" + remote_ip_9 = "9.9.9.9" + print("Creating Evpn FDB Vlan3:"+mac.lower()+":9.9.9.9 in APP-DB") + helper.create_entry_pst( + dvs.pdb, + "VXLAN_FDB_TABLE", "Vlan3:"+mac.lower(), + [ + ("remote_vtep", remote_ip_9), + ("type", "dynamic"), + ("vni", "3") + ] + ) + time.sleep(1) + + #Adding remote VNI later + vxlan_obj.create_evpn_remote_vni(dvs, "Vlan3", remote_ip_9, "3") + time.sleep(1) + tnl_bp_oid_9 = get_vxlan_p2mp_tunnel_bp(dvs.adb, source_tnl_ip) + + # check that the FDB entry is inserted into ASIC DB + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", mac), ("bvid", vlan_oid_3)], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC"), + ("SAI_FDB_ENTRY_ATTR_ALLOW_MAC_MOVE", "true"), + ("SAI_FDB_ENTRY_ATTR_ENDPOINT_IP", remote_ip_9), + ("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", str(tnl_bp_oid_9)), + ] + ) + assert ok == True, str(extra) + print("EVPN FDB 
Vlan3:"+mac.lower()+":"+remote_ip_9+" is created in ASIC-DB") + + time.sleep(1) + dvs.remove_vlan_member("3", "Ethernet0") dvs.remove_vlan("3") From dc8bc1c40f647cc2acf2c21431c4f58c6ccd8ff3 Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Sun, 24 Jul 2022 15:20:05 +0800 Subject: [PATCH 59/64] [portsorch] Expose supported FEC modes to STABE_DB and check whether FEC mode is supported before setting it (#2333) - What I did Expose supported FEC modes to STATE_DB.PORT_TABLE|.supported_fecs. The orchagent calls get_port_attribute to get attribute SAI_PORT_ATTR_SUPPORTED_FEC_MODE during initialization and then records it into internal data. 1. By default, the supported FEC modes will be returned by SAI and exposed to STATE_DB. Eg. rs,none means only rs and none is supported on the port. The orchagent will check whether the FEC mode is supported on the port before calling the SAI API to set it. The CLI will check whether the FEC mode is in supported_fecs before setting FEC mode on a port to the CONFIG_DB 2. In case the SAI API does not support any FEC mode on the port, N/A will be exposed to STATE_DB The orchagent will deny any FEC setting and prints log. The CLI will deny FEC setting. 3. In case the SAI API get_port_attribute returns Not implemented No check will be performed before setting a FEC mode to SAI on the port. The CLI will check whether the FEC mode is defined before setting it to CONFIG_DB. - Why I did it It is not supported to set FEC mode on some platforms. To avoid error, we need to expose the supported FEC list. - How I verified it Manually test and mock test. 
--- orchagent/portsorch.cpp | 119 ++++++++++++++-- orchagent/portsorch.h | 8 +- tests/mock_tests/portsorch_ut.cpp | 227 ++++++++++++++++++++++++++++++ 3 files changed, 342 insertions(+), 12 deletions(-) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index e9a3afdc1e..fd96d6a3c2 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -77,6 +77,13 @@ static map fec_mode_map = { "fc", SAI_PORT_FEC_MODE_FC } }; +static map fec_mode_reverse_map = +{ + { SAI_PORT_FEC_MODE_NONE, "none" }, + { SAI_PORT_FEC_MODE_RS, "rs" }, + { SAI_PORT_FEC_MODE_FC, "fc" } +}; + static map pfc_asym_map = { { "on", SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE }, @@ -1188,19 +1195,31 @@ bool PortsOrch::setPortTpid(sai_object_id_t id, sai_uint16_t tpid) return true; } - -bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) +bool PortsOrch::setPortFec(Port &port, string &mode) { SWSS_LOG_ENTER(); + auto searchRef = m_portSupportedFecModes.find(port.m_port_id); + if (searchRef != m_portSupportedFecModes.end()) + { + auto &supportedFecModes = searchRef->second; + if (!supportedFecModes.empty() && (supportedFecModes.find(mode) == supportedFecModes.end())) + { + SWSS_LOG_ERROR("Unsupported mode %s on port %s", mode.c_str(), port.m_alias.c_str()); + // We return true becase the caller will keep the item in m_toSync and retry it later if we return false + // As the FEC mode is not supported it doesn't make sense to retry. 
+ return true; + } + } + sai_attribute_t attr; attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.s32 = mode; + attr.value.s32 = port.m_fec_mode; sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set fec mode %d to port pid:%" PRIx64, mode, port.m_port_id); + SWSS_LOG_ERROR("Failed to set FEC mode %s to port %s", mode.c_str(), port.m_alias.c_str()); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1208,7 +1227,7 @@ bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) } } - SWSS_LOG_INFO("Set fec mode %d to port pid:%" PRIx64, mode, port.m_port_id); + SWSS_LOG_NOTICE("Set port %s FEC mode %s", port.m_alias.c_str(), mode.c_str()); setGearboxPortsAttr(port, SAI_PORT_ATTR_FEC_MODE, &mode); @@ -1985,6 +2004,87 @@ void PortsOrch::initPortSupportedSpeeds(const std::string& alias, sai_object_id_ m_portStateTable.set(alias, v); } +void PortsOrch::getPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id, PortSupportedFecModes &supported_fecmodes) +{ + sai_attribute_t attr; + sai_status_t status; + vector fecModes(fec_mode_reverse_map.size()); + + attr.id = SAI_PORT_ATTR_SUPPORTED_FEC_MODE; + attr.value.s32list.count = static_cast(fecModes.size()); + attr.value.s32list.list = fecModes.data(); + + status = sai_port_api->get_port_attribute(port_id, 1, &attr); + fecModes.resize(attr.value.s32list.count); + if (status == SAI_STATUS_SUCCESS) + { + if (fecModes.empty()) + { + supported_fecmodes.insert("N/A"); + } + else + { + for(auto fecMode : fecModes) + { + supported_fecmodes.insert(fec_mode_reverse_map[static_cast(fecMode)]); + } + } + } + else + { + if (SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status) || + status == SAI_STATUS_NOT_IMPLEMENTED) + { + // unable to validate FEC mode if attribute is not supported on platform + SWSS_LOG_NOTICE("Unable to 
validate FEC mode for port %s id=%" PRIx64 " due to unsupported by platform", + alias.c_str(), port_id); + } + else + { + SWSS_LOG_ERROR("Failed to get a list of supported FEC modes for port %s id=%" PRIx64 ". Error=%d", + alias.c_str(), port_id, status); + } + + supported_fecmodes.clear(); // return empty + } +} + +void PortsOrch::initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id) +{ + // If port supported speeds map already contains the information, save the SAI call + if (m_portSupportedFecModes.count(port_id)) + { + return; + } + PortSupportedFecModes supported_fec_modes; + getPortSupportedFecModes(alias, port_id, supported_fec_modes); + m_portSupportedFecModes[port_id] = supported_fec_modes; + + if (supported_fec_modes.empty()) + { + // Do not expose "supported_fecs" in case fetching FEC modes is not supported by the vendor + SWSS_LOG_INFO("No supported_fecs exposed to STATE_DB for port %s since fetching supported FEC modes is not supported by the vendor", + alias.c_str()); + return; + } + + vector v; + std::string supported_fec_modes_str; + bool first = true; + for(auto fec : supported_fec_modes) + { + if (first) + first = false; + else + supported_fec_modes_str += ','; + supported_fec_modes_str += fec; + } + + v.emplace_back(std::make_pair("supported_fecs", supported_fec_modes_str)); + m_portStateTable.set(alias, v); +} + /* * If Gearbox is enabled and this is a Gearbox port then set the attributes accordingly. 
*/ @@ -2978,6 +3078,7 @@ void PortsOrch::doPortTask(Consumer &consumer) } initPortSupportedSpeeds(get<0>(it->second), m_portListLaneMap[it->first]); + initPortSupportedFecModes(get<0>(it->second), m_portListLaneMap[it->first]); it++; } @@ -3326,14 +3427,12 @@ void PortsOrch::doPortTask(Consumer &consumer) p.m_fec_mode = fec_mode_map[fec_mode]; p.m_fec_cfg = true; - if (setPortFec(p, p.m_fec_mode)) + if (setPortFec(p, fec_mode)) { m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s fec to %s", alias.c_str(), fec_mode.c_str()); } else { - SWSS_LOG_ERROR("Failed to set port %s fec to %s", alias.c_str(), fec_mode.c_str()); it++; continue; } @@ -3343,14 +3442,12 @@ void PortsOrch::doPortTask(Consumer &consumer) /* Port is already down, setting fec mode*/ p.m_fec_mode = fec_mode_map[fec_mode]; p.m_fec_cfg = true; - if (setPortFec(p, p.m_fec_mode)) + if (setPortFec(p, fec_mode)) { m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s fec to %s", alias.c_str(), fec_mode.c_str()); } else { - SWSS_LOG_ERROR("Failed to set port %s fec to %s", alias.c_str(), fec_mode.c_str()); it++; continue; } diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 0fd3552e19..a3413790b1 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -27,6 +27,7 @@ #define PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP "PG_DROP_STAT_COUNTER" typedef std::vector PortSupportedSpeeds; +typedef std::set PortSupportedFecModes; static const map oper_status_strings = { @@ -209,6 +210,8 @@ class PortsOrch : public Orch, public Subject unique_ptr
m_gbcounterTable; std::map m_portSupportedSpeeds; + // Supported FEC modes on the system side. + std::map m_portSupportedFecModes; bool m_initDone = false; Port m_cpuPort; @@ -305,7 +308,7 @@ class PortsOrch : public Orch, public Subject bool setPortTpid(sai_object_id_t id, sai_uint16_t tpid); bool setPortPvid (Port &port, sai_uint32_t pvid); bool getPortPvid(Port &port, sai_uint32_t &pvid); - bool setPortFec(Port &port, sai_port_fec_mode_t mode); + bool setPortFec(Port &port, std::string &mode); bool setPortPfcAsym(Port &port, string pfc_asym); bool getDestPortId(sai_object_id_t src_port_id, dest_port_type_t port_type, sai_object_id_t &des_port_id); @@ -314,6 +317,9 @@ class PortsOrch : public Orch, public Subject bool isSpeedSupported(const std::string& alias, sai_object_id_t port_id, sai_uint32_t speed); void getPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id, PortSupportedSpeeds &supported_speeds); void initPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id); + // Get supported FEC modes on system side + void getPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id, PortSupportedFecModes &supported_fecmodes); + void initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id); task_process_status setPortSpeed(Port &port, sai_uint32_t speed); bool getPortSpeed(sai_object_id_t id, sai_uint32_t &speed); bool setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value); diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 78c633d4a1..93e73d9041 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -20,6 +20,70 @@ namespace portsorch_test using namespace std; + sai_port_api_t ut_sai_port_api; + sai_port_api_t *pold_sai_port_api; + + bool not_support_fetching_fec; + vector mock_port_fec_modes = {SAI_PORT_FEC_MODE_RS, SAI_PORT_FEC_MODE_FC}; + + sai_status_t _ut_stub_sai_get_port_attribute( + _In_ sai_object_id_t 
port_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + sai_status_t status; + if (attr_count == 1 && attr_list[0].id == SAI_PORT_ATTR_SUPPORTED_FEC_MODE) + { + if (not_support_fetching_fec) + { + status = SAI_STATUS_NOT_IMPLEMENTED; + } + else + { + uint32_t i; + for (i = 0; i < attr_list[0].value.s32list.count && i < mock_port_fec_modes.size(); i++) + { + attr_list[0].value.s32list.list[i] = mock_port_fec_modes[i]; + } + attr_list[0].value.s32list.count = i; + status = SAI_STATUS_SUCCESS; + } + } + else + { + status = pold_sai_port_api->get_port_attribute(port_id, attr_count, attr_list); + } + return status; + } + + uint32_t _sai_set_port_fec_count; + int32_t _sai_port_fec_mode; + sai_status_t _ut_stub_sai_set_port_attribute( + _In_ sai_object_id_t port_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_PORT_ATTR_FEC_MODE) + { + _sai_set_port_fec_count++; + _sai_port_fec_mode = attr[0].value.s32; + } + return pold_sai_port_api->set_port_attribute(port_id, attr); + } + + void _hook_sai_port_api() + { + ut_sai_port_api = *sai_port_api; + pold_sai_port_api = sai_port_api; + ut_sai_port_api.get_port_attribute = _ut_stub_sai_get_port_attribute; + ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute; + sai_port_api = &ut_sai_port_api; + } + + void _unhook_sai_port_api() + { + sai_port_api = pold_sai_port_api; + } + struct PortsOrchTest : public ::testing::Test { shared_ptr m_app_db; @@ -173,6 +237,169 @@ namespace portsorch_test }; + TEST_F(PortsOrchTest, PortSupportedFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill 
consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "none"} + }}); + consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + + ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + _unhook_sai_port_api(); + } + + /* + * Test case: SAI_PORT_ATTR_SUPPORTED_FEC_MODE is not supported by vendor + **/ + TEST_F(PortsOrchTest, PortNotSupportedFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = true; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = 
dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + /* + * Test case: Fetching SAI_PORT_ATTR_SUPPORTED_FEC_MODE is supported but no FEC mode is supported on the port + **/ + TEST_F(PortsOrchTest, PortSupportNoFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + auto old_mock_port_fec_modes = mock_port_fec_modes; + mock_port_fec_modes.clear(); + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + mock_port_fec_modes = old_mock_port_fec_modes; + _unhook_sai_port_api(); + } + TEST_F(PortsOrchTest, PortReadinessColdBoot) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); From 6565b502f01e739b4961f216084ffedf78395640 Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Mon, 25 Jul 2022 16:40:32 -0700 Subject: 
[PATCH 60/64] Revert "[portsorch] Expose supported FEC modes to STABE_DB and check whether FEC mode is supported before setting it (#2333)" (#2396) This reverts commit dc8bc1c40f647cc2acf2c21431c4f58c6ccd8ff3. *Revert "[portsorch] Expose supported FEC modes to STABE_DB and check whether FEC mode is supported before setting it" --- orchagent/portsorch.cpp | 119 ++-------------- orchagent/portsorch.h | 8 +- tests/mock_tests/portsorch_ut.cpp | 227 ------------------------------ 3 files changed, 12 insertions(+), 342 deletions(-) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index fd96d6a3c2..e9a3afdc1e 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -77,13 +77,6 @@ static map fec_mode_map = { "fc", SAI_PORT_FEC_MODE_FC } }; -static map fec_mode_reverse_map = -{ - { SAI_PORT_FEC_MODE_NONE, "none" }, - { SAI_PORT_FEC_MODE_RS, "rs" }, - { SAI_PORT_FEC_MODE_FC, "fc" } -}; - static map pfc_asym_map = { { "on", SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE }, @@ -1195,31 +1188,19 @@ bool PortsOrch::setPortTpid(sai_object_id_t id, sai_uint16_t tpid) return true; } -bool PortsOrch::setPortFec(Port &port, string &mode) + +bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) { SWSS_LOG_ENTER(); - auto searchRef = m_portSupportedFecModes.find(port.m_port_id); - if (searchRef != m_portSupportedFecModes.end()) - { - auto &supportedFecModes = searchRef->second; - if (!supportedFecModes.empty() && (supportedFecModes.find(mode) == supportedFecModes.end())) - { - SWSS_LOG_ERROR("Unsupported mode %s on port %s", mode.c_str(), port.m_alias.c_str()); - // We return true becase the caller will keep the item in m_toSync and retry it later if we return false - // As the FEC mode is not supported it doesn't make sense to retry. 
- return true; - } - } - sai_attribute_t attr; attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.s32 = port.m_fec_mode; + attr.value.s32 = mode; sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set FEC mode %s to port %s", mode.c_str(), port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to set fec mode %d to port pid:%" PRIx64, mode, port.m_port_id); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1227,7 +1208,7 @@ bool PortsOrch::setPortFec(Port &port, string &mode) } } - SWSS_LOG_NOTICE("Set port %s FEC mode %s", port.m_alias.c_str(), mode.c_str()); + SWSS_LOG_INFO("Set fec mode %d to port pid:%" PRIx64, mode, port.m_port_id); setGearboxPortsAttr(port, SAI_PORT_ATTR_FEC_MODE, &mode); @@ -2004,87 +1985,6 @@ void PortsOrch::initPortSupportedSpeeds(const std::string& alias, sai_object_id_ m_portStateTable.set(alias, v); } -void PortsOrch::getPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id, PortSupportedFecModes &supported_fecmodes) -{ - sai_attribute_t attr; - sai_status_t status; - vector fecModes(fec_mode_reverse_map.size()); - - attr.id = SAI_PORT_ATTR_SUPPORTED_FEC_MODE; - attr.value.s32list.count = static_cast(fecModes.size()); - attr.value.s32list.list = fecModes.data(); - - status = sai_port_api->get_port_attribute(port_id, 1, &attr); - fecModes.resize(attr.value.s32list.count); - if (status == SAI_STATUS_SUCCESS) - { - if (fecModes.empty()) - { - supported_fecmodes.insert("N/A"); - } - else - { - for(auto fecMode : fecModes) - { - supported_fecmodes.insert(fec_mode_reverse_map[static_cast(fecMode)]); - } - } - } - else - { - if (SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status) || - status == SAI_STATUS_NOT_IMPLEMENTED) - { - // unable to validate FEC mode if attribute is not supported on platform - SWSS_LOG_NOTICE("Unable to validate FEC 
mode for port %s id=%" PRIx64 " due to unsupported by platform", - alias.c_str(), port_id); - } - else - { - SWSS_LOG_ERROR("Failed to get a list of supported FEC modes for port %s id=%" PRIx64 ". Error=%d", - alias.c_str(), port_id, status); - } - - supported_fecmodes.clear(); // return empty - } -} - -void PortsOrch::initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id) -{ - // If port supported speeds map already contains the information, save the SAI call - if (m_portSupportedFecModes.count(port_id)) - { - return; - } - PortSupportedFecModes supported_fec_modes; - getPortSupportedFecModes(alias, port_id, supported_fec_modes); - m_portSupportedFecModes[port_id] = supported_fec_modes; - - if (supported_fec_modes.empty()) - { - // Do not expose "supported_fecs" in case fetching FEC modes is not supported by the vendor - SWSS_LOG_INFO("No supported_fecs exposed to STATE_DB for port %s since fetching supported FEC modes is not supported by the vendor", - alias.c_str()); - return; - } - - vector v; - std::string supported_fec_modes_str; - bool first = true; - for(auto fec : supported_fec_modes) - { - if (first) - first = false; - else - supported_fec_modes_str += ','; - supported_fec_modes_str += fec; - } - - v.emplace_back(std::make_pair("supported_fecs", supported_fec_modes_str)); - m_portStateTable.set(alias, v); -} - /* * If Gearbox is enabled and this is a Gearbox port then set the attributes accordingly. 
*/ @@ -3078,7 +2978,6 @@ void PortsOrch::doPortTask(Consumer &consumer) } initPortSupportedSpeeds(get<0>(it->second), m_portListLaneMap[it->first]); - initPortSupportedFecModes(get<0>(it->second), m_portListLaneMap[it->first]); it++; } @@ -3427,12 +3326,14 @@ void PortsOrch::doPortTask(Consumer &consumer) p.m_fec_mode = fec_mode_map[fec_mode]; p.m_fec_cfg = true; - if (setPortFec(p, fec_mode)) + if (setPortFec(p, p.m_fec_mode)) { m_portList[alias] = p; + SWSS_LOG_NOTICE("Set port %s fec to %s", alias.c_str(), fec_mode.c_str()); } else { + SWSS_LOG_ERROR("Failed to set port %s fec to %s", alias.c_str(), fec_mode.c_str()); it++; continue; } @@ -3442,12 +3343,14 @@ void PortsOrch::doPortTask(Consumer &consumer) /* Port is already down, setting fec mode*/ p.m_fec_mode = fec_mode_map[fec_mode]; p.m_fec_cfg = true; - if (setPortFec(p, fec_mode)) + if (setPortFec(p, p.m_fec_mode)) { m_portList[alias] = p; + SWSS_LOG_NOTICE("Set port %s fec to %s", alias.c_str(), fec_mode.c_str()); } else { + SWSS_LOG_ERROR("Failed to set port %s fec to %s", alias.c_str(), fec_mode.c_str()); it++; continue; } diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index a3413790b1..0fd3552e19 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -27,7 +27,6 @@ #define PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP "PG_DROP_STAT_COUNTER" typedef std::vector PortSupportedSpeeds; -typedef std::set PortSupportedFecModes; static const map oper_status_strings = { @@ -210,8 +209,6 @@ class PortsOrch : public Orch, public Subject unique_ptr
m_gbcounterTable; std::map m_portSupportedSpeeds; - // Supported FEC modes on the system side. - std::map m_portSupportedFecModes; bool m_initDone = false; Port m_cpuPort; @@ -308,7 +305,7 @@ class PortsOrch : public Orch, public Subject bool setPortTpid(sai_object_id_t id, sai_uint16_t tpid); bool setPortPvid (Port &port, sai_uint32_t pvid); bool getPortPvid(Port &port, sai_uint32_t &pvid); - bool setPortFec(Port &port, std::string &mode); + bool setPortFec(Port &port, sai_port_fec_mode_t mode); bool setPortPfcAsym(Port &port, string pfc_asym); bool getDestPortId(sai_object_id_t src_port_id, dest_port_type_t port_type, sai_object_id_t &des_port_id); @@ -317,9 +314,6 @@ class PortsOrch : public Orch, public Subject bool isSpeedSupported(const std::string& alias, sai_object_id_t port_id, sai_uint32_t speed); void getPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id, PortSupportedSpeeds &supported_speeds); void initPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id); - // Get supported FEC modes on system side - void getPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id, PortSupportedFecModes &supported_fecmodes); - void initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id); task_process_status setPortSpeed(Port &port, sai_uint32_t speed); bool getPortSpeed(sai_object_id_t id, sai_uint32_t &speed); bool setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value); diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 93e73d9041..78c633d4a1 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -20,70 +20,6 @@ namespace portsorch_test using namespace std; - sai_port_api_t ut_sai_port_api; - sai_port_api_t *pold_sai_port_api; - - bool not_support_fetching_fec; - vector mock_port_fec_modes = {SAI_PORT_FEC_MODE_RS, SAI_PORT_FEC_MODE_FC}; - - sai_status_t _ut_stub_sai_get_port_attribute( - _In_ sai_object_id_t 
port_id, - _In_ uint32_t attr_count, - _Inout_ sai_attribute_t *attr_list) - { - sai_status_t status; - if (attr_count == 1 && attr_list[0].id == SAI_PORT_ATTR_SUPPORTED_FEC_MODE) - { - if (not_support_fetching_fec) - { - status = SAI_STATUS_NOT_IMPLEMENTED; - } - else - { - uint32_t i; - for (i = 0; i < attr_list[0].value.s32list.count && i < mock_port_fec_modes.size(); i++) - { - attr_list[0].value.s32list.list[i] = mock_port_fec_modes[i]; - } - attr_list[0].value.s32list.count = i; - status = SAI_STATUS_SUCCESS; - } - } - else - { - status = pold_sai_port_api->get_port_attribute(port_id, attr_count, attr_list); - } - return status; - } - - uint32_t _sai_set_port_fec_count; - int32_t _sai_port_fec_mode; - sai_status_t _ut_stub_sai_set_port_attribute( - _In_ sai_object_id_t port_id, - _In_ const sai_attribute_t *attr) - { - if (attr[0].id == SAI_PORT_ATTR_FEC_MODE) - { - _sai_set_port_fec_count++; - _sai_port_fec_mode = attr[0].value.s32; - } - return pold_sai_port_api->set_port_attribute(port_id, attr); - } - - void _hook_sai_port_api() - { - ut_sai_port_api = *sai_port_api; - pold_sai_port_api = sai_port_api; - ut_sai_port_api.get_port_attribute = _ut_stub_sai_get_port_attribute; - ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute; - sai_port_api = &ut_sai_port_api; - } - - void _unhook_sai_port_api() - { - sai_port_api = pold_sai_port_api; - } - struct PortsOrchTest : public ::testing::Test { shared_ptr m_app_db; @@ -237,169 +173,6 @@ namespace portsorch_test }; - TEST_F(PortsOrchTest, PortSupportedFecModes) - { - _hook_sai_port_api(); - Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); - std::deque entries; - - not_support_fetching_fec = false; - // Get SAI default ports to populate DB - auto ports = ut_helper::getInitialSaiPorts(); - - for (const auto &it : ports) - { - portTable.set(it.first, it.second); - } - - // Set PortConfigDone - portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); - - // refill 
consumer - gPortsOrch->addExistingData(&portTable); - - // Apply configuration : - // create ports - static_cast(gPortsOrch)->doTask(); - - uint32_t current_sai_api_call_count = _sai_set_port_fec_count; - - entries.push_back({"Ethernet0", "SET", - { - {"fec", "rs"} - }}); - auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); - consumer->addToSync(entries); - static_cast(gPortsOrch)->doTask(); - entries.clear(); - - ASSERT_EQ(_sai_set_port_fec_count, ++current_sai_api_call_count); - ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); - - vector ts; - - gPortsOrch->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); - - entries.push_back({"Ethernet0", "SET", - { - {"fec", "none"} - }}); - consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); - consumer->addToSync(entries); - static_cast(gPortsOrch)->doTask(); - - ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); - ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); - - gPortsOrch->dumpPendingTasks(ts); - ASSERT_EQ(ts.size(), 0); - - _unhook_sai_port_api(); - } - - /* - * Test case: SAI_PORT_ATTR_SUPPORTED_FEC_MODE is not supported by vendor - **/ - TEST_F(PortsOrchTest, PortNotSupportedFecModes) - { - _hook_sai_port_api(); - Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); - std::deque entries; - - not_support_fetching_fec = true; - // Get SAI default ports to populate DB - auto ports = ut_helper::getInitialSaiPorts(); - - for (const auto &it : ports) - { - portTable.set(it.first, it.second); - } - - // Set PortConfigDone - portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); - - // refill consumer - gPortsOrch->addExistingData(&portTable); - - // Apply configuration : - // create ports - static_cast(gPortsOrch)->doTask(); - - uint32_t current_sai_api_call_count = _sai_set_port_fec_count; - - entries.push_back({"Ethernet0", "SET", - { - {"fec", "rs"} - }}); - auto consumer = 
dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); - consumer->addToSync(entries); - static_cast(gPortsOrch)->doTask(); - entries.clear(); - - ASSERT_EQ(_sai_set_port_fec_count, ++current_sai_api_call_count); - ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); - - vector ts; - - gPortsOrch->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); - - _unhook_sai_port_api(); - } - - /* - * Test case: Fetching SAI_PORT_ATTR_SUPPORTED_FEC_MODE is supported but no FEC mode is supported on the port - **/ - TEST_F(PortsOrchTest, PortSupportNoFecModes) - { - _hook_sai_port_api(); - Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); - std::deque entries; - - not_support_fetching_fec = false; - auto old_mock_port_fec_modes = mock_port_fec_modes; - mock_port_fec_modes.clear(); - // Get SAI default ports to populate DB - auto ports = ut_helper::getInitialSaiPorts(); - - for (const auto &it : ports) - { - portTable.set(it.first, it.second); - } - - // Set PortConfigDone - portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); - - // refill consumer - gPortsOrch->addExistingData(&portTable); - - // Apply configuration : - // create ports - static_cast(gPortsOrch)->doTask(); - - uint32_t current_sai_api_call_count = _sai_set_port_fec_count; - - entries.push_back({"Ethernet0", "SET", - { - {"fec", "rs"} - }}); - auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); - consumer->addToSync(entries); - static_cast(gPortsOrch)->doTask(); - entries.clear(); - - ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); - - vector ts; - - gPortsOrch->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); - - mock_port_fec_modes = old_mock_port_fec_modes; - _unhook_sai_port_api(); - } - TEST_F(PortsOrchTest, PortReadinessColdBoot) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); From dc88d55d144f3c918534894ff0303a5f7995acab Mon Sep 17 00:00:00 2001 From: andywongarista 
<78833093+andywongarista@users.noreply.github.com> Date: Mon, 25 Jul 2022 20:04:40 -0700 Subject: [PATCH 61/64] Revert hwinfo count change (#2383) What I did Revert change from #2367 which increases count associated with SAI_SWITCH_ATTR_SWITCH_HARDWARE_INFO by 1, as well as the memset. Why I did it Original intention of this change was to accommodate sairedis behaviour when copying null-terminated string; original behaviour is that the null-terminator would not be copied and so receiver of the hwinfo (PAI) would see non-null terminated string. Reverting this change so that old behaviour is maintained and PAI driver is responsible for not relying on string to be null terminated. --- orchagent/saihelper.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index ee6ce2b802..172b74b559 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -378,11 +378,10 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) SWSS_LOG_ERROR( "hwinfo string attribute is too long." 
); return SAI_STATUS_FAILURE; } - memset(hwinfo, 0, HWINFO_MAX_SIZE + 1); strncpy(hwinfo, phy->hwinfo.c_str(), phy->hwinfo.length()); attr.id = SAI_SWITCH_ATTR_SWITCH_HARDWARE_INFO; - attr.value.s8list.count = (uint32_t) phy->hwinfo.length() + 1; + attr.value.s8list.count = (uint32_t) phy->hwinfo.length(); attr.value.s8list.list = (int8_t *) hwinfo; attrs.push_back(attr); From 75fc96597b688961e2a5ef5b0c170316b2e9afe9 Mon Sep 17 00:00:00 2001 From: Devesh Pathak <54966909+devpatha@users.noreply.github.com> Date: Tue, 26 Jul 2022 08:44:40 -0700 Subject: [PATCH 62/64] [DualToR] Handle race condition between tunnel_decap and mux orchestrator (#2397) *Change orch list to make sure mux_orch and mux_cb_orch are placed after tunnel_decap_orch --- orchagent/muxorch.cpp | 3 +-- orchagent/orchdaemon.cpp | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index c9d4158040..6770a4defb 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -1280,8 +1280,6 @@ bool MuxOrch::handlePeerSwitch(const Request& request) if (op == SET_COMMAND) { - mux_peer_switch_ = peer_ip; - // Create P2P tunnel when peer_ip is available. IpAddresses dst_ips = decap_orch_->getDstIpAddresses(MUX_TUNNEL); if (!dst_ips.getSize()) @@ -1316,6 +1314,7 @@ bool MuxOrch::handlePeerSwitch(const Request& request) } mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id, tc_to_queue_map_id, dscp_mode_name); + mux_peer_switch_ = peer_ip; SWSS_LOG_NOTICE("Mux peer ip '%s' was added, peer name '%s'", peer_ip.to_string().c_str(), peer_name.c_str()); } diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index d4b40844a8..934f1f5d19 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -346,7 +346,7 @@ bool OrchDaemon::init() * when iterating ConsumerMap. This is ensured implicitly by the order of keys in ordered map. 
* For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, mux_orch, mux_cb_orch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, gPolicerOrch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, gPolicerOrch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch, mux_orch, mux_cb_orch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) From 525a57fab294e02e0a013e0170eaefb91bc37d00 Mon Sep 17 00:00:00 2001 From: skeesara-nokia <60406151+skeesara-nokia@users.noreply.github.com> Date: Wed, 27 Jul 2022 14:40:34 -0400 Subject: [PATCH 63/64] Fix for remote system interface not getting created (#2364) Signed-off-by: keesara --- orchagent/intfsorch.cpp | 18 +++++++++++++++++- orchagent/intfsorch.h | 1 + orchagent/neighorch.cpp | 2 +- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/orchagent/intfsorch.cpp b/orchagent/intfsorch.cpp index 3b83b0d906..73ab335cc2 100644 --- a/orchagent/intfsorch.cpp +++ b/orchagent/intfsorch.cpp @@ -678,7 +678,7 @@ void IntfsOrch::doTask(Consumer &consumer) if(table_name == CHASSIS_APP_SYSTEM_INTERFACE_TABLE_NAME) { - if(!isRemoteSystemPortIntf(alias)) + if(isLocalSystemPortIntf(alias)) { //Synced local interface. 
Skip it = consumer.m_toSync.erase(it); @@ -1623,6 +1623,22 @@ bool IntfsOrch::isRemoteSystemPortIntf(string alias) return false; } +bool IntfsOrch::isLocalSystemPortIntf(string alias) +{ + Port port; + if(gPortsOrch->getPort(alias, port)) + { + if (port.m_type == Port::LAG) + { + return(port.m_system_lag_info.switch_id == gVoqMySwitchId); + } + + return(port.m_system_port_info.type != SAI_SYSTEM_PORT_TYPE_REMOTE); + } + //Given alias is system port alias of the local port/LAG + return false; +} + void IntfsOrch::voqSyncAddIntf(string &alias) { //Sync only local interface. Confirm for the local interface and diff --git a/orchagent/intfsorch.h b/orchagent/intfsorch.h index 77c8efe752..ba28c8dde6 100644 --- a/orchagent/intfsorch.h +++ b/orchagent/intfsorch.h @@ -70,6 +70,7 @@ class IntfsOrch : public Orch bool updateSyncdIntfPfx(const string &alias, const IpPrefix &ip_prefix, bool add = true); bool isRemoteSystemPortIntf(string alias); + bool isLocalSystemPortIntf(string alias); private: diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index 42bf064367..cd2dc4cd2c 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -1203,7 +1203,7 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) string alias = key.substr(0, found); - if(!gIntfsOrch->isRemoteSystemPortIntf(alias)) + if(gIntfsOrch->isLocalSystemPortIntf(alias)) { //Synced local neighbor. 
Skip it = consumer.m_toSync.erase(it); From 3161eaae6bd589bf8c9819aefb9d738047e0c9f8 Mon Sep 17 00:00:00 2001 From: "Dante (Kuo-Jung) Su" Date: Fri, 29 Jul 2022 01:08:18 +0800 Subject: [PATCH 64/64] portsorch: initial support for link-training (#2359) * Squashed commit of the following: commit 18632a3e84de380ad29a671a6496018cfd1cd540 Author: Dante Su Date: Mon May 23 12:22:49 2022 +0000 optimize port state refresh logic Signed-off-by: Dante Su commit 081d491c9bc3cda80343d1476b76c457bdeacb8e Author: ds952811 Date: Mon May 23 02:33:56 2022 +0000 address review comments Signed-off-by: ds952811 commit 84bdde4913cae37dc96ceb110cb2503633922847 Author: Dante Su Date: Fri May 20 02:15:59 2022 +0000 update the default LT capability upon get failures Signed-off-by: Dante Su commit 0f73666ba3b13af2116e55b0238e96c6e9c49270 Author: Dante Su Date: Thu May 19 11:28:38 2022 +0000 Rename updatePortStatesXX as refreshPortStatesXX Signed-off-by: Dante Su commit ddd57fe08f78f4463ee39a2075b0b0b0d56e9117 Author: Dante Su Date: Thu May 19 04:03:13 2022 +0000 Have AN cap defaults to 1, and use AN attr for LT cap query Signed-off-by: Dante Su commit 876e605b8462c4318f09af86767453907d055e5b Author: Dante Su Date: Fri May 13 11:15:12 2022 +0000 drop LT capability query Signed-off-by: Dante Su commit 55ced7db0155d161d5637f889453e7d53cdbbf10 Author: Dante Su Date: Fri Apr 29 13:53:17 2022 +0000 incorporate autoneg support from PR#2215 Signed-off-by: Dante Su commit a04594e6efee7d5f5e3b86f45c413196affc89a8 Author: Dante Su Date: Thu Apr 28 16:33:14 2022 +0000 address review comments Signed-off-by: Dante Su commit e9eeb9a87f27740ee692e0a1e86e4a10dd9e943f Author: Dante Su Date: Thu Apr 28 15:00:04 2022 +0000 address review comments Signed-off-by: Dante Su commit 4ff604da578b3fa2a40544c6f800c68cef4b9622 Author: Dante Su Date: Fri Apr 22 03:51:56 2022 +0000 Stop the port state poll by default Signed-off-by: Dante Su commit bdfb8d847fc81bc4771592d18e8e0747114688b3 Author: Dante Su Date: Fri Apr 22 
03:48:07 2022 +0000 address review comments Signed-off-by: Dante Su commit 1c6bda8279bcdcce564bdf83c59dc6a3ac7e3f97 Author: Dante Su Date: Mon Apr 18 08:46:21 2022 +0000 Restore pre-emphasis when LT is transitioned from ON to OFF Signed-off-by: Dante Su commit 09a9b334f8f4b06d399e4b3af73443f4b16d5640 Author: Dante Su Date: Mon Apr 18 02:33:11 2022 +0000 fix build failure due to SAI_PORT_ATTR_SUPPORTED_LINK_TRAINING_MODE Signed-off-by: Dante Su commit b0bee3ec7bd1c00c976e812eae27f0e88d41f630 Author: Dante Su Date: Thu Apr 14 07:54:14 2022 +0000 address review comments Signed-off-by: Dante Su commit c4345efbafd1881ab6b80e878e2ecb9d7e637b3b Author: Dante Su Date: Fri Mar 25 02:26:05 2022 +0000 portsorch: initial support for link-training - What I did Add Link-Training support to portsorch, while Gearbox is not in the scope - Why I did it In the case of DAC, static pre-calibrated pre-emphasis is rarely available on SONIC, as most of the ODM are expecting this to be done dynamically at runtime via link-training, hence we'll need this feature to improve the link quality - How I verified it Manual test Ran the Unit-tests to the corresponding changes Signed-off-by: Dante Su * Add support for selected multiple tests Signed-off-by: Dante Su * Revert "Add support for selected multiple tests" This reverts commit 8e2f7a4334278589581b2110e76f4252bbec03f0. 
* fix the comment for 'autoneg is not supported' Signed-off-by: Dante Su * address review comments Signed-off-by: Dante Su * validate AN cap only when there is an update to AN config Signed-off-by: Dante Su * drop the changes to tests/conftest.py Signed-off-by: Dante Su * fix link failure in p4orch_tests-fake_portorch.o Signed-off-by: Dante Su --- orchagent/p4orch/tests/fake_portorch.cpp | 6 +- orchagent/port.h | 7 + orchagent/portsorch.cpp | 383 ++++++++++++++++++++++- orchagent/portsorch.h | 23 ++ tests/test_port_an.py | 17 + tests/test_port_lt.py | 139 ++++++++ 6 files changed, 569 insertions(+), 6 deletions(-) create mode 100644 tests/test_port_lt.py diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index 51ff450312..1cd3e20ae3 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -689,4 +689,8 @@ void PortsOrch::voqSyncDelLagMember(Port &lag, Port &port) std::unordered_set PortsOrch::generateCounterStats(const string &type, bool gearbox) { return {}; -} \ No newline at end of file +} + +void PortsOrch::doTask(swss::SelectableTimer &timer) +{ +} diff --git a/orchagent/port.h b/orchagent/port.h index e5ba8134f5..a5e003584b 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -119,6 +119,7 @@ class Port uint32_t m_speed = 0; // Mbps std::string m_learn_mode = "hardware"; AutoNegMode m_autoneg = Port::AutoNegMode::AUTONEG_NOT_SET; + int m_link_training = -1; // -1 means not set, 0 = disabled, 1 = enabled bool m_admin_state_up = false; bool m_init = false; bool m_l3_vni = false; @@ -177,8 +178,14 @@ class Port sai_object_id_t m_system_side_id = 0; sai_object_id_t m_line_side_id = 0; + /* pre-emphasis */ + std::map> m_preemphasis; + bool m_fec_cfg = false; bool m_an_cfg = false; + + int m_cap_an = -1; /* Capability - AutoNeg, -1 means not set */ + int m_cap_lt = -1; /* Capability - LinkTraining, -1 means not set */ }; } diff --git a/orchagent/portsorch.cpp 
b/orchagent/portsorch.cpp index e9a3afdc1e..3a0b4e75f0 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -61,6 +61,8 @@ extern string gMyAsicName; #define DEFAULT_VLAN_ID 1 #define MAX_VALID_VLAN_ID 4094 +#define PORT_SPEED_LIST_DEFAULT_SIZE 16 +#define PORT_STATE_POLLING_SEC 5 #define PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 1000 #define PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS 60000 #define QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 @@ -112,6 +114,26 @@ static map autoneg_mode_map = { "off", 0 } }; +static map link_training_mode_map = +{ + { "on", 1 }, + { "off", 0 } +}; + +static map link_training_failure_map = +{ + { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_NO_ERROR, "none" }, + { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_FRAME_LOCK_ERROR, "frame_lock"}, + { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_SNR_LOWER_THRESHOLD, "snr_low"}, + { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_TIME_OUT, "timeout"} +}; + +static map link_training_rx_status_map = +{ + { SAI_PORT_LINK_TRAINING_RX_STATUS_NOT_TRAINED, "not_trained" }, + { SAI_PORT_LINK_TRAINING_RX_STATUS_TRAINED, "trained"} +}; + // Interface type map used for gearbox static map interface_type_map = { @@ -331,7 +353,8 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector (new LagIdAllocator(chassisAppDb)); } + + auto executor = new ExecutableTimer(m_port_state_poller, this, "PORT_STATE_POLLER"); + Orch::addExecutor(executor); } void PortsOrch::removeDefaultVlanMembers() @@ -1985,6 +2011,35 @@ void PortsOrch::initPortSupportedSpeeds(const std::string& alias, sai_object_id_ m_portStateTable.set(alias, v); } +void PortsOrch::initPortCapAutoNeg(Port &port) +{ + sai_status_t status; + sai_attribute_t attr; + + attr.id = SAI_PORT_ATTR_SUPPORTED_AUTO_NEG_MODE; + status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status == SAI_STATUS_SUCCESS) + { + port.m_cap_an = attr.value.booldata ? 
1 : 0; + } + else + { + // To avoid breakage on the existing platforms, AN should be 1 by default + port.m_cap_an = 1; + SWSS_LOG_WARN("Unable to get %s AN support capability", + port.m_alias.c_str()); + } +} + +void PortsOrch::initPortCapLinkTraining(Port &port) +{ + // TODO: + // Add SAI_PORT_ATTR_SUPPORTED_LINK_TRAINING_MODE query when it is + // available in the saiport.h of SAI. + port.m_cap_lt = 1; + SWSS_LOG_WARN("Unable to get %s LT support capability", port.m_alias.c_str()); +} + /* * If Gearbox is enabled and this is a Gearbox port then set the attributes accordingly. */ @@ -2139,6 +2194,45 @@ bool PortsOrch::getPortSpeed(sai_object_id_t id, sai_uint32_t &speed) return true; } +bool PortsOrch::getPortAdvSpeeds(const Port& port, bool remote, std::vector& speed_list) +{ + sai_object_id_t port_id = port.m_port_id; + sai_object_id_t line_port_id; + sai_attribute_t attr; + sai_status_t status; + std::vector speeds(PORT_SPEED_LIST_DEFAULT_SIZE); + + attr.id = remote ? SAI_PORT_ATTR_REMOTE_ADVERTISED_SPEED : SAI_PORT_ATTR_ADVERTISED_SPEED; + attr.value.u32list.count = static_cast(speeds.size()); + attr.value.u32list.list = speeds.data(); + + if (getDestPortId(port_id, LINE_PORT_TYPE, line_port_id)) + { + status = sai_port_api->get_port_attribute(line_port_id, 1, &attr); + } + else + { + status = sai_port_api->get_port_attribute(port_id, 1, &attr); + } + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Unable to get advertised speed for %s", port.m_alias.c_str()); + return false; + } + speeds.resize(attr.value.u32list.count); + speed_list.swap(speeds); + return true; +} + +bool PortsOrch::getPortAdvSpeeds(const Port& port, bool remote, string& adv_speeds) +{ + std::vector speed_list; + bool rc = getPortAdvSpeeds(port, remote, speed_list); + + adv_speeds = rc ? 
swss::join(',', speed_list.begin(), speed_list.end()) : ""; + return rc; +} + task_process_status PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list) { SWSS_LOG_ENTER(); @@ -2253,6 +2347,32 @@ task_process_status PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) return task_success; } +task_process_status PortsOrch::setPortLinkTraining(const Port &port, bool state) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::PHY) + { + return task_failed; + } + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_LINK_TRAINING_ENABLE; + attr.value.booldata = state; + + string op = state ? "on" : "off"; + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set LT %s to port %s", op.c_str(), port.m_alias.c_str()); + return handleSaiSetStatus(SAI_API_PORT, status); + } + + SWSS_LOG_INFO("Set LT %s to port %s", op.c_str(), port.m_alias.c_str()); + + return task_success; +} + bool PortsOrch::setHostIntfsOperStatus(const Port& port, bool isUp) const { SWSS_LOG_ENTER(); @@ -2760,7 +2880,9 @@ void PortsOrch::doPortTask(Consumer &consumer) uint32_t speed = 0; string learn_mode; string an_str; + string lt_str; int an = -1; + int lt = -1; int index = -1; string role; string adv_speeds_str; @@ -2851,6 +2973,11 @@ void PortsOrch::doPortTask(Consumer &consumer) { adv_interface_types_str = fvValue(i); } + /* Set link training */ + else if (fvField(i) == "link_training") + { + lt_str = fvValue(i); + } /* Set port serdes Pre-emphasis */ else if (fvField(i) == "preemphasis") { @@ -3030,10 +3157,21 @@ void PortsOrch::doPortTask(Consumer &consumer) it = consumer.m_toSync.erase(it); continue; } - an = autoneg_mode_map[an_str]; if (an != p.m_autoneg) { + if (p.m_cap_an < 0) + { + initPortCapAutoNeg(p); + m_portList[alias] = p; + } + if (p.m_cap_an < 1) + { + SWSS_LOG_ERROR("%s: autoneg is not supported (cap=%d)", p.m_alias.c_str(), p.m_cap_an); + // autoneg is not supported, 
don't retry + it = consumer.m_toSync.erase(it); + continue; + } if (p.m_admin_state_up) { /* Bring port down before applying speed */ @@ -3065,6 +3203,62 @@ void PortsOrch::doPortTask(Consumer &consumer) SWSS_LOG_NOTICE("Set port %s AutoNeg from %d to %d", alias.c_str(), p.m_autoneg, an); p.m_autoneg = static_cast(an); m_portList[alias] = p; + m_portStateTable.hdel(p.m_alias, "rmt_adv_speeds"); + updatePortStatePoll(p, PORT_STATE_POLL_AN, (an > 0)); + } + } + + if (!lt_str.empty() && (p.m_type == Port::PHY)) + { + if (link_training_mode_map.find(lt_str) == link_training_mode_map.end()) + { + SWSS_LOG_ERROR("Failed to parse LT value: %s", lt_str.c_str()); + // Invalid link training mode configured, don't retry + it = consumer.m_toSync.erase(it); + continue; + } + + lt = link_training_mode_map[lt_str]; + if (lt != p.m_link_training) + { + if (p.m_cap_lt < 0) + { + initPortCapLinkTraining(p); + m_portList[alias] = p; + } + if (p.m_cap_lt < 1) + { + SWSS_LOG_WARN("%s: LT is not supported(cap=%d)", alias.c_str(), p.m_cap_lt); + // Don't retry + it = consumer.m_toSync.erase(it); + continue; + } + + auto status = setPortLinkTraining(p, lt > 0 ? 
true : false); + if (status != task_success) + { + SWSS_LOG_ERROR("Failed to set port %s LT from %d to %d", alias.c_str(), p.m_link_training, lt); + if (status == task_need_retry) + { + it++; + } + else + { + it = consumer.m_toSync.erase(it); + } + continue; + } + m_portStateTable.hset(alias, "link_training_status", lt_str); + SWSS_LOG_NOTICE("Set port %s LT from %d to %d", alias.c_str(), p.m_link_training, lt); + p.m_link_training = lt; + m_portList[alias] = p; + updatePortStatePoll(p, PORT_STATE_POLL_LT, (lt > 0)); + + // Restore pre-emphasis when LT is transitioned from ON to OFF + if ((p.m_link_training < 1) && (serdes_attr.size() == 0)) + { + serdes_attr = p.m_preemphasis; + } } } @@ -3405,9 +3599,17 @@ void PortsOrch::doPortTask(Consumer &consumer) if (serdes_attr.size() != 0) { - if (setPortSerdesAttribute(p.m_port_id, serdes_attr)) + if (p.m_link_training > 0) + { + SWSS_LOG_NOTICE("Save port %s preemphasis for LT", alias.c_str()); + p.m_preemphasis = serdes_attr; + m_portList[alias] = p; + } + else if (setPortSerdesAttribute(p.m_port_id, serdes_attr)) { - SWSS_LOG_NOTICE("Set port %s preemphasis is success", alias.c_str()); + SWSS_LOG_NOTICE("Set port %s preemphasis is success", alias.c_str()); + p.m_preemphasis = serdes_attr; + m_portList[alias] = p; } else { @@ -3415,7 +3617,6 @@ void PortsOrch::doPortTask(Consumer &consumer) it++; continue; } - } /* create host_tx_ready field in state-db */ @@ -5957,6 +6158,18 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) { updateDbPortOperStatus(port, status); updateGearboxPortOperStatus(port); + + /* Refresh the port states and reschedule the poller tasks */ + if (port.m_autoneg > 0) + { + refreshPortStateAutoNeg(port); + updatePortStatePoll(port, PORT_STATE_POLL_AN, !(status == SAI_PORT_OPER_STATUS_UP)); + } + if (port.m_link_training > 0) + { + refreshPortStateLinkTraining(port); + updatePortStatePoll(port, PORT_STATE_POLL_LT, !(status == SAI_PORT_OPER_STATUS_UP)); + } } 
port.m_oper_status = status; @@ -6112,6 +6325,50 @@ bool PortsOrch::getPortOperSpeed(const Port& port, sai_uint32_t& speed) const return true; } +bool PortsOrch::getPortLinkTrainingRxStatus(const Port &port, sai_port_link_training_rx_status_t &rx_status) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::PHY) + { + return false; + } + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_LINK_TRAINING_RX_STATUS; + sai_status_t ret = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get LT rx status for %s", port.m_alias.c_str()); + return false; + } + + rx_status = static_cast(attr.value.u32); + return true; +} + +bool PortsOrch::getPortLinkTrainingFailure(const Port &port, sai_port_link_training_failure_status_t &failure) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::PHY) + { + return false; + } + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_LINK_TRAINING_FAILURE_STATUS; + sai_status_t ret = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get LT failure status for %s", port.m_alias.c_str()); + return false; + } + + failure = static_cast(attr.value.u32); + return true; +} + bool PortsOrch::getSaiAclBindPointType(Port::Type type, sai_acl_bind_point_type_t &sai_acl_bind_type) { @@ -7154,3 +7411,119 @@ bool PortsOrch::decrFdbCount(const std::string& alias, int count) } return true; } + +/* Refresh the per-port Auto-Negotiation operational states */ +void PortsOrch::refreshPortStateAutoNeg(const Port &port) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::Type::PHY) + { + return; + } + + string adv_speeds = "N/A"; + + if (port.m_admin_state_up) + { + if (!getPortAdvSpeeds(port, true, adv_speeds)) + { + adv_speeds = "N/A"; + updatePortStatePoll(port, PORT_STATE_POLL_AN, false); + } + } + + m_portStateTable.hset(port.m_alias, "rmt_adv_speeds", adv_speeds); +} + +/* Refresh the per-port Link-Training operational 
states */ +void PortsOrch::refreshPortStateLinkTraining(const Port &port) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::Type::PHY) + { + return; + } + + string status = "off"; + + if (port.m_admin_state_up && port.m_link_training > 0 && port.m_cap_lt > 0) + { + sai_port_link_training_rx_status_t rx_status; + sai_port_link_training_failure_status_t failure; + + if (!getPortLinkTrainingRxStatus(port, rx_status)) + { + status = "on"; // LT is enabled, while the rx status is unavailable + } + else if (rx_status == SAI_PORT_LINK_TRAINING_RX_STATUS_TRAINED) + { + status = link_training_rx_status_map.at(rx_status); + } + else + { + if (getPortLinkTrainingFailure(port, failure) && + failure != SAI_PORT_LINK_TRAINING_FAILURE_STATUS_NO_ERROR) + { + status = link_training_failure_map.at(failure); + } + else + { + status = link_training_rx_status_map.at(rx_status); + } + } + } + + m_portStateTable.hset(port.m_alias, "link_training_status", status); +} + +/* Activate/De-activate a specific port state poller task */ +void PortsOrch::updatePortStatePoll(const Port &port, port_state_poll_t type, bool active) +{ + if (type == PORT_STATE_POLL_NONE) + { + return; + } + if (active) + { + m_port_state_poll[port.m_alias] |= type; + m_port_state_poller->start(); + } + else + { + m_port_state_poll[port.m_alias] &= ~type; + } +} + +void PortsOrch::doTask(swss::SelectableTimer &timer) +{ + Port port; + + for (auto it = m_port_state_poll.begin(); it != m_port_state_poll.end(); ) + { + if ((it->second == PORT_STATE_POLL_NONE) || !getPort(it->first, port)) + { + it = m_port_state_poll.erase(it); + continue; + } + if (!port.m_admin_state_up) + { + ++it; + continue; + } + if (it->second & PORT_STATE_POLL_AN) + { + refreshPortStateAutoNeg(port); + } + if (it->second & PORT_STATE_POLL_LT) + { + refreshPortStateLinkTraining(port); + } + ++it; + } + if (m_port_state_poll.size() == 0) + { + m_port_state_poller->stop(); + } +} diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 
0fd3552e19..42e17d7fb9 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -258,6 +258,8 @@ class PortsOrch : public Orch, public Subject NotificationConsumer* m_portStatusNotificationConsumer; + swss::SelectableTimer *m_port_state_poller = nullptr; + void doTask() override; void doTask(Consumer &consumer); void doPortTask(Consumer &consumer); @@ -267,6 +269,7 @@ class PortsOrch : public Orch, public Subject void doLagMemberTask(Consumer &consumer); void doTask(NotificationConsumer &consumer); + void doTask(swss::SelectableTimer &timer); void removePortFromLanesMap(string alias); void removePortFromPortListMap(sai_object_id_t port_id); @@ -299,6 +302,9 @@ class PortsOrch : public Orch, public Subject bool initPort(const string &alias, const string &role, const int index, const set &lane_set); void deInitPort(string alias, sai_object_id_t port_id); + void initPortCapAutoNeg(Port &port); + void initPortCapLinkTraining(Port &port); + bool setPortAdminStatus(Port &port, bool up); bool getPortAdminStatus(sai_object_id_t id, bool& up); bool setPortMtu(sai_object_id_t id, sai_uint32_t mtu); @@ -319,6 +325,8 @@ class PortsOrch : public Orch, public Subject bool setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value); bool setGearboxPortAttr(Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value); + bool getPortAdvSpeeds(const Port& port, bool remote, std::vector& speed_list); + bool getPortAdvSpeeds(const Port& port, bool remote, string& adv_speeds); task_process_status setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list); bool getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); @@ -338,12 +346,27 @@ class PortsOrch : public Orch, public Subject bool setPortFecMode(sai_object_id_t id, int fec); task_process_status setPortInterfaceType(sai_object_id_t id, sai_port_interface_type_t interface_type); task_process_status setPortAdvInterfaceTypes(sai_object_id_t id, std::vector &interface_types); + 
task_process_status setPortLinkTraining(const Port& port, bool state); void updatePortOperStatus(Port &port, sai_port_oper_status_t status); bool getPortOperSpeed(const Port& port, sai_uint32_t& speed) const; void updateDbPortOperSpeed(Port &port, sai_uint32_t speed); + bool getPortLinkTrainingRxStatus(const Port &port, sai_port_link_training_rx_status_t &rx_status); + bool getPortLinkTrainingFailure(const Port &port, sai_port_link_training_failure_status_t &failure); + + typedef enum { + PORT_STATE_POLL_NONE = 0, + PORT_STATE_POLL_AN = 0x00000001, /* Auto Negotiation */ + PORT_STATE_POLL_LT = 0x00000002 /* Link Trainig */ + } port_state_poll_t; + + map m_port_state_poll; + void updatePortStatePoll(const Port &port, port_state_poll_t type, bool active); + void refreshPortStateAutoNeg(const Port &port); + void refreshPortStateLinkTraining(const Port &port); + void getPortSerdesVal(const std::string& s, std::vector &lane_values); bool getPortAdvSpeedsVal(const std::string &s, std::vector &speed_values); bool getPortInterfaceTypeVal(const std::string &s, sai_port_interface_type_t &interface_type); diff --git a/tests/test_port_an.py b/tests/test_port_an.py index dc98f43d0e..9c004aa790 100644 --- a/tests/test_port_an.py +++ b/tests/test_port_an.py @@ -293,6 +293,23 @@ def test_PortAutoNegWarm(self, dvs, testlog): # slow down crm polling dvs.crm_poll_set("10000") + def test_PortAutoNegRemoteAdvSpeeds(self, dvs, testlog): + + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + + ctbl = swsscommon.Table(cdb, "PORT") + stbl = swsscommon.Table(sdb, "PORT_TABLE") + + # set autoneg = true and admin_status = up + fvs = swsscommon.FieldValuePairs([("autoneg","on"),("admin_status","up")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(10) + + (status, fvs) = stbl.get("Ethernet0") + assert status == True + assert "rmt_adv_speeds" in [fv[0] for fv in fvs] # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail 
on final test it invokes module tear-down before retrying diff --git a/tests/test_port_lt.py b/tests/test_port_lt.py new file mode 100644 index 0000000000..3ec51ed68b --- /dev/null +++ b/tests/test_port_lt.py @@ -0,0 +1,139 @@ +import time +import os +import pytest + +from swsscommon import swsscommon + + +class TestPortLinkTraining(object): + def test_PortLinkTrainingForce(self, dvs, testlog): + + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + adb = dvs.get_asic_db() + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + fvs = swsscommon.FieldValuePairs([("link_training","off")]) + tbl.set("Ethernet0", fvs) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + fvs = swsscommon.FieldValuePairs([("link_training","on")]) + tbl.set("Ethernet4", fvs) + + # validate if link_training false is pushed to asic db when set first time + port_oid = adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_LINK_TRAINING_ENABLE":"false"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + # validate if link_training true is pushed to asic db when set first time + port_oid = adb.port_name_map["Ethernet4"] + expected_fields = {"SAI_PORT_ATTR_LINK_TRAINING_ENABLE":"true"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + def test_PortLinkTrainingCold(self, dvs, testlog): + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + + # set link_training = true + fvs = swsscommon.FieldValuePairs([("link_training","on")]) + + tbl.set("Ethernet0", fvs) + + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_LINK_TRAINING_ENABLE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_LINK_TRAINING_ENABLE": + assert 
fv[1] == "true" + + # change link_training to false + fvs = swsscommon.FieldValuePairs([("link_training","off")]) + + tbl.set("Ethernet0", fvs) + + time.sleep(1) + + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_LINK_TRAINING_ENABLE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_LINK_TRAINING_ENABLE": + assert fv[1] == "false" + + def test_PortLinkTrainingWarm(self, dvs, testlog): + + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + ctbl = swsscommon.Table(cdb, "PORT") + stbl = swsscommon.Table(sdb, "PORT_TABLE") + + # set link_training = true + fvs = swsscommon.FieldValuePairs([("link_training","on")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_LINK_TRAINING_ENABLE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_AUTO_NEG_MODE": + assert fv[1] == "true" + + # set admin up + cfvs = swsscommon.FieldValuePairs([("admin_status", "up")]) + ctbl.set("Ethernet0", cfvs) + + # enable warm restart + (exitcode, result) = dvs.runcmd("config warm_restart enable swss") + assert exitcode == 0 + + # freeze orchagent for warm restart + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + assert result == "RESTARTCHECK succeeded\n" + time.sleep(2) + + try: + # restart orchagent + # clean port state + dvs.stop_swss() + ports = stbl.getKeys() + for port in ports: + stbl._del(port) + dvs.start_swss() + time.sleep(2) + + # check ASIC DB after warm restart + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert 
"SAI_PORT_ATTR_LINK_TRAINING_ENABLE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_LINK_TRAINING_ENABLE": + assert fv[1] == "true" + + finally: + # disable warm restart + dvs.runcmd("config warm_restart disable swss") + # slow down crm polling + dvs.runcmd("crm config polling interval 10000") + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass