From b133c9fb6b7b75198f87378f0cd469aa0ed833ef Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 3 Mar 2022 01:09:58 -0800 Subject: [PATCH 01/15] Support new field pfcwd_sw_enable Signed-off-by: bingwang --- orchagent/pfcwdorch.cpp | 2 +- orchagent/port.h | 4 +++- orchagent/portsorch.cpp | 46 +++++++++++++++++++++++++++++++++++++++++ orchagent/portsorch.h | 8 +++++++ orchagent/qosorch.cpp | 24 +++++++++++++++++++-- orchagent/qosorch.h | 2 ++ tests/test_pfcwd.py | 13 ++++++------ 7 files changed, 89 insertions(+), 10 deletions(-) diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index be4c1e51c4..01716ab174 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -489,7 +489,7 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, uint8_t pfcMask = 0; - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, PFC_WD_SW, &pfcMask)) { SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); return false; diff --git a/orchagent/port.h b/orchagent/port.h index 83f61e1b1c..435c2e189a 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -139,7 +139,9 @@ class Port std::vector m_queue_ids; std::vector m_priority_group_ids; sai_port_priority_flow_control_mode_t m_pfc_asym = SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED; - uint8_t m_pfc_bitmask = 0; + uint8_t m_pfc_bitmask = 0; // PFC enable bit mask + uint8_t m_pfcwd_sw_bitmask = 0; // PFC software watchdog enable + uint8_t m_pfcwd_hw_bitmask = 0; // PFC hardware watchdog enable uint16_t m_tpid = DEFAULT_TPID; uint32_t m_nat_zone_id = 0; uint32_t m_vnid = VNID_NONE; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 9eca1439ed..5b2a93e4eb 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -1193,6 +1193,52 @@ bool PortsOrch::setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask) return true; } +bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType type, 
uint8_t pfcwd_bitmask) +{ + SWSS_LOG_ENTER(); + + Port p; + + if (!getPort(portId, p)) + { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); + return false; + } + if (type == PFC_WD_SW) + { + p.m_pfcwd_sw_bitmask = pfcwd_bitmask; + } + else + { + p.m_pfcwd_hw_bitmask = pfcwd_bitmask; + } + + return true; +} + +bool PortsOrch::getPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType type, uint8_t *pfcwd_bitmask) +{ + SWSS_LOG_ENTER(); + + Port p; + + if (!pfcwd_bitmask || !getPort(portId, p)) + { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); + return false; + } + if (type == PFC_WD_SW) + { + *pfcwd_bitmask = p.m_pfcwd_sw_bitmask; + } + else + { + *pfcwd_bitmask = p.m_pfcwd_hw_bitmask; + } + + return true; +} + bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym) { SWSS_LOG_ENTER(); diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 843ffbd7f1..a71fddb81c 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -72,6 +72,11 @@ struct VlanMemberUpdate bool add; }; +enum PfcWatchDogType +{ + PFC_WD_SW = 0, // PFC watchdog by software + PFC_WD_HW // PFC watchdog by hardware +}; class PortsOrch : public Orch, public Subject { public: @@ -124,6 +129,9 @@ class PortsOrch : public Orch, public Subject bool getPortPfc(sai_object_id_t portId, uint8_t *pfc_bitmask); bool setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask); + bool setPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType type, uint8_t pfc_bitmask); + bool getPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType type, uint8_t *pfc_bitmask); + void generateQueueMap(); void generatePriorityGroupMap(); void generatePortCounterMap(); diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 5a48b69969..93a7bf11cf 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -1628,6 +1628,8 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) string op = kfvOp(tuple); 
sai_uint8_t pfc_enable = 0; + sai_uint8_t pfcwd_sw_enable = 0; + sai_uint8_t pfcwd_hw_enable = 0; map> update_list; for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++) { @@ -1648,14 +1650,28 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) update_list[qos_to_attr_map[map_type_name]] = make_pair(map_name, id); } - if (fvField(*it) == pfc_enable_name) + else if (fvField(*it) == pfc_enable_name || fvField(*it) == pfcwd_sw_enable_name || fvField(*it) == pfcwd_hw_enable_name) { + sai_uint8_t bitmask = 0; vector queue_indexes; queue_indexes = tokenize(fvValue(*it), list_item_delimiter); for(string q_ind : queue_indexes) { sai_uint8_t q_val = (uint8_t)stoi(q_ind); - pfc_enable |= (uint8_t)(1 << q_val); + bitmask |= (uint8_t)(1 << q_val); + } + + if (fvField(*it) == pfc_enable_name) + { + pfc_enable = bitmask; + } + else if (fvField(*it) == pfcwd_sw_enable_name) + { + pfcwd_sw_enable = bitmask; + } + else + { + pfcwd_hw_enable = bitmask; } } } @@ -1708,6 +1724,10 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) SWSS_LOG_INFO("Applied PFC bits 0x%x to port %s", pfc_enable, port_name.c_str()); } + + // Save pfd_wd bitmask unconditionally + gPortsOrch->setPortPfcWatchdogStatus(port.m_port_id, PFC_WD_SW, pfcwd_sw_enable); + gPortsOrch->setPortPfcWatchdogStatus(port.m_port_id, PFC_WD_HW, pfcwd_hw_enable); } SWSS_LOG_NOTICE("Applied QoS maps to ports"); diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index cd265d59ec..2b96535ea3 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -14,6 +14,8 @@ const string dot1p_to_tc_field_name = "dot1p_to_tc_map"; const string pfc_to_pg_map_name = "pfc_to_pg_map"; const string pfc_to_queue_map_name = "pfc_to_queue_map"; const string pfc_enable_name = "pfc_enable"; +const string pfcwd_sw_enable_name = "pfcwd_sw_enable"; +const string pfcwd_hw_enable_name = "pfcwd_hw_enable"; const string tc_to_pg_map_field_name = "tc_to_pg_map"; const string 
tc_to_queue_field_name = "tc_to_queue_map"; const string scheduler_field_name = "scheduler"; diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py index 78cd851574..9a31d1dcf5 100644 --- a/tests/test_pfcwd.py +++ b/tests/test_pfcwd.py @@ -147,10 +147,11 @@ def _get_bitmask(self, queues): return str(mask) - def set_ports_pfc(self, status='enable', pfc_queues=[3,4]): + def set_ports_pfc(self, status='enable', pfc_queues=[3,4], watchdog_type='software'): + keyname = 'pfcwd_sw_enable' if watchdog_type == 'software' else 'pfcwd_hw_enable' for port in self.test_ports: if 'enable' in status: - fvs = {'pfc_enable': ",".join([str(q) for q in pfc_queues])} + fvs = {keyname: ",".join([str(q) for q in pfc_queues])} self.config_db.create_entry("PORT_QOS_MAP", port, fvs) else: self.config_db.delete_entry("PORT_QOS_MAP", port) @@ -212,11 +213,11 @@ def set_storm_state(self, queues, state="enabled"): queue_name = port + ":" + str(queue) self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) - def test_pfcwd_single_queue(self, dvs, setup_teardown_test): + def test_pfcwd_software_single_queue(self, dvs, setup_teardown_test): try: # enable PFC on queues test_queues = [3, 4] - self.set_ports_pfc(pfc_queues=test_queues) + self.set_ports_pfc(pfc_queues=test_queues, watchdog_type='software') # verify in asic db self.verify_ports_pfc(test_queues) @@ -253,11 +254,11 @@ def test_pfcwd_single_queue(self, dvs, setup_teardown_test): self.reset_pfcwd_counters(storm_queue) self.stop_pfcwd_on_ports() - def test_pfcwd_multi_queue(self, dvs, setup_teardown_test): + def test_pfcwd_software_multi_queue(self, dvs, setup_teardown_test): try: # enable PFC on queues test_queues = [3, 4] - self.set_ports_pfc(pfc_queues=test_queues) + self.set_ports_pfc(pfc_queues=test_queues, watchdog_type='software') # verify in asic db self.verify_ports_pfc(test_queues) From 87edb68e5723cff4cd1e32e360b2c435fa2dcac0 Mon Sep 17 00:00:00 2001 From: bingwang Date: Mon, 7 Mar 2022 11:17:59 +0000 
Subject: [PATCH 02/15] Fix UT Signed-off-by: bingwang --- orchagent/portsorch.cpp | 5 ++++- tests/test_pfcwd.py | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 5b2a93e4eb..8b8929c6fe 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -1212,7 +1212,10 @@ bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType { p.m_pfcwd_hw_bitmask = pfcwd_bitmask; } - + + m_portList[p.m_alias] = p; + + SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmast=0x%x, type=%d", portId, pfcwd_bitmask, type); return true; } diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py index 9a31d1dcf5..d3b7af3a14 100644 --- a/tests/test_pfcwd.py +++ b/tests/test_pfcwd.py @@ -151,7 +151,8 @@ def set_ports_pfc(self, status='enable', pfc_queues=[3,4], watchdog_type='softwa keyname = 'pfcwd_sw_enable' if watchdog_type == 'software' else 'pfcwd_hw_enable' for port in self.test_ports: if 'enable' in status: - fvs = {keyname: ",".join([str(q) for q in pfc_queues])} + queues = ",".join([str(q) for q in pfc_queues]) + fvs = {keyname: queues, 'pfc_enable': queues} self.config_db.create_entry("PORT_QOS_MAP", port, fvs) else: self.config_db.delete_entry("PORT_QOS_MAP", port) From 0408e945f5e6a3b5ec002f77f651e32e15d04407 Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 10 Mar 2022 19:55:12 -0800 Subject: [PATCH 03/15] Rm pfcwd_hw attributes Signed-off-by: bingwang --- orchagent/pfcwdorch.cpp | 2 +- orchagent/port.h | 1 - orchagent/portsorch.cpp | 28 ++++++++-------------------- orchagent/portsorch.h | 9 ++------- orchagent/qosorch.cpp | 12 +++--------- orchagent/qosorch.h | 1 - tests/test_pfcwd.py | 2 +- 7 files changed, 15 insertions(+), 40 deletions(-) diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index 01716ab174..16f4a47b3d 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -489,7 +489,7 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, 
uint8_t pfcMask = 0; - if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, PFC_WD_SW, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); return false; diff --git a/orchagent/port.h b/orchagent/port.h index 435c2e189a..db9f2b7bff 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -141,7 +141,6 @@ class Port sai_port_priority_flow_control_mode_t m_pfc_asym = SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED; uint8_t m_pfc_bitmask = 0; // PFC enable bit mask uint8_t m_pfcwd_sw_bitmask = 0; // PFC software watchdog enable - uint8_t m_pfcwd_hw_bitmask = 0; // PFC hardware watchdog enable uint16_t m_tpid = DEFAULT_TPID; uint32_t m_nat_zone_id = 0; uint32_t m_vnid = VNID_NONE; diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 8b8929c6fe..bc7a8839e2 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -1193,7 +1193,7 @@ bool PortsOrch::setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask) return true; } -bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType type, uint8_t pfcwd_bitmask) +bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfcwd_bitmask) { SWSS_LOG_ENTER(); @@ -1204,22 +1204,16 @@ bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); return false; } - if (type == PFC_WD_SW) - { - p.m_pfcwd_sw_bitmask = pfcwd_bitmask; - } - else - { - p.m_pfcwd_hw_bitmask = pfcwd_bitmask; - } - + + p.m_pfcwd_sw_bitmask = pfcwd_bitmask; + m_portList[p.m_alias] = p; - SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmast=0x%x, type=%d", portId, pfcwd_bitmask, type); + SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmast=0x%x", portId, pfcwd_bitmask); return true; } -bool PortsOrch::getPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType type, uint8_t 
*pfcwd_bitmask) +bool PortsOrch::getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfcwd_bitmask) { SWSS_LOG_ENTER(); @@ -1230,14 +1224,8 @@ bool PortsOrch::getPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); return false; } - if (type == PFC_WD_SW) - { - *pfcwd_bitmask = p.m_pfcwd_sw_bitmask; - } - else - { - *pfcwd_bitmask = p.m_pfcwd_hw_bitmask; - } + + *pfcwd_bitmask = p.m_pfcwd_sw_bitmask; return true; } diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index a71fddb81c..f770ff973a 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -72,11 +72,6 @@ struct VlanMemberUpdate bool add; }; -enum PfcWatchDogType -{ - PFC_WD_SW = 0, // PFC watchdog by software - PFC_WD_HW // PFC watchdog by hardware -}; class PortsOrch : public Orch, public Subject { public: @@ -129,8 +124,8 @@ class PortsOrch : public Orch, public Subject bool getPortPfc(sai_object_id_t portId, uint8_t *pfc_bitmask); bool setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask); - bool setPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType type, uint8_t pfc_bitmask); - bool getPortPfcWatchdogStatus(sai_object_id_t portId, PfcWatchDogType type, uint8_t *pfc_bitmask); + bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask); + bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask); void generateQueueMap(); void generatePriorityGroupMap(); diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 93a7bf11cf..eec3828cb0 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -1629,7 +1629,6 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) sai_uint8_t pfc_enable = 0; sai_uint8_t pfcwd_sw_enable = 0; - sai_uint8_t pfcwd_hw_enable = 0; map> update_list; for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++) { @@ -1650,7 +1649,7 @@ task_process_status 
QosOrch::handlePortQosMapTable(Consumer& consumer) update_list[qos_to_attr_map[map_type_name]] = make_pair(map_name, id); } - else if (fvField(*it) == pfc_enable_name || fvField(*it) == pfcwd_sw_enable_name || fvField(*it) == pfcwd_hw_enable_name) + else if (fvField(*it) == pfc_enable_name || fvField(*it) == pfcwd_sw_enable_name) { sai_uint8_t bitmask = 0; vector queue_indexes; @@ -1665,13 +1664,9 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) { pfc_enable = bitmask; } - else if (fvField(*it) == pfcwd_sw_enable_name) - { - pfcwd_sw_enable = bitmask; - } else { - pfcwd_hw_enable = bitmask; + pfcwd_sw_enable = bitmask; } } } @@ -1726,8 +1721,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) } // Save pfd_wd bitmask unconditionally - gPortsOrch->setPortPfcWatchdogStatus(port.m_port_id, PFC_WD_SW, pfcwd_sw_enable); - gPortsOrch->setPortPfcWatchdogStatus(port.m_port_id, PFC_WD_HW, pfcwd_hw_enable); + gPortsOrch->setPortPfcWatchdogStatus(port.m_port_id, pfcwd_sw_enable); } SWSS_LOG_NOTICE("Applied QoS maps to ports"); diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index 2b96535ea3..4db54583ec 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -15,7 +15,6 @@ const string pfc_to_pg_map_name = "pfc_to_pg_map"; const string pfc_to_queue_map_name = "pfc_to_queue_map"; const string pfc_enable_name = "pfc_enable"; const string pfcwd_sw_enable_name = "pfcwd_sw_enable"; -const string pfcwd_hw_enable_name = "pfcwd_hw_enable"; const string tc_to_pg_map_field_name = "tc_to_pg_map"; const string tc_to_queue_field_name = "tc_to_queue_map"; const string scheduler_field_name = "scheduler"; diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py index d3b7af3a14..a89e8133c0 100644 --- a/tests/test_pfcwd.py +++ b/tests/test_pfcwd.py @@ -148,7 +148,7 @@ def _get_bitmask(self, queues): return str(mask) def set_ports_pfc(self, status='enable', pfc_queues=[3,4], watchdog_type='software'): - keyname = 'pfcwd_sw_enable' if 
watchdog_type == 'software' else 'pfcwd_hw_enable' + keyname = 'pfcwd_sw_enable' for port in self.test_ports: if 'enable' in status: queues = ",".join([str(q) for q in pfc_queues]) From 06f2a7697d0b333c467fd48334f63c3c0819b229 Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 31 Mar 2022 02:47:44 +0000 Subject: [PATCH 04/15] Query pfcwd_sw_enable for BigRed mode Signed-off-by: bingwang --- orchagent/pfcwdorch.cpp | 8 ++++---- tests/test_pfcwd.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index 16f4a47b3d..62765ab0a1 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -399,9 +399,9 @@ void PfcWdSwOrch::enableBigRedSwitchMode() continue; } - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { - SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to get PFC watchdog mask on port %s", port.m_alias.c_str()); return; } @@ -443,9 +443,9 @@ void PfcWdSwOrch::enableBigRedSwitchMode() continue; } - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { - SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to get PFC watchdog mask on port %s", port.m_alias.c_str()); return; } diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py index a89e8133c0..a96ae6a5c0 100644 --- a/tests/test_pfcwd.py +++ b/tests/test_pfcwd.py @@ -147,7 +147,7 @@ def _get_bitmask(self, queues): return str(mask) - def set_ports_pfc(self, status='enable', pfc_queues=[3,4], watchdog_type='software'): + def set_ports_pfc(self, status='enable', pfc_queues=[3,4]): keyname = 'pfcwd_sw_enable' for port in self.test_ports: if 'enable' in status: @@ -218,7 +218,7 @@ def test_pfcwd_software_single_queue(self, dvs, setup_teardown_test): try: # enable PFC on queues test_queues = 
[3, 4] - self.set_ports_pfc(pfc_queues=test_queues, watchdog_type='software') + self.set_ports_pfc(pfc_queues=test_queues) # verify in asic db self.verify_ports_pfc(test_queues) @@ -259,7 +259,7 @@ def test_pfcwd_software_multi_queue(self, dvs, setup_teardown_test): try: # enable PFC on queues test_queues = [3, 4] - self.set_ports_pfc(pfc_queues=test_queues, watchdog_type='software') + self.set_ports_pfc(pfc_queues=test_queues) # verify in asic db self.verify_ports_pfc(test_queues) From e740ee2c86f84d8fcd99d35c55eedd478536712c Mon Sep 17 00:00:00 2001 From: bingwang Date: Wed, 6 Apr 2022 03:50:52 -0700 Subject: [PATCH 05/15] Add extra lossless PG Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 134 ++++++++++++++++--------------- cfgmgr/buffermgr.h | 2 +- tests/test_buffer_traditional.py | 2 +- 3 files changed, 71 insertions(+), 67 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 0fb39a862a..813ca5b9f2 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -135,7 +135,6 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( */ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) { - vector fvVectorPg, fvVectorProfile; string cable; string speed; @@ -153,94 +152,99 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) } speed = m_speedLookup[port]; - - string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + LOSSLESS_PGS; // key format is pg_lossless___profile string buffer_profile_key = "pg_lossless_" + speed + "_" + cable + "_profile"; string profile_ref = buffer_profile_key; + + vector lossless_pgs = tokenize(LOSSLESS_PGS, ','); + for (auto lossless_pg : lossless_pgs) + { + vector fvVectorPg, fvVectorProfile; + string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; - m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); + m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); - if (!admin_up && m_platform 
== "mellanox") - { - // Remove the entry in BUFFER_PG table if any - if (!fvVectorPg.empty()) + if (!admin_up && m_platform == "mellanox") { - for (auto &prop : fvVectorPg) + // Remove the entry in BUFFER_PG table if any + if (!fvVectorPg.empty()) { - if (fvField(prop) == "profile") + for (auto &prop : fvVectorPg) { - if (fvValue(prop) == profile_ref) + if (fvField(prop) == "profile") { - SWSS_LOG_NOTICE("Removing PG %s from port %s which is administrative down", buffer_pg_key.c_str(), port.c_str()); - m_cfgBufferPgTable.del(buffer_pg_key); - } - else - { - SWSS_LOG_NOTICE("Not default profile %s is configured on PG %s, won't reclaim buffer", fvValue(prop).c_str(), buffer_pg_key.c_str()); + if (fvValue(prop) == profile_ref) + { + SWSS_LOG_NOTICE("Removing PG %s from port %s which is administrative down", buffer_pg_key.c_str(), port.c_str()); + m_cfgBufferPgTable.del(buffer_pg_key); + } + else + { + SWSS_LOG_NOTICE("Not default profile %s is configured on PG %s, won't reclaim buffer", fvValue(prop).c_str(), buffer_pg_key.c_str()); + } } } } - } - return task_process_status::task_success; - } - - if (m_pgProfileLookup.count(speed) == 0 || m_pgProfileLookup[speed].count(cable) == 0) - { - SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. 
No PG profile configured for speed %s and cable length %s", - port.c_str(), speed.c_str(), cable.c_str()); - return task_process_status::task_invalid_entry; - } - - // check if profile already exists - if yes - skip creation - m_cfgBufferProfileTable.get(buffer_profile_key, fvVectorProfile); - // Create record in BUFFER_PROFILE table - if (fvVectorProfile.size() == 0) - { - SWSS_LOG_NOTICE("Creating new profile '%s'", buffer_profile_key.c_str()); + continue; + } - string mode = getPgPoolMode(); - if (mode.empty()) + if (m_pgProfileLookup.count(speed) == 0 || m_pgProfileLookup[speed].count(cable) == 0) { - // this should never happen if switch initialized properly - SWSS_LOG_INFO("PG lossless pool is not yet created"); - return task_process_status::task_need_retry; + SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s", + port.c_str(), speed.c_str(), cable.c_str()); + return task_process_status::task_invalid_entry; } - // profile threshold field name - mode += "_th"; + // check if profile already exists - if yes - skip creation + m_cfgBufferProfileTable.get(buffer_profile_key, fvVectorProfile); + // Create record in BUFFER_PROFILE table + if (fvVectorProfile.size() == 0) + { + SWSS_LOG_NOTICE("Creating new profile '%s'", buffer_profile_key.c_str()); - fvVectorProfile.push_back(make_pair("pool", INGRESS_LOSSLESS_PG_POOL_NAME)); - fvVectorProfile.push_back(make_pair("xon", m_pgProfileLookup[speed][cable].xon)); - if (m_pgProfileLookup[speed][cable].xon_offset.length() > 0) { - fvVectorProfile.push_back(make_pair("xon_offset", - m_pgProfileLookup[speed][cable].xon_offset)); + string mode = getPgPoolMode(); + if (mode.empty()) + { + // this should never happen if switch initialized properly + SWSS_LOG_INFO("PG lossless pool is not yet created"); + return task_process_status::task_need_retry; + } + + // profile threshold field name + mode += "_th"; + + fvVectorProfile.push_back(make_pair("pool", 
INGRESS_LOSSLESS_PG_POOL_NAME)); + fvVectorProfile.push_back(make_pair("xon", m_pgProfileLookup[speed][cable].xon)); + if (m_pgProfileLookup[speed][cable].xon_offset.length() > 0) { + fvVectorProfile.push_back(make_pair("xon_offset", + m_pgProfileLookup[speed][cable].xon_offset)); + } + fvVectorProfile.push_back(make_pair("xoff", m_pgProfileLookup[speed][cable].xoff)); + fvVectorProfile.push_back(make_pair("size", m_pgProfileLookup[speed][cable].size)); + fvVectorProfile.push_back(make_pair(mode, m_pgProfileLookup[speed][cable].threshold)); + m_cfgBufferProfileTable.set(buffer_profile_key, fvVectorProfile); + } + else + { + SWSS_LOG_NOTICE("Reusing existing profile '%s'", buffer_profile_key.c_str()); } - fvVectorProfile.push_back(make_pair("xoff", m_pgProfileLookup[speed][cable].xoff)); - fvVectorProfile.push_back(make_pair("size", m_pgProfileLookup[speed][cable].size)); - fvVectorProfile.push_back(make_pair(mode, m_pgProfileLookup[speed][cable].threshold)); - m_cfgBufferProfileTable.set(buffer_profile_key, fvVectorProfile); - } - else - { - SWSS_LOG_NOTICE("Reusing existing profile '%s'", buffer_profile_key.c_str()); - } - /* Check if PG Mapping is already then log message and return. */ - for (auto& prop : fvVectorPg) - { - if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop))) + /* Check if PG Mapping is already then log message and return. 
*/ + for (auto& prop : fvVectorPg) { - SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str()); - return task_process_status::task_success; + if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop))) + { + SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str()); + continue; + } } - } - fvVectorPg.clear(); + fvVectorPg.clear(); - fvVectorPg.push_back(make_pair("profile", profile_ref)); - m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); + fvVectorPg.push_back(make_pair("profile", profile_ref)); + m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); + } return task_process_status::task_success; } diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index e7f04465ef..c01db699d5 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -11,7 +11,7 @@ namespace swss { #define INGRESS_LOSSLESS_PG_POOL_NAME "ingress_lossless_pool" -#define LOSSLESS_PGS "3-4" +#define LOSSLESS_PGS "2,3-4,6" #define BUFFERMGR_TIMER_PERIOD 10 diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 3defae0c80..26513168b7 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -3,7 +3,7 @@ class TestBuffer(object): - LOSSLESS_PGS = [3, 4] + LOSSLESS_PGS = [2, 3, 4, 6] INTF = "Ethernet0" def setup_db(self, dvs): From ad9146fb554c2d0816024ef2d02666fc39744622 Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 7 Apr 2022 12:26:07 +0000 Subject: [PATCH 06/15] Enable lossless PG only on pfc_enable queues Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 23 ++++++++++++++++++++--- cfgmgr/buffermgr.h | 6 ++++-- tests/test_buffer_traditional.py | 11 ++++++++--- 3 files changed, 32 insertions(+), 8 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 813ca5b9f2..f66ca2c25c 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -18,6 +18,7 @@ using namespace swss; BufferMgr::BufferMgr(DBConnector *cfgDb, DBConnector *applDb, string 
pg_lookup_file, const vector &tableNames) : Orch(cfgDb, tableNames), m_cfgPortTable(cfgDb, CFG_PORT_TABLE_NAME), + m_cfgPortQosMapTable(cfgDb, CFG_PORT_QOS_MAP_TABLE_NAME), m_cfgCableLenTable(cfgDb, CFG_PORT_CABLE_LEN_TABLE_NAME), m_cfgBufferProfileTable(cfgDb, CFG_BUFFER_PROFILE_TABLE_NAME), m_cfgBufferPgTable(cfgDb, CFG_BUFFER_PG_TABLE_NAME), @@ -133,7 +134,7 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( } } */ -task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) +task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up, const string& pfc_enable) { string cable; string speed; @@ -156,7 +157,7 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) string buffer_profile_key = "pg_lossless_" + speed + "_" + cable + "_profile"; string profile_ref = buffer_profile_key; - vector lossless_pgs = tokenize(LOSSLESS_PGS, ','); + vector lossless_pgs = tokenize(pfc_enable, ','); for (auto lossless_pg : lossless_pgs) { vector fvVectorPg, fvVectorProfile; @@ -350,6 +351,21 @@ void BufferMgr::doBufferMetaTask(Consumer &consumer) } } +/* +Read pfc_enable entry from PORT_QOS_MAP to decide on which queue to +apply lossless PG. 
+Return the hardcoded value '3-4' if failed to get that value +*/ +string BufferMgr::getPfcEnableQueuesForPort(std::string port) +{ + string pfc_enable; + if (!m_cfgPortQosMapTable.hget(port, "pfc_enable", pfc_enable)) + { + return LOSSLESS_PGS; + } + return pfc_enable; +} + void BufferMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -442,7 +458,8 @@ void BufferMgr::doTask(Consumer &consumer) if (m_speedLookup.count(port) != 0) { // create/update profile for port - task_status = doSpeedUpdateTask(port, admin_up); + string lossless_pgs = getPfcEnableQueuesForPort(port); + task_status = doSpeedUpdateTask(port, admin_up, lossless_pgs); } if (task_status != task_process_status::task_success) diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index c01db699d5..877d459c47 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -11,7 +11,7 @@ namespace swss { #define INGRESS_LOSSLESS_PG_POOL_NAME "ingress_lossless_pool" -#define LOSSLESS_PGS "2,3-4,6" +#define LOSSLESS_PGS "3-4" #define BUFFERMGR_TIMER_PERIOD 10 @@ -43,6 +43,7 @@ class BufferMgr : public Orch Table m_cfgBufferProfileTable; Table m_cfgBufferPgTable; Table m_cfgLosslessPgPoolTable; + Table m_cfgPortQosMapTable; ProducerStateTable m_applBufferProfileTable; ProducerStateTable m_applBufferPgTable; @@ -60,13 +61,14 @@ class BufferMgr : public Orch std::string getPgPoolMode(); void readPgProfileLookupFile(std::string); task_process_status doCableTask(std::string port, std::string cable_length); - task_process_status doSpeedUpdateTask(std::string port, bool admin_up); + task_process_status doSpeedUpdateTask(std::string port, bool admin_up, const std::string &pfc_enable); void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable); void transformSeperator(std::string &name); void doTask(Consumer &consumer); void doBufferMetaTask(Consumer &consumer); + std::string getPfcEnableQueuesForPort(std::string port); }; } diff --git a/tests/test_buffer_traditional.py 
b/tests/test_buffer_traditional.py index 26513168b7..8822601cb6 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -3,7 +3,7 @@ class TestBuffer(object): - LOSSLESS_PGS = [2, 3, 4, 6] + lossless_pgs = [] INTF = "Ethernet0" def setup_db(self, dvs): @@ -14,6 +14,10 @@ def setup_db(self, dvs): # enable PG watermark self.set_pg_wm_status('enable') + + def get_pfc_enable_queues(self): + qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF) + self.lossless_pgs = qos_map['pfc_enable'].split(',') def get_pg_oid(self, pg): fvs = dict() @@ -61,7 +65,7 @@ def setup_teardown_test(self, dvs): try: self.setup_db(dvs) pg_name_map = dict() - for pg in self.LOSSLESS_PGS: + for pg in self.lossless_pgs: pg_name = "{}:{}".format(self.INTF, pg) pg_name_map[pg_name] = self.get_pg_oid(pg_name) yield pg_name_map @@ -119,7 +123,8 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) # buffer pgs should still point to the original buffer profile - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":3-4", {"profile": orig_lossless_profile}) + for pg in self.lossless_pgs: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) fvs = dict() for pg in self.pg_name_map: fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] From 446c55b13def20fc1308d13663c21d9927cf06df Mon Sep 17 00:00:00 2001 From: bingwang Date: Sun, 17 Apr 2022 23:10:12 -0700 Subject: [PATCH 07/15] Retry if PORT_QOS_TABLE not available Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 65 +++++++++++++++++++++++++++++++++++++------ cfgmgr/buffermgr.h | 8 ++++-- cfgmgr/buffermgrd.cpp | 3 +- 3 files changed, 64 insertions(+), 12 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index f66ca2c25c..9dd041035e 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ 
-18,7 +18,6 @@ using namespace swss; BufferMgr::BufferMgr(DBConnector *cfgDb, DBConnector *applDb, string pg_lookup_file, const vector &tableNames) : Orch(cfgDb, tableNames), m_cfgPortTable(cfgDb, CFG_PORT_TABLE_NAME), - m_cfgPortQosMapTable(cfgDb, CFG_PORT_QOS_MAP_TABLE_NAME), m_cfgCableLenTable(cfgDb, CFG_PORT_CABLE_LEN_TABLE_NAME), m_cfgBufferProfileTable(cfgDb, CFG_BUFFER_PROFILE_TABLE_NAME), m_cfgBufferPgTable(cfgDb, CFG_BUFFER_PG_TABLE_NAME), @@ -351,19 +350,52 @@ void BufferMgr::doBufferMetaTask(Consumer &consumer) } } +/* +Parse PORT_QOS_MAP to retrieve on which queue PFC is enabled, and +cache it in a map +*/ +void BufferMgr::doPortQosTableTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + string port_name = kfvKey(tuple); + string op = kfvOp(tuple); + if (op == SET_COMMAND) + { + for (auto itp : kfvFieldsValues(tuple)) + { + if (fvField(itp) == "pfc_enable") + { + m_portPfcStatus[port_name] = fvValue(itp); + SWSS_LOG_INFO("Got pfc status for port %s status %s", port_name.c_str(), fvValue(itp).c_str()); + break; + } + } + } + it = consumer.m_toSync.erase(it); + } + +} + /* Read pfc_enable entry from PORT_QOS_MAP to decide on which queue to apply lossless PG. 
-Return the hardcoded value '3-4' if failed to get that value +Return the true if PORT_QOS_MAP is already available, otherwise return false */ -string BufferMgr::getPfcEnableQueuesForPort(std::string port) +bool BufferMgr::getPfcEnableQueuesForPort(std::string port, std::string &pfc_status) { - string pfc_enable; - if (!m_cfgPortQosMapTable.hget(port, "pfc_enable", pfc_enable)) + auto iter = m_portPfcStatus.find(port); + if (iter != m_portPfcStatus.end()) { - return LOSSLESS_PGS; + pfc_status = iter->second; + return true; } - return pfc_enable; + + return false; } void BufferMgr::doTask(Consumer &consumer) @@ -419,6 +451,12 @@ void BufferMgr::doTask(Consumer &consumer) return; } + if (table_name == CFG_PORT_QOS_MAP_TABLE_NAME) + { + doPortQosTableTask(consumer); + return; + } + auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -458,8 +496,17 @@ void BufferMgr::doTask(Consumer &consumer) if (m_speedLookup.count(port) != 0) { // create/update profile for port - string lossless_pgs = getPfcEnableQueuesForPort(port); - task_status = doSpeedUpdateTask(port, admin_up, lossless_pgs); + string pfc_enable_status; + if (!getPfcEnableQueuesForPort(port, pfc_enable_status)) + { + // PORT_QOS_MAP is not ready yet. 
retry is required + SWSS_LOG_NOTICE("pfc_enable status is not available for port %s", port.c_str()); + task_status = task_process_status::task_need_retry; + } + else + { + task_status = doSpeedUpdateTask(port, admin_up, lossless_pgs); + } } if (task_status != task_process_status::task_success) diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index 877d459c47..c9e361cd89 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -28,6 +28,7 @@ typedef std::map pg_profile_lookup_t; typedef std::map port_cable_length_t; typedef std::map port_speed_t; +typedef std::map port_pfc_status_t; class BufferMgr : public Orch { @@ -43,7 +44,6 @@ class BufferMgr : public Orch Table m_cfgBufferProfileTable; Table m_cfgBufferPgTable; Table m_cfgLosslessPgPoolTable; - Table m_cfgPortQosMapTable; ProducerStateTable m_applBufferProfileTable; ProducerStateTable m_applBufferPgTable; @@ -62,12 +62,16 @@ class BufferMgr : public Orch void readPgProfileLookupFile(std::string); task_process_status doCableTask(std::string port, std::string cable_length); task_process_status doSpeedUpdateTask(std::string port, bool admin_up, const std::string &pfc_enable); - void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable); + void doBufferTableTask(Consumer &consumer); void transformSeperator(std::string &name); void doTask(Consumer &consumer); void doBufferMetaTask(Consumer &consumer); + + port_pfc_status_t m_portPfcStatus; + void doPortQosTableTask(Consumer &consumer); + std::string getPfcEnableQueuesForPort(std::string port); }; diff --git a/cfgmgr/buffermgrd.cpp b/cfgmgr/buffermgrd.cpp index 05932a9e3c..eb5de60b65 100644 --- a/cfgmgr/buffermgrd.cpp +++ b/cfgmgr/buffermgrd.cpp @@ -215,7 +215,8 @@ int main(int argc, char **argv) CFG_BUFFER_QUEUE_TABLE_NAME, CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, - CFG_DEVICE_METADATA_TABLE_NAME + CFG_DEVICE_METADATA_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME }; cfgOrchList.emplace_back(new 
BufferMgr(&cfgDb, &applDb, pg_lookup_file, cfg_buffer_tables)); } From fc484c85d34b4f7155100eb502492f37d7bc8a51 Mon Sep 17 00:00:00 2001 From: bingwang Date: Mon, 18 Apr 2022 07:54:05 +0000 Subject: [PATCH 08/15] Update vstest Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 8 ++++---- cfgmgr/buffermgr.h | 4 ++-- tests/test_buffer_traditional.py | 10 +++++++++- 3 files changed, 15 insertions(+), 7 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 9dd041035e..ce16f60dbe 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -363,7 +363,7 @@ void BufferMgr::doPortQosTableTask(Consumer &consumer) { KeyOpFieldsValuesTuple tuple = it->second; string port_name = kfvKey(tuple); - string op = kfvOp(t); + string op = kfvOp(tuple); if (op == SET_COMMAND) { for (auto itp : kfvFieldsValues(tuple)) @@ -371,7 +371,7 @@ void BufferMgr::doPortQosTableTask(Consumer &consumer) if (fvField(itp) == "pfc_enable") { m_portPfcStatus[port_name] = fvValue(itp); - SWSS_LOG_INFO("Got pfc status for port %s status %s", port_name.c_str(), fvValue(itp).c_str()); + SWSS_LOG_INFO("Got pfc enable status for port %s status %s", port_name.c_str(), fvValue(itp).c_str()); break; } } @@ -500,12 +500,12 @@ void BufferMgr::doTask(Consumer &consumer) if (!getPfcEnableQueuesForPort(port, pfc_enable_status)) { // PORT_QOS_MAP is not ready yet. 
retry is required - SWSS_LOG_NOTICE("pfc_enable status is not available for port %s", port.c_str()); + SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); task_status = task_process_status::task_need_retry; } else { - task_status = doSpeedUpdateTask(port, admin_up, lossless_pgs); + task_status = doSpeedUpdateTask(port, admin_up, pfc_enable_status); } } diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index c9e361cd89..d5e677e251 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -62,7 +62,7 @@ class BufferMgr : public Orch void readPgProfileLookupFile(std::string); task_process_status doCableTask(std::string port, std::string cable_length); task_process_status doSpeedUpdateTask(std::string port, bool admin_up, const std::string &pfc_enable); - void doBufferTableTask(Consumer &consumer); + void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable); void transformSeperator(std::string &name); @@ -72,7 +72,7 @@ class BufferMgr : public Orch port_pfc_status_t m_portPfcStatus; void doPortQosTableTask(Consumer &consumer); - std::string getPfcEnableQueuesForPort(std::string port); + bool getPfcEnableQueuesForPort(std::string port, std::string &pfc_enable_status); }; } diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 52368eeb87..e15c1cd3f9 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -60,10 +60,18 @@ def change_cable_len(self, cable_len): fvs[self.INTF] = cable_len self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs) + def set_port_qos_table(self): + pfc_enable = "2,3,4,6" + fvs=dict() + fvs['pfc_enable'] = pfc_enable + self.config_db.update_entry("PORT_QOS_MAP", self.INTF, fvs) + @pytest.fixture def setup_teardown_test(self, dvs): try: self.setup_db(dvs) + self.set_port_qos_table() + self.get_pfc_enable_queues() pg_name_map = dict() for pg in self.lossless_pgs: pg_name = "{}:{}".format(self.INTF, pg) @@ -121,7 +129,7 @@ def 
test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): test_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, test_cable_len) # buffer profile should not get created self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) - + # buffer pgs should still point to the original buffer profile for pg in self.lossless_pgs: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) From a5d902f363dc73c2a1efb0e487a2f86dfec20dcf Mon Sep 17 00:00:00 2001 From: bingwang Date: Thu, 21 Apr 2022 08:30:27 +0000 Subject: [PATCH 09/15] Address comments Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 147 +++++++++++++++++++++++-------- cfgmgr/buffermgr.h | 8 +- tests/test_buffer_traditional.py | 49 ++++++++--- 3 files changed, 152 insertions(+), 52 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index ce16f60dbe..cd1324b12e 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -133,10 +133,12 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( } } */ -task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up, const string& pfc_enable) +task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) { string cable; string speed; + string pfc_enable; + string applied_lossless_pg_status; if (m_cableLenLookup.count(port) == 0) { @@ -151,22 +153,30 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up, con return task_process_status::task_success; } + if (m_portPfcStatus.count(port) == 0) + { + // PORT_QOS_MAP is not ready yet. 
retry is required + SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); + return task_process_status::task_need_retry; + } + pfc_enable = m_portPfcStatus[port]; + speed = m_speedLookup[port]; // key format is pg_lossless___profile string buffer_profile_key = "pg_lossless_" + speed + "_" + cable + "_profile"; string profile_ref = buffer_profile_key; vector lossless_pgs = tokenize(pfc_enable, ','); - for (auto lossless_pg : lossless_pgs) - { - vector fvVectorPg, fvVectorProfile; - string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; - - m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); - if (!admin_up && m_platform == "mellanox") + if (!admin_up && m_platform == "mellanox") + { + for (auto lossless_pg : lossless_pgs) { // Remove the entry in BUFFER_PG table if any + vector fvVectorPg; + string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; + + m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); if (!fvVectorPg.empty()) { for (auto &prop : fvVectorPg) @@ -185,9 +195,26 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up, con } } } - - continue; } + // Clear applied recorded + if (m_portLosslessPgs.count(port) != 0) + { + m_portLosslessPgs.erase(port); + }; + // Clear recorded pg for admin_down port + if (m_portPgLookup.count(port) != 0) + { + m_portPgLookup.erase(port); + } + return task_process_status::task_success; + } + + for (auto lossless_pg : lossless_pgs) + { + vector fvVectorPg, fvVectorProfile; + string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; + + m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); if (m_pgProfileLookup.count(speed) == 0 || m_pgProfileLookup[speed].count(cable) == 0) { @@ -244,10 +271,78 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up, con fvVectorPg.push_back(make_pair("profile", profile_ref)); m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); + // 
Save the applied lossless PG + m_portLosslessPgs[port] = pfc_enable; + // Save the profile_ref + m_portPgLookup[port] = profile_ref; } return task_process_status::task_success; } +void BufferMgr::updatePortLosslessPg(const string &port) +{ + string applied_lossless_pg_status; + // Read the lossless profile that has been applied to port + auto iter = m_portLosslessPgs.find(port); + if (iter == m_portLosslessPgs.end()) + { + // No lossless pg is applied to port yet, no need to update + return; + } + applied_lossless_pg_status = iter->second; + + string new_pfc_enable_flag = m_portPfcStatus[port]; + + vector new_lossless_pgs = tokenize(new_pfc_enable_flag, ','); + std::sort(new_lossless_pgs.begin(), new_lossless_pgs.end()); + + vector applied_lossless_pgs = tokenize(applied_lossless_pg_status, ','); + std::sort(applied_lossless_pgs.begin(), applied_lossless_pgs.end()); + + // Compare the sorted items, and update if diff is found + if (new_lossless_pgs == applied_lossless_pgs) + { + // No diff detected + return; + } + + iter = m_portPgLookup.find(port); + if (iter == m_portPgLookup.end()) + { + // Ignore if PG profile is not generated for the port + return; + } + string profile_ref = m_portPgLookup[port]; + string buffer_pg_key; + vector fvVectorPg; + // Remove the already applied BUFFER_PG + for (auto pg : applied_lossless_pgs) + { + buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + pg; + fvVectorPg.clear(); + m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); + + // Remove the entry in BUFFER_PG table if any + if (!fvVectorPg.empty()) + { + m_cfgBufferPgTable.del(buffer_pg_key); + } + SWSS_LOG_INFO("Removed stale PG profile for port %s PG %s", port.c_str(), pg.c_str()); + } + + // Apply PG profile to new lossless pgs + for (auto pg : new_lossless_pgs) + { + buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + pg; + fvVectorPg.clear(); + fvVectorPg.push_back(make_pair("profile", profile_ref)); + m_cfgBufferPgTable.set(buffer_pg_key, 
fvVectorPg); + SWSS_LOG_INFO("Applied PG profile for port %s PG %s", port.c_str(), pg.c_str()); + } + // Record the PGs + m_portLosslessPgs[port] = new_pfc_enable_flag; +} + void BufferMgr::transformSeperator(string &name) { size_t pos; @@ -372,6 +467,8 @@ void BufferMgr::doPortQosTableTask(Consumer &consumer) { m_portPfcStatus[port_name] = fvValue(itp); SWSS_LOG_INFO("Got pfc enable status for port %s status %s", port_name.c_str(), fvValue(itp).c_str()); + // Check if pfc_enable flag is updated + updatePortLosslessPg(port_name); break; } } @@ -381,23 +478,6 @@ void BufferMgr::doPortQosTableTask(Consumer &consumer) } -/* -Read pfc_enable entry from PORT_QOS_MAP to decide on which queue to -apply lossless PG. -Return the true if PORT_QOS_MAP is already available, otherwise return false -*/ -bool BufferMgr::getPfcEnableQueuesForPort(std::string port, std::string &pfc_status) -{ - auto iter = m_portPfcStatus.find(port); - if (iter != m_portPfcStatus.end()) - { - pfc_status = iter->second; - return true; - } - - return false; -} - void BufferMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -495,18 +575,7 @@ void BufferMgr::doTask(Consumer &consumer) if (m_speedLookup.count(port) != 0) { - // create/update profile for port - string pfc_enable_status; - if (!getPfcEnableQueuesForPort(port, pfc_enable_status)) - { - // PORT_QOS_MAP is not ready yet. 
retry is required - SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); - task_status = task_process_status::task_need_retry; - } - else - { - task_status = doSpeedUpdateTask(port, admin_up, pfc_enable_status); - } + task_status = doSpeedUpdateTask(port, admin_up); } if (task_status != task_process_status::task_success) diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index d5e677e251..5dbea52b80 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -29,6 +29,7 @@ typedef std::map pg_profile_lookup_t; typedef std::map port_cable_length_t; typedef std::map port_speed_t; typedef std::map port_pfc_status_t; +typedef std::map port_pg_profile_t; class BufferMgr : public Orch { @@ -61,7 +62,7 @@ class BufferMgr : public Orch std::string getPgPoolMode(); void readPgProfileLookupFile(std::string); task_process_status doCableTask(std::string port, std::string cable_length); - task_process_status doSpeedUpdateTask(std::string port, bool admin_up, const std::string &pfc_enable); + task_process_status doSpeedUpdateTask(std::string port, bool admin_up); void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable); void transformSeperator(std::string &name); @@ -70,9 +71,12 @@ class BufferMgr : public Orch void doBufferMetaTask(Consumer &consumer); port_pfc_status_t m_portPfcStatus; + port_pfc_status_t m_portLosslessPgs; void doPortQosTableTask(Consumer &consumer); - bool getPfcEnableQueuesForPort(std::string port, std::string &pfc_enable_status); + port_pg_profile_t m_portPgLookup; + void updatePortLosslessPg(const std::string &port_name); + }; } diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index e15c1cd3f9..68c045c0b6 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -17,7 +17,7 @@ def setup_db(self, dvs): def get_pfc_enable_queues(self): qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF) - self.lossless_pgs = qos_map['pfc_enable'].split(',') + 
return qos_map['pfc_enable'].split(',') def get_pg_oid(self, pg): fvs = dict() @@ -60,22 +60,25 @@ def change_cable_len(self, cable_len): fvs[self.INTF] = cable_len self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs) - def set_port_qos_table(self): - pfc_enable = "2,3,4,6" + def set_port_qos_table(self, pfc_enable_flag): fvs=dict() - fvs['pfc_enable'] = pfc_enable + fvs['pfc_enable'] = pfc_enable_flag self.config_db.update_entry("PORT_QOS_MAP", self.INTF, fvs) - + self.lossless_pgs = pfc_enable_flag.split(',') + + def get_pg_name_map(self): + pg_name_map = dict() + for pg in self.lossless_pgs: + pg_name = "{}:{}".format(self.INTF, pg) + pg_name_map[pg_name] = self.get_pg_oid(pg_name) + return pg_name_map + @pytest.fixture def setup_teardown_test(self, dvs): try: self.setup_db(dvs) - self.set_port_qos_table() - self.get_pfc_enable_queues() - pg_name_map = dict() - for pg in self.lossless_pgs: - pg_name = "{}:{}".format(self.INTF, pg) - pg_name_map[pg_name] = self.get_pg_oid(pg_name) + self.set_port_qos_table('2,3,4,6') + pg_name_map = self.get_pg_name_map() yield pg_name_map finally: self.teardown() @@ -138,6 +141,30 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs) + # Verify pgs are updated if pfc_enable is updated + orig_lossless_pgs = self.lossless_pgs + orig_pg_name_map = self.pg_name_map + + self.set_port_qos_table('0,2,3,7') + self.pg_name_map = self.get_pg_name_map() + time.sleep(1) + for pg in orig_lossless_pgs: + if pg not in self.lossless_pgs: + assert len(self.app_db.get_entry("BUFFER_PG_TABLE", self.INTF + ":" + pg)) == 0 + for pg in orig_pg_name_map: + if pg not in self.pg_name_map: + fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = "oid:0x0" + 
self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", orig_pg_name_map[pg], fvs) + + # Update buf_pg_profile + self.get_asic_buf_pg_profiles() + + for pg in self.lossless_pgs: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) + for pg in self.pg_name_map: + fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] + self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs) + # change cable length to 'cable_len_before_test' self.change_cable_len(cable_len_before_test) From 956eeff28dfae32708c97ec779a262891a02be1a Mon Sep 17 00:00:00 2001 From: bingwang Date: Fri, 22 Apr 2022 09:33:22 +0000 Subject: [PATCH 10/15] Remove pfc_enable update support Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 149 ++++++++----------------------- cfgmgr/buffermgr.h | 5 -- tests/test_buffer_traditional.py | 89 ++++++++++++------ 3 files changed, 98 insertions(+), 145 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index cd1324b12e..1a7e36bd5d 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -196,22 +196,49 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) } } } - // Clear applied recorded - if (m_portLosslessPgs.count(port) != 0) + + return task_process_status::task_success; + } + + vector fvVectorProfile; + // check if profile already exists - if yes - skip creation + m_cfgBufferProfileTable.get(buffer_profile_key, fvVectorProfile); + // Create record in BUFFER_PROFILE table + if (fvVectorProfile.size() == 0) + { + SWSS_LOG_NOTICE("Creating new profile '%s'", buffer_profile_key.c_str()); + + string mode = getPgPoolMode(); + if (mode.empty()) { - m_portLosslessPgs.erase(port); - }; - // Clear recorded pg for admin_down port - if (m_portPgLookup.count(port) != 0) + // this should never happen if switch initialized properly + SWSS_LOG_INFO("PG 
lossless pool is not yet created"); + return task_process_status::task_need_retry; + } + + // profile threshold field name + mode += "_th"; + + fvVectorProfile.push_back(make_pair("pool", INGRESS_LOSSLESS_PG_POOL_NAME)); + fvVectorProfile.push_back(make_pair("xon", m_pgProfileLookup[speed][cable].xon)); + if (m_pgProfileLookup[speed][cable].xon_offset.length() > 0) { - m_portPgLookup.erase(port); + fvVectorProfile.push_back(make_pair("xon_offset", + m_pgProfileLookup[speed][cable].xon_offset)); } - return task_process_status::task_success; + fvVectorProfile.push_back(make_pair("xoff", m_pgProfileLookup[speed][cable].xoff)); + fvVectorProfile.push_back(make_pair("size", m_pgProfileLookup[speed][cable].size)); + fvVectorProfile.push_back(make_pair(mode, m_pgProfileLookup[speed][cable].threshold)); + m_cfgBufferProfileTable.set(buffer_profile_key, fvVectorProfile); + } + else + { + SWSS_LOG_NOTICE("Reusing existing profile '%s'", buffer_profile_key.c_str()); } for (auto lossless_pg : lossless_pgs) { - vector fvVectorPg, fvVectorProfile; + vector fvVectorPg; string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); @@ -223,40 +250,6 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) return task_process_status::task_invalid_entry; } - // check if profile already exists - if yes - skip creation - m_cfgBufferProfileTable.get(buffer_profile_key, fvVectorProfile); - // Create record in BUFFER_PROFILE table - if (fvVectorProfile.size() == 0) - { - SWSS_LOG_NOTICE("Creating new profile '%s'", buffer_profile_key.c_str()); - - string mode = getPgPoolMode(); - if (mode.empty()) - { - // this should never happen if switch initialized properly - SWSS_LOG_INFO("PG lossless pool is not yet created"); - return task_process_status::task_need_retry; - } - - // profile threshold field name - mode += "_th"; - - fvVectorProfile.push_back(make_pair("pool", 
INGRESS_LOSSLESS_PG_POOL_NAME)); - fvVectorProfile.push_back(make_pair("xon", m_pgProfileLookup[speed][cable].xon)); - if (m_pgProfileLookup[speed][cable].xon_offset.length() > 0) { - fvVectorProfile.push_back(make_pair("xon_offset", - m_pgProfileLookup[speed][cable].xon_offset)); - } - fvVectorProfile.push_back(make_pair("xoff", m_pgProfileLookup[speed][cable].xoff)); - fvVectorProfile.push_back(make_pair("size", m_pgProfileLookup[speed][cable].size)); - fvVectorProfile.push_back(make_pair(mode, m_pgProfileLookup[speed][cable].threshold)); - m_cfgBufferProfileTable.set(buffer_profile_key, fvVectorProfile); - } - else - { - SWSS_LOG_NOTICE("Reusing existing profile '%s'", buffer_profile_key.c_str()); - } - /* Check if PG Mapping is already then log message and return. */ for (auto& prop : fvVectorPg) { @@ -271,78 +264,10 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) fvVectorPg.push_back(make_pair("profile", profile_ref)); m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); - // Save the applied lossless PG - m_portLosslessPgs[port] = pfc_enable; - // Save the profile_ref - m_portPgLookup[port] = profile_ref; } return task_process_status::task_success; } -void BufferMgr::updatePortLosslessPg(const string &port) -{ - string applied_lossless_pg_status; - // Read the lossless profile that has been applied to port - auto iter = m_portLosslessPgs.find(port); - if (iter == m_portLosslessPgs.end()) - { - // No lossless pg is applied to port yet, no need to update - return; - } - applied_lossless_pg_status = iter->second; - - string new_pfc_enable_flag = m_portPfcStatus[port]; - - vector new_lossless_pgs = tokenize(new_pfc_enable_flag, ','); - std::sort(new_lossless_pgs.begin(), new_lossless_pgs.end()); - - vector applied_lossless_pgs = tokenize(applied_lossless_pg_status, ','); - std::sort(applied_lossless_pgs.begin(), applied_lossless_pgs.end()); - - // Compare the sorted items, and update if diff is found - if (new_lossless_pgs == 
applied_lossless_pgs) - { - // No diff detected - return; - } - - iter = m_portPgLookup.find(port); - if (iter == m_portPgLookup.end()) - { - // Ignore if PG profile is not generated for the port - return; - } - string profile_ref = m_portPgLookup[port]; - string buffer_pg_key; - vector fvVectorPg; - // Remove the already applied BUFFER_PG - for (auto pg : applied_lossless_pgs) - { - buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + pg; - fvVectorPg.clear(); - m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); - - // Remove the entry in BUFFER_PG table if any - if (!fvVectorPg.empty()) - { - m_cfgBufferPgTable.del(buffer_pg_key); - } - SWSS_LOG_INFO("Removed stale PG profile for port %s PG %s", port.c_str(), pg.c_str()); - } - - // Apply PG profile to new lossless pgs - for (auto pg : new_lossless_pgs) - { - buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + pg; - fvVectorPg.clear(); - fvVectorPg.push_back(make_pair("profile", profile_ref)); - m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); - SWSS_LOG_INFO("Applied PG profile for port %s PG %s", port.c_str(), pg.c_str()); - } - // Record the PGs - m_portLosslessPgs[port] = new_pfc_enable_flag; -} - void BufferMgr::transformSeperator(string &name) { size_t pos; @@ -467,8 +392,6 @@ void BufferMgr::doPortQosTableTask(Consumer &consumer) { m_portPfcStatus[port_name] = fvValue(itp); SWSS_LOG_INFO("Got pfc enable status for port %s status %s", port_name.c_str(), fvValue(itp).c_str()); - // Check if pfc_enable flag is updated - updatePortLosslessPg(port_name); break; } } diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index 5dbea52b80..b30d3c2e87 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -11,7 +11,6 @@ namespace swss { #define INGRESS_LOSSLESS_PG_POOL_NAME "ingress_lossless_pool" -#define LOSSLESS_PGS "3-4" #define BUFFERMGR_TIMER_PERIOD 10 @@ -71,12 +70,8 @@ class BufferMgr : public Orch void doBufferMetaTask(Consumer &consumer); port_pfc_status_t 
m_portPfcStatus; - port_pfc_status_t m_portLosslessPgs; void doPortQosTableTask(Consumer &consumer); - port_pg_profile_t m_portPgLookup; - void updatePortLosslessPg(const std::string &port_name); - }; } diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 68c045c0b6..05bbfa7780 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -14,7 +14,7 @@ def setup_db(self, dvs): # enable PG watermark self.set_pg_wm_status('enable') - + def get_pfc_enable_queues(self): qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF) return qos_map['pfc_enable'].split(',') @@ -72,7 +72,7 @@ def get_pg_name_map(self): pg_name = "{}:{}".format(self.INTF, pg) pg_name_map[pg_name] = self.get_pg_oid(pg_name) return pg_name_map - + @pytest.fixture def setup_teardown_test(self, dvs): try: @@ -132,7 +132,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): test_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, test_cable_len) # buffer profile should not get created self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) - + # buffer pgs should still point to the original buffer profile for pg in self.lossless_pgs: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) @@ -141,30 +141,6 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs) - # Verify pgs are updated if pfc_enable is updated - orig_lossless_pgs = self.lossless_pgs - orig_pg_name_map = self.pg_name_map - - self.set_port_qos_table('0,2,3,7') - self.pg_name_map = self.get_pg_name_map() - time.sleep(1) - for pg in orig_lossless_pgs: - if pg not in self.lossless_pgs: - assert len(self.app_db.get_entry("BUFFER_PG_TABLE", self.INTF 
+ ":" + pg)) == 0 - for pg in orig_pg_name_map: - if pg not in self.pg_name_map: - fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = "oid:0x0" - self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", orig_pg_name_map[pg], fvs) - - # Update buf_pg_profile - self.get_asic_buf_pg_profiles() - - for pg in self.lossless_pgs: - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) - for pg in self.pg_name_map: - fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] - self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs) - # change cable length to 'cable_len_before_test' self.change_cable_len(cable_len_before_test) @@ -192,3 +168,62 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): if orig_speed: dvs.port_field_set(self.INTF, "speed", orig_speed) dvs.port_admin_set(self.INTF, "down") + + # To verify the BUFFER_PG is not hardcoded to 3,4 + # buffermgrd will read 'pfc_enable' entry and apply lossless profile to that queue + def test_buffer_pg_update(self, dvs, setup_teardown_test): + self.pg_name_map = setup_teardown_test + orig_cable_len = None + orig_speed = None + try: + # Retrieve cable len + fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + orig_cable_len = fvs_cable_len[self.INTF] + if orig_cable_len == "0m": + fvs_cable_len[self.INTF] = "300m" + cable_len_before_test = "300m" + self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs_cable_len) + else: + cable_len_before_test = orig_cable_len + + dvs.port_admin_set(self.INTF, "up") + + # Retrieve port speed + fvs_port = self.config_db.get_entry("PORT", self.INTF) + orig_speed = fvs_port["speed"] + + # Make sure the buffer PG has been created + orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_before_test) + self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", 
orig_lossless_profile) + self.orig_profiles = self.get_asic_buf_profile() + + # get the orig buf profiles attached to the pgs + self.get_asic_buf_pg_profiles() + + # Update port speed + if orig_speed == "100000": + test_speed = "40000" + elif orig_speed == "40000": + test_speed = "100000" + # change intf speed to 'test_speed' + dvs.port_field_set(self.INTF, "speed", test_speed) + + # Verify new profile is generated + new_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, cable_len_before_test) + self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) + + # Verify BUFFER_PG is updated + for pg in self.lossless_pgs: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": new_lossless_profile}) + + fvs_negative = {} + for pg in self.pg_name_map: + # verify that buffer pgs do not point to the old profile since we cannot deduce the new profile oid + fvs_negative["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] + self.asic_db.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs_negative) + finally: + if orig_cable_len: + self.change_cable_len(orig_cable_len) + if orig_speed: + dvs.port_field_set(self.INTF, "speed", orig_speed) + dvs.port_admin_set(self.INTF, "down") From 045334f488a6a03f056c22b08a1f64991374101e Mon Sep 17 00:00:00 2001 From: bingwang Date: Fri, 22 Apr 2022 09:59:58 +0000 Subject: [PATCH 11/15] Move check for speed out of for loop Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 1a7e36bd5d..18e92f10c4 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -199,6 +199,13 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) return task_process_status::task_success; } + + if (m_pgProfileLookup.count(speed) == 0 || 
m_pgProfileLookup[speed].count(cable) == 0) + { + SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s", + port.c_str(), speed.c_str(), cable.c_str()); + return task_process_status::task_invalid_entry; + } vector fvVectorProfile; // check if profile already exists - if yes - skip creation @@ -243,13 +250,6 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); - if (m_pgProfileLookup.count(speed) == 0 || m_pgProfileLookup[speed].count(cable) == 0) - { - SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s", - port.c_str(), speed.c_str(), cable.c_str()); - return task_process_status::task_invalid_entry; - } - /* Check if PG Mapping is already then log message and return. */ for (auto& prop : fvVectorPg) { From 05a55b692b517cae0381eca723499779c6165cff Mon Sep 17 00:00:00 2001 From: bingwang Date: Sun, 24 Apr 2022 02:11:34 +0000 Subject: [PATCH 12/15] Remove unused variable Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 1 - cfgmgr/buffermgr.h | 1 - 2 files changed, 2 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 18e92f10c4..5d4eb2b561 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -138,7 +138,6 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) string cable; string speed; string pfc_enable; - string applied_lossless_pg_status; if (m_cableLenLookup.count(port) == 0) { diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index b30d3c2e87..3a619d3804 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -28,7 +28,6 @@ typedef std::map pg_profile_lookup_t; typedef std::map port_cable_length_t; typedef std::map port_speed_t; typedef std::map port_pfc_status_t; -typedef std::map port_pg_profile_t; class BufferMgr : public Orch { From e3aa60f8e04f5b7571ada4d21dbd62096d7e5e65 Mon 
Sep 17 00:00:00 2001 From: bingwang Date: Sun, 24 Apr 2022 03:46:13 +0000 Subject: [PATCH 13/15] Continue loop other port if not succeed Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 5d4eb2b561..96c4c1395f 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -497,32 +497,28 @@ void BufferMgr::doTask(Consumer &consumer) if (m_speedLookup.count(port) != 0) { + // create/update profile for port task_status = doSpeedUpdateTask(port, admin_up); } + } - if (task_status != task_process_status::task_success) - { + switch (task_status) + { + case task_process_status::task_failed: + SWSS_LOG_ERROR("Failed to process table update"); + return; + case task_process_status::task_need_retry: + SWSS_LOG_INFO("Unable to process table update. Will retry..."); + ++it; + break; + case task_process_status::task_invalid_entry: + SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); + it = consumer.m_toSync.erase(it); + break; + default: + it = consumer.m_toSync.erase(it); break; - } } } - - switch (task_status) - { - case task_process_status::task_failed: - SWSS_LOG_ERROR("Failed to process table update"); - return; - case task_process_status::task_need_retry: - SWSS_LOG_INFO("Unable to process table update. 
Will retry..."); - ++it; - break; - case task_process_status::task_invalid_entry: - SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); - it = consumer.m_toSync.erase(it); - break; - default: - it = consumer.m_toSync.erase(it); - break; - } } } From 3a89428fdd4361265b2896e6f85c8cfc460f1cee Mon Sep 17 00:00:00 2001 From: bingwang Date: Sun, 24 Apr 2022 04:38:24 +0000 Subject: [PATCH 14/15] Update vs test Signed-off-by: bingwang --- tests/test_buffer_traditional.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 05bbfa7780..fab1db6af6 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -55,9 +55,11 @@ def get_asic_buf_pg_profiles(self): buf_pg_entries = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg]) self.buf_pg_profile[pg] = buf_pg_entries["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] - def change_cable_len(self, cable_len): + def change_cable_len(self, cable_len, extra_port=None): fvs = dict() fvs[self.INTF] = cable_len + if extra_port: + fvs[extra_port] = cable_len self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs) def set_port_qos_table(self, pfc_enable_flag): @@ -175,17 +177,23 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None + test_speed = None + test_port = "Ethernet4" try: # Retrieve cable len fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE") orig_cable_len = fvs_cable_len[self.INTF] if orig_cable_len == "0m": - fvs_cable_len[self.INTF] = "300m" - cable_len_before_test = "300m" + cable_len_for_test = "300m" + fvs_cable_len[self.INTF] = cable_len_for_test + fvs_cable_len[test_port] = cable_len_for_test + self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs_cable_len) else: - cable_len_before_test = orig_cable_len - + 
cable_len_for_test = orig_cable_len + # Ethernet4 is set to up, while no 'pfc_enable' available. `Ethernet0` is not supposed to be impacted + dvs.port_admin_set(test_port, "up") + dvs.port_admin_set(self.INTF, "up") # Retrieve port speed @@ -193,7 +201,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): orig_speed = fvs_port["speed"] # Make sure the buffer PG has been created - orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_before_test) + orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_for_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) self.orig_profiles = self.get_asic_buf_profile() @@ -209,7 +217,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): dvs.port_field_set(self.INTF, "speed", test_speed) # Verify new profile is generated - new_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, cable_len_before_test) + new_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, cable_len_for_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) # Verify BUFFER_PG is updated @@ -223,7 +231,8 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): self.asic_db.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs_negative) finally: if orig_cable_len: - self.change_cable_len(orig_cable_len) + self.change_cable_len(orig_cable_len, test_port) if orig_speed: dvs.port_field_set(self.INTF, "speed", orig_speed) dvs.port_admin_set(self.INTF, "down") + dvs.port_admin_set(test_port, "down") From b76cc51b6e80c1d2b584cba80f94c4b65e72a502 Mon Sep 17 00:00:00 2001 From: bingwang Date: Tue, 26 Apr 2022 08:23:23 +0000 Subject: [PATCH 15/15] Call doSpeedUpdateTask in doPortQosTableTask Signed-off-by: bingwang --- cfgmgr/buffermgr.cpp | 34 ++++++++++++++++++++++++-------- cfgmgr/buffermgr.h | 4 +++- tests/test_buffer_traditional.py | 28 
++++++++++ 3 files changed, 47 insertions(+), 19 deletions(-) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 96c4c1395f..5c7d6ae9e6 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -133,7 +133,7 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( } } */ -task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) +task_process_status BufferMgr::doSpeedUpdateTask(string port) { string cable; string speed; @@ -152,12 +152,21 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) return task_process_status::task_success; } - if (m_portPfcStatus.count(port) == 0) + if (m_portStatusLookup.count(port) == 0) { - // PORT_QOS_MAP is not ready yet. retry is required + // admin_status is not available yet. This can happen when notification of `PORT_QOS_MAP` table + // comes first. SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); return task_process_status::task_need_retry; } + + if (m_portPfcStatus.count(port) == 0) + { + // PORT_QOS_MAP is not ready yet. 
The notification is cleared, and buffer pg + // will be handled when `pfc_enable` in `PORT_QOS_MAP` table is available + SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); + return task_process_status::task_success; + } pfc_enable = m_portPfcStatus[port]; speed = m_speedLookup[port]; @@ -167,7 +176,7 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) vector lossless_pgs = tokenize(pfc_enable, ','); - if (!admin_up && m_platform == "mellanox") + if (m_portStatusLookup[port] == "down" && m_platform == "mellanox") { for (auto lossless_pg : lossless_pgs) { @@ -385,15 +394,25 @@ void BufferMgr::doPortQosTableTask(Consumer &consumer) string op = kfvOp(tuple); if (op == SET_COMMAND) { + bool update_pfc_enable = false; for (auto itp : kfvFieldsValues(tuple)) { if (fvField(itp) == "pfc_enable") { - m_portPfcStatus[port_name] = fvValue(itp); + if (m_portPfcStatus.count(port_name) == 0 || m_portPfcStatus[port_name] != fvValue(itp)) + { + m_portPfcStatus[port_name] = fvValue(itp); + update_pfc_enable = true; + } SWSS_LOG_INFO("Got pfc enable status for port %s status %s", port_name.c_str(), fvValue(itp).c_str()); break; } } + if (update_pfc_enable) + { + // The return status is ignored + doSpeedUpdateTask(port_name); + } } it = consumer.m_toSync.erase(it); } @@ -482,7 +501,6 @@ void BufferMgr::doTask(Consumer &consumer) } else if (m_pgfile_processed && table_name == CFG_PORT_TABLE_NAME) { - bool admin_up = false; for (auto i : kfvFieldsValues(t)) { if (fvField(i) == "speed") @@ -491,14 +509,14 @@ void BufferMgr::doTask(Consumer &consumer) } if (fvField(i) == "admin_status") { - admin_up = ("up" == fvValue(i)); + m_portStatusLookup[port] = fvValue(i); } } if (m_speedLookup.count(port) != 0) { // create/update profile for port - task_status = doSpeedUpdateTask(port, admin_up); + task_status = doSpeedUpdateTask(port); } } diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index 3a619d3804..b9b3f2c496 100644 --- 
a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -28,6 +28,7 @@ typedef std::map pg_profile_lookup_t; typedef std::map port_cable_length_t; typedef std::map port_speed_t; typedef std::map port_pfc_status_t; +typedef std::map port_admin_status_t; class BufferMgr : public Orch { @@ -56,11 +57,12 @@ class BufferMgr : public Orch pg_profile_lookup_t m_pgProfileLookup; port_cable_length_t m_cableLenLookup; + port_admin_status_t m_portStatusLookup; port_speed_t m_speedLookup; std::string getPgPoolMode(); void readPgProfileLookupFile(std::string); task_process_status doCableTask(std::string port, std::string cable_length); - task_process_status doSpeedUpdateTask(std::string port, bool admin_up); + task_process_status doSpeedUpdateTask(std::string port); void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable); void transformSeperator(std::string &name); diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index fab1db6af6..3d2285fd7b 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -62,10 +62,10 @@ def change_cable_len(self, cable_len, extra_port=None): fvs[extra_port] = cable_len self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs) - def set_port_qos_table(self, pfc_enable_flag): + def set_port_qos_table(self, port, pfc_enable_flag): fvs=dict() fvs['pfc_enable'] = pfc_enable_flag - self.config_db.update_entry("PORT_QOS_MAP", self.INTF, fvs) + self.config_db.update_entry("PORT_QOS_MAP", port, fvs) self.lossless_pgs = pfc_enable_flag.split(',') def get_pg_name_map(self): @@ -79,7 +79,7 @@ def get_pg_name_map(self): def setup_teardown_test(self, dvs): try: self.setup_db(dvs) - self.set_port_qos_table('2,3,4,6') + self.set_port_qos_table(self.INTF, '2,3,4,6') pg_name_map = self.get_pg_name_map() yield pg_name_map finally: @@ -178,7 +178,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): orig_cable_len = None orig_speed = None test_speed = None - test_port = "Ethernet4" + 
extra_port = "Ethernet4" try: # Retrieve cable len fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE") @@ -186,13 +186,13 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): if orig_cable_len == "0m": cable_len_for_test = "300m" fvs_cable_len[self.INTF] = cable_len_for_test - fvs_cable_len[test_port] = cable_len_for_test + fvs_cable_len[extra_port] = cable_len_for_test self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs_cable_len) else: cable_len_for_test = orig_cable_len # Ethernet4 is set to up, while no 'pfc_enable' available. `Ethernet0` is not supposed to be impacted - dvs.port_admin_set(test_port, "up") + dvs.port_admin_set(extra_port, "up") dvs.port_admin_set(self.INTF, "up") @@ -215,7 +215,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): test_speed = "100000" # change intf speed to 'test_speed' dvs.port_field_set(self.INTF, "speed", test_speed) - + dvs.port_field_set(extra_port, "speed", test_speed) # Verify new profile is generated new_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, cable_len_for_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) @@ -223,16 +223,24 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): # Verify BUFFER_PG is updated for pg in self.lossless_pgs: self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": new_lossless_profile}) - + fvs_negative = {} for pg in self.pg_name_map: # verify that buffer pgs do not point to the old profile since we cannot deduce the new profile oid fvs_negative["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] self.asic_db.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs_negative) + + # Add pfc_enable field for extra port + self.set_port_qos_table(extra_port, '2,3,4,6') + time.sleep(1) + # Verify BUFFER_PG is updated when pfc_enable is available + for pg in self.lossless_pgs: + 
self.app_db.wait_for_field_match("BUFFER_PG_TABLE", extra_port + ":" + pg, {"profile": new_lossless_profile}) finally: if orig_cable_len: - self.change_cable_len(orig_cable_len, test_port) + self.change_cable_len(orig_cable_len, extra_port) if orig_speed: dvs.port_field_set(self.INTF, "speed", orig_speed) + dvs.port_field_set(extra_port, "speed", orig_speed) dvs.port_admin_set(self.INTF, "down") - dvs.port_admin_set(test_port, "down") + dvs.port_admin_set(extra_port, "down")