diff --git a/.azure-pipelines/baseline_test/baseline.test.template.yml b/.azure-pipelines/baseline_test/baseline.test.template.yml index ffcd8b780f7..d104ce83132 100644 --- a/.azure-pipelines/baseline_test/baseline.test.template.yml +++ b/.azure-pipelines/baseline_test/baseline.test.template.yml @@ -115,20 +115,20 @@ jobs: STOP_ON_FAILURE: "False" TEST_PLAN_NUM: $(BASELINE_MGMT_PUBLIC_MASTER_TEST_NUM) -- job: dpu_elastictest - displayName: "kvmtest-dpu by Elastictest" - timeoutInMinutes: 240 - continueOnError: false - pool: ubuntu-20.04 - steps: - - template: ../run-test-elastictest-template.yml - parameters: - TOPOLOGY: dpu - MIN_WORKER: $(T0_SONIC_INSTANCE_NUM) - MAX_WORKER: $(T0_SONIC_INSTANCE_NUM) - KVM_IMAGE_BRANCH: "master" - MGMT_BRANCH: "master" - BUILD_REASON: "BaselineTest" - RETRY_TIMES: "0" - STOP_ON_FAILURE: "False" - TEST_PLAN_NUM: $(BASELINE_MGMT_PUBLIC_MASTER_TEST_NUM) +# - job: dpu_elastictest +# displayName: "kvmtest-dpu by Elastictest" +# timeoutInMinutes: 240 +# continueOnError: false +# pool: ubuntu-20.04 +# steps: +# - template: ../run-test-elastictest-template.yml +# parameters: +# TOPOLOGY: dpu +# MIN_WORKER: $(T0_SONIC_INSTANCE_NUM) +# MAX_WORKER: $(T0_SONIC_INSTANCE_NUM) +# KVM_IMAGE_BRANCH: "master" +# MGMT_BRANCH: "master" +# BUILD_REASON: "BaselineTest" +# RETRY_TIMES: "0" +# STOP_ON_FAILURE: "False" +# TEST_PLAN_NUM: $(BASELINE_MGMT_PUBLIC_MASTER_TEST_NUM) diff --git a/.azure-pipelines/recover_testbed/README.md b/.azure-pipelines/recover_testbed/README.md new file mode 100644 index 00000000000..1fec438b93a --- /dev/null +++ b/.azure-pipelines/recover_testbed/README.md @@ -0,0 +1,103 @@ +# Automatically recover unhealthy testbed via console + +## Background +The success rate of nightly test depends on the health of the testbeds. +In the past, we used pipelines to re-deploy testbeds when they had problems. This could fix some issues like configuration loss, but it was not enough. 
+Sometimes, the pipeline failed to restore the testbeds, and we had to do it manually. This was time-consuming and inefficient. +Therefore, we need a better way to automatically recover unhealthy testbeds, which can handle more situations and respond faster. + +## Design +Our script is designed to recover devices that lose their management ip and cannot be accessed via ssh. +The script uses console as an alternative way to connect to the device and reinstall the image from the boot loader. + +The script first checks the ssh connectivity of the device. +If ssh is working, it then checks the availability of `sonic-installer` on the device. +If `sonic-installer` is working, the device is considered healthy and no further action is needed. +Otherwise, the script proceeds to the recovery process via console. + +The recovery process depends on the console access of the device. +If console access is not possible, the script cannot proceed. +The script obtains a console session and power cycles the device. It then waits for the right timing to enter the boot loader. +The script supports four types of boot loaders: ++ ONIE: used by Mellanox, Cisco, Acs, Celestica hwskus ++ Marvell: used by Nokia hwskus ++ Loader: used by Nexus hwskus ++ Aboot: used by Arista hwskus + +In the boot loader, the script sets the temporary management ip and default route, and then reinstalls the image. +After the image is reinstalled, the script logs in to the device via console again and sets the permanent management ip and default route in Sonic. +It also writes these configurations to `/etc/network/interfaces` file to prevent losing them after reboot. + +Finally, the script verifies that ssh and `sonic-installer` are working on the device. If both are ok, the recovery process is completed. 
+ +## Structure +Our scripts are under the folder `.azure-pipelines/recover_testbed` +```buildoutcfg + .azure-pipelines + | + |-- recover_testbed + | + |-- common.py + |-- constants.py + |-- dut_connection.py + |-- interfaces.j2 + |-- recover_testbed.py + |-- testbed_status.py +``` + ++ `common.py` - This module contains the common functions that are used for recovering testbeds, such as how to enter the boot loader mode. + These functions are imported by other modules that implement the specific recovery steps for different devices. + + ++ `constants.py` - This module defines the constants that are used under the recover_testbed folder, such as sonic prompt, key words of timing. + These constants are used to avoid hard-coding and to make the code more readable and maintainable. + + ++ `dut_connection.py` - This module defines the connection of the DUT, including ssh and console connections. + It provides functions to create these connections, as well as to handle exceptions and errors. + These functions are used to communicate with the DUT and execute commands on it. + + ++ `interfaces.j2` - This is a Jinja2 template file that is used to generate the file `/etc/network/interfaces` on the DUT. + It defines the network interfaces and their configurations, such as IP address, netmask, gateway, etc. + The template file takes some variables as input, such as the interface name, the IP address range, etc. These variables are passed by the recover_testbed.py module. + + ++ `recover_testbed.py` - This is the main module that implements the recovery process for the testbed. + It takes some arguments as input, such as the inventory, the device name, the hwsku, etc. + It then calls the appropriate functions from the common.py and dut_connection.py modules to establish a connection with the DUT and enter the recovery mode. + It also uses the interfaces.j2 template file to generate and apply the network configuration on the DUT. 
+ Finally, it verifies that the DUT is successfully recovered and reports the result. + + ++ `testbed_status.py` - This module defines some status of the DUT, such as losing management IP address. + It provides functions to check and update these status, as well as to log them. + These functions are used by the recover_testbed.py module to monitor and troubleshoot the recovery process. + + + +## Description of parameters ++ `inventory` - The name of the inventory file that contains the information about the devices in the testbed, such as hostname, IP address, hwsku, etc. + + ++ `testbed-name` - The name of the testbed. The testbed name should match the name of the testbed file that defines the topology and connections of the devices in the testbed. + + ++ `tbfile` - The name of the testbed file that defines the topology and connections of the devices in the testbed. The default value is `testbed.yaml`. + + ++ `verbosity` - The level of verbosity that is used for logging the automation steps and results. Verbosity level can be 0 (silent), 1 (brief), 2 (detailed), or 3 (verbose). The default value is 2. + + ++ `log-level` - The level of severity that is used for logging the automation messages. Log level can be Error, Warning, Info, or Debug. The default value is Debug. + + ++ `image` - The URL of the golden image that is used to install DUT. The golden image should be a valid SONiC image file that can be downloaded from a image server. + + ++ `hwsku` - The hardware SKU that identifies the model and configuration of the DUT in the testbed. 
+ +## How to run the script +The script should be run from the `sonic-mgmt/ansible` directory with the following command: +`python3 ../.azure-pipelines/recover_testbed/recover_testbed.py -i {inventory} -t {tbname} --tbfile {tbfile} --log-level {log-level} --image {image url} --hwsku {hwsku} +` diff --git a/.azure-pipelines/recover_testbed/common.py b/.azure-pipelines/recover_testbed/common.py index de378e2c40b..d18d070f13b 100644 --- a/.azure-pipelines/recover_testbed/common.py +++ b/.azure-pipelines/recover_testbed/common.py @@ -6,8 +6,8 @@ import time import pexpect import ipaddress -from constants import OS_VERSION_IN_GRUB, ONIE_ENTRY_IN_GRUB, INSTALL_OS_IN_ONIE, \ - ONIE_START_TO_DISCOVERY, SONIC_PROMPT, MARVELL_ENTRY +from constants import OS_VERSION_IN_GRUB, ONIE_ENTRY_IN_GRUB, ONIE_INSTALL_MODEL, \ + ONIE_START_TO_DISCOVERY, SONIC_PROMPT, MARVELL_ENTRY, BOOTING_INSTALL_OS, ONIE_RESCUE_MODEL _self_dir = os.path.dirname(os.path.abspath(__file__)) base_path = os.path.realpath(os.path.join(_self_dir, "../..")) @@ -46,7 +46,7 @@ def get_pdu_managers(sonichosts, conn_graph_facts): return pdu_managers -def posix_shell_onie(dut_console, mgmt_ip, image_url, is_nexus=False, is_nokia=False): +def posix_shell_onie(dut_console, mgmt_ip, image_url, is_nexus=False, is_nokia=False, is_celestica=False): enter_onie_flag = True gw_ip = list(ipaddress.ip_interface(mgmt_ip).network.hosts())[0] @@ -80,38 +80,57 @@ def posix_shell_onie(dut_console, mgmt_ip, image_url, is_nexus=False, is_nokia=F dut_console.remote_conn.send(b'\x1b[B') continue - if ONIE_ENTRY_IN_GRUB in x and INSTALL_OS_IN_ONIE not in x: + if ONIE_ENTRY_IN_GRUB in x and ONIE_INSTALL_MODEL not in x and ONIE_RESCUE_MODEL not in x: dut_console.remote_conn.send("\n") enter_onie_flag = False + if ONIE_RESCUE_MODEL in x: + dut_console.remote_conn.send(b'\x1b[A') + dut_console.remote_conn.send("\n") + + if is_celestica and BOOTING_INSTALL_OS in x: + dut_console.remote_conn.send("\n") + # "ONIE: Starting ONIE Service 
Discovery" if ONIE_START_TO_DISCOVERY in x: + dut_console.remote_conn.send("\n") + # TODO: Define a function to send command here - for i in range(5): - dut_console.remote_conn.send('onie-discovery-stop\n') - dut_console.remote_conn.send("\n") + dut_console.remote_conn.send('onie-discovery-stop\n') + dut_console.remote_conn.send("\n") - if is_nokia: - enter_onie_flag = False - dut_console.remote_conn.send('umount /dev/sda2\n') + if is_nokia: + enter_onie_flag = False + dut_console.remote_conn.send('umount /dev/sda2\n') - dut_console.remote_conn.send("ifconfig eth0 {} netmask {}".format(mgmt_ip.split('/')[0], - ipaddress.ip_interface(mgmt_ip).with_netmask.split('/')[1])) - dut_console.remote_conn.send("\n") + dut_console.remote_conn.send("ifconfig eth0 {} netmask {}".format(mgmt_ip.split('/')[0], + ipaddress.ip_interface(mgmt_ip).with_netmask.split('/')[1])) + dut_console.remote_conn.send("\n") - dut_console.remote_conn.send("ip route add default via {}".format(gw_ip)) - dut_console.remote_conn.send("\n") + dut_console.remote_conn.send("ip route add default via {}".format(gw_ip)) + dut_console.remote_conn.send("\n") - dut_console.remote_conn.send("onie-nos-install {}".format(image_url)) - dut_console.remote_conn.send("\n") - # We will wait some time to connect to image server + # Remove the image if it already exists + dut_console.remote_conn.send("rm -f {}".format(image_url.split("/")[-1])) + dut_console.remote_conn.send("\n") + + dut_console.remote_conn.send("wget {}".format(image_url)) + dut_console.remote_conn.send("\n") + + # Waiting downloading finishing + for i in range(5): time.sleep(60) x = dut_console.remote_conn.recv(1024) x = x.decode('ISO-8859-9') - # TODO: Give a sample output here - if "ETA" in x: + # If we see "0:00:00", it means we finish downloading sonic image + # Sample output: + # sonic-mellanox-202012 100% |*******************************| 1196M 0:00:00 ETA + if "0:00:00" in x: break + dut_console.remote_conn.send("onie-nos-install 
{}".format(image_url.split("/")[-1])) + dut_console.remote_conn.send("\n") + if SONIC_PROMPT in x: dut_console.remote_conn.close() @@ -178,7 +197,7 @@ def posix_shell_aboot(dut_console, mgmt_ip, image_url): dut_console.remote_conn.send("\n") for i in range(5): - time.sleep(10) + time.sleep(60) x = dut_console.remote_conn.recv(1024) x = x.decode('ISO-8859-9') if "ETA" in x: diff --git a/.azure-pipelines/recover_testbed/constants.py b/.azure-pipelines/recover_testbed/constants.py index acfd106e1b3..6e35d8259ea 100644 --- a/.azure-pipelines/recover_testbed/constants.py +++ b/.azure-pipelines/recover_testbed/constants.py @@ -38,11 +38,18 @@ # Press enter to boot the selected OS, `e' to edit the commands # before booting or `c' for a command-line. -INSTALL_OS_IN_ONIE = "Install OS" +ONIE_INSTALL_MODEL = "Install" +ONIE_RESCUE_MODEL = "Rescue" + +# While entering into ONIE, we will get some output like +# " Booting `ONIE: Install OS' " +# " OS Install Mode" +BOOTING_INSTALL_OS = "Booting" # After enter into the installation in ONIE, it will discover some configuration # And finally, we will get the string "ONIE: Starting ONIE Service Discovery" -ONIE_START_TO_DISCOVERY = "Discovery" +# To fit the scenario of Celestica, we finally use the string "covery" +ONIE_START_TO_DISCOVERY = "covery" # At last, if installation successes in ONIE, we will get the prompt SONIC_PROMPT = "sonic login:" diff --git a/.azure-pipelines/recover_testbed/dut_connection.py b/.azure-pipelines/recover_testbed/dut_connection.py index 9784c7d029f..cef5cb9db8f 100644 --- a/.azure-pipelines/recover_testbed/dut_connection.py +++ b/.azure-pipelines/recover_testbed/dut_connection.py @@ -97,7 +97,6 @@ def get_ssh_info(sonichost): host=sonichost.im.get_hosts(pattern='sonic')[0]).get("ansible_altpassword") sonic_password = [creds['sonicadmin_password'], sonicadmin_alt_password] sonic_ip = sonichost.im.get_host(sonichost.hostname).vars['ansible_host'] - logging.info("sonic username: {}, password: 
{}".format(sonic_username, sonic_password)) return sonic_username, sonic_password, sonic_ip diff --git a/.azure-pipelines/recover_testbed/interfaces.j2 b/.azure-pipelines/recover_testbed/interfaces.j2 index ca04779599e..91d34ad5f99 100644 --- a/.azure-pipelines/recover_testbed/interfaces.j2 +++ b/.azure-pipelines/recover_testbed/interfaces.j2 @@ -5,9 +5,20 @@ auto eth0 iface eth0 inet static address {{ addr }} netmask {{ mask }} + network {{ network }} + broadcast {{ brd_ip }} ################ management network policy routing rules #### management port up rules" - up ip route add default via {{ gwaddr }} dev eth0 table default - up ip rule add from {{ addr }}/32 table default + up ip -4 route add default via {{ gwaddr }} dev eth0 table default metric 201 + up ip -4 route add {{ mgmt_ip }} dev eth0 table default + + # management port down rules + pre-down ip -4 route delete default via {{ gwaddr }} dev eth0 table default + pre-down ip -4 route delete {{ mgmt_ip }} dev eth0 table default + +# +source /etc/network/interfaces.d/* +# + {% endblock mgmt_interface %} # diff --git a/.azure-pipelines/recover_testbed/recover_testbed.py b/.azure-pipelines/recover_testbed/recover_testbed.py index 4d87ebac83c..7d956e44eb0 100644 --- a/.azure-pipelines/recover_testbed/recover_testbed.py +++ b/.azure-pipelines/recover_testbed/recover_testbed.py @@ -5,6 +5,7 @@ import os import sys import ipaddress +import traceback from common import do_power_cycle, check_sonic_installer, posix_shell_aboot, posix_shell_onie from constants import RC_SSH_FAILED @@ -44,17 +45,18 @@ def recover_via_console(sonichost, conn_graph_facts, localhost, mgmt_ip, image_u posix_shell_aboot(dut_console, mgmt_ip, image_url) elif device_type in ["nexus"]: posix_shell_onie(dut_console, mgmt_ip, image_url, is_nexus=True) - elif device_type in ["mellanox", "cisco", "acs", "celestica"]: - posix_shell_onie(dut_console, mgmt_ip, image_url) + elif device_type in ["mellanox", "cisco", "acs", "celestica", "force10"]: + 
is_celestica = device_type in ["celestica"] + posix_shell_onie(dut_console, mgmt_ip, image_url, is_celestica=is_celestica) elif device_type in ["nokia"]: posix_shell_onie(dut_console, mgmt_ip, image_url, is_nokia=True) else: - return + raise Exception("We don't support this type of testbed.") dut_lose_management_ip(sonichost, conn_graph_facts, localhost, mgmt_ip) except Exception as e: - logger.info(e) - return + traceback.print_exc() + raise Exception(e) def recover_testbed(sonichosts, conn_graph_facts, localhost, image_url, hwsku): @@ -69,15 +71,29 @@ def recover_testbed(sonichosts, conn_graph_facts, localhost, image_url, hwsku): if type(dut_ssh) == tuple: logger.info("SSH success.") + # May recover from boot loader, need to delete image file + sonichost.shell("sudo rm -f /host/{}".format(image_url.split("/")[-1]), + module_ignore_errors=True) + # Add ip info into /etc/network/interface extra_vars = { 'addr': mgmt_ip.split('/')[0], 'mask': ipaddress.ip_interface(mgmt_ip).with_netmask.split('/')[1], - 'gwaddr': list(ipaddress.ip_interface(mgmt_ip).network.hosts())[0] + 'gwaddr': list(ipaddress.ip_interface(mgmt_ip).network.hosts())[0], + 'mgmt_ip': mgmt_ip, + 'brd_ip': ipaddress.ip_interface(mgmt_ip).network.broadcast_address, + 'network': str(ipaddress.ip_interface(mgmt_ip).network).split('/')[0] } sonichost.vm.extra_vars.update(extra_vars) sonichost.template(src="../.azure-pipelines/recover_testbed/interfaces.j2", - dest="/etc/network/interface") + dest="/etc/network/interfaces") + + # Add management ip info into config_db.json + sonichost.template(src="../.azure-pipelines/recover_testbed/mgmt_ip.j2", + dest="/etc/sonic/mgmt_ip.json") + sonichost.shell("configlet -u -j {}".format("/etc/sonic/mgmt_ip.json")) + + sonichost.shell("sudo config save -y") sonic_username = dut_ssh[0] sonic_password = dut_ssh[1] @@ -94,8 +110,7 @@ def recover_testbed(sonichosts, conn_graph_facts, localhost, image_url, hwsku): # Do power cycle need_to_recover = True else: - 
logger.info("Authentication failed. Passwords are incorrect.") - return + raise Exception("Authentication failed. Passwords are incorrect.") if need_to_recover: recover_via_console(sonichost, conn_graph_facts, localhost, mgmt_ip, image_url, hwsku) @@ -182,14 +197,6 @@ def main(args): help="Loglevel" ) - parser.add_argument( - "-o", "--output", - type=str, - dest="output", - required=False, - help="Output duts version to the specified file." - ) - parser.add_argument( "--image", type=str, diff --git a/.azure-pipelines/recover_testbed/testbed_status.py b/.azure-pipelines/recover_testbed/testbed_status.py index adc3cfa67d1..5d9826c791d 100644 --- a/.azure-pipelines/recover_testbed/testbed_status.py +++ b/.azure-pipelines/recover_testbed/testbed_status.py @@ -22,4 +22,5 @@ def dut_lose_management_ip(sonichost, conn_graph_facts, localhost, mgmt_ip): logging.info(e) finally: logger.info("=====Recover finish=====") + localhost.pause(seconds=120, prompt="Wait for SONiC initialization") dut_console.disconnect() diff --git a/.azure-pipelines/run-test-elastictest-template.yml b/.azure-pipelines/run-test-elastictest-template.yml index a28eb7b3b8f..695268fc73c 100644 --- a/.azure-pipelines/run-test-elastictest-template.yml +++ b/.azure-pipelines/run-test-elastictest-template.yml @@ -117,7 +117,7 @@ parameters: - name: DUMP_KVM_IF_FAIL type: string - default: "True" + default: "False" # KVM dump has beed deleted values: - "True" - "False" diff --git a/.azure-pipelines/test_plan.py b/.azure-pipelines/test_plan.py index daf6b17c508..24de7b63d81 100644 --- a/.azure-pipelines/test_plan.py +++ b/.azure-pipelines/test_plan.py @@ -23,6 +23,7 @@ SPECIFIC_PARAM_KEYWORD = "specific_param" TOLERATE_HTTP_EXCEPTION_TIMES = 20 TOKEN_EXPIRE_HOURS = 6 +MAX_GET_TOKEN_RETRY_TIMES = 3 class TestPlanStatus(Enum): @@ -188,13 +189,18 @@ def get_token(self): "client_secret": self.client_secret, "scope": get_scope(self.url) } - try: - resp = requests.post(token_url, headers=headers, data=payload, 
timeout=10).json() - self._token = resp["access_token"] - self._token_generate_time = datetime.utcnow() - return self._token - except Exception as exception: - raise Exception("Get token failed with exception: {}".format(repr(exception))) + attempt = 0 + while(attempt < MAX_GET_TOKEN_RETRY_TIMES): + try: + resp = requests.post(token_url, headers=headers, data=payload, timeout=10).json() + self._token = resp["access_token"] + self._token_generate_time = datetime.utcnow() + return self._token + except Exception as exception: + attempt += 1 + print("Get token failed with exception: {}. Retry {} times to get token." + .format(repr(exception), MAX_GET_TOKEN_RETRY_TIMES - attempt)) + raise Exception("Failed to get token after {} attempts".format(MAX_GET_TOKEN_RETRY_TIMES)) def create(self, topology, test_plan_name="my_test_plan", deploy_mg_extra_params="", kvm_build_id="", min_worker=None, max_worker=None, pr_id="unknown", output=None, diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index 38b1f0a1b6b..ea83c0e7279 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -161,7 +161,7 @@ - name: find all vlan interface names for T0 topology set_fact: - vlan_intfs: "{{ vlan_intfs|default([])}} + ['{{ port_alias[item] }}' ]" + vlan_intfs: "{{ vlan_intfs|default([]) + [port_alias[item]] }}" with_items: "{{ host_if_indexes }}" when: "('host_interfaces_by_dut' in vm_topo_config) and ('tor' in vm_topo_config['dut_type'] | lower)" @@ -203,11 +203,11 @@ enable_tunnel_qos_remap: true when: "(('leafrouter' == (vm_topo_config['dut_type'] | lower)) or ('backendleafrouter' == (vm_topo_config['dut_type'] | lower))) and (hwsku in hwsku_list_dualtor_t1) and not (is_ixia_testbed)" - - name: gather hwsku that supports Compute-AI deployment + - name: gather hwsku that supports ComputeAI deployment set_fact: hwsku_list_compute_ai: "['Cisco-8111-O64']" - - name: enable Compute-AI deployment + - 
name: enable ComputeAI deployment set_fact: enable_compute_ai_deployment: true when: "(hwsku in hwsku_list_compute_ai) and not (is_ixia_testbed)" @@ -495,21 +495,6 @@ line: 'snmp_rocommunity: {{ snmp_rocommunity }}' become: true - - name: disable automatic minigraph update if we are deploying new minigraph into SONiC - lineinfile: - name: /etc/sonic/updategraph.conf - regexp: '^enabled=' - line: 'enabled=false' - become: true - register: updategraph_conf - - - name: restart automatic minigraph update service - become: true - service: - name: updategraph - state: restarted - when: updategraph_conf.changed - - name: docker status shell: docker ps register: docker_status @@ -525,6 +510,19 @@ delay: 10 until: result is not failed + - name: Cleanup /etc/sonic folder before loading new minigraph + block: + - name: Ensure /etc/sonic/acl.json is deleted + become: true + file: + path: /etc/sonic/acl.json + state: absent + - name: Ensure /etc/sonic/port_config.json is deleted + become: true + file: + path: /etc/sonic/port_config.json + state: absent + - name: execute cli "config load_minigraph -y" to apply new minigraph become: true shell: config load_minigraph -y diff --git a/ansible/devutils b/ansible/devutils index 51da040e611..821b4b81aa7 100755 --- a/ansible/devutils +++ b/ansible/devutils @@ -305,9 +305,9 @@ def pdu_action_on_dut(host, attrs, action): ret['Summary'].append('Unsupported action {}.'.format(action)) return ret - status = pduman.get_outlet_status() + outlet_status = pduman.get_outlet_status() psu_status = {} - for outlet in status: + for outlet in outlet_status: ret['PDU status'].append(outlet) psu_name = outlet['psu_name'] status = psu_status.get(psu_name, { @@ -318,7 +318,15 @@ def pdu_action_on_dut(host, attrs, action): if 'output_watts' not in outlet: status['output_watts'] = 'N/A' else: - status['output_watts'] = status['output_watts'] + int(outlet.get('output_watts', 0)) + raw_outlet_watts = outlet['output_watts'] + if not raw_outlet_watts: + 
outlet_watts = 0 + else: + try: + outlet_watts = int(outlet['output_watts']) + except ValueError: + outlet_watts = 0 + status['output_watts'] = status['output_watts'] + outlet_watts psu_status[psu_name] = status ret['PSU status'] = psu_status diff --git a/ansible/dualtor/config_simulated_y_cable.yml b/ansible/dualtor/config_simulated_y_cable.yml index 65245f047e0..003ea2af4f9 100644 --- a/ansible/dualtor/config_simulated_y_cable.yml +++ b/ansible/dualtor/config_simulated_y_cable.yml @@ -14,12 +14,15 @@ when: restart_pmon is not defined and restart_pmon|bool == true - name: Get host server address - vmhost_server_info: vmhost_server_name={{ testbed_facts['server'] }} vm_file={{ vm_file }} + script: scripts/vmhost_server_address.py --inv-file {{ vm_file }} --server-name {{ testbed_facts['server'] }} + args: + executable: python delegate_to: localhost + register: vmhost_server_address - name: Set y cable simulator server address set_fact: - mux_simulator_server: "{{ vmhost_server_address }}" + mux_simulator_server: "{{ vmhost_server_address.stdout }}" - name: Set default y cable simulator server port set_fact: diff --git a/ansible/example_ixia b/ansible/example_ixia index 500df7a96ac..142cbab08d9 100644 --- a/ansible/example_ixia +++ b/ansible/example_ixia @@ -18,6 +18,7 @@ ixia_chassis: sonic: vars: mgmt_subnet_mask_length: 23 + mgmt_subnet_v6_mask_length: 64 children: sonic_dell64_40 diff --git a/ansible/group_vars/sonic/variables b/ansible/group_vars/sonic/variables index 1d2c55c649e..3ce56ca22df 100644 --- a/ansible/group_vars/sonic/variables +++ b/ansible/group_vars/sonic/variables @@ -1,6 +1,9 @@ ansible_ssh_user: admin ansible_connection: multi_passwd_ssh ansible_altpassword: YourPaSsWoRd +# ansible_altpasswords: +# - fakepassword1 +# - fakepassword2 sonic_version: "v2" diff --git a/ansible/lab b/ansible/lab index 3671d8e485f..40774802f07 100644 --- a/ansible/lab +++ b/ansible/lab @@ -3,6 +3,7 @@ all: lab: vars: mgmt_subnet_mask_length: 24 + 
mgmt_subnet_v6_mask_length: 64 children: sonic: children: diff --git a/ansible/library/acl_capabilities_facts.py b/ansible/library/acl_capabilities_facts.py index e391bda73e2..051e793c98a 100644 --- a/ansible/library/acl_capabilities_facts.py +++ b/ansible/library/acl_capabilities_facts.py @@ -38,8 +38,8 @@ def run(self): """ self.facts['acl_capabilities'] = {} namespace_list = multi_asic.get_namespace_list() - - SonicDBConfig.load_sonic_global_db_config() + if multi_asic.is_multi_asic(): + SonicDBConfig.load_sonic_global_db_config() conn = SonicV2Connector(namespace=namespace_list[0]) conn.connect(conn.STATE_DB) keys = conn.keys(conn.STATE_DB, 'ACL_STAGE_CAPABILITY_TABLE|*') or [] diff --git a/ansible/library/show_ipv6_interface.py b/ansible/library/show_ipv6_interface.py new file mode 100644 index 00000000000..a520b0c2a14 --- /dev/null +++ b/ansible/library/show_ipv6_interface.py @@ -0,0 +1,116 @@ +#!/usr/bin/python + +from ansible.module_utils.basic import AnsibleModule +import socket + +DOCUMENTATION = ''' +module: show_ipv6_interface.py +Short_description: Retrieve show ipv6 interface +Description: + - Retrieve IPv6 address of interface and IPv4 address of its neighbor + +options: + - namespace:: + Description: In multi ASIC env, namespace to run the command + Required: False + +''' + +EXAMPLES = ''' + # Get show ipv6 interface + - show_ipv6_interface: + + # Get show ipv6 interface in namespace asic0 + - show_ipv6_interface: namespace='asic0' + +''' + + +def split_dash(string_with_dash): + return string_with_dash.split("/") + + +class ShowIpv6InterfaceModule(object): + def __init__(self): + self.module = AnsibleModule( + argument_spec=dict( + namespace=dict(required=False, type='str', default=None), + ), + supports_check_mode=False + ) + self.m_args = self.module.params + self.out = None + self.facts = {} + self.ns = "" + ns = self.m_args["namespace"] + if ns is not None: + self.ns = " -n {} -d all ".format(ns) + + def run(self): + """ + Main method of the 
class + """ + self.ip_int = {} + try: + rc, self.out, err = self.module.run_command( + "show ipv6 interfaces{}".format(self.ns), + executable='/bin/bash', + use_unsafe_shell=True + ) + for line in self.out.split("\n"): + line = line.split() + + # only collect non-link addresses + if not len(line) or (not line[0].startswith("Ethernet") and not line[0].startswith("PortChannel")): + continue + + intf = line[0] + + if len(line) == 6: + address, prefix = split_dash(line[2]) + admin, oper = split_dash(line[3]) + bgp_neighbour = line[4] + peer_ipv6 = line[5] + elif len(line) == 5: + address, prefix = split_dash(line[1]) + admin, oper = split_dash(line[2]) + bgp_neighbour = line[3] + peer_ipv6 = line[4] + else: + raise Exception("Unexpected output") + + if peer_ipv6 == "N/A": + continue + + # sanity check ipv6 address + try: + socket.inet_pton(socket.AF_INET6, address) + except socket.error: + continue + + self.ip_int[intf] = { + "ipv6": address, + "prefix_len": prefix, + "admin": admin, + "oper_state": oper, + "bgp_neighbour": bgp_neighbour, + "peer_ipv6": peer_ipv6 + } + self.facts['ipv6_interfaces'] = self.ip_int + except Exception as e: + self.module.fail_json(msg=str(e)) + if rc != 0: + self.module.fail_json( + msg="Command failed rc = %d, out = %s, err = %s" % (rc, self.out, err)) + + self.module.exit_json(ansible_facts=self.facts) + + +def main(): + ShowIpInt = ShowIpv6InterfaceModule() + ShowIpInt.run() + return + + +if __name__ == "__main__": + main() diff --git a/ansible/library/vmhost_server_info.py b/ansible/library/vmhost_server_info.py deleted file mode 100644 index c85450475f9..00000000000 --- a/ansible/library/vmhost_server_info.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python - -from ansible.module_utils.basic import AnsibleModule -from ansible.parsing.dataloader import DataLoader -from ansible.inventory.manager import InventoryManager - -DOCUMENTATION = ''' -module: vmhost_server_info.py -short_description: Gather mgmt IP for given host server 
(like server_17) -Description: - This plugin will parse the input vm_file and return mgmt IP for given host server. - options: - vmhost_server_name: the name of vm_host server, like server_1; required: True - vm_file: the virtual machine file path ; default: 'veos' - -Ansible_facts: - 'vmhost_server_address': the IPv4 address for given vmhost server - -''' - -EXAMPLES = ''' - - name: gather vm_host server address - vmhost_server_info: vmhost_server_name='server_1' vm_file='veos' -''' - -VM_INV_FILE = 'veos' - - -def main(): - module = AnsibleModule( - argument_spec=dict( - vmhost_server_name=dict(required=True, type='str'), - vm_file=dict(default=VM_INV_FILE, type='str') - ), - supports_check_mode=True - ) - m_args = module.params - vmhost_server_name = m_args["vmhost_server_name"] - vm_file = m_args["vm_file"] - - inv_mgr = InventoryManager(loader=DataLoader(), sources=vm_file) - - all_hosts = inv_mgr.get_hosts(pattern=vmhost_server_name) - if len(all_hosts) == 0: - module.fail_json(msg="No host matches {} in inventory file {}".format( - vmhost_server_name, vm_file)) - else: - for host in all_hosts: - if host.name.startswith('VM'): - continue - module.exit_json( - ansible_facts={"vmhost_server_address": host.get_vars()["ansible_host"]}) - - module.fail_json(msg="Unable to find IP address of host server {} in inventory file {}".format( - vmhost_server_name, vm_file)) - - -if __name__ == "__main__": - main() diff --git a/ansible/plugins/connection/multi_passwd_ssh.py b/ansible/plugins/connection/multi_passwd_ssh.py index 696d1b5afd4..8286e1394b9 100644 --- a/ansible/plugins/connection/multi_passwd_ssh.py +++ b/ansible/plugins/connection/multi_passwd_ssh.py @@ -1,11 +1,14 @@ import imp +import logging import os from functools import wraps -from ansible.errors import AnsibleAuthenticationFailure +from ansible.errors import AnsibleAuthenticationFailure, AnsibleConnectionFailure from ansible.plugins import connection +logger = logging.getLogger(__name__) + # HACK: 
workaround to import the SSH connection plugin _ssh_mod = os.path.join(os.path.dirname(connection.__file__), "ssh.py") _ssh = imp.load_source("_ssh", _ssh_mod) @@ -21,23 +24,39 @@ - name: ansible_altpassword - name: ansible_ssh_altpass - name: ansible_ssh_altpassword + altpasswords: + description: Alternative authentication passwords list for the C(remote_user). Can be supplied as CLI option. + vars: + - name: ansible_altpasswords + - name: ansible_ssh_altpasswords + hostv6: + description: IPv6 address + vars: + - name: ansible_hostv6 """.lstrip("\n") +# Sample error messages that host unreachable: +# 'Failed to connect to the host via ssh: ssh: connect to host 192.168.0.2 port 22: Connection timed out' +# 'Failed to connect to the host via ssh: ssh: connect to host 192.168.0.2 port 22: No route to host' +CONNECTION_TIMEOUT_ERR_FLAG1 = "Connection timed out" +CONNECTION_TIMEOUT_ERR_FLAG2 = "No route to host" + def _password_retry(func): """ Decorator to retry ssh/scp/sftp in the case of invalid password - - Will retry for password in (ansible_password, ansible_altpassword): + Will retry with IPv6 addr if IPv4 addr is unavailable + Will retry for password in (ansible_password, ansible_altpassword, ansible_altpasswords): """ - @wraps(func) - def wrapped(self, *args, **kwargs): + def _conn_with_multi_pwd(self, *args, **kwargs): password = self.get_option("password") or self._play_context.password conn_passwords = [password] altpassword = self.get_option("altpassword") if altpassword: conn_passwords.append(altpassword) - + altpasswds = self.get_option("altpasswords") + if altpasswds: + conn_passwords.extend(altpasswds) while conn_passwords: conn_password = conn_passwords.pop(0) # temporarily replace `password` for this trial @@ -53,8 +72,44 @@ def wrapped(self, *args, **kwargs): # reset `password` to its original state self.set_option("password", password) self._play_context.password = password - # retry here, need create a new pipe for sshpass + # This is a retry, so 
the fd/pipe for sshpass is closed, and we need a new one self.sshpass_pipe = os.pipe() + + @wraps(func) + def wrapped(self, *args, **kwargs): + try: + # First, try with original host(generally IPv4) with multi-password + return _conn_with_multi_pwd(self, *args, **kwargs) + except AnsibleConnectionFailure as e: + # If a non-authentication related exception is raised and IPv6 host is set, + # Retry with IPv6 host with multi-password + try: + hostv6 = self.get_option("hostv6") + except KeyError: + hostv6 = None + + ipv4_addr_unavailable = (CONNECTION_TIMEOUT_ERR_FLAG1 in e.message) or \ + (CONNECTION_TIMEOUT_ERR_FLAG2 in e.message) + + try_ipv6_addr = (type(e) != AnsibleAuthenticationFailure) and ipv4_addr_unavailable and hostv6 + if try_ipv6_addr: + # This is a retry, so the fd/pipe for sshpass is closed, and we need a new one + self.sshpass_pipe = os.pipe() + self._play_context.remote_addr = hostv6 + # args sample: + # ( [b'sshpass', b'-d18', b'ssh', b'-o', b'ControlMaster=auto', b'-o', b'ControlPersist=120s', b'-o', b'UserKnownHostsFile=/dev/null', b'-o', b'StrictHostKeyChecking=no', b'-o', b'StrictHostKeyChecking=no', b'-o', b'User="admin"', b'-o', b'ConnectTimeout=60', b'-o', b'ControlPath="/home/user/.ansible/cp/376bdcc730"', 'fc00:1234:5678:abcd::2', b'/bin/sh -c \'echo PLATFORM; uname; echo FOUND; command -v \'"\'"\'python3.10\'"\'"\'; command -v \'"\'"\'python3.9\'"\'"\'; command -v \'"\'"\'python3.8\'"\'"\'; command -v \'"\'"\'python3.7\'"\'"\'; command -v \'"\'"\'python3.6\'"\'"\'; command -v \'"\'"\'python3.5\'"\'"\'; command -v \'"\'"\'/usr/bin/python3\'"\'"\'; command -v \'"\'"\'/usr/libexec/platform-python\'"\'"\'; command -v \'"\'"\'python2.7\'"\'"\'; command -v \'"\'"\'/usr/bin/python\'"\'"\'; command -v \'"\'"\'python\'"\'"\'; echo ENDFOUND && sleep 0\''], None) # noqa: E501 + # args[0] are the parameters of ssh connection + ssh_args = args[0] + # Change the IPv4 host in the ssh_args to IPv6 + for idx in range(len(ssh_args)): + if type(ssh_args[idx]) 
== bytes and ssh_args[idx].decode() == self.host: + ssh_args[idx] = hostv6 + self.host = hostv6 + self.set_option("host", hostv6) + return _conn_with_multi_pwd(self, *args, **kwargs) + else: + raise e + return wrapped diff --git a/ansible/roles/test/files/ptftests/py3/advanced-reboot.py b/ansible/roles/test/files/ptftests/py3/advanced-reboot.py index cac76620eff..09c4972cd95 100644 --- a/ansible/roles/test/files/ptftests/py3/advanced-reboot.py +++ b/ansible/roles/test/files/ptftests/py3/advanced-reboot.py @@ -1475,7 +1475,7 @@ def reboot_dut(self): # Check to see if the warm-reboot script knows about the retry count feature stdout, stderr, return_code = self.dut_connection.execCommand( "sudo " + self.reboot_type + " -h", timeout=5) - if "retry count" in stdout: + if "retry count" in "\n".join(stdout): if self.test_params['neighbor_type'] == "sonic": reboot_command = self.reboot_type + " -N" else: diff --git a/ansible/roles/test/files/ptftests/py3/copp_tests.py b/ansible/roles/test/files/ptftests/py3/copp_tests.py index 723f68326a5..898f64bec6d 100644 --- a/ansible/roles/test/files/ptftests/py3/copp_tests.py +++ b/ansible/roles/test/files/ptftests/py3/copp_tests.py @@ -251,17 +251,25 @@ def __init__(self): def check_constraints(self, send_count, recv_count, time_delta_ms, rx_pps): self.log("") - self.log("Checking constraints (PolicyApplied):") - self.log( - "PPS_LIMIT_MIN (%d) <= rx_pps (%d) <= PPS_LIMIT_MAX (%d): %s" % - (int(self.PPS_LIMIT_MIN), - int(rx_pps), - int(self.PPS_LIMIT_MAX), - str(self.PPS_LIMIT_MIN <= rx_pps <= self.PPS_LIMIT_MAX)) - ) - - assert self.PPS_LIMIT_MIN <= rx_pps <= self.PPS_LIMIT_MAX, "rx_pps {}".format( - rx_pps) + if self.has_trap: + self.log("Checking constraints (PolicyApplied):") + self.log( + "PPS_LIMIT_MIN (%d) <= rx_pps (%d) <= PPS_LIMIT_MAX (%d): %s" % + (int(self.PPS_LIMIT_MIN), + int(rx_pps), + int(self.PPS_LIMIT_MAX), + str(self.PPS_LIMIT_MIN <= rx_pps <= self.PPS_LIMIT_MAX)) + ) + assert self.PPS_LIMIT_MIN <= rx_pps <= 
self.PPS_LIMIT_MAX, "rx_pps {}".format(rx_pps) + else: + self.log("Checking constraints (NoPolicyApplied):") + self.log( + "rx_pps (%d) <= PPS_LIMIT_MIN (%d): %s" % + (int(rx_pps), + int(self.PPS_LIMIT_MIN), + str(rx_pps <= self.PPS_LIMIT_MIN)) + ) + assert rx_pps <= self.PPS_LIMIT_MIN, "rx_pps {}".format(rx_pps) # SONIC config contains policer CIR=600 for ARP diff --git a/ansible/roles/test/files/ptftests/py3/dhcp_relay_test.py b/ansible/roles/test/files/ptftests/py3/dhcp_relay_test.py index 1826a67e8aa..e5f6d664764 100644 --- a/ansible/roles/test/files/ptftests/py3/dhcp_relay_test.py +++ b/ansible/roles/test/files/ptftests/py3/dhcp_relay_test.py @@ -795,7 +795,5 @@ def runTest(self): # Below verification will be done only when client port is set in ptf_runner if not self.dual_tor and 'other_client_port' in self.test_params: - self.verify_dhcp_relay_pkt_on_other_client_port_with_no_padding( - self.dest_mac_address, self.client_udp_src_port) self.verify_dhcp_relay_pkt_on_server_port_with_no_padding( self.dest_mac_address, self.client_udp_src_port) diff --git a/ansible/roles/test/files/ptftests/py3/dhcpv6_relay_test.py b/ansible/roles/test/files/ptftests/py3/dhcpv6_relay_test.py index 80e548e3d40..7743999bc6e 100644 --- a/ansible/roles/test/files/ptftests/py3/dhcpv6_relay_test.py +++ b/ansible/roles/test/files/ptftests/py3/dhcpv6_relay_test.py @@ -298,8 +298,9 @@ def create_dhcp_relayed_relay_packet(self): def create_dhcp_relay_relay_reply_packet(self): relay_relay_reply_packet = packet.Ether(dst=self.uplink_mac) + dst_ip = self.loopback_ipv6 if self.is_dualtor else self.relay_iface_ip relay_relay_reply_packet /= IPv6(src=self.server_ip, - dst=self.relay_iface_ip) + dst=dst_ip) relay_relay_reply_packet /= packet.UDP( sport=self.DHCP_SERVER_PORT, dport=self.DHCP_SERVER_PORT) relay_relay_reply_packet /= DHCP6_RelayReply(msgtype=13, hopcount=1, linkaddr=self.vlan_ip, diff --git a/ansible/roles/test/files/ptftests/py3/vxlan-decap.py 
b/ansible/roles/test/files/ptftests/py3/vxlan-decap.py index 7aca45e5d09..d41e42c3f3b 100644 --- a/ansible/roles/test/files/ptftests/py3/vxlan-decap.py +++ b/ansible/roles/test/files/ptftests/py3/vxlan-decap.py @@ -48,7 +48,7 @@ from device_connection import DeviceConnection -def count_matched_packets_helper(test, exp_packet, exp_packet_number, port, device_number=0, timeout=1): +def count_matched_packets_helper(test, exp_packet, exp_packet_number, port=None, device_number=0, timeout=1): """ Add exp_packet_number to original ptf interface in order to stop waiting when expected number of packets is received @@ -169,7 +169,11 @@ def generate_VlanPrefixes(self, gw, prefixlen, acc_ports): else: addr += 1 # skip gw res[port] = host_ip - addr += 1 + # skip soc IPs for aa dualtor + if self.is_active_active_dualtor: + addr += 2 + else: + addr += 1 return res @@ -177,6 +181,7 @@ def setUp(self): self.dataplane = ptf.dataplane_instance self.test_params = testutils.test_params_get() + self.is_active_active_dualtor = self.test_params.get("is_active_active_dualtor", False) if 'vxlan_enabled' in self.test_params and self.test_params['vxlan_enabled']: self.vxlan_enabled = True @@ -214,10 +219,16 @@ def setUp(self): self.pc_info = [] self.net_ports = [] + self.all_active_net_ports = [] for name, val in graph['minigraph_portchannels'].items(): members = [graph['minigraph_port_indices'][member] for member in val['members']] self.net_ports.extend(members) + if self.is_active_active_dualtor: + self.all_active_net_ports.extend(members) + members = [graph['mg_unslctd_port_idx'][member] + for member in val['members']] + self.all_active_net_ports.extend(members) ip = None for d in graph['minigraph_portchannel_interfaces']: @@ -426,11 +437,12 @@ def runTest(self): self.work_test() def Vxlan(self, test): - for i, n in enumerate(test['acc_ports']): - for j, a in enumerate(test['acc_ports']): - res, out = self.checkVxlan(a, n, test, self.vlan_mac) - if not res: - return False, out + " | 
net_port_rel(acc)=%d acc_port_rel=%d" % (i, j) + if not self.is_active_active_dualtor: + for i, n in enumerate(test['acc_ports']): + for j, a in enumerate(test['acc_ports']): + res, out = self.checkVxlan(a, n, test, self.vlan_mac) + if not res: + return False, out + " | net_port_rel(acc)=%d acc_port_rel=%d" % (i, j) for i, n in enumerate(self.net_ports): for j, a in enumerate(test['acc_ports']): @@ -483,17 +495,28 @@ def checkRegularRegularVLANtoLAG(self, acc_port, pc_ports, dst_ip, test): exp_packet = Mask(exp_packet) exp_packet.set_do_not_care_scapy(scapy.Ether, "dst") + # skip smac check for aa dualtor + if self.is_active_active_dualtor: + exp_packet.set_do_not_care_scapy(scapy.Ether, "src") self.dataplane.flush() for i in range(self.nr): testutils.send_packet(self, acc_port, packet) - nr_rcvd = count_matched_packets_all_ports_helper( - self, exp_packet, self.nr, pc_ports, timeout=20) + if self.is_active_active_dualtor: + nr_rcvd = count_matched_packets_all_ports_helper( + self, exp_packet, self.nr, self.all_active_net_ports, timeout=20) + else: + nr_rcvd = count_matched_packets_all_ports_helper( + self, exp_packet, self.nr, pc_ports, timeout=20) rv = nr_rcvd == self.nr out = "" if not rv: - arg = self.nr, nr_rcvd, str(acc_port), str( - pc_ports), src_mac, dst_mac, src_ip, dst_ip + if self.is_active_active_dualtor: + arg = self.nr, nr_rcvd, str(acc_port), str( + self.all_active_net_ports), src_mac, dst_mac, src_ip, dst_ip + else: + arg = self.nr, nr_rcvd, str(acc_port), str( + pc_ports), src_mac, dst_mac, src_ip, dst_ip out = "sent = %d rcvd = %d | src_port=%s dst_ports=%s | src_mac=%s dst_mac=%s src_ip=%s dst_ip=%s" % arg return rv, out @@ -565,8 +588,12 @@ def checkVxlan(self, acc_port, net_port, test, dst_mac): self.dataplane.flush() for i in range(self.nr): testutils.send_packet(self, net_port, packet) - nr_rcvd = count_matched_packets_helper( - self, inpacket, self.nr, acc_port, timeout=20) + if self.is_active_active_dualtor: + nr_rcvd = 
count_matched_packets_helper( + self, inpacket, self.nr, timeout=20) + else: + nr_rcvd = count_matched_packets_helper( + self, inpacket, self.nr, acc_port, timeout=20) rv = nr_rcvd == self.nr out = "" if not rv: diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt index fc47ab4657a..759b62e88c3 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer_common_ignore.txt @@ -230,3 +230,41 @@ r, ".* ERR dualtor_neighbor_check.py: .*" r, ".*ERR kernel: \[.*\] ccp.*firmware: failed to load amd\/amd_sev_.*.sbin .*" r, ".*ERR kernel: \[.*\] firmware_class: See https:\/\/wiki.debian.org\/Firmware for information about missing firmware.*" r, ".*ERR kernel: \[.*\] snd_hda_intel.*no codecs found!.*" + +#Cisco platform ignore messages +r, ".* ERR .*-E-PVT-0- get_temperature: sensor=GIBRALTAR_HBM_SENSOR_0 is not ready.*" +r, ".* ERR .*-E-PVT-0- get_temperature: sensor=GIBRALTAR_HBM_SENSOR_1 is not ready.*" +r, ".* ERR CCmisApi: system_service_Map_base::at.*" +r, ".* ERR gbsyncd\d*#GBSAI.*pointer for SAI_SWITCH_ATTR_REGISTER_READ is not handled, FIXME.*" +r, ".* ERR gbsyncd\d*#GBSAI.*pointer for SAI_SWITCH_ATTR_REGISTER_WRITE is not handled, FIXME.*" +r, ".* ERR gbsyncd\d*#GBSAI[\d*] updateNotifications: pointer for SAI_SWITCH_ATTR_REGISTER_WRITE is not handled, FIXME!" 
+r, ".* ERR kernel:.*No associated hostinterface to 6 port.*" +r, ".* ERR lldp#lldpmgrd\[\d*\]: Port init timeout reached.*" +r, ".* ERR swss\d*#orchagent.*pointer for SAI_SWITCH_ATTR_REGISTER_READ is not handled, FIXME.*" +r, ".* ERR swss\d*#orchagent.*pointer for SAI_SWITCH_ATTR_REGISTER_WRITE is not handled, FIXME.*" +r, ".* ERR swss\d*#orchagent: :- removeLag: Failed to remove non-empty LAG PortChannel.*" +r, ".* ERR swss\d*#orchagent:.*initBufferConstants: Failed to get Maximum memory size.*" +r, ".* ERR syncd#syncd:.*-E-HLD-0- get_dependent_objects: NULL dependee.*" +r, ".* ERR syncd\d*#syncd:.*la_acl_key_profile_base::initialize failed to place udk for key type IPV6.*" +r, ".* ERR syncd#syncd:.*SAI_LOG|SAI_API_TUNNEL: VLAN to VNI not implemented yet.*" +r, ".* ERR syncd\d*#syncd: :- setEndTime: event 'create:SAI_OBJECT_TYPE_SWITCH:oid:0x[0-9a-fA-F]*' took \d* ms to execute.*" +r, ".* ERR syncd\d*#syncd: :- threadFunction: time span WD exceeded \d* ms for create:SAI_OBJECT_TYPE_SWITCH:oid:0x21000000000000.*" +r, ".* ERR syncd\d*#syncd:.* SDK_LOG|-E-API-0- shared/src/hld/system/la_device_impl_pacgbakpg.cpp::\d* get_trap_configuration API returned: status = Leaba_Err: Entry requested not found: la_status silicon_one::gibraltar::la_device_impl_pacgbakpg::do_get_trap_configuration.*" +r, ".* ERR syncd\d*#syncd:.*Failed to retrieve system port SAI ID for port ID .*, switch not in VOQ mode.*" +r, ".* ERR syncd\d*#syncd:.*SAI_API_ACL: Invalid or unsupported ACL match field 4143.*" +r, ".* ERR syncd\d*#syncd:.*SAI_API_BUFFER: get_buffer_pool_stats unknown counter 20.*" +r, ".* ERR syncd\d*#syncd:.*SAI_API_BUFFER: get_ingress_priority_group_stats unknown counter 5.*" +r, ".* ERR syncd\d*#syncd:.*SAI_API_BUFFER: get_ingress_priority_group_stats unknown counter 7.*" +r, ".* ERR syncd\d*#syncd:.*SAI_API_HOSTIF: src/sai_trap.cpp:.*: Invalid trap event code .*" +r, ".* ERR syncd\d*#syncd:.*SAI_API_LAG: resolve_feat_over_member_ports: found port index .*" +r, ".* ERR 
syncd\d*#syncd:.*SAI_API_LAG: resolve_feat_over_member_ports: port index .* now selected.*" +r, ".* ERR syncd\d*#syncd:.*SAI_API_PORT: Invalid port counter .*index.*" +r, ".* ERR syncd\d*#syncd:.*SAI_API_SWITCH: genl_ctrl_resolve failed family=lb_genl_family0.*" +r, ".* ERR syncd\d*#syncd:.*la_vrf_port_common_base::get_acl_group egress acl group not found.*" +r, ".* ERR syncd\d*#syncd:.*la_vrf_port_common_base::get_acl_group ingress acl group not found.*" +r, ".* ERR systemd-udevd\[\d*\]:.* leaba* Failed to get link config: No such device.*" +r, ".* INFO .*Failed to instantiate thermal sensor SSD_Temp: xr_sysctl_get.*status -116.*" +r, ".* INFO .*[duty_cycle_map]: illegal pwm value .*" +r, ".* INFO .*command '/usr/sbin/smartctl' failed: [116] Stale file handle.*" +r, ".* INFO healthd.*Key 'TEMPERATURE_INFO|ASIC' field 'high_threshold' unavailable in database 'STATE_DB'.*" +r, ".* INFO healthd.*Key 'TEMPERATURE_INFO|ASIC' field 'temperature' unavailable in database 'STATE_DB'.*" diff --git a/ansible/roles/vm_set/library/sonic_kickstart.py b/ansible/roles/vm_set/library/sonic_kickstart.py index 44497bfccab..e4c4920916a 100644 --- a/ansible/roles/vm_set/library/sonic_kickstart.py +++ b/ansible/roles/vm_set/library/sonic_kickstart.py @@ -84,18 +84,10 @@ def logout(self): def session(new_params): - if new_params['disable_updategraph']: - seq = [ - ('while true; do if [ $(systemctl is-active swss) == "active" ]; then break; fi; ' - 'echo $(systemctl is-active swss); ' - 'sed -i -e "s/enabled=true/enabled=false/" /etc/sonic/updategraph.conf; ' - 'systemctl restart updategraph; sleep 1; done', [r'#'], 180), - ] - else: - seq = [ - ('while true; do if [ $(systemctl is-active swss) == "active" ]; then break; fi; ' - 'echo $(systemctl is-active swss); sleep 1; done', [r'#'], 180), - ] + seq = [ + ('while true; do if [ $(systemctl is-active swss) == "active" ]; then break; fi; ' + 'echo $(systemctl is-active swss); sleep 1; done', [r'#'], 180), + ] seq.extend([ ('pkill 
dhclient', [r'#']), @@ -145,7 +137,6 @@ def main(): mgmt_gw=dict(required=True), new_password=dict(required=True), num_asic=dict(required=True), - disable_updategraph=dict(required=True, type='bool'), )) try: diff --git a/ansible/roles/vm_set/tasks/kickstart_vm.yml b/ansible/roles/vm_set/tasks/kickstart_vm.yml index 45879b1cf14..ca74b1329a1 100644 --- a/ansible/roles/vm_set/tasks/kickstart_vm.yml +++ b/ansible/roles/vm_set/tasks/kickstart_vm.yml @@ -3,10 +3,6 @@ respin_vms: [] when: respin_vms is not defined -- set_fact: - disable_updategraph: False - when: disable_updategraph is not defined - - set_fact: skip_this_vm: True @@ -65,7 +61,6 @@ mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }} new_password={{ sonic_password }} num_asic={{ num_asic }} - disable_updategraph={{ disable_updategraph }} register: kickstart_output until: '"kickstart_code" in kickstart_output and kickstart_output.kickstart_code == 0' retries: 5 @@ -97,7 +92,6 @@ mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }} new_password={{ sonic_password }} num_asic={{ num_asic }} - disable_updategraph={{ disable_updategraph }} register: kickstart_output_final until: '"kickstart_code" in kickstart_output_final and kickstart_output_final.kickstart_code == 0' retries: 5 diff --git a/ansible/roles/vm_set/tasks/main.yml b/ansible/roles/vm_set/tasks/main.yml index 31add804e77..121d56caad5 100644 --- a/ansible/roles/vm_set/tasks/main.yml +++ b/ansible/roles/vm_set/tasks/main.yml @@ -168,6 +168,13 @@ sysctl_set: yes become: yes +- name: Increase IPv6 route cache size + sysctl: + name: "net.ipv6.route.max_size" + value: "16384" + sysctl_set: yes + become: yes + - name: Setup external front port include_tasks: external_port.yml when: external_port is defined diff --git a/ansible/roles/vm_set/tasks/start_sonic_vm.yml b/ansible/roles/vm_set/tasks/start_sonic_vm.yml index 94628c14b33..0f83bc77751 100644 --- a/ansible/roles/vm_set/tasks/start_sonic_vm.yml +++ b/ansible/roles/vm_set/tasks/start_sonic_vm.yml @@ -2,10 +2,6 @@ 
sonic_vm_storage_location: "{{ home_path }}/sonic-vm" when: sonic_vm_storage_location is not defined -- set_fact: - disable_updategraph: False - when: disable_updategraph is not defined - - name: Create directory for vm images and vm disks file: path={{ item }} state=directory mode=0755 with_items: @@ -66,7 +62,6 @@ mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }} new_password={{ sonic_password }} num_asic={{ num_asic }} - disable_updategraph={{ disable_updategraph }} register: kickstart_output - name: Fail if kickstart gives error for {{ dut_name }} diff --git a/ansible/scripts/vmhost_server_address.py b/ansible/scripts/vmhost_server_address.py new file mode 100644 index 00000000000..153befa38a2 --- /dev/null +++ b/ansible/scripts/vmhost_server_address.py @@ -0,0 +1,58 @@ +"""This script is to parse ansible inventory file and return the mgmt IP for given host server. +""" + +import argparse +import sys + +from ansible.parsing.dataloader import DataLoader +from ansible.inventory.manager import InventoryManager + + +def main(args): + server_name = args.server_name + inv_file = args.inv_file + ip_ver = args.ip_ver + + inv_mgr = InventoryManager(loader=DataLoader(), sources=inv_file) + all_hosts = inv_mgr.get_hosts(pattern=server_name) + + if len(all_hosts) == 0: + sys.stderr.write("No host matches {} in inventory file {}".format(server_name, inv_file)) + sys.exit(1) + else: + for host in all_hosts: + if host.name.startswith('VM'): + continue + if ip_ver == 'ipv4': + result = host.get_vars().get("ansible_host", "") + else: + result = host.get_vars().get("ansible_hostv6", "") + sys.stdout.write(result) + sys.exit(0) + + sys.stderr.write( + "Unable to find IP address of host server {} in inventory file {}".format(server_name, inv_file) + ) + sys.exit(2) + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser(description='Gather mgmt IP for given host server (like server_17)') + parser.add_argument( + '--server-name', + help='The name of vm_host server, like 
server_1' + ) + parser.add_argument( + '--inv-file', + default='veos', + help='The inventory file contains server information. Default is veos.' + ) + parser.add_argument( + '--ip-ver', + default='ipv4', + choices=['ipv4', 'ipv6'], + help='The IP version to return. Default is ipv4.' + ) + args = parser.parse_args() + main(args) diff --git a/ansible/t2_lab b/ansible/t2_lab index 4964802a77c..4ff0a2fd8a0 100644 --- a/ansible/t2_lab +++ b/ansible/t2_lab @@ -3,6 +3,7 @@ all: lab: vars: mgmt_subnet_mask_length: 24 + mgmt_subnet_v6_mask_length: 64 children: sonic: children: diff --git a/ansible/templates/minigraph_dpg.j2 b/ansible/templates/minigraph_dpg.j2 index 9fdadccb86d..0c2352010f0 100644 --- a/ansible/templates/minigraph_dpg.j2 +++ b/ansible/templates/minigraph_dpg.j2 @@ -75,9 +75,9 @@ V6HostIP eth0 - {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/64 + {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/{{ mgmt_subnet_v6_mask_length if mgmt_subnet_v6_mask_length is defined else '64' }} - {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/64 + {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/{{ mgmt_subnet_v6_mask_length if mgmt_subnet_v6_mask_length is defined else '64' }} @@ -170,6 +170,9 @@ {{ vlan_param['id'] }} {{ vlan_param['tag'] }} {{ vlan_param['prefix'] | ipaddr('network') }}/{{ vlan_param['prefix'] | ipaddr('prefix') }} +{% if 'secondary_subnet' in vlan_param %} + {{ vlan_param['secondary_subnet'] | ipaddr('network') }}/{{ vlan_param['secondary_subnet'] | ipaddr('secondary_subnet') }} +{% endif %} {% if 'mac' in vlan_param %} {{ vlan_param['mac'] }} {% endif %} @@ -208,6 +211,13 @@ {{ vlan }} {{ vlan_param['prefix'] }} +{%if 'secondary_subnet' in vlan_param %} + + + {{ vlan }} + {{ vlan_param['secondary_subnet'] }} + +{% endif %} {% endfor %} {% for vlan, vlan_param in vlan_configs.items() %} {% if 'prefix_v6' in vlan_param %} diff --git a/ansible/templates/minigraph_dpg_asic.j2 
b/ansible/templates/minigraph_dpg_asic.j2 index b199fbdd1fc..f2b8b5e1e10 100644 --- a/ansible/templates/minigraph_dpg_asic.j2 +++ b/ansible/templates/minigraph_dpg_asic.j2 @@ -61,9 +61,9 @@ V6HostIP eth0 - {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/64 + {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/{{ mgmt_subnet_v6_mask_length if mgmt_subnet_v6_mask_length is defined else '64' }} - {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/64 + {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/{{ mgmt_subnet_v6_mask_length if mgmt_subnet_v6_mask_length is defined else '64' }} diff --git a/ansible/templates/minigraph_meta.j2 b/ansible/templates/minigraph_meta.j2 index 9a93b8bbd93..f9b14197c2f 100644 --- a/ansible/templates/minigraph_meta.j2 +++ b/ansible/templates/minigraph_meta.j2 @@ -63,7 +63,7 @@ ResourceType - Compute-AI + ComputeAI {% endif %} {% if dhcp_servers %} diff --git a/ansible/testbed-cli.sh b/ansible/testbed-cli.sh index 6ba89549784..73863536de2 100755 --- a/ansible/testbed-cli.sh +++ b/ansible/testbed-cli.sh @@ -54,7 +54,6 @@ function usage echo "To deploy topology for specified testbed on a server: $0 add-topo 'testbed-name' ~/.password" echo " Optional argument for add-topo:" echo " -e ptf_imagetag= # Use PTF image with specified tag for creating PTF container" - echo " -e disable_updategraph= # Disable updategraph service when deploying testbed" echo "To deploy topology with the help of the last cached deployed topology for the specified testbed on a server:" echo " $0 deploy-topo-with-cache 'testbed-name' 'inventory' ~/.password" echo "To remove topology for specified testbed on a server: $0 remove-topo 'testbed-name' ~/.password" diff --git a/ansible/testbed-new.yaml b/ansible/testbed-new.yaml index 733baa06645..d02fd2b1227 100644 --- a/ansible/testbed-new.yaml +++ b/ansible/testbed-new.yaml @@ -49,6 +49,7 @@ device_groups: children: [sonic, fanout] # source: 
sonic-mgmt/ansible/lab vars: mgmt_subnet_mask_length: "24" # source: sonic-mgmt/ansible/lab + mgmt_subnet_v6_mask_length: "64" # source: sonic-mgmt/ansible/lab sonic: children: [sonic_s6000, sonic_s6100, sonic_sn2700_40, sonic_a7260] # source: sonic-mgmt/ansible/lab sonic_s6000: @@ -138,6 +139,7 @@ devices: device_type: server # source: sonic-mgmt/ansible/files/sonic_lab_devices-github.csv hwsku: TestServ # source: sonic-mgmt/ansible/files/sonic_lab_devices-github.csv mgmt_subnet_mask_length: "24" # source: sonic-mgmt/ansible/lab + mgmt_subnet_v6_mask_length: "64" # source: sonic-mgmt/ansible/lab alias: credentials: username: @@ -153,6 +155,7 @@ devices: device_type: blank hwsku: mgmt_subnet_mask_length: + mgmt_subnet_v6_mask_length: alias: credentials: username: diff --git a/ansible/vars/topo_t0.yml b/ansible/vars/topo_t0.yml index 23e38ed4d67..5a547e0e9e8 100644 --- a/ansible/vars/topo_t0.yml +++ b/ansible/vars/topo_t0.yml @@ -65,6 +65,7 @@ topology: id: 100 intfs: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] prefix: 192.168.0.1/22 + secondary_subnet: 192.169.0.1/22 prefix_v6: fc02:100::1/64 tag: 100 Vlan200: diff --git a/ansible/veos_vtb b/ansible/veos_vtb index 123d47276ce..7f2f613b3cf 100644 --- a/ansible/veos_vtb +++ b/ansible/veos_vtb @@ -97,6 +97,7 @@ all: sonic: vars: mgmt_subnet_mask_length: 24 + mgmt_subnet_v6_mask_length: 64 ansible_connection: multi_passwd_ssh ansible_altpassword: YourPaSsWoRd hosts: diff --git a/docs/api_wiki/README.md b/docs/api_wiki/README.md index e5438b7ff28..8f12c966161 100644 --- a/docs/api_wiki/README.md +++ b/docs/api_wiki/README.md @@ -113,6 +113,8 @@ def test_fun(duthosts, rand_one_dut_hostname, ptfhost): - [active_ip_interfaces](sonichost_methods/active_ip_interfaces.md) - Provides information on all active IP (Ethernet or Portchannel) interfaces given a list of interface names. 
+- [add_acl_table](sonichost_methods/add_acl_table.md) - Add new acl table via command `sudo config acl add table ` + - [all_critical_process_status](sonichost_methods/all_critical_process_status.md) - Provides summary and status of all critical services and their processes - [check_bgp_session_nsf](sonichost_methods/check_bgp_session_nsf.md) - Checks if BGP neighbor session has entered Nonstop Forwarding(NSF) state @@ -169,6 +171,8 @@ def test_fun(duthosts, rand_one_dut_hostname, ptfhost): - [get_interfaces_status](sonichost_methods/get_interfaces_status.md) - Get interfaces status on the DUT and parse the result into a dict. +- [get_intf_link_local_ipv6_addr](sonichost_methods/get_intf_link_local_ipv6_addr.md) - Get the link local ipv6 address of the interface + - [get_ip_route_info](sonichost_methods/get_ip_route_info.md) - Returns route information for a destionation. The destination could an ip address or ip prefix. - [get_monit_services_status](sonichost_methods/get_monit_services_status.md) - Get metadata on services monitored by Monit. @@ -207,6 +211,8 @@ def test_fun(duthosts, rand_one_dut_hostname, ptfhost): - [get_vlan_intfs](sonichost_methods/get_vlan_intfs.md) - Retrieves list of interfaces belonging to a VLAN. +- [get_vlan_brief](sonichost_methods/get_vlan_brief.md) - Returns a dict contians all vlans with their brief information + - [hostname](sonichost_methods/hostname.md) - Provides hostname for device. - [is_backend_portchannel](sonichost_methods/is_backend_portchannel.md) - Returns whether or not a provided portchannel is a backend portchannel. 
diff --git a/docs/api_wiki/ansible_methods/show_ipv6_interface.md b/docs/api_wiki/ansible_methods/show_ipv6_interface.md new file mode 100644 index 00000000000..dd871e80f0d --- /dev/null +++ b/docs/api_wiki/ansible_methods/show_ipv6_interface.md @@ -0,0 +1,35 @@ +# show_ipv6_interface + +- [Overview](#overview) +- [Examples](#examples) +- [Arguments](#arguments) +- [Expected Output](#expected-output) + +## Overview +Retrieve ipv6 address of interface and ipv6 address for corresponding neighbor + +## Examples +``` +def test_fun(duthosts, rand_one_dut_hostname): + duthost = duthosts[rand_one_dut_hostname] + + ip_intfs = duthost.show_ipv6_interface() +``` + +## Arguments +- `namespace` - if multi-asic, namespace to run the commmand + - Required: `False` + - Type: `String` + +## Expected Output +Provides a dictionary with information on the interfaces. The dictionary hierarchy is described below, with each indentation describing a sub-dictionary: + +- `ansible_facts` + - `ipv6_interfaces` - Dictionary mapping interface name to information on the interface + - `{INTERFACE_NAME}` - Dictionary with info in interface + - `bgp_neighbor` - Name of BGP neighbor for interface + - `ipv6` - interface configured ipv6 address + - `peer_ipv6` - BGP neighbor ipv6 address + - `admin` - admin state + - `oper_state` - operator state + - `prefix_len` - interface prefix length diff --git a/docs/api_wiki/sonichost_methods/add_acl_table.md b/docs/api_wiki/sonichost_methods/add_acl_table.md new file mode 100644 index 00000000000..1245c04ff4c --- /dev/null +++ b/docs/api_wiki/sonichost_methods/add_acl_table.md @@ -0,0 +1,50 @@ +# add_acl_table + +- [Overview](#overview) +- [Examples](#examples) +- [Arguments](#arguments) +- [Expected Output](#expected-output) + +## Overview +Add new acl table via command `sudo config acl add table ` + +## Examples +``` +def test_fun(duthosts, rand_one_dut_hostname): + duthost = duthosts[rand_one_dut_hostname] + + 
duthost.add_acl_table(table_name="Test_TABLE", + table_type="L3", + acl_stage="ingress", + bind_ports=["Ethernet0", "Ethernet1", "Ethernet3"]) +``` + +## Arguments + - `table_name` - table name of acl table + - Required: `True` + - Type: `String` + - `table_type` - table type of acl table + - Required: `True` + - Type: `String` + - `acl_stage` - acl stage + - Required: `False` + - Type: `String` + - Validate value: "ingress" or "egress" + - Default: None + - `bind_ports` - ports to bind + - Required: `False` + - Type option 1: `String` + - Format: Ethernet0,Ethernet1,Ethernet3 + - Type option 2: `List` + - Member Type: `String` + - Format: ["Ethernet0", "Ethernet1", "Ethernet3"] or ["Vlan100", "Vlan200"] + - This list of interfaces will be join to a string + - If list of VLAN name provided, acl table will bind to ports binding to those VLAN. + - Default: None + - `description` - description of acl table + - Required: `False` + - Type: `String` + - Default: None + +## Expected Output +None diff --git a/docs/api_wiki/sonichost_methods/get_intf_link_local_ipv6_addr.md b/docs/api_wiki/sonichost_methods/get_intf_link_local_ipv6_addr.md new file mode 100644 index 00000000000..c035be5ed33 --- /dev/null +++ b/docs/api_wiki/sonichost_methods/get_intf_link_local_ipv6_addr.md @@ -0,0 +1,25 @@ +# get_intf_link_local_ipv6_addr + +- [Overview](#overview) +- [Examples](#examples) +- [Arguments](#arguments) +- [Expected Output](#expected-output) + +## Overview +Get the link local ipv6 address of the interface + +## Examples +``` +def test_fun(duthosts, rand_one_dut_hostname): + duthost = duthosts[rand_one_dut_hostname] + + duthost.get_intf_link_local_ipv6_addr("Ethernet0") +``` + +## Arguments + - `intf` - the interface name + - Required: `True` + - Type: `String` + +## Expected Output +Link local only IPv6 address like: fe80::2edd:e9ff:fefc:dd58 or empty string if not found. 
diff --git a/docs/api_wiki/sonichost_methods/get_vlan_brief.md b/docs/api_wiki/sonichost_methods/get_vlan_brief.md new file mode 100644 index 00000000000..81109bf05ee --- /dev/null +++ b/docs/api_wiki/sonichost_methods/get_vlan_brief.md @@ -0,0 +1,48 @@ +# get_vlan_brief + +- [Overview](#overview) +- [Examples](#examples) +- [Arguments](#arguments) +- [Expected Output](#expected-output) +- [Potential Exception](#potential-exception) + +## Overview + +Read vlan brief information from running config. + +## Examples + +```python +def test_fun(duthosts, rand_one_dut_hostname): + duthost = duthosts[rand_one_dut_hostname] + vlan_brief = duthost.get_vlan_brief() +``` + +## Arguments + +None + +## Expected Output + +Returns an dict, key is vlan name and value is brief information. + +Example output: + +``` +{ + "Vlan1000": { + "interface_ipv4": [ "192.168.0.1/24" ], + "interface_ipv6": [ "fc02:1000::1/64" ], + "members": ["Ethernet0", "Ethernet1"] + }, + "Vlan2000": { + "interface_ipv4": [ "192.168.1.1/24" ], + "interface_ipv6": [ "fc02:1001::1/64" ], + "members": ["Ethernet3", "Ethernet4"] + } +} +``` + +## Potential Exception + +- [Exception from function get_running_config_facts](get_running_config_facts.md) diff --git a/docs/testplan/BFD-echomode-test-plan.md b/docs/testplan/BFD-echomode-test-plan.md new file mode 100644 index 00000000000..1b984049249 --- /dev/null +++ b/docs/testplan/BFD-echomode-test-plan.md @@ -0,0 +1,110 @@ +# Birirectional Forwarding Detection (BFD) Echo-Mode + +## Test Plan Revision History + +| Rev | Date | Author | Change Description | +| ---- | ---------- | ----------------- | ---------------------------- | +| 1 | 05/03/2024 | Ghulam Bahoo | Initial Version of test plan | + +## Definition/Abbreviation + +| **Term** | **Meaning** | +| ---------- | ---------------------------------------- | +| BFD | Bidirectional Forwarding Detection | + +## Introduction + +### Objective +The purpose is to test functionality of BFD echo-mode on the SONIC switch 
DUT, closely resembling production environment. The test assumes all standard topology configurations, such as BGP neighborship, are pre-configured in the DUT and neighboring systems with no BFD configurations. It is also assumed that neighboring devices are all SONiC devices. + +### Scope +- Test BFD Echo-Mode on SONiC DUT and neighboring devices + +### Related DUT CLI Commands +| Commands| Comment | +| ------- | ------- | +|Configuration commands| +| router bgp | BGP configuration mode | +| neighbor x.x.x.x bfd | Enable bfd on BGP neighbor | +| bfd | Opens the BFD daemon configuration mode | +| peer x.x.x.x | Configure BFD peer | +| echo-mode |Enables or disables the echo transmission mode | +|Show commands| +| show ip bgp summary | Display current BGP neighborship statistics | +| show bfd peer | Show all configured BFD peers information and current status | + +## Test structure + +### Testbed +The test will run on the following testbeds: +* t0 +* t1 +## Setup configuration +The test assumes all standard configurations, such as BGP neighborship, are pre-configured in the DUT and neighboring systems with no BFD configurations. It is also assumed that the neighboring devices are of SONiC type. + +## Test + +## Existing Test cases + +### Test case # 1 – BFD Basic + +#### Test objective +Verify that BFD neighborship is established and BFD peers are sharing BFD peer information to each other. +#### Test steps +* Establish BFD session between BFD Peers. +* Enable BFD protocol on BFD peer interfaces. +* Verify BFD peer information. +### Test case # 2 – BFD Scale + +#### Test objective +To validate BFD session establishment, state transitions (Up, Down, AdminDown), suspension, and scale testing with various scenarios including IPv4 and IPv6 addresses, single-hop, and multi-hop configurations, along with queue counter verification for BFD traffic. + +#### Test steps + +* Setup + * Identify network interfaces and their respective neighbors.
+ * Assign IP addresses to interfaces. + * Initialize BFD on the testing tool or platform. + +* Test Execution + * Create BFD sessions between DUT and neighboring devices. + * Validate BFD session states and transitions (e.g., Up, Down, AdminDown). + * Perform specific state transitions for testing (e.g., suspension, restoration). + Check and validate BFD queue counters or traffic statistics. +* Cleanup + * Remove BFD sessions established during testing. + * Release IP addresses assigned earlier. + * Stop BFD on the testing tool or platform. + + +### Test case # 3 – BFD Multihop + +#### Test objective +To validate BFD session establishment, state transitions (Up, Down, AdminDown), suspension, and scale testing with various scenarios including IPv4 and IPv6 addresses, single-hop, and multi-hop configurations, along with queue counter verification for BFD traffic. +#### Test steps + +* Setup + * Identify network interfaces and their respective neighbors. + * Assign IP addresses to interfaces. + * Initialize BFD on the testing tool or platform. + +* Test Execution + * Create BFD sessions between network devices. + * Validate BFD session states and transitions (e.g., Up, Down, AdminDown). + * Perform specific state transitions for testing (e.g., suspension, restoration). + Check and validate BFD queue counters or traffic statistics. +* Cleanup + * Remove BFD sessions established during testing. + * Release IP addresses assigned earlier. + * Stop BFD on the testing tool or platform. +## New Test cases + +### Test case # 4 – BFD Echo Mode + +#### Test objective +Verify that BFD neighborship is established and BFD peers are sharing BFD peer information to each other. Also verify the bidirectional reachability of a network path rather than just monitoring the path for faults.
+#### Test steps +* Establish BFD session between existing bgp neighbors +* Enable BFD echo mode +* Verify BFD peer information +* Verify that BFD peers are sharing BFD echo packets diff --git a/docs/testplan/BGP-Suppress-FIB-Pending-test-plan.md b/docs/testplan/BGP-Suppress-FIB-Pending-test-plan.md index ee083bbc17b..aa4b0f195a9 100644 --- a/docs/testplan/BGP-Suppress-FIB-Pending-test-plan.md +++ b/docs/testplan/BGP-Suppress-FIB-Pending-test-plan.md @@ -171,3 +171,26 @@ show ip route 1.1.1.0/24 json 8. Restore orchagent process 9. Make sure the routes are programmed in FIB by checking __offloaded__ flag in the DUT routing table 10. Send traffic matching the prefixes and verify packets are forwarded to __T0 VM__ + +### Test case # 7 - Test BGP route suppress under stress +1. Do BGP route flap 5 times +2. Disable BGP suppress-fib-pending function +3. Send traffic matching the prefixes in the BGP route flap and verify packets are forwarded __back to T2 VM__ +4. Suspend orchagent process to simulate a delay +5. Announce 1K BGP prefixes to DUT from T0 VM by exabgp +6. Verify the BGP routes are announced to T2 VM peer +7. Send traffic matching the prefixes in the BGP route flap and verify packets are forwarded __back to T2 VM__ +8. Enable BGP suppress-fib-pending function at DUT +9. Restore orchagent process +10. Verify the routes are programmed in FIB by checking __offloaded__ flag in the DUT routing table +11. Send traffic matching the prefixes and verify packets are forwarded to __T0 VM__ + +### Test case # 8 - Test BGP route suppress performance +1. Enable BGP suppress-fib-pending function at DUT +2. Start tcpdump capture at the ingress and egress port at DUT +3. Announce 1K BGP prefixes to DUT from T0 VM by exabgp +4. Verify the BGP routes are announced to T2 VM peer by DUT +5. Withdraw 1K BGP prefixes +6. Verify the BGP routes are withdrawn from T2 VM peer by DUT +7. Stop tcpdump capture +8.
Verify the average as well as middle route process time is under threshold diff --git a/docs/testplan/GCU-Dynamic-ACL-testplan.md b/docs/testplan/GCU-Dynamic-ACL-testplan.md new file mode 100644 index 00000000000..69a95e350f2 --- /dev/null +++ b/docs/testplan/GCU-Dynamic-ACL-testplan.md @@ -0,0 +1,464 @@ +# Dynamic ACL Update via GCU Test Plan + +## Overview + +This test plan will certify that Generic Config Updater (GCU) is able to properly add, remove, and update ACL Table Types, ACL Tables, and ACL Rules, and that these ACL rules and their priorities are respected and appropriate action is taken on both IPv4 and IPv6 packets. + +## Testbed + +The test will run on T0 testbeds. + +## Setup Configuration + +No setup pre-configuration is required, the test will configure and return the testbed to its original state. + +Tests themselves will utilize a fixture that automatically creates a ACL_TABLE_TYPE and ACL_TABLE, and then removes them when the test is complete. + +## Testing Plan + +To test the capability of GCU to dynamically update ACLs, we will utilize various Json Patch files to create, update, and remove various ACL Tables and Rules. The contents of the Json Patch files, as well as additional details about verification processes, will be defined in the last section of this document, [JSON Patch Files and Expected Results](#json-patch-files-and-expected-results). Traffic tests are performed after applying various rules to confirm expected behavior, and each traffic test is replicated for both IPv4 and IPv6 packets. + +### Test Case # 1 - Create and apply custom ACL table without rules + +#### Test Objective + +Verify that we can utilize GCU to create a custom ACL Table Type, and then create an ACL Table from this type. 
This is accomplished with a fixture, which will automatically be run on each subsequent test + +#### Testing Steps + +- Use GCU to create a new ACL Table Type via GCU + +- Use GCU to create an ACL Table utilizing this ACL Table Type + +- Verify that both operations were successful + +- Verify that output of "show acl table {tablename}" matches expected output + +- Use GCU to remove ACL Table + +- Use GCU to remove ACL Table Type + +- Verify that both operations were successful + +### Test Case # 2 - Create a drop rule within custom table + +#### Test Objective + +Verify that we can create a single drop rule utilizing GCU + +#### Testing Steps + +- Use GCU to create a new drop rule on a specific port in our ACL Table + +- Verify that operation was successful + +- Verify that output of "show acl rule | grep {rule_name}" matches expected output + +- Verify that packets sent on this port are dropped + +- Verify that packets sent on another port are forwarded + +### Test Case # 3 - Remove a drop rule from the ACL table + +#### Test Objective + +Verify that we can remove a previously created drop rule from our ACL Table with GCU + +#### Testing Steps + +- Use GCU to create a drop rule on a specific port on ACL Table + +- Remove the drop rule from ACL Table + +- Verify that all operations were successful + +- Verify that the result of "show acl rule {rule_name}" has no relevant output + +- Verify that packets that were previously dropped are now forwarded + +### Test Case # 4 - Create forward rules within custom ACL table + +#### Test Objective + +Verify that we can create a forward rule utilizing GCU, and that we can create forward rules for both ipv4 and ipv6 + +#### Testing Steps + +- Use GCU to create 2 new forwarding rules with top priority on our ACL Table, one for IPv4 and one for IPv6 + +- Use GCU to create drop rule on a specific port with lower priority in ACL Table + +- Verify that all operations were successful + +- Verify that for both rules created, "show 
acl rule | grep {rulename}" matches expected output for both rules + +- Verify that packets matching forwarding rules on this specific port are correctly forwarded + +- Verify that packets not matching forwarding rules on this specific port are correctly dropped + +### Test Case # 5 - Replace the IP Address on an ACL Rule + +#### Test Objective + +Verify that after creation, ACL Rules can have their match conditions updated + +#### Testing Steps + +- Use GCU to create 2 new forwarding rules on ACL Table + +- Use GCU to create drop rule on a specific port with lower priority on ACL Table + +- Use GCU to replace the IP addresses in both forwarding rules + +- Verify that all operations were successful + +- Verify that the results of "show acl rule | grep {rule_name}" matches expected output for both rules + +- Verify that packets with IPs matching original forwarding rules on this specific port are dropped + +- Verify that packets with IPs matching replacement rules on this specific port are forwarded + +### Test Case # 6 - Remove forward rule from ACL Table + +#### Test Objective + +Verify that after creation, a forward ACL rule can be removed and packets matching the forward rule are no longer forwarded + +#### Testing Steps + +- Use GCU to create 2 new forwarding rules on ACL Table + +- Use GCU to create drop rule on a specific port with lower priority on ACL Table + +- Use GCU to remove the 2 forwarding rules + +- Verify that all operations were successful + +- Verify that the results of "show acl rule {rule_name}" are empty for both rule names + +- Verify that packets with IPs matching the removed forwarding rules are dropped on this specific port + +### Test Case # 7 - Scale test of ACL Table, add large amount of forward and drop rules + +#### Test Objective + +Verify that GCU is capable of adding a large amount of ACL rules and that they are still followed. 
+ +#### Testing Steps + +- Use GCU to create 150 forwarding rules on ACL Table + +- Use GCU to create drop rules for each server facing port on ACL Table + +- Verify that all operations were successful + +- Verify that rules created are properly shown when using "show acl rule" + +- Verify that packets with IPs matching a destination IP for one of the forwarding rules are forwarded + +- Verify that packets not matching a forwarding IP are dropped + +### Test Case # 8 - Replace the IP Address of a non-existent ACL Rule + +#### Test Objective + +Verify that attempting to replace the address of a rule that does not exist properly results in an error and does not affect configDB + +#### Testing Steps + +- Create 2 new forwarding rules on ACL Table + +- Replace the IP addresses in non-existent forwarding rules + +- Verify that the replace action failed + +### Test Case # 9 - Remove non-existent ACL Table + +#### Test Objective + +Verify that attempting to remove an ACL Table that does not exist properly results in an error and does not affect configDB + +#### Testing Steps + +- Attempt to remove a table that does not exist + +- Verify that this removal fails + +## JSON Patch Files and Expected Results + +This section contains explicit details on the contents of each JSON Patch file used within the test, as well as the exact way that these operations are checked for success + +### Create a new ACL table type +**JSON Patch:** + + [ + { + "op": "add", + "path": "/ACL_TABLE_TYPE", + "value": { + "DYNAMIC_ACL_TABLE_TYPE" : { + "MATCHES": ["DST_IP","DST_IPV6","IN_PORTS"], + "ACTIONS": ["PACKET_ACTION","COUNTER"], + "BIND_POINTS": ["PORT"] + } + } + } + ] +**Expected Result** +- Operation Success + +**Additional checks** +- None + +### Create an ACL Table + +**Json Patch**: + + [ + { + "op": "add", + "path": "/ACL_TABLE/DYNAMIC_ACL_TABLE", + "value": { + "policy_desc": "DYNAMIC_ACL_TABLE", + "type": "DYNAMIC_ACL_TABLE_TYPE", + "stage": "INGRESS", + "ports": {vlan port members 
from minigraph} + } + } + ] + +**Expected result** +- Operation Success + + +**Additional checks** +- Check that results of the command “show acl table” match this expected output: + +Name | Type | Binding | Description | Stage | Status +------------- | ------------- | ---------- | ----------| --------- | --------- +DYNAMIC_ACL_TABLE | DYNAMIC_ACL_TABLE_TYPE | {vlan port 1} | DYNAMIC_ACL_TABLE_TYPE | ingress | Active + | | {vlan port 2} | | + | | {vlan port 3}... + +### Create Forwarding Rules + +**Json Patch:** + + [ + { + "op": "add", + "path": "/ACL_RULE", + "value": { + "DYNAMIC_ACL_TABLE|RULE_1": { + "DST_IP": "103.23.2.1/32", + "PRIORITY": "9999", + "PACKET_ACTION": "FORWARD" + }, + "DYNAMIC_ACL_TABLE|RULE_2": { + "DST_IPV6": "103:23:2:1::1/128", + "PRIORITY": "9998", + "PACKET_ACTION": "FORWARD" + } + } + } + ] + +**Expected Result** +- Operation Success + +**Additional Checks** ++ Check that results of “show acl rule DYNAMIC_ACL_TABLE RULE_1” and “show acl rule DYNAMIC_ACL_TABLE RULE_2” match the following output: + + DYNAMIC_ACL |_TABLE RULE_1 | 9999 | FORWARD | DST_IP: 103.23.2.1/32 + + DYNAMIC_ACL_TABLE | RULE_2 | 9998 | FORWARD | DST_IPV6: 103.23.2.1::1/128 + +### Create Drop Rule + +**Json Patch** + + [ + { + "op": "add", + "path": "/ACL_RULE/DYNAMIC_ACL_TABLE|RULE_3", + "value": { + "PRIORITY": "9997", + "PACKET_ACTION": "DROP", + "IN_PORTS": {port selected from DUT minigraph} + } + } + ] + + OR + + [ + { + "op": "add", + "path": "/ACL_RULE", + "value": { + "DYNAMIC_ACL_TABLE|RULE_3": { + "PRIORITY": "9997", + "PACKET_ACTION": "DROP", + "IN_PORTS": setup["blocked_src_port_name"], + } + } + } + ] + + Which patch is applied depends on whether there are already ACL Rules created, or if this is the first ACL rule that we are creating + +**Expected result** +- Operation Success + +**Additional checks:** ++ Check that result of “show acl rule DYNAMIC_ACL_TABLE RULE_3” matches the following output: + + DYNAMIC_ACL_TABLE | RULE_3 | 9997 | DROP | IN_PORTS: 
{port selected from DUT minigraph} + +### Remove Drop Rule + +**Json Patch** + + [ + { + "op": "remove", + "path": "/ACL_RULE/DYNAMIC_ACL_TABLE|RULE_3" + } + ] + + OR + + [ + { + "op": "remove", + "path": "/ACL_RULE" + } + ] + + Which of these two patches is applied depends on whether this is the only ACL Rule in the table or not, as we are not allowed to leave a table empty. + +**Expected result** + - Operation Success + +**Additional checks** +- Check that “show acl rule DYNAMIC_ACL_TABLE RULE_3” results in no output + +### Replace Non-Existent Rule +**Json Patch** + + [ + { + "op": "replace", + "path": "/ACL_RULE/DYNAMIC_ACL_TABLE|RULE_10", + "value": { + "DST_IP": "103.23.2.2/32", + "PRIORITY": "9999", + "PACKET_ACTION": "FORWARD" + } + } + ] + +**Expected result** +- Operation Failure + +**Additional checks** +- None + +### Replace Content of a Rule + +**Json Patch** + + [ + { + "op": "replace", + "path": "/ACL_RULE/DYNAMIC_ACL_TABLE|RULE_1", + "value": { + "DST_IP": "103.23.2.2/32", + "PRIORITY": "9999", + "PACKET_ACTION": "FORWARD" + } + }, + { + "op": "replace", + "path": "/ACL_RULE/DYNAMIC_ACL_TABLE|RULE_2", + "value": { + "DST_IPV6": "103:23:2:1::2/128", + "PRIORITY": "9998", + "PACKET_ACTION": "FORWARD" + } + } + ] + +**Expected result** +- Operation Success + +**Additional checks** ++ Check that results of “show acl rule | grep RULE_1” and “show acl rule | grep RULE_2” match the following output: + + DYNAMIC_ACL_TABLE | RULE_1 | 9999 | FORWARD | DST_IP: 103.23.2.2/32 + + DYNAMIC_ACL_TABLE | RULE_2 | 9998 | FORWARD | DST_IPV6: 103.23.2.1::2/128 + +### Remove Forward Rules +**Json Patch**: + + [ + { + "op": "remove", + "path": "/ACL_RULE/DYNAMIC_ACL_TABLE|RULE_1" + }, + { + "op": "remove", + "path": "/ACL_RULE/DYNAMIC_ACL_TABLE|RULE_2" + } + ] + +**Expected result** +- Operation Success + +**Additional checks** +- Check that “show acl rule DYNAMIC_ACL_TABLE RULE_1” and “show acl rule DYNAMIC_ACL_TABLE RULE_2” both result in no output + +### Remove 
Non-Existent Table +**Json Patch**: + + [ + { + "op": "remove", + "path": "/ACL_TABLE/DYNAMIC_ACL_TABLE_BAD" + } + ] + +**Expected result** +- Operation Failure + +**Additional checks** +- None + +### Remove ACL Table + +**Json Patch** + + [ + { + "op": "remove", + "path": "/ACL_TABLE/DYNAMIC_ACL_TABLE" + } + ] + +**Expected result** +- Operation Success + +**Additional checks** +- None + +### Remove ACL Table Type +**Json Patch:** + + [ + { + "op": "remove", + "path": "/ACL_TABLE_TYPE" + } + ] + +**Expected result** +- Operation Success + +**Additional checks** +- None diff --git a/docs/testplan/IPv4-Port-Based-DHCP-Server-test-plan.md b/docs/testplan/IPv4-Port-Based-DHCP-Server-test-plan.md new file mode 100644 index 00000000000..6c0ef62dc3c --- /dev/null +++ b/docs/testplan/IPv4-Port-Based-DHCP-Server-test-plan.md @@ -0,0 +1,332 @@ +# IPv4 Port Based DHCP Server Test Plan + + +- [IPv4 Port Based DHCP Server Test Plan](#ipv4-port-based-dhcp-server-test-plan) + - [Related Documents](#related-documents) + - [Overview](#overview) + - [Scope](#scope) + - [Test Scenario](#test-scenario) + - [Supported Topology](#supported-topology) + - [Test Case](#test-case) + - [Common Function](#common-function) + - [send_and_verify:](#send_and_verify) + - [Test Module #1 test_dhcp_server.py](#test-module-1-test_dhcp_serverpy) + - [Port Based Common setup](#port-based-common-setup) + - [Port Based Common teardown](#port-based-common-teardown) + - [test_dhcp_server_port_based_assignment_single_ip](#test_dhcp_server_port_based_assignment_single_ip) + - [test_dhcp_server_port_based_assigenment_single_ip_mac_move](#test_dhcp_server_port_based_assigenment_single_ip_mac_move) + - [test_dhcp_server_port_based_assigenment_single_ip_mac_swap](#test_dhcp_server_port_based_assigenment_single_ip_mac_swap) + - [test_dhcp_server_port_based_assignment_range](#test_dhcp_server_port_based_assignment_range) + - 
[test_dhcp_server_port_based_customize_options](#test_dhcp_server_port_based_customize_options) + - [test_dhcp_server_config_change](#test_dhcp_server_config_change) + - [test_dhcp_server_config_vlan_intf_change](#test_dhcp_server_config_vlan_intf_change) + - [test_dhcp_server_config_vlan_member_change](#test_dhcp_server_config_vlan_member_change) + - [test_dhcp_server_critical_process](#test_dhcp_server_critical_process) + - [Test Module #2 test_dhcp_server_multi_vlan.py](#test-module-2-test_dhcp_server_multi_vlanpy) + - [Common setup](#common-setup) + - [Common teardown](#common-teardown) + - [test_dhcp_server_multi_vlan](#test_dhcp_server_multi_vlan) + - [Test Module #3 test_dhcp_server_stress.py](#test-module-3-test_dhcp_server_stresspy) + - [Common setup](#common-setup) + - [Common teardown](#common-teardown) + - [test_dhcp_server_stress](#test_dhcp_server_stress) + - [Test Module #4 test_dhcp_server_smart_switch.py](#test-module-4-test_dhcp_server_smart_switchpy) + - [Common setup](#common-setup) + - [Common teardown](#common-teardown) + - [test_dhcp_server_smart_switch](#test_dhcp_server_smart_switch) + + +## Related Documents + +| **Document Name** | **Link** | +|-------------------|----------| +| IPv4 Port Based DHCP_SERVER in SONiC | [port_based_dhcp_server_high_level_design.md](https://github.com/sonic-net/SONiC/blob/master/doc/dhcp_server/port_based_dhcp_server_high_level_design.md)| +|Smart Switch IP address assignment| [smart-switch-ip-address-assignment.md](https://github.com/sonic-net/SONiC/blob/master/doc/smart-switch/ip-address-assigment/smart-switch-ip-address-assignment.md)| + +## Overview + +A DHCP Server is a server on network that can automatically provide and assign IP addresses, default gateways and other network parameters to client devices. Port based DHCP server is to assign IPs based on interface index. + +## Scope + +### Test Scenario + +The tests will include: + +1. Configuration test + 1. 
Add related configuration into CONFIG_DB and then verify configuration and process running status. + 2. Update related tables in CONFIG_DB to see whether configuration for DHCP Server change too. +2. Functionality test + 1. Check whether dhcrelay in dhcp_relay can foward DHCP packets between client and dhcp_server container. + 2. Check whether dhcp_server container can reply DHCP reply packets as expected. + 3. Verify in multi-vlan scenario. + 4. Verify in mac change scenario. + +### Supported Topology + +Base dhcp_server functionality tests (test module [#1](#test-module-1-test_dhcp_serverpy) [#2](#test-module-2-test_dhcp_server_multi_vlanpy) [#3](#test-module-3-test_dhcp_server_stresspy)) are supported on mx topology, smart switch related test (test module [#4](#test-module-4-test_dhcp_server_smart_switchpy)) is supported on t1-smartswitch topology (A new topology on real smart switch testbed). + +## Test Case + +### Common Function + +#### send_and_verify + * Send DHCP discover packets from PTF, check whether configured port receive DHCP offer packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr. + * Send DHCP request packets from PTF, check whether configured port receive DHCP ack packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr. Besides, check lease via show CLI to make sure lease is correct. + * For renew scenario, send DHCP request packets from PTF, check whether configured port receive DHCP ack packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr. Besides, check lease via show CLI to make sure lease is correct. + * Send DHCP release packets from PTF, check whether lease release via lease file inside dhcp_server container. + +### Test Module #1 test_dhcp_server.py + +#### Port Based Common setup + +* Check whether dhcrelay process running as expected (Original dhcp_relay functionality). 
+* Enable dhcp_server feature, and then use CLI to add DHCP Server configuration. + +#### Port Based Common teardown + +* Disable dhcp_server feature, and then check whether dhcrelay process running as expected (Original dhcp_relay functionality). +* Config reload, remove dhcp_server container. + +#### test_dhcp_server_port_based_assignment_single_ip + +* **Test objective** + + To test port based single ip assign. + + Assume that ports in DUT and PTF are connected like below: + + * DUT Ethernet0 - PTF eth0 + * DUT Ethernet1 - PTF eth1 + * DUT Ethernet2 - PTF eth2 + * DUT Ethernet3 - PTF eth3 + * DUT EThernet4 - PTF eth4 (Not configured interface) + + 3 tested scenarios: + + 1. Verify configured interface with client mac not in FDB table can successfully get IP. + 2. Verify configured interface with client mac in FDB table can successfully get IP. + 3. Verify configured interface with client mac in FDB table but ip it's learnt from another interface can successfully get IP. + 4. Verify no-configured interface cannot get IP. + +* **Setup** + + * Clear FDB table in DUT. + * Ping DUT vlan ip from eth1 and eth3 in PTF. + +* **Test detail** + + * Add a fixture to verify above scenarios: + * mac_not_in_fdb: Use `send_and_verify` to send and verify from eth0 with mac address of eth0, success to get IP. + * mac_in_fdb:Use `send_and_verify` to send and verify from eth0 with mac address of eth1, success to get IP. + * mac_learnt_from_other_interface: Use `send_and_verify` to send and verify from eth2 with mac address of eth3, success to get IP. + * no_configured_interface: Use `send_and_verify` to send and verify from eth4 with mac address of eth4, expected result: fail to get IP. + +#### test_dhcp_server_port_based_assigenment_single_ip_mac_move + +* **Test objective** + + To test port based single ip assign with client move to an interface has free IP to assign. + +* **Setup** + + Save originaly mac address in PTF. 
+ +* **Teardown** + + Restore mac address configuration in PTF. + +* **Test detail** + + * `send_and_verify` with mac A in interface A, expected result: IP assign successfully. + * `send_and_verify` with mac A in interface B, expected result: IP assign successfully. + +#### test_dhcp_server_port_based_assigenment_single_ip_mac_swap + +* **Test objective** + + To test port based single ip assign with client swap. + +* **Setup** + + Save originaly mac address in PTF. + +* **Teardown** + + Restore mac address configuration in PTF. + +* **Test detail** + + * `send_and_verify` with mac A in interface A, expected result: client A can get correct IP. + * `send_and_verify` with mac B in interface B, expected result: client A can get correct IP. + * `send_and_verify` with mac A in interface B, expected result: client A can get correct IP. + * `send_and_verify` with mac B in interface A, expected result: client A can get correct IP. + +#### test_dhcp_server_port_based_assignment_range + +* **Test objective** + + To test port based range ip assign. + +* **Setup** + + Add range and bind range via CLI. + +* **Teardown** + + Unbind range and del range via CLI. + +* **Test detail** + + * Always send packets from 1 PTF port with different client mac, process of sending and verifying can reuse function `send_and_verify`. + * Verify that new client can get / renew / release IP from range binded. When IPs in range are all used, new client cannot get IP. + +#### test_dhcp_server_port_based_customize_options + +* **Test objective** + + To test customize options (In current design, customized options will be always sent to client). + +* **Setup** + + Add option and bind option via CLI. + +* **Teardown** + + Unbind option and del option via CLI. + +* **Test detail** + + * Send DHCP discover packets from PTF, check whether configured port receive DHCP offer packet and no-configured ports don't receive. Need to check customized options. 
+  * Send DHCP request packets from PTF, check whether configured port receive DHCP ack packet and no-configured ports don't receive. Need to check customized options.
+  * Send DHCP release packets from PTF.
+
+#### test_dhcp_server_config_change
+
+* **Test objective**
+
+  To test dhcp_server configuration change scenario.
+
+* **Test detail**
+
+  * Use CLI to modify lease_time / netmask / gateway / customized_options in `DHCP_SERVER_IPV4` table and send discover / request packets from PTF and check whether receive expected offer / ack packets.
+  * Use CLI to disable / enable DHCP interface and send discover / request packets from PTF and check whether receive expected offer / ack packets.
+  * Use CLI to modify `DHCP_SERVER_IPV4_PORT` / `DHCP_SERVER_IPV4_CUSTOMIZED_OPTIONS` and send discover / request packets from PTF and check whether receive expected offer / ack packets.
+
+#### test_dhcp_server_config_vlan_intf_change
+
+* **Test objective**
+
+  To test vlan interface configuration change scenario.
+
+* **Setup**
+
+  Modify vlan ip in `VLAN_INTERFACE` table, change to another subnet.
+
+* **Teardown**
+
+  Restore vlan ip.
+
+* **Test detail**
+
+  * Send discover / request packets from PTF, expect not to receive offer / ack packets because the ip address configured in `DHCP_SERVER_IPV4_PORT` doesn't match vlan ip.
+
+#### test_dhcp_server_config_vlan_member_change
+
+* **Test objective**
+
+  To test vlan member configuration change scenario.
+
+* **Setup**
+
+  Delete vlan member.
+
+* **Teardown**
+
+  Restore vlan member.
+
+* **Test detail**
+
+  * Send discover / request packets from PTF, expect not to receive offer / ack packets because member not in vlan.
+
+#### test_dhcp_server_critical_process
+
+* **Test objective**
+
+  To test critical processes crash scenario.
+
+* **Test detail**
+
+  * Kill processes in `dhcp_relay:/etc/supervisor/critical_processes` and `dhcp_server:/etc/supervisor/critical_processes` to see whether dhcp_server and dhcp_relay container restart.
+  * Can refer to `tests/process_monitoring/test_critical_process_monitoring.py`
+
+### Test Module #2 test_dhcp_server_multi_vlan.py
+
+#### Common setup
+
+* Enable dhcp_server feature.
+* Apply DHCP Server related configuration for multiple vlans (Suggest use GCU).
+* Use GCU to apply different VLAN configuration (VLAN / VLAN_MEMBER / VLAN_INTERFACE). The reason to use GCU is that changing VLAN configuration via CLI is complicated.
+
+#### Common teardown
+
+* Config reload, remove dhcp_server container.
+
+#### test_dhcp_server_multi_vlan
+
+* **Test objective**
+
+  To test ip assign in multiple vlan scenario.
+
+* **Test detail**
+
+  * Send DHCP discover packets from PTF, check whether configured port receive DHCP offer packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr.
+  * Send DHCP request packets from PTF, check whether configured port receive DHCP ack packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr. Besides, check lease via show CLI to make sure lease is correct.
+  * For renew scenario, send DHCP request packets from PTF, check whether configured port receive DHCP ack packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr. Besides, check lease via show CLI to make sure lease is correct.
+  * Send DHCP release packets from PTF, check whether lease is released via lease file inside dhcp_server container.
+
+### Test Module #3 test_dhcp_server_stress.py
+
+#### Common setup
+
+* Enable dhcp_server feature, and then add DHCP Server configuration.
+
+#### Common teardown
+
+* Config reload, remove dhcp_server container.
+
+#### test_dhcp_server_stress
+
+* **Test objective**
+
+  To test ip assign with flooding packets.
+
+* **Test detail**
+
+  * Send flooding (100/s) DHCP discover packets in PTF and verify offer packets are received (whether received and receive time) in PTF side.
+  * Send flooding (100/s) DHCP request packets in PTF and verify ack packets are received (whether received and receive time) in PTF side.
+
+### Test Module #4 test_dhcp_server_smart_switch.py
+
+#### Common setup
+
+* Enable dhcp_server feature.
+* Add DHCP Server configuration.
+* Add smart switch related configuration (`DPUS` table and `MID_PLANE` table).
+
+#### Common teardown
+
+* Config reload, remove dhcp_server container.
+
+#### test_dhcp_server_smart_switch
+
+* **Test objective**
+
+  To test ip assign in smart switch.
+
+* **Test detail**
+
+  * Send DHCP discover packets from PTF, check whether configured port receive DHCP offer packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr.
+  * Send DHCP request packets from PTF, check whether configured port receive DHCP ack packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr. Besides, check lease via show CLI to make sure lease is correct.
+  * For renew scenario, send DHCP request packets from PTF, check whether configured port receive DHCP ack packet and no-configured ports don't receive. Need to check netmask / gateway / lease_time / yiaddr. Besides, check lease via show CLI to make sure lease is correct.
+  * Send DHCP release packets from PTF, check whether lease is released via lease file inside dhcp_server container.
diff --git a/docs/testplan/PFC_Congestion_Oversubscription_Test_Plan.md b/docs/testplan/PFC_Congestion_Oversubscription_Test_Plan.md new file mode 100644 index 00000000000..d3e678f52da --- /dev/null +++ b/docs/testplan/PFC_Congestion_Oversubscription_Test_Plan.md @@ -0,0 +1,318 @@ +# PFC testing during congestion and oversubscription + +- [PFC testing during congestion and oversubscription](#pfc-testing-during-congestion-and-oversubscription) + - [Overview](#overview) + - [Scope](#scope) + - [Keysight Testbed](#keysight-testbed) + - [Topology](#topology) + - [SONiC DUT as T2 switch](#sonic-dut-as-t2-switch) + - [Setup configuration](#setup-configuration) + - [Test cases](#test-cases) + - [Test case # 1 - Many-to-One congestion oversubscribe lossless traffic](#test-case--1---many-to-one-congestion-oversubscribe-lossless-traffic) + - [Test objective](#test-objective) + - [Background](#background) + - [Testbed Setup](#testbed-setup) + - [Test steps](#test-steps) + - [Test case # 2 – Many-to-One Congestion oversubscribe lossy traffic](#test-case--2--many-to-one-congestion-oversubscribe-lossy-traffic) + - [Test objective](#test-objective-1) + - [Background](#background-1) + - [Testbed Setup](#testbed-setup-1) + - [Test steps](#test-steps-1) + - [Test case # 3 – Many-to-One Congestion oversubscribe lossless and lossy traffic](#test-case--3--many-to-one-congestion-oversubscribe-lossless-and-lossy-traffic) + - [Test objective](#test-objective-2) + - [Background](#background-2) + - [Testbed Setup](#testbed-setup-2) + - [Test steps](#test-steps-2) + - [Test case # 4 – Many-to-One with Fluctuating Lossless Traffic Congestion](#test-case--4--many-to-one-with-fluctuating-lossless-traffic-congestion) + - [Test objective](#test-objective-3) + - [Background](#background-3) + - [Testbed Setup](#testbed-setup-3) + - [Test steps](#test-steps-3) + - [Test case # 5 – Lossless Response to External PFC Pause Storms](#test-case--5--lossless-response-to-external-pfc-pause-storms) + - [Test 
objective](#test-objective-4) + - [Background](#background-4) + - [Testbed Setup](#testbed-setup-4) + - [Test steps](#test-steps-4) + - [Test case # 6 – Lossless Response to Throttling PFC Pause](#test-case--6--lossless-response-to-throttling-pfc-pause) + - [Test objective](#test-objective-5) + - [Background](#background-5) + - [Testbed Setup](#testbed-setup-5) + - [Test Steps](#test-steps-5) + +## Overview +The purpose of these tests is to verify PFC behavior when the links are congested and verify the performance of the SONiC chassis system, closely resembling production environment. + +### Scope +These tests are targeted on fully functioning SONiC chassis system. Will cover functional testing of PFC during congestion and oversubscription scenarios. + +### Keysight Testbed +The tests can run on single line card or multi line card in a sonic chassis system. + +### SONiC Chassis Topology + +

+ +

+ +## Topology +### SONiC DUT as T2 switch + + +

+ +

+ + +## Setup configuration +IBGP neighborship will be established between the fabric and each line card. + + +## Test cases +### Test case # 1 - Many-to-One congestion oversubscribe lossless traffic +#### Test objective +Test the response of lossy and lossless streams destined to a single egress port, with lossless traffic oversubscription causing congestion. +PFC pause frames should automatically be generated by the device under test (DUT) to throttle back the lossless traffic sources causing the over-subscription, thus avoid any lossless traffic drops. +The three aspects that will be verified are whether: +1. SONiC can send PFC pause frames on lossless priorities when there is congestion +2. SONiC has enough PFC headroom (lossless traffic has no packet drops) +3. The WRR scheduler works as expected + +#### Background +The diagram in the testbed setup shows a multi-ASIC router using a VOQ architecture, with line cards interconnected via a non-blocking fabric. Although the diagram appears to imply a multi-card, multi-ASIC architecture, the test and expected behavior also applies to a single-card, multi-ASIC architecture. +It is assumed that the VOQs within the ingress packet forwarding engines will request and receive credits as needed from the output ports' egress packet forwarding engines, with all the forwarding engines being treated equally regardless of whether the request/response messages need to traverse the fabric or can communicate within the same switching ASIC. +When the egress port is presented with more bandwidth that it can handle (egress congestion), the egress packet forwarding engine will use a weighted round robin (WRR) to fairly allocate bandwidth from the requestors' VOQs. For this version of the test, we will assume a SONiC configuration of a WRR scheduler with the same weights across all traffic priorities (lossy and lossless), and no strict-priority (SP) scheduling. 
+The case of ingress congestion would happen when the chosen input ports are serviced by the same switch ASIC's ingress forwarding engine, with a combined offered load higher than what the egress port could handle. The overall resulting behavior will still be the same, with the egress forwarding engine still allocating bandwidth to the VOQs in a fair manner per the configured schedules. +In either case, the ingress port VOQs, with not enough allocation granted for the lossless traffic streams causing the congestion, will in turn generate PFC pause (XOFF) messages out of the ingress port to temporarily stop the incoming lossless frames for the appropriate priority. + +#### Testbed Setup +The testbed setup consists of three IXIA ports and a SONiC router as the device under test (DUT). All IXIA ports should have the same line speed. The three DUT ports will be chosen at random across all the available ports in the router in order to eventually cover combinations of ports within the same line card and across line cards (or ports within the same ASIC vs. different ASICs on a single-card, multi-ASIC design). + + +

+ +

+ + +#### Test steps +In this experiment we will create a total of four streams, with each of the two test ports sending two streams each. The aggregate bandwidth will be slightly over 100%: + * The background traffic will include two lossy traffic streams, each with randomly chosen priorities (0..2, 5..7), and each having a 25% bandwidth. + * The test data traffic will consist of two lossless traffic streams, with the SONiC default lossless priorities of 3 and 4, and randomized so that either both streams are at 30% bandwidth each, or for one stream to be at 25% while the other is at 30% bandwidth. + +The mapping of background and test data traffic streams to IXIA transmit ports can be randomized while keeping two streams per port; for example, one test port could send two lossless traffic streams and the second port two lossy streams, or each port could send two mixed streams, one lossless, one lossy. + +This experiment needs the following eight steps: +1. Start the background traffic from IXIA Tx ports 1 & 2; the combined line rate will be 50% of the egress port capacity (25% line rate bandwidth per stream) +2. Start the test traffic from IXIA Tx ports 1 & 2; the additional increase in bandwidth will be either 55% or 60% depending on randomization (one stream with 25% BW, second stream with 30%, or both at 30% each), for a total oversubscription of either 105% or 110%. +3. After a few seconds, measure the received bandwidth per flow on the IXIA Rx port 3: each of the four flows should have an allocation of 25% of the total egress port bandwidth. There should be PFC pause messages being received at the IXIA transmit ports causing the over-subscription. +4. Stop all traffic from the two IXIA ports 1 & 2 +5. On the IXIA Rx port ensure that: +a. The received lossless traffic streams on IXIA port 3 show no missing and no out-of-order frames. +b. Lossy traffic streams rx rate should be equal to tx rate and did not drop any frames. +6. 
If possible, query the DUT drop counters to verify that no packets got discarded.
+7. Repeat the test by adding an impairment (Network emulator) device between Tx port and DUT ingress port to simulate various cable lengths and validate that PFC headroom is sufficient for variable cable lengths.
+8. Verify that the test results are the same for varying cable lengths.
+
+
+### Test case # 2 – Many-to-One Congestion oversubscribe lossy traffic
+#### Test objective
+Test the response of lossy and lossless streams destined to a single egress port, with lossy traffic causing oversubscription of the egress port.
+PFC pause frames should automatically be generated by the device under test (DUT) to throttle back the lossless traffic sources causing the over-subscription, avoiding any lossless traffic drops.
+The two aspects that will be verified are whether:
+* Packet drops really happen when lossy traffic suffers from congestion without triggering PFC
+* WRR scheduler works as expected
+
+#### Background
+This test uses a similar diagram and description of the multi-ASIC router as the ‘Many-to-One Lossless Traffic Congestion’ test case.
+In this variant, the egress port packet forwarding engine, facing similar oversubscription demands as the previous test, will fairly grant resources to the VOQs in the packet forwarding engines. The ingress port VOQs, with not enough allocation granted for the lossy traffic streams causing the congestion, will eventually run out of the memory buffer resources for the aggressor VOQs and will be forced to drop some of their frames.
+
+#### Testbed Setup
+The test bed setup consists of three IXIA ports and a SONiC router as the device under test (DUT). All IXIA ports should have the same line speed. The three DUT ports will be chosen at random across all the available ports in the router in order to eventually cover combinations of ports within the same line card and across line cards (or ports within the same ASIC vs. 
different ASICs on a single-card, multi-ASIC design). + +

+ +

+ + +#### Test steps +In this experiment we will create a total of four streams, with each of the two test ports sending two streams each. The aggregate bandwidth will be slightly over 100%: + * The background traffic will include two lossless traffic streams, with the SONiC default lossless priorities of 3 and 4, and each having a 25% bandwidth. + * The test data traffic will consist of two lossy traffic streams, each with randomly chosen priorities (0..2, 5..7), and randomized so that either both streams are at 30% bandwidth each, or for one stream to be at 25% while the other is at 30% bandwidth. + +The mapping of background and test data traffic streams to IXIA transmit ports can be randomized while keeping two streams per port; for example, one test port could send two lossless traffic streams and the second port two lossy streams, or each port could send two mixed streams, one lossless, one lossy. + +This experiment needs the following eight steps: +1. Start the background traffic from IXIA Tx ports 1 & 2; the combined line rate will be 48% of the egress port capacity (24% line rate bandwidth per stream) +2. Start the test traffic from IXIA Tx ports 1 & 2; the additional increase in bandwidth will be either 55% or 60% depending on randomization (one stream with 25% BW, second stream with 30%, or both at 30% each), for a total oversubscription of either 103% or 108%. +3. After a few seconds, measure the received bandwidth per flow on the IXIA Rx port 3: total egress port bandwidth should be 100%. Internally the router should be dropping lossy frames from the streams causing the over-subscription. +4. Stop all traffic from the two IXIA ports 1 & 2 +5. On the IXIA Rx port ensure that: +a. The received lossless traffic streams on IXIA port 3 show no missing and no out-of-order frames. +b. The lossy traffic streams causing the oversubscription had some frame drops. +6. 
If possible, query the DUT drop counters to verify that the lossy stream drops were accounted for.
+7. Repeat the test by adding an impairment (Network emulator) device between Tx port and DUT ingress port to simulate various cable lengths and validate that PFC headroom is sufficient for variable cable lengths.
+8. Verify that the test results are the same for varying cable lengths.
+
+### Test case # 3 – Many-to-One Congestion oversubscribe lossless and lossy traffic
+#### Test objective
+Test the response of lossy and lossless streams destined to a single egress port, with both lossless & lossy traffic causing oversubscription of the egress port.
+PFC pause frames should automatically be generated by the device under test (DUT) to throttle back the lossless traffic sources causing the over-subscription, avoiding any lossless traffic drops.
+The two aspects that will be verified are whether:
+* Packet drops really happen when lossy traffic suffers from congestion without triggering PFC
+* WRR scheduler works as expected
+
+#### Background
+This test uses a similar diagram and description of the multi-ASIC router as the ‘Many-to-One Lossless Traffic Congestion’ test case.
+In this variant, the egress port packet forwarding engine, facing similar oversubscription demands as the previous test, will fairly grant resources to the VOQs in the packet forwarding engines. The ingress port VOQs, with not enough allocation granted for the lossless traffic streams causing the congestion, will in turn generate PFC pause (XOFF) messages out of the ingress port to temporarily stop the incoming lossless frames for the appropriate priority and for the lossy traffic streams causing the congestion, will eventually run out of the memory buffer resources for the aggressor VOQs and will be forced to drop some of their frames.
+
+#### Testbed Setup
+The test bed setup consists of three IXIA ports and a SONiC router as the device under test (DUT). All IXIA ports should have the same line speed. 
The three DUT ports will be chosen at random across all the available ports in the router in order to eventually cover combinations of ports within the same line card and across line cards (or ports within the same ASIC vs. different ASICs on a single-card, multi-ASIC design). + + + +

+ +

+ + +#### Test steps +In this experiment we will create a total of four streams, with each of the two test ports sending two streams each. The aggregate bandwidth will be slightly over 100%: + * The test data traffic will include two lossless traffic streams, with the SONiC default lossless priorities of 3 and 4, and first stream having a 40% bandwidth from port 1 and second stream having 20% of bandwidth from port 2. + * The background traffic will consist of two lossy traffic streams, each with randomly chosen priorities (0..2, 5..7), and one stream to be at 20% from port 1while the other is at 40% bandwidth from port 2. + +The mapping of background and test data traffic streams to IXIA transmit ports can be randomized while keeping two streams per port; for example, one test port could send two lossless traffic streams and the second port two lossy streams, or each port could send two mixed streams, one lossless, one lossy. + +This experiment needs the following eight steps: +1. Start the test data traffic from IXIA Tx ports 1; the combined line rate will be 60% of the egress port capacity. +2. Start the background traffic from IXIA Tx ports 2; the additional increase in bandwidth will be 60%, for a total oversubscription of 120%. +3. After a few seconds, measure the received bandwidth per flow on the IXIA Rx port 3: test data traffic and background traffic with 20% of bandwidth should not lose any packet. Internally the router should be dropping lossy frames from the streams causing the over-subscription (Port 2). There should be PFC pause messages being received at the IXIA transmit port 1 stream 2 causing the over-subscription. +4. Stop all traffic from the two IXIA ports 1 & 2 +5. On the IXIA Rx port ensure that: +a. The received lossless traffic streams on IXIA port 3 show no missing and no out-of-order frames. +b. The lossy traffic streams causing the oversubscription had some frame drops. +6. 
If possible, query the DUT drop counters to verify that the lossy stream drops were accounted for. +7. Repeat the test by adding a impairment (Network emulator) device between Tx port and DUT ingress port to simulate various cable lengths and validate that PFC headroom is sufficient for variable cable lengths. +8. Verify that the test results are same for varying cable lengths. + + + +### Test case # 4 – Many-to-One with Fluctuating Lossless Traffic Congestion +#### Test objective +Test the dynamic response of lossy and lossless streams destined to a single egress port when the lossless streams cause periods of over-subscription. +PFC pause frames should automatically be generated by the device under test (DUT) to throttle back the lossless traffic sources causing the over-subscription, avoiding any lossless traffic drops. The PFC pause frame generation should stop once the over-subscription condition subsides. + +#### Background +This test uses a similar diagram and description of the multi-ASIC router as the ‘Many-to-One Lossless Traffic Congestion’ test case (it is indeed a superset of that test). +In this variant, we will use all the lossy and lossless traffic priorities simultaneously, and will expect an even distribution of traffic across all eight priorities even under periods of oversubscription caused by lossless traffic streams. The assumption is that the SONiC default scheduler will allocate same weights to all eight priorities in its WRR scheduler with no strict priority (SP) priorities configured. + +#### Testbed Setup + +The test bed setup consists of three IXIA ports and a SONiC router as the device under test (DUT). All IXIA ports should have the same line speed. The three DUT ports will be chosen at random across all the available ports in the router in order to eventually cover combinations of ports within the same line card and across line cards (or ports within the same ASIC vs. different ASICs on a single-card, multi-ASIC design). + + +

+ +

+ + +#### Test steps + In this experiment we will create a total of six streams, four lossy streams and two lossless streams: + * The background traffic will include four lossy traffic streams, with any priorities 0..2 and 5..6, each having 20% bandwidth for a total of 80% of the port line rate. + * The test data traffic will include two lossless traffic flows, with the SONiC default lossless priorities of 3 and 4. + * Each of lossless traffic flows will be shaped to have line rate of 20% and 10%, so that there are periods where both lossless flows contribute a bandwidth of 30% (which should cause over-subscription on the egress port). + +The mapping of background and test data traffic streams to IXIA transmit ports can be randomized; the only constraint is for a transmit port to have at least one stream assigned to it. + This experiment needs the following steps: +1. Start the background and test traffic from IXIA Tx ports 1 & 2. +2. The combined bandwidth of all the streams going to the egress port will be 110% (over-subscription). +3. After a few seconds, measure the received bandwidth per flow on the IXIA Rx port 3: each of the four lossy flows should have an allocation of 18% of the total egress port bandwidth. Lossless flows should have an allocation of 18% and 10% of the total egress port bandwidth. There should be PFC pause messages being received at the IXIA transmit port(s) causing the over-subscription. +4. Perform a few more bandwidth samples to ensure there are no drastic changes in the final allocated bandwidth across all traffic priorities. +5. Stop all traffic from the two IXIA Tx ports +6. On the IXIA Rx port ensure that: +a. The received lossless traffic streams on IXIA port 3 show no missing and no out-of-order frames. +b. The lossy traffic streams got their fair share of allocated bandwidth and did not drop frames. +7. If possible, query the DUT drop counters to verify that no packets got discarded. +8. 
Repeat the test by adding a impairment (Network emulator) device between Tx port and DUT ingress port to simulate various cable lengths. +9. Verify that the test results are same for varying cable lengths. + + + +### Test case # 5 – Lossless Response to External PFC Pause Storms +#### Test objective +Test the dynamic response of lossless streams when subject to continuous external PFC pause messages intending to fully block their bandwidth. +The key question to answer is whether DUT’s PFC mechanism responds fast enough to fully stop lossless streams without drops. + +#### Background +This test uses a similar diagram and description of the multi-ASIC router as the 'Lossless Traffic Causing Egress or Ingress Congestion' test case. +Rather than using traffic aggregation to cause over-subscription, we will create conditions of egress congestion by applying periods of continuous PFC pause messages into the egress port. +Similar to the cases of lossless traffic congestion, an egress packet forwarding engine receiving a PFC pause message for a particular priority will stop granting resources to any VOQs requesting credits for the egress port and priority combination. The affected VOQs in the ingress packet forwarding engines will in-turn fill-up their buffering resources, eventually hitting watermarks that will cause the ingress port to apply backpressure to the sender via PFC pause out of its port. +In this test case we will also assume that all the lossy and lossless traffic priorities have a WRR scheduler configured with the same weights across all eight priorities, with no strict priority (SP) priorities configured. + +#### Testbed Setup +The test bed setup consists of three IXIA ports and a SONiC router as the device under test (DUT). All IXIA ports should have the same line speed. 
The three DUT ports will be chosen at random across all the available ports in the router in order to eventually cover combinations of ports within the same line card and across line cards (or ports within the same ASIC vs. different ASICs on a single-card, multi-ASIC design). + + +

+ +

+ +In addition, PFC watchdog must be disabled at the SONiC DUT. Otherwise, the DUT will trigger PFC watchdog to drop packets when it detects persistent PFC pause storms. The command to disable PFC watchdog is sudo pfcwd stop. + +#### Test steps +In this experiment we will create a total of five traffic items: + * The background traffic will include two lossy traffic streams, each with randomly chosen priorities (0..2, 5..7), and each having a 25% bandwidth. + * The test data traffic will consist of two lossless traffic streams, with the SONiC default lossless priorities of 3 and 4, and each having a 25% bandwidth. + * PFC pause storm: Persistent PFC pause frames from the IXIA Rx port. The priority of PFC pause frames will be randomized to those of the test data traffic streams, or include all lossless priorities. The inter-frame transmission interval should be smaller than the per-frame pause duration. +The mapping of background and test data traffic streams to IXIA transmit ports can be randomized while keeping two streams per port; for example, one test port could send two lossless traffic streams and the second port two lossy streams, or each port could send two mixed streams, one lossless, one lossy. + +This experiment needs the following six steps: +1. Start the background and test traffic from IXIA Tx ports 1 & 2; the combined line rate will be 100% of the egress port capacity (25% line rate bandwidth per stream) +2. After a few seconds, start the PFC pause storm to fully block one or all test priorities at the switch. Ensure that: +a. The IXIA Rx port is receiving all lossy streams and any of the lossless streams not marked for PFC pause due to randomization. +b. The IXIA Rx port does not receive any of the lossless streams marked to be fully blocked by PFC. +c. If possible, query the DUT drop and watermark counters +3. After a few more seconds, stop the PFC pause storm; ensure that the IXIA Rx port statistics show that 100% of the traffic is being received. 
+4. Repeat steps 2 & 3 a few times, possibly with different PFC storm durations. +5. Stop the PFC storm and all traffic. +6. On the IXIA Rx port ensure that: +a. The received lossless traffic streams on IXIA port 3 show no missing and no out-of-order frames. +b. The lossy traffic streams did not drop frames. + +### Test case # 6 – Lossless Response to Throttling PFC Pause +#### Test objective +Test the reduction in lossless streams bandwidth when subject to throttling PFC pause messages. The PFC watchdog, which should be enabled for this test, should not intervene. + +#### Background +This test uses the same configuration and test bed as similar diagram and description of the multi-ASIC router as the ‘Lossless Response to External PFC Pause Storms ' test case. + +In this test case we will also assume that all the lossy and lossless traffic priorities have a WRR scheduler configured with the same weights across all eight priorities, with no strict priority (SP) priorities configured. + +#### Testbed Setup +The test bed setup consists of three IXIA ports and a SONiC router as the device under test (DUT). All IXIA ports should have the same line speed. The three DUT ports will be chosen at random across all the available ports in the router in order to eventually cover combinations of ports within the same line card and across line cards (or ports within the same ASIC vs. different ASICs on a single-card, multi-ASIC design). + +

+ +

+ +In addition, PFC watchdog must be enabled at the SONiC DUT (default mode of operation). + +#### Test Steps + +In this experiment we will create a total of five traffic items: +* The background traffic will include two lossy traffic streams, each with randomly chosen priorities (0..2, 5..7), and each having a 25% bandwidth. +* The test data traffic will consist of two lossless traffic streams, with the SONiC default lossless priorities of 3 and 4, and each having a 25% bandwidth. +* PFC pause throttling stream: Persistent PFC pause frames from the IXIA Rx port with enough repetition and quanta chosen so as to reduce one or all lossless streams down to 90% of their configured bandwidth. + +The mapping of background and test data traffic streams to IXIA transmit ports can be randomized while keeping two streams per port; for example, one test port could send two lossless traffic streams and the second port two lossy streams, or each port could send two mixed streams, one lossless, one lossy. + +This experiment needs the following six steps: +1. Start the background and test traffic from IXIA Tx ports 1 & 2; the combined line rate will be 100% of the egress port capacity (25% line rate bandwidth per stream) +2. After a few seconds, from IXIA port 3, start the PFC throttling stream onto one or all lossless priorities. Ensure that: +a. The IXIA RX port indicates that the targeted lossless streams got their bandwidth reduced by 90% +b. The IXIA Rx port is receiving all lossy streams and any of the lossless streams not marked for PFC pause to their nominal configured transmit bandwidth. +c. If possible, query the DUT drop and watermark counters +3. After a few more seconds, stop the PFC throttling stream; ensure that the IXIA Rx port statistics show that 100% of the traffic is being received. +4. Repeat steps 2 & 3 a few times, possibly with different throttling durations. +5. Stop the PFC throttling stream and all traffic. +6. On the IXIA Rx port ensure that: +a. 
The received lossless traffic streams on IXIA port 3 show no missing and no out-of-order frames. +b. The lossy traffic streams did not drop frames. diff --git a/docs/testplan/PFC_Testcase1.png b/docs/testplan/PFC_Testcase1.png new file mode 100644 index 00000000000..2a69c86d45e Binary files /dev/null and b/docs/testplan/PFC_Testcase1.png differ diff --git a/docs/testplan/PFC_Testcase2.png b/docs/testplan/PFC_Testcase2.png new file mode 100644 index 00000000000..dd19c5cf0f2 Binary files /dev/null and b/docs/testplan/PFC_Testcase2.png differ diff --git a/docs/testplan/PFC_Testcase3.png b/docs/testplan/PFC_Testcase3.png new file mode 100644 index 00000000000..ba9a0b95a90 Binary files /dev/null and b/docs/testplan/PFC_Testcase3.png differ diff --git a/docs/testplan/PFC_Testcase4.png b/docs/testplan/PFC_Testcase4.png new file mode 100644 index 00000000000..800b4b5f5ac Binary files /dev/null and b/docs/testplan/PFC_Testcase4.png differ diff --git a/docs/testplan/PFC_Testcase_5.png b/docs/testplan/PFC_Testcase_5.png new file mode 100644 index 00000000000..bc101828730 Binary files /dev/null and b/docs/testplan/PFC_Testcase_5.png differ diff --git a/docs/testplan/PFC_Testcase_6.png b/docs/testplan/PFC_Testcase_6.png new file mode 100644 index 00000000000..f1fb7321d60 Binary files /dev/null and b/docs/testplan/PFC_Testcase_6.png differ diff --git a/docs/testplan/SONiC_Chassis_Topology.png b/docs/testplan/SONiC_Chassis_Topology.png new file mode 100644 index 00000000000..6949e78443d Binary files /dev/null and b/docs/testplan/SONiC_Chassis_Topology.png differ diff --git a/docs/testplan/T2_Topology.png b/docs/testplan/T2_Topology.png new file mode 100644 index 00000000000..d5382417125 Binary files /dev/null and b/docs/testplan/T2_Topology.png differ diff --git a/docs/testplan/WoL-test-plan.md b/docs/testplan/WoL-test-plan.md new file mode 100644 index 00000000000..5e48160dd48 --- /dev/null +++ b/docs/testplan/WoL-test-plan.md @@ -0,0 +1,71 @@ +# Wake-on-LAN Test Plan + +## 1 
Overview + +The purpose is to test the functionality of **WoL** (**Wake-on-LAN**) feature on SONiC switch. + +For details of WoL feature design, please refer to HLD: [Wake-on-LAN in SONiC](https://github.com/sonic-net/SONiC/blob/master/doc/wol/Wake-on-LAN-HLD.md). + +### 1.1 Scope + +The test is targeting a running SONiC system will fully functioning configuration. The purpose of this test is to verify the function of WoL CLI utility. + +### 1.2 Testbed + +The test can run on both physical and virtual testbeds with any topology. + +### 1.3 Limitation + +The functional test of WoL depends on NIC capability of target device. In real scenarios, the device waiting for wake up is in low-power mode. In this situation, the network interface is UP but wol packet is not delivered to CPU of device. Instead, the wol packet is handled by NIC and NIC will turn on the device. + +In this testplan, the interfaces in PTF are not in low-power mode. **We only verify the SONiC switch can send out wol packet as expected. Will not verify the target device can be woken up.** + +## 2 Setup Configuration + +No setup pre-configuration is required, test will setup and clean-up all the configuration. + +## 3 Test + +### Test for WoL CLI Utility + +The test will issue `wol` commands with various parameter combinations on DUT, then check if target devices (PTF) can capture the expected packet(s). + +#### Test case #1 - Verrify send a wol packet to a specific interface +1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. +1. Issue command on DUT host: `wol ` (e.g., `wol Ethernet10 00:11:22:33:44:55`) +1. Stop `tcpdump` process in PTF. +1. Check if only one wol packet exists in `.pcap` file and the content is expected. + +#### Test case #2 - Verify send a wol packekt to each member of a vlan +1. Start multiple `tcpdump` processes in PTF to capture WoL packet on each interfaces. 
Save the captured packets to different `.pcap` files. +1. Issue command on DUT host: `wol `. (e.g., `wol Vlan1000 00:11:22:33:44:55`) +1. Stop all `tcpdump` processes in PTF. +1. *For each interface in vlan*, check if one wol packet exists in corresponding `.pcap` file and the content is expected. +1. *For each interface not in vlan*, check no wol packet exists in corresponding `.pcap` file. + +#### Test case #3 - Verify send a broadcast wol packet +1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. +1. Issue command on DUT host: `wol -b` (e.g., `wol Ethernet10 00:11:22:33:44:55 -b`) +1. Stop `tcpdump` process in PTF. +1. Check if only one wol packet exists in `.pcap` file and the content is expected. Especially, verify the destination MAC in Ethernet frame header is broadcast MAC address (`FF:FF:FF:FF:FF:FF`). + +#### Test case #4 - Verify send a wol packet with password +1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. +1. Issue command on DUT host: `wol -p ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -p 192.168.1.1`) +1. Stop `tcpdump` process in PTF. +1. Check if only one wol packet exists in `.pcap` file and the content is expected. Especially, verify the password in wol packet is same as command. + +#### Test case #5 - Verify send multiple wol packets with specific interval to a specific interface +1. Start `tcpdump` process in PTF to capture WoL packet on spacific interface. Save the captured packets to `.pcap` file. +1. Issue command on DUT host: `wol -c -i ` (e.g., `wol Ethernet10 00:11:22:33:44:55 -c 3 -i 2000`) +1. Stop `tcpdump` process in PTF. +1. Check if exact `` wol packets exist in `.pcap` file and the content is expected. Moreover, check the time interval between each wol packet in `.pcap` file is ALMOST SAME[^1] as input ``. 
+ +#### Test case #6 - Verify send multiple wol packets with specific interval to each membor of a vlan +1. Start multiple `tcpdump` processes in PTF to capture WoL packet on each interfaces. Save the captured packets to different `.pcap` files. +1. Issue command on DUT host: `wol -c -i ` (e.g., `wol Vlan1000 00:11:22:33:44:55 -c 3 -i 2000`) +1. Stop `tcpdump` process in PTF. +1. *For each interface in vlan*, check if exact `` wol packets exist in `.pcap` file and the content is expected. Moreover, check the time interval between each wol packet in `.pcap` file is ALMOST SAME[^1] as input ``. +1. *For each interface not in vlan*, check no wol packet exists in corresponding `.pcap` file. + +[^1]: ALMOST SAME means we should tolerate small errors caused by electrical characteristics. diff --git a/docs/testplan/pac/PAC_Topology.png b/docs/testplan/pac/PAC_Topology.png new file mode 100644 index 00000000000..f8ab4a60092 Binary files /dev/null and b/docs/testplan/pac/PAC_Topology.png differ diff --git a/docs/testplan/pac/Port_Access_Control.md b/docs/testplan/pac/Port_Access_Control.md new file mode 100644 index 00000000000..5ad9f6db415 --- /dev/null +++ b/docs/testplan/pac/Port_Access_Control.md @@ -0,0 +1,622 @@ +# Port Access Control(PAC) + +[TOC] + +## Test Plan Revision History + +| Rev | Date | Author | Change Description | +| ---- | ---------- | ----------------- | ---------------------------- | +| 1 | 18/05/2023 | Lakshminarayana D | Initial Version of test plan | + +## Definition/Abbreviation + +| **Term** | **Meaning** | +| ---------- | ---------------------------------------- | +| VLAN | Virtual Local Area Network | +| PAC | Port Access Control | +| EAPoL | Extensible Authentication Protocol over LAN | +| MAC | Media Access Control | +| MAB | Mac Authentication Bypass | +| PO | Port Channel | +| NAS | Network Access Switch | +| DUT | Device Under Test | +| RADIUS | Remote Authentication Dial In User service | +| FDB | Forwarding Database | +| Supplicant | A 
client that attempts to access services offered by the Authenticator | +| PAE | Port Access Entity | + +## Introduction + +### Objective + +The main objective of this document is to cover the test cases that will be executed for Port authentication methods 802.1x and MAB. Topologies and test cases for testing the feature will be discussed as part of this document. + +### Scope + +- PAC authentication of hosts on access port +- This functionality has been tested using the SPyTest framework. In order to emulate 802.1x and MAB clients, traffic generators like Ixia and Spirent will be used. FreeRADIUS is using for User Authentication. + +### Out of scope + +- Authentication on Trunk port not supported + +## Feature Overview + +Port Access Control (PAC) feature provides validation of client and user credentials to prevent unauthorized access to a specific switch port. + +Local Area Networks (LANs) are often deployed in environments that permit unauthorized devices to be physically attached to the LAN infrastructure, or permit unauthorized users to attempt to access the LAN through equipment already attached. In such environments, it may be desirable to restrict access to the services offered by the LAN to those users and devices that are permitted to use those services. Port access control makes use of the physical characteristics of LAN infrastructures in order to provide a means of authenticating and authorizing devices attached to a LAN port that has point-to-point connection characteristics and of preventing access to that port in cases in which the authentication and authorization process fails. In this context, a port is a single point of attachment to the LAN, such as Ports of MAC bridges and associations between stations or access points in IEEE 802.11 Wireless LANs. + +802.1x: + +IEEE 802.1X-2004 is an IEEE Standard for Port Access Control (PAC) that provides an authentication mechanism to devices wishing to attach to a LAN. 
The standard defines Extensible Authentication Protocol Over LAN (EAPoL). The 802.1X standard describes an architectural framework within which authentication and consequent actions take place. It also establishes the requirements for a protocol between the authenticator and the supplicant, as well as between the authenticator and the authentication server. + +MAC Authentication Bypass(MAB): + +Simple devices like camera or printers which do not support 802.1x authentication can make use of MAB feature where the device gets authenticated based on the device MAC address. + +## Test Framework +Using SPyTest framework to test this feature. Traffic generators like Ixia and Spirent will be using to simulate 802.1x and MAB clients. FreeRADIUS is using for User Authentication and Authorization. + +## 1 Test Focus Areas + +### 1.1 CLI Testing + + - Verify port authentication can be enabled only on physical interfaces and gets denied on VLAN, Portchannel, PO member ports and sub interfaces. + - Verify configured CLI fields are updated properly in respective show commands + +### 1.2 Functional Testing + + - Verify all data traffic is blocked when PAC is enabled on the port. + - Verify 802.1x client authentication in single-host mode and verify only first authenticated user is allowed. + - Verify 802.1x client with multi-host mode and verify all users on the port are allowed after first-user gets authenticated. + - Verify 802.1x client authentication in multi-auth mode and verify all users with valid credentials gets authenticated. + - Verify in multi-auth mode, one of the clients logoff does not impact other authenticated clients. + - Verify in multi-host mode, if primary host gets logged-off, other hosts are blocked and verify after authenticating again. + - Verify 802.1x client authentication with port-control mode as force-authorized. + - Verify 802.1x client authentication with port-control mode as force-unauthorized. 
+ - Verify interface level 802.1x pae authenticator disable/enable. + - Verify global 802.1x system-auth-control enable/disable. + - Verify enabling re-authentication with different re-authenticate timer and disabling authentication periodic shouldn't allow re-authentication. + - Verify client authentication with MAB auth-type as EAP-MD5. + - Verify client authentication with MAB auth-type as PAP. + - Verify client authentication with MAB auth-type as CHAP. + - Verify authentication order with user-configured priorities for different authentication methods. + - Verify non-default max-users per port and check remaining clients are denied. + - Verify that when host mode changes, the authenticated clients gets removed and traffic is blocked. + - Verify that when authentication order is set to 802.1x, then only 802.1x client allowed to authenticate. + - Verify that when authentication order is set to MAB, then 802.1x client authentication is not successful. + - Verify MAB client authentication with port-control mode toggle between force authorized/unauthorized and auto. + - Verify that 802.1x and MAB client is not authenticated if it's RADIUS assigned VLAN is not available statically on the authenticator switch. + - Verify 802.1x and MAB client is not authenticated if RADIUS does not assign a VLAN and the port is configured with tagged VLAN. + - Verify a port with Multi-auth mode can have authenticated clients in different radius assigned VLANs. 
+ - Verify that 802.1x and MAB client is not authenticated if RADIUS does not assign a VLAN and the port's configured untagged VLAN (Access VLAN) is not available + - Verify the same MAB client authentication on different port after authenticated and verify MAC movement of the client + - Verify the same 802.1x client authentication on different port after authenticated and verify MAC movement of the client + - Verify the same 802.1x and MAB client authentication when PAC and ACLs applied on a same port + +### 1.3 Reboot and Trigger Testing + + - Verify Client authentication after reboot + - Verify Client authentication after warmboot + - Verify Client authentication after config reload + - Verify Client authentication after port toggle + +### 1.4 Scale Testing + + - Verify 128 max supported 802.1x clients on DUT. + - Verify 128 max supported MAB clients on DUT. + - Verify that the 128 maximum supported clients on DUT can be authenticated by using both 802.1x and MAB clients. + + +## 2 Topologies + +## 2.1 Topology 1 + +![PAC](PAC_topology.png "Figure 1: Topology 1") + +## 3 Test Case and objectives + +### **3.1 CLI Test Cases** + +### 3.1.1 Verify port authentication can be enabled only on physical interfaces and gets denied on VLAN, Portchannel, Po member ports and sub interfaces + +| **Test ID** | **PAC_CLI_001** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify port authentication can be enabled only on physical interfaces and gets denied on VLAN, Portchannel, PO member ports and sub interfaces** | +| **Test Setup** | **Topology1** | +| **Type** | **CLI** | +| **Steps** | 1. Verify port authentication configuration gets denied for VLAN interface,Sub interface and Portchannel interfaces
2. Verify authentication can not be enabled on Portchannel member ports
3. Enable authentication on physical port and add it to Portchannel and verify it is not allowed
4. Enable authentication on loopback port and verify it is not allowed
| + + +### 3.1.2 Verify configured CLI fields are updated properly in respective show commands + +| **Test ID** | **PAC_CLI_002** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify configured CLI fields are updated properly in respective show commands** | +| **Test Setup** | **Topology1** | +| **Type** | **CLI** | +| **Steps** | 1. Enable 802.1x globally using command "config dot1x system-auth-control enable" and verify administrative mode as enabled in "show dot1x" command
2. Disable 802.1x globally using command "config dot1x system-auth-control disable" and verify administrative mode as disabled in "show dot1x" command
3. Configure port-control mode to auto using command "config authentication port-control interface auto Ethernet0" and verify port control mode is changed to auto from force-authorized in "show authentication interface Ethernet0"
4. Configure port-control mode to force-unauthorized using command "config authentication port-control interface force-unauthorized Ethernet0" and verify port control mode is changed to force-unauthorized
5. Enable pae role as authenticator on interface using command "config dot1x pae interface authenticator Ethernet0" and verify pae mode is changed to authenticator in "show dot1x detail Ethernet0" and "show dot1x detail all"
6. Disable pae role on interface using command "config dot1x pae interface none Ethernet0" and verify pae mode is changed to none in "show dot1x detail Ethernet0" and "show dot1x detail all"
7. Configure host-mode to multi-auth using command "config authentication host-mode interface multi-auth Ethernet0" and verify host mode is changed to multi-auth from multi-host in "show authentication interface Ethernet0"
8. Configure host-mode to single-host using command "config authentication host-mode interface single-host Ethernet0" and verify host mode is changed to single-host from multi-host in "show authentication interface Ethernet0"
9. Configure max-users to non-default value using command "config authentication max-users interface 8 Ethernet0" and verify max-users field is changed to 8 from 16 in "show authentication interface Ethernet0"
10. Enable authentication periodic using command "config authentication periodic interface enable Ethernet0" and verify re-authentication periodic is enabled in "show authentication interface Ethernet0"
11. Disable authentication periodic command "config authentication periodic interface disable Ethernet0" and verify re-authentication periodic is disabled in "show authentication interface Ethernet0"
12. Configure authentication timer command "config authentication timer re-authenticate 100 Ethernet0" and verify re-authentication period is updated properly in "show authentication interface Ethernet0"
13. Configure authentication order to 802.1x using command "config authentication order interface dot1x Ethernet0" and verify configured method order is updated to 802.1x in "show authentication interface Ethernet0"
14. Configure authentication priority to 802.1x using command "config authentication priority interface dot1x Ethernet0" and verify configured method priority is updated to 802.1x in "show authentication interface Ethernet0"
15. Enable MAB with auth-type pap on interface using command "config MAB interface enable auth-type pap Ethernet0" and verify MAB admin mode is enabled with auth-type pap in "show mab Ethernet0" or "show mab"
16. Disable MAB on interface using command "config mab interface disable Ethernet0" and verify MAB admin mode is disabled in "show mab Ethernet0" and "show mab"
| + + +### **3.2 Functional Test Cases** + +### 3.2.1 Verify all data traffic is blocked when PAC is enabled on the port. + +| **Test ID** | **PAC_FUNC_001** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify all data traffic is blocked when PAC is enabled on the port** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication on an interface
2. Verify all data traffic is blocked when PAC is enabled on the port
3. Verify EAPoL packets are not dropped
| + + +### 3.2.2 Verify 802.1x authentication in single-host mode and verify only first authenticated user is allowed. + +| **Test ID** | **PAC_FUNC_002** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify 802.1x authentication in single-host mode and verify only first authenticated user is allowed.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication on interface on DUT
2. Configure RADIUS server on the authenticator.
3. Enable authentication host mode to single-host mode on the interface.
4. Initiate Authentication from 802.1x supplicant and Verify Authenticator encapsulates EAP packets and sends it to RADIUS server
5. Verify Authenticator moves client to Authenticated state after sending EAP success, once authenticator receives Access-accept from RADIUS server
6. Verify "show authentication clients all" to see the client authentication state
7. Verify static MAC FDB entry gets populated with client mac address
8. Verify traffic gets forwarded from the client after it gets authenticated on the port and Verify client's traffic forwarding from the RADIUS assigned VLAN if not port's untagged VLAN is used for authorizing the client.
9. Verify Client2 is blocked from accessing the server in single-host mode
10. Clear the MAC address table, and check that the statically created FDB entry for the client is not cleared and client authentication is not disturbed.<br/>
11. Logoff client1 and verify FDB entry gets deleted and client1 gets blocked for all the traffic
12. Verify once client1 is deleted, client2 with single-host mode can be authenticated
13. Clear the clients using command 'sonic-clear authentication sessions interface' and verify the client gets deleted on the device<br/>
| + + +### 3.2.3 Verify 802.1x with multi-host mode and verify all users on the port are allowed after first-user gets authenticated. + +| **Test ID** | **PAC_FUNC_003** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify 802.1x with multi-host mode and verify all users on the port are allowed after first-user gets authenticated.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication on interface.
2. Configure RADIUS server on the authenticator.
3. Enable authentication host mode to multi-host mode
4. Initiate Authentication from 802.1x supplicant and Verify Authenticator encapsulates EAP packets and sends it to RADIUS server
5. Verify Authenticator moves client to Authenticated state after sending EAP success, once authenticator receives Access-accept from RADIUS server<br/>
6. Verify "show authentication clients all" to see the client state<br/>
7. Verify client's MAC FDB entry is populated once the client sends traffic after client gets authenticated<br/>
8. Verify untagged and tagged traffic gets forwarded from the client's RADIUS assigned VLAN if not port's untagged VLAN after it gets authenticated on the port, check non-matching tagged traffic is not allowed.<br/>
9. Verify all the subsequent Clients' traffic connected to port also gets access and check MAC address gets updated for all those clients<br/>
10. Logoff client1 and verify all the clients connected to the port get blocked<br/>
| + + +### 3.2.4 Verify 802.1x client authentication in multi-auth mode and verify all users with valid credentials gets authenticated. + +| **Test ID** | **PAC_FUNC_004** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify 802.1x in multi-auth mode and verify all users with valid credentials gets authenticated.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication on interface.
2. Configure RADIUS server on the authenticator.
3. Enable authentication host mode to multi-auth mode
4. Initiate Authentication from multiple 802.1x supplicants connected to the same NAS port
5. Verify Authenticator encapsulates EAP packets and sends it to RADIUS server
6. Verify Authenticator moves all the clients to Authenticated state after sending EAP success, once authenticator receives Access-accept from RADIUS server for each client
7. Verify "show authentication clients all" to see the clients authentication state
8. Verify MAC FDB entry gets populated with client MAC addresses; check that clearing the MAC address table does not clear these statically created FDB entries and client authentication is not disturbed.<br/>
9. Verify traffic gets forwarded for all the authenticated Clients on the port
10. Clear the clients using command 'sonic-clear authentication sessions interface all'
11. Verify all clients get deleted on the device and their access is blocked<br/>
| + + +### 3.2.5 Verify in multi-auth mode, one of the clients logoff does not impact other authenticated clients. + +| **Test ID** | **PAC_FUNC_005** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify in multi-auth mode, one of the clients logoff does not impact other authenticated clientss.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication on port in multi-auth mode
2. Verify multiple Clients get authenticated individually<br/>
3. Logoff first client and verify other clients are still in authenticated state
4. Verify only Client1 blocked access and other clients are allowed access to server and traffic gets forwarded
5. Try to authenticate client1 again and check authentication is successful and existing clients' authentication is not disturbed.<br/>
6. Clear one of the clients using command 'sonic-clear authentication sessions mac' and check only its authentication is cleared and the remaining clients are not disturbed.<br/>
| + + +### 3.2.6 Verify in multi-host mode, if primary host gets logged-off, other hosts are blocked and verify after authenticating again. + +| **Test ID** | **PAC_FUNC_006** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify in multi-host mode, if primary host gets logged-off,other hosts are blocked and verify after authenticating again.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication on port in multi-host method
2. Verify First Client gets authenticated and all subsequent clients are allowed access to server
3. Logoff the primary Client1 and verify all Clients blocked access
4. Try authenticating Client2 as first user and verify all Clients are allowed access | + + +### 3.2.7 Verify 802.1x client authentication with port-control mode as force-authorized. + +| **Test ID** | **PAC_FUNC_007** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify 802.1x client authentication with port-control mode as force-authorized.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1.Enable 802.1x globally and at interface level
2. Configure RADIUS Server on the authenticator.
3. Change the port control mode of the interface to force-authorized. Verify the same using “show authentication interface
4. Verify the client is not required to authenticate and is set to "force-authorized"
5. Try accessing the server from 802.1x client. Client should be able to access the server
| + + +### 3.2.8 Verify 802.1x client authentication with port-control mode as force-unauthorized. + +| **Test ID** | **PAC_FUNC_008** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify 802.1x client authentication with port-control mode as force-unauthorized.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x globally and at interface level
2. Configure RADIUS Server on the authenticator.
3. Change the port control mode of the interface to force-unauthorized. Verify the same using “show authentication interface
4. Try accessing the server from 802.1x client. Client should not be able to access the server. Also verify that there is no EAPoL packets exchange between DUT and client
| + + +### 3.2.9 Verify interface level 802.1x pae authenticator disable/enable. + +| **Test ID** | **PAC_FUNC_009** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify interface level 802.1x pae authenticator disable/enable.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable authentication ('config dot1x pae interface authenticator ') on interfaces .
2. Enable 802.1x single-host on one port and multi-host on another port
3. Initiate Authentication from 802.1x Supplicant and Verify 802.1x aware clients gets authorized on the ports
4. Verify client gets access to server
5. Disable authentication ('config dot1x pae interface none ') at interface level with 802.1x clients authenticated
6. Verify 802.1x configurations are still present under interface
7. Re-enable authentication and verify all 802.1x clients gets authenticated and gets access to server
| + + +### 3.2.10 Verify global 802.1x system-auth-control enable/disable. + +| **Test ID** | **PAC_FUNC_010** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify global 802.1x authentication enable/disable.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication ('config dot1x system-auth-control enable') at global level.
2. Enable 802.1x authentication with single-host on one port and multi-host on another port
3. Initiate Authentication from 802.1x Supplicant and Verify 802.1x aware clients gets authorized on all the ports
4. Verify client gets access to server
5. Disable 802.1x authentication ('config dot1x system-auth-control disable') at global level with 802.1x clients authenticated
6. Verify 802.1x configurations still present under interface and ports move to unauthorized state
7. Verify authenticated 802.1x clients cleared on the authenticator
8. Re-enable global 802.1x authentication and verify all 802.1x clients gets authenticated
| + + +### 3.2.11 Verify enabling re-authentication with different re-authenticate timer and disabling authentication periodic shouldn't allow re-authentication. + +| **Test ID** | **PAC_FUNC_011** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify enabling re-authentication with different re-authenticate timer and disabling authentication periodic shouldn't allow re-authentication.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x globally and at interface level
2. Configure RADIUS server on the authenticator.<br/>
3. Verify Clients connected to NAS port gets authenticated
4. Verify by default no re-authentication happens for any of the authenticated clients
5. Enable "config authentication periodic interface enable " on the port(authentication time is server on the port by default, session termination action is set to default).
6. Verify client initiates re-authentication after Session-Timeout value, the client authentication is failed because the session termination action is set to default on client.
7. Configure re-authentication timer using "config authentication timer re-authenticate interface " on DUT between 1 to 65535 in seconds and verify client gets re-authenticated as per configured re-auth timer expires on that specific port
8. Modify the RADIUS server configuration such that the client authentication failed.
9. Verify clients gets deleted in re-authentication process after re-auth timer expires.
10. Remove re-authentication timer configuration on a port and verify client initiates re-authentication as authentication periodic enabled on the port after server supplied timeout value, the client authentication is failed because the session termination action is set to default on client.
11. Disable authentication periodic on one port and verify the authenticator does not attempt any re-authentication only for that specific port<br/>
| + + +### 3.2.12 Verify client authentication with MAB auth-type as EAP-MD5. + +| **Test ID** | **PAC_FUNC_012** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify client authentication with MAB auth-type as EAP-MD5.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable authentication on interface.
2. Configure RADIUS server on the authenticator.
3. Enable MAB with auth-type as EAP-MD5 and single-host mode on interface
4. Initiate Authentication from 802.1x Supplicant or Send data packets from client, Verify Authenticator (DUT) sends Access-Request to RADIUS server with username and password as Mac address learnt on the port
5. Verify DUT moves client to authenticated state once RADIUS server responds with Access-accept
6. Verify "show authentication clients all" to see the client state
7. Verify MAC FDB entry gets populated with the client mac address, check that clear mac address table doesn't clear this mac entry and client authentication is not disturbed.
8. Verify traffic gets forwarded after client gets authenticated on the port
9. Verify Client2 is blocked from accessing the server in single-Host mode
10. Logoff client1 and verify FDB entry gets deleted and client1 gets blocked for all the traffic
| + + +### 3.2.13 Verify client authentication with MAB auth-type as PAP. + +| **Test ID** | **PAC_FUNC_013** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify client authentication with MAB auth-type as PAP.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable authentication on interface.
2. Configure RADIUS server on the authenticator.
3. Enable MAB with auth-type as PAP and multi-host mode on interface
4. Initiate Authentication from 802.1x Supplicant or Send data packets from client, Verify Authenticator (DUT) sends Access-Request to RADIUS server with username and password as Mac address learnt on the port
5. Verify DUT moves client to Authenticated state once RADIUS server responds with Access-accept
6. Verify "show authentication clients all" to see the client state
7. Verify other clients connected to same port gets access to server
8. Verify MAC FDB entry gets populated with all the mac addresses
9. Verify traffic gets forwarded after client gets authenticated on the port
10. Logoff client1 and verify FDB entry gets deleted for the client on the port and access blocked for all the hosts
| + + +### 3.2.14 Verify client authentication with MAB auth-type as CHAP. + +| **Test ID** | **PAC_FUNC_014** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify client authentication with MAB auth-type as CHAP.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable authentication on interface.
2. Configure RADIUS server on the authenticator.
3. Enable MAB with auth-type as CHAP and multi-auth mode on interface
4. Initiate Authentication from multiple 802.1x Supplicant or Send data packets from multiple clients connected to the same NAS port
5. Verify Authenticator (DUT) sends Access-request to RADIUS server with mac address as username and password
6. Verify DUT moves client to Authenticated state after authenticator receives Access-accept from RADIUS server
7. Verify "show authentication clients all" to see the clients authentication state
8. Verify MAC FDB entry gets populated with client mac addresses
9. Verify traffic gets forwarded for all the authenticated Clients on the port
10. Verify unauthenticated new Host on the port not granted access
11. Logoff all the clients and verify Clients move to Unauthenticated state and access blocked
| + + +### 3.2.15 Verify authentication order with user-configured priorities for different authentication methods. + +| **Test ID** | **PAC_FUNC_015** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify authentication order with user-configured priorities for different authentication methods.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable both 802.1x and MAB authentication on an interface
2. Configure authentication priority as [dot1x, mab]
3. Send data packets from client and Verify client authenticated as a MAB client as per the authentication order
4. Try authenticate the same client using 802.1x and verify authentication is successful with 802.1x(has higher priority on the interface)
| + + +### 3.2.16 Verify non-default max-users per port and check remaining clients are denied + +| **Test ID** | **PAC_FUNC_016** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify non-default max-users per port and check remaining clients are denied** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable PAC at interface
2. Configure max-users as 10 on the interface
3. Verify only 10 hosts are authenticated in multi-auth mode and proper log message generated indicating max-users reached.
4. Verify 11th host will not be authenticated
5. Logoff one of the clients and try to authenticate that new 11th client now, check that it is authenticated successfully.
6. Delete max-user config and verify it resets to default 16 and all 16 clients got authenticated
| + + +### 3.2.17 Verify that when host mode changes, the authenticated clients gets removed and traffic is blocked. + +| **Test ID** | **PAC_FUNC_017** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify that when host mode changes, the authenticated clients gets removed and traffic is blocked.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication on port in multi-auth mode
2. Verify multiple Clients gets authenticated individually
3. Verify that traffic corresponding to those clients gets forwarded.
4. While clients are authenticated and traffic is forwarding, change the mode to multi-host mode on the interface.
5. Verify that change of mode config is successful, authenticated clients gets removed and traffic is blocked from the client on the interface
6. Now with port in multi-host mode, authenticate a single client and check traffic forwarding corresponding to this client and other clients also is successful.
7. While traffic is forwarding, change the mode to single-host mode and check config is successful and earlier client authentication gets removed and traffic is blocked.
8. With port in single-host mode, authenticate a client and check traffic forwarding is successful.
9. While traffic is forwarding, change the mode to multi-auth and check earlier client authentication is removed and traffic is blocked.
10. With port in multi-auth mode, authenticate multiple clients and check the corresponding clients traffic forwarding is successful.
| + + +### 3.2.18 Verify that when authentication order is set to 802.1x and host mode is set to multi-auth, then only 802.1x client allowed to authenticate. + +| **Test ID** | **PAC_FUNC_018** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify that when authentication order is set to 802.1x and host mode is set to multi-auth, then only 802.1x client allowed to authenticate.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x globally and on interface.
2. Configure 'config authentication order interface dot1x ' with mode as multi-auth and also enable MAB.
3. Initiate a 802.1x client and check authentication is successful.
4. Try initiate traffic for MAB client, and check it is rejected and MAB client should not get authenticated.
5. Try authenticating another 802.1x client, and check authentication is successful.
6. Verify that traffic from these 802.1x clients is forwarded.
| + + +### 3.2.19 Verify that when authentication order is set to MAB and host mode is set to multi-auth, then 802.1x client authentication is not successful. + +| **Test ID** | **PAC_FUNC_019** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify that when authentication order is set to MAB and host mode is set to multi-auth, then 802.1x client authentication is not successful.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x globally and on interface.
2. Configure 'config authentication order interface MAB ' with mode as multi-auth and also enable MAB.
3. Initiate a 802.1x client and check authentication is not successful as 802.1x method is not enabled.
4. Try initiate traffic for MAB client, and check its authentication is successful.
5. Try authenticating another MAB client, and check authentication is successful.
6. Verify that traffic from these MAB clients is forwarded.
| + + +### 3.2.20 Verify MAB client authentication with port-control mode toggle between force authorized/unauthorized and auto. + +| **Test ID** | **PAC_FUNC_020** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify MAB client authentication with port-control mode toggle between force -authorized/unauthorized and auto.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x globally and at interface level, enable MAB on the interface
2. Configure the port control mode to force-authorized and verify port is set to force-authorized and client gets access to server without any authentication
3. Change the port control mode to "auto" and verify Client requires authentication before granting server access
4. Change the port control mode to force-unauthorized and verify no clients on the port gets authenticated
5. Change it to "auto" and verify Client gets authenticated and then granted access to server
| + + +### 3.2.21 Verify that 802.1x and MAB client is not authenticated if it's RADIUS assigned VLAN is not available statically on the authenticator switch. + +| **Test ID** | **PAC_FUNC_021** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify that 802.1x and MAB client is not authenticated if it's RADIUS assigned VLAN is not available statically on the authenticator switch.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x system auth control globally and enable pae authenticator on interface.
2. Configure host mode as multi-auth, enable 802.1x and MAB on the interface.
3. Configure RADIUS server as VLAN attribute to non-existing VLAN ID in NAS.
4. Authenticate 802.1x and MAB clients and check authentication is not successful.
5. Create RADIUS assigned VLAN on NAS.
6. Try to authenticate same 802.1x and MAB client and verify authentication is successful.
| + + +### 3.2.22 Verify 802.1x and MAB client is not authenticated if RADIUS does not assign a VLAN and the port is configured with tagged VLAN. + +| **Test ID** | **PAC_FUNC_022** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify 802.1x and MAB client is not authenticated if RADIUS does not assign a VLAN and the port is configured with tagged VLAN.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x system-auth-control globally and enable pae authenticator on interface.
2. Configure host mode as multi-host, enable 802.1x and MAB on the interface.
3. Configure RADIUS server as VLAN attribute to existing VLAN ID in NAS.
4. Add switchport trunk VLAN configuration on 802.1x and MAB enabled interfaces.
5. Authenticate 802.1x and MAB clients and check authentication is not successful.
6. Change the port participation from trunk to access mode on the 802.1x and MAB enabled interfaces.
7. Try to authenticate same 802.1x and MAB client and verify authentication is successful.
| + + +### 3.2.23 Verify a port with Multi-auth mode can have authenticated clients in different radius assigned VLANs. + +| **Test ID** | **PAC_FUNC_023** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify a port with Multi-auth mode can have authenticated clients in different radius assigned VLANs.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x system auth control globally and enable pae authenticator on interface.
2. Configure host mode as multi-auth, enable 802.1x and MAB on the interface.
3. Configure multiple users with different VLANs in RADIUS server.
4. Try to authenticate client1 and verify client authentication is successful, port added in RADIUS assigned VLAN.
5. Try to Authenticate client2 with different RADIUS assigned VLAN and verify authentication is not successful.
6. Change the VLAN attribute to Client2 as Client1 in RADIUS server.
7. Try to authenticate Client2 again and verify authentication is successful.
| + + +### 3.2.24 Verify that 802.1x and MAB client is not authenticated if RADIUS does not assign a VLAN and the port's configured untagged VLAN (Access VLAN) is not available + +| **Test ID** | **PAC_FUNC_024** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify that 802.1x and MAB client is not authenticated if it's RADIUS assigned VLAN is not available statically on the authenticator switch.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x system auth control globally and enable pae authenticator on interface.
2. Configure host mode as multi-host, enable 802.1x and MAB on the interface.
3. Configure users without a VLAN attribute in the RADIUS server.
4. Add switchport access VLAN configuration on 802.1x and MAB enabled interfaces, do not create the configured VLAN on the switch
5. Authenticate 802.1x and MAB clients and check authentication is not successful.
6. Create RADIUS assigned VLAN on NAS.
7. Try to authenticate same 802.1x and MAB client and verify authentication is successful.
| + + +### 3.2.25 Verify the same MAB client authentication on different port after authenticated and verify MAC movement of the client +| **Test ID** | **PAC_FUNC_025** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify the same MAB client authentication on different port after authenticated and verify MAC movement of the client.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x system auth control globally and enable pae authenticator on interface.
2. Configure host mode as multi-host, enable MAB on the two interface port-1 and port-2.
3. Authenticate MAB client on port-1 and check authentication is successful.
4. Verify the FDB entry is installed properly on port-1.
5. Try to authenticate same MAB client on port-2 and verify authentication is successful on port-2.
6. Verify MAC is moved from port-1 to port-2 properly and verify traffic is forwarded to uplink port.
7. Now again, try to authenticate same MAB client on port-1 and verify authentication is successful on port-1.
8. Verify MAC is moved from port-2 to port-1 properly and verify traffic is forwarded to uplink port.
| + + +### 3.2.26 Verify the same 802.1x client authentication on different port after authenticated and verify MAC movement of the client +| **Test ID** | **PAC_FUNC_026** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify the same 802.1x client authentication on different port after authenticated and verify MAC movement of the client.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x system auth control globally and enable pae authenticator on interface.
2. Configure host mode as multi-host, enable 802.1x on the two interface port-1 and port-2.
3. Authenticate 802.1x client on port-1 and check authentication is successful.
4. Verify the FDB entry is installed properly on port-1.
5. Try to authenticate same 802.1x client on port-2 and verify authentication is successful on port-2.
6. Verify MAC is moved from port-1 to port-2 properly and verify traffic is forwarded to uplink port.
7. Now again, try to authenticate same 802.1x client on port-1 and verify authentication is successful on port-1.
8. Verify MAC is moved from port-2 to port-1 properly and verify traffic is forwarded to uplink port.
| + +### 3.2.27 Verify the same 802.1x and MAB client authentication when PAC and ACLs applied on a same port +| **Test ID** | **PAC_FUNC_027** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify the same 802.1x and MAB client authentication when PAC and ACLs applied on a same port.** | +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x system auth control globally and enable pae authenticator on interface.
2. Configure host mode as multi-host, enable 802.1x on the two interface port-1 and port-2.
3. Create static ACL and applied on PAC enabled port and verify the ACLs applied properly.
4. The traffic is dropped to uplink port before clients authentication.
5. Authenticate 802.1x client on port-1 and check authentication is successful.
6. Send the traffic matching to configured ACL rules and verify the traffic is forwarded properly.
7. Delete and re-assign the ACLs on the port and verify the authenticated clients won't be impacted.
8. Logoff the clients and verify clients removed on the port without any issue.
9. Verify the applied ACLs on the port are retained and traffic is forwarded to the uplink port as per the rules.
| + + +### **3.3 Reboot and Trigger Test Cases** + +### 3.3.1 Verify Client authentication after reboot + +| **Test ID** | **PAC_FUNC_TRIGGER_001** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify Client authentication after reboot**| +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Define RADIUS server
2. Enable 802.1x/MAB globally and at interface level
3. Authenticate to the DUT with multiple 802.1x clients and MAB clients
4. Do config save and "reboot"
5. 802.1x/MAB clients are removed and authenticated again.
6. Initiate 802.1x and MAB clients and verify that 802.1x/MAB clients authentication is successful after reboot.
| + + +### 3.3.2 Verify Client authentication after warmboot + +| **Test ID** | **PAC_FUNC_TRIGGER_002** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify Client authentication after warmboot**| +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Define RADIUS server
2. Enable 802.1x/MAB globally and at interface level
3. Authenticate to the DUT with multiple 802.1x clients and MAB clients
4. Do config save and warmboot
5. 802.1x/MAB clients are removed and authenticated again.
6. Initiate 802.1x and MAB clients and verify that 802.1x/MAB clients authentication is successful after warm-reboot.
| + + +### 3.3.3 Verify Client authentication after config reload + +| **Test ID** | **PAC_FUNC_TRIGGER_003** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify Client authentication after config reload**| +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Define RADIUS server
2. Enable 802.1x/MAB globally and at interface level
3. Authenticate to the DUT with multiple 802.1x clients and MAB clients
4. Do config save and perform config-reload
5. 802.1x/MAB clients are removed and authenticated again.
6. Initiate 802.1x and MAB clients and verify that 802.1x/MAB clients authentication is successful after config-reload.
| + + +### 3.3.4 Verify Client authentication after port toggle + +| **Test ID** | **PAC_FUNC_TRIGGER_004** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify Client authentication after port toggle**| +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Define RADIUS server
2. Enable 802.1x/MAB globally and at interface level
3. Authenticate to the DUT with multiple 802.1x clients and MAB clients
4. Perform shutdown and no shutdown on client authenticated port.
5. 802.1x/MAB clients are removed and authenticated again.
6. Initiate 802.1x and MAB clients and verify that 802.1x/MAB clients authentication is successful after port flap.
| + + +### **3.4 Scale Test Cases** + +### 3.4.1 Verify 128 max supported 802.1x clients on DUT. + +| **Test ID** | **PAC_SCAL_001** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify 128 max supported 802.1x clients on DUT.**| +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication globally and at interface level on multiple ports
2. Configure host mode as multi-auth on multiple ports on the DUT.
3. Try authenticating 128 802.1x clients across multiple interfaces (at least 16 clients from one or two interfaces to cover max clients per port) and verify 128 802.1x authenticated clients
4. Verify FDB entries for all 128 clients
5. Verify all client traffic gets allowed after authentication
6. Logoff all the 128 clients and verify clients move to unauthorized state and gets blocked from accessing the server.
7. Reinitiate the clients and check all are authenticated successfully.
8. Log off the clients again and check all are cleared.
| + + +### 3.4.2 Verify 128 max supported MAB clients on DUT. + +| **Test ID** | **PAC_SCAL_002** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify 128 max supported MAB clients on DUT.**| +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication globally and at interface level on multiple ports
2. Configure host mode as multi-auth on multiple ports on the DUT.
3. Try authenticating 128 MAB clients across multiple interfaces and verify 128 MAB authenticated clients
4. Verify FDB entries for all 128 clients
5. Verify all client traffic gets allowed after authentication
6. Logoff all the 128 clients and verify clients move to unauthorized state and gets blocked from accessing the server
7. Reinitiate the clients and check all are authenticated successfully.
8. With max clients authenticated, perform save and reload.
9. Check that after DUT comes up, all clients are authenticated successfully.
| + + +### 3.4.3 Verify that the 128 maximum supported clients on DUT can be authenticated by using both 802.1x and MAB clients. + +| **Test ID** | **PAC_SCAL_003** | +| -------------- | :--------------------------------------- | +| **Test Name** | **Verify that the 128 maximum supported clients on DUT can be authenticated by using both 802.1x and MAB clients.**| +| **Test Setup** | **Topology1** | +| **Type** | **Functional** | +| **Steps** | 1. Enable 802.1x authentication globally and at interface level on multiple ports
2. Configure host mode as multi-auth and MAB on multiple ports on the DUT.
3. Try to validate the authentication of a combination of 802.1x and MAB clients across different interfaces, accommodating a scale of up to 128 clients and verify that all 802.1x and MAB clients authenticated properly on different ports.
4. Verify FDB entries for all 128 clients installed properly.
5. Verify all client traffic gets allowed after authentication.
6. Logoff all the 128 clients and verify clients move to unauthorized state and gets blocked from accessing the server
7. Reinitiate the clients and check all are authenticated successfully.
| + +## **4 Sample Outputs** + +### 4.1 Sample configuration commands +``` +config authentication port-control interface +config dot1x pae interface +config authentication host-mode interface +config dot1x system-auth-control +config authentication max-users interface +config mab interface \[ auth-type \] +config authentication periodic interface +config authentication timer reauthenticate interface +config authentication order interface +config authentication priority interface +``` + + +### 4.2 Sample clear commands +``` +sonic-clear authentication sessions \>\> | \> +``` + + +### 4.3 Sample show outputs +``` +admin@sonic:~$ show authentication clients all + +--------------------------------------------------------------------------------------------------------------------------------------------- +Interface User Name MAC-Address Method Host Mode Control Mode VLAN Assigned Reason +--------------------------------------------------------------------------------------------------------------------------------------------- +Ethernet0 Userv11 00:00:00:41:22:33 802.1x single-host auto Radius (20) +Ethernet1 Userv21 00:00:00:42:22:33 802.1x multi-host auto Radius (30) + + +admin@sonic:~$ show authentication clients Ethernet1 + +Mac Address ........................................ 00:00:00:42:22:33 +User Name .......................................... Userv21 +VLAN Assigned Reason ............................... Radius (30) +Host Mode .......................................... multi-host +Method ............................................. 802.1x +Session time ....................................... 147 +Session timeout .................................... 60 +Time left for Session Termination Action ........... Not Applicable +Session Termination Action ......................... Default + + +admin@sonic:~$ show authentication interface Ethernet0 + +Interface ..................................... Eth1/46 +Port Control Mode.............................. 
auto +Host Mode...................................... single-host +Configured method order........................ dot1x mab +Enabled method order........................... dot1x mab +Configured method priority..................... dot1x mab +Enabled method priority........................ dot1x mab +Reauthentication Enabled....................... TRUE +Reauthentication Period (secs)................. 90 +Maximum Users.................................. 1 +PAE role ...................................... Authenticator + + +admin@sonic:~$ show mab interface Ethernet0 + +Interface ..................................... Ethernet0 +Admin mode ..................................... Enabled +mab_auth_type .................................. EAP_MD5 +Server Timeout(secs) ........................... 30 + + +admin@sonic:~$ show mab + +Interface ..................................... Ethernet0 +Admin mode ..................................... Disabled +mab_auth_type .................................. EAP_MD5 +Server Timeout(secs) ........................... 30 + +Interface ..................................... Ethernet1 +Admin mode ..................................... Enabled +mab_auth_type .................................. EAP_MD5 +Server Timeout(secs) ........................... 30 + +Interface ..................................... Ethernet2 +Admin mode ..................................... Disabled +mab_auth_type .................................. EAP_MD5 +Server Timeout(secs) ........................... 30 + +Interface ..................................... Ethernet3 +Admin mode ..................................... Enabled +mab_auth_type .................................. EAP_MD5 +Server Timeout(secs) ........................... 30 + +Interface ..................................... Ethernet4 +Admin mode ..................................... Disabled +mab_auth_type .................................. EAP_MD5 +Server Timeout(secs) ........................... 
30 + +Interface ..................................... Ethernet5 +Admin mode ..................................... Enabled +mab_auth_type .................................. EAP_MD5 +Server Timeout(secs) ........................... 30 + + +admin@sonic:~$ show dot1x detail Ethernet0 + +Interface ..................................... Ethernet0 +PAE Capabilities .............................. authenticator +Server Timeout(secs) .......................... 30 +Quiet Period(secs)............................. 30 + + +admin@sonic:~$ show dot1x detail all + +Interface ..................................... Ethernet0 +PAE Capabilities .............................. none +Server Timeout(secs) .......................... 30 +Quiet Period(secs)............................. 30 + +Interface ..................................... Ethernet1 +PAE Capabilities .............................. none +Server Timeout(secs) .......................... 30 +Quiet Period(secs)............................. 30 + +Interface ..................................... Ethernet2 +PAE Capabilities .............................. none +Server Timeout(secs) .......................... 30 +Quiet Period(secs)............................. 30 + +Interface ..................................... Ethernet3 +PAE Capabilities .............................. none +Server Timeout(secs) .......................... 30 +Quiet Period(secs)............................. 30 + +Interface ..................................... Ethernet4 +PAE Capabilities .............................. none +Server Timeout(secs) .......................... 
30 +``` + +## **Reference Links** + +https://github.com/sonic-net/SONiC/pull/1315 diff --git a/docs/testplan/syslog/Syslog_Protocol_Filter_TrapSeverityLevel_test_plan.md b/docs/testplan/syslog/Syslog_Protocol_Filter_TrapSeverityLevel_test_plan.md new file mode 100644 index 00000000000..db6f9d3fb8b --- /dev/null +++ b/docs/testplan/syslog/Syslog_Protocol_Filter_TrapSeverityLevel_test_plan.md @@ -0,0 +1,104 @@ +# Syslog Protocol Filter Trap severity level Test Plan + +## Related documents + +| **Document Name** | **Link** | +|-------------------|----------| +| Syslog new functionality HLD | [https://github.com/sonic-net/SONiC/pull/1218]| + + +## Overview + +Extended following functionality in syslog: + +Configure remote syslog servers: protocol, filter, trap severity level +Update global syslog configuration: trap severity level, message format + +### Scope + +The test is to verify syslog new functionality + +### Scale / Performance + +No scale/performance test involved in this test plan + +### Related **DUT** CLI commands + +``` +User interface: + +config +|--- syslog + |--- add OPTIONS + |--- del + +Options: + +config syslog add server_ip + +-s|--source - source ip address +-p|--port - server udp port +-r|--vrf - vrf device + + +show +|--- syslog +``` + +### Supported topology +The tests will be supported on any topo. + +### Test cases #1 - Configure syslog server with source:unset/unset +1. Configure syslog server with source:unset/unset like below: +``` +config syslog add 2.2.2.2 +``` +2. Check syslog config by show syslog, the result should like below: + ``` + # show syslog + SERVER SOURCE PORT VRF + ---------- ---------- ------ -------- + 2.2.2.2 N/A 514 default + ``` +3. Check the corresponding interface will send syslog message with port 514 on dut +``` +# show syslog +SERVER SOURCE PORT VRF +---------- ---------- ------ -------- +2.2.2.2 N/A 514 default +``` +4. 
Change syslog protocol to tcp +``` +sonic-db-cli CONFIG_DB HSET 'SYSLOG_SERVER|2.2.2.2' 'protocol' 'tcp' +``` +5. Send message with tcp protocol and verify packet sent +6. Send message with udp and verify it did not send +7. Configure include filter with filter regex +``` +sonic-db-cli CONFIG_DB hset 'SYSLOG_SERVER|2.2.2.2' 'filter_type' 'include' 'filter_regex' 'sonic' +``` +8. Send message with include filter and verify packet sent +9. Send message without include filter and verify packet did not send +10. Configure exclude filter +``` +sonic-db-cli CONFIG_DB hset 'SYSLOG_SERVER|2.2.2.2' 'filter_type' 'exclude' 'filter_regex' 'aa' +``` +11. Send message with exclude regex and verify packet not sent +12. Send message without exclude regex and verify packet sent +13. Remove exclude filter +``` +sonic-db-cli CONFIG_DB hdel 'SYSLOG_SERVER|2.2.2.2' 'filter_type' 'exclude' 'filter_regex' 'aa' +``` +14. Send messages with different severities and make sure they will be filtered according to default severity +15. Change global severity and make sure it works according to messages you sent +16. Remove syslog config +``` +config syslog del 2.2.2.2 +``` + +14. Send messages with different severities and make sure they will be filtered according to default severity +15. Change global severity and make sure it works according to messages you sent +16. Remove syslog config +``` +config syslog del 2.2.2.2 +``` diff --git a/sdn_tests/images/dualNodeTopology.png b/sdn_tests/images/dualNodeTopology.png new file mode 100644 index 00000000000..8cd7722eebd Binary files /dev/null and b/sdn_tests/images/dualNodeTopology.png differ diff --git a/sdn_tests/readme.md b/sdn_tests/readme.md new file mode 100644 index 00000000000..0481c70690c --- /dev/null +++ b/sdn_tests/readme.md @@ -0,0 +1,197 @@ +# SONiC-Ondatra Test Framework + + +## [**Summary**](#summary) + +Google developed hundreds of tests to verify the functionality of switch software based on SONiC. 
The tests are developed on the Ondatra framework. Google proposes to open source these tests to the SONiC community to improve test coverage. Ondatra is an open-source testing framework for network devices. It provides an infrastructure to write and run tests against network devices. Ondatra is written in Go and uses the gRPC protocol to communicate with the devices under test. More details about Ondatra can be found here [https://github.com/openconfig/ondatra/blob/main/README.md](https://github.com/openconfig/ondatra/blob/main/README.md) + +This document covers the details of the topologies that these tests can run on and also covers the customizations required to run them in the upstream SONiC topology. + + +### [**Advantages of Ondatra**](#advantages-of-ondatra) + +* Uses standard gRPC interfaces to devices under test in the topology +* In-built API support for yang models + * APIs for gNMI GET and SET + * API to validate gNMI SET payload + * API to validate GET/Telemetry responses + * API for gNOI operations +* Open Traffic Generator Interface support alleviates the problem of vendor lock-in for traffic generators +* Supports physical and virtual topologies +* Already [upstreamed](https://github.com/openconfig/ondatra/blob/main/README.md), available in openconfig, and a well-tested framework +* Minimal integration effort + + +## [**Topologies**](#topologies) + +The Ondatra framework is generic and supports both physical and virtual topologies. We use the following topology to run our tests. + + +### Dual Node Topology (SUT and Control switch running SONiC) + +![topology-sut-and-control-switch-running-sonic](images/dualNodeTopology.png) + + +In this topology the DUT is connected to the Control switch with a minimum set of links required by the test definition. The test runner uses the standard openconfig interfaces gNMI, gNOI, gNSI and P4RT to interact with the SUT and Control device. 
+ +* gNMI for configuration and telemetry +* gNOI for operations ex: Reboot, Install, file creation/deletion +* gNSI for Security operations ex: Rotate certs, keys +* P4RT for packet I/O and flow programming + + +## [Test categories](test-categories) + +The following are the test categories +* Operations ex: Installation, reboot, link qualification, file add/delete +* Configuration + * Verify the configuration and state of the device matches for all the openconfig model that google uses + * Save on set +* Telemetry +* Features + * Port management + * Dynamic Port breakout + * LACP + * Link event damp + * Inband manager + * ACL + * Flow Programming + * Packet forwarding + * QOS + * Stress + +Most of the tests are written in GO lang using the Ondatra framework. But few categories of tests such as flow programming, packet forwarding, and QOS tests are written in C++ due to their dependency on C++ based libraries. As part of upstreaming Google also upstream the glue code that enables these C++ tests to run on Ondatra. + + +## [Sample Ondatra Test](sample-ondatra-test) + +``` GO +// Sample test that fetches oper status for set of interfaces. +func TestGetInterfaceOperStatus(t *testing.T) { + dut := ondatra.DUT(t, "DUT") + frontPanelPorts := []string{"Ethernet0", "Ethernet1", "Ethernet2", "Ethernet3", "Ethernet4"} + for _, intf := range frontPanelPorts { + operStatus := gnmi.Get(t, dut, gnmi.OC().Interface(intf).OperStatus().State()) + if operStatus != oc.Interface_OperStatus_UP { + t.Logf("%v OperStatus is %v, wanted UP", intf, operStatus) + } + } +} +``` + +## [Test Execution and dependent tool chain](test-execution-and-dependent-tool-chain) + +Each test case mandates the topology requirements for the test to run. 
An example of of Build rule for ondatra test + +``` python +ondatra_test( +name = "dual_node_sample_gpins_test", +srcs = ["dual_node_sample_gpins_test.go"], +glaze_kind = "go_test", +testbed = "//gpins/testing/ondatra/testbeds:dualnode.textproto", +deps = [ +"//third_party/openconfig/ondatra", +"//third_party/openconfig/ondatra/gnmi", +"//third_party/openconfig/ondatra/gnmi/oc", +"//third_party/pins_infra/ondatra/binding:gpinsbind", +], +) +``` + + +The below box has the contents of file dualnode.textproto. It shows the topology requirements to run the test “[dual_node_sample_gpins_test](https://source.corp.google.com/piper///depot/google3/platforms/networking/gpins/testing/ondatra/tests/BUILD;bpv=1;bpt=1;l=35?gsn=dual_node_sample_gpins_test&gs=KYTHE%3A%2F%2Fkythe%3A%2F%2Fgoogle3%3Flang%3Dbazel%23build%253Aplatforms%252Fnetworking%252Fgpins%252Ftesting%252Fondatra%252Ftests%253Adual_node_sample_gpins_test)”. It has 2 devices “DUT” and “CONTROL” with each having 4 links connected between them. + + +``` GO +duts { + id: "DUT" + ports { + id: "port1" + } + ports { + id: "port2" + } + ports { + id: "port3" + } + ports { + id: "port4" + } +} + +duts { + id: "CONTROL" + ports { + id: "port1" + } + ports { + id: "port2" + } + ports { + id: "port3" + } + ports { + id: "port4" + } +} + + +links { + a: "DUT:port1" + b: "CONTROL:port1" +} +links { + a: "DUT:port2" + b: "CONTROL:port2" +} +links { + a: "DUT:port3" + b: "CONTROL:port3" +} +links { + a: "DUT:port4" + b: "CONTROL:port4" +} +``` + + +All the required dependencies to run ondatra tests are already open sourced and available in github. + +User can run an Ondatra test using bazel command as below + + +``` shell +bazel test //gpins/testing/ondatra/tests :dual_node_sample_gpins_test +``` + + +This command reserves the testbed matching the testbed requirements and executes the test on the reserved testbed. The reservation is an Ondatra interface that should be filled. 
The next section covers the details of various binding services that need to be implemented to run Ondatra tests. + + +## [Ondatra Customizations](ondatra-customizations) + +Ondatra supports the following binding interfaces that need to be implemented for test running. Please refer to the file [gpins_binding](https://github.com/openconfig/ondatra/blob/main/binding/binding.go) to understand the binding requirements. The required bindings to run the tests are: + +``` GO +type Binding interface { + // Reserving a testbed matching the test requirements. + Reserve(ctx context.Context, tb *opb.Testbed, runTime, waitTime time.Duration, partial map[string]string) (*Reservation, error) + // Releasing the reserved testbed. + Release(ctx context.Context) error +} +``` +``` GO +type DUT interface { + DialGNMI(context.Context, ...grpc.DialOption) (gpb.GNMIClient, error) + DialGNOI(context.Context, ...grpc.DialOption) (GNOIClients, error) + DialGNSI(context.Context, ...grpc.DialOption) (GNSIClients, error) + DialP4RT(context.Context, ...grpc.DialOption) (p4pb.P4RuntimeClient, error) +} +``` +``` GO +type ATE interface { + DialOTG(context.Context, ...grpc.DialOption) (gosnappi.GosnappiApi, error) +} +``` + +We will upstream the above interface implementations that support the above topologies along with the tests. diff --git a/test_reporting/README.md b/test_reporting/README.md index 14630000641..5a98e77b510 100644 --- a/test_reporting/README.md +++ b/test_reporting/README.md @@ -1,20 +1,28 @@ # SONiC Test Reporting ## Setup environment -In the sonic-mgmt container: + +There are two options to run the test reporting scripts: +1. Inside the sonic-mgmt container (recommended) +2. 
On a Linux host + +### Option 1, inside the sonic-mgmt container (recommended) +Go to this folder in the sonic-mgmt container: ``` -source /var/johnar/env-python3/bin/activate +cd /test_reporting ``` +### Option 2, on a Linux host On a Linux host (verified against Ubuntu 20.04, but should work anywhere python3/virtualenv are supported): ``` virtualenv env source env/bin/activate +cd /test_reporting pip3 install -r requirements.txt ``` ## Uploading test results to a Kusto/Azure Data Explorer (ADX) cluster -You need to add the following environment variables first: +You need to export the following environment variables first: - TEST_REPORT_INGEST_KUSTO_CLUSTER: The ingest URL of your kusto/ADX cluster - TEST_REPORT_AAD_TENANT_ID: The tenant ID of your Azure Active Directory (AAD) tenant - TEST_REPORT_AAD_CLIENT_ID: The client ID for your AAD application @@ -23,7 +31,7 @@ You need to add the following environment variables first: Check out [this doc from Kusto](https://docs.microsoft.com/en-us/azure/data-explorer/provision-azure-ad-app) for more details about setting up AAD client applications for accessing Kusto. If you want to upload data into a new table, please add the related create table commands in setup.kql file and run them manually in Kusto. -Make sure the table is created and mapping is generated sucessfully. +Make sure the table is created and mapping is generated successfully. 
Once these have been added, you can use the `report_uploader.py` script to upload test report data to Kusto: ``` diff --git a/tests/acl/custom_acl_table/acl_rules.json b/tests/acl/custom_acl_table/acl_rules.json index 0725f65ab4a..35ae57013a7 100644 --- a/tests/acl/custom_acl_table/acl_rules.json +++ b/tests/acl/custom_acl_table/acl_rules.json @@ -32,6 +32,21 @@ "PACKET_ACTION": "FORWARD", "PRIORITY": "9992" }, + "CUSTOM_TABLE|RULE_PINHOLE_1": { + "DST_IP": "10.1.0.36/32", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "5" + }, + "CUSTOM_TABLE|RULE_PINHOLE_2": { + "DST_IP": "10.1.0.38/32", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "4" + }, + "CUSTOM_TABLE|RULE_PINHOLE_3": { + "DST_IP": "10.1.0.39/32", + "PACKET_ACTION": "FORWARD", + "PRIORITY": "3" + }, "CUSTOM_TABLE|DEFAULT_DROP_RULE_V4": { "ETHER_TYPE": "2048", "PACKET_ACTION": "DROP", diff --git a/tests/acl/custom_acl_table/test_custom_acl_table.py b/tests/acl/custom_acl_table/test_custom_acl_table.py index c53383d0454..d0ac5d56c1d 100644 --- a/tests/acl/custom_acl_table/test_custom_acl_table.py +++ b/tests/acl/custom_acl_table/test_custom_acl_table.py @@ -30,16 +30,20 @@ @pytest.fixture(scope='module') -def setup_counterpoll_interval(rand_selected_dut): +def setup_counterpoll_interval(rand_selected_dut, rand_unselected_dut, tbinfo): """ Set the counterpoll interval for acl to 1 second (10 seconds by default) """ # Set polling interval to 1 second rand_selected_dut.shell('counterpoll acl interval 1000') + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell('counterpoll acl interval 1000') time.sleep(10) yield # Restore default value 10 seconds rand_selected_dut.shell('counterpoll acl interval 10000') + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell('counterpoll acl interval 10000') def clear_acl_counter(dut): @@ -68,13 +72,15 @@ def read_acl_counter(dut, rule_name): # TODO: Move this fixture to a shared place of acl test @pytest.fixture(scope="module", autouse=True) -def 
remove_dataacl_table(rand_selected_dut): +def remove_dataacl_table(rand_selected_dut, rand_unselected_dut, tbinfo): """ Remove DATAACL to free TCAM resources """ TABLE_NAME = "DATAACL" data_acl_table = None output = rand_selected_dut.shell("sonic-cfggen -d --var-json \"ACL_TABLE\"")['stdout'] + if "dualtor-aa" in tbinfo["topo"]["name"]: + output = rand_unselected_dut.shell("sonic-cfggen -d --var-json \"ACL_TABLE\"")['stdout'] try: acl_tables = json.loads(output) if TABLE_NAME in acl_tables: @@ -87,6 +93,8 @@ def remove_dataacl_table(rand_selected_dut): # Remove DATAACL logger.info("Removing ACL table {}".format(TABLE_NAME)) rand_selected_dut.shell(cmd="config acl remove table {}".format(TABLE_NAME)) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell(cmd="config acl remove table {}".format(TABLE_NAME)) yield # Recover DATAACL data_acl = {} @@ -94,17 +102,24 @@ def remove_dataacl_table(rand_selected_dut): cmd = 'sonic-cfggen -a \'{}\' -w'.format(json.dumps(data_acl)) logger.info("Restoring ACL table {}".format(TABLE_NAME)) rand_selected_dut.shell(cmd) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell(cmd) @pytest.fixture(scope='module') -def setup_custom_acl_table(rand_selected_dut): +def setup_custom_acl_table(rand_selected_dut, rand_unselected_dut, tbinfo): # Define a custom table type CUSTOM_TYPE by loading a json configuration rand_selected_dut.copy(src=CUSTOM_ACL_TABLE_TYPE_SRC_FILE, dest=CUSTOM_ACL_TABLE_TYPE_DST_FILE) rand_selected_dut.shell("sonic-cfggen -j {} -w".format(CUSTOM_ACL_TABLE_TYPE_DST_FILE)) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.copy(src=CUSTOM_ACL_TABLE_TYPE_SRC_FILE, dest=CUSTOM_ACL_TABLE_TYPE_DST_FILE) + rand_unselected_dut.shell("sonic-cfggen -j {} -w".format(CUSTOM_ACL_TABLE_TYPE_DST_FILE)) # Create an ACL table and bind to Vlan1000 interface cmd_create_table = "config acl add table CUSTOM_TABLE CUSTOM_TYPE -s ingress -p Vlan1000" cmd_remove_table = "config acl remove 
table CUSTOM_TABLE" loganalyzer = LogAnalyzer(ansible_host=rand_selected_dut, marker_prefix="custom_acl") + if "dualtor-aa" in tbinfo["topo"]["name"]: + loganalyzer = LogAnalyzer(ansible_host=rand_unselected_dut, marker_prefix="custom_acl") loganalyzer.load_common_config() try: @@ -114,42 +129,60 @@ def setup_custom_acl_table(rand_selected_dut): loganalyzer.ignore_regex = [r".*"] with loganalyzer: rand_selected_dut.shell(cmd_create_table) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell(cmd_create_table) except LogAnalyzerError as err: # Cleanup Config DB if table creation failed logger.error("ACL table creation failed, attempting to clean-up...") rand_selected_dut.shell(cmd_remove_table) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell(cmd_remove_table) raise err yield logger.info("Removing ACL table and custom type") # Remove ACL table rand_selected_dut.shell(cmd_remove_table) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell(cmd_remove_table) # Remove custom type rand_selected_dut.shell("sonic-db-cli CONFIG_DB del \'ACL_TABLE_TYPE|CUSTOM_TYPE\'") + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell("sonic-db-cli CONFIG_DB del \'ACL_TABLE_TYPE|CUSTOM_TYPE\'") @pytest.fixture(scope='module') -def setup_acl_rules(rand_selected_dut, setup_custom_acl_table): +def setup_acl_rules(rand_selected_dut, rand_unselected_dut, tbinfo, setup_custom_acl_table): # Copy and load acl rules rand_selected_dut.copy(src=ACL_RULE_SRC_FILE, dest=ACL_RULE_DST_FILE) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.copy(src=ACL_RULE_SRC_FILE, dest=ACL_RULE_DST_FILE) cmd_add_rules = "sonic-cfggen -j {} -w".format(ACL_RULE_DST_FILE) cmd_rm_rules = "acl-loader delete CUSTOM_TABLE" loganalyzer = LogAnalyzer(ansible_host=rand_selected_dut, marker_prefix="custom_acl") + if "dualtor-aa" in tbinfo["topo"]["name"]: + loganalyzer = LogAnalyzer(ansible_host=rand_unselected_dut, 
marker_prefix="custom_acl") loganalyzer.match_regex = [LOG_EXPECT_ACL_RULE_FAILED_RE] try: logger.info("Creating ACL rules in CUSTOM_TABLE") with loganalyzer: rand_selected_dut.shell(cmd_add_rules) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell(cmd_add_rules) except LogAnalyzerError as err: # Cleanup Config DB if failed logger.error("ACL rule creation failed, attempting to clean-up...") rand_selected_dut.shell(cmd_rm_rules) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell(cmd_rm_rules) raise err yield # Remove testing rules logger.info("Removing testing ACL rules") rand_selected_dut.shell(cmd_rm_rules) + if "dualtor-aa" in tbinfo["topo"]["name"]: + rand_unselected_dut.shell(cmd_rm_rules) def build_testing_pkts(router_mac): @@ -215,7 +248,7 @@ def build_exp_pkt(input_pkt): return exp_pkt -def test_custom_acl(rand_selected_dut, tbinfo, ptfadapter, +def test_custom_acl(rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter, setup_acl_rules, toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 setup_counterpoll_interval, remove_dataacl_table): """ @@ -230,6 +263,7 @@ def test_custom_acl(rand_selected_dut, tbinfo, ptfadapter, """ mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo) if "dualtor" in tbinfo["topo"]["name"]: + mg_facts_unselected_dut = rand_unselected_dut.get_extended_minigraph_facts(tbinfo) vlan_name = list(mg_facts['minigraph_vlans'].keys())[0] # Use VLAN MAC as router MAC on dual-tor testbed router_mac = rand_selected_dut.get_dut_iface_mac(vlan_name) @@ -244,6 +278,8 @@ def test_custom_acl(rand_selected_dut, tbinfo, ptfadapter, for _, v in mg_facts['minigraph_portchannels'].items(): for member in v['members']: dst_port_indices.append(mg_facts['minigraph_ptf_indices'][member]) + if "dualtor-aa" in tbinfo["topo"]["name"]: + dst_port_indices.append(mg_facts_unselected_dut['minigraph_ptf_indices'][member]) test_pkts = build_testing_pkts(router_mac) for rule, pkt in 
list(test_pkts.items()): @@ -251,9 +287,14 @@ def test_custom_acl(rand_selected_dut, tbinfo, ptfadapter, exp_pkt = build_exp_pkt(pkt) # Send and verify packet clear_acl_counter(rand_selected_dut) + if "dualtor-aa" in tbinfo["topo"]["name"]: + clear_acl_counter(rand_unselected_dut) ptfadapter.dataplane.flush() testutils.send(ptfadapter, pkt=pkt, port_id=src_port_indice) testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=dst_port_indices, timeout=5) acl_counter = read_acl_counter(rand_selected_dut, rule) + if "dualtor-aa" in tbinfo["topo"]["name"]: + acl_counter_unselected_dut = read_acl_counter(rand_unselected_dut, rule) + acl_counter += acl_counter_unselected_dut # Verify acl counter pytest_assert(acl_counter == 1, "ACL counter for {} didn't increase as expected".format(rule)) diff --git a/tests/acl/templates/acltb_test_rules.j2 b/tests/acl/templates/acltb_test_rules.j2 index 0119fc83bb8..ee9bd39874a 100644 --- a/tests/acl/templates/acltb_test_rules.j2 +++ b/tests/acl/templates/acltb_test_rules.j2 @@ -510,7 +510,41 @@ "destination-ip-address": "192.168.0.122/32" } } +{% if dualtor == True -%} + }, + "34": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 34 + }, + "ip": { + "config": { + "destination-ip-address": "{{Loopback2}}/32" + } + } + }, + "35": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 35 + }, + "ip": { + "config": { + "destination-ip-address": "{{Loopback3}}/32" + } + } + } +{%- else -%} } +{%- endif %} } } } diff --git a/tests/acl/templates/acltb_test_rules_part_1.j2 b/tests/acl/templates/acltb_test_rules_part_1.j2 index 4583a14977c..4946866fbed 100644 --- a/tests/acl/templates/acltb_test_rules_part_1.j2 +++ b/tests/acl/templates/acltb_test_rules_part_1.j2 @@ -125,7 +125,41 @@ "destination-port": "179" } } +{% if dualtor == True -%} + }, + "29": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + 
"sequence-id": 29 + }, + "ip": { + "config": { + "destination-ip-address": "{{Loopback2}}/32" + } + } + }, + "30": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 30 + }, + "ip": { + "config": { + "destination-ip-address": "{{Loopback3}}/32" + } + } + } +{%- else -%} } +{%- endif %} } } } diff --git a/tests/acl/templates/acltb_test_rules_part_2.j2 b/tests/acl/templates/acltb_test_rules_part_2.j2 index 0119fc83bb8..ee9bd39874a 100644 --- a/tests/acl/templates/acltb_test_rules_part_2.j2 +++ b/tests/acl/templates/acltb_test_rules_part_2.j2 @@ -510,7 +510,41 @@ "destination-ip-address": "192.168.0.122/32" } } +{% if dualtor == True -%} + }, + "34": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 34 + }, + "ip": { + "config": { + "destination-ip-address": "{{Loopback2}}/32" + } + } + }, + "35": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 35 + }, + "ip": { + "config": { + "destination-ip-address": "{{Loopback3}}/32" + } + } + } +{%- else -%} } +{%- endif %} } } } diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py index 3d4b5928b2b..5920d69bc38 100644 --- a/tests/acl/test_acl.py +++ b/tests/acl/test_acl.py @@ -25,6 +25,7 @@ from tests.common.fixtures.conn_graph_facts import conn_graph_facts # noqa F401 from tests.common.platform.processes_utils import wait_critical_processes from tests.common.platform.interface_utils import check_all_interface_information +from tests.common.utilities import is_ipv4_address logger = logging.getLogger(__name__) @@ -604,7 +605,7 @@ class BaseAclTest(six.with_metaclass(ABCMeta, object)): ACL_COUNTERS_UPDATE_INTERVAL_SECS = 10 @abstractmethod - def setup_rules(self, dut, acl_table, ip_version): + def setup_rules(self, dut, acl_table, ip_version, tbinfo): """Setup ACL rules for testing. 
Args: @@ -669,7 +670,7 @@ def acl_rules(self, duthosts, localhost, setup, acl_table, populate_vlan_arp_ent # Ignore any other errors to reduce noise loganalyzer.ignore_regex = [r".*"] with loganalyzer: - self.setup_rules(duthost, acl_table, ip_version) + self.setup_rules(duthost, acl_table, ip_version, tbinfo) # Give the dut some time for the ACL rules to be applied and LOG message generated time.sleep(30) @@ -1168,7 +1169,7 @@ def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_ver class TestBasicAcl(BaseAclTest): """Test Basic functionality of ACL rules (i.e. setup with full update on a running device).""" - def setup_rules(self, dut, acl_table, ip_version): + def setup_rules(self, dut, acl_table, ip_version, tbinfo): """Setup ACL rules for testing. Args: @@ -1176,6 +1177,23 @@ def setup_rules(self, dut, acl_table, ip_version): acl_table: Configuration info for the ACL table. """ + + if 'dualtor' in tbinfo['topo']['name']: + dut.host.options["variable_manager"].extra_vars.update({"dualtor": True}) + sonichost = dut.get_asic_or_sonic_host(None) + config_facts = sonichost.get_running_config_facts() + tor_ipv4_address = [_ for _ in config_facts["LOOPBACK_INTERFACE"]["Loopback2"] + if is_ipv4_address(_.split("/")[0])][0] + tor_ipv4_address = tor_ipv4_address.split("/")[0] + dut.host.options["variable_manager"].extra_vars.update({"Loopback2": tor_ipv4_address}) + + tor_ipv4_address = [_ for _ in config_facts["LOOPBACK_INTERFACE"]["Loopback3"] + if is_ipv4_address(_.split("/")[0])][0] + tor_ipv4_address = tor_ipv4_address.split("/")[0] + dut.host.options["variable_manager"].extra_vars.update({"Loopback3": tor_ipv4_address}) + else: + dut.host.options["variable_manager"].extra_vars.update({"dualtor": False}) + table_name = acl_table["table_name"] dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name}) @@ -1196,7 +1214,7 @@ class TestIncrementalAcl(BaseAclTest): multiple parts. 
""" - def setup_rules(self, dut, acl_table, ip_version): + def setup_rules(self, dut, acl_table, ip_version, tbinfo): """Setup ACL rules for testing. Args: @@ -1204,6 +1222,21 @@ def setup_rules(self, dut, acl_table, ip_version): acl_table: Configuration info for the ACL table. """ + if 'dualtor' in tbinfo['topo']['name']: + dut.host.options["variable_manager"].extra_vars.update({"dualtor": True}) + sonichost = dut.get_asic_or_sonic_host(None) + config_facts = sonichost.get_running_config_facts() + tor_ipv4_address = [_ for _ in config_facts["LOOPBACK_INTERFACE"]["Loopback2"] + if is_ipv4_address(_.split("/")[0])][0] + tor_ipv4_address = tor_ipv4_address.split("/")[0] + dut.host.options["variable_manager"].extra_vars.update({"Loopback2": tor_ipv4_address}) + tor_ipv4_address = [_ for _ in config_facts["LOOPBACK_INTERFACE"]["Loopback3"] + if is_ipv4_address(_.split("/")[0])][0] + tor_ipv4_address = tor_ipv4_address.split("/")[0] + dut.host.options["variable_manager"].extra_vars.update({"Loopback3": tor_ipv4_address}) + else: + dut.host.options["variable_manager"].extra_vars.update({"dualtor": False}) + table_name = acl_table["table_name"] dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name}) diff --git a/tests/arp/conftest.py b/tests/arp/conftest.py index 3eb30ecf3c8..be8354a596c 100644 --- a/tests/arp/conftest.py +++ b/tests/arp/conftest.py @@ -311,6 +311,9 @@ def proxy_arp_enabled(rand_selected_dut, config_facts): proxy_arp_del_cmd = 'sonic-db-cli CONFIG_DB HDEL "VLAN_INTERFACE|Vlan{}" proxy_arp' for vid, proxy_arp_val in list(old_proxy_arp_vals.items()): if 'enabled' not in proxy_arp_val: + # Disable proxy_arp explicitly + duthost.shell(proxy_arp_config_cmd.format(vid, 'disabled')) + time.sleep(2) # Delete the DB entry instead of using the config command to satisfy check_dut_health_status duthost.shell(proxy_arp_del_cmd.format(vid)) diff --git a/tests/arp/test_arp_extended.py b/tests/arp/test_arp_extended.py index 
a12969bf92d..ffe6d8607ad 100644 --- a/tests/arp/test_arp_extended.py +++ b/tests/arp/test_arp_extended.py @@ -52,7 +52,7 @@ def test_arp_garp_enabled(rand_selected_dut, garp_enabled, ip_and_intf_info, int pytest_assert(switch_arptable['arptable']['v4'][arp_request_ip]['interface'] in vlan_intfs) -def test_proxy_arp(proxy_arp_enabled, ip_and_intf_info, ptfadapter, packets_for_test): +def test_proxy_arp(rand_selected_dut, proxy_arp_enabled, ip_and_intf_info, ptfadapter, packets_for_test): """ Send an ARP request or neighbor solicitation (NS) to the DUT for an IP address within the subnet of the DUT's VLAN. @@ -70,5 +70,25 @@ def test_proxy_arp(proxy_arp_enabled, ip_and_intf_info, ptfadapter, packets_for_ pytest_require(ptf_intf_ipv6_addr is not None, 'No IPv6 VLAN address configured on device') ptfadapter.dataplane.flush() + + if ip_version == 'v6': + running_config = rand_selected_dut.get_running_config_facts() + logger.debug("NDP Debug Logs Start") + for table_name, table in running_config.items(): + if "VLAN" in table_name: + logger.debug("{}: {}".format(table_name, table)) + swss_status = rand_selected_dut.shell('docker exec swss supervisorctl status', + module_ignore_errors=True)['stdout'] + logger.debug(swss_status) + ndppd_conf = rand_selected_dut.shell('docker exec swss cat /etc/ndppd.conf', + module_ignore_errors=True)['stdout'] + logger.debug(ndppd_conf) + + neigh_table = rand_selected_dut.shell('ip -6 neigh')['stdout'] + logger.debug(neigh_table) + testutils.send_packet(ptfadapter, ptf_intf_index, outgoing_packet) + if ip_version == 'v6': + neigh_table = rand_selected_dut.shell('ip -6 neigh')['stdout'] + logger.debug(neigh_table) testutils.verify_packet(ptfadapter, expected_packet, ptf_intf_index, timeout=10) diff --git a/tests/arp/test_stress_arp.py b/tests/arp/test_stress_arp.py index 22cbd4e486d..071aa26f8e9 100644 --- a/tests/arp/test_stress_arp.py +++ b/tests/arp/test_stress_arp.py @@ -120,29 +120,17 @@ def ipv6_packets_for_test(ip_and_intf_info, 
fake_src_mac, fake_src_addr): ns_pkt /= IPv6(dst=inet_ntop(socket.AF_INET6, multicast_tgt_addr), src=fake_src_addr) ns_pkt /= ICMPv6ND_NS(tgt=tgt_addr) ns_pkt /= ICMPv6NDOptSrcLLAddr(lladdr=fake_src_mac) - logging.info(repr(ns_pkt)) return ns_pkt -def get_ipv6_entries_status(duthost, ipv6_addr): - ipv6_entry = duthost.shell("ip -6 neighbor | grep -w {}".format(ipv6_addr))["stdout_lines"][0] - ipv6_entry_status = ipv6_entry.split(" ")[-1] - return (ipv6_entry_status == 'REACHABLE') - - -def add_nd(duthost, ptfhost, ptfadapter, config_facts, tbinfo, ip_and_intf_info, ptf_intf_index, nd_avaliable): +def add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_avaliable): for entry in range(0, nd_avaliable): nd_entry_mac = IntToMac(MacToInt(ARP_SRC_MAC) + entry) fake_src_addr = generate_global_addr(nd_entry_mac) ns_pkt = ipv6_packets_for_test(ip_and_intf_info, nd_entry_mac, fake_src_addr) - ptfhost.shell("ip -6 addr add {}/64 dev eth1".format(fake_src_addr)) - - ptfadapter.dataplane.flush() testutils.send_packet(ptfadapter, ptf_intf_index, ns_pkt) - wait_until(20, 1, 0, get_ipv6_entries_status, duthost, fake_src_addr) - ptfhost.shell("ip -6 addr del {}/64 dev eth1".format(fake_src_addr)) def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, @@ -163,7 +151,7 @@ def test_ipv6_nd(duthost, ptfhost, config_facts, tbinfo, ip_and_intf_info, while loop_times > 0: loop_times -= 1 - add_nd(duthost, ptfhost, ptfadapter, config_facts, tbinfo, ip_and_intf_info, ptf_intf_index, nd_avaliable) + add_nd(ptfadapter, ip_and_intf_info, ptf_intf_index, nd_avaliable) pytest_assert(wait_until(20, 1, 0, lambda: get_fdb_dynamic_mac_count(duthost) >= nd_avaliable), "Neighbor Table Add failed") diff --git a/tests/bfd/bfd_base.py b/tests/bfd/bfd_base.py new file mode 100644 index 00000000000..8ad100633fa --- /dev/null +++ b/tests/bfd/bfd_base.py @@ -0,0 +1,288 @@ +import random +import pytest +import re +import time +import logging +from tests.platform_tests.cli import util 
+from tests.common.plugins.sanity_check.checks import _parse_bfd_output + +logger = logging.getLogger(__name__) + + +class BfdBase: + def list_to_dict(self, sample_list): + data_rows = sample_list[3:] + for data in data_rows: + data_dict = {} + data = data.encode("utf-8").split() + data_dict["Peer Addr"] = data[0] + data_dict["Interface"] = data[1] + data_dict["Vrf"] = data[2] + data_dict["State"] = data[3] + data_dict["Type"] = data[4] + data_dict["Local Addr"] = data[5] + data_dict["TX Interval"] = data[6] + data_dict["RX Interval"] = data[7] + data_dict["Multiplier"] = data[8] + data_dict["Multihop"] = data[9] + data_dict["Local Discriminator"] = data[10] + return data_dict + + def selecting_route_to_delete(self, asic_routes, nexthops): + for asic in asic_routes: + for prefix in asic_routes[asic]: + nexthops_in_static_route_output = asic_routes[asic][prefix] + # If nexthops on source dut are same destination dut's interfaces, we are picking that static route + if sorted(nexthops_in_static_route_output) == sorted(nexthops): + time.sleep(2) + logger.info("Nexthops from static route output") + logger.info(sorted(nexthops_in_static_route_output)) + logger.info("Given Nexthops") + logger.info(sorted(nexthops)) + logger.info("Prefix") + logger.info(prefix) + return prefix + + def modify_all_bfd_sessions(self, dut, flag): + # Extracting asic count + cmd = "show platform summary" + logging.info("Verifying output of '{}' on '{}'...".format(cmd, dut.hostname)) + summary_output_lines = dut.command(cmd)["stdout_lines"] + summary_dict = util.parse_colon_speparated_lines(summary_output_lines) + asic_count = int(summary_dict["ASIC Count"]) + + # Creating bfd.json, bfd0.json, bfd1.json, bfd2.json ... 
+ for i in range(asic_count): + file_name = "config_db{}.json".format(i) + dut.shell("cp /etc/sonic/{} /etc/sonic/{}.bak".format(file_name, file_name)) + if flag == "false": + command = """sed -i 's/"bfd": "true"/"bfd": "false"/' {}""".format( + "/etc/sonic/" + file_name + ) + elif flag == "true": + command = """sed -i 's/"bfd": "false"/"bfd": "true"/' {}""".format( + "/etc/sonic/" + file_name + ) + dut.shell(command) + + def extract_backend_portchannels(self, dut): + output = dut.show_and_parse("show int port -d all") + port_channel_dict = {} + + for item in output: + if "BP" in item.get("ports", ""): + port_channel = item.get("team dev", "") + ports_with_status = [ + port.strip() + for port in item.get("ports", "").split() + if "BP" in port + ] + ports = [ + ( + re.match(r"^([\w-]+)\([A-Za-z]\)", port).group(1) + if re.match(r"^([\w-]+)\([A-Za-z]\)", port) + else None + ) + for port in ports_with_status + ] + status_match = re.search( + r"LACP\(A\)\((\w+)\)", item.get("protocol", "") + ) + status = status_match.group(1) if status_match else "" + if ports: + port_channel_dict[port_channel] = { + "members": ports, + "status": status, + } + + return port_channel_dict + + def extract_ip_addresses_for_backend_portchannels(self, dut, dut_asic, version): + backend_port_channels = self.extract_backend_portchannels(dut) + if version == "ipv4": + command = "show ip int -d all" + elif version == "ipv6": + command = "show ipv6 int -d all" + data = dut.show_and_parse("{} -n asic{}".format(command, dut_asic.asic_index)) + result_dict = {} + for item in data: + if version == "ipv4": + ip_address = item.get("ipv4 address/mask", "").split("/")[0] + elif version == "ipv6": + ip_address = item.get("ipv6 address/mask", "").split("/")[0] + interface = item.get("interface", "") + + if interface in backend_port_channels: + result_dict[interface] = ip_address + return result_dict + + def delete_bfd(self, asic_number, prefix, dut): + command = "sonic-db-cli -n asic{} CONFIG_DB HSET 
\"STATIC_ROUTE|{}\" bfd 'false'".format( + asic_number, prefix + ).replace( + "\\", "" + ) + logger.info(command) + dut.shell(command) + time.sleep(15) + + def add_bfd(self, asic_number, prefix, dut): + command = "sonic-db-cli -n asic{} CONFIG_DB HSET \"STATIC_ROUTE|{}\" bfd 'true'".format( + asic_number, prefix + ).replace( + "\\", "" + ) + logger.info(command) + dut.shell(command) + time.sleep(15) + + def extract_current_bfd_state(self, nexthop, asic_number, dut): + bfd_peer_command = "ip netns exec asic{} show bfd peer {}".format( + asic_number, nexthop + ) + logger.info("Verifying BFD status on {}".format(dut)) + logger.info(bfd_peer_command) + bfd_peer_output = ( + dut.shell(bfd_peer_command, module_ignore_errors=True)["stdout"] + .encode("utf-8") + .strip() + .split("\n") + ) + if "No BFD sessions found" in bfd_peer_output[0]: + return "No BFD sessions found" + else: + entry = self.list_to_dict(bfd_peer_output) + return entry["State"] + + def find_bfd_peers_with_given_state(self, dut, dut_asic, expected_bfd_state): + # Expected BFD states: Up, Down, No BFD sessions found + peer_count = [] + bfd_cmd = "ip netns exec asic{} show bfd sum" + result = True + bfd_peer_output = ( + dut.shell(bfd_cmd.format(dut_asic))["stdout"] + .encode("utf-8") + .strip() + .split("\n") + ) + if any( + keyword in bfd_peer_output[0] + for keyword in ("Total number of BFD sessions: 0", "No BFD sessions found") + ): + return result + else: + bfd_output = _parse_bfd_output(bfd_peer_output) + for peer in bfd_output: + if not bfd_output[peer]["State"] == expected_bfd_state: + peer_count.append(peer) + if len(peer_count) > 0: + result = False + return result + + def verify_bfd_state(self, dut, dut_nexthops, dut_asic, expected_bfd_state): + logger.info("Verifying BFD state on {} ".format(dut)) + for nexthop in dut_nexthops: + current_bfd_state = self.extract_current_bfd_state( + nexthop, dut_asic.asic_index, dut + ) + logger.info("current_bfd_state: {}".format(current_bfd_state)) + 
logger.info("expected_bfd_state: {}".format(expected_bfd_state)) + if current_bfd_state != expected_bfd_state: + return False + return True + + def extract_routes(self, static_route_output, version): + asic_routes = {} + asic = None + + for line in static_route_output: + if line.startswith("asic"): + asic = line.split(":")[0] + asic_routes[asic] = {} + elif line.startswith("S>*") or line.startswith(" *"): + parts = line.split(",") + if line.startswith("S>*"): + if version == "ipv4": + prefix_match = re.search(r"(\d+\.\d+\.\d+\.\d+/\d+)", parts[0]) + elif version == "ipv6": + prefix_match = re.search(r"([0-9a-fA-F:.\/]+)", parts[0]) + if prefix_match: + prefix = prefix_match.group(1) + else: + continue + if version == "ipv4": + next_hop_match = re.search(r"via\s+(\d+\.\d+\.\d+\.\d+)", parts[0]) + elif version == "ipv6": + next_hop_match = re.search(r"via\s+([0-9a-fA-F:.\/]+)", parts[0]) + if next_hop_match: + next_hop = next_hop_match.group(1) + else: + continue + + asic_routes[asic].setdefault(prefix, []).append(next_hop) + return asic_routes + + @pytest.fixture( + scope="class", name="select_src_dst_dut_and_asic", params=(["multi_dut"]) + ) + def select_src_dst_dut_and_asic(self, duthosts, request, tbinfo): + src_dut_index = 0 + dst_dut_index = 0 + src_asic_index = 0 + dst_asic_index = 0 + if (len(duthosts.frontend_nodes)) < 2: + pytest.skip("Don't have 2 frontend nodes - so can't run multi_dut tests") + # Random selection of dut indices based on number of front end nodes + dut_indices = random.sample(list(range(len(duthosts.frontend_nodes))), 2) + src_dut_index = dut_indices[0] + dst_dut_index = dut_indices[1] + + # Random selection of source asic based on number of asics available on source dut + src_asic_index_selection = random.choice( + duthosts[src_dut_index].get_asic_namespace_list() + ) + src_asic_index = src_asic_index_selection.split("asic")[1] + + # Random selection of destination asic based on number of asics available on destination dut + 
dst_asic_index_selection = random.choice( + duthosts[dst_dut_index].get_asic_namespace_list() + ) + dst_asic_index = dst_asic_index_selection.split("asic")[1] + + yield { + "src_dut_index": src_dut_index, + "dst_dut_index": dst_dut_index, + "src_asic_index": int(src_asic_index), + "dst_asic_index": int(dst_asic_index), + } + + @pytest.fixture(scope="class") + def get_src_dst_asic_and_duts(self, duthosts, select_src_dst_dut_and_asic): + logger.info("Printing select_src_dst_dut_and_asic") + logger.info(select_src_dst_dut_and_asic) + + logger.info("Printing duthosts.frontend_nodes") + logger.info(duthosts.frontend_nodes) + src_dut = duthosts.frontend_nodes[select_src_dst_dut_and_asic["src_dut_index"]] + dst_dut = duthosts.frontend_nodes[select_src_dst_dut_and_asic["dst_dut_index"]] + + logger.info("Printing source dut asics") + logger.info(src_dut.asics) + logger.info("Printing destination dut asics") + logger.info(dst_dut.asics) + src_asic = src_dut.asics[select_src_dst_dut_and_asic["src_asic_index"]] + dst_asic = dst_dut.asics[select_src_dst_dut_and_asic["dst_asic_index"]] + + all_asics = [src_asic, dst_asic] + all_duts = [src_dut, dst_dut] + + rtn_dict = { + "src_asic": src_asic, + "dst_asic": dst_asic, + "src_dut": src_dut, + "dst_dut": dst_dut, + "all_asics": all_asics, + "all_duts": all_duts, + } + rtn_dict.update(select_src_dst_dut_and_asic) + yield rtn_dict diff --git a/tests/bfd/conftest.py b/tests/bfd/conftest.py index 6e49ab13bb7..422ed66f4ec 100644 --- a/tests/bfd/conftest.py +++ b/tests/bfd/conftest.py @@ -1,3 +1,114 @@ +import pytest +from bfd_base import BfdBase +import logging +from tests.platform_tests.link_flap.link_flap_utils import check_orch_cpu_utilization +from tests.common.utilities import wait_until +from tests.common.config_reload import config_reload + +logger = logging.getLogger(__name__) + + +@pytest.fixture(scope="class") +def bfd_base_instance(): + return BfdBase() + + def pytest_addoption(parser): parser.addoption("--num_sessions", 
action="store", default=5) parser.addoption("--num_sessions_scale", action="store", default=128) + + +@pytest.fixture(scope="function") +def bfd_cleanup_db( + request, duthosts, enum_supervisor_dut_hostname, bfd_base_instance, autouse=True +): + orch_cpu_threshold = 10 + # Make Sure Orch CPU < orch_cpu_threshold before starting test. + logger.info( + "Make Sure orchagent CPU utilization is less that %d before starting the test", + orch_cpu_threshold, + ) + duts = duthosts.frontend_nodes + for dut in duts: + assert wait_until( + 100, 2, 0, check_orch_cpu_utilization, dut, orch_cpu_threshold + ), "Orch CPU utilization {} > orch cpu threshold {} before starting the test".format( + dut.shell("show processes cpu | grep orchagent | awk '{print $9}'")[ + "stdout" + ], + orch_cpu_threshold, + ) + + yield + orch_cpu_threshold = 10 + # Orchagent CPU should consume < orch_cpu_threshold at last. + logger.info( + "watch orchagent CPU utilization when it goes below %d", orch_cpu_threshold + ) + for dut in duts: + assert wait_until( + 45, 2, 0, check_orch_cpu_utilization, dut, orch_cpu_threshold + ), "Orch CPU utilization {} > orch cpu threshold {} before starting the test".format( + dut.shell("show processes cpu | grep orchagent | awk '{print $9}'")[ + "stdout" + ], + orch_cpu_threshold, + ) + + logger.info("Verifying swss container status on RP") + rp = duthosts[enum_supervisor_dut_hostname] + container_status = True + if hasattr(request.config, "rp_asic_ids"): + for id in request.config.rp_asic_ids: + docker_output = rp.shell( + "docker ps | grep swss{} | awk '{{print $NF}}'".format(id) + )["stdout"] + if len(docker_output) == 0: + container_status = False + if not container_status: + config_reload(rp) + + logger.info( + "Clearing BFD configs on {}, {}".format( + request.config.src_dut, request.config.dst_dut + ) + ) + command = ( + "sonic-db-cli -n asic{} CONFIG_DB HSET \"STATIC_ROUTE|{}\" bfd 'false'".format( + request.config.src_asic.asic_index, request.config.src_prefix + 
).replace("\\", "") + ) + request.config.src_dut.shell(command) + command = ( + "sonic-db-cli -n asic{} CONFIG_DB HSET \"STATIC_ROUTE|{}\" bfd 'false'".format( + request.config.dst_asic.asic_index, request.config.dst_prefix + ).replace("\\", "") + ) + request.config.dst_dut.shell(command) + + logger.info("Bringing up portchannels or respective members") + if hasattr(request.config, "portchannels_on_dut"): + portchannels_on_dut = request.config.portchannels_on_dut + selected_interfaces = request.config.selected_portchannels + elif hasattr(request.config, "selected_portchannel_members"): + portchannels_on_dut = request.config.portchannels_on_dut + selected_interfaces = request.config.selected_portchannel_members + else: + logger.info( + "None of the portchannels are selected to flap. So skipping portchannel interface check" + ) + selected_interfaces = [] + + if selected_interfaces: + dut = ( + request.config.src_dut + if portchannels_on_dut == "src" + else request.config.dst_dut + ) + asic = ( + request.config.src_asic + if portchannels_on_dut == "src" + else request.config.dst_asic + ) + for interface in selected_interfaces: + bfd_base_instance.interface_cleanup(dut, asic, interface) diff --git a/tests/bfd/test_bfd_static_route.py b/tests/bfd/test_bfd_static_route.py new file mode 100644 index 00000000000..f0232a074e9 --- /dev/null +++ b/tests/bfd/test_bfd_static_route.py @@ -0,0 +1,3361 @@ +import pytest +from bfd_base import BfdBase +import logging +import time +from tests.common.utilities import wait_until +from tests.common.config_reload import config_reload +from tests.common.platform.processes_utils import wait_critical_processes +from tests.common.reboot import reboot + +pytestmark = [pytest.mark.topology("t2")] + +logger = logging.getLogger(__name__) + + +class TestBfdStaticRoute(BfdBase): + test_case_status = True + total_iterations = 100 + + @pytest.fixture(autouse=True, scope="class") + def modify_bfd_sessions(self, duthosts, bfd_base_instance): + """ + 
1. Gather all front end nodes + 2. Modify BFD state to required state & issue config reload. + 3. Wait for Critical processes + 4. Gather all ASICs for each dut + 5. Calls find_bfd_peers_with_given_state using wait_until + a. Runs ip netns exec asic{} show bfd sum + b. If expected state is "Total number of BFD sessions: 0" and it is in result, output is True + c. If expected state is "Up" and no. of down peers is 0, output is True + d. If expected state is "Down" and no. of up peers is 0, output is True + """ + try: + duts = duthosts.frontend_nodes + for dut in duts: + bfd_base_instance.modify_all_bfd_sessions(dut, "false") + for dut in duts: + # config reload + # config_reload(dut) + wait_critical_processes(dut) + # Verification that all BFD sessions are deleted + for dut in duts: + asics = [ + asic.split("asic")[1] for asic in dut.get_asic_namespace_list() + ] + for asic in asics: + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.find_bfd_peers_with_given_state( + dut, asic, "No BFD sessions found" + ), + ) + + yield + + finally: + duts = duthosts.frontend_nodes + for dut in duts: + bfd_base_instance.modify_all_bfd_sessions(dut, "true") + for dut in duts: + config_reload(dut) + wait_critical_processes(dut) + # Verification that all BFD sessions are added + for dut in duts: + asics = [ + asic.split("asic")[1] for asic in dut.get_asic_namespace_list() + ] + for asic in asics: + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.find_bfd_peers_with_given_state( + dut, asic, "Up" + ), + ) + + def test_bfd_with_lc_reboot_ipv4( + self, + localhost, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv4" + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, 
destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + + # Perform a cold reboot on source dut + reboot(src_dut, localhost) + + # Waiting for all processes on Source dut + wait_critical_processes(src_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source & destination dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + + # Config reload of Source dut + reboot(src_dut, localhost) + + # Waiting for all processes on Source dut + wait_critical_processes(src_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_static_route_deletion_ipv4( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + + To verify deletion of BFD session between two line cards. + Test Steps: + 1. Delete BFD on Source dut + 2. Verify that on Source dut BFD gets cleaned up and static route exists. + 3. Verify that on Destination dut BFD goes down and static route will be removed. + 4. Delete BFD on Destination dut. + 5. Verify that on Destination dut BFD gets cleaned up and static route will be added back. 
+ """ + version = "ipv4" + + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD & Static route verifications") + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + logger.info("BFD deletion on destination dut") + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + logger.info("BFD & Static route verifications") + assert wait_until( + 180, + 10, + 0, + lambda: 
bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + assert self.test_case_status, "BFD deletion did not influence static routes" + logger.info("test_bfd_static_route_deletion completed") + + def test_bfd_with_lc_reboot_ipv6( + self, + localhost, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + + # Perform a cold reboot on source dut + reboot(src_dut, localhost) + + # Waiting for all processes on Source dut + wait_critical_processes(src_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source & destination dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + + # Config reload of Source dut + reboot(src_dut, localhost) + + # Waiting for all processes on Source dut + wait_critical_processes(src_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_static_route_deletion_ipv6( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD & Static route verifications") + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + logger.info("BFD deletion on destination dut") + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + logger.info("BFD & Static route verifications") + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + assert self.test_case_status, "BFD deletion did not influence static routes" + 
logger.info("test_bfd_static_route_deletion completed") + + def verify_static_route( + self, + request, + asic, + prefix, + dut, + dut_nexthops, + expected_prefix_state, + bfd_base_instance, + version, + ): + # Verification of static route + if version == "ipv4": + command = "show ip route static" + elif version == "ipv6": + command = "show ipv6 route static" + static_route_output = ( + dut.shell(command, module_ignore_errors=True)["stdout"] + .encode("utf-8") + .strip() + .split("\n") + ) + asic_routes = bfd_base_instance.extract_routes(static_route_output, version) + logger.info("Here are asic routes, {}".format(asic_routes)) + + if expected_prefix_state == "Route Removal": + if len(asic_routes) == 0 and request.config.interface_shutdown: + logger.info("asic routes are empty post interface shutdown") + else: + assert len(asic_routes) > 0, "static routes on source dut are empty" + assert ( + prefix + not in asic_routes.get("asic{}".format(asic.asic_index), {}).keys() + ), "Prefix removal is not successful. Prefix being validated: {}.".format( + prefix + ) + elif expected_prefix_state == "Route Addition": + assert ( + prefix in asic_routes.get("asic{}".format(asic.asic_index), {}).keys() + ), "Prefix has not been added even though BFD is expected. Prefix: {}".format( + prefix + ) + + def select_src_dst_dut_with_asic( + self, request, get_src_dst_asic_and_duts, bfd_base_instance, version + ): + logger.debug("Selecting source and destination DUTs with ASICs...") + # Random selection of dut & asic. 
+ src_asic = get_src_dst_asic_and_duts["src_asic"] + dst_asic = get_src_dst_asic_and_duts["dst_asic"] + src_dut = get_src_dst_asic_and_duts["src_dut"] + dst_dut = get_src_dst_asic_and_duts["dst_dut"] + + logger.info("Source Asic: %s", src_asic) + logger.info("Destination Asic: %s", dst_asic) + logger.info("Source dut: %s", src_dut) + logger.info("Destination dut: %s", dst_dut) + + request.config.src_asic = src_asic + request.config.dst_asic = dst_asic + request.config.src_dut = src_dut + request.config.dst_dut = dst_dut + + # Extracting static routes + if version == "ipv4": + static_route_command = "show ip route static" + elif version == "ipv6": + static_route_command = "show ipv6 route static" + src_dut_static_route_output = ( + src_dut.shell(static_route_command, module_ignore_errors=True)["stdout"] + .encode("utf-8") + .strip() + .split("\n") + ) + src_asic_routes = bfd_base_instance.extract_routes( + src_dut_static_route_output, version + ) + logger.info("Source asic routes, {}".format(src_asic_routes)) + assert len(src_asic_routes) > 0, "static routes on source dut are empty" + + dst_dut_static_route_output = ( + dst_dut.shell(static_route_command, module_ignore_errors=True)["stdout"] + .encode("utf-8") + .strip() + .split("\n") + ) + dst_asic_routes = bfd_base_instance.extract_routes( + dst_dut_static_route_output, version + ) + logger.info("Destination asic routes, {}".format(dst_asic_routes)) + assert len(dst_asic_routes) > 0, "static routes on destination dut are empty" + + # Extracting nexthops + dst_dut_nexthops = ( + bfd_base_instance.extract_ip_addresses_for_backend_portchannels( + src_dut, src_asic, version + ) + ) + logger.info("Destination nexthops, {}".format(dst_dut_nexthops)) + assert len(dst_dut_nexthops) != 0, "Destination Nexthops are empty" + + src_dut_nexthops = ( + bfd_base_instance.extract_ip_addresses_for_backend_portchannels( + dst_dut, dst_asic, version + ) + ) + logger.info("Source nexthops, {}".format(src_dut_nexthops)) + assert 
len(src_dut_nexthops) != 0, "Source Nexthops are empty" + + # Picking a static route to delete correspinding BFD session + src_prefix = bfd_base_instance.selecting_route_to_delete( + src_asic_routes, src_dut_nexthops.values() + ) + logger.info("Source prefix: %s", src_prefix) + request.config.src_prefix = src_prefix + assert src_prefix is not None and src_prefix != "", "Source prefix not found" + + dst_prefix = bfd_base_instance.selecting_route_to_delete( + dst_asic_routes, dst_dut_nexthops.values() + ) + logger.info("Destination prefix: %s", dst_prefix) + request.config.dst_prefix = dst_prefix + assert ( + dst_prefix is not None and dst_prefix != "" + ), "Destination prefix not found" + + return ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) + + def test_bfd_flap_ipv4( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + + To flap the BFD session ( Up <--> Down <---> Up) between linecards for 100 times. + Test Steps: + 1. Delete BFD on Source dut + 2. Verify that on Source dut BFD gets cleaned up and static route exists. + 3. Verify that on Destination dut BFD goes down and static route will be removed. + 4. Add BFD on Source dut. + 5. Verify that on Source dut BFD is up + 6. Verify that on destination dut BFD is up and static route is added back. + 7. Repeat above steps 100 times. 
+ """ + version = "ipv4" + + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + successful_iterations = 0 # Counter for successful iterations + + for i in range(self.total_iterations): + logger.info("Iteration {}".format(i)) + + logger.info("BFD deletion on source dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("Waiting for 5s post BFD shutdown") + time.sleep(5) + + logger.info("BFD & Static route verifications") + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, + src_dut_nexthops.values(), + src_asic, + "No BFD sessions found", + ), + ) + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + 
logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD & Static route verifications") + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + # Check if both iterations were successful and increment the counter + if self.test_case_status: + successful_iterations += 1 + + # Determine the success rate + logger.info("successful_iterations: %d", successful_iterations) + success_rate = (successful_iterations / self.total_iterations) * 100 + + logger.info("Current success rate: %.2f%%", success_rate) + # Check if the success rate is above the threshold (e.g., 98%) + assert ( + success_rate >= 98 + ), "BFD flap verification success rate is below 98% ({}%)".format(success_rate) + + logger.info("test_bfd_flap completed") + + def test_bfd_flap_ipv6( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + + To flap the BFD session ( Up <--> Down <---> Up) between linecards for 100 times. + Test Steps: + 1. Delete BFD on Source dut + 2. Verify that on Source dut BFD gets cleaned up and static route exists. + 3. Verify that on Destination dut BFD goes down and static route will be removed. + 4. Add BFD on Source dut. + 5. Verify that on Source dut BFD is up + 6. Verify that on destination dut BFD is up and static route is added back. + 7. 
Repeat above steps 100 times. + """ + version = "ipv6" + + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + successful_iterations = 0 # Counter for successful iterations + + for i in range(self.total_iterations): + logger.info("Iteration {}".format(i)) + + logger.info("BFD deletion on source dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("Waiting for 5s post BFD shutdown") + time.sleep(5) + + logger.info("BFD & Static route verifications") + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, + src_dut_nexthops.values(), + src_asic, + "No BFD sessions found", + ), + ) + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, 
+ version, + ) + + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD & Static route verifications") + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + # Check if both iterations were successful and increment the counter + if self.test_case_status: + successful_iterations += 1 + + # Determine the success rate + logger.info("successful_iterations: %d", successful_iterations) + success_rate = (successful_iterations / self.total_iterations) * 100 + + logger.info("Current success rate: %.2f%%", success_rate) + # Check if the success rate is above the threshold (e.g., 98%) + assert ( + success_rate >= 98 + ), "BFD flap verification success rate is below 98% ({}%)".format(success_rate) + + logger.info("test_bfd_flap completed") + + def control_interface_state(self, dut, asic, bfd_base_instance, interface, action): + int_status = dut.show_interface( + command="status", include_internal_intfs=True, asic_index=asic.asic_index + )["ansible_facts"]["int_status"][interface] + oper_state = int_status["oper_state"] + if action == "shutdown": + target_state = "down" + elif action == "startup": + target_state = "up" + + if oper_state != target_state: + command = "shutdown" if action == "shutdown" else "startup" + exec_cmd = ( + "sudo ip netns exec asic{} config interface -n asic{} {} {}".format( + asic.asic_index, asic.asic_index, command, interface + ) + ) + 
logger.info("Command: {}".format(exec_cmd)) + logger.info("Target state: {}".format(target_state)) + dut.shell(exec_cmd) + assert wait_until( + 180, + 10, + 0, + lambda: dut.show_interface( + command="status", + include_internal_intfs=True, + asic_index=asic.asic_index, + )["ansible_facts"]["int_status"][interface]["oper_state"] + == target_state, + ) + else: + raise ValueError("Invalid action specified") + + def test_bfd_with_rp_reboot_ipv4( + self, + localhost, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + enum_supervisor_dut_hostname, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv4" + + rp = duthosts[enum_supervisor_dut_hostname] + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Perform a cold reboot on source dut + reboot(rp, localhost) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source & destination dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Config reload of Source dut + reboot(rp, localhost) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. 
Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_remote_link_flap_ipv4( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv4" + + request.config.interface_shutdown = True + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Extract portchannel interfaces on dst + list_of_portchannels_on_dst = src_dut_nexthops.keys() + request.config.portchannels_on_dut = "dst" + request.config.selected_portchannels = list_of_portchannels_on_dst + + # Shutdown PortChannels on destination dut + for interface in list_of_portchannels_on_dst: + action = "shutdown" + self.control_interface_state( + dst_dut, dst_asic, bfd_base_instance, interface, action + ) + + # Verification of BFD session state on src dut + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Down" + ), + ) + + # Verify that corresponding static route has been removed on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + + for interface in list_of_portchannels_on_dst: + action = "startup" + self.control_interface_state( + dst_dut, dst_asic, bfd_base_instance, interface, action + ) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Verify that corresponding static route has been added on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + def test_bfd_with_rp_reboot_ipv6( + self, + localhost, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + enum_supervisor_dut_hostname, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + rp = duthosts[enum_supervisor_dut_hostname] + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Perform a cold reboot on source dut + reboot(rp, localhost) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source & destination dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Config reload of Source dut + reboot(rp, localhost) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. 
Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_remote_link_flap_ipv6( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + request.config.interface_shutdown = True + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Extract portchannel interfaces on dst + list_of_portchannels_on_dst = src_dut_nexthops.keys() + request.config.portchannels_on_dut = "dst" + request.config.selected_portchannels = list_of_portchannels_on_dst + + # Shutdown PortChannels on destination dut + for interface in list_of_portchannels_on_dst: + action = "shutdown" + self.control_interface_state( + dst_dut, dst_asic, bfd_base_instance, interface, action + ) + + # Verification of BFD session state on src dut + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Down" + ), + ) + + # Verify that corresponding static route has been removed on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + + for interface in list_of_portchannels_on_dst: + action = "startup" + self.control_interface_state( + dst_dut, dst_asic, bfd_base_instance, interface, action + ) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Verify that corresponding static route has been added on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + def test_bfd_lc_asic_shutdown_ipv4( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv4" + + request.config.interface_shutdown = True + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Extract portchannel interfaces on src + list_of_portchannels_on_src = dst_dut_nexthops.keys() + request.config.portchannels_on_dut = "src" + request.config.selected_portchannels = list_of_portchannels_on_src + + # Shutdown PortChannels + for interface in list_of_portchannels_on_src: + action = "shutdown" + self.control_interface_state( + src_dut, src_asic, bfd_base_instance, interface, action + ) + + # Verification of BFD session state. + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Down" + ), + ) + + # Verify that corresponding static route has been removed on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + + for interface in list_of_portchannels_on_src: + action = "startup" + self.control_interface_state( + src_dut, src_asic, bfd_base_instance, interface, action + ) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Verify that corresponding static route has been added on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + def test_bfd_lc_asic_shutdown_ipv6( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + request.config.interface_shutdown = True + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Extract portchannel interfaces on src + list_of_portchannels_on_src = dst_dut_nexthops.keys() + request.config.portchannels_on_dut = "src" + request.config.selected_portchannels = list_of_portchannels_on_src + + # Shutdown PortChannels + for interface in list_of_portchannels_on_src: + action = "shutdown" + self.control_interface_state( + src_dut, src_asic, bfd_base_instance, interface, action + ) + + # Verification of BFD session state. + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Down" + ), + ) + + # Verify that corresponding static route has been removed on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + + for interface in list_of_portchannels_on_src: + action = "startup" + self.control_interface_state( + src_dut, src_asic, bfd_base_instance, interface, action + ) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Verify that corresponding static route has been added on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + def test_bfd_portchannel_member_flap_ipv4( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv4" + + request.config.interface_shutdown = True + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Extract portchannel interfaces on src + list_of_portchannels_on_src = dst_dut_nexthops.keys() + request.config.portchannels_on_dut = "src" + request.config.selected_portchannels = list_of_portchannels_on_src + + # Shutdown PortChannel members + for portchannel_interface in list_of_portchannels_on_src: + action = "shutdown" + list_of_portchannel_members_on_src = ( + bfd_base_instance.extract_backend_portchannels(src_dut)[ + portchannel_interface + ]["members"] + ) + request.config.selected_portchannel_members = ( + list_of_portchannel_members_on_src + ) + for each_member in list_of_portchannel_members_on_src: + self.control_interface_state( + src_dut, src_asic, bfd_base_instance, each_member, action + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Down" + ), + ) + + # Verify that corresponding static route has been removed on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + + # Bring up of PortChannel members + for portchannel_interface in list_of_portchannels_on_src: + action = "startup" + list_of_portchannel_members_on_src = ( + bfd_base_instance.extract_backend_portchannels(src_dut)[ + portchannel_interface + ]["members"] + ) + for each_member in list_of_portchannel_members_on_src: + self.control_interface_state( + src_dut, src_asic, bfd_base_instance, each_member, action + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Verify that corresponding static route has been added on both duts + logger.info("Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + def test_bfd_config_reload_ipv4( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv4" + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + + # Config reload of Source dut + config_reload(src_dut) + + # Waiting for all processes on Source dut + wait_critical_processes(src_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source & destination dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + + # Config reload of Source dut + config_reload(src_dut) + + # Waiting for all processes on Source dut + wait_critical_processes(src_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_portchannel_member_flap_ipv6( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + request.config.interface_shutdown = True + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Extract portchannel interfaces on src + list_of_portchannels_on_src = dst_dut_nexthops.keys() + request.config.portchannels_on_dut = "src" + request.config.selected_portchannels = list_of_portchannels_on_src + + # Shutdown PortChannel members + for portchannel_interface in list_of_portchannels_on_src: + action = "shutdown" + list_of_portchannel_members_on_src = ( + bfd_base_instance.extract_backend_portchannels(src_dut)[ + portchannel_interface + ]["members"] + ) + request.config.selected_portchannel_members = ( + list_of_portchannel_members_on_src + ) + for each_member in list_of_portchannel_members_on_src: + self.control_interface_state( + src_dut, src_asic, bfd_base_instance, each_member, action + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Down" + ), + ) + + # Verify that corresponding static route has been removed on both duts + logger.info("BFD & Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Removal", + bfd_base_instance, + version, + ) + + # Bring up of PortChannel members + for portchannel_interface in list_of_portchannels_on_src: + action = "startup" + list_of_portchannel_members_on_src = ( + bfd_base_instance.extract_backend_portchannels(src_dut)[ + portchannel_interface + ]["members"] + ) + for each_member in list_of_portchannel_members_on_src: + self.control_interface_state( + src_dut, src_asic, bfd_base_instance, each_member, action + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Verify that corresponding static route has been added on both duts + logger.info("Static route verifications") + self.verify_static_route( + request, + dst_asic, + dst_prefix, + dst_dut, + dst_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + self.verify_static_route( + request, + src_asic, + src_prefix, + src_dut, + src_dut_nexthops, + "Route Addition", + bfd_base_instance, + version, + ) + + def test_bfd_with_rp_config_reload_ipv4( + self, + localhost, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + enum_supervisor_dut_hostname, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv4" + + rp = duthosts[enum_supervisor_dut_hostname] + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Perform a cold reboot on source dut + config_reload(rp) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source & destination dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Config reload of Source dut + config_reload(rp) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. 
Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_with_rp_config_reload_ipv6( + self, + localhost, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + enum_supervisor_dut_hostname, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + rp = duthosts[enum_supervisor_dut_hostname] + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Perform a cold reboot on source dut + config_reload(rp) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source & destination dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Config reload of Source dut + config_reload(rp) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. 
Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_with_bad_fc_asic_ipv4( + self, + localhost, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + enum_supervisor_dut_hostname, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv4" + + rp = duthosts[enum_supervisor_dut_hostname] + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Extract asic ids + docker_output = rp.shell("docker ps | grep swss | awk '{print $NF}'")[ + "stdout" + ].split("\n") + asic_ids = [int(element.split("swss")[1]) for element in docker_output] + + # Shut down corresponding asic on supervisor to simulate bad asic + for id in asic_ids: + rp.shell("systemctl stop swss@{}".format(id)) + + # Verify that BFD sessions are down + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Down" + ), + ) + + # Config reload RP to bring up the swss containers + config_reload(rp) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Config reload RP + config_reload(rp) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_config_reload_ipv6( + self, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + + # Config reload of Source dut + config_reload(src_dut) + + # Waiting for all processes on Source dut + wait_critical_processes(src_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source & destination dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + + # Config reload of Source dut + config_reload(src_dut) + + # Waiting for all processes on Source dut + wait_critical_processes(src_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) + + def test_bfd_with_bad_fc_asic_ipv6( + self, + localhost, + duthost, + request, + duthosts, + tbinfo, + get_src_dst_asic_and_duts, + bfd_base_instance, + enum_supervisor_dut_hostname, + bfd_cleanup_db, + ): + """ + Author: Harsha Golla + Email : harsgoll@cisco.com + """ + + version = "ipv6" + + rp = duthosts[enum_supervisor_dut_hostname] + + # Selecting source, destination dut & prefix & BFD status verification for all nexthops + logger.info( + "Selecting Source dut, destination dut, source asic, destination asic, source prefix, destination prefix" + ) + ( + src_asic, + dst_asic, + src_dut, + dst_dut, + src_dut_nexthops, + dst_dut_nexthops, + src_prefix, + dst_prefix, + ) = self.select_src_dst_dut_with_asic( + request, get_src_dst_asic_and_duts, bfd_base_instance, version + ) + + # Creation of BFD + logger.info("BFD addition on source dut") + bfd_base_instance.add_bfd(src_asic.asic_index, src_prefix, src_dut) + + logger.info("BFD addition on destination dut") + bfd_base_instance.add_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Verification of BFD session state. 
+ assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Extract asic ids + docker_output = rp.shell("docker ps | grep swss | awk '{print $NF}'")[ + "stdout" + ].split("\n") + asic_ids = [int(element.split("swss")[1]) for element in docker_output] + + # Shut down corresponding asic on supervisor to simulate bad asic + for id in asic_ids: + rp.shell("systemctl stop swss@{}".format(id)) + + # Verify that BFD sessions are down + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Down" + ), + ) + assert wait_until( + 180, + 10, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Down" + ), + ) + + # Config reload RP to bring up the swss containers + config_reload(rp) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "Up" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "Up" + ), + ) + + logger.info("BFD deletion on source dut") + bfd_base_instance.delete_bfd(src_asic.asic_index, src_prefix, src_dut) + bfd_base_instance.delete_bfd(dst_asic.asic_index, dst_prefix, dst_dut) + + # Savings the configs + src_dut.shell("sudo config save -y") + dst_dut.shell("sudo config save -y") + + # Config reload RP + config_reload(rp) + + # Waiting for all processes on Source & destination dut + wait_critical_processes(src_dut) + wait_critical_processes(dst_dut) + + check_bgp = request.getfixturevalue("check_bgp") + results = check_bgp() + failed = [ + result for result in results if "failed" in result and result["failed"] + ] + if failed: + pytest.fail( + "BGP check failed, not all BGP sessions are up. Failed: {}".format( + failed + ) + ) + + # Verification of BFD session state. 
+ assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + dst_dut, dst_dut_nexthops.values(), dst_asic, "No BFD sessions found" + ), + ) + assert wait_until( + 300, + 20, + 0, + lambda: bfd_base_instance.verify_bfd_state( + src_dut, src_dut_nexthops.values(), src_asic, "No BFD sessions found" + ), + ) diff --git a/tests/bgp/bgp_helpers.py b/tests/bgp/bgp_helpers.py index 4b6795a1d01..8a583d730bb 100644 --- a/tests/bgp/bgp_helpers.py +++ b/tests/bgp/bgp_helpers.py @@ -556,33 +556,41 @@ def get_eth_port(duthost, tbinfo): return port -def get_vm_offset(duthost, nbrhosts, tbinfo): +def get_vm_offset(duthost, nbrhosts, tbinfo, is_random=True): """ - Get port offset of exabgp and ptf receive port + Get ports offset of exabgp and ptf receive port """ + port_offset_ptf_recv_port_list = [] + vm_name_list = [vm_name for vm_name in nbrhosts.keys() if vm_name.endswith('T0')] logging.info("get_vm_offset ---------") - vm_name = random.choice([vm_name for vm_name in nbrhosts.keys() if vm_name.endswith('T0')]) - ptf_recv_port = get_ptf_recv_port(duthost, vm_name, tbinfo) - port_offset = tbinfo['topo']['properties']['topology']['VMs'][vm_name]['vm_offset'] - logging.info("vm_offset of {} is: {}".format(vm_name, port_offset)) - return port_offset, ptf_recv_port + if is_random: + vm_name_list = [random.choice(vm_name_list)] + for vm_name in vm_name_list: + port_offset = tbinfo['topo']['properties']['topology']['VMs'][vm_name]['vm_offset'] + ptf_recv_port = get_ptf_recv_port(duthost, vm_name, tbinfo) + logging.info("vm_offset of {} is: {}".format(vm_name, port_offset)) + port_offset_ptf_recv_port_list.append((port_offset, ptf_recv_port)) + return port_offset_ptf_recv_port_list -def get_exabgp_port(duthost, nbrhosts, tbinfo, exabgp_base_port): +def get_exabgp_port(duthost, nbrhosts, tbinfo, exabgp_base_port, is_random=True): """ Get exabgp port and ptf receive port """ - port_offset, ptf_recv_port = get_vm_offset(duthost, nbrhosts, tbinfo) - return 
exabgp_base_port + port_offset, ptf_recv_port + port_offset_ptf_recv_port_list = get_vm_offset(duthost, nbrhosts, tbinfo, is_random) + port_offset_list, ptf_recv_port_list = zip(*port_offset_ptf_recv_port_list) + return [_ + exabgp_base_port for _ in port_offset_list], ptf_recv_port_list -def get_vm_name(tbinfo, vm_level='T2'): +def get_vm_name_list(tbinfo, vm_level='T2'): """ Get vm name, default return value would be T2 VM name """ + vm_name_list = [] for vm in tbinfo['topo']['properties']['topology']['VMs'].keys(): if vm[-2:] == vm_level: - return vm + vm_name_list.append(vm) + return vm_name_list def get_t2_ptf_intfs(mg_facts): @@ -600,20 +608,32 @@ def get_t2_ptf_intfs(mg_facts): return ptf_interfaces +def get_eth_name_from_ptf_port(mg_facts, ptf_ports): + """ + Get eth name from ptf port + """ + eth_name_list = [] + for k, v in mg_facts["minigraph_ptf_indices"].items(): + for port in ptf_ports: + if v == port: + eth_name_list.append(k) + return eth_name_list + + def get_bgp_neighbor_ip(duthost, vm_name, vrf=DEFAULT): """ Get ipv4 and ipv6 bgp neighbor ip addresses """ if vrf == DEFAULT: - cmd_v4 = "show ip interface | grep -w {} | awk '{{print $5}}'" - cmd_v6 = "show ipv6 interface | grep -w {} | awk '{{print $5}}'" - bgp_neighbor_ip = duthost.shell(cmd_v4.format(vm_name))['stdout'] - bgp_neighbor_ipv6 = duthost.shell(cmd_v6.format(vm_name))['stdout'] + cmd_v4 = "show ip interface | grep -w {} | awk '{{print $2}}'" + cmd_v6 = "show ipv6 interface | grep -w {} | awk '{{print $2}}'" + bgp_neighbor_ip = duthost.shell(cmd_v4.format(vm_name))['stdout'].split('/')[0] + bgp_neighbor_ipv6 = duthost.shell(cmd_v6.format(vm_name))['stdout'].split('/')[0] else: - cmd_v4 = "show ip interface | grep -w {} | awk '{{print $7}}' | sed 's/)//g'" - cmd_v6 = "show ipv6 interface | grep -w {} | awk '{{print $7}}' | sed 's/)//g'" - bgp_neighbor_ip = duthost.shell(cmd_v4.format(vm_name))['stdout'][1:-1] - bgp_neighbor_ipv6 = duthost.shell(cmd_v6.format(vm_name))['stdout'][1:-1] + 
cmd_v4 = "show ip interface | grep -w {} | awk '{{print $3}}'" + cmd_v6 = "show ipv6 interface | grep -w {} | awk '{{print $3}}'" + bgp_neighbor_ip = duthost.shell(cmd_v4.format(vm_name))['stdout'].split('/')[0] + bgp_neighbor_ipv6 = duthost.shell(cmd_v6.format(vm_name))['stdout'].split('/')[0] logging.info("BGP neighbor of {} is {}".format(vm_name, bgp_neighbor_ip)) logging.info("IPv6 BGP neighbor of {} is {}".format(vm_name, bgp_neighbor_ipv6)) @@ -642,6 +662,8 @@ def check_route_status(duthost, route, check_field, vrf=DEFAULT, ip_ver=IP_VER, Get 'offloaded' or 'queu' value of specific route """ out = get_vrf_route_json(duthost, route, vrf, ip_ver) + if out == '{}': + return False check_field_status = out[route][0].get(check_field, None) if check_field_status: logging.info("Route:{} - {} status:{} - expect status:{}" @@ -672,55 +694,68 @@ def check_route_install_status(duthost, route, vrf=DEFAULT, ip_ver=IP_VER, check "Vrf:{} - route:{} is installed into FIB".format(vrf, route)) -def check_propagate_route(duthost, route, bgp_neighbor, vrf=DEFAULT, ip_ver=IP_VER, action=ACTION_IN): +def check_propagate_route(vmhost, route_list, bgp_neighbor, ip_ver=IP_VER, action=ACTION_IN): """ Check whether ipv4 or ipv6 route is advertised to T2 VM """ if ip_ver == IP_VER: - logging.info('Execute command - vtysh -c "show ip bgp vrf {} neighbors {} advertised-routes"' - .format(vrf, bgp_neighbor)) - out = duthost.shell('vtysh -c "show ip bgp vrf {} neighbors {} advertised-routes"'.format(vrf, bgp_neighbor), - verbose=False)['stdout'] + logging.info('Execute EOS command - "show ip bgp neighbors {} routes"'.format(bgp_neighbor)) + out = vmhost['host'].eos_command(commands=['show ip bgp neighbors {} routes'.format(bgp_neighbor)])['stdout'][0] else: - logging.info('Execute command - vtysh -c "show ip bgp vrf {} ipv6 neighbors {} advertised-routes"' - .format(vrf, bgp_neighbor)) - out = duthost.shell('vtysh -c "show ip bgp vrf {} ipv6 neighbors {} advertised-routes"'. 
- format(vrf, bgp_neighbor), verbose=False)['stdout'] + logging.info('Execute EOS command - "show ipv6 bgp peers {} routes"'.format(bgp_neighbor)) + out = vmhost['host'].eos_command(commands=['show ipv6 bgp peers {} routes'.format(bgp_neighbor)])['stdout'][0] logging.debug('Command output:\n {}'.format(out)) if action == ACTION_IN: - if route in out: - logging.info("Route:{} found - action:{}".format(route, action)) - return True - else: - logging.info("Route:{} not found - action:{}".format(route, action)) - return False + for route in route_list: + if route in out: + logging.debug("Route:{} found - action:{}".format(route, action)) + else: + logging.info("Route:{} not found - action:{}".format(route, action)) + return False else: - if route in out: - logging.info("Route:{} found - action:{}".format(route, action)) - return False - else: - logging.info("Route:{} not found - action:{}".format(route, action)) - return True + for route in route_list: + if route in out: + logging.info("Route:{} found - action:{}".format(route, action)) + return False + else: + logging.debug("Route:{} not found - action:{}".format(route, action)) + return True -def validate_route_propagate_status(duthost, route, bgp_neighbor, vrf=DEFAULT, ip_ver=IP_VER, exist=True): +def validate_route_propagate_status(vmhost, route_list, bgp_neighbor, vrf=DEFAULT, ip_ver=IP_VER, exist=True): """ Verify ipv4 or ipv6 route propagate status - :param duthost: duthost fixture - :param route: ipv4 or ipv6 route + :param vmhost: vm host object + :param route_list: ipv4 or ipv6 route list :param bgp_neighbor: ipv4 or ipv6 bgp neighbor address :param vrf: vrf name :param ip_ver: ip version number :param exist: route expected status """ if exist: - pytest_assert(wait_until(30, 2, 0, check_propagate_route, duthost, route, bgp_neighbor, vrf, ip_ver), - "Vrf:{} - route:{} is not propagated to {}".format(vrf, route, bgp_neighbor)) + pytest_assert(wait_until(30, 2, 0, check_propagate_route, vmhost, route_list, 
bgp_neighbor, ip_ver), + "Vrf:{} - route:{} is not propagated to T2 VM {}".format(vrf, route_list, vmhost)) else: - pytest_assert(wait_until(30, 2, 0, check_propagate_route, duthost, route, bgp_neighbor, vrf, ip_ver, - ACTION_NOT_IN), - "Vrf:{} - route:{} is propagated to {}".format(vrf, route, bgp_neighbor)) + pytest_assert( + wait_until(30, 2, 0, check_propagate_route, vmhost, route_list, bgp_neighbor, ip_ver, ACTION_NOT_IN), + "Vrf:{} - route:{} is propagated to T2 VM {}".format(vrf, route_list, vmhost)) + + +def check_fib_route(duthost, route_list, ip_ver=IP_VER): + """ + Verify ipv4 or ipv6 routes are installed into fib + """ + fib_type = 'ip' if ip_ver == IP_VER else 'ipv6' + logging.info(f"Execute command - show {fib_type} fib") + out = duthost.shell(f"show {fib_type} fib") + for route in route_list: + if route in out['stdout']: + logging.debug(f"Route:{route} installed into fib") + else: + logging.info(f"Route:{route} not found in fib") + assert False + logging.info(f"{route_list} are installed into fib successfully") def operate_orchagent(duthost, action=ACTION_STOP): @@ -734,3 +769,15 @@ def operate_orchagent(duthost, action=ACTION_STOP): logging.info('Recover orchagent process') cmd = 'sudo kill -SIGCONT $(pidof orchagent)' duthost.shell(cmd) + + +def check_bgp_neighbor(duthost): + """ + Validate all the bgp neighbors are established + """ + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + bgp_neighbors = config_facts.get('BGP_NEIGHBOR', {}) + pytest_assert( + wait_until(300, 10, 0, duthost.check_bgp_session_state, bgp_neighbors), + "bgp sessions {} are not up".format(bgp_neighbors) + ) diff --git a/tests/bgp/conftest.py b/tests/bgp/conftest.py index bf3bf6ed239..51ca4126367 100644 --- a/tests/bgp/conftest.py +++ b/tests/bgp/conftest.py @@ -638,6 +638,14 @@ def pytest_addoption(parser): default="reload", help="reboot type such as reload, fast, warm, cold, random" ) + parser.addoption( + 
"--continuous_boot_times", + action="store", + dest="continuous_boot_times", + type=int, + default=3, + help="continuous reboot time number. default is 3" + ) @pytest.fixture(scope="module", autouse=True) diff --git a/tests/bgp/test_bgp_dual_asn.py b/tests/bgp/test_bgp_dual_asn.py index 1fc60b5885b..b1e079173f3 100644 --- a/tests/bgp/test_bgp_dual_asn.py +++ b/tests/bgp/test_bgp_dual_asn.py @@ -282,7 +282,7 @@ def dual_asn_teardown(self, duthosts, rand_one_dut_hostname, ptfhost): logger.info("exabgp stopped") for port in self.ptf_ports: - ptfhost.shell("ip addr flush dev %s" % port) + ptfhost.shell("ip addr flush dev {} scope global".format(port)) duthost.command("sonic-clear arp") duthost.command("sonic-clear ndp") duthost.command("sonic-clear fdb all") diff --git a/tests/bgp/test_bgp_fact.py b/tests/bgp/test_bgp_fact.py index df83d08b42f..f2678ce4efa 100644 --- a/tests/bgp/test_bgp_fact.py +++ b/tests/bgp/test_bgp_fact.py @@ -6,7 +6,7 @@ ] -def test_bgp_facts(duthosts, enum_frontend_dut_hostname, enum_asic_index): +def run_bgp_facts(duthosts, enum_frontend_dut_hostname, enum_asic_index): """compare the bgp facts between observed states and target state""" duthost = duthosts[enum_frontend_dut_hostname] @@ -40,3 +40,7 @@ def test_bgp_facts(duthosts, enum_frontend_dut_hostname, enum_asic_index): assert v['name'] == bgp_facts['bgp_neighbors'][k]['description'] # Compare the bgp neighbors ASN with config db assert int(v['asn'].encode().decode("utf-8")) == bgp_facts['bgp_neighbors'][k]['remote AS'] + + +def test_bgp_facts(duthosts, enum_frontend_dut_hostname, enum_asic_index): + run_bgp_facts(duthosts, enum_frontend_dut_hostname, enum_asic_index) diff --git a/tests/bgp/test_bgp_suppress_fib.py b/tests/bgp/test_bgp_suppress_fib.py index ca8fa1fbd64..63365e49dd1 100644 --- a/tests/bgp/test_bgp_suppress_fib.py +++ b/tests/bgp/test_bgp_suppress_fib.py @@ -5,19 +5,25 @@ import ipaddress import pytest import allure +import time + +from scapy.all import sniff, IP, IPv6 +from 
scapy.contrib import bgp import ptf.testutils as testutils import ptf.packet as scapy from copy import deepcopy - +from retry.api import retry_call from ptf.mask import Mask from natsort import natsorted from tests.common.reboot import reboot from tests.common.utilities import wait_until from tests.common.config_reload import config_reload from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.tcpdump_sniff_helper import TcpdumpSniffHelper from tests.common.platform.interface_utils import check_interface_status_of_up_ports -from bgp_helpers import restart_bgp_session, get_eth_port, get_exabgp_port, get_vm_name, get_bgp_neighbor_ip, \ - check_route_install_status, validate_route_propagate_status, operate_orchagent, get_t2_ptf_intfs +from bgp_helpers import restart_bgp_session, get_eth_port, get_exabgp_port, get_vm_name_list, get_bgp_neighbor_ip, \ + check_route_install_status, validate_route_propagate_status, operate_orchagent, get_t2_ptf_intfs, \ + get_eth_name_from_ptf_port, check_bgp_neighbor, check_fib_route pytestmark = [ pytest.mark.topology("t1"), @@ -47,35 +53,50 @@ USER_DEFINED_VRF = "Vrf1" DEFAULT = "default" VRF_TYPES = [DEFAULT, USER_DEFINED_VRF] - +BGP_FILTER = 'tcp port 179' STATIC_ROUTE_PREFIX = "1.1.1.0/24" -# ipv4 route injection from T0 -IP_ROUTE_LIST = [ - '91.0.1.0/24', - '91.0.2.0/24' -] +BASE_IP_ROUTE = '91.0.1.0/24' +BASE_IPV6_ROUTE = '1000:1001::/64' +BULK_ROUTE_COUNT = 512 # 512 ipv4 route and 512 ipv6 route +FUNCTION = "function" +STRESS = "stress" +TRAFFIC_WAIT_TIME = 0.1 +BULK_TRAFFIC_WAIT_TIME = 0.004 +BGP_ROUTE_FLAP_TIMES = 5 +UPDATE_WITHDRAW_THRESHOLD = 2 # Use the threshold value defined in test_bgp_update_timer.py -# ipv6 route injection from T0 -IPV6_ROUTE_LIST = [ - '1000:1001::/64', - '1000:1002::/64' -] -TRAFFIC_DATA_FORWARD = [ - # src_ip, expected_result - ("91.0.1.1", FORWARD), - ("91.0.2.1", FORWARD), - ("1000:1001::1", FORWARD), - ("1000:1002::1", FORWARD) -] +@pytest.fixture(scope="module") 
+def generate_route_and_traffic_data(): + """ + Generate route and traffic data + """ + ip_routes_ipv4 = generate_routes(BASE_IP_ROUTE) + ip_routes_ipv6 = generate_routes(BASE_IPV6_ROUTE) + + ipv4_routes_stress_and_perf = generate_routes(BASE_IP_ROUTE, BULK_ROUTE_COUNT) + ipv6_routes_stress_and_perf = generate_routes(BASE_IPV6_ROUTE, BULK_ROUTE_COUNT) + + route_and_traffic_data = { + FUNCTION: [ + ip_routes_ipv4, + ip_routes_ipv6, + generate_traffic_data(ip_routes_ipv4, FORWARD), + generate_traffic_data(ip_routes_ipv6, FORWARD), + generate_traffic_data(ip_routes_ipv4, DROP), + generate_traffic_data(ip_routes_ipv6, DROP) + ], + STRESS: [ + ipv4_routes_stress_and_perf, + ipv6_routes_stress_and_perf, + generate_traffic_data(ipv4_routes_stress_and_perf, FORWARD), + generate_traffic_data(ipv6_routes_stress_and_perf, FORWARD), + generate_traffic_data(ipv4_routes_stress_and_perf, DROP), + generate_traffic_data(ipv6_routes_stress_and_perf, DROP) + ] + } -TRAFFIC_DATA_DROP = [ - # src_ip, expected_result - ("91.0.1.1", DROP), - ("91.0.2.1", DROP), - ("1000:1001::1", DROP), - ("1000:1002::1", DROP), -] + return route_and_traffic_data @pytest.fixture(autouse=True) @@ -124,14 +145,107 @@ def restore_bgp_suppress_fib(duthost): duthost.shell('sudo config save -y') -@pytest.fixture(scope="module") -def get_exabgp_ptf_ports(duthost, nbrhosts, tbinfo): +@pytest.fixture(scope='module') +def completeness_level(pytestconfig): + return pytestconfig.getoption("--completeness_level") + + +@pytest.fixture(scope="function") +def get_exabgp_ptf_ports(duthost, nbrhosts, tbinfo, completeness_level, request): """ Get ipv4 and ipv6 Exabgp port and ptf receive port """ - exabgp_port, ptf_recv_port = get_exabgp_port(duthost, nbrhosts, tbinfo, EXABGP_BASE_PORT) - exabgp_port_v6, ptf_recv_port_v6 = get_exabgp_port(duthost, nbrhosts, tbinfo, EXABGP_BASE_PORT_V6) - return exabgp_port, ptf_recv_port, exabgp_port_v6, ptf_recv_port_v6 + is_random = True + if completeness_level == "thorough": + 
logger.info("Completeness Level is 'thorough', and script would do full verification over all VMs!") + is_random = False + exabgp_port_list, ptf_recv_port_list = get_exabgp_port(duthost, nbrhosts, tbinfo, EXABGP_BASE_PORT, is_random) + exabgp_port_list_v6, ptf_recv_port_list_v6 = get_exabgp_port(duthost, nbrhosts, tbinfo, EXABGP_BASE_PORT_V6, + is_random) + return [(exabgp_port, ptf_recv_port, exabgp_port_v6, ptf_recv_port_v6) + for exabgp_port, ptf_recv_port, exabgp_port_v6, ptf_recv_port_v6 in zip(exabgp_port_list, + ptf_recv_port_list, + exabgp_port_list_v6, + ptf_recv_port_list_v6)] + + +@pytest.fixture(scope="function") +def prepare_param(duthost, tbinfo, get_exabgp_ptf_ports): + """ + Prepare parameters + """ + router_mac = duthost.facts["router_mac"] + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + ptf_ip = tbinfo['ptf_ip'] + total_port_list = get_exabgp_ptf_ports + exabgp_port_list, ptf_recv_port_list, exabgp_port_list_v6, ptf_recv_port_list_v6 = zip(*total_port_list) + recv_port_list = [{4: ptf_recv_port, 6: ptf_recv_port_v6} for ptf_recv_port, ptf_recv_port_v6 in + zip(ptf_recv_port_list, ptf_recv_port_list_v6)] + return router_mac, mg_facts, ptf_ip, exabgp_port_list, exabgp_port_list_v6, recv_port_list + + +@pytest.fixture(scope="module") +def continuous_boot_times(request, completeness_level): + continuous_boot_times = request.config.getoption("--continuous_boot_times") + if completeness_level == 'thorough': + logger.info(f"Completeness Level is 'thorough', and script would do continuous boot test " + f"for {continuous_boot_times} times") + return continuous_boot_times + else: + return 1 + + +@pytest.fixture(scope="function") +def tcpdump_helper(ptfadapter, duthost, ptfhost): + return TcpdumpSniffHelper(ptfadapter, duthost, ptfhost) + + +def ip_address_incr(ip_str): + """ + Increment an IP subnet prefix by 1 + """ + net = ipaddress.ip_network(ip_str, strict=False) + next_net_addr = net.network_address + net.num_addresses + return 
f"{next_net_addr}/{net.prefixlen}" + + +def generate_routes(start_ip, count=2): + """ + Generate a number of IP routes + """ + route_list = [start_ip] + for _ in range(count - 1): + start_ip = ip_address_incr(start_ip) + route_list.append(start_ip) + return route_list + + +def get_first_ip(subnet): + """ + Get the first IP address from the subnet + """ + network = ipaddress.ip_network(subnet, strict=False) + all_usable_ips = network.hosts() + first_ip = next(all_usable_ips) + return str(first_ip) + + +def generate_traffic_data(route_list, action): + """ + Generate traffic data list + Example: + Input: route_list=['91.0.1.0/24', '91.0.2.0/24'], action='FORWARD' + Output: [ + ('91.0.1.1', 'FORWARD'), + ('91.0.2.1', 'FORWARD') + ] + """ + traffic_data_list = [] + for route in route_list: + ipaddr = get_first_ip(route) + traffic_data = (ipaddr, action) + traffic_data_list.append(traffic_data) + return traffic_data_list def is_orchagent_stopped(duthost): @@ -144,15 +258,12 @@ def is_orchagent_stopped(duthost): @pytest.fixture(scope="function", autouse=True) -def withdraw_bgp_routes_and_restore_orchagent(duthost, tbinfo, nbrhosts, get_exabgp_ptf_ports): +def restore_orchagent(duthost, tbinfo, nbrhosts, get_exabgp_ptf_ports): """ - Fixture to withdraw ipv4 and ipv6 routes and restore process 'orchagent' in case of unexpected failures in case + Fixture to restore process 'orchagent' in case of unexpected failures in case """ yield - ptf_ip = tbinfo['ptf_ip'] - exabgp_port, _, exabgp_port_v6, _ = get_exabgp_ptf_ports - announce_ipv4_ipv6_routes(ptf_ip, exabgp_port, exabgp_port_v6, action=WITHDRAW) if is_orchagent_stopped(duthost): logger.info('Orchagent process stopped, will restore it') operate_orchagent(duthost, action=ACTION_CONTINUE) @@ -174,16 +285,16 @@ def get_cfg_facts(duthost): return cfg_facts -def get_port_connected_with_t0_vm(duthost, nbrhosts): +def get_port_connected_with_vm(duthost, nbrhosts, vm_type='T0'): """ Get ports that connects with T0 VM """ port_list 
= [] - t0_vm_list = [vm_name for vm_name in nbrhosts.keys() if vm_name.endswith('T0')] - for t0_vm in t0_vm_list: - port = duthost.shell("show ip interface | grep -w {} | awk '{{print $1}}'".format(t0_vm))['stdout'] + vm_list = [vm_name for vm_name in nbrhosts.keys() if vm_name.endswith(vm_type)] + for vm in vm_list: + port = duthost.shell("show ip interface | grep -w {} | awk '{{print $1}}'".format(vm))['stdout'] port_list.append(port) - logger.info("Ports connected with T0 VMs: {}".format(port_list)) + logger.info("Ports connected with {} VMs: {}".format(vm_type, port_list)) return port_list @@ -193,7 +304,7 @@ def setup_vrf_cfg(duthost, cfg_facts, nbrhosts, tbinfo): """ cfg_t1 = deepcopy(cfg_facts) cfg_t1.pop('config_port_indices', None) - port_list = get_port_connected_with_t0_vm(duthost, nbrhosts) + port_list = get_port_connected_with_vm(duthost, nbrhosts) vm_list = nbrhosts.keys() mg_facts = duthost.get_extended_minigraph_facts(tbinfo) port_channel_list = mg_facts['minigraph_portchannels'].keys() @@ -218,26 +329,30 @@ def setup_vrf(duthost, nbrhosts, tbinfo): setup_vrf_cfg(duthost, cfg_t1, nbrhosts, tbinfo) -def install_route_from_exabgp(operation, ptfip, route, port): +def install_route_from_exabgp(operation, ptfip, route_list, port): """ Install or withdraw ipv4 or ipv6 route by exabgp """ + route_data = [] url = "http://{}:{}".format(ptfip, port) - data = {"command": "{} route {} next-hop self".format(operation, route)} - logging.info("url: {}".format(url)) - logging.info("data: {}".format(data)) - r = requests.post(url, data=data) + for route in route_list: + route_data.append(route) + command = "{} attribute next-hop self nlri {}".format(operation, ' '.join(route_data)) + data = {"command": command} + logger.info("url: {}".format(url)) + logger.info("command: {}".format(data)) + r = requests.post(url, data=data, timeout=90) assert r.status_code == 200 -def announce_route(ptfip, route, port, action=ANNOUNCE): +def announce_route(ptfip, route_list, port, 
action=ANNOUNCE): """ Announce or withdraw ipv4 or ipv6 route """ - logging.info("\n========================== announce_route -- {} ==========================".format(action)) - logging.info(" action:{}\n ptfip:{}\n route:{}\n port:{}".format(action, ptfip, route, port)) - install_route_from_exabgp(action, ptfip, route, port) - logging.info("\n--------------------------------------------------------------------------------") + logger.info("\n========================== announce_route -- {} ==========================".format(action)) + logger.info(" action:{}\n ptfip:{}\n route:{}\n port:{}".format(action, ptfip, route_list, port)) + install_route_from_exabgp(action, ptfip, route_list, port) + logger.info("\n--------------------------------------------------------------------------------") def generate_packet(src_ip, dst_ip, dst_mac): @@ -247,82 +362,227 @@ def generate_packet(src_ip, dst_ip, dst_mac): if ipaddress.ip_network(src_ip.encode().decode(), False).version == 4: pkt = testutils.simple_ip_packet(eth_dst=dst_mac, ip_src=src_ip, ip_dst=dst_ip) exp_pkt = Mask(pkt) - exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") - exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") + exp_pkt.set_do_not_care_packet(scapy.IP, "ttl") + exp_pkt.set_do_not_care_packet(scapy.IP, "chksum") else: pkt = testutils.simple_tcpv6_packet(eth_dst=dst_mac, ipv6_src=src_ip, ipv6_dst=dst_ip) exp_pkt = Mask(pkt) - exp_pkt.set_do_not_care_scapy(scapy.IPv6, "hlim") + exp_pkt.set_do_not_care_packet(scapy.IPv6, "hlim") - exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") - exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") + exp_pkt.set_do_not_care_packet(scapy.Ether, "dst") + exp_pkt.set_do_not_care_packet(scapy.Ether, "src") return pkt, exp_pkt -def send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, rx_port, expected_action): +def send_and_verify_packet(ptfadapter, pkt_list, exp_pkt_list, tx_port, rx_ports, exp_action_list, ip_ver_list=None): """ Send packet with ptfadapter and verify if packet 
is forwarded or dropped as expected """ ptfadapter.dataplane.flush() - testutils.send(ptfadapter, pkt=pkt, port_id=tx_port) - if expected_action == FORWARD: - testutils.verify_packet(ptfadapter, pkt=exp_pkt, port_id=rx_port, timeout=5) - else: - testutils.verify_no_packet(ptfadapter, pkt=exp_pkt, port_id=rx_port, timeout=5) + for pkt, exp_pkt, exp_action, ip_ver in zip(pkt_list, exp_pkt_list, exp_action_list, ip_ver_list): + rx_port = rx_ports[ip_ver] if ip_ver else rx_ports + testutils.send(ptfadapter, pkt=pkt, port_id=tx_port) + if exp_action == FORWARD: + testutils.verify_packet(ptfadapter, pkt=exp_pkt, port_id=rx_port, timeout=TRAFFIC_WAIT_TIME) + else: + testutils.verify_no_packet(ptfadapter, pkt=exp_pkt, port_id=rx_port, timeout=TRAFFIC_WAIT_TIME) -def send_and_verify_loopback_packets(ptfadapter, pkt, exp_pkt, tx_port, rx_ports, expected_action): +def send_and_verify_loopback_packets(ptfadapter, pkt_list, exp_pkt_list, tx_port, rx_ports, exp_action_list): """ Send packet with ptfadapter and verify if packet is forwarded back or dropped as expected """ ptfadapter.dataplane.flush() - testutils.send(ptfadapter, pkt=pkt, port_id=tx_port) - if expected_action == FORWARD: - testutils.verify_packets_any(ptfadapter, pkt=exp_pkt, ports=rx_ports) - else: - testutils.verify_no_packet_any(ptfadapter, pkt=exp_pkt, ports=rx_ports) + for pkt, exp_pkt, exp_action in zip(pkt_list, exp_pkt_list, exp_action_list): + testutils.send(ptfadapter, pkt=pkt, port_id=tx_port) + if exp_action == FORWARD: + testutils.verify_packets_any(ptfadapter, pkt=exp_pkt, ports=rx_ports, timeout=TRAFFIC_WAIT_TIME) + else: + testutils.verify_no_packet_any(ptfadapter, pkt=exp_pkt, ports=rx_ports, timeout=TRAFFIC_WAIT_TIME) -def validate_traffic(ptfadapter, traffic_data, router_mac, ptf_interfaces, recv_port, loop_back=False): +def send_and_verify_bulk_traffic(tcpdump_helper, ptfadapter, ip_ver_list, pkt_list, tx_port, rx_ports, exp_action_list): """ - Verify traffic is forwarded/forwarded back/drop as 
expected + Send packet with ptfadapter and verify if packet is forwarded or dropped as expected """ + tcpdump_helper.in_direct_ifaces = rx_ports if isinstance(rx_ports, list) else rx_ports.values() + tcpdump_helper.start_sniffer() + logger.info("Start sending traffic") + ptfadapter.dataplane.flush() + for pkt in pkt_list: + testutils.send(ptfadapter, pkt=pkt, port_id=tx_port) + time.sleep(BULK_TRAFFIC_WAIT_TIME) + + logger.info("Stop sending traffic") + tcpdump_helper.stop_sniffer() + cap_pkt_list = tcpdump_helper.sniffer_result() + check_pkt_forward_state(cap_pkt_list, ip_ver_list, pkt_list, exp_action_list) + + +def check_pkt_forward_state(captured_packets, ip_ver_list, send_packet_list, expect_action_list): + """ + Validate if sent packets are captured as expected + """ + act_forward_count = 0 + exp_forward_count = len([1 for action in expect_action_list if action == FORWARD]) + filter = "src={} dst={}" + captured_packets_str = str(captured_packets.res) + + for i in range(len(send_packet_list)): + ver_filter = 'IPv6' if ip_ver_list[i] == 6 else 'IP' + if filter.format(send_packet_list[i][ver_filter].src, + send_packet_list[i][ver_filter].dst) in captured_packets_str and \ + expect_action_list[i] == FORWARD: + act_forward_count += 1 + logger.debug("Packet is captured:\n{}".format(str(send_packet_list[i].summary))) + else: + logger.info("Packet is not captured:\n{}".format(str(send_packet_list[i].summary))) + + assert act_forward_count == exp_forward_count, \ + "Captured forward traffic number: {}, expect forward traffic number: {}".format(act_forward_count, + exp_forward_count) + + +def update_time_stamp(time_stamp_dict, prefix, timestamp): + if prefix in time_stamp_dict: + time_stamp_dict[prefix].append(timestamp) + else: + time_stamp_dict[prefix] = [timestamp] + + +def parse_time_stamp(bgp_packets, ipv4_route_list, ipv6_route_list): + announce_prefix_time_stamp, withdraw_prefix_time_stamp = {}, {} + bgp_updates = bgp_packets[bgp.BGPUpdate] + # get time stamp + 
for i in range(len(bgp_updates)): + if bgp.BGPNLRI_IPv4 in bgp_updates[i]: + layer_index = 1 + while bgp_updates[i].getlayer(bgp.BGPUpdate, nb=layer_index): + layer = bgp_updates[i].getlayer(bgp.BGPUpdate, nb=layer_index) + if layer.nlri: + for route in layer.nlri: + if route.prefix in ipv4_route_list: + update_time_stamp(announce_prefix_time_stamp, route.prefix, bgp_packets[i].time) + if layer.withdrawn_routes: + for route in layer.withdrawn_routes: + if route.prefix in ipv4_route_list: + update_time_stamp(withdraw_prefix_time_stamp, route.prefix, bgp_packets[i].time) + layer_index += 1 + + if bgp.BGPNLRI_IPv6 in bgp_updates[i]: + layer_index = 1 + while bgp_updates[i].getlayer(bgp.BGPPAMPReachNLRI, nb=layer_index): + layer = bgp_updates[i].getlayer(bgp.BGPPAMPReachNLRI, nb=layer_index) + if layer.nlri: + for route in layer.nlri: + if route.prefix in ipv6_route_list: + update_time_stamp(announce_prefix_time_stamp, route.prefix, bgp_packets[i].time) + layer_index += 1 + + layer_index = 1 + while bgp_updates[i].getlayer(bgp.BGPPAMPUnreachNLRI_IPv6, nb=layer_index): + layer = bgp_updates[i].getlayer(bgp.BGPPAMPUnreachNLRI_IPv6, nb=layer_index) + if layer.withdrawn_routes: + for route in layer.withdrawn_routes: + if route.prefix in ipv6_route_list: + update_time_stamp(withdraw_prefix_time_stamp, route.prefix, bgp_packets[i].time) + layer_index += 1 + + return announce_prefix_time_stamp, withdraw_prefix_time_stamp + + +def compute_middle_average_time(time_stamp_dict): + time_delta_list = [] + for _, timestamp_list in time_stamp_dict.items(): + time_delta_list.append(abs(timestamp_list[1] - timestamp_list[0])) + time_delta_list.sort() + + mid_delta_time = time_delta_list[(len(time_delta_list) - 1) // 2] + ave_delta_time = sum(time_delta_list) / len(time_delta_list) + return mid_delta_time, ave_delta_time + + +def validate_route_process_perf(pcap_file, ipv4_route_list, ipv6_route_list): + route_num = len(ipv4_route_list + ipv6_route_list) + bgp_packets = 
sniff(offline=pcap_file, + lfilter=lambda p: (IP or IPv6 in p) and bgp.BGPHeader in p and p[bgp.BGPHeader].type == 2) + announce_prefix_time_stamp, withdraw_prefix_time_stamp = parse_time_stamp(bgp_packets, ipv4_route_list, + ipv6_route_list) + logger.info("Received and send timestamp for announced routes:\n{}".format(announce_prefix_time_stamp)) + logger.info("Received and send timestamp for withdrawn routes:\n{}".format(withdraw_prefix_time_stamp)) + + announce_middle_time, announce_average_time = compute_middle_average_time(announce_prefix_time_stamp) + withdraw_middle_time, withdraw_average_time = compute_middle_average_time(withdraw_prefix_time_stamp) + # compare with threshold + logger.info("\n------------------------------------------------------------------------------------") + logger.info("Middle time usage of announce {} route : {} s".format(route_num, announce_middle_time)) + logger.info("Average time usage of announce {} route : {} s".format(route_num, announce_average_time)) + logger.info("Middle time usage of withdraw {} route : {} s".format(route_num, withdraw_middle_time)) + logger.info("Average time usage of withdraw {} route : {} s".format(route_num, withdraw_average_time)) + logger.info("------------------------------------------------------------------------------------\n") + assert announce_middle_time < UPDATE_WITHDRAW_THRESHOLD + assert announce_average_time < UPDATE_WITHDRAW_THRESHOLD + assert withdraw_middle_time < UPDATE_WITHDRAW_THRESHOLD + assert withdraw_average_time < UPDATE_WITHDRAW_THRESHOLD + + +def prepare_traffic(traffic_data, router_mac, ptf_interfaces, recv_port): + ip_ver_list, pkt_list, exp_pkt_list, exp_res_list = [], [], [], [] + tx_port = random.choice(ptf_interfaces) + for test_item in traffic_data: dst_ip = test_item[0] - expected_result = test_item[1] + exp_res = test_item[1] ip_ver = ipaddress.ip_network(dst_ip.encode().decode(), False).version - logger.info("Testing with dst_ip = {} expected_result = {}" - 
.format(dst_ip, expected_result)) pkt, exp_pkt = generate_packet(SRC_IP[ip_ver], dst_ip, router_mac) - tx_port = random.choice(ptf_interfaces) + if ptf_interfaces is recv_port: + rx_port = ptf_interfaces + else: + rx_port = recv_port[ip_ver] + logger.info("Expected packet:\n dst_mac:{} - src_ip:{} - dst_ip:{} - ptf tx_port:{} - ptf rx_port:{} - " + "expected_result = {}".format(router_mac, SRC_IP[ip_ver], dst_ip, tx_port, rx_port, exp_res)) + + ip_ver_list.append(ip_ver) + pkt_list.append(pkt) + exp_pkt_list.append(exp_pkt) + exp_res_list.append(exp_res) + + return tx_port, ip_ver_list, pkt_list, exp_pkt_list, exp_res_list + + +def validate_traffic(ptfadapter, traffic_data_list, router_mac, ptf_interfaces, recv_port, loop_back=False): + """ + Verify traffic is forwarded/forwarded back/drop as expected + """ + for traffic_data in traffic_data_list: + tx_port, ip_ver_list, pkt_list, exp_pkt_list, exp_res_list = prepare_traffic(traffic_data, router_mac, + ptf_interfaces, recv_port) if ptf_interfaces is recv_port: if loop_back: - logger.info("Expected packet:\n dst_mac:{} - src_ip:{} - dst_ip:{} - ptf tx_port:{} - ptf rx_port:{}". - format(router_mac, SRC_IP[ip_ver], dst_ip, tx_port, ptf_interfaces)) - send_and_verify_loopback_packets(ptfadapter, pkt, exp_pkt, tx_port, ptf_interfaces, expected_result) + send_and_verify_loopback_packets(ptfadapter, pkt_list, exp_pkt_list, tx_port, ptf_interfaces, + exp_res_list) else: - logger.info("Loopback traffic - expected packet:\n dst_mac:{} - src_ip:{} - dst_ip:{} - ptf tx_port:{}\ - - ptf rx_port:{}".format(router_mac, SRC_IP[ip_ver], dst_ip, tx_port, tx_port)) - send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, tx_port, expected_result) + send_and_verify_packet(ptfadapter, pkt_list, exp_pkt_list, tx_port, tx_port, exp_res_list) else: - logger.info("Expected packet:\n dst_mac:{} - src_ip:{} - dst_ip:{} - ptf tx_port:{} - ptf rx_port:{}". 
- format(router_mac, SRC_IP[ip_ver], dst_ip, tx_port, recv_port[ip_ver])) - send_and_verify_packet(ptfadapter, pkt, exp_pkt, tx_port, recv_port[ip_ver], expected_result) + send_and_verify_packet(ptfadapter, pkt_list, exp_pkt_list, tx_port, recv_port, exp_res_list, ip_ver_list) + +def validate_bulk_traffic(tcpdump_helper, ptfadapter, traffic_data_list, router_mac, ptf_interfaces, recv_port): + tx_port, ip_ver_list, pkt_list, exp_pkt_list, exp_res_list = prepare_traffic(traffic_data_list, router_mac, + ptf_interfaces, recv_port) + send_and_verify_bulk_traffic(tcpdump_helper, ptfadapter, ip_ver_list, pkt_list, tx_port, recv_port, exp_res_list) -def announce_ipv4_ipv6_routes(ptf_ip, exabgp_port, exabgp_port_v6, action=ANNOUNCE): + +def announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6, action=ANNOUNCE): """ Announce or withdraw ipv4 and ipv6 routes by exabgp """ - for route in IP_ROUTE_LIST: - announce_route(ptf_ip, route, exabgp_port, action) - - for route in IPV6_ROUTE_LIST: - announce_route(ptf_ip, route, exabgp_port_v6, action) + announce_route(ptf_ip, ipv4_route_list, exabgp_port, action) + announce_route(ptf_ip, ipv6_route_list, exabgp_port_v6, action) -def config_bgp_suppress_fib(duthost, enable=True): +def config_bgp_suppress_fib(duthost, enable=True, validate_result=False): """ Enable or disable bgp suppress-fib-pending function """ @@ -333,6 +593,9 @@ def config_bgp_suppress_fib(duthost, enable=True): logger.info('Disable BGP suppress fib pending function') cmd = 'sudo config suppress-fib-pending disabled' duthost.shell(cmd) + if validate_result: + res = duthost.shell('show suppress-fib-pending') + assert enable is (res['stdout'] == 'Enabled') def do_and_wait_reboot(duthost, localhost, reboot_type): @@ -367,26 +630,34 @@ def param_reboot(request, duthost, localhost): do_and_wait_reboot(duthost, localhost, reboot_type) -def validate_route_states(duthost, vrf=DEFAULT, check_point=QUEUED, action=ACTION_IN): +def 
validate_route_states(duthost, ipv4_route_list, ipv6_route_list, vrf=DEFAULT, check_point=QUEUED, action=ACTION_IN): """ Verify ipv4 and ipv6 routes install status """ - for route in IP_ROUTE_LIST: + for route in ipv4_route_list: check_route_install_status(duthost, route, vrf, IP_VER, check_point, action) - for route in IPV6_ROUTE_LIST: + for route in ipv6_route_list: check_route_install_status(duthost, route, vrf, IPV6_VER, check_point, action) -def validate_route_propagate(duthost, tbinfo, vrf=DEFAULT, exist=True): +def validate_fib_route(duthost, ipv4_route_list, ipv6_route_list): + """ + Verify ipv4 and ipv6 route were installed into fib + """ + retry_call(check_fib_route, fargs=[duthost, ipv4_route_list], tries=5, delay=2) + retry_call(check_fib_route, fargs=[duthost, ipv6_route_list, IPV6_VER], tries=5, delay=2) + + +def validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list, vrf=DEFAULT, exist=True): """ - Verify ipv4 and ipv6 route propagate status + Verify ipv4 and ipv6 route propagate status at t2 vm side """ - t2_vm = get_vm_name(tbinfo) - bgp_neighbor_v4, bgp_neighbor_v6 = get_bgp_neighbor_ip(duthost, t2_vm, vrf) - for route in IP_ROUTE_LIST: - validate_route_propagate_status(duthost, route, bgp_neighbor_v4, vrf, exist=exist) - for route in IPV6_ROUTE_LIST: - validate_route_propagate_status(duthost, route, bgp_neighbor_v6, vrf, ip_ver=IPV6_VER, exist=exist) + t2_vm_list = get_vm_name_list(tbinfo) + for t2_vm in t2_vm_list: + bgp_neighbor_v4, bgp_neighbor_v6 = get_bgp_neighbor_ip(duthost, t2_vm, vrf) + validate_route_propagate_status(nbrhosts[t2_vm], ipv4_route_list, bgp_neighbor_v4, vrf, exist=exist) + validate_route_propagate_status(nbrhosts[t2_vm], ipv6_route_list, bgp_neighbor_v6, vrf, ip_ver=IPV6_VER, + exist=exist) def redistribute_static_route_to_bgp(duthost, redistribute=True): @@ -415,23 +686,49 @@ def remove_static_route_and_redistribute(duthost): redistribute_static_route_to_bgp(duthost, redistribute=False) +def 
bgp_route_flap_with_stress(duthost, tbinfo, nbrhosts, ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, + exabgp_port_v6, vrf=DEFAULT, flap_time=1): + """ + Do bgp route flap + """ + for i in range(flap_time): + with allure.step("Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6) + + with allure.step("Validate BGP ipv4 and ipv6 routes are announced to T2 VM peer"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list, vrf=vrf) + + with allure.step("Withdraw BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6, + action=WITHDRAW) + + with allure.step("Validate bgp neighbors are established"): + check_bgp_neighbor(duthost) + + +def perf_sniffer_prepare(tcpdump_sniffer, duthost, nbrhosts, mg_facts, recv_port): + eths_to_t2_vm = get_port_connected_with_vm(duthost, nbrhosts, vm_type='T2') + eths_to_t0_vm = get_eth_name_from_ptf_port(mg_facts, [port for port in recv_port.values()]) + tcpdump_sniffer.out_direct_ifaces = [random.choice(eths_to_t2_vm)] + tcpdump_sniffer.in_direct_ifaces = eths_to_t0_vm + tcpdump_sniffer.tcpdump_filter = BGP_FILTER + + @pytest.mark.parametrize("vrf_type", VRF_TYPES) def test_bgp_route_with_suppress(duthost, tbinfo, nbrhosts, ptfadapter, localhost, restore_bgp_suppress_fib, - get_exabgp_ptf_ports, vrf_type, request): + prepare_param, vrf_type, continuous_boot_times, generate_route_and_traffic_data, + request): try: if vrf_type == USER_DEFINED_VRF: with allure.step("Configure user defined vrf"): setup_vrf(duthost, nbrhosts, tbinfo) with allure.step("Prepare needed parameters"): - router_mac = duthost.facts["router_mac"] - mg_facts = duthost.get_extended_minigraph_facts(tbinfo) - ptf_ip = tbinfo['ptf_ip'] - exabgp_port, ptf_recv_port, exabgp_port_v6, ptf_recv_port_v6 = get_exabgp_ptf_ports - 
recv_port = { - 4: ptf_recv_port, - 6: ptf_recv_port_v6 - } + router_mac, mg_facts, ptf_ip, exabgp_port_list, exabgp_port_list_v6, recv_port_list = prepare_param + + with allure.step("Get route and traffic data"): + ipv4_route_list, ipv6_route_list, traffic_data_ipv4_forward, traffic_data_ipv6_forward, \ + traffic_data_ipv4_drop, traffic_data_ipv6_drop = generate_route_and_traffic_data[FUNCTION] with allure.step("Config bgp suppress-fib-pending function"): config_bgp_suppress_fib(duthost) @@ -440,39 +737,61 @@ def test_bgp_route_with_suppress(duthost, tbinfo, nbrhosts, ptfadapter, localhos logger.info("Save configuration") duthost.shell('sudo config save -y') - with allure.step("Do reload"): - param_reboot(request, duthost, localhost) + for continous_boot_index in range(continuous_boot_times): + if continuous_boot_times > 1: + logger.info("======== Continuous boot needed - this is the {} time boot test ========". + format(continous_boot_index+1)) - with allure.step("Suspend orchagent process to simulate a route install delay"): - operate_orchagent(duthost) + with allure.step("Do reload"): + param_reboot(request, duthost, localhost) - with allure.step("Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP"): - announce_ipv4_ipv6_routes(ptf_ip, exabgp_port, exabgp_port_v6) + for exabgp_port, exabgp_port_v6, recv_port in zip(exabgp_port_list, exabgp_port_list_v6, recv_port_list): + try: + with allure.step("Suspend orchagent process to simulate a route install delay"): + operate_orchagent(duthost) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(QUEUED)): - validate_route_states(duthost, vrf_type) + with allure.step(f"Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6) - with allure.step("Validate BGP ipv4 and ipv6 routes are not announced to T2 VM peer"): - 
validate_route_propagate(duthost, tbinfo, vrf_type, exist=False) + with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(QUEUED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list, vrf_type) - with allure.step("Validate traffic could not be forwarded to T0 VM"): - ptf_interfaces = get_t2_ptf_intfs(mg_facts) - validate_traffic(ptfadapter, TRAFFIC_DATA_DROP, router_mac, ptf_interfaces, recv_port) + with allure.step("Validate BGP ipv4 and ipv6 routes are not announced to T2 VM peer"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list, vrf_type, + exist=False) - with allure.step("Restore orchagent process"): - operate_orchagent(duthost, action=ACTION_CONTINUE) + with allure.step("Validate traffic could not be forwarded to T0 VM"): + ptf_interfaces = get_t2_ptf_intfs(mg_facts) + validate_traffic(ptfadapter, [traffic_data_ipv4_drop, traffic_data_ipv6_drop], router_mac, + ptf_interfaces, recv_port) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are not in {} state".format(QUEUED)): - validate_route_states(duthost, vrf_type, check_point=QUEUED, action=ACTION_NOT_IN) + with allure.step("Restore orchagent process"): + operate_orchagent(duthost, action=ACTION_CONTINUE) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(OFFLOADED)): - validate_route_states(duthost, vrf_type, check_point=OFFLOADED) + with allure.step("Validate announced BGP ipv4 and ipv6 routes are not in {} state".format(QUEUED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list, vrf_type, check_point=QUEUED, + action=ACTION_NOT_IN) - with allure.step("Validate BGP ipv4 and ipv6 routes are announced to T2 VM peer"): - validate_route_propagate(duthost, tbinfo, vrf_type) + with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(OFFLOADED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list, vrf_type, + 
check_point=OFFLOADED) + + with allure.step("Validate BGP ipv4 and ipv6 routes are announced to T2 VM peer"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list, vrf_type) - with allure.step("Validate traffic would be forwarded to T0 VM"): - validate_traffic(ptfadapter, TRAFFIC_DATA_FORWARD, router_mac, ptf_interfaces, recv_port) + with allure.step("Validate traffic would be forwarded to T0 VM"): + validate_traffic(ptfadapter, [traffic_data_ipv4_forward, traffic_data_ipv6_forward], router_mac, + ptf_interfaces, recv_port) + finally: + with allure.step(f"Withdraw BGP ipv4 and ipv6 routes from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6, + action=WITHDRAW) + + with allure.step("Validate BGP ipv4 and ipv6 routes are withdrawn from T2 VM peer"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list, vrf_type, + exist=False) finally: if vrf_type == USER_DEFINED_VRF: @@ -481,120 +800,128 @@ def test_bgp_route_with_suppress(duthost, tbinfo, nbrhosts, ptfadapter, localhos config_reload(duthost) -def test_bgp_route_without_suppress(duthost, tbinfo, nbrhosts, ptfadapter, get_exabgp_ptf_ports, - restore_bgp_suppress_fib): +def test_bgp_route_without_suppress(duthost, tbinfo, nbrhosts, ptfadapter, prepare_param, restore_bgp_suppress_fib, + generate_route_and_traffic_data): with allure.step("Prepare needed parameters"): - router_mac = duthost.facts["router_mac"] - mg_facts = duthost.get_extended_minigraph_facts(tbinfo) - ptf_ip = tbinfo['ptf_ip'] - exabgp_port, ptf_recv_port, exabgp_port_v6, ptf_recv_port_v6 = get_exabgp_ptf_ports - recv_port = { - 4: ptf_recv_port, - 6: ptf_recv_port_v6 - } + router_mac, mg_facts, ptf_ip, exabgp_port_list, exabgp_port_list_v6, recv_port_list = prepare_param with allure.step("Disable bgp suppress-fib-pending function"): config_bgp_suppress_fib(duthost, False) 
- with allure.step("Suspend orchagent process to simulate a route install delay"): - operate_orchagent(duthost) + with allure.step("Get route and traffic data"): + ipv4_route_list, ipv6_route_list, traffic_data_ipv4_forward, traffic_data_ipv6_forward, \ + traffic_data_ipv4_drop, traffic_data_ipv6_drop = generate_route_and_traffic_data[FUNCTION] - with allure.step("Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP"): - announce_ipv4_ipv6_routes(ptf_ip, exabgp_port, exabgp_port_v6) + for exabgp_port, exabgp_port_v6, recv_port in zip(exabgp_port_list, exabgp_port_list_v6, recv_port_list): + try: + with allure.step("Suspend orchagent process to simulate a route install delay"): + operate_orchagent(duthost) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are not in {} state".format(QUEUED)): - validate_route_states(duthost, check_point=QUEUED, action=ACTION_NOT_IN) + with allure.step(f"Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6) - with allure.step("Validate BGP ipv4 and ipv6 routes are announced to T2 VM peer"): - validate_route_propagate(duthost, tbinfo) + with allure.step("Validate announced BGP ipv4 and ipv6 routes are not in {} state".format(QUEUED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list, check_point=QUEUED, + action=ACTION_NOT_IN) - with allure.step("Restore orchagent process"): - operate_orchagent(duthost, action=ACTION_CONTINUE) + with allure.step("Validate BGP ipv4 and ipv6 routes are announced to T2 VM peer"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list) + + with allure.step("Restore orchagent process"): + operate_orchagent(duthost, action=ACTION_CONTINUE) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(OFFLOADED)): - validate_route_states(duthost, 
check_point=OFFLOADED) + with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(OFFLOADED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list, check_point=OFFLOADED) - with allure.step("Validate traffic would be forwarded to T0 VM"): - ptf_interfaces = get_t2_ptf_intfs(mg_facts) - validate_traffic(ptfadapter, TRAFFIC_DATA_FORWARD, router_mac, ptf_interfaces, recv_port) + with allure.step("Validate traffic would be forwarded to T0 VM"): + ptf_interfaces = get_t2_ptf_intfs(mg_facts) + validate_traffic(ptfadapter, [traffic_data_ipv4_forward, traffic_data_ipv6_forward], router_mac, + ptf_interfaces, recv_port) + finally: + with allure.step(f"Withdraw BGP ipv4 and ipv6 routes from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6, + action=WITHDRAW) -def test_bgp_route_with_suppress_negative_operation(duthost, tbinfo, nbrhosts, ptfadapter, localhost, - restore_bgp_suppress_fib, get_exabgp_ptf_ports): +def test_bgp_route_with_suppress_negative_operation(duthost, tbinfo, nbrhosts, ptfadapter, localhost, prepare_param, + restore_bgp_suppress_fib, generate_route_and_traffic_data): try: with allure.step("Prepare needed parameters"): - router_mac = duthost.facts["router_mac"] - mg_facts = duthost.get_extended_minigraph_facts(tbinfo) - ptf_ip = tbinfo['ptf_ip'] - exabgp_port, ptf_recv_port, exabgp_port_v6, ptf_recv_port_v6 = get_exabgp_ptf_ports - recv_port = { - 4: ptf_recv_port, - 6: ptf_recv_port_v6 - } + router_mac, mg_facts, ptf_ip, exabgp_port_list, exabgp_port_list_v6, recv_port_list = prepare_param + + with allure.step("Get route and traffic data"): + ipv4_route_list, ipv6_route_list, traffic_data_ipv4_forward, traffic_data_ipv6_forward, \ + traffic_data_ipv4_drop, traffic_data_ipv6_drop = generate_route_and_traffic_data[FUNCTION] with allure.step("Config bgp suppress-fib-pending function"): 
config_bgp_suppress_fib(duthost) - with allure.step("Suspend orchagent process to simulate a route install delay"): - operate_orchagent(duthost) - - with allure.step("Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP"): - announce_ipv4_ipv6_routes(ptf_ip, exabgp_port, exabgp_port_v6) + for exabgp_port, exabgp_port_v6, recv_port in zip(exabgp_port_list, exabgp_port_list_v6, recv_port_list): + try: + with allure.step("Suspend orchagent process to simulate a route install delay"): + operate_orchagent(duthost) - with allure.step("Execute bgp sessions restart"): - restart_bgp_session(duthost) + with allure.step(f"Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6) - with allure.step("Validate bgp neighbor are established"): - config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] - bgp_neighbors = config_facts.get('BGP_NEIGHBOR', {}) - pytest_assert( - wait_until(300, 10, 0, duthost.check_bgp_session_state, bgp_neighbors), - "graceful restarted bgp sessions {} are not coming back".format(bgp_neighbors) - ) + with allure.step("Execute bgp sessions restart"): + restart_bgp_session(duthost) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(QUEUED)): - validate_route_states(duthost) + with allure.step("Validate bgp neighbor are established"): + check_bgp_neighbor(duthost) - with allure.step("Validate BGP ipv4 and ipv6 routes are not announced to T2 VM peer"): - validate_route_propagate(duthost, tbinfo, exist=False) + with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(QUEUED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list) - with allure.step("Config static route and redistribute to BGP"): - port = get_eth_port(duthost, tbinfo) - logger.info("Config static route - sudo config 
route add prefix {} nexthop dev {}". - format(STATIC_ROUTE_PREFIX, port)) - duthost.shell("sudo config route add prefix {} nexthop dev {}".format(STATIC_ROUTE_PREFIX, port)) - redistribute_static_route_to_bgp(duthost) + with allure.step("Validate BGP ipv4 and ipv6 routes are not announced to T2 VM peer"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list, exist=False) - with allure.step("Validate redistributed static route is propagate to T2 VM peer"): - t2_vm = get_vm_name(tbinfo) - bgp_neighbor_v4, _ = get_bgp_neighbor_ip(duthost, t2_vm) - validate_route_propagate_status(duthost, STATIC_ROUTE_PREFIX, bgp_neighbor_v4) + with allure.step("Config static route and redistribute to BGP"): + port = get_eth_port(duthost, tbinfo) + logger.info("Config static route - sudo config route add prefix {} nexthop dev {}". + format(STATIC_ROUTE_PREFIX, port)) + duthost.shell("sudo config route add prefix {} nexthop dev {}".format(STATIC_ROUTE_PREFIX, port)) + redistribute_static_route_to_bgp(duthost) - with allure.step("Validate traffic could not be forwarded to T0 VM"): - ptf_interfaces = get_t2_ptf_intfs(mg_facts) - validate_traffic(ptfadapter, TRAFFIC_DATA_DROP, router_mac, ptf_interfaces, recv_port) + with allure.step("Validate redistributed static route is propagate to T2 VM peer"): + validate_route_propagate(duthost, nbrhosts, tbinfo, [STATIC_ROUTE_PREFIX], []) - with allure.step("Restore orchagent process"): - operate_orchagent(duthost, action=ACTION_CONTINUE) + with allure.step("Validate traffic could not be forwarded to T0 VM"): + ptf_interfaces = get_t2_ptf_intfs(mg_facts) + validate_traffic(ptfadapter, [traffic_data_ipv4_drop, traffic_data_ipv6_drop], router_mac, + ptf_interfaces, recv_port) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are not in {} state".format(QUEUED)): - validate_route_states(duthost, check_point=QUEUED, action=ACTION_NOT_IN) + with allure.step("Restore orchagent process"): + 
operate_orchagent(duthost, action=ACTION_CONTINUE) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(OFFLOADED)): - validate_route_states(duthost, check_point=OFFLOADED) + with allure.step("Validate announced BGP ipv4 and ipv6 routes are not in {} state".format(QUEUED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list, check_point=QUEUED, + action=ACTION_NOT_IN) - with allure.step("Validate BGP ipv4 and ipv6 routes are announced to T2 VM peer"): - validate_route_propagate(duthost, tbinfo) + with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(OFFLOADED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list, check_point=OFFLOADED) - with allure.step("Validate traffic would be forwarded to T0 VM"): - validate_traffic(ptfadapter, TRAFFIC_DATA_FORWARD, router_mac, ptf_interfaces, recv_port) + with allure.step("Validate BGP ipv4 and ipv6 routes are announced to T2 VM peer"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list) + with allure.step("Validate traffic would be forwarded to T0 VM"): + validate_traffic(ptfadapter, [traffic_data_ipv4_forward, traffic_data_ipv6_forward], router_mac, + ptf_interfaces, recv_port) + finally: + with allure.step(f"Withdraw BGP ipv4 and ipv6 routes from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6, + action=WITHDRAW) finally: with allure.step("Delete static route and remove redistribute to BGP"): remove_static_route_and_redistribute(duthost) -def test_credit_loop(duthost, tbinfo, nbrhosts, ptfadapter, get_exabgp_ptf_ports, restore_bgp_suppress_fib): +def test_credit_loop(duthost, tbinfo, nbrhosts, ptfadapter, prepare_param, generate_route_and_traffic_data, + restore_bgp_suppress_fib): """ The problem with BGP programming occurs after the T1 switch is rebooted: @@ -606,40 +933,160 @@ 
def test_credit_loop(duthost, tbinfo, nbrhosts, ptfadapter, get_exabgp_ptf_ports When the traffic is bounced back on lossless queue, buffers on both sides are overflown, credit loop happens """ with allure.step("Prepare needed parameters"): - router_mac = duthost.facts["router_mac"] - mg_facts = duthost.get_extended_minigraph_facts(tbinfo) - ptf_ip = tbinfo['ptf_ip'] - exabgp_port, ptf_recv_port, exabgp_port_v6, ptf_recv_port_v6 = get_exabgp_ptf_ports - recv_port = { - 4: ptf_recv_port, - 6: ptf_recv_port_v6 - } + router_mac, mg_facts, ptf_ip, exabgp_port_list, exabgp_port_list_v6, recv_port_list = prepare_param - with allure.step("Disable bgp suppress-fib-pending function"): - config_bgp_suppress_fib(duthost, False) + with allure.step("Get route and traffic data"): + ipv4_route_list, ipv6_route_list, traffic_data_ipv4_forward, traffic_data_ipv6_forward, \ + traffic_data_ipv4_drop, traffic_data_ipv6_drop = generate_route_and_traffic_data[FUNCTION] - with allure.step("Suspend orchagent process to simulate a route install delay"): - operate_orchagent(duthost) + for exabgp_port, exabgp_port_v6, recv_port in zip(exabgp_port_list, exabgp_port_list_v6, recv_port_list): + try: + with allure.step("Disable bgp suppress-fib-pending function"): + config_bgp_suppress_fib(duthost, False) - with allure.step("Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP"): - announce_ipv4_ipv6_routes(ptf_ip, exabgp_port, exabgp_port_v6) + with allure.step( + "Validate traffic is forwarded back to T2 VM and routes in HW table are removed by orchagent"): + ptf_interfaces = get_t2_ptf_intfs(mg_facts) + retry_call(validate_traffic, + fargs=[ptfadapter, [traffic_data_ipv4_forward, traffic_data_ipv6_forward], router_mac, + ptf_interfaces, ptf_interfaces, True], tries=3, delay=2) - with allure.step("Validate the BGP routes are propagated to T2 VM"): - validate_route_propagate(duthost, tbinfo) + with allure.step("Suspend orchagent process to simulate a route install delay"): + 
operate_orchagent(duthost) - with allure.step("Validate traffic is forwarded back to T2 VM"): - ptf_interfaces = get_t2_ptf_intfs(mg_facts) - validate_traffic(ptfadapter, TRAFFIC_DATA_FORWARD, router_mac, ptf_interfaces, ptf_interfaces, loop_back=True) + with allure.step(f"Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6) - with allure.step("Config bgp suppress-fib-pending function"): - config_bgp_suppress_fib(duthost) + with allure.step("Validate the BGP routes are propagated to T2 VM"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list) - with allure.step("Restore orchagent process"): - operate_orchagent(duthost, action=ACTION_CONTINUE) + with allure.step("Validate traffic is forwarded back to T2 VM"): + validate_traffic(ptfadapter, [traffic_data_ipv4_forward, traffic_data_ipv6_forward], router_mac, + ptf_interfaces, ptf_interfaces, loop_back=True) - with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(OFFLOADED)): - validate_route_states(duthost, check_point=OFFLOADED) + with allure.step("Config bgp suppress-fib-pending function"): + config_bgp_suppress_fib(duthost, validate_result=True) - with allure.step("Validate traffic would be forwarded to T0 VM"): - ptf_interfaces = get_t2_ptf_intfs(mg_facts) - validate_traffic(ptfadapter, TRAFFIC_DATA_FORWARD, router_mac, ptf_interfaces, recv_port) + with allure.step("Restore orchagent process"): + assert is_orchagent_stopped(duthost), "orchagent shall in stop state" + operate_orchagent(duthost, action=ACTION_CONTINUE) + + with allure.step("Validate announced BGP ipv4 and ipv6 routes are in {} state".format(OFFLOADED)): + validate_route_states(duthost, ipv4_route_list, ipv6_route_list, check_point=OFFLOADED) + + with allure.step("Validate traffic would be forwarded to T0 VM"): + 
validate_traffic(ptfadapter, [traffic_data_ipv4_forward, traffic_data_ipv6_forward], router_mac, + ptf_interfaces, recv_port) + finally: + with allure.step(f"Withdraw BGP ipv4 and ipv6 routes from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6, + action=WITHDRAW) + + +def test_suppress_fib_stress(duthost, tbinfo, nbrhosts, ptfadapter, prepare_param, completeness_level, + generate_route_and_traffic_data, tcpdump_helper, restore_bgp_suppress_fib): + with allure.step("Prepare needed parameters"): + router_mac, mg_facts, ptf_ip, exabgp_port_list, exabgp_port_list_v6, recv_port_list = prepare_param + + with allure.step("Get route and traffic data"): + ipv4_route_list, ipv6_route_list, traffic_data_ipv4_forward, traffic_data_ipv6_forward, \ + traffic_data_ipv4_drop, traffic_data_ipv6_drop = generate_route_and_traffic_data[STRESS] + + for exabgp_port, exabgp_port_v6, recv_port in zip(exabgp_port_list, exabgp_port_list_v6, recv_port_list): + try: + with allure.step("Do BGP route flap"): + flap_time = 1 if completeness_level == "thorough" else BGP_ROUTE_FLAP_TIMES + bgp_route_flap_with_stress(duthost, tbinfo, nbrhosts, ptf_ip, ipv4_route_list, exabgp_port, + ipv6_route_list, exabgp_port_v6, flap_time=flap_time) + + with allure.step("Disable bgp suppress-fib-pending function"): + config_bgp_suppress_fib(duthost, enable=False, validate_result=True) + + with allure.step("Validate traffics are back to T2 VM to make sure routes in HW are removed by orchagent"): + ptf_interfaces = get_t2_ptf_intfs(mg_facts) + retry_call(validate_bulk_traffic, + fargs=[tcpdump_helper, ptfadapter, traffic_data_ipv4_forward + traffic_data_ipv6_forward, + router_mac, ptf_interfaces, ptf_interfaces], tries=3, delay=2) + + with allure.step("Suspend orchagent process to simulate a route install delay"): + operate_orchagent(duthost) + + with allure.step(f"Announce BGP ipv4 and ipv6 routes 
to DUT from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6) + + with allure.step("Validate the BGP routes are propagated to T2 VM"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list) + + with allure.step("Validate traffics are forwarded back to T2 VM"): + validate_bulk_traffic(tcpdump_helper, ptfadapter, + traffic_data_ipv4_forward + traffic_data_ipv6_forward, router_mac, + ptf_interfaces, ptf_interfaces) + + with allure.step("Config bgp suppress-fib-pending function"): + config_bgp_suppress_fib(duthost, validate_result=True) + + with allure.step("Restore orchagent process"): + assert is_orchagent_stopped(duthost), "orchagent shall in stop state" + operate_orchagent(duthost, action=ACTION_CONTINUE) + + with allure.step("Validate announced BGP ipv4 and ipv6 routes are installed into fib"): + validate_fib_route(duthost, ipv4_route_list, ipv6_route_list) + + with allure.step("Validate traffic would be forwarded to T0 VM"): + validate_bulk_traffic(tcpdump_helper, ptfadapter, + traffic_data_ipv4_forward + traffic_data_ipv6_forward, router_mac, + ptf_interfaces, recv_port) + finally: + with allure.step(f"Withdraw BGP ipv4 and ipv6 routes from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, + exabgp_port_v6, action=WITHDRAW) + + +def test_suppress_fib_performance(tcpdump_helper, duthost, tbinfo, nbrhosts, ptfadapter, prepare_param, + generate_route_and_traffic_data, restore_bgp_suppress_fib): + with allure.step("Prepare needed parameters"): + router_mac, mg_facts, ptf_ip, exabgp_port_list, exabgp_port_list_v6, recv_port_list = prepare_param + + with allure.step("Get route and traffic data"): + ipv4_route_list, ipv6_route_list, _, _, _, _ = generate_route_and_traffic_data[STRESS] + + for exabgp_port, exabgp_port_v6, 
recv_port in zip(exabgp_port_list, exabgp_port_list_v6, recv_port_list): + try: + with allure.step("Config bgp suppress-fib-pending function"): + config_bgp_suppress_fib(duthost) + + with allure.step("Start sniffer"): + tcpdump_sniffer = tcpdump_helper + perf_sniffer_prepare(tcpdump_sniffer, duthost, nbrhosts, mg_facts, recv_port) + tcpdump_sniffer.start_sniffer(host='dut') + + with allure.step(f"Announce BGP ipv4 and ipv6 routes to DUT from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6) + + with allure.step("Validate the BGP routes are propagated to T2 VM"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list) + + with allure.step(f"Withdraw BGP ipv4 and ipv6 routes from T0 VM by ExaBGP - " + f"v4: {exabgp_port} v6: {exabgp_port_v6}"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6, + action=WITHDRAW) + with allure.step("Validate the BGP routes are withdrawn from T2 VM"): + validate_route_propagate(duthost, nbrhosts, tbinfo, ipv4_route_list, ipv6_route_list, exist=False) + + with allure.step("Stop sniffer"): + tcpdump_sniffer.stop_sniffer(host='dut') + + with allure.step("Validate BGP route process performance"): + validate_route_process_perf(tcpdump_sniffer.pcap_path, ipv4_route_list, ipv6_route_list) + finally: + with allure.step("Disable bgp suppress-fib-pending function"): + config_bgp_suppress_fib(duthost, False, validate_result=True) + + with allure.step("Withdraw BGP ipv4 and ipv6 routes in case of any failure in case"): + announce_ipv4_ipv6_routes(ptf_ip, ipv4_route_list, exabgp_port, ipv6_route_list, exabgp_port_v6, + action=WITHDRAW) diff --git a/tests/bgp/test_traffic_shift.py b/tests/bgp/test_traffic_shift.py index 0b55e4808d8..c8e8ca4320f 100644 --- a/tests/bgp/test_traffic_shift.py +++ b/tests/bgp/test_traffic_shift.py @@ -116,7 +116,7 @@ def 
parse_routes_process_vsonic(node=None, results=None): for a_route in routes_json: # empty community string routes[a_route] = "" - all_routes[hostname] = routes + results[hostname] = routes all_routes = parallel_run(parse_routes_process_vsonic, (), {}, list(neigh_hosts.values()), timeout=120, concurrent_tasks=8) diff --git a/tests/cacl/test_cacl_application.py b/tests/cacl/test_cacl_application.py index 19187617962..6df1fc743f9 100644 --- a/tests/cacl/test_cacl_application.py +++ b/tests/cacl/test_cacl_application.py @@ -28,7 +28,9 @@ def duthost_dualtor(request, upper_tor_host, lower_tor_host): # noqa F811 else: logger.info("Select upper tor...") dut = upper_tor_host - return dut + dut.shell("sudo config mux mode manual all") + yield dut + dut.shell("sudo config mux mode auto all") @pytest.fixture diff --git a/tests/cacl/test_ebtables_application.py b/tests/cacl/test_ebtables_application.py index 7c60762064b..790aab44e19 100644 --- a/tests/cacl/test_ebtables_application.py +++ b/tests/cacl/test_ebtables_application.py @@ -13,6 +13,7 @@ def generate_expected_rules(duthost): ebtables_rules.append("-d BGA -j DROP") ebtables_rules.append("-p ARP -j DROP") ebtables_rules.append("-p 802_1Q --vlan-encap ARP -j DROP") + ebtables_rules.append("-d Multicast -j DROP") return ebtables_rules diff --git a/tests/common/config_reload.py b/tests/common/config_reload.py index 4f2159b897b..89a131ba1e0 100644 --- a/tests/common/config_reload.py +++ b/tests/common/config_reload.py @@ -127,9 +127,13 @@ def _config_reload_cmd_wrapper(cmd, executable): sonic_host.shell(cmd, executable="/bin/bash") elif config_source == 'running_golden_config': - cmd = 'config reload -y -l /etc/sonic/running_golden_config.json &>/dev/null' + golden_path = '/etc/sonic/running_golden_config.json' + if sonic_host.is_multi_asic: + for asic in sonic_host.asics: + golden_path = f'{golden_path},/etc/sonic/running_golden_config{asic.asic_index}.json' + cmd = f'config reload -y -l {golden_path} &>/dev/null' if 
config_force_option_supported(sonic_host): - cmd = 'config reload -y -f -l /etc/sonic/running_golden_config.json &>/dev/null' + cmd = f'config reload -y -f -l {golden_path} &>/dev/null' sonic_host.shell(cmd, executable="/bin/bash") modular_chassis = sonic_host.get_facts().get("modular_chassis") diff --git a/tests/common/devices/duthosts.py b/tests/common/devices/duthosts.py index 1fe548e5fa8..8474a424c12 100644 --- a/tests/common/devices/duthosts.py +++ b/tests/common/devices/duthosts.py @@ -54,9 +54,15 @@ def __init__(self, ansible_adhoc, tbinfo, duts): DUTs in the testbed should be used """ + self.ansible_adhoc = ansible_adhoc + self.tbinfo = tbinfo + self.duts = duts + self.__initialize_nodes() + + def __initialize_nodes(self): # TODO: Initialize the nodes in parallel using multi-threads? - self.nodes = self._Nodes([MultiAsicSonicHost(ansible_adhoc, hostname, self, tbinfo['topo']['type']) - for hostname in tbinfo["duts"] if hostname in duts]) + self.nodes = self._Nodes([MultiAsicSonicHost(self.ansible_adhoc, hostname, self, self.tbinfo['topo']['type']) + for hostname in self.tbinfo["duts"] if hostname in self.duts]) self.supervisor_nodes = self._Nodes([node for node in self.nodes if node.is_supervisor_node()]) self.frontend_nodes = self._Nodes([node for node in self.nodes if node.is_frontend_node()]) @@ -125,3 +131,6 @@ def config_facts(self, *module_args, **complex_args): complex_args['host'] = node.hostname result[node.hostname] = node.config_facts(*module_args, **complex_args)['ansible_facts'] return result + + def reset(self): + self.__initialize_nodes() diff --git a/tests/common/devices/eos.py b/tests/common/devices/eos.py index 9e16bd077d4..327a99e646b 100644 --- a/tests/common/devices/eos.py +++ b/tests/common/devices/eos.py @@ -310,6 +310,9 @@ def get_auto_negotiation_mode(self, interface_name): autoneg_enabled = output['stdout'][0]['interfaceStatuses'][interface_name]['autoNegotiateActive'] return autoneg_enabled + def get_version(self): + return 
self.eos_command(commands=["show version"]) + def _reset_port_speed(self, interface_name): out = self.eos_config( lines=['default speed'], diff --git a/tests/common/devices/fanout.py b/tests/common/devices/fanout.py index 744385356d1..1c80636ae64 100644 --- a/tests/common/devices/fanout.py +++ b/tests/common/devices/fanout.py @@ -43,6 +43,8 @@ def __init__(self, ansible_adhoc, os, hostname, device_type, user, passwd, self.os = 'eos' self.host = EosHost(ansible_adhoc, hostname, user, passwd, shell_user=eos_shell_user, shell_passwd=eos_shell_passwd) + # Check eos fanout reachability by running show command + self.host.get_version() def __getattr__(self, module_name): return getattr(self.host, module_name) diff --git a/tests/common/devices/onyx.py b/tests/common/devices/onyx.py index aad53c02067..b3f467b72dc 100644 --- a/tests/common/devices/onyx.py +++ b/tests/common/devices/onyx.py @@ -92,11 +92,13 @@ def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs): if res["localhost"]["rc"] != 0: raise Exception("Unable to execute template\n{}".format(res["localhost"]["stdout"])) - def get_supported_speeds(self, interface_name): + def get_supported_speeds(self, interface_name, raw_data=False): """Get supported speeds for a given interface Args: interface_name (str): Interface name + raw_data (bool): when it is True , + return raw data, else return the data which has been handled Returns: list: A list of supported speed strings or None @@ -110,6 +112,8 @@ def get_supported_speeds(self, interface_name): out = show_int_result['stdout'][0].strip() logger.debug('Get supported speeds for port {} from onyx: {}'.format(interface_name, out)) + if raw_data: + return out.split(':')[-1].strip().split() if not out: return None @@ -185,6 +189,19 @@ def set_speed(self, interface_name, speed): speed = 'auto' else: speed = speed[:-3] + 'G' + # The speed support list for onyx is like '1G 10G 25G 40G 50Gx1 50Gx2 100Gx2 100Gx4 200Gx4'. 
+ # We need to set the speed according to the speed support list. + # For example, when dut and fanout all support 50G, + # if support speed list of fanout just includes 50Gx1 not 50G, + # we need to set the speed with 50Gx1 instead of 50G, otherwise, the port can not be up. + all_support_speeds = self.get_supported_speeds(interface_name, raw_data=True) + for support_speed in all_support_speeds: + if speed in support_speed: + logger.info("Speed {} find the matched support speed:{} ".format(speed, support_speed)) + speed = support_speed + break + logger.info("set speed is {}".format(speed)) + if autoneg_mode or speed == 'auto': out = self.host.onyx_config( lines=['shutdown', 'speed {}'.format(speed), 'no shutdown'], diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index f6772572d2d..8214215723b 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -1333,6 +1333,23 @@ def check_intf_link_state(self, interface_name): intf_status = self.show_interface(command="status", interfaces=[interface_name])["ansible_facts"]['int_status'] return intf_status[interface_name]['oper_state'] == 'up' + def get_intf_link_local_ipv6_addr(self, intf): + """ + Get the link local ipv6 address of the interface + + Args: + intf: The SONiC interface name + + Returns: + The link local ipv6 address of the interface or empty string if not found + + Sample output: + fe80::2edd:e9ff:fefc:dd58 + """ + cmd = "ip addr show %s | grep inet6 | grep 'scope link' | awk '{print $2}' | cut -d '/' -f1" % intf + addr = self.shell(cmd)["stdout"] + return addr + def get_bgp_neighbor_info(self, neighbor_ip): """ @summary: return bgp neighbor info @@ -1762,6 +1779,41 @@ def get_vlan_intfs(self): return vlan_intfs + def get_vlan_brief(self): + """ + Get vlan brief + Sample output: + { + "Vlan1000": { + "interface_ipv4": [ "192.168.0.1/24" ], + "interface_ipv6": [ "fc02:1000::1/64" ], + "members": ["Ethernet0", "Ethernet1"] + }, + "Vlan2000": { + 
"interface_ipv4": [ "192.168.1.1/24" ], + "interface_ipv6": [ "fc02:1001::1/64" ], + "members": ["Ethernet3", "Ethernet4"] + } + } + """ + config = self.get_running_config_facts() + vlan_brief = {} + for vlan_name, members in config["VLAN_MEMBER"].items(): + vlan_brief[vlan_name] = { + "interface_ipv4": [], + "interface_ipv6": [], + "members": list(members.keys()) + } + for vlan_name, vlan_info in config["VLAN_INTERFACE"].items(): + if vlan_name not in vlan_brief: + continue + for prefix in vlan_info.keys(): + if '.' in prefix: + vlan_brief[vlan_name]["interface_ipv4"].append(prefix) + elif ':' in prefix: + vlan_brief[vlan_name]["interface_ipv6"].append(prefix) + return vlan_brief + def get_interfaces_status(self): ''' Get intnerfaces status by running 'show interfaces status' on the DUT, and parse the result into a dict. @@ -2146,6 +2198,32 @@ def ping_v4(self, ipv4, count=1, ns_arg=""): return False return True + def ping_v6(self, ipv6, count=1, ns_arg=""): + """ + Returns 'True' if ping to IP address works, else 'False' + Args: + IPv6 address + + Returns: + True or False + """ + try: + socket.inet_pton(socket.AF_INET6, ipv6) + except socket.error: + raise Exception("Invalid IPv6 address {}".format(ipv6)) + + netns_arg = "" + if ns_arg is not DEFAULT_NAMESPACE: + netns_arg = "sudo ip netns exec {} ".format(ns_arg) + + try: + self.shell("{}ping -6 -q -c{} {} > /dev/null".format( + netns_arg, count, ipv6 + )) + except RunAnsibleModuleFail: + return False + return True + def is_backend_portchannel(self, port_channel, mg_facts): ports = mg_facts["minigraph_portchannels"].get(port_channel) # minigraph facts does not have backend portchannel IFs @@ -2156,17 +2234,21 @@ def is_backend_portchannel(self, port_channel, mg_facts): def is_backend_port(self, port, mg_facts): return True if "Ethernet-BP" in port else False - def active_ip_interfaces(self, ip_ifs, tbinfo, ns_arg=DEFAULT_NAMESPACE, intf_num="all"): + def active_ip_interfaces(self, ip_ifs, tbinfo, 
ns_arg=DEFAULT_NAMESPACE, intf_num="all", ipv6_ifs=None): """ Return a dict of active IP (Ethernet or PortChannel) interfaces, with interface and peer IPv4 address. + If ipv6_ifs exists, also returns the interfaces' IPv6 address and its peer + IPv6 addresses if found for each interface. + Returns: - Dict of Interfaces and their IPv4 address + Dict of Interfaces and their IPv4 address (with IPv6 if ipv6_ifs exists) """ active_ip_intf_cnt = 0 mg_facts = self.get_extended_minigraph_facts(tbinfo, ns_arg) ip_ifaces = {} + for k, v in list(ip_ifs.items()): if ((k.startswith("Ethernet") and not is_inband_port(k)) or (k.startswith("PortChannel") and not @@ -2182,6 +2264,15 @@ def active_ip_interfaces(self, ip_ifs, tbinfo, ns_arg=DEFAULT_NAMESPACE, intf_nu } active_ip_intf_cnt += 1 + if ipv6_ifs: + ipv6_intf = ipv6_ifs[k] + if (ipv6_intf["peer_ipv6"] != "N/A" and self.ping_v6(ipv6_intf["peer_ipv6"], + count=3, ns_arg=ns_arg)): + ip_ifaces[k].update({ + "ipv6": ipv6_intf["ipv6"], + "peer_ipv6": ipv6_intf["peer_ipv6"] + }) + if isinstance(intf_num, int) and intf_num > 0 and active_ip_intf_cnt == intf_num: break @@ -2258,6 +2349,34 @@ def get_port_counters(self, in_json=True): res = self.shell(cli)['stdout'] return re.sub(r"Last cached time was.*\d+\n", "", res) + def add_acl_table(self, table_name, table_type, acl_stage=None, bind_ports=None, description=None): + """ + Add ACL table via 'config acl add table' command. 
+ Command sample: + config acl add table TEST_TABLE L3 -s ingress -p Ethernet0,Ethernet4 -d "Test ACL table" + + Args: + table_name: name of new acl table + table_type: type of the acl table + acl_stage: acl stage, ingress or egress + bind_ports: ports bind to the acl table + description: description of the acl table + """ + cmd = "config acl add table {} {}".format(table_name, table_type) + + if acl_stage: + cmd += " -s {}".format(acl_stage) + + if bind_ports: + if isinstance(bind_ports, list): + bind_ports = ",".join(bind_ports) + cmd += " -p {}".format(bind_ports) + + if description: + cmd += " -d {}".format(description) + + self.command(cmd) + def remove_acl_table(self, acl_table): """ Remove acl table @@ -2410,6 +2529,17 @@ def get_sfp_type(self, portname): sfp_type = re.search(r'[QO]?SFP-?[\d\w]{0,3}', out["stdout_lines"][0]).group() return sfp_type + def get_counter_poll_status(self): + result_dict = {} + output = self.shell("counterpoll show")["stdout_lines"][2::] + for line in output: + counter_type, interval, status = re.split(r'\s\s+', line) + interval = int(re.search(r'\d+', interval).group(0)) + result_dict[counter_type] = {} + result_dict[counter_type]['interval'] = interval + result_dict[counter_type]['status'] = status + return result_dict + def assert_exit_non_zero(shell_output): if shell_output['rc'] != 0: diff --git a/tests/common/devices/sonic_asic.py b/tests/common/devices/sonic_asic.py index dabd5b835d8..68f6f2386f1 100644 --- a/tests/common/devices/sonic_asic.py +++ b/tests/common/devices/sonic_asic.py @@ -154,6 +154,19 @@ def show_ip_interface(self, *module_args, **complex_args): complex_args['namespace'] = self.namespace return self.sonichost.show_ip_interface(*module_args, **complex_args) + def show_ipv6_interface(self, *module_args, **complex_args): + """Wrapper for the ansible module 'show_ipv6_interface' + + Args: + module_args: other ansible module args passed from the caller + complex_args: other ansible keyword args + + Returns: + 
[dict]: [the output of show ipv6 interface status command] + """ + complex_args['namespace'] = self.namespace + return self.sonichost.show_ipv6_interface(*module_args, **complex_args) + def run_sonic_db_cli_cmd(self, sonic_db_cmd): cmd = "{} {}".format(self.sonic_db_cli, sonic_db_cmd) return self.sonichost.command(cmd, verbose=False) @@ -281,17 +294,23 @@ def is_backend_portchannel(self, port_channel): return False return True - def get_active_ip_interfaces(self, tbinfo, intf_num="all"): + def get_active_ip_interfaces(self, tbinfo, intf_num="all", include_ipv6=False): """ Return a dict of active IP (Ethernet or PortChannel) interfaces, with interface and peer IPv4 address. + If include_ipv6 is true, also returns IPv6 and its peer IPv6 addresses. + Returns: - Dict of Interfaces and their IPv4 address + Dict of Interfaces and their IPv4 address (with IPv6 if include_ipv6 option is true) """ + ipv6_ifs = None ip_ifs = self.show_ip_interface()["ansible_facts"]["ip_interfaces"] + if include_ipv6: + ipv6_ifs = self.show_ipv6_interface()["ansible_facts"]["ipv6_interfaces"] + return self.sonichost.active_ip_interfaces( - ip_ifs, tbinfo, self.namespace, intf_num=intf_num + ip_ifs, tbinfo, self.namespace, intf_num=intf_num, ipv6_ifs=ipv6_ifs ) def bgp_drop_rule(self, ip_version, state="present"): @@ -342,7 +361,8 @@ def remove_ssh_tunnel_sai_rpc(self): try: pid_list = self.sonichost.shell( - r'pgrep -f "ssh -o StrictHostKeyChecking=no -fN -L \*:{}"'.format(self.get_rpc_port_ssh_tunnel()) + r'pgrep -f "ssh -o StrictHostKeyChecking=no -o ServerAliveInterval=60 -fN -L \*:{}"'.format( + self.get_rpc_port_ssh_tunnel()) )["stdout_lines"] except RunAnsibleModuleFail: return @@ -375,7 +395,7 @@ def create_ssh_tunnel_sai_rpc(self): raise Exception("Invalid V4 address {}".format(ns_docker_if_ipv4)) self.sonichost.shell( - ("ssh -o StrictHostKeyChecking=no -fN" + ("ssh -o StrictHostKeyChecking=no -fN -o ServerAliveInterval=60" " -L *:{}:{}:{} 
localhost").format(self.get_rpc_port_ssh_tunnel(), ns_docker_if_ipv4, self._RPC_PORT_FOR_SSH_TUNNEL)) diff --git a/tests/common/dualtor/constants.py b/tests/common/dualtor/constants.py index 3738c252d2f..b44eb630e51 100644 --- a/tests/common/dualtor/constants.py +++ b/tests/common/dualtor/constants.py @@ -12,6 +12,6 @@ CLEAR_FLAP_COUNTER = "clear_flap_counter" RESET = "reset" -MUX_SIM_ALLOWED_DISRUPTION_SEC = 10 +MUX_SIM_ALLOWED_DISRUPTION_SEC = 30 CONFIG_RELOAD_ALLOWED_DISRUPTION_SEC = 120 PHYSICAL_CABLE_ALLOWED_DISRUPTION_SEC = 1 diff --git a/tests/common/dualtor/data_plane_utils.py b/tests/common/dualtor/data_plane_utils.py index bb7e1bcdefe..9787c1c8643 100644 --- a/tests/common/dualtor/data_plane_utils.py +++ b/tests/common/dualtor/data_plane_utils.py @@ -257,7 +257,7 @@ def send_t1_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo, cable_t arp_setup(ptfhost) def t1_to_server_io_test(activehost, tor_vlan_port=None, - delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.01, + delay=0, allowed_disruption=0, action=None, verify=False, send_interval=0.1, stop_after=None, allow_disruption_before_traffic=False): """ Helper method for `send_t1_to_server_with_action`. 
diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py index 9422ebcf2fd..9b04a9fd3cc 100644 --- a/tests/common/dualtor/dual_tor_utils.py +++ b/tests/common/dualtor/dual_tor_utils.py @@ -1073,6 +1073,8 @@ def check_nexthops_single_downlink(rand_selected_dut, ptfadapter, dst_server_add tbinfo, downlink_ints): HASH_KEYS = ["src-port", "dst-port", "src-ip"] expect_packet_num = 1000 + expect_packet_num_high = expect_packet_num * (1.1) + expect_packet_num_low = expect_packet_num * (0.90) # expect this packet to be sent to downlinks (active mux) and uplink (standby mux) expected_downlink_ports = [get_ptf_server_intf_index(rand_selected_dut, tbinfo, iface) for iface in downlink_ints] @@ -1098,7 +1100,7 @@ # packets should be either 0 or expect_packet_num: count = port_packet_count.get(downlink_int, 0) logging.info("Packets received on downlink port {}: {}".format(downlink_int, count)) - if count > 0 and count != expect_packet_num: + if count > 0 and (count < expect_packet_num_low or count > expect_packet_num_high): pytest.fail("Packets not sent down single active port {}".format(downlink_int)) if len(downlink_ints) == 0: diff --git a/tests/common/fixtures/duthost_utils.py b/tests/common/fixtures/duthost_utils.py index fb4007d6441..09bfbe9e2bb 100644 --- a/tests/common/fixtures/duthost_utils.py +++ b/tests/common/fixtures/duthost_utils.py @@ -1,3 +1,6 @@ +from typing import Dict, List + +import paramiko import pytest import logging import itertools @@ -5,10 +8,14 @@ import ipaddress import time import json + +from paramiko.ssh_exception import AuthenticationException + +from tests.common import config_reload from tests.common.helpers.assertions import pytest_assert from tests.common.utilities import wait_until from jinja2 import Template -from netaddr import valid_ipv4 +from netaddr import valid_ipv4, valid_ipv6 logger = logging.getLogger(__name__) @@
-598,3 +605,147 @@ def check_bgp_router_id(duthost, mgFacts): return False except Exception as e: logger.error("Error loading BGP routerID - {}".format(e)) + + +@pytest.fixture(scope="module") +def convert_and_restore_config_db_to_ipv6_only(duthosts): + """Back up the existing config_db.json file and restore it once the test ends. + + Some cases will update the running config during the test and save the config + to be recovered after reboot. In such a case we need to backup config_db.json before + the test starts and then restore it after the test ends. + """ + config_db_file = "/etc/sonic/config_db.json" + config_db_bak_file = "/etc/sonic/config_db.json.before_ipv6_only" + + # Sample MGMT_INTERFACE: + # "MGMT_INTERFACE": { + # "eth0|192.168.0.2/24": { + # "forced_mgmt_routes": [ + # "192.168.1.1/24" + # ], + # "gwaddr": "192.168.0.1" + # }, + # "eth0|fc00:1234:5678:abcd::2/64": { + # "gwaddr": "fc00:1234:5678:abcd::1", + # "forced_mgmt_routes": [ + # "fc00:1234:5678:abc1::1/64" + # ] + # } + # } + + # duthost_name: config_db_modified + config_db_modified: Dict[str, bool] = {duthost.hostname: False + for duthost in duthosts.nodes} + # duthost_name: [ip_addr] + ipv4_address: Dict[str, List] = {duthost.hostname: [] + for duthost in duthosts.nodes} + ipv6_address: Dict[str, List] = {duthost.hostname: [] + for duthost in duthosts.nodes} + # Check IPv6 mgmt-ip is set and available, otherwise the DUT will lose control after v4 mgmt-ip is removed + for duthost in duthosts.nodes: + mgmt_interface = json.loads(duthost.shell(f"jq '.MGMT_INTERFACE' {config_db_file}", + module_ignore_errors=True)["stdout"]) + # Use list() to make a copy of mgmt_interface.keys() to avoid + # "RuntimeError: dictionary changed size during iteration" error + ssh_client = paramiko.SSHClient() + ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + has_available_ipv6_addr = False + for key in list(mgmt_interface): + ip_addr = key.split("|")[1] + ip_addr_without_mask = 
ip_addr.split('/')[0] + if ip_addr: + is_ipv6 = valid_ipv6(ip_addr_without_mask) + if is_ipv6: + logger.info(f"Host[{duthost.hostname}] IPv6[{ip_addr}]") + ipv6_address[duthost.hostname].append(ip_addr_without_mask) + try: + ssh_client.connect(ip_addr_without_mask, + username="WRONG_USER", password="WRONG_PWD", timeout=15) + except AuthenticationException: + logger.info(f"Host[{duthost.hostname}] IPv6[{ip_addr_without_mask}] mgmt-ip is available") + has_available_ipv6_addr = has_available_ipv6_addr or True + except BaseException: + pass + finally: + ssh_client.close() + + pytest_assert(len(ipv6_address[duthost.hostname]) > 0, + f"{duthost.hostname} doesn't have IPv6 Management IP address") + pytest_assert(has_available_ipv6_addr, + f"{duthost.hostname} doesn't have available IPv6 Management IP address") + + # Remove IPv4 mgmt-ip + for duthost in duthosts.nodes: + logger.info(f"Backup {config_db_file} to {config_db_bak_file} on {duthost.hostname}") + duthost.shell(f"cp {config_db_file} {config_db_bak_file}") + mgmt_interface = json.loads(duthost.shell(f"jq '.MGMT_INTERFACE' {config_db_file}", + module_ignore_errors=True)["stdout"]) + + # Use list() to make a copy of mgmt_interface.keys() to avoid + # "RuntimeError: dictionary changed size during iteration" error + for key in list(mgmt_interface): + ip_addr = key.split("|")[1] + ip_addr_without_mask = ip_addr.split('/')[0] + if ip_addr: + is_ipv4 = valid_ipv4(ip_addr_without_mask) + if is_ipv4: + ipv4_address[duthost.hostname].append(ip_addr_without_mask) + logger.info(f"Removing host[{duthost.hostname}] IPv4[{ip_addr}]") + duthost.shell(f"""jq 'del(."MGMT_INTERFACE"."{key}")' {config_db_file} > temp.json""" + f"""&& mv temp.json {config_db_file}""", module_ignore_errors=True) + config_db_modified[duthost.hostname] = True + config_reload(duthost, wait=120) + duthosts.reset() + + # Verify mgmt-interface status + mgmt_intf_name = "eth0" + for duthost in duthosts.nodes: + logger.info(f"Checking host[{duthost.hostname}] 
mgmt interface[{mgmt_intf_name}]") + mgmt_intf_ifconfig = duthost.shell(f"ifconfig {mgmt_intf_name}", module_ignore_errors=True)["stdout"] + assert_addr_in_ifconfig(addr_set=ipv4_address, hostname=duthost.hostname, + expect_exists=False, ifconfig_output=mgmt_intf_ifconfig) + assert_addr_in_ifconfig(addr_set=ipv6_address, hostname=duthost.hostname, + expect_exists=True, ifconfig_output=mgmt_intf_ifconfig) + + yield + + # Recover IPv4 mgmt-ip + for duthost in duthosts.nodes: + if config_db_modified[duthost.hostname]: + logger.info(f"Restore {config_db_file} with {config_db_bak_file} on {duthost.hostname}") + duthost.shell(f"mv {config_db_bak_file} {config_db_file}") + config_reload(duthost, safe_reload=True) + duthosts.reset() + + # Verify mgmt-interface status + for duthost in duthosts.nodes: + logger.info(f"Checking host[{duthost.hostname}] mgmt interface[{mgmt_intf_name}]") + mgmt_intf_ifconfig = duthost.shell(f"ifconfig {mgmt_intf_name}", module_ignore_errors=True)["stdout"] + assert_addr_in_ifconfig(addr_set=ipv4_address, hostname=duthost.hostname, + expect_exists=True, ifconfig_output=mgmt_intf_ifconfig) + assert_addr_in_ifconfig(addr_set=ipv6_address, hostname=duthost.hostname, + expect_exists=True, ifconfig_output=mgmt_intf_ifconfig) + + +def assert_addr_in_ifconfig(addr_set: Dict[str, List], hostname: str, expect_exists: bool, ifconfig_output: str): + """ + Assert the address status in the ifconfig output, + if status not as expected, assert as failure + + @param addr_set: addr_set, key is dut hostname, value is the list of ip addresses + @param hostname: hostname + @param expect_exists: Expectation of the ip, + True means expect all ip addresses in addr_set appears in the output of ifconfig + False means expect no ip addresses in addr_set appears in the output of ifconfig + @param ifconfig_output: output of 'ifconfig' + """ + for addr in addr_set[hostname]: + if expect_exists: + pytest_assert(addr in ifconfig_output, + f"{addr} not appeared in {hostname} 
mgmt interface") + logger.info(f"{addr} exists in the output of ifconfig") + else: + pytest_assert(addr not in ifconfig_output, + f"{hostname} mgmt interface still with addr {addr}") + logger.info(f"{addr} not exists in the output of ifconfig which is expected") diff --git a/tests/common/fixtures/ptfhost_utils.py b/tests/common/fixtures/ptfhost_utils.py index 7d4cc70448e..e33045446f2 100644 --- a/tests/common/fixtures/ptfhost_utils.py +++ b/tests/common/fixtures/ptfhost_utils.py @@ -33,7 +33,7 @@ GARP_SERVICE_PY = 'garp_service.py' GARP_SERVICE_CONF_TEMPL = 'garp_service.conf.j2' PTF_TEST_PORT_MAP = '/root/ptf_test_port_map.json' -PROBER_INTERVAL_MS = 1000 +PROBER_INTERVAL_MS = 3000 @pytest.fixture(scope="session", autouse=True) diff --git a/tests/common/helpers/assertions.py b/tests/common/helpers/assertions.py index f524eca7c29..93a3dfc79a7 100644 --- a/tests/common/helpers/assertions.py +++ b/tests/common/helpers/assertions.py @@ -4,6 +4,8 @@ def pytest_assert(condition, message=None): __tracebackhide__ = True if not condition: + if not isinstance(message, str): + message = str(message) pytest.fail(message) diff --git a/tests/common/helpers/gnmi_utils.py b/tests/common/helpers/gnmi_utils.py index 925c5a52153..9c812b86c27 100644 --- a/tests/common/helpers/gnmi_utils.py +++ b/tests/common/helpers/gnmi_utils.py @@ -52,7 +52,7 @@ def generate_telemetry_config(self, duthost): self.gnmi_config_table = "TELEMETRY" self.gnmi_container = "telemetry" # GNMI program is telemetry or gnmi-native - res = duthost.shell("docker exec gnmi supervisorctl status", module_ignore_errors=True) + res = duthost.shell("docker exec telemetry supervisorctl status", module_ignore_errors=True) if 'telemetry' in res['stdout']: self.gnmi_program = "telemetry" else: diff --git a/tests/common/helpers/platform_api/sfp.py b/tests/common/helpers/platform_api/sfp.py index ddd1ceb62cf..23d5ba836f9 100644 --- a/tests/common/helpers/platform_api/sfp.py +++ b/tests/common/helpers/platform_api/sfp.py @@ 
-66,6 +66,10 @@ def get_transceiver_info(conn, index): return sfp_api(conn, index, 'get_transceiver_info') +def get_transceiver_info_firmware_versions(conn, index): + return sfp_api(conn, index, 'get_transceiver_info_firmware_versions') + + def get_transceiver_bulk_status(conn, index): return sfp_api(conn, index, 'get_transceiver_bulk_status') diff --git a/tests/common/helpers/tcpdump_sniff_helper.py b/tests/common/helpers/tcpdump_sniff_helper.py new file mode 100644 index 00000000000..365084ff57c --- /dev/null +++ b/tests/common/helpers/tcpdump_sniff_helper.py @@ -0,0 +1,163 @@ +import time +import logging +import scapy.all as scapyall + + +class TcpdumpSniffHelper(object): + + def __init__(self, ptfadapter, duthost, ptfhost, pcap_path="/tmp/capture.pcap"): + self.ptfadpater = ptfadapter + self.duthost = duthost + self.ptfhost = ptfhost + self._pcap_path = pcap_path + self._tcpdump_filter = 'ip or ip6' + self._out_direct_ifaces = [] + self._in_direct_ifaces = [] + self._bi_direct_ifaces = [] + self._total_ifaces = [] + + @property + def tcpdump_filter(self): + return self._tcpdump_filter + + @tcpdump_filter.setter + def tcpdump_filter(self, value): + self._tcpdump_filter = value + + @property + def out_direct_ifaces(self): + return self._out_direct_ifaces + + @out_direct_ifaces.setter + def out_direct_ifaces(self, value): + self._out_direct_ifaces = list(set(value)) + + @property + def in_direct_ifaces(self): + return self._in_direct_ifaces + + @in_direct_ifaces.setter + def in_direct_ifaces(self, value): + self._in_direct_ifaces = list(set(value)) + + @property + def bi_direct_ifaces(self): + return self._bi_direct_ifaces + + @bi_direct_ifaces.setter + def bi_direct_ifaces(self, value): + self._bi_direct_ifaces = list(set(value)) + + @property + def pcap_path(self): + return self._pcap_path + + def update_total_ifaces(self): + self._total_ifaces = list(set(self._out_direct_ifaces + self._in_direct_ifaces + self._bi_direct_ifaces)) + + def start_dump_process(self, 
host, iface, direction="inout"): + """ + Start tcpdump on specific interface and save data to pcap file + """ + iface_pcap_path = '{}_{}'.format(self.pcap_path, iface) + if host is self.ptfhost: + iface = 'eth' + str(iface) + cmd = "tcpdump -i {} '{}' -w {} --immediate-mode --direction {} -U".format(iface, self._tcpdump_filter, + iface_pcap_path, direction) + logging.info('Tcpdump sniffer starting on iface: {} direction: {}'.format(iface, direction)) + if host is self.duthost: + cmd = "sudo " + cmd + host.shell(self.run_background_cmd(cmd)) + + def run_background_cmd(self, command): + return "nohup " + command + " &" + + def start_sniffer(self, host='ptf'): + """ + Start tcpdump sniffer + """ + host = self.ptfhost if host == 'ptf' else self.duthost + logging.info("Tcpdump sniffer starting") + for iface in self.out_direct_ifaces: + self.start_dump_process(host, iface, "out") + for iface in self.in_direct_ifaces: + self.start_dump_process(host, iface, "in") + for iface in self.bi_direct_ifaces: + self.start_dump_process(host, iface) + + def stop_sniffer(self, host='ptf'): + """ + Stop tcpdump sniffer + """ + cmd = "killall -s SIGINT tcpdump" + host = self.ptfhost if host == 'ptf' else self.duthost + time.sleep(2) # Wait for switch and server tcpdump packet processing + logging.info("Tcpdump sniffer stopping") + logging.info("Killed all tcpdump processes by SIGINT") + if host is self.duthost: + host.shell('sudo ' + cmd) + self.copy_pcaps_to_ptf() + else: + host.shell(cmd) + self.create_single_pcap() + logging.info("Copy {} from ptf docker to ngts docker".format(self.pcap_path)) + self.ptfhost.shell("chmod 777 {}".format(self.pcap_path)) + logging.info("Copy file {} from ptf docker to ngts docker".format(self.pcap_path)) + self.ptfhost.fetch(src=self.pcap_path, dest=self.pcap_path, flat=True) + + def copy_pcaps_to_ptf(self): + self.update_total_ifaces() + for iface in self._total_ifaces: + iface_pcap_path = '{}_{}'.format(self.pcap_path, iface) + logging.info("Copy {} 
from switch to ptf docker to do further operation".format(iface_pcap_path)) + self.duthost.fetch(src=iface_pcap_path, dest=iface_pcap_path, flat=True) + self.ptfhost.copy(src=iface_pcap_path, dest=iface_pcap_path) + logging.info("Remove {} at DUT".format(iface_pcap_path)) + self.duthost.shell("rm -f {}".format(iface_pcap_path)) + + def sniffer_result(self): + capture_packets = scapyall.rdpcap(self.pcap_path) + logging.info("Number of all packets captured: {}".format(len(capture_packets))) + return capture_packets + + def create_single_pcap(self): + """ + Merge all pcaps from each interface into single pcap file + """ + pcapng_full_capture = self.merge_pcaps() + self.convert_pcapng_to_pcap(pcapng_full_capture) + logging.info('Pcap files merged into single pcap file: {}'.format(self.pcap_path)) + + def convert_pcapng_to_pcap(self, pcapng_full_capture): + """ + Convert pcapng file into pcap. We can't just merge all in pcap, + mergecap can merge multiple files only into pcapng format + """ + cmd = "mergecap -F pcap -w {} {}".format(self.pcap_path, pcapng_full_capture) + logging.info('Converting pcapng file into pcap file') + self.ptfhost.shell(cmd) + logging.info('Pcapng file converted into pcap file') + self.ptfhost.shell("rm -f {}".format(pcapng_full_capture)) + + def merge_pcaps(self): + """ + Merge all pcaps into one, format: pcapng + """ + pcapng_full_capture = '{}.pcapng'.format(self.pcap_path) + cmd = "mergecap -w {}".format(pcapng_full_capture) + ifaces_pcap_files_list = [] + self.update_total_ifaces() + for iface in self._total_ifaces: + pcap_file_path = '{}_{}'.format(self.pcap_path, iface) + res = self.ptfhost.shell("ls -l {}".format(pcap_file_path), module_ignore_errors=True) + if res["rc"] == 0: + cmd += ' ' + (pcap_file_path) + ifaces_pcap_files_list.append(pcap_file_path) + + logging.info('Starting merge pcap files') + self.ptfhost.shell(cmd) + logging.info('Pcap files merged into tmp pcapng file') + for pcap_file in ifaces_pcap_files_list: + 
self.ptfhost.shell("rm -f {}".format(pcap_file)) + + return pcapng_full_capture diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index da3a7bac1c6..5c4356dd383 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -86,6 +86,12 @@ bfd/test_bfd.py::test_bfd_scale: conditions: - "platform in ['x86_64-8102_64h_o-r0', 'x86_64-8101_32fh_o-r0']" +bfd/test_bfd_static_route.py: + skip: + reason: "Only supported on chassis system & cisco platform." + conditions: + - "is_multi_asic is False" + - "asic_type in ['cisco-8000']" ####################################### ##### bgp ##### @@ -201,19 +207,29 @@ copp/test_copp.py: skip: reason: "Topology not supported by COPP tests" conditions: - - "(topo_name not in ['ptf32', 'ptf64', 't0', 't0-64', 't0-52', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 'm0', 'mx'] and 't2' not in topo)" + - "(topo_name not in ['ptf32', 'ptf64', 't0', 't0-64', 't0-52', 't0-116', 't1', 't1-lag', 't1-64-lag', 't1-56-lag', 't1-backend', 'm0', 'mx'] and 't2' not in topo_type)" copp/test_copp.py::TestCOPP::test_add_new_trap: skip: reason: "Copp test_add_new_trap is not yet supported on multi-asic platform" conditions: - "is_multi_asic==True" + xfail: + reason: "Can't unisntall trap on broadcom platform successfully" + conditions: + - "asic_type in ['broadcom']" + - "release in ['202305']" copp/test_copp.py::TestCOPP::test_remove_trap: skip: reason: "Copp test_remove_trap is not yet supported on multi-asic platform" conditions: - "is_multi_asic==True" + xfail: + reason: "Can't unisntall trap on broadcom platform successfully" + conditions: + - "asic_type in ['broadcom']" + - "release in ['202305']" copp/test_copp.py::TestCOPP::test_trap_config_save_after_reboot: skip: @@ -296,6 +312,20 @@ dhcp_relay/test_dhcpv6_relay.py: conditions: - "platform in 
['x86_64-8111_32eh_o-r0']" +dhcp_relay/test_dhcpv6_relay.py::test_dhcp_relay_default: + skip: + reason: "Test is not supported in dualtor-aa" + conditions: + - https://github.com/sonic-net/sonic-mgmt/issues/12045 + - "'dualtor-aa' in topo_name" + +dhcp_relay/test_dhcpv6_relay.py::test_dhcpv6_relay_counter: + skip: + reason: "Test is not supported in dualtor-aa" + conditions: + - https://github.com/sonic-net/sonic-mgmt/issues/12045 + - "'dualtor-aa' in topo_name" + ####################################### ##### drop_packets ##### ####################################### @@ -610,9 +640,11 @@ generic_config_updater: generic_config_updater/test_dhcp_relay.py: skip: - reason: "Need to skip for platform x86_64-8111_32eh_o-r0" + reason: "Need to skip for platform x86_64-8111_32eh_o-r0 or backend topology" + conditions_logical_operator: "OR" conditions: - "platform in ['x86_64-8111_32eh_o-r0']" + - "'backend' in topo_name" generic_config_updater/test_ecn_config_update.py::test_ecn_config_updates: skip: @@ -684,11 +716,11 @@ http/test_http_copy.py: ####################################### iface_loopback_action/test_iface_loopback_action.py: skip: - reason: "Test not supported on Broadcom SKUs or dualtor topology." + reason: "Test only supported on Mellanox SKUs, didn't supported on dualtor topology." 
conditions_logical_operator: or conditions: - "'dualtor' in topo_name" - - "asic_type in ['broadcom']" + - "asic_type not in ['mellanox']" ####################################### ##### iface_namingmode ##### @@ -751,6 +783,12 @@ ip/test_ip_packet.py::TestIPPacket::test_forward_ip_packet_with_0xffff_chksum_to conditions: - "asic_type in ['mellanox'] or asic_subtype in ['broadcom-dnx']" +ip/test_mgmt_ipv6_only.py: + skip: + reason: "Skipping mgmt ipv6 test for mgmt topo" + conditions: + - "topo_type in ['m0', 'mx']" + ####################################### ##### ipfwd ##### ####################################### @@ -1050,6 +1088,12 @@ qos/test_qos_sai.py::TestQosSai::testIPIPQosSaiDscpToPgMapping: conditions: - "asic_type in ['mellanox']" +qos/test_qos_sai.py::TestQosSai::testPfcStormWithSharedHeadroomOccupancy: + skip: + reason: "This test is only for Mellanox." + conditions: + - "asic_type in ['cisco-8000']" + qos/test_qos_sai.py::TestQosSai::testQosSaiBufferPoolWatermark: skip: reason: "sai_thrift_read_buffer_pool_watermark are not supported on DNX" @@ -1090,13 +1134,15 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolSize: skip: reason: "Headroom pool size not supported." 
conditions: - - "hwsku not in ['Arista-7060CX-32S-C32', 'Celestica-DX010-C32', 'Arista-7260CX3-D108C8', 'Force10-S6100', 'Arista-7260CX3-Q64', 'Arista-7050CX3-32S-C32', 'Arista-7050CX3-32S-D48C8'] and asic_type not in ['mellanox']" + - "hwsku not in ['Arista-7060CX-32S-C32', 'Celestica-DX010-C32', 'Arista-7260CX3-D108C8', 'Force10-S6100', 'Arista-7260CX3-Q64', 'Arista-7050CX3-32S-C32', 'Arista-7050CX3-32S-D48C8', 'Arista-7060CX-32S-D48C8'] and asic_type not in ['mellanox']" + - "asic_type in ['cisco-8000']" qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolWatermark: skip: reason: "sai_thrift_read_buffer_pool_watermark are not supported on DNX" conditions: - "platform in ['x86_64-nokia_ixr7250e_36x400g-r0', 'x86_64-arista_7800r3_48cq2_lc', 'x86_64-arista_7800r3_48cqm2_lc', 'x86_64-arista_7800r3a_36d2_lc', 'x86_64-arista_7800r3a_36dm2_lc', 'x86_64-arista_7800r3ak_36dm2_lc'] or asic_type in ['mellanox']" + - "asic_type in ['cisco-8000']" xfail: reason: "Headroom pool size not supported." conditions: @@ -1235,6 +1281,15 @@ show_techsupport/test_techsupport.py::test_techsupport: - https://github.com/sonic-net/sonic-mgmt/issues/7520 - "asic_type not in ['mellanox']" +####################################### +##### snappi_tests ##### +####################################### +snappi_tests/ecn/test_red_accuracy_with_snappi: + skip: + reason: "Test should not be run as part of nightly." 
+ conditions: + - "topo_type in ['tgen']" + ####################################### ##### snmp ##### ####################################### @@ -1375,14 +1430,18 @@ telemetry/test_telemetry.py: conditions: - "(is_multi_asic==True) and (release in ['201811', '201911'])" +telemetry/test_telemetry.py::test_osbuild_version: + skip: + reason: "Testcase ignored due to Github issue: https://github.com/sonic-net/sonic-mgmt/issues/12021" + conditions: + - https://github.com/sonic-net/sonic-mgmt/issues/12021 + ####################################### ##### pktgen ##### ####################################### test_pktgen.py: - skip: - reason: "Currently only supported in cisco-8000" - conditions: - - "asic_type not in ['cisco-8000']" + skip: + reason: "have known issue, skip for now" ####################################### ##### vlan ##### diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml index 42ee32efecf..1c598973415 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions_platform_tests.yaml @@ -36,10 +36,11 @@ platform_tests/api/test_chassis.py::TestChassisApi::test_get_presence: platform_tests/api/test_chassis.py::TestChassisApi::test_get_revision: xfail: - reason: "Testcase consistently fails, raised issue to track" + reason: "[DX010] Testcase consistently fails, raised issue to track; [E1031] API 'get_revision' not implemented" + conditions_logical_operator: "OR" conditions: - - "hwsku in ['Celestica-DX010-C32']" - - https://github.com/sonic-net/sonic-mgmt/issues/6512 + - "hwsku in ['Celestica-DX010-C32'] and https://github.com/sonic-net/sonic-mgmt/issues/6512" + - "platform in ['x86_64-cel_e1031-r0'] and https://github.com/sonic-net/sonic-buildimage/issues/18229" platform_tests/api/test_chassis.py::TestChassisApi::test_get_status: # Skip 
unsupported API test on Mellanox platform @@ -318,8 +319,10 @@ platform_tests/api/test_module.py::TestModuleApi::test_reboot: platform_tests/api/test_psu.py::TestPsuApi::test_fans: skip: reason: "Unsupported platform API" + conditions_logical_operator: "OR" conditions: - "asic_type in ['mellanox']" + - "platform in ['x86_64-cel_e1031-r0'] and https://github.com/sonic-net/sonic-buildimage/issues/18229" platform_tests/api/test_psu.py::TestPsuApi::test_get_model: skip: @@ -329,10 +332,11 @@ platform_tests/api/test_psu.py::TestPsuApi::test_get_model: platform_tests/api/test_psu.py::TestPsuApi::test_get_revision: xfail: - reason: "case failed and waiting for fix" + reason: "[DX010] case failed and waiting for fix; [E1031] API 'get_revision' not implemented" + conditions_logical_operator: "OR" conditions: - - "hwsku in ['Celestica-DX010-C32']" - - https://github.com/sonic-net/sonic-mgmt/issues/6767 + - "hwsku in ['Celestica-DX010-C32'] and https://github.com/sonic-net/sonic-mgmt/issues/6767" + - "platform in ['x86_64-cel_e1031-r0'] and https://github.com/sonic-net/sonic-buildimage/issues/18229" platform_tests/api/test_psu.py::TestPsuApi::test_get_serial: skip: @@ -412,6 +416,13 @@ platform_tests/api/test_psu_fans.py::TestPsuFans::test_set_fans_speed: ####################################### ##### api/test_sfp.py ##### ####################################### +platform_tests/api/test_sfp.py::TestSfpApi::test_get_error_description: + xfail: + reason: "Platform API 'get_error_description' not implemented" + conditions: + - "platform in ['x86_64-cel_e1031-r0']" + - https://github.com/sonic-net/sonic-buildimage/issues/18229 + platform_tests/api/test_sfp.py::TestSfpApi::test_get_model: skip: reason: "Unsupported platform API" @@ -751,6 +762,15 @@ platform_tests/test_advanced_reboot.py::test_fast_reboot_from_other_vendor: conditions: - https://github.com/sonic-net/sonic-mgmt/issues/11007 +####################################### +#### test_kdump.py ##### 
+####################################### +platform_tests/test_kdump.py: + skip: + reason: "Not supported on Nokia-7215 platform" + conditions: + - "platform in ['armhf-nokia_ixs7215_52x-r0']" + ####################################### #### test_memory_exhaustion.py ##### ####################################### @@ -893,3 +913,22 @@ platform_tests/test_sensors.py::test_sensors: platform_tests/test_sequential_restart.py::test_restart_syncd: skip: reason: "Restarting syncd is not supported yet" + +####################################### +##### test_service_warm_restart.py #### +####################################### +platform_tests/test_service_warm_restart.py: + skip: + reason: "Skip test_service_warm_restart on mellanox platform" + conditions: + - "asic_type in ['mellanox']" + +####################################### +##### test_xcvr_info_in_db.py ##### +####################################### +platform_tests/test_xcvr_info_in_db.py::test_xcvr_info_in_db: + xfail: + reason: "CLI Utility sfpshow not working correctly" + conditions: + - "platform in ['x86_64-cel_e1031-r0']" + - https://github.com/sonic-net/sonic-buildimage/issues/18231 diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py index 5528f405b1a..46dced8cd59 100644 --- a/tests/common/plugins/sanity_check/checks.py +++ b/tests/common/plugins/sanity_check/checks.py @@ -64,6 +64,25 @@ def _find_down_ip_ports(dut, ip_interfaces): return down_ip_ports +def _parse_bfd_output(output): + data_rows = output[3:] + data_dict = {} + for data in data_rows: + data = data.split() + data_dict[data[0]] = {} + data_dict[data[0]]['Interface'] = data[1] + data_dict[data[0]]['Vrf'] = data[2] + data_dict[data[0]]['State'] = data[3] + data_dict[data[0]]['Type'] = data[4] + data_dict[data[0]]['Local Addr'] = data[5] + data_dict[data[0]]['TX Interval'] = data[6] + data_dict[data[0]]['RX Interval'] = data[7] + data_dict[data[0]]['Multiplier'] = data[8] + data_dict[data[0]]['Multihop'] 
= data[9] + data_dict[data[0]]['Local Discriminator'] = data[10] + return data_dict + + def _find_down_ports(dut, phy_interfaces, ip_interfaces): """Finds the ports which are operationally down diff --git a/tests/common/snappi_tests/common_helpers.py b/tests/common/snappi_tests/common_helpers.py index ad62494d342..e2454656157 100644 --- a/tests/common/snappi_tests/common_helpers.py +++ b/tests/common/snappi_tests/common_helpers.py @@ -16,7 +16,6 @@ import re from netaddr import IPNetwork from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice -from tests.common.broadcom_data import is_broadcom_device as isBroadcomDevice from ipaddress import IPv6Network, IPv6Address from random import getrandbits @@ -486,8 +485,15 @@ def enable_ecn(host_ans, prio, asic_value=None): """ if asic_value is None: host_ans.shell('sudo ecnconfig -q {} on'.format(prio)) + results = host_ans.shell('ecnconfig -q {}'.format(prio)) + if re.search("queue {}: on".format(prio), results['stdout']): + return True else: host_ans.shell('sudo ip netns exec {} ecnconfig -q {} on'.format(asic_value, prio)) + results = host_ans.shell('sudo ip netns exec {} ecnconfig -q {}'.format(asic_value, prio)) + if re.search("queue {}: on".format(prio), results['stdout']): + return True + return False def disable_ecn(host_ans, prio, asic_value=None): """ @@ -746,7 +752,7 @@ def disable_packet_aging(duthost, asic_value=None): duthost.command("docker cp /tmp/packets_aging.py syncd:/") duthost.command("docker exec syncd python /packets_aging.py disable") duthost.command("docker exec syncd rm -rf /packets_aging.py") - elif isBroadcomDevice(duthost): + elif "platform_asic" in duthost.facts and duthost.facts["platform_asic"] == "broadcom-dnx": try: duthost.shell('bcmcmd -n {} "BCMSAI credit-watchdog disable"'.format(asic_value)) except Exception: @@ -767,7 +773,7 @@ def enable_packet_aging(duthost, asic_value=None): duthost.command("docker cp /tmp/packets_aging.py syncd:/") duthost.command("docker exec 
syncd python /packets_aging.py enable") duthost.command("docker exec syncd rm -rf /packets_aging.py") - elif isBroadcomDevice(duthost): + elif "platform_asic" in duthost.facts and duthost.facts["platform_asic"] == "broadcom-dnx": try: duthost.shell('bcmcmd -n {} "BCMSAI credit-watchdog enable"'.format(asic_value)) except Exception: @@ -885,8 +891,14 @@ def get_egress_queue_count(duthost, port, priority): tuple (int, int): total count of packets and bytes in the queue """ raw_out = duthost.shell("show queue counters {} | sed -n '/UC{}/p'".format(port, priority))['stdout'] - total_pkts = "0" if raw_out.split()[2] == "N/A" else raw_out.split()[2] - total_bytes = "0" if raw_out.split()[3] == "N/A" else raw_out.split()[3] + total_pkts = raw_out.split()[2] if 2 < len(raw_out.split()) else "0" + if total_pkts == "N/A": + total_pkts = "0" + + total_bytes = raw_out.split()[3] if 3 < len(raw_out.split()) else "0" + if total_bytes == "N/A": + total_bytes = "0" + return int(total_pkts.replace(',', '')), int(total_bytes.replace(',', '')) diff --git a/tests/common/utilities.py b/tests/common/utilities.py index bc04b7ce3d4..50f1590dcde 100644 --- a/tests/common/utilities.py +++ b/tests/common/utilities.py @@ -17,8 +17,10 @@ import traceback import copy import tempfile +import uuid from io import StringIO from ast import literal_eval +from scapy.all import sniff as scapy_sniff import pytest from ansible.parsing.dataloader import DataLoader @@ -1071,3 +1073,39 @@ def get_egress_queue_pkt_count_all_prio(duthost, port): queue_stats.append(int(total_pkts_str.replace(',', ''))) return queue_stats + + +@contextlib.contextmanager +def capture_and_check_packet_on_dut( + duthost, + interface='any', + pkts_filter='', + pkts_validator=lambda pkts: pytest_assert(len(pkts) > 0, "No packets captured"), + wait_time=1 +): + """ + Capture packets on DUT and check if the packet is expected + Parameters: + duthost: the DUT to perform the packet capture + interface: the interface to capture packets 
on, default is 'any' + pkts_filter: the PCAP-FILTER to apply to the captured packets, default is '' means no filter + pkts_validator: the function to validate the captured packets, default is to check if any packet is captured + """ + pcap_save_path = "/tmp/func_capture_and_check_packet_on_dut_%s.pcap" % (str(uuid.uuid4())) + cmd_capture_pkts = "sudo nohup tcpdump --immediate-mode -U -i %s -w %s >/dev/null 2>&1 %s & echo $!" \ + % (interface, pcap_save_path, pkts_filter) + tcpdump_pid = duthost.shell(cmd_capture_pkts)["stdout"] + cmd_check_if_process_running = "ps -p %s | grep %s |grep -v grep | wc -l" % (tcpdump_pid, tcpdump_pid) + pytest_assert(duthost.shell(cmd_check_if_process_running)["stdout"] == "1", + "Failed to start tcpdump on DUT") + logging.info("Start to capture packet on DUT, tcpdump pid: %s, pcap save path: %s, with command: %s" + % (tcpdump_pid, pcap_save_path, cmd_capture_pkts)) + try: + yield + time.sleep(wait_time) + duthost.shell("kill -s 2 %s" % tcpdump_pid) + with tempfile.NamedTemporaryFile() as temp_pcap: + duthost.fetch(src=pcap_save_path, dest=temp_pcap.name, flat=True) + pkts_validator(scapy_sniff(offline=temp_pcap.name)) + finally: + duthost.file(path=pcap_save_path, state="absent") diff --git a/tests/conftest.py b/tests/conftest.py index 2fd30161b5f..4e7424f1364 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2274,6 +2274,16 @@ def cleanup(self): on_exit.cleanup() +@pytest.fixture(scope="session", autouse=True) +def add_mgmt_test_mark(duthosts): + ''' + @summary: Create mark file at /etc/sonic/mgmt_test_mark, and DUT can use this mark to detect mgmt test. 
+ @param duthosts: fixture to get DUT hosts + ''' + mark_file = "/etc/sonic/mgmt_test_mark" + duthosts.shell("touch %s" % mark_file, module_ignore_errors=True) + + def verify_packets_any_fixed(test, pkt, ports=[], device_number=0, timeout=None): """ Check that a packet is received on _any_ of the specified ports belonging to diff --git a/tests/container_checker/test_container_checker.py b/tests/container_checker/test_container_checker.py index 919c04f41ac..267baaa6d34 100644 --- a/tests/container_checker/test_container_checker.py +++ b/tests/container_checker/test_container_checker.py @@ -11,6 +11,7 @@ from tests.common.helpers.assertions import pytest_assert from tests.common.helpers.assertions import pytest_require from tests.common.helpers.dut_utils import check_container_state +from tests.common.helpers.dut_utils import is_container_running from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer from tests.common.utilities import wait_until from tests.common.helpers.dut_utils import get_disabled_container_list @@ -180,8 +181,8 @@ def get_expected_alerting_message(container_name): return expected_alerting_messages -def test_container_checker(duthosts, enum_rand_one_per_hwsku_hostname, - enum_rand_one_asic_index, enum_dut_feature, tbinfo): +def test_container_checker(duthosts, enum_rand_one_per_hwsku_hostname, enum_rand_one_asic_index, enum_dut_feature, + tbinfo, disable_container_autorestart): """Tests the feature of container checker. 
This function will check whether the container names will appear in the Monit @@ -203,7 +204,7 @@ def test_container_checker(duthosts, enum_rand_one_per_hwsku_hostname, container_name = asic.get_docker_name(service_name) loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="container_checker_{}".format(container_name)) - + sleep_time = 70 disabled_containers = get_disabled_container_list(duthost) skip_containers = disabled_containers[:] @@ -211,10 +212,18 @@ def test_container_checker(duthosts, enum_rand_one_per_hwsku_hostname, # Skip 'radv' container on devices whose role is not T0/M0. if tbinfo["topo"]["type"] not in ["t0", "m0"]: skip_containers.append("radv") - pytest_require(service_name not in skip_containers, "Container '{}' is skipped for testing.".format(container_name)) - + feature_autorestart_states = duthost.get_container_autorestart_states() + if feature_autorestart_states.get(service_name) == 'enabled': + disable_container_autorestart(duthost) + time.sleep(30) + if not is_container_running(duthost, container_name): + logger.info("Container '{}' is not running ...".format(container_name)) + logger.info("Reload config on DuT as Container is not up '{}' ...".format(duthost.hostname)) + config_reload(duthost, safe_reload=True) + time.sleep(300) + sleep_time = 80 asic.stop_service(service_name) logger.info("Waiting until container '{}' is stopped...".format(container_name)) stopped = wait_until(CONTAINER_STOP_THRESHOLD_SECS, @@ -226,6 +235,6 @@ def test_container_checker(duthosts, enum_rand_one_per_hwsku_hostname, loganalyzer.expect_regex = get_expected_alerting_message(container_name) with loganalyzer: - # Wait for 1 minutes such that Monit has a chance to write alerting message into syslog. - logger.info("Sleep 1 minutes to wait for the alerting message...") - time.sleep(70) + # Wait for 70s to 80s such that Monit has a chance to write alerting message into syslog. 
+ logger.info("Sleep '{}'s to wait for the alerting message...".format(sleep_time)) + time.sleep(sleep_time) diff --git a/tests/copp/copp_utils.py b/tests/copp/copp_utils.py index 70d842f2580..82c6332ee5f 100644 --- a/tests/copp/copp_utils.py +++ b/tests/copp/copp_utils.py @@ -373,7 +373,10 @@ def uninstall_trap(dut, feature_name, trap_id): feature_name (str): feature name corresponding to the trap trap_id (str): trap id """ - disable_feature_entry(dut, feature_name) + feature_list, _ = dut.get_feature_status() + if feature_name in feature_list.keys(): + disable_feature_entry(dut, feature_name) + configure_always_enabled_for_trap(dut, trap_id, "false") diff --git a/tests/copp/test_copp.py b/tests/copp/test_copp.py index 87ae55c9b27..00aedb8788c 100644 --- a/tests/copp/test_copp.py +++ b/tests/copp/test_copp.py @@ -110,6 +110,10 @@ def test_add_new_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, logger.info("Uninstall trap {}".format(self.trap_id)) copp_utils.uninstall_trap(duthost, self.feature_name, self.trap_id) + # remove ip2me because bgp traffic can fall back to ip2me trap then interfere following traffic tests + if self.trap_id == "bgp": + logger.info("Uninstall trap ip2me") + copp_utils.uninstall_trap(duthost, "ip2me", "ip2me") logger.info("Verify {} trap status is uninstalled by sending traffic".format(self.trap_id)) _copp_runner(duthost, @@ -142,6 +146,10 @@ def test_remove_trap(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + if self.trap_id == "bgp": + logger.info("Uninstall trap ip2me") + copp_utils.uninstall_trap(duthost, "ip2me", "ip2me") + logger.info("Pre condition: make trap {} is installed".format(self.feature_name)) pre_condition_install_trap(ptfhost, duthost, copp_testbed, self.trap_id, self.feature_name) diff --git a/tests/crm/conftest.py b/tests/crm/conftest.py index 3a318a30d35..0e000da208d 100755 --- a/tests/crm/conftest.py +++ 
b/tests/crm/conftest.py @@ -217,3 +217,26 @@ def collector(duthosts, enum_rand_one_per_hwsku_frontend_hostname): data[asic.asic_index] = {} yield data + + +@pytest.fixture(scope="function") +def cleanup_ptf_interface(duthosts, ip_ver, enum_rand_one_per_hwsku_frontend_hostname, + enum_frontend_asic_index, ptfhost): + + duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + asichost = duthost.asic_instance(enum_frontend_asic_index) + if ip_ver == "4": + ip_remove_cmd = "config interface ip remove Ethernet1 2.2.2.1/24" + else: + ip_remove_cmd = "config interface ip remove Ethernet1 2001::2/64" + check_vlan_cmd = "show vlan br | grep -w 'Ethernet1'" + + yield + + if duthost.facts["asic_type"] == "marvell": + asichost.shell(ip_remove_cmd) + # Check if member not removed + output = asichost.shell(check_vlan_cmd, module_ignore_errors=True) + if "Ethernet1" not in output['stdout']: + asichost.sonichost.add_member_to_vlan(1000, 'Ethernet1', is_tagged=False) + ptfhost.remove_ip_addresses() diff --git a/tests/crm/test_crm.py b/tests/crm/test_crm.py index 6bb275d12c5..0a2365f339b 100755 --- a/tests/crm/test_crm.py +++ b/tests/crm/test_crm.py @@ -603,7 +603,7 @@ def get_expected_crm_stats_route_available(crm_stats_route_available, crm_stats_ @pytest.mark.parametrize("ip_ver,nexthop", [("4", "2.2.2.2"), ("6", "2001::1")]) def test_crm_nexthop(duthosts, enum_rand_one_per_hwsku_frontend_hostname, - enum_frontend_asic_index, crm_interface, ip_ver, nexthop, ptfhost): + enum_frontend_asic_index, crm_interface, ip_ver, nexthop, ptfhost, cleanup_ptf_interface): duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] asichost = duthost.asic_instance(enum_frontend_asic_index) RESTORE_CMDS["crm_threshold_name"] = "ipv{ip_ver}_nexthop".format(ip_ver=ip_ver) diff --git a/tests/dash/conftest.py b/tests/dash/conftest.py index 186ccfefc9c..d66b9623e4d 100644 --- a/tests/dash/conftest.py +++ b/tests/dash/conftest.py @@ -99,7 +99,7 @@ def use_underlay_route(request): 
@pytest.fixture(scope="function") -def dash_config_info(duthost, config_facts, minigraph_facts): +def dash_config_info(duthost, config_facts, minigraph_facts, tbinfo): dash_info = { ENI: "F4939FEFC47E", VM_VNI: 4321, @@ -120,14 +120,33 @@ def dash_config_info(duthost, config_facts, minigraph_facts): dash_info[DUT_MAC] = config_facts["DEVICE_METADATA"]["localhost"]["mac"] neigh_table = duthost.switch_arptable()['ansible_facts']['arptable'] + topo = tbinfo["topo"]["name"] for neigh_ip, config in list(config_facts["BGP_NEIGHBOR"].items()): - # Pick the first two BGP neighbor IPs since these should already be learned on the DUT + # For dpu with 2 ports Pick the first two BGP neighbor IPs since these should already be learned on the DUT + # Take neighbor 1 as local PA, take neighbor 2 as remote PA if ip_interface(neigh_ip).version == 4: if LOCAL_PA_IP not in dash_info: dash_info[LOCAL_PA_IP] = neigh_ip intf, _ = get_intf_from_ip(config['local_addr'], config_facts) dash_info[LOCAL_PTF_INTF] = minigraph_facts["minigraph_ptf_indices"][intf] dash_info[LOCAL_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"] + if topo == 'dpu-1' and REMOTE_PA_IP not in dash_info: + # For DPU with only one single port, we just have one neighbor (neighbor 1). + # So, we take neighbor 1 as the local PA. For the remote PA, + # we take the original neighbor 2's IP as the remote PA IP, + # and the original neighbor 2's network as the remote PA network. + # Take the mac of neighbor 1's mac as the mac of remote PA, + # because the BGP route to neighbor 1 is the default route, + # and only the mac of neighbor 1 exists in the arp table. + # The remote ptf intf will take the value of neighbor 1 + # because the packet to remote PA will be forwarded to the ptf port corresponding to neighbor 1. 
+ fake_neighbor_2_ip = '10.0.2.2' + fake_neighbor_2_prefix = "10.0.2.0/24" + dash_info[REMOTE_PA_IP] = fake_neighbor_2_ip + dash_info[REMOTE_PTF_INTF] = dash_info[LOCAL_PTF_INTF] + dash_info[REMOTE_PTF_MAC] = dash_info[LOCAL_PTF_MAC] + dash_info[REMOTE_PA_PREFIX] = fake_neighbor_2_prefix + break elif REMOTE_PA_IP not in dash_info: dash_info[REMOTE_PA_IP] = neigh_ip intf, intf_ip = get_intf_from_ip(config['local_addr'], config_facts) @@ -140,7 +159,7 @@ def dash_config_info(duthost, config_facts, minigraph_facts): @pytest.fixture(scope="function") -def apply_config(duthost, ptfhost, skip_config, skip_cleanup): +def apply_config(localhost, duthost, ptfhost, skip_config, skip_cleanup): configs = [] op = "SET" @@ -155,7 +174,7 @@ def _apply_config(config_info): dest_path = "/tmp/{}.json".format(config) render_template_to_host(template_name, duthost, dest_path, config_info, op=op) if ENABLE_GNMI_API: - apply_gnmi_file(duthost, ptfhost, dest_path) + apply_gnmi_file(localhost, duthost, ptfhost, dest_path) else: apply_swssconfig_file(duthost, dest_path) @@ -186,11 +205,14 @@ def apply_inbound_configs(dash_inbound_configs, apply_config): @pytest.fixture(scope="function") -def dash_outbound_configs(dash_config_info, use_underlay_route, minigraph_facts): +def dash_outbound_configs(dash_config_info, use_underlay_route, minigraph_facts, tbinfo): if use_underlay_route: dash_config_info[REMOTE_PA_IP] = u"30.30.30.30" dash_config_info[REMOTE_PA_PREFIX] = "30.30.30.30/32" - dash_config_info[REMOTE_PTF_INTF] = list(minigraph_facts["minigraph_ptf_indices"].values()) + if tbinfo["topo"]["name"] == "dpu-1": + dash_config_info[REMOTE_PTF_INTF] = [dash_config_info[REMOTE_PTF_INTF]] + else: + dash_config_info[REMOTE_PTF_INTF] = list(minigraph_facts["minigraph_ptf_indices"].values()) else: dash_config_info[REMOTE_PTF_INTF] = [dash_config_info[REMOTE_PTF_INTF]] @@ -241,3 +263,8 @@ def _check_asic_db(tables): output = duthost.shell("sonic-db-cli ASIC_DB keys 'ASIC_STATE:{}:*'".format(table)) 
assert output["stdout"].strip() != "", "No entries found in ASIC_DB table {}".format(table) yield _check_asic_db + + +@pytest.fixture(scope="function", params=['udp', 'tcp', 'echo_request', 'echo_reply']) +def inner_packet_type(request): + return request.param diff --git a/tests/dash/constants.py b/tests/dash/constants.py index 0f2b4020276..d5105c1001d 100644 --- a/tests/dash/constants.py +++ b/tests/dash/constants.py @@ -37,4 +37,8 @@ ACL_DST_ADDR = "dst_addr" ACL_SRC_PORT = "src_port" ACL_DST_PORT = "dst_port" +ACL_SRC_TAG = "src_tag" +ACL_DST_TAG = "dst_tag" ACL_PROTOCOL = "protocol" +ACL_TAG = "acl_tag" +ACL_PREFIX_LIST = "prefix_list" diff --git a/tests/dash/crm/test_dash_crm.py b/tests/dash/crm/test_dash_crm.py index c504536b5ae..81d2e9e57cd 100644 --- a/tests/dash/crm/test_dash_crm.py +++ b/tests/dash/crm/test_dash_crm.py @@ -62,7 +62,7 @@ def default_crm_facts(duthost, set_polling_interval): @pytest.fixture(scope="class") -def apply_resources_configs(default_crm_facts, duthost, ptfhost): +def apply_resources_configs(default_crm_facts, localhost, duthost, ptfhost): """ Apply CRM configuration before run test :param default_crm_facts: CRM resources data collected before apply config @@ -74,12 +74,12 @@ def apply_resources_configs(default_crm_facts, duthost, ptfhost): src_path = os.path.join(os.path.abspath(""), "dash/crm/files/{}".format(config)) duthost.copy(src=src_path, dest=config) pytest.crm_res_cleanup_required = True - apply_gnmi_file(duthost, ptfhost, set_config) + apply_gnmi_file(localhost, duthost, ptfhost, set_config) yield set_config, del_config if pytest.crm_res_cleanup_required: - apply_gnmi_file(duthost, ptfhost, del_config) + apply_gnmi_file(localhost, duthost, ptfhost, del_config) duthost.shell("rm -f {}".format(set_config)) duthost.shell("rm -f {}".format(del_config)) @@ -108,9 +108,10 @@ def cleanup(duthost): class TestDashCRM: @pytest.fixture(autouse=True) - def setup(self, duthost, ptfhost, default_crm_facts, apply_resources_configs): + 
def setup(self, localhost, duthost, ptfhost, default_crm_facts, apply_resources_configs): self.duthost = duthost self.ptfhost = ptfhost + self.localhost = localhost self.default_crm_facts = default_crm_facts self.crm_facts = self.duthost.get_crm_facts() self.set_config, self.del_config = apply_resources_configs @@ -302,7 +303,7 @@ def test_dash_crm_cleanup(self): """ Validate that after cleanup CRM resources - CRM output the same as it was before test case(without config) """ - apply_gnmi_file(self.duthost, self.ptfhost, self.del_config) + apply_gnmi_file(self.localhost, self.duthost, self.ptfhost, self.del_config) pytest.crm_res_cleanup_required = False time.sleep(CRM_UPDATE_TIME) diff --git a/tests/dash/dash_acl.py b/tests/dash/dash_acl.py index aab5e34a4e6..fbe46d1fb14 100644 --- a/tests/dash/dash_acl.py +++ b/tests/dash/dash_acl.py @@ -3,10 +3,11 @@ import abc import logging import time +import ipaddress +import random from collections.abc import Iterable - from constants import * # noqa: F403 -from dash_utils import render_template_to_host +from dash_utils import render_template from gnmi_utils import apply_gnmi_file import packets import ptf.testutils as testutils @@ -15,23 +16,29 @@ ACL_GROUP_TEMPLATE = "dash_acl_group" ACL_RULE_TEMPLATE = "dash_acl_rule" +ACL_TAG_TEMPLATE = "dash_acl_tag" BIND_ACL_IN = "dash_bind_acl_in" BIND_ACL_OUT = "dash_bind_acl_out" DEFAULT_ACL_GROUP = "default_acl_group" +SRC_IP_RANGE = ['24.0.0.0', '24.255.255.255'] +BASE_SRC_SCALE_IP = '8.0.0.0' +SCALE_TAGS = 4096 +SCALE_TAG_IPS = 1 +WAIT_AFTER_CONFIG = 5 -def apply_acl_config(duthost, ptfhost, template_name, acl_config_info, op): +def apply_acl_config(localhost, duthost, ptfhost, template_name, acl_config_info, op): template_file = "{}.j2".format(template_name) - dest_path = "/tmp/{}.json".format(template_name) - render_template_to_host(template_file, duthost, dest_path, acl_config_info, op=op) + config_json = render_template(template_file, acl_config_info, op=op) # 
apply_swssconfig_file(duthost, dest_path) - apply_gnmi_file(duthost, ptfhost, dest_path) + apply_gnmi_file(localhost, duthost, ptfhost, config_json=config_json, wait_after_apply=0) class AclGroup(object): - def __init__(self, duthost, ptfhost, acl_group, eni, ip_version="ipv4"): + def __init__(self, localhost, duthost, ptfhost, acl_group, eni, ip_version="ipv4"): self.duthost = duthost self.ptfhost = ptfhost + self.localhost = localhost self.acl_group = acl_group self.eni = eni self.ip_version = ip_version @@ -39,10 +46,10 @@ def __init__(self, duthost, ptfhost, acl_group, eni, ip_version="ipv4"): ACL_GROUP: self.acl_group, IP_VERSION: self.ip_version } - apply_acl_config(self.duthost, self.ptfhost, ACL_GROUP_TEMPLATE, self.group_conf, op="SET") + apply_acl_config(self.localhost, self.duthost, self.ptfhost, ACL_GROUP_TEMPLATE, self.group_conf, op="SET") def __del__(self): - apply_acl_config(self.duthost, self.ptfhost, ACL_GROUP_TEMPLATE, self.group_conf, op="DEL") + apply_acl_config(self.localhost, self.duthost, self.ptfhost, ACL_GROUP_TEMPLATE, self.group_conf, op="DEL") def bind(self, stage): self.stage = stage @@ -51,12 +58,28 @@ def bind(self, stage): ACL_GROUP: self.acl_group, ACL_STAGE: self.stage, } - apply_acl_config(self.duthost, self.ptfhost, BIND_ACL_OUT, self.bind_conf, op="SET") - apply_acl_config(self.duthost, self.ptfhost, BIND_ACL_IN, self.bind_conf, op="SET") + apply_acl_config(self.localhost, self.duthost, self.ptfhost, BIND_ACL_OUT, self.bind_conf, op="SET") + apply_acl_config(self.localhost, self.duthost, self.ptfhost, BIND_ACL_IN, self.bind_conf, op="SET") def unbind(self): - apply_acl_config(self.duthost, self.ptfhost, BIND_ACL_OUT, self.bind_conf, op="DEL") - apply_acl_config(self.duthost, self.ptfhost, BIND_ACL_IN, self.bind_conf, op="DEL") + apply_acl_config(self.localhost, self.duthost, self.ptfhost, BIND_ACL_OUT, self.bind_conf, op="DEL") + apply_acl_config(self.localhost, self.duthost, self.ptfhost, BIND_ACL_IN, self.bind_conf, op="DEL") 
+ + +class AclTag(object): + def __init__(self, localhost, duthost, ptfhost, acl_tag, acl_prefix_list, ip_version="ipv4"): + self.duthost = duthost + self.ptfhost = ptfhost + self.localhost = localhost + self.tag_conf = { + ACL_TAG: acl_tag, + IP_VERSION: ip_version, + ACL_PREFIX_LIST: acl_prefix_list + } + apply_acl_config(self.localhost, self.duthost, self.ptfhost, ACL_TAG_TEMPLATE, self.tag_conf, op="SET") + + def __del__(self): + apply_acl_config(self.localhost, self.duthost, self.ptfhost, ACL_TAG_TEMPLATE, self.tag_conf, op="DEL") class AclTestPacket(object): @@ -75,10 +98,11 @@ def get_description(self): class AclTestCase(object): - def __init__(self, duthost, ptfhost, dash_config_info): + def __init__(self, localhost, duthost, ptfhost, dash_config_info): __metaclass__ = abc.ABCMeta # noqa: F841 self.duthost = duthost self.ptfhost = ptfhost + self.localhost = localhost self.dash_config_info = dash_config_info self.test_pkts = [] @@ -90,16 +114,23 @@ def config(self): def teardown(self): pass + def get_random_ip(self): + """ + Generate a random IP from ip range + """ + length = int(ipaddress.ip_address(SRC_IP_RANGE[1])) - int(ipaddress.ip_address(SRC_IP_RANGE[0])) + return str(ipaddress.ip_address(SRC_IP_RANGE[0]) + random.randint(0, length)) + class AclRuleTest(AclTestCase): - def __init__(self, duthost, ptfhost, dash_config_info, default_action="deny"): - super(AclRuleTest, self).__init__(duthost, ptfhost, dash_config_info) + def __init__(self, localhost, duthost, ptfhost, dash_config_info, default_action="deny"): + super(AclRuleTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) self.default_action = default_action self.rule_confs = [] def add_rule(self, rule_conf): rule_conf[ACL_RULE] = self.__class__.__name__ + "_" + rule_conf[ACL_RULE] - apply_acl_config(self.duthost, self.ptfhost, ACL_RULE_TEMPLATE, rule_conf, op="SET") + apply_acl_config(self.localhost, self.duthost, self.ptfhost, ACL_RULE_TEMPLATE, rule_conf, op="SET") 
self.rule_confs.append(rule_conf) def add_test_pkt(self, test_pkt): @@ -108,13 +139,13 @@ def add_test_pkt(self, test_pkt): def teardown(self): for rule_conf in self.rule_confs: - apply_acl_config(self.duthost, self.ptfhost, ACL_RULE_TEMPLATE, rule_conf, op="DEL") + apply_acl_config(self.localhost, self.duthost, self.ptfhost, ACL_RULE_TEMPLATE, rule_conf, op="DEL") self.rule_confs = [] class DefaultAclRule(AclRuleTest): - def __init__(self, duthost, ptfhost, dash_config_info, default_action): - super(DefaultAclRule, self).__init__(duthost, ptfhost, dash_config_info, default_action) + def __init__(self, localhost, duthost, ptfhost, dash_config_info, default_action): + super(DefaultAclRule, self).__init__(localhost, duthost, ptfhost, dash_config_info, default_action) self.acl_group = DEFAULT_ACL_GROUP def config(self): @@ -124,13 +155,13 @@ def config(self): ACL_PRIORITY: 255, ACL_ACTION: self.default_action, ACL_TERMINATING: "false", - ACL_PROTOCOL: "17", + ACL_PROTOCOL: "17, 6, 1", }) class AclPriorityTest(AclRuleTest): - def __init__(self, duthost, ptfhost, dash_config_info, default_action): - super(AclPriorityTest, self).__init__(duthost, ptfhost, dash_config_info, default_action) + def __init__(self, localhost, duthost, ptfhost, dash_config_info, default_action): + super(AclPriorityTest, self).__init__(localhost, duthost, ptfhost, dash_config_info, default_action) self.acl_group = DEFAULT_ACL_GROUP self.src_ip = "10.0.0.2" self.src_ip_prefix = self.src_ip + "/32" @@ -189,8 +220,8 @@ def config(self): class AclProtocolTest(AclRuleTest): - def __init__(self, duthost, ptfhost, dash_config_info, default_action): - super(AclProtocolTest, self).__init__(duthost, ptfhost, dash_config_info, default_action) + def __init__(self, localhost, duthost, ptfhost, dash_config_info, default_action): + super(AclProtocolTest, self).__init__(localhost, duthost, ptfhost, dash_config_info, default_action) self.acl_group = DEFAULT_ACL_GROUP self.src_ip = "0.0.0.0" self.src_ip_prefix = 
self.src_ip + "/0" @@ -223,8 +254,8 @@ def config(self): class AclAddressTest(AclRuleTest): - def __init__(self, duthost, ptfhost, dash_config_info, default_action): - super(AclAddressTest, self).__init__(duthost, ptfhost, dash_config_info, default_action) + def __init__(self, localhost, duthost, ptfhost, dash_config_info, default_action): + super(AclAddressTest, self).__init__(localhost, duthost, ptfhost, dash_config_info, default_action) self.acl_group = DEFAULT_ACL_GROUP def config(self): @@ -308,8 +339,8 @@ def config(self): class AclPortTest(AclRuleTest): - def __init__(self, duthost, ptfhost, dash_config_info, default_action): - super(AclPortTest, self).__init__(duthost, ptfhost, dash_config_info, default_action) + def __init__(self, localhost, duthost, ptfhost, dash_config_info, default_action): + super(AclPortTest, self).__init__(localhost, duthost, ptfhost, dash_config_info, default_action) self.acl_group = DEFAULT_ACL_GROUP self.src_ip = "10.0.0.2" self.src_ip_prefix = self.src_ip + "/32" @@ -382,24 +413,340 @@ def config(self): expected_receiving=False)) +class AclTagTest(AclRuleTest): + def __init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclTagTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) + self.ptfhost = ptfhost + self.acl_group = DEFAULT_ACL_GROUP + self.src_ip1 = self.get_random_ip() + self.src_ip2 = self.get_random_ip() + self.src_ip_prefix1 = self.src_ip1 + "/32" + self.src_ip_prefix2 = self.src_ip2 + "/32" + + def config(self): + self.acl_tag = AclTag(self.localhost, self.duthost, self.ptfhost, "AclTag", + [",".join([self.src_ip_prefix1, self.src_ip_prefix2])]) + self.add_rule({ + ACL_GROUP: self.acl_group, + ACL_RULE: "allow_tag", + ACL_PRIORITY: 1, + ACL_ACTION: "allow", + ACL_TERMINATING: "true", + ACL_PROTOCOL: "17", + ACL_SRC_TAG: "AclTag1", + ACL_SRC_PORT: "13" + }) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip1 + 
self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 13}, + expected_receiving=True)) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip2 + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 13}, + expected_receiving=True)) + + def teardown(self): + super(AclTagTest, self).teardown() + del self.acl_tag + + +class AclMultiTagTest(AclRuleTest): + def __init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclMultiTagTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) + self.ptfhost = ptfhost + self.acl_group = DEFAULT_ACL_GROUP + self.src_ip1 = self.get_random_ip() + self.src_ip2 = self.get_random_ip() + self.src_ip_prefix1 = self.src_ip1 + "/32" + self.src_ip_prefix2 = self.src_ip2 + "/32" + + def config(self): + self.acl_tag = AclTag(self.localhost, self.duthost, self.ptfhost, "AclMultiTag", + [self.src_ip_prefix1, self.src_ip_prefix2]) + self.add_rule({ + ACL_GROUP: self.acl_group, + ACL_RULE: "allow_multi_tag", + ACL_PRIORITY: 1, + ACL_ACTION: "allow", + ACL_TERMINATING: "true", + ACL_PROTOCOL: "17", + ACL_SRC_TAG: "AclMultiTag1,AclMultiTag2", + ACL_SRC_PORT: "15" + }) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip1 + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 15}, + expected_receiving=True)) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip2 + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 15}, + expected_receiving=True)) + + def teardown(self): + super(AclMultiTagTest, self).teardown() + del self.acl_tag + + +class AclTagNotExistsTest(AclRuleTest): + def __init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclTagNotExistsTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) + self.ptfhost = ptfhost + 
self.acl_group = DEFAULT_ACL_GROUP + self.acl_tag = None + self.src_ip = self.get_random_ip() + + def config(self): + self.add_rule({ + ACL_GROUP: self.acl_group, + ACL_RULE: "allow_tag_order", + ACL_PRIORITY: 1, + ACL_ACTION: "allow", + ACL_TERMINATING: "true", + ACL_PROTOCOL: "17", + ACL_SRC_TAG: "AclTagOrder1", + ACL_SRC_PORT: "17" + }) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 17}, + expected_receiving=False)) + + +class AclTagOrderTest(AclRuleTest): + def __init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclTagOrderTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) + self.ptfhost = ptfhost + self.acl_group = DEFAULT_ACL_GROUP + self.acl_tag = None + self.src_ip = self.get_random_ip() + self.src_ip_prefix = self.src_ip + "/32" + + def config(self): + self.add_rule({ + ACL_GROUP: self.acl_group, + ACL_RULE: "allow_tag_order", + ACL_PRIORITY: 1, + ACL_ACTION: "allow", + ACL_TERMINATING: "true", + ACL_PROTOCOL: "17", + ACL_SRC_TAG: "AclTagOrder1", + ACL_SRC_PORT: "17" + }) + self.acl_tag = AclTag(self.localhost, self.duthost, self.ptfhost, "AclTagOrder", [self.src_ip_prefix]) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 17}, + expected_receiving=True)) + + def teardown(self): + del self.acl_tag + super(AclTagOrderTest, self).teardown() + + +class AclMultiTagOrderTest(AclRuleTest): + def __init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclMultiTagOrderTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) + self.ptfhost = ptfhost + self.acl_group = DEFAULT_ACL_GROUP + self.src_ip1 = self.get_random_ip() + self.src_ip2 = self.get_random_ip() + self.src_ip_prefix1 = self.src_ip1 + "/32" + self.src_ip_prefix2 = 
self.src_ip2 + "/32" + + def config(self): + self.add_rule({ + ACL_GROUP: self.acl_group, + ACL_RULE: "allow_multi_tag_order", + ACL_PRIORITY: 1, + ACL_ACTION: "allow", + ACL_TERMINATING: "true", + ACL_PROTOCOL: "17", + ACL_SRC_TAG: "AclMultiTagOrder1,AclMultiTagOrder2", + ACL_SRC_PORT: "18" + }) + self.acl_tag = AclTag( + self.localhost, self.duthost, self.ptfhost, "AclMultiTagOrder", [self.src_ip_prefix1, self.src_ip_prefix2]) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip1 + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 18}, + expected_receiving=True)) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip2 + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 18}, + expected_receiving=True)) + + def teardown(self): + del self.acl_tag + super(AclMultiTagOrderTest, self).teardown() + + +class AclTagUpdateIpTest(AclRuleTest): + def __init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclTagUpdateIpTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) + self.ptfhost = ptfhost + self.acl_group = DEFAULT_ACL_GROUP + self.src_ip1 = self.get_random_ip() + self.src_ip2 = self.get_random_ip() + self.src_ip_prefix1 = self.src_ip1 + "/32" + self.src_ip_prefix2 = self.src_ip2 + "/32" + + def config(self): + self.acl_tag1 = AclTag(self.localhost, self.duthost, self.ptfhost, "AclTagUpdateIp", [self.src_ip_prefix1]) + self.add_rule({ + ACL_GROUP: self.acl_group, + ACL_RULE: "allow_update_ip_tag", + ACL_PRIORITY: 1, + ACL_ACTION: "allow", + ACL_TERMINATING: "true", + ACL_PROTOCOL: "17", + ACL_SRC_TAG: "AclTagUpdateIp1", + ACL_SRC_PORT: "19" + }) + self.acl_tag2 = AclTag(self.localhost, self.duthost, self.ptfhost, "AclTagUpdateIp", [self.src_ip_prefix2]) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip1 + 
self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 19}, + expected_receiving=False)) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip2 + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 19}, + expected_receiving=True)) + + def teardown(self): + super(AclTagUpdateIpTest, self).teardown() + del self.acl_tag1 + del self.acl_tag2 + + +class AclTagRemoveIpTest(AclRuleTest): + def __init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclTagRemoveIpTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) + self.ptfhost = ptfhost + self.acl_group = DEFAULT_ACL_GROUP + self.src_ip1 = self.get_random_ip() + self.src_ip2 = self.get_random_ip() + self.src_ip_prefix1 = self.src_ip1 + "/32" + self.src_ip_prefix2 = self.src_ip2 + "/32" + + def config(self): + self.acl_tag1 = AclTag(self.localhost, self.duthost, self.ptfhost, "AclTagRemoveIp", + [",".join([self.src_ip_prefix1, self.src_ip_prefix2])]) + self.add_rule({ + ACL_GROUP: self.acl_group, + ACL_RULE: "allow_remove_ip_tag", + ACL_PRIORITY: 1, + ACL_ACTION: "allow", + ACL_TERMINATING: "true", + ACL_PROTOCOL: "17", + ACL_SRC_TAG: "AclTagRemoveIp1", + ACL_SRC_PORT: "20" + }) + self.acl_tag2 = AclTag(self.localhost, self.duthost, self.ptfhost, "AclTagRemoveIp", [self.src_ip_prefix1]) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip1 + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 20}, + expected_receiving=True)) + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = self.src_ip2 + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 20}, + expected_receiving=False)) + + def teardown(self): + super(AclTagRemoveIpTest, self).teardown() + del self.acl_tag1 + del self.acl_tag2 + + +class AclTagScaleTest(AclRuleTest): + def 
__init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclTagScaleTest, self).__init__(localhost, duthost, ptfhost, dash_config_info) + self.ptfhost = ptfhost + self.acl_group = DEFAULT_ACL_GROUP + self.ip_list = self.random_scale_ip_list() + self.src_ip_list = random.choices(self.ip_list, k=100) + self.src_ip_prefix_list = self.get_scale_prefixes_list() + self.tag_names_list = ",".join(["AclTagScale{}".format(tag_num) for tag_num in range(1, SCALE_TAGS+1)]) + + def config(self): + self.acl_tag = AclTag(self.localhost, self.duthost, self.ptfhost, "AclTagScale", self.src_ip_prefix_list) + self.add_rule({ + ACL_GROUP: self.acl_group, + ACL_RULE: "allow_scale_tag", + ACL_PRIORITY: 1, + ACL_ACTION: "allow", + ACL_TERMINATING: "true", + ACL_PROTOCOL: "17", + ACL_SRC_TAG: self.tag_names_list, + ACL_SRC_PORT: "21" + }) + for src_ip in self.src_ip_list: + dash_config_info = copy.deepcopy(self.dash_config_info) + dash_config_info[LOCAL_CA_IP] = src_ip + self.add_test_pkt(AclTestPacket(dash_config_info, + inner_extra_conf={"udp_sport": 21}, + expected_receiving=True)) + + def teardown(self): + super(AclTagScaleTest, self).teardown() + del self.acl_tag + + @staticmethod + def random_scale_ip_list(ip_type='ipv4'): + ip_list = [] + if ip_type == 'ipv4': + address_type = ipaddress.IPv4Address + else: + address_type = ipaddress.IPv6Address + first_ip = address_type(BASE_SRC_SCALE_IP) + last_ip = first_ip + (SCALE_TAGS * SCALE_TAG_IPS) - 1 + summarized_range = ipaddress.summarize_address_range(first_ip, last_ip) + for subnet in summarized_range: + for ip_address in subnet: + ip_list.append(str(ip_address)) + random.shuffle(ip_list) + return ip_list + + def get_scale_prefixes_list(self): + prefixes_list = [] + begin_index = 0 + for _ in range(SCALE_TAGS): + end_index = begin_index + SCALE_TAG_IPS + ip_list = self.ip_list[begin_index:end_index] + prefixes_list.append("/32,".join(ip_list) + "/32") + begin_index += SCALE_TAG_IPS + return prefixes_list + + 
@pytest.fixture(scope="function", params=["allow", "deny"]) -def acl_fields_test(request, apply_vnet_configs, duthost, ptfhost, dash_config_info): +def acl_fields_test(request, apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): testcases = [] - default_acl_group = AclGroup(duthost, ptfhost, DEFAULT_ACL_GROUP, dash_config_info[ENI]) + default_acl_group = AclGroup(localhost, duthost, ptfhost, DEFAULT_ACL_GROUP, dash_config_info[ENI]) default_action = request.param - default_acl_rule = DefaultAclRule(duthost, ptfhost, dash_config_info, default_action) - default_action = default_acl_rule.default_action + default_acl_rule = DefaultAclRule(localhost, duthost, ptfhost, dash_config_info, default_action) testcases.append(default_acl_rule) - testcases.append(AclPriorityTest(duthost, ptfhost, dash_config_info, default_action)) - testcases.append(AclProtocolTest(duthost, ptfhost, dash_config_info, default_action)) - testcases.append(AclAddressTest(duthost, ptfhost, dash_config_info, default_action)) - testcases.append(AclPortTest(duthost, ptfhost, dash_config_info, default_action)) + testcases.append(AclPriorityTest(localhost, duthost, ptfhost, dash_config_info, default_action)) + testcases.append(AclProtocolTest(localhost, duthost, ptfhost, dash_config_info, default_action)) + testcases.append(AclAddressTest(localhost, duthost, ptfhost, dash_config_info, default_action)) + testcases.append(AclPortTest(localhost, duthost, ptfhost, dash_config_info, default_action)) for t in testcases: t.config() default_acl_group.bind(1) + time.sleep(WAIT_AFTER_CONFIG) yield testcases @@ -407,6 +754,115 @@ def acl_fields_test(request, apply_vnet_configs, duthost, ptfhost, dash_config_i for t in reversed(testcases): t.teardown() del default_acl_group + time.sleep(WAIT_AFTER_CONFIG) + + +def acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, testcase): + testcases = [] + + default_acl_group = AclGroup(localhost, duthost, ptfhost, DEFAULT_ACL_GROUP, 
dash_config_info[ENI]) + default_acl_rule = DefaultAclRule(localhost, duthost, ptfhost, dash_config_info, 'deny') + testcases.append(default_acl_rule) + testcases.append(testcase) + + for t in testcases: + t.config() + default_acl_group.bind(1) + time.sleep(WAIT_AFTER_CONFIG) + return testcases, default_acl_group + + +def acl_tag_test_teardown(default_acl_group, testcases): + default_acl_group.unbind() + for t in reversed(testcases): + t.teardown() + del default_acl_group + time.sleep(WAIT_AFTER_CONFIG) + + +@pytest.fixture(scope="function") +def acl_tag_test(apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): + testcases, default_acl_group = acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, + AclTagTest(localhost, duthost, ptfhost, dash_config_info)) + + yield testcases + + acl_tag_test_teardown(default_acl_group, testcases) + + +@pytest.fixture(scope="function") +def acl_multi_tag_test(apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): + testcases, default_acl_group = acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, + AclMultiTagTest(localhost, duthost, ptfhost, dash_config_info)) + + yield testcases + + acl_tag_test_teardown(default_acl_group, testcases) + + +@pytest.fixture(scope="function") +def acl_tag_not_exists_test(apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): + testcases, default_acl_group = acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, + AclTagNotExistsTest( + localhost, duthost, ptfhost, dash_config_info)) + + yield testcases + + acl_tag_test_teardown(default_acl_group, testcases) + + +@pytest.fixture(scope="function") +def acl_tag_order_test(apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): + testcases, default_acl_group = acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, + AclTagOrderTest(localhost, duthost, ptfhost, dash_config_info)) + + yield testcases + + acl_tag_test_teardown(default_acl_group, 
testcases) + + +@pytest.fixture(scope="function") +def acl_multi_tag_order_test(apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): + testcases, default_acl_group = acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, + AclMultiTagOrderTest(localhost, duthost, + ptfhost, dash_config_info)) + + yield testcases + + acl_tag_test_teardown(default_acl_group, testcases) + + +@pytest.fixture(scope="function") +def acl_tag_update_ip_test(apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): + testcases, default_acl_group = acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, + AclTagUpdateIpTest(localhost, duthost, + ptfhost, dash_config_info)) + + yield testcases + + acl_tag_test_teardown(default_acl_group, testcases) + + +@pytest.fixture(scope="function") +def acl_tag_remove_ip_test(apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): + testcases, default_acl_group = acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, + AclTagRemoveIpTest(localhost, duthost, + ptfhost, dash_config_info)) + + yield testcases + + acl_tag_test_teardown(default_acl_group, testcases) + + +@pytest.fixture(scope="function") +def acl_tag_scale_test(apply_vnet_configs, localhost, duthost, ptfhost, dash_config_info): + testcases, default_acl_group = acl_tag_test_config(localhost, duthost, ptfhost, dash_config_info, + AclTagScaleTest(localhost, duthost, + ptfhost, dash_config_info)) + + yield testcases + + acl_tag_test_teardown(default_acl_group, testcases) STAGE_1_ACL_GROUP = "stage_1_acl_group" @@ -414,8 +870,8 @@ def acl_fields_test(request, apply_vnet_configs, duthost, ptfhost, dash_config_i class AclMultiStageTest(AclRuleTest): - def __init__(self, duthost, ptfhost, dash_config_info): - super(AclMultiStageTest, self).__init__(duthost, ptfhost, dash_config_info) + def __init__(self, localhost, duthost, ptfhost, dash_config_info): + super(AclMultiStageTest, self).__init__(localhost, duthost, ptfhost, 
dash_config_info) self.acl_group_1 = STAGE_1_ACL_GROUP self.acl_group_2 = STAGE_2_ACL_GROUP self.src_ip = "10.0.0.2" @@ -528,14 +984,15 @@ def config(self): @pytest.fixture(scope="function") -def acl_multi_stage_test(duthost, apply_vnet_configs, ptfhost, dash_config_info): - group_1 = AclGroup(duthost, ptfhost, STAGE_1_ACL_GROUP, dash_config_info[ENI]) - group_2 = AclGroup(duthost, ptfhost, STAGE_2_ACL_GROUP, dash_config_info[ENI]) - test = AclMultiStageTest(duthost, ptfhost, dash_config_info) +def acl_multi_stage_test(localhost, duthost, apply_vnet_configs, ptfhost, dash_config_info): + group_1 = AclGroup(localhost, duthost, ptfhost, STAGE_1_ACL_GROUP, dash_config_info[ENI]) + group_2 = AclGroup(localhost, duthost, ptfhost, STAGE_2_ACL_GROUP, dash_config_info[ENI]) + test = AclMultiStageTest(localhost, duthost, ptfhost, dash_config_info) test.config() group_1.bind(1) group_2.bind(2) + time.sleep(WAIT_AFTER_CONFIG) yield test @@ -544,6 +1001,7 @@ def acl_multi_stage_test(duthost, apply_vnet_configs, ptfhost, dash_config_info) test.teardown() del group_1 del group_2 + time.sleep(WAIT_AFTER_CONFIG) def check_dataplane(ptfadapter, testcases): diff --git a/tests/dash/dash_utils.py b/tests/dash/dash_utils.py index 2006e2ab613..d37bc558a88 100644 --- a/tests/dash/dash_utils.py +++ b/tests/dash/dash_utils.py @@ -67,6 +67,21 @@ def render_template_to_host(template_name, host, dest_file, *template_args, **te host.copy(content=rendered, dest=dest_file) +def render_template(template_name, *template_args, **template_kwargs): + """ + Renders a template with the given arguments and copies it to the host + + Args: + template_name: A template inside the "templates" folder (without the preceding "templates/") + *template_args: Any arguments to be passed to j2 during rendering + **template_kwargs: Any keyword arguments to be passed to j2 during rendering + """ + + combined_args = combine_dicts(*template_args) + + return safe_open_template(path.join(TEMPLATE_DIR, 
template_name)).render(combined_args, **template_kwargs) + + def apply_swssconfig_file(duthost, file_path): """ Copies config file from the DUT host to the SWSS docker and applies them with swssconfig diff --git a/tests/dash/gnmi_utils.py b/tests/dash/gnmi_utils.py index d958fed41c7..68860d61ee3 100644 --- a/tests/dash/gnmi_utils.py +++ b/tests/dash/gnmi_utils.py @@ -2,6 +2,7 @@ import json import time import uuid +import math from functools import lru_cache import pytest @@ -322,7 +323,8 @@ def gnmi_get(duthost, ptfhost, path_list): raise Exception("error:" + msg) -def apply_gnmi_file(duthost, ptfhost, dest_path): +def apply_gnmi_file(localhost, duthost, ptfhost, dest_path=None, config_json=None, + wait_after_apply=5, max_updates_in_single_cmd=1024): """ Apply dash configuration with gnmi client @@ -330,16 +332,22 @@ def apply_gnmi_file(duthost, ptfhost, dest_path): duthost: fixture for duthost ptfhost: fixture for ptfhost dest_path: configuration file path - + config_json: configuration in json + wait_after_apply: the seconds to wait after gNMI file applied + max_updates_in_single_cmd: threshold to separate the updates into multiple gnmi calls for linux command + length is limited Returns: """ env = GNMIEnvironment(duthost) - logger.info("Applying config files on DUT") - dut_command = "cat %s" % dest_path - ret = duthost.shell(dut_command) - assert ret["rc"] == 0, "Failed to read config file" - text = ret["stdout"] - res = json.loads(text) + if dest_path: + logger.info("Applying config files on DUT") + dut_command = "cat %s" % dest_path + ret = duthost.shell(dut_command) + assert ret["rc"] == 0, "Failed to read config file" + text = ret["stdout"] + res = json.loads(text) + elif config_json: + res = json.loads(config_json) delete_list = [] update_list = [] update_cnt = 0 @@ -359,13 +367,12 @@ def apply_gnmi_file(duthost, ptfhost, dest_path): text = json.dumps(v) with open(env.work_dir+filename, "w") as file: file.write(text) - 
ptfhost.copy(src=env.work_dir+filename, dest='/root/') keys = k.split(":", 1) k = keys[0] + "[key=" + keys[1] + "]" if proto_utils.ENABLE_PROTO: - path = "/APPL_DB/%s:$/root/%s" % (k, filename) + path = "/APPL_DB/localhost/%s:$/root/%s" % (k, filename) else: - path = "/APPL_DB/%s:@/root/%s" % (k, filename) + path = "/APPL_DB/localhost/%s:@/root/%s" % (k, filename) update_list.append(path) elif operation["OP"] == "DEL": for k, v in operation.items(): @@ -373,9 +380,33 @@ def apply_gnmi_file(duthost, ptfhost, dest_path): continue keys = k.split(":", 1) k = keys[0] + "[key=" + keys[1] + "]" - path = "/APPL_DB/%s" % (k) + path = "/APPL_DB/localhost/%s" % (k) delete_list.append(path) else: logger.info("Invalid operation %s" % operation["OP"]) - gnmi_set(duthost, ptfhost, delete_list, update_list, []) - time.sleep(5) + localhost.shell(f'tar -zcvf /tmp/updates.tar.gz -C {env.work_dir} .') + ptfhost.copy(src='/tmp/updates.tar.gz', dest='~') + ptfhost.shell('tar -xf updates.tar.gz') + + def _devide_list(operation_list): + list_group = [] + for i in range(math.ceil(len(operation_list) / max_updates_in_single_cmd)): + start_index = max_updates_in_single_cmd * i + end_index = max_updates_in_single_cmd * (i + 1) + list_group.append(operation_list[start_index:end_index]) + return list_group + + if delete_list: + delete_list_group = _devide_list(delete_list) + for delete_list in delete_list_group: + gnmi_set(duthost, ptfhost, delete_list, [], []) + if update_list: + update_list_group = _devide_list(update_list) + for update_list in update_list_group: + gnmi_set(duthost, ptfhost, [], update_list, []) + + localhost.shell('rm -f /tmp/updates.tar.gz') + ptfhost.shell('rm -f updates.tar.gz') + localhost.shell(f'rm -f {env.work_dir}update*') + ptfhost.shell('rm -f update*') + time.sleep(wait_after_apply) diff --git a/tests/dash/packets.py b/tests/dash/packets.py index 5bbe17f0810..00c26cba208 100644 --- a/tests/dash/packets.py +++ b/tests/dash/packets.py @@ -7,13 +7,32 @@ from 
constants import * # noqa: F403 -def inbound_vnet_packets(dash_config_info): - inner_packet = testutils.simple_udp_packet( +def generate_inner_packet(packet_type): + if packet_type == 'udp': + return testutils.simple_udp_packet + elif packet_type == 'tcp': + return testutils.simple_tcp_packet + elif packet_type == 'echo_request' or packet_type == 'echo_reply': + return testutils.simple_icmp_packet + + return None + + +def set_icmp_sub_type(packet, packet_type): + if packet_type == 'echo_request': + packet[scapy.ICMP].type = 8 + elif packet_type == 'echo_reply': + packet[scapy.ICMP].type = 0 + + +def inbound_vnet_packets(dash_config_info, inner_packet_type='udp'): + inner_packet = generate_inner_packet(inner_packet_type)( eth_src=dash_config_info[REMOTE_ENI_MAC], eth_dst=dash_config_info[LOCAL_ENI_MAC], ip_src=dash_config_info[REMOTE_CA_IP], ip_dst=dash_config_info[LOCAL_CA_IP], ) + set_icmp_sub_type(inner_packet, inner_packet_type) pa_match_vxlan_packet = testutils.simple_vxlan_packet( eth_src=dash_config_info[REMOTE_PTF_MAC], eth_dst=dash_config_info[DUT_MAC], @@ -47,19 +66,20 @@ def inbound_vnet_packets(dash_config_info): return inner_packet, pa_match_vxlan_packet, pa_mismatch_vxlan_packet, masked_exp_packet -def outbound_vnet_packets(dash_config_info, inner_extra_conf={}): +def outbound_vnet_packets(dash_config_info, inner_extra_conf={}, inner_packet_type='udp'): proto = None if "proto" in inner_extra_conf: proto = int(inner_extra_conf["proto"]) del inner_extra_conf["proto"] - inner_packet = testutils.simple_udp_packet( + inner_packet = generate_inner_packet(inner_packet_type)( eth_src=dash_config_info[LOCAL_ENI_MAC], eth_dst=dash_config_info[REMOTE_ENI_MAC], ip_src=dash_config_info[LOCAL_CA_IP], ip_dst=dash_config_info[REMOTE_CA_IP], **inner_extra_conf ) + set_icmp_sub_type(inner_packet, inner_packet_type) if proto: inner_packet[scapy.IP].proto = proto diff --git a/tests/dash/proto_utils.py b/tests/dash/proto_utils.py index f75b5cd4c2e..23a6e0b458e 100644 --- 
a/tests/dash/proto_utils.py +++ b/tests/dash/proto_utils.py @@ -17,6 +17,7 @@ from dash_api.acl_out_pb2 import AclOut from dash_api.acl_in_pb2 import AclIn from dash_api.acl_rule_pb2 import AclRule, Action +from dash_api.prefix_tag_pb2 import PrefixTag ENABLE_PROTO = True @@ -159,6 +160,24 @@ def acl_rule_from_json(json_obj): if "protocol" in json_obj: for proto in json_obj["protocol"].split(','): pb.protocol.append(int(proto)) + if "src_tag" in json_obj: + for tag in json_obj["src_tag"].split(','): + pb.src_tag.append(tag) + if "dst_tag" in json_obj: + for tag in json_obj["dst_tag"].split(','): + pb.dst_tag.append(tag) + return pb + + +def prefix_tag_from_json(json_obj): + pb = PrefixTag() + pb.ip_version = IpVersion.IP_VERSION_IPV4 + for ip_prefix in json_obj["prefix_list"].split(','): + net = ipaddress.IPv4Network(ip_prefix, False) + ip = IpPrefix() + ip.ip.ipv4 = socket.htonl(int(net.network_address)) + ip.mask.ipv4 = socket.htonl(int(net.netmask)) + pb.prefix_list.append(ip) return pb @@ -175,6 +194,7 @@ def acl_rule_from_json(json_obj): "ACL_OUT": acl_out_from_json, "ACL_IN": acl_in_from_json, "ACL_RULE": acl_rule_from_json, + "PREFIX_TAG": prefix_tag_from_json, } diff --git a/tests/dash/templates/dash_acl_rule.j2 b/tests/dash/templates/dash_acl_rule.j2 index 1b92841f1f8..1b63a250250 100644 --- a/tests/dash/templates/dash_acl_rule.j2 +++ b/tests/dash/templates/dash_acl_rule.j2 @@ -6,11 +6,15 @@ "terminating": "{{ terminating }}" {% if src_addr is defined %} ,"src_addr": "{{ src_addr }}" + {% elif src_tag is defined %} + ,"src_tag": "{{ src_tag }}" {% else %} ,"src_addr": "0.0.0.0/0" {% endif %} {% if dst_addr is defined %} ,"dst_addr": "{{ dst_addr }}" + {% elif dst_tag is defined %} + ,"dst_tag": "{{ dst_tag }}" {% else %} ,"dst_addr": "0.0.0.0/0" {% endif %} diff --git a/tests/dash/templates/dash_acl_tag.j2 b/tests/dash/templates/dash_acl_tag.j2 new file mode 100644 index 00000000000..709e48889c4 --- /dev/null +++ b/tests/dash/templates/dash_acl_tag.j2 @@ 
-0,0 +1,12 @@ +[ +{% for prefix in prefix_list %} +{% set acl_tag_name = acl_tag + loop.index|string %} + { + "DASH_PREFIX_TAG_TABLE:{{ acl_tag_name }}": { + "ip_version": "{{ip_version}}", + "prefix_list": "{{prefix}}" + }, + "OP": "{{op}}" + }{% if not loop.last %},{% endif %} +{% endfor %} +] diff --git a/tests/dash/test_dash_acl.py b/tests/dash/test_dash_acl.py index eee1f7f70e3..3a5b63d9052 100644 --- a/tests/dash/test_dash_acl.py +++ b/tests/dash/test_dash_acl.py @@ -4,7 +4,8 @@ import ptf.testutils as testutils from dash_acl import check_dataplane, acl_fields_test, acl_multi_stage_test # noqa: F401 - +from dash_acl import acl_tag_test, acl_multi_tag_test, acl_tag_order_test, acl_multi_tag_order_test # noqa: F401 +from dash_acl import acl_tag_update_ip_test, acl_tag_remove_ip_test, acl_tag_scale_test, acl_tag_not_exists_test # noqa: F401 logger = logging.getLogger(__name__) pytestmark = [ @@ -32,3 +33,91 @@ def test_acl_multi_stage( if skip_dataplane_checking: return check_dataplane(ptfadapter, acl_multi_stage_test) + + +# flake8: noqa: F811 +def test_acl_tag( + ptfadapter, + acl_tag_test, + skip_dataplane_checking + ): + if skip_dataplane_checking: + return + check_dataplane(ptfadapter, acl_tag_test) + + +# flake8: noqa: F811 +def test_acl_multi_tag( + ptfadapter, + acl_multi_tag_test, + skip_dataplane_checking + ): + if skip_dataplane_checking: + return + check_dataplane(ptfadapter, acl_multi_tag_test) + + +# flake8: noqa: F811 +def test_acl_tag_not_exists( + ptfadapter, + acl_tag_not_exists_test, + skip_dataplane_checking + ): + if skip_dataplane_checking: + return + check_dataplane(ptfadapter, acl_tag_not_exists_test) + + +# flake8: noqa: F811 +def test_acl_tag_order( + ptfadapter, + acl_tag_order_test, + skip_dataplane_checking + ): + if skip_dataplane_checking: + return + check_dataplane(ptfadapter, acl_tag_order_test) + + +# flake8: noqa: F811 +def test_acl_multi_tag_order( + ptfadapter, + acl_multi_tag_order_test, + skip_dataplane_checking + ): + if 
skip_dataplane_checking: + return + check_dataplane(ptfadapter, acl_multi_tag_order_test) + + +# flake8: noqa: F811 +def test_acl_tag_update_ip( + ptfadapter, + acl_tag_update_ip_test, + skip_dataplane_checking + ): + if skip_dataplane_checking: + return + check_dataplane(ptfadapter, acl_tag_update_ip_test) + + +# flake8: noqa: F811 +def test_acl_tag_remove_ip( + ptfadapter, + acl_tag_remove_ip_test, + skip_dataplane_checking + ): + if skip_dataplane_checking: + return + check_dataplane(ptfadapter, acl_tag_remove_ip_test) + + +# flake8: noqa: F811 +def test_acl_tag_scale( + ptfadapter, + acl_tag_scale_test, + skip_dataplane_checking + ): + if skip_dataplane_checking: + return + check_dataplane(ptfadapter, acl_tag_scale_test) diff --git a/tests/dash/test_dash_vnet.py b/tests/dash/test_dash_vnet.py index 9091c1538e8..e48d2c1fe5a 100644 --- a/tests/dash/test_dash_vnet.py +++ b/tests/dash/test_dash_vnet.py @@ -3,8 +3,10 @@ import pytest import ptf.testutils as testutils -from constants import LOCAL_PTF_INTF, REMOTE_PTF_INTF +from constants import LOCAL_PTF_INTF, REMOTE_PTF_INTF, ENI +from dash_acl import AclGroup, DEFAULT_ACL_GROUP, WAIT_AFTER_CONFIG, DefaultAclRule import packets +import time logger = logging.getLogger(__name__) @@ -14,19 +16,43 @@ ] +@pytest.fixture(scope="function") +def acl_default_rule(duthost, ptfhost, dash_config_info): + hwsku = duthost.facts['hwsku'] + hwsku_list_with_default_acl_action_deny = ['Nvidia-9009d3b600CVAA-C1', 'Nvidia-9009d3b600SVAA-C1'] + if hwsku in hwsku_list_with_default_acl_action_deny: + default_acl_group = AclGroup(duthost, ptfhost, DEFAULT_ACL_GROUP, dash_config_info[ENI]) + default_acl_rule = DefaultAclRule(duthost, ptfhost, dash_config_info, "allow") + + default_acl_rule.config() + default_acl_group.bind(1) + time.sleep(WAIT_AFTER_CONFIG) + + yield + + if hwsku in hwsku_list_with_default_acl_action_deny: + default_acl_group.unbind() + default_acl_rule.teardown() + del default_acl_group + time.sleep(WAIT_AFTER_CONFIG) + + 
def test_outbound_vnet( ptfadapter, apply_vnet_configs, dash_config_info, skip_dataplane_checking, - asic_db_checker): + asic_db_checker, + inner_packet_type, + acl_default_rule): """ Send VXLAN packets from the VM VNI """ asic_db_checker(["SAI_OBJECT_TYPE_VNET", "SAI_OBJECT_TYPE_ENI"]) if skip_dataplane_checking: return - _, vxlan_packet, expected_packet = packets.outbound_vnet_packets(dash_config_info) + _, vxlan_packet, expected_packet = packets.outbound_vnet_packets(dash_config_info, + inner_packet_type=inner_packet_type) testutils.send(ptfadapter, dash_config_info[LOCAL_PTF_INTF], vxlan_packet, 1) testutils.verify_packets_any(ptfadapter, expected_packet, ports=dash_config_info[REMOTE_PTF_INTF]) # testutils.verify_packet(ptfadapter, expected_packet, dash_config_info[REMOTE_PTF_INTF]) @@ -37,11 +63,14 @@ def test_outbound_vnet_direct( apply_vnet_direct_configs, dash_config_info, skip_dataplane_checking, - asic_db_checker): + asic_db_checker, + inner_packet_type, + acl_default_rule): asic_db_checker(["SAI_OBJECT_TYPE_VNET", "SAI_OBJECT_TYPE_ENI"]) if skip_dataplane_checking: return - _, vxlan_packet, expected_packet = packets.outbound_vnet_packets(dash_config_info) + _, vxlan_packet, expected_packet = packets.outbound_vnet_packets(dash_config_info, + inner_packet_type=inner_packet_type) testutils.send(ptfadapter, dash_config_info[LOCAL_PTF_INTF], vxlan_packet, 1) testutils.verify_packets_any(ptfadapter, expected_packet, ports=dash_config_info[REMOTE_PTF_INTF]) # testutils.verify_packet(ptfadapter, expected_packet, dash_config_info[REMOTE_PTF_INTF]) @@ -52,11 +81,14 @@ def test_outbound_direct( apply_direct_configs, dash_config_info, skip_dataplane_checking, - asic_db_checker): + asic_db_checker, + inner_packet_type, + acl_default_rule): asic_db_checker(["SAI_OBJECT_TYPE_VNET", "SAI_OBJECT_TYPE_ENI"]) if skip_dataplane_checking: return - expected_inner_packet, vxlan_packet, _ = packets.outbound_vnet_packets(dash_config_info) + expected_inner_packet, vxlan_packet, 
_ = packets.outbound_vnet_packets(dash_config_info, + inner_packet_type=inner_packet_type) testutils.send(ptfadapter, dash_config_info[LOCAL_PTF_INTF], vxlan_packet, 1) testutils.verify_packets_any(ptfadapter, expected_inner_packet, ports=dash_config_info[REMOTE_PTF_INTF]) # testutils.verify_packet(ptfadapter, expected_inner_packet, dash_config_info[REMOTE_PTF_INTF]) @@ -67,7 +99,9 @@ def test_inbound_vnet_pa_validate( apply_inbound_configs, dash_config_info, skip_dataplane_checking, - asic_db_checker): + asic_db_checker, + inner_packet_type, + acl_default_rule): """ Send VXLAN packets from the remote VNI with PA validation enabled @@ -79,7 +113,8 @@ def test_inbound_vnet_pa_validate( asic_db_checker(["SAI_OBJECT_TYPE_VNET", "SAI_OBJECT_TYPE_ENI"]) if skip_dataplane_checking: return - _, pa_match_packet, pa_mismatch_packet, expected_packet = packets.inbound_vnet_packets(dash_config_info) + _, pa_match_packet, pa_mismatch_packet, expected_packet = packets.inbound_vnet_packets( + dash_config_info, inner_packet_type=inner_packet_type) testutils.send(ptfadapter, dash_config_info[REMOTE_PTF_INTF], pa_match_packet, 1) testutils.verify_packets_any(ptfadapter, expected_packet, ports=dash_config_info[LOCAL_PTF_INTF]) testutils.send(ptfadapter, dash_config_info[REMOTE_PTF_INTF], pa_mismatch_packet, 1) diff --git a/tests/dhcp_relay/acl/dhcpv6_pkt_recv_multicast_accept.json b/tests/dhcp_relay/acl/dhcpv6_pkt_recv_multicast_accept.json new file mode 100644 index 00000000000..59da3189dbc --- /dev/null +++ b/tests/dhcp_relay/acl/dhcpv6_pkt_recv_multicast_accept.json @@ -0,0 +1,30 @@ +{ + "acl": { + "acl-sets": { + "acl-set": { + "DHCPV6_PKT_RECV_TEST": { + "acl-entries": { + "acl-entry": { + "9011_ALLOW_DHCPv6": { + "actions": { + "config": { + "forwarding-action": "ACCEPT" + } + }, + "config": { + "sequence-id": 9011 + }, + "ip": { + "config": { + "destination-ip-address": "ff02::1:2/128", + "protocol": "17" + } + } + } + } + } + } + } + } + } +} diff --git 
a/tests/dhcp_relay/test_dhcp_pkt_recv.py b/tests/dhcp_relay/test_dhcp_pkt_recv.py new file mode 100644 index 00000000000..e5d5cb48505 --- /dev/null +++ b/tests/dhcp_relay/test_dhcp_pkt_recv.py @@ -0,0 +1,116 @@ +import logging +import ptf.packet as scapy +import pytest +import random + +from ptf import testutils +from scapy.layers.dhcp6 import DHCP6_Solicit +from tests.common.helpers.assertions import pytest_assert +from tests.common.utilities import capture_and_check_packet_on_dut + +pytestmark = [ + pytest.mark.topology('mx') +] + +ACL_TABLE_NAME_DHCPV6_PKT_RECV_TEST = "DHCPV6_PKT_RECV_TEST" +ACL_STAGE_INGRESS = "ingress" +ACL_TABLE_TYPE_L3V6 = "L3V6" + +ACL_RULE_FILE_PATH_MULTICAST_ACCEPT = "dhcp_relay/acl/dhcpv6_pkt_recv_multicast_accept.json" +ACL_RULE_DST_FILE = "/tmp/test_dchp_pkt_acl_rule.json" + +DHCP_RELAY_FEATRUE_NAME = "dhcp_relay" +DHCPV6_MAC_MULTICAST = "33:33:00:01:00:02" +DHCPV6_IP_MULTICAST = "ff02::1:2" +DHCPV6_UDP_CLIENT_PORT = 546 +DHCPV6_UDP_SERVER_PORT = 547 + + +@pytest.fixture(scope="module", autouse=True) +def check_dhcp_relay_feature_state(duthost): + features_state, _ = duthost.get_feature_status() + if "enabled" not in features_state.get(DHCP_RELAY_FEATRUE_NAME, ""): + pytest.skip('dhcp relay feature is not enabled, skip the test') + + +class Dhcpv6PktRecvBase: + + @pytest.fixture(scope="class") + def setup_teardown(self, duthost, tbinfo): + ptf_indices = tbinfo['topo']['properties']['topology']['host_interfaces'] + dut_intf_ptf_index = duthost.get_extended_minigraph_facts(tbinfo)['minigraph_ptf_indices'] + yield ptf_indices, dut_intf_ptf_index + + def test_dhcpv6_multicast_recv(self, duthost, ptfadapter, setup_teardown): + """ + Test the DUT can receive DHCPv6 multicast packet + """ + ptf_indices, dut_intf_ptf_index = setup_teardown + ptf_index = random.choice(ptf_indices) + intf, ptf_port_id = [(intf, id) for intf, id in dut_intf_ptf_index.items() if id == ptf_index][0] + logging.info("Start to verify dhcpv6 multicast with 
infterface=%s and ptf_port_id=%s" % (intf, ptf_port_id)) + + def func(pkts): + pytest_assert(len([pkt for pkt in pkts if pkt[DHCP6_Solicit].trid == test_trid]) > 0, + "Didn't get packet with expected transaction id") + src_mac = ptfadapter.dataplane.get_mac(0, ptf_port_id).decode('utf-8') + test_trid = 234 + pkts_filter = "ether src %s and udp dst port %s" % (src_mac, DHCPV6_UDP_SERVER_PORT) + with capture_and_check_packet_on_dut( + duthost=duthost, + interface=intf, + pkts_filter=pkts_filter, + pkts_validator=func + ): + link_local_ipv6_addr = duthost.get_intf_link_local_ipv6_addr(intf) + req_pkt = scapy.Ether(dst=DHCPV6_MAC_MULTICAST, src=src_mac) \ + / scapy.IPv6(src=link_local_ipv6_addr, dst=DHCPV6_IP_MULTICAST)\ + / scapy.UDP(sport=DHCPV6_UDP_CLIENT_PORT, dport=DHCPV6_UDP_SERVER_PORT)\ + / DHCP6_Solicit(trid=test_trid) + ptfadapter.dataplane.flush() + testutils.send_packet(ptfadapter, pkt=req_pkt, port_id=ptf_port_id) + + +class TestDhcpv6WithEmptyAclTable(Dhcpv6PktRecvBase): + """ + Test the DUT with empty ACL table + """ + @pytest.fixture(scope="class", autouse=True) + def setup_teardown_acl(self, duthost, setup_teardown): + ptf_indices, dut_intf_ptf_index = setup_teardown + ptf_intfs = [intf for intf, index in dut_intf_ptf_index.items() if index in ptf_indices] + acl_table_name = ACL_TABLE_NAME_DHCPV6_PKT_RECV_TEST + duthost.add_acl_table( + table_name=acl_table_name, + table_type=ACL_TABLE_TYPE_L3V6, + acl_stage=ACL_STAGE_INGRESS, + bind_ports=ptf_intfs + ) + + yield + + duthost.remove_acl_table(acl_table_name) + + +class TestDhcpv6WithMulticastAccpectAcl(Dhcpv6PktRecvBase): + """ + Test the DUT with multicast accept ACL rule and default drop all rule. 
+ The drop all rule is added by default for L3V6 table type by acl-loader + """ + @pytest.fixture(scope="class", autouse=True) + def setup_teardown_acl(self, duthost, setup_teardown): + ptf_indices, dut_intf_ptf_index = setup_teardown + ptf_intfs = [intf for intf, index in dut_intf_ptf_index.items() if index in ptf_indices] + acl_table_name = ACL_TABLE_NAME_DHCPV6_PKT_RECV_TEST + duthost.add_acl_table( + table_name=acl_table_name, + table_type=ACL_TABLE_TYPE_L3V6, + acl_stage=ACL_STAGE_INGRESS, + bind_ports=ptf_intfs + ) + duthost.copy(src=ACL_RULE_FILE_PATH_MULTICAST_ACCEPT, dest=ACL_RULE_DST_FILE) + duthost.shell("acl-loader update full --table_name {} {}".format(acl_table_name, ACL_RULE_DST_FILE)) + + yield + + duthost.remove_acl_table(acl_table_name) diff --git a/tests/dhcp_relay/test_dhcp_relay.py b/tests/dhcp_relay/test_dhcp_relay.py index 93c3a7e8374..66fded51742 100644 --- a/tests/dhcp_relay/test_dhcp_relay.py +++ b/tests/dhcp_relay/test_dhcp_relay.py @@ -178,8 +178,8 @@ def check_routes_to_dhcp_server(duthost, dut_dhcp_relay_data): def validate_dut_routes_exist(duthosts, rand_one_dut_hostname, dut_dhcp_relay_data): """Fixture to valid a route to each DHCP server exist """ - pytest_assert(check_routes_to_dhcp_server(duthosts[rand_one_dut_hostname], dut_dhcp_relay_data), - "Failed to find route for DHCP server") + pytest_assert(wait_until(120, 5, 0, check_routes_to_dhcp_server, duthosts[rand_one_dut_hostname], + dut_dhcp_relay_data), "Failed to find route for DHCP server") def restart_dhcp_service(duthost): @@ -344,8 +344,8 @@ def test_dhcp_relay_default(ptfhost, dut_dhcp_relay_data, validate_dut_routes_ex expected_agg_counter_message = ( r".*dhcp_relay#dhcpmon\[[0-9]+\]: " r"\[\s*Agg-%s\s*-[\sA-Za-z0-9]+\s*rx/tx\] " - r"Discover: +1/ +%d, Offer: +1/ +1, Request: +3/ +%d, ACK: +1/ +1+" - ) % (dhcp_relay['downlink_vlan_iface']['name'], dhcp_server_num, dhcp_server_num * 3) + r"Discover: +1/ +%d, Offer: +1/ +1, Request: +2/ +%d, ACK: +1/ +1+" + ) % 
(dhcp_relay['downlink_vlan_iface']['name'], dhcp_server_num, dhcp_server_num * 2) loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="dhcpmon counter") marker = loganalyzer.init() loganalyzer.expect_regex = [expected_agg_counter_message] @@ -375,7 +375,7 @@ def test_dhcp_relay_default(ptfhost, dut_dhcp_relay_data, validate_dut_routes_ex "testing_mode": testing_mode}, log_file="/tmp/dhcp_relay_test.DHCPTest.log", is_python3=True) if not skip_dhcpmon: - time.sleep(18) # dhcpmon debug counter prints every 18 seconds + time.sleep(36) # dhcpmon debug counter prints every 18 seconds loganalyzer.analyze(marker) if testing_mode == DUAL_TOR_MODE: loganalyzer_standby.analyze(marker_standby) diff --git a/tests/dns/static_dns/test_static_dns.py b/tests/dns/static_dns/test_static_dns.py index b68e2de084c..c7ba00eb16b 100644 --- a/tests/dns/static_dns/test_static_dns.py +++ b/tests/dns/static_dns/test_static_dns.py @@ -40,6 +40,22 @@ EXCEED_MAX_ERR = r"Error: The maximum number \(3\) of nameservers exceeded" DUPLICATED_IP_ERR = r"Error: .* nameserver is already configured" +MGMT_PORT = "eth0" +DHCLIENT_PID_FILE = "/run/dhclient-dns-test.pid" + + +def start_dhclient(duthost): + duthost.shell(f"sudo dhclient -pf {DHCLIENT_PID_FILE} {MGMT_PORT}") + + +@pytest.fixture() +def stop_dhclient(duthost): + yield + + if duthost.shell(f'ls {DHCLIENT_PID_FILE}', module_ignore_errors=True)['rc'] == 0: + duthost.shell(f"sudo kill $(cat {DHCLIENT_PID_FILE})") + duthost.shell(f"rm -rf {DHCLIENT_PID_FILE}") + @pytest.mark.disable_loganalyzer def test_static_dns_basic(request, duthost, localhost, mgmt_interfaces): @@ -97,13 +113,13 @@ def test_static_dns_basic(request, duthost, localhost, mgmt_interfaces): if mgmt_interfaces: verify_nameserver_in_conf_file(duthost, []) else: - origin_dynamic_nameservers = get_nameserver_from_resolvconf(duthost, file_name=RESOLV_CONF_FILE+".bk") + origin_dynamic_nameservers = get_nameserver_from_resolvconf(duthost, file_name=RESOLV_CONF_FILE + ".bk") 
verify_nameserver_in_conf_file(duthost, origin_dynamic_nameservers) @pytest.mark.usefixtures('static_mgmt_ip_configured') class TestStaticMgmtPortIP(): - def test_dynamic_dns_not_working_when_static_ip_configured(self, duthost): + def test_dynamic_dns_not_working_when_static_ip_configured(self, duthost, stop_dhclient): """ Test to verify Dynamic DNS not work when static ip address is configured on the mgmt port :param duthost: DUT host object @@ -120,13 +136,13 @@ def test_dynamic_dns_not_working_when_static_ip_configured(self, duthost): verify_nameserver_in_conf_file(duthost, []) with allure.step("Renew dhcp to restore the dns configuration."): - duthost.shell("sudo dhclient") + start_dhclient(duthost) verify_nameserver_in_conf_file(duthost, []) @pytest.mark.usefixtures('static_mgmt_ip_not_configured') class TestDynamicMgmtPortIP(): - def test_static_dns_is_not_changing_when_do_dhcp_renew(self, duthost): + def test_static_dns_is_not_changing_when_do_dhcp_renew(self, duthost, stop_dhclient): """ Test case to verify Static DNS will not change when do dhcp renew for the mgmt port :param duthost: DUT host object @@ -143,7 +159,7 @@ def test_static_dns_is_not_changing_when_do_dhcp_renew(self, duthost): verify_nameserver_in_conf_file(duthost, expected_nameservers) with allure.step("Renew dhcp to restore the dns configuration."): - duthost.shell("sudo dhclient") + start_dhclient(duthost) with allure.step(f"Verify that {RESOLV_CONF_FILE} is not modified"): verify_nameserver_in_conf_file(duthost, expected_nameservers) @@ -153,7 +169,7 @@ def test_static_dns_is_not_changing_when_do_dhcp_renew(self, duthost): del_dns_nameserver(duthost, nameserver) @pytest.mark.usefixtures('static_mgmt_ip_not_configured') - def test_dynamic_dns_working_when_no_static_ip_and_static_dns(self, duthost): + def test_dynamic_dns_working_when_no_static_ip_and_static_dns(self, duthost, stop_dhclient): """ The test is to verify Dynamic DNS work as expected when no static ip configured on mgmt port 
and static DNS is configured. @@ -182,7 +198,7 @@ def test_dynamic_dns_working_when_no_static_ip_and_static_dns(self, duthost): config_mgmt_ip(duthost, mgmt_interfaces, "remove") with allure.step("Renew dhcp to restore the dns configuration."): - duthost.shell("sudo dhclient") + start_dhclient(duthost) verify_nameserver_in_conf_file(duthost, origin_dynamic_nameservers) diff --git a/tests/dualtor_mgmt/test_dualtor_bgp_update_delay.py b/tests/dualtor_mgmt/test_dualtor_bgp_update_delay.py index 180e42a5ce9..0b47921ed38 100644 --- a/tests/dualtor_mgmt/test_dualtor_bgp_update_delay.py +++ b/tests/dualtor_mgmt/test_dualtor_bgp_update_delay.py @@ -25,6 +25,13 @@ @contextlib.contextmanager def log_bgp_updates(duthost, iface, save_path): """Capture bgp packets to file.""" + + def _is_tcpdump_running(duthost, cmd): + check_cmd = "ps u -C tcpdump | grep '%s'" % cmd + if cmd in duthost.shell(check_cmd)['stdout']: + return True + return False + if iface == "any": # Scapy doesn't support LINUX_SLL2 (Linux cooked v2), and tcpdump on Bullseye # defaults to writing in that format when listening on any interface. 
Therefore, @@ -33,13 +40,18 @@ def log_bgp_updates(duthost, iface, save_path): else: start_pcap = "tcpdump -i %s -w %s port 179" % (iface, save_path) # for multi-asic dut, add 'ip netns exec asicx' to the beggining of tcpdump cmd - stop_pcap = "sudo pkill -SIGINT -f '%s'" % start_pcap - start_pcap = "nohup {} &".format(start_pcap) - duthost.shell(start_pcap) + stop_pcap_cmd = "sudo pkill -SIGINT -f '%s'" % start_pcap + start_pcap_cmd = "nohup {} &".format(start_pcap) + duthost.file(path=save_path, state="absent") + duthost.shell(start_pcap_cmd) + # wait until tcpdump process created + if not wait_until(20, 5, 2, lambda: _is_tcpdump_running(duthost, start_pcap),): + pytest.fail("Could not start tcpdump") + try: yield finally: - duthost.shell(stop_pcap, module_ignore_errors=True) + duthost.shell(stop_pcap_cmd, module_ignore_errors=True) @pytest.fixture(params=["ipv4", "ipv6"]) diff --git a/tests/ecmp/test_ecmp_sai_value.py b/tests/ecmp/test_ecmp_sai_value.py index 70c8fe34d71..2afacdb7328 100644 --- a/tests/ecmp/test_ecmp_sai_value.py +++ b/tests/ecmp/test_ecmp_sai_value.py @@ -163,7 +163,7 @@ def check_ecmp_offset_value(duthost, asic_name, topo_type, hwsku): Expected {}, but got {}.".format(392, offset_count)) elif topo_type == "t1": offset_count = offset_list.count('0xa') - if hwsku in ["Arista-7060CX-32S-C32", "Arista-7050QX32S-Q32"]: + if hwsku in ["Arista-7060CX-32S-C32", "Arista-7050QX32S-Q32", "Arista-7050-QX-32S"]: pytest_assert(offset_count >= 33, "the count of 0xa OFFSET_ECMP is not correct. 
\ Expected >= 33, but got {}.".format(offset_count)) else: @@ -211,11 +211,13 @@ def test_ecmp_hash_seed_value(localhost, duthosts, tbinfo, enum_rand_one_per_hws check_hash_seed_value(duthost, asic_name, topo_type) elif parameter == "reboot": logging.info("Run cold reboot on DUT") - reboot(duthost, localhost, reboot_type=REBOOT_TYPE_COLD, reboot_helper=None, reboot_kwargs=None) + reboot(duthost, localhost, reboot_type=REBOOT_TYPE_COLD, reboot_helper=None, + reboot_kwargs=None, safe_reboot=True) check_hash_seed_value(duthost, asic_name, topo_type) elif parameter == "warm-reboot" and topo_type == "t0": logging.info("Run warm reboot on DUT") - reboot(duthost, localhost, reboot_type=REBOOT_TYPE_WARM, reboot_helper=None, reboot_kwargs=None) + reboot(duthost, localhost, reboot_type=REBOOT_TYPE_WARM, reboot_helper=None, + reboot_kwargs=None, safe_reboot=True) check_hash_seed_value(duthost, asic_name, topo_type) @@ -257,9 +259,11 @@ def test_ecmp_offset_value(localhost, duthosts, tbinfo, enum_rand_one_per_hwsku_ check_hash_seed_value(duthost, asic_name, topo_type) elif parameter == "reboot": logging.info("Run cold reboot on DUT") - reboot(duthost, localhost, reboot_type=REBOOT_TYPE_COLD, reboot_helper=None, reboot_kwargs=None) + reboot(duthost, localhost, reboot_type=REBOOT_TYPE_COLD, reboot_helper=None, + reboot_kwargs=None, safe_reboot=True) check_ecmp_offset_value(duthost, asic_name, topo_type, hwsku) elif parameter == "warm-reboot" and topo_type == "t0": logging.info("Run warm reboot on DUT") - reboot(duthost, localhost, reboot_type=REBOOT_TYPE_WARM, reboot_helper=None, reboot_kwargs=None) + reboot(duthost, localhost, reboot_type=REBOOT_TYPE_WARM, reboot_helper=None, + reboot_kwargs=None, safe_reboot=True) check_ecmp_offset_value(duthost, asic_name, topo_type, hwsku) diff --git a/tests/everflow/test_everflow_testbed.py b/tests/everflow/test_everflow_testbed.py index 85940644f09..06bd02e521f 100644 --- a/tests/everflow/test_everflow_testbed.py +++ 
b/tests/everflow/test_everflow_testbed.py @@ -8,6 +8,11 @@ from tests.ptf_runner import ptf_runner from .everflow_test_utilities import TARGET_SERVER_IP, BaseEverflowTest, DOWN_STREAM, UP_STREAM, DEFAULT_SERVER_IP + +from tests.common.dualtor.dual_tor_utils import config_active_active_dualtor_active_standby # noqa F401 +from tests.common.dualtor.dual_tor_utils import validate_active_active_dualtor_setup # noqa F401 +from tests.common.dualtor.dual_tor_common import active_active_ports # noqa F401 + # Module-level fixtures from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa: F401 from tests.common.fixtures.ptfhost_utils import copy_acstests_directory # noqa: F401 @@ -19,6 +24,7 @@ pytest.mark.topology("t0", "t1", "t2", "m0") ] +logger = logging.getLogger(__name__) MEGABYTE = 1024 * 1024 DEFAULT_PTF_SOCKET_RCV_SIZE = 1 * MEGABYTE @@ -79,6 +85,20 @@ class EverflowIPv4Tests(BaseEverflowTest): DEFAULT_DST_IP = "30.0.0.1" MIRROR_POLICER_UNSUPPORTED_ASIC_LIST = ["th3", "j2c+", "jr2"] + @pytest.fixture + def setup_active_active_ports(self, active_active_ports, rand_selected_dut, rand_unselected_dut, # noqa F811 + config_active_active_dualtor_active_standby, # noqa F811 + validate_active_active_dualtor_setup): # noqa F811 + if active_active_ports: + # for active-active dualtor, the upstream traffic is ECMPed to both ToRs, so let's + # config the unselected ToR as standby to ensure all ethernet type packets are + # forwarded to the selected ToR. 
+ logger.info("Configuring {} as active".format(rand_selected_dut.hostname)) + logger.info("Configuring {} as standby".format(rand_unselected_dut.hostname)) + config_active_active_dualtor_active_standby(rand_selected_dut, rand_unselected_dut, active_active_ports) + + return + @pytest.fixture(params=[DOWN_STREAM, UP_STREAM]) def dest_port_type(self, setup_info, setup_mirror_session, tbinfo, request): # noqa F811 """ @@ -128,7 +148,8 @@ def add_dest_routes(self, setup_info, tbinfo, dest_port_type): # noqa F811 def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + setup_active_active_ports): """ Verify basic forwarding scenarios for the Everflow feature. @@ -228,7 +249,8 @@ def test_everflow_basic_forwarding(self, setup_info, setup_mirror_session, def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + setup_active_active_ports): """Verify that session destination MAC address is changed after neighbor MAC address update.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -275,6 +297,7 @@ def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, ) finally: + # Clean up the test remote_dut.shell( remote_dut.get_linux_ip_cmd_for_namespace("ip neigh del {} dev {}".format(peer_ip, tx_port), @@ -296,7 +319,8 @@ def test_everflow_neighbor_mac_change(self, setup_info, setup_mirror_session, def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + 
setup_active_active_ports): """Verify that session is still active after removal of next hop from ECMP route that was not in use.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -387,7 +411,8 @@ def test_everflow_remove_unused_ecmp_next_hop(self, setup_info, setup_mirror_ses def test_everflow_remove_used_ecmp_next_hop(self, setup_info, setup_mirror_session, # noqa F811 dest_port_type, ptfadapter, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor): # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + setup_active_active_ports): """Verify that session is still active after removal of next hop from ECMP route that was in use.""" everflow_dut = setup_info[dest_port_type]['everflow_dut'] @@ -498,7 +523,8 @@ def test_everflow_dscp_with_policer( partial_ptf_runner, config_method, tbinfo, - toggle_all_simulator_ports_to_rand_selected_tor # noqa F811 + toggle_all_simulator_ports_to_rand_selected_tor, # noqa F811 + setup_active_active_ports ): """Verify that we can rate-limit mirrored traffic from the MIRROR_DSCP table. This tests single rate three color policer mode and specifically checks CIR value diff --git a/tests/generic_config_updater/test_aaa.py b/tests/generic_config_updater/test_aaa.py index 8d03ae8c128..c1fae19a6a2 100644 --- a/tests/generic_config_updater/test_aaa.py +++ b/tests/generic_config_updater/test_aaa.py @@ -304,6 +304,9 @@ def test_tc1_aaa_suite(rand_selected_dut): contian AAA table. So we remove AAA config at first. 
""" aaa_add_init_config_without_table(rand_selected_dut) + # Recent AAA YANG update that passkey in TACPLUS must exist first for authorization tacacs+ + # Since tc2 it will clean and retest TACPLUS table, we don't care TACPLUS residue after tc1 + tacacs_global_tc2_add_config(rand_selected_dut) aaa_tc1_add_config(rand_selected_dut) aaa_tc1_replace(rand_selected_dut) aaa_tc1_add_duplicate(rand_selected_dut) diff --git a/tests/generic_config_updater/test_incremental_qos.py b/tests/generic_config_updater/test_incremental_qos.py index 54332c2e345..53fc6a1dd45 100644 --- a/tests/generic_config_updater/test_incremental_qos.py +++ b/tests/generic_config_updater/test_incremental_qos.py @@ -239,6 +239,9 @@ def test_incremental_qos_config_updates(duthost, tbinfo, ensure_dut_readiness, c try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) + if op == "replace" and not field_value: + expect_op_failure(output) + if is_valid_platform_and_version(duthost, "BUFFER_POOL", "Shared/headroom pool size changes", op, field_value): expect_op_success(duthost, output) ensure_application_of_updated_config(duthost, configdb_field, value) diff --git a/tests/gnmi/test_gnmi_appldb.py b/tests/gnmi/test_gnmi_appldb.py index def12cb3b22..145ee9e7ecf 100644 --- a/tests/gnmi/test_gnmi_appldb.py +++ b/tests/gnmi/test_gnmi_appldb.py @@ -22,12 +22,12 @@ def test_gnmi_appldb_01(duthosts, rand_one_dut_hostname, localhost): with open(file_name, 'w') as file: file.write(text) # Add DASH_VNET_TABLE - update_list = ["/sonic-db:APPL_DB/DASH_VNET_TABLE:@./%s" % (file_name)] + update_list = ["/sonic-db:APPL_DB/localhost/DASH_VNET_TABLE:@./%s" % (file_name)] ret, msg = gnmi_set(duthost, localhost, [], update_list, []) assert ret == 0, msg # Check gnmi_get result - path_list1 = ["/sonic-db:APPL_DB/DASH_VNET_TABLE/Vnet1/vni"] - path_list2 = ["/sonic-db:APPL_DB/_DASH_VNET_TABLE/Vnet1/vni"] + path_list1 = ["/sonic-db:APPL_DB/localhost/DASH_VNET_TABLE/Vnet1/vni"] + path_list2 = 
["/sonic-db:APPL_DB/localhost/_DASH_VNET_TABLE/Vnet1/vni"] ret1, msg_list1 = gnmi_get(duthost, localhost, path_list1) ret2, msg_list2 = gnmi_get(duthost, localhost, path_list2) output = "" @@ -38,12 +38,12 @@ def test_gnmi_appldb_01(duthosts, rand_one_dut_hostname, localhost): assert output == "\"1000\"", output # Remove DASH_VNET_TABLE - delete_list = ["/sonic-db:APPL_DB/DASH_VNET_TABLE/Vnet1"] + delete_list = ["/sonic-db:APPL_DB/localhost/DASH_VNET_TABLE/Vnet1"] ret, msg = gnmi_set(duthost, localhost, delete_list, [], []) assert ret == 0, msg # Check gnmi_get result - path_list1 = ["/sonic-db:APPL_DB/DASH_VNET_TABLE/Vnet1/vni"] - path_list2 = ["/sonic-db:APPL_DB/_DASH_VNET_TABLE/Vnet1/vni"] + path_list1 = ["/sonic-db:APPL_DB/localhost/DASH_VNET_TABLE/Vnet1/vni"] + path_list2 = ["/sonic-db:APPL_DB/localhost/_DASH_VNET_TABLE/Vnet1/vni"] ret1, msg_list1 = gnmi_get(duthost, localhost, path_list1) ret2, msg_list2 = gnmi_get(duthost, localhost, path_list2) assert ret1 != 0 and ret2 != 0, msg_list1[0] + msg_list2[0] diff --git a/tests/gnmi/test_gnmi_configdb.py b/tests/gnmi/test_gnmi_configdb.py index f3654713216..e8efc6a24f6 100644 --- a/tests/gnmi/test_gnmi_configdb.py +++ b/tests/gnmi/test_gnmi_configdb.py @@ -23,14 +23,19 @@ def get_first_interface(duthost): status_data = output["stdout_lines"] if 'Admin' not in status_data[0]: return None + if 'Lanes' not in status_data[0]: + return None admin_index = status_data[0].split().index('Admin') + lanes_index = status_data[0].split().index('Lanes') for line in status_data: - if "routed" not in line: - interface_status = line.strip() - assert len(interface_status) > 0, "Failed to read interface properties" - sl = interface_status.split() - if sl[admin_index] == 'up': - return sl[0] + interface_status = line.strip() + assert len(interface_status) > 0, "Failed to read interface properties" + sl = interface_status.split() + # Skip portchannel + if sl[lanes_index] == 'N/A': + continue + if sl[admin_index] == 'up': + return 
sl[0] return None @@ -50,8 +55,8 @@ def test_gnmi_configdb_incremental_01(duthosts, rand_one_dut_hostname, localhost file_name = "port.txt" interface = get_first_interface(duthost) assert interface is not None, "Invalid interface" - update_list = ["/sonic-db:CONFIG_DB/PORT/%s/admin_status:@./%s" % (interface, file_name)] - path_list = ["/sonic-db:CONFIG_DB/PORT/%s/admin_status" % (interface)] + update_list = ["/sonic-db:CONFIG_DB/localhost/PORT/%s/admin_status:@./%s" % (interface, file_name)] + path_list = ["/sonic-db:CONFIG_DB/localhost/PORT/%s/admin_status" % (interface)] # Shutdown interface text = "\"down\"" @@ -87,7 +92,7 @@ def test_gnmi_configdb_incremental_02(duthosts, rand_one_dut_hostname, localhost ''' duthost = duthosts[rand_one_dut_hostname] file_name = "port.txt" - update_list = ["/sonic-db:CONFIG_DB/PORTABC/Ethernet100/admin_status:@./%s" % (file_name)] + update_list = ["/sonic-db:CONFIG_DB/localhost/PORTABC/Ethernet100/admin_status:@./%s" % (file_name)] # GNMI set request with invalid path text = "\"down\"" @@ -117,8 +122,8 @@ def test_gnmi_configdb_full_01(duthosts, rand_one_dut_hostname, localhost): filename = "full.txt" with open(filename, 'w') as file: json.dump(dic, file) - delete_list = ["/sonic-db:CONFIG_DB/"] - update_list = ["/sonic-db:CONFIG_DB/:@%s" % filename] + delete_list = ["/sonic-db:CONFIG_DB/localhost/"] + update_list = ["/sonic-db:CONFIG_DB/localhost/:@%s" % filename] ret, msg = gnmi_set(duthost, localhost, delete_list, update_list, []) assert ret == 0, msg # Check interface status and gnmi_get result diff --git a/tests/iface_loopback_action/conftest.py b/tests/iface_loopback_action/conftest.py index 767adedda30..53a07410ba5 100644 --- a/tests/iface_loopback_action/conftest.py +++ b/tests/iface_loopback_action/conftest.py @@ -16,7 +16,7 @@ def pytest_addoption(parser): """ parser.addoption( - "--rif_loppback_reboot_type", + "--rif_loopback_reboot_type", action="store", type=str, default="cold", diff --git 
a/tests/iface_loopback_action/iface_loopback_action_helper.py b/tests/iface_loopback_action/iface_loopback_action_helper.py index 91f1781df34..bdccc40fcc9 100644 --- a/tests/iface_loopback_action/iface_loopback_action_helper.py +++ b/tests/iface_loopback_action/iface_loopback_action_helper.py @@ -49,6 +49,8 @@ def generate_and_verify_traffic(duthost, ptfadapter, rif_interface, src_port_ind eth_dst = duthost.facts["router_mac"] eth_src = ptfadapter.dataplane.get_mac(0, src_port_index).decode('utf-8') duthost.shell("sudo ip neigh replace {} lladdr {} dev {}".format(ip_dst, eth_src, rif_interface)) + pytest_assert(wait_until(60, 3, 0, check_neighbor, duthost, ip_dst, eth_src, rif_interface), + "Failed to add neighbor for {}.".format(ip_dst)) logger.info("Traffic info is: eth_dst- {}, eth_src- {}, ip_src- {}, ip_dst- {}, vlan_vid- {}".format( eth_dst, eth_src, ip_src, ip_dst, vlan_vid)) pkt = testutils.simple_ip_packet( @@ -77,6 +79,30 @@ def generate_and_verify_traffic(duthost, ptfadapter, rif_interface, src_port_ind testutils.verify_packet(ptfadapter, exp_pkt, src_port_index) +def check_neighbor(duthost, ip_address, mac_address, interface): + """ + Verify the static ip neighbor is configured successfully + :param duthost: DUT host object + :param ip_address: Neighbor ip address + :param mac_address: Neighbor mac address + :param interface: Neighbor interface + """ + asic_db_output = duthost.shell("redis-cli -n 1 keys *NEIGHBOR_ENTRY* | grep {}".format(ip_address))['stdout_lines'] + if len(asic_db_output) < 1: + logger.error('No neighbor entry or extra neighbor entries of {} in ASIC db.'.format(ip_address)) + return False + + asic_db_neighbor_entry = asic_db_output[0] + mac_in_asic_db = duthost.shell("redis-cli -n 1 hget '{}' '{}'".format( + asic_db_neighbor_entry, "SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS"))['stdout_lines'] + + if len(mac_in_asic_db) < 1 or mac_in_asic_db[0].upper() != mac_address.upper(): + logger.error('The neighbor entry of {} in ASIC db is not 
correct.'.format(ip_address)) + return False + + return True + + def get_tested_up_ports(duthost, ptf_ifaces_map, count=10): """ Get the specified number of up ports @@ -464,8 +490,7 @@ def add_ptf_bond(ptfhost, port, bond_id, ip_addr): """ try: bond_port = 'bond{}'.format(bond_id) - ptfhost.shell("ip link add {} type bond".format(bond_port)) - ptfhost.shell("ip link set {} type bond miimon 100 mode 802.3ad".format(bond_port)) + ptfhost.shell("teamd -t {} -d -c '{{\"runner\": {{\"name\": \"lacp\"}}}}'".format(bond_port)) ptfhost.shell("ip link set {} down".format(port)) ptfhost.shell("ip link set {} master {}".format(port, bond_port)) ptfhost.shell("ip link set dev {} up".format(bond_port)) @@ -530,6 +555,19 @@ def clear_rif_counter(duthost): duthost.shell("sonic-clear rifcounters") +def check_ip_interface_up(duthost, interfaces): + """ + Check the ip interfaces are all up + :param duthost: DUT host object + :param interfaces: List of ip interfaces to check + """ + output = duthost.shell("show ip interface")['stdout'] + for interface in interfaces: + if not re.search("{}\\s.*up\\/up".format(interface), output): + return False + return True + + def show_loopback_action(duthost): """ Get the loopback action for every rif interface @@ -591,6 +629,9 @@ def verify_rif_tx_err_count(duthost, rif_interfaces, expect_counts): :param rif_interfaces: List of rif interface :param expect_counts: expected TX ERR for for every rif interface """ + # Wait for the rif counters polling + counter_poll_rif_interval = duthost.get_counter_poll_status()['RIF_STAT']['interval'] + time.sleep(counter_poll_rif_interval / 1000 + 1) rif_tx_err_map = get_rif_tx_err_count(duthost) for rif_interface, expected_count in zip(rif_interfaces, expect_counts): tx_err_count = int(rif_tx_err_map[rif_interface]) diff --git a/tests/iface_loopback_action/test_iface_loopback_action.py b/tests/iface_loopback_action/test_iface_loopback_action.py index bc9b325c433..8b5670e2368 100644 --- 
a/tests/iface_loopback_action/test_iface_loopback_action.py +++ b/tests/iface_loopback_action/test_iface_loopback_action.py @@ -11,7 +11,7 @@ from .iface_loopback_action_helper import config_loopback_action from .iface_loopback_action_helper import clear_rif_counter from .iface_loopback_action_helper import verify_interface_loopback_action -from .iface_loopback_action_helper import verify_rif_tx_err_count, is_rif_counters_ready +from .iface_loopback_action_helper import verify_rif_tx_err_count, is_rif_counters_ready, check_ip_interface_up from .iface_loopback_action_helper import shutdown_rif_interfaces, startup_rif_interfaces from tests.common.platform.interface_utils import check_interface_status_of_up_ports @@ -88,11 +88,21 @@ def test_loopback_action_reload(request, duthost, localhost, ptfadapter, ports_c count_list = [NUM_OF_TOTAL_PACKETS if action == ACTION_DROP else 0 for action in action_list] with allure.step("Configure the loopback action for {} to {}".format(rif_interfaces, action_list)): config_loopback_action(duthost, rif_interfaces, action_list) + with allure.step("Verify the loopback action is correct before config reload"): + with allure.step("Check the looback action is configured correctly with cli command"): + verify_interface_loopback_action(duthost, rif_interfaces, action_list) + with allure.step("Check the loopback traffic"): + with allure.step("Clear the rif counter"): + clear_rif_counter(duthost) + with allure.step("Check the traffic can be received or dropped as expected"): + verify_traffic(duthost, ptfadapter, rif_interfaces, ports_configuration, action_list) + with allure.step("Check the TX_ERR in rif counter statistic will increase or not as expected"): + verify_rif_tx_err_count(duthost, rif_interfaces, count_list) with allure.step("Save configuration"): duthost.shell("config save -y") with allure.step("System reload"): - reboot_type = request.config.getoption("--rif_loppback_reboot_type") + reboot_type = 
request.config.getoption("--rif_loopback_reboot_type") if reboot_type == "random": reload_types = ["reload", "cold", "fast", "warm"] reboot_type = random.choice(reload_types) @@ -110,6 +120,9 @@ def test_loopback_action_reload(request, duthost, localhost, ptfadapter, ports_c with allure.step("Check the looback action is configured correctly with cli command"): verify_interface_loopback_action(duthost, rif_interfaces, action_list) with allure.step("Check the loopback traffic"): + with allure.step("Check all ip interfaces are up"): + pytest_assert(wait_until(20, 5, 0, check_ip_interface_up, duthost, rif_interfaces), + "Not all ip interfaces are up.") with allure.step("Clear the rif counter"): clear_rif_counter(duthost) with allure.step("Check the traffic can be received or dropped as expected"): diff --git a/tests/ip/test_mgmt_ipv6_only.py b/tests/ip/test_mgmt_ipv6_only.py new file mode 100644 index 00000000000..57cf2492997 --- /dev/null +++ b/tests/ip/test_mgmt_ipv6_only.py @@ -0,0 +1,49 @@ +import pytest + +from tests.bgp.test_bgp_fact import run_bgp_facts +from tests.test_features import run_show_features +from tests.common.helpers.assertions import pytest_require +from tests.syslog.test_syslog import run_syslog, check_default_route # noqa F401 +from tests.common.fixtures.duthost_utils import convert_and_restore_config_db_to_ipv6_only # noqa F401 + +pytestmark = [ + pytest.mark.topology('any'), + pytest.mark.device_type('vs') +] + + +def test_bgp_facts_ipv6_only(duthosts, enum_frontend_dut_hostname, enum_asic_index, + convert_and_restore_config_db_to_ipv6_only): # noqa F811 + run_bgp_facts(duthosts, enum_frontend_dut_hostname, enum_asic_index) + + +def test_show_features_ipv6_only(duthosts, enum_dut_hostname, convert_and_restore_config_db_to_ipv6_only): # noqa F811 + run_show_features(duthosts, enum_dut_hostname) + + +def test_image_download_ipv6_only(creds, duthosts, enum_dut_hostname, + convert_and_restore_config_db_to_ipv6_only): # noqa F811 + """ + Test image 
download in mgmt ipv6 only scenario + """ + duthost = duthosts[enum_dut_hostname] + image_url = creds.get("test_image_url", {}).get("ipv6", "") + pytest_require(len(image_url) != 0, "Cannot get image url") + cfg_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + mgmt_interfaces = cfg_facts.get("MGMT_INTERFACE", {}).keys() + for mgmt_interface in mgmt_interfaces: + output = duthost.shell("curl --fail --interface {} {}".format(mgmt_interface, image_url), + module_ignore_errors=True) + if output["rc"] == 0: + break + else: + pytest.fail("Failed to download image from image_url {} via any of {}" + .format(image_url, list(mgmt_interfaces))) + + +@pytest.mark.parametrize("dummy_syslog_server_ip_a, dummy_syslog_server_ip_b", + [("fd82:b34f:cc99::100", None), + ("fd82:b34f:cc99::100", "fd82:b34f:cc99::200")]) +def test_syslog_ipv6_only(rand_selected_dut, dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, + check_default_route, convert_and_restore_config_db_to_ipv6_only): # noqa F811 + run_syslog(rand_selected_dut, dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, check_default_route) diff --git a/tests/ipfwd/test_nhop_group.py b/tests/ipfwd/test_nhop_group.py index 18c78b7b5b0..b7261c5d5c5 100644 --- a/tests/ipfwd/test_nhop_group.py +++ b/tests/ipfwd/test_nhop_group.py @@ -644,32 +644,31 @@ def built_and_send_tcp_ip_packet(): 45: 'c0:ff:ee:00:00:0c', 46: 'c0:ff:ee:00:00:0d', 47: 'c0:ff:ee:00:00:0b', 48: 'c0:ff:ee:00:00:11', 49: 'c0:ff:ee:00:00:0f'} - td3_asic_flow_map = {0: 'c0:ff:ee:00:00:12', 1: 'c0:ff:ee:00:00:10', - 2: 'c0:ff:ee:00:00:11', - 3: 'c0:ff:ee:00:00:0f', 4: 'c0:ff:ee:00:00:0d', - 5: 'c0:ff:ee:00:00:0b', 6: 'c0:ff:ee:00:00:0e', - 7: 'c0:ff:ee:00:00:0c', 8: 'c0:ff:ee:00:00:0f', - 9: 'c0:ff:ee:00:00:11', - 10: 'c0:ff:ee:00:00:10', 11: 'c0:ff:ee:00:00:12', - 12: 'c0:ff:ee:00:00:10', 13: 'c0:ff:ee:00:00:12', - 14: 'c0:ff:ee:00:00:0f', - 15: 'c0:ff:ee:00:00:11', 16: 'c0:ff:ee:00:00:0b', - 17: 'c0:ff:ee:00:00:0d', 18: 
'c0:ff:ee:00:00:0c', - 19: 'c0:ff:ee:00:00:0e', - 20: 'c0:ff:ee:00:00:10', 21: 'c0:ff:ee:00:00:12', - 22: 'c0:ff:ee:00:00:0f', 23: 'c0:ff:ee:00:00:11', - 24: 'c0:ff:ee:00:00:11', - 25: 'c0:ff:ee:00:00:0f', 26: 'c0:ff:ee:00:00:12', - 27: 'c0:ff:ee:00:00:10', 28: 'c0:ff:ee:00:00:0f', 29: 'c0:ff:ee:00:00:11', - 30: 'c0:ff:ee:00:00:10', 31: 'c0:ff:ee:00:00:12', - 32: 'c0:ff:ee:00:00:0c', 33: 'c0:ff:ee:00:00:0e', - 34: 'c0:ff:ee:00:00:0b', - 35: 'c0:ff:ee:00:00:0d', 36: 'c0:ff:ee:00:00:0f', - 37: 'c0:ff:ee:00:00:11', 38: 'c0:ff:ee:00:00:10', 39: 'c0:ff:ee:00:00:12', - 40: 'c0:ff:ee:00:00:0d', 41: 'c0:ff:ee:00:00:0b', - 42: 'c0:ff:ee:00:00:0e', 43: 'c0:ff:ee:00:00:0c', 44: 'c0:ff:ee:00:00:0e', - 45: 'c0:ff:ee:00:00:0c', 46: 'c0:ff:ee:00:00:0d', - 47: 'c0:ff:ee:00:00:0b', 48: 'c0:ff:ee:00:00:11', 49: 'c0:ff:ee:00:00:0f'} + td3_asic_flow_map = {0: 'c0:ff:ee:00:00:10', 1: 'c0:ff:ee:00:00:0b', + 2: 'c0:ff:ee:00:00:12', 3: 'c0:ff:ee:00:00:0d', + 4: 'c0:ff:ee:00:00:11', 5: 'c0:ff:ee:00:00:0e', + 6: 'c0:ff:ee:00:00:0f', 7: 'c0:ff:ee:00:00:0c', + 8: 'c0:ff:ee:00:00:0e', 9: 'c0:ff:ee:00:00:11', + 10: 'c0:ff:ee:00:00:0c', 11: 'c0:ff:ee:00:00:0f', + 12: 'c0:ff:ee:00:00:12', 13: 'c0:ff:ee:00:00:0d', + 14: 'c0:ff:ee:00:00:10', 15: 'c0:ff:ee:00:00:0b', + 16: 'c0:ff:ee:00:00:11', 17: 'c0:ff:ee:00:00:0e', + 18: 'c0:ff:ee:00:00:0f', 19: 'c0:ff:ee:00:00:0c', + 20: 'c0:ff:ee:00:00:10', 21: 'c0:ff:ee:00:00:0b', + 22: 'c0:ff:ee:00:00:12', 23: 'c0:ff:ee:00:00:0d', + 24: 'c0:ff:ee:00:00:11', 25: 'c0:ff:ee:00:00:0e', + 26: 'c0:ff:ee:00:00:0f', 27: 'c0:ff:ee:00:00:0c', + 28: 'c0:ff:ee:00:00:0b', 29: 'c0:ff:ee:00:00:10', + 30: 'c0:ff:ee:00:00:0d', 31: 'c0:ff:ee:00:00:12', + 32: 'c0:ff:ee:00:00:0c', 33: 'c0:ff:ee:00:00:0f', + 34: 'c0:ff:ee:00:00:0e', 35: 'c0:ff:ee:00:00:11', + 36: 'c0:ff:ee:00:00:0d', 37: 'c0:ff:ee:00:00:12', + 38: 'c0:ff:ee:00:00:0b', 39: 'c0:ff:ee:00:00:10', + 40: 'c0:ff:ee:00:00:12', 41: 'c0:ff:ee:00:00:0d', + 42: 'c0:ff:ee:00:00:10', 43: 'c0:ff:ee:00:00:0b', + 44: 
'c0:ff:ee:00:00:0e', 45: 'c0:ff:ee:00:00:11', + 46: 'c0:ff:ee:00:00:0c', 47: 'c0:ff:ee:00:00:0f', + 48: 'c0:ff:ee:00:00:0d', 49: 'c0:ff:ee:00:00:12'} th2_asic_flow_map = {0: 'c0:ff:ee:00:00:12', 1: 'c0:ff:ee:00:00:10', 2: 'c0:ff:ee:00:00:11', diff --git a/tests/lldp/test_lldp.py b/tests/lldp/test_lldp.py index f7a5d368aa8..0306763a166 100644 --- a/tests/lldp/test_lldp.py +++ b/tests/lldp/test_lldp.py @@ -18,7 +18,7 @@ def lldp_setup(duthosts, enum_rand_one_per_hwsku_frontend_hostname, patch_lldpct def test_lldp(duthosts, enum_rand_one_per_hwsku_frontend_hostname, localhost, - collect_techsupport_all_duts, enum_frontend_asic_index): + collect_techsupport_all_duts, enum_frontend_asic_index, request): """ verify the LLDP message on DUT """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] @@ -33,11 +33,15 @@ def test_lldp(duthosts, enum_rand_one_per_hwsku_frontend_hostname, localhost, # Compare the LLDP neighbor name with minigraph neigbhor name (exclude the management port) assert v['chassis']['name'] == config_facts['DEVICE_NEIGHBOR'][k]['name'] # Compare the LLDP neighbor interface with minigraph neigbhor interface (exclude the management port) - assert v['port']['ifname'] == config_facts['DEVICE_NEIGHBOR'][k]['port'] + if request.config.getoption("--neighbor_type") == 'eos': + assert v['port']['ifname'] == config_facts['DEVICE_NEIGHBOR'][k]['port'] + else: + # Dealing with KVM that advertises port description + assert v['port']['descr'] == config_facts['DEVICE_NEIGHBOR'][k]['port'] -def test_lldp_neighbor(duthosts, enum_rand_one_per_hwsku_frontend_hostname, localhost, eos, - collect_techsupport_all_duts, loganalyzer, enum_frontend_asic_index, tbinfo): +def test_lldp_neighbor(duthosts, enum_rand_one_per_hwsku_frontend_hostname, localhost, eos, sonic, + collect_techsupport_all_duts, loganalyzer, enum_frontend_asic_index, tbinfo, request): """ verify LLDP information on neighbors """ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] @@ -80,16 
+84,23 @@ def test_lldp_neighbor(duthosts, enum_rand_one_per_hwsku_frontend_hostname, loca logger.info("Neighbor device {} does not sent management IP via lldp".format(v['chassis']['name'])) hostip = nei_meta[v['chassis']['name']]['mgmt_addr'] - nei_lldp_facts = localhost.lldp_facts( - host=hostip, version='v2c', community=eos['snmp_rocommunity'])['ansible_facts'] - neighbor_interface = v['port']['ifname'] - logger.info("lldp facts for interface {}:{}".format(neighbor_interface, - nei_lldp_facts['ansible_lldp_facts'][neighbor_interface])) + if request.config.getoption("--neighbor_type") == 'eos': + nei_lldp_facts = localhost.lldp_facts(host=hostip, version='v2c', community=eos['snmp_rocommunity'])[ + 'ansible_facts'] + neighbor_interface = v['port']['ifname'] + else: + nei_lldp_facts = localhost.lldp_facts(host=hostip, version='v2c', community=sonic['snmp_rocommunity'])[ + 'ansible_facts'] + neighbor_interface = v['port']['local'] # Verify the published DUT system name field is correct assert nei_lldp_facts['ansible_lldp_facts'][neighbor_interface]['neighbor_sys_name'] == duthost.hostname # Verify the published DUT chassis id field is not empty - assert nei_lldp_facts['ansible_lldp_facts'][neighbor_interface]['neighbor_chassis_id'] == \ - "0x%s" % (switch_mac.replace(':', '')) + if request.config.getoption("--neighbor_type") == 'eos': + assert nei_lldp_facts['ansible_lldp_facts'][neighbor_interface]['neighbor_chassis_id'] == \ + "0x%s" % (switch_mac.replace(':', '')) + else: + assert nei_lldp_facts['ansible_lldp_facts'][neighbor_interface]['neighbor_chassis_id'] == switch_mac + # Verify the published DUT system description field is correct assert nei_lldp_facts['ansible_lldp_facts'][neighbor_interface]['neighbor_sys_desc'] == dut_system_description # Verify the published DUT port id field is correct diff --git a/tests/macsec/__init__.py b/tests/macsec/__init__.py index a28ba026200..5d07d12056a 100644 --- a/tests/macsec/__init__.py +++ b/tests/macsec/__init__.py @@ 
-150,6 +150,9 @@ def downstream_links(self, macsec_duthost, tbinfo, nbrhosts): def filter(interface, neighbor, mg_facts, tbinfo): if self.downstream_neighbor(tbinfo, neighbor): port = mg_facts["minigraph_neighbors"][interface]["port"] + if interface not in mg_facts["minigraph_ptf_indices"]: + logger.info("Interface {} not in minigraph_ptf_indices".format(interface)) + return links[interface] = { "name": neighbor["name"], "ptf_port_id": mg_facts["minigraph_ptf_indices"][interface], @@ -172,6 +175,9 @@ def filter(interface, neighbor, mg_facts, tbinfo): # The address of DUT peer_ipv4_addr = item["peer_addr"] break + if interface not in mg_facts["minigraph_ptf_indices"]: + logger.info("Interface {} not in minigraph_ptf_indices".format(interface)) + return port = mg_facts["minigraph_neighbors"][interface]["port"] links[interface] = { "name": neighbor["name"], diff --git a/tests/macsec/test_fault_handling.py b/tests/macsec/test_fault_handling.py index 62deedef614..dbbefa74b53 100644 --- a/tests/macsec/test_fault_handling.py +++ b/tests/macsec/test_fault_handling.py @@ -38,8 +38,8 @@ def test_link_flap(self, duthost, ctrl_links, wait_mka_establish): while retry > 0: retry -= 1 try: - nbr["host"].shell("ifconfig {} down && sleep 1 && ifconfig {} up".format( - nbr_eth_port, nbr_eth_port)) + nbr["host"].shell("config interface shutdown {} && sleep 1 && config interface startup {}".format( + nbr["port"], nbr["port"])) _, _, _, dut_egress_sa_table_new, dut_ingress_sa_table_new = get_appl_db( duthost, port_name, nbr["host"], nbr["port"]) assert dut_egress_sa_table_orig == dut_egress_sa_table_new @@ -56,8 +56,8 @@ def test_link_flap(self, duthost, ctrl_links, wait_mka_establish): sleep(TestFaultHandling.MKA_TIMEOUT) nbr["host"].no_shutdown(nbr_eth_port) else: - nbr["host"].shell("ifconfig {} down && sleep {} && ifconfig {} up".format( - nbr_eth_port, TestFaultHandling.MKA_TIMEOUT, nbr_eth_port)) + nbr["host"].shell("config interface shutdown {} && sleep {} && config interface 
startup {}".format( + nbr["port"], TestFaultHandling.MKA_TIMEOUT, nbr["port"])) def check_new_mka_session(): _, _, _, dut_egress_sa_table_new, dut_ingress_sa_table_new = get_appl_db( @@ -124,3 +124,4 @@ def check_mka_establishment(): disable_macsec_port(duthost, port_name) disable_macsec_port(nbr["host"], nbr["port"]) delete_macsec_profile(nbr["host"], nbr["port"], profile_name) + sleep(300) diff --git a/tests/pc/test_lag_2.py b/tests/pc/test_lag_2.py index c010be47a97..ef9bf965420 100644 --- a/tests/pc/test_lag_2.py +++ b/tests/pc/test_lag_2.py @@ -304,13 +304,18 @@ def has_lags(dut): "lacp_rate", "fallback"]) def test_lag(common_setup_teardown, duthosts, tbinfo, nbrhosts, fanouthosts, - conn_graph_facts, enum_dut_portchannel_with_completeness_level, testcase): # noqa F811 + conn_graph_facts, enum_dut_portchannel_with_completeness_level, testcase, request): # noqa F811 # We can't run single_lag test on vtestbed since there is no leaffanout if testcase == "single_lag" and is_vtestbed(duthosts[0]): pytest.skip("Skip single_lag test on vtestbed") if 'PortChannel201' in enum_dut_portchannel_with_completeness_level: pytest.skip("PortChannel201 is a specific configuration of t0-56-po2vlan topo, which is not supported by test") + # Skip lacp_rate testcases on KVM since setting lacp rate it is not supported on KVM + if testcase == "lacp_rate": + if request.config.getoption("--neighbor_type") == 'sonic': + pytest.skip("lacp_rate is not supported in vsonic") + ptfhost = common_setup_teardown dut_name, dut_lag = decode_dut_port_name(enum_dut_portchannel_with_completeness_level) diff --git a/tests/pfcwd/files/pfcwd_helper.py b/tests/pfcwd/files/pfcwd_helper.py index 7e78584a59e..dac7c3ab0bb 100644 --- a/tests/pfcwd/files/pfcwd_helper.py +++ b/tests/pfcwd/files/pfcwd_helper.py @@ -1,4 +1,3 @@ -import datetime import ipaddress import sys @@ -318,19 +317,22 @@ def select_test_ports(test_ports): """ selected_ports = dict() rx_ports = set() - seed = 
int(datetime.datetime.today().day) - for port, port_info in list(test_ports.items()): - rx_port = port_info["rx_port"] - if isinstance(rx_port, (list, tuple)): - rx_ports.update(rx_port) - else: - rx_ports.add(rx_port) - if (int(port_info['test_port_id']) % 15) == (seed % 15): - selected_ports[port] = port_info - - # filter out selected ports that also act as rx ports - selected_ports = {p: pi for p, pi in list(selected_ports.items()) - if p not in rx_port} + if len(test_ports) > 2: + modulo = int(len(test_ports)/3) + seed = int(len(test_ports)/2) + for port, port_info in test_ports.items(): + rx_port = port_info["rx_port"] + if isinstance(rx_port, (list, tuple)): + rx_ports.update(rx_port) + else: + rx_ports.add(rx_port) + if (int(port_info['test_port_id']) % modulo) == (seed % modulo): + selected_ports[port] = port_info + # filter out selected ports that also act as rx ports + selected_ports = {p: pi for p, pi in list(selected_ports.items()) + if p not in rx_port} + elif len(test_ports) == 2: + selected_ports = test_ports if not selected_ports: random_port = list(test_ports.keys())[0] diff --git a/tests/pfcwd/test_pfcwd_function.py b/tests/pfcwd/test_pfcwd_function.py index d5a83a683ea..6e0ae914900 100644 --- a/tests/pfcwd/test_pfcwd_function.py +++ b/tests/pfcwd/test_pfcwd_function.py @@ -559,7 +559,7 @@ def verify_rx_ingress(self, action): else: dst_port = "[ " + str(self.pfc_wd_rx_port_id) + " ]" ptf_params = {'router_mac': self.tx_mac, - 'vlan_mac': self.tx_mac, + 'vlan_mac': self.vlan_mac, 'queue_index': self.pfc_queue_index, 'pkt_count': self.pfc_wd_test_pkt_count, 'port_src': self.pfc_wd_test_port_id, @@ -625,7 +625,7 @@ def verify_other_pfc_pg(self): other_pg = self.pfc_queue_index + 1 ptf_params = {'router_mac': self.tx_mac, - 'vlan_mac': self.tx_mac, + 'vlan_mac': self.vlan_mac, 'queue_index': other_pg, 'pkt_count': self.pfc_wd_test_pkt_count, 'port_src': self.pfc_wd_test_port_id, @@ -817,6 +817,9 @@ def test_pfcwd_actions(self, request, fake_storm, 
setup_pfc_test, setup_dut_test self.timers = setup_info['pfc_timers'] self.ports = setup_info['selected_test_ports'] self.test_ports_info = setup_info['test_ports'] + if self.dut.topo_type == 't2': + key, value = list(self.ports.items())[0] + self.ports = {key: value} self.neighbors = setup_info['neighbors'] self.peer_dev_list = dict() self.fake_storm = fake_storm @@ -1065,6 +1068,9 @@ def test_pfcwd_port_toggle(self, request, fake_storm, setup_pfc_test, setup_dut_ self.timers = setup_info['pfc_timers'] self.ports = setup_info['selected_test_ports'] self.test_ports_info = setup_info['test_ports'] + if self.dut.topo_type == 't2': + key, value = list(self.ports.items())[0] + self.ports = {key: value} self.neighbors = setup_info['neighbors'] self.peer_dev_list = dict() self.fake_storm = fake_storm diff --git a/tests/platform_tests/api/test_sfp.py b/tests/platform_tests/api/test_sfp.py index 0c2f6cc401b..56dab31fbc5 100644 --- a/tests/platform_tests/api/test_sfp.py +++ b/tests/platform_tests/api/test_sfp.py @@ -97,17 +97,18 @@ class TestSfpApi(PlatformApiTestBase): ] # some new keys added for QSFP-DD and OSFP in 202205 or later branch - EXPECTED_XCVR_NEW_QSFP_DD_OSFP_INFO_KEYS = ['active_firmware', - 'host_lane_count', + EXPECTED_XCVR_NEW_QSFP_DD_OSFP_INFO_KEYS = ['host_lane_count', 'media_lane_count', 'cmis_rev', 'host_lane_assignment_option', - 'inactive_firmware', 'media_interface_technology', 'media_interface_code', 'host_electrical_interface', 'media_lane_assignment_option'] + EXPECTED_XCVR_NEW_QSFP_DD_OSFP_FIRMWARE_INFO_KEYS = ['active_firmware', + 'inactive_firmware'] + # These are fields which have been added in the common parsers # in sonic-platform-common/sonic_sfp, but since some vendors are # using their own custom parsers, they do not yet provide these @@ -389,7 +390,14 @@ def test_get_transceiver_info(self, duthosts, enum_rand_one_per_hwsku_hostname, active_apsel_hostlane_count = 8 UPDATED_EXPECTED_XCVR_INFO_KEYS = self.EXPECTED_XCVR_INFO_KEYS + \ 
self.EXPECTED_XCVR_NEW_QSFP_DD_OSFP_INFO_KEYS + \ + self.EXPECTED_XCVR_NEW_QSFP_DD_OSFP_FIRMWARE_INFO_KEYS + \ ["active_apsel_hostlane{}".format(n) for n in range(1, active_apsel_hostlane_count + 1)] + firmware_info_dict = sfp.get_transceiver_info_firmware_versions(platform_api_conn, i) + if self.expect(firmware_info_dict is not None, + "Unable to retrieve transceiver {} firmware info".format(i)): + if self.expect(isinstance(firmware_info_dict, dict), + "Transceiver {} firmware info appears incorrect".format(i)): + actual_keys.extend(list(firmware_info_dict.keys())) if 'ZR' in info_dict['media_interface_code']: UPDATED_EXPECTED_XCVR_INFO_KEYS = UPDATED_EXPECTED_XCVR_INFO_KEYS + \ self.QSFPZR_EXPECTED_XCVR_INFO_KEYS diff --git a/tests/platform_tests/conftest.py b/tests/platform_tests/conftest.py index 2e84158de2e..b80539232cb 100644 --- a/tests/platform_tests/conftest.py +++ b/tests/platform_tests/conftest.py @@ -418,10 +418,11 @@ def verify_required_events(duthost, event_counters, timing_data, verification_er observed_end_count = timing_data.get( key, {}).get(pattern, {}).get("End count", 0) expected_count = event_counters.get(pattern) - # If we're checking PORT_READY, and there are 0 port state change messages captured instead of however many - # was expected, treat it as a success. Some platforms (Mellanox, Dell S6100) have 0, some platforms (Arista - # 050cx3) have however many ports are up. - if observed_start_count != expected_count and (pattern != 'PORT_READY' or observed_start_count != 0): + # If we're checking PORT_READY, allow any number of PORT_READY messages between 0 and the number of ports. + # Some platforms appear to have a random number of these messages, other platforms have however many ports + # are up. + if observed_start_count != expected_count and ( + pattern != 'PORT_READY' or observed_start_count > expected_count): verification_errors.append("FAIL: Event {} was found {} times, when expected exactly {} times". 
format(pattern, observed_start_count, expected_count)) if key == "time_span" and observed_start_count != observed_end_count: diff --git a/tests/platform_tests/fwutil/conftest.py b/tests/platform_tests/fwutil/conftest.py index 1dd7ab85c60..8fa6c75487a 100644 --- a/tests/platform_tests/fwutil/conftest.py +++ b/tests/platform_tests/fwutil/conftest.py @@ -61,7 +61,7 @@ def extract_fw_data(fw_pkg_path): @pytest.fixture(scope='function') def random_component(duthost, fw_pkg): chass = list(show_firmware(duthost)["chassis"].keys())[0] - components = list(fw_pkg["chassis"].get(chass, {}).get("component", []).keys()) + components = list(fw_pkg["chassis"].get(chass, {}).get("component", {}).keys()) if 'ONIE' in components: components.remove('ONIE') if len(components) == 0: diff --git a/tests/platform_tests/test_kdump.py b/tests/platform_tests/test_kdump.py index 43e344775d2..34639446316 100644 --- a/tests/platform_tests/test_kdump.py +++ b/tests/platform_tests/test_kdump.py @@ -36,7 +36,7 @@ def wait_lc_healthy_if_sup(self, duthost, duthosts, localhost, conn_graph_facts, @pytest.fixture(autouse=True) def tearDown(self, duthosts, enum_rand_one_per_hwsku_hostname, - localhost, pdu_controller): + localhost, pdu_controller, conn_graph_facts, xcvr_skip_list): yield # If the SSH connection is not established, or any critical process is exited, # try to recover the DUT by PDU reboot. @@ -53,7 +53,7 @@ def tearDown(self, duthosts, enum_rand_one_per_hwsku_hostname, 'Recover {} by PDU reboot failed'.format(hostname)) # Wait until all critical processes are healthy. 
wait_critical_processes(duthost) - self.wait_lc_healthy_if_sup(duthost, duthosts, localhost) + self.wait_lc_healthy_if_sup(duthost, duthosts, localhost, conn_graph_facts, xcvr_skip_list) def test_kernel_panic(self, duthosts, enum_rand_one_per_hwsku_hostname, localhost, conn_graph_facts, xcvr_skip_list): diff --git a/tests/platform_tests/test_memory_exhaustion.py b/tests/platform_tests/test_memory_exhaustion.py index b829baab406..141df3dc657 100644 --- a/tests/platform_tests/test_memory_exhaustion.py +++ b/tests/platform_tests/test_memory_exhaustion.py @@ -64,6 +64,12 @@ def test_memory_exhaustion(self, duthosts, enum_rand_one_per_hwsku_hostname, loc # background process. # * Some DUTs with few free memory may reboot before ansible receive the result of shell # command, so we add `sleep 5` to ensure ansible receive the result first. + # Swapping is turned off so the OOM is triggered in a shorter time. + + res = duthost.command("sudo swapoff -a") + if res['rc']: + logging.error("Swapoff command failed: {}".format(res)) + cmd = 'nohup bash -c "sleep 5 && tail /dev/zero" &' res = duthost.shell(cmd) if not res.is_successful: diff --git a/tests/platform_tests/test_platform_info.py b/tests/platform_tests/test_platform_info.py index 55f0b690706..48a867c0414 100644 --- a/tests/platform_tests/test_platform_info.py +++ b/tests/platform_tests/test_platform_info.py @@ -206,6 +206,7 @@ def turn_all_outlets_on(pdu_ctrl): if not outlet["outlet_on"]: pdu_ctrl.turn_on_outlet(outlet) time.sleep(5) + time.sleep(5) def check_all_psu_on(dut, psu_test_results): @@ -288,7 +289,7 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, logging.info("Turn off outlet {}".format(outlet)) pdu_ctrl.turn_off_outlet(outlet) - time.sleep(30) + time.sleep(10) cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS) for line in cli_psu_status["stdout_lines"][2:]: @@ -302,7 +303,7 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, logging.info("Turn on outlet {}".format(outlet)) 
pdu_ctrl.turn_on_outlet(outlet) - time.sleep(30) + time.sleep(10) cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS) for line in cli_psu_status["stdout_lines"][2:]: diff --git a/tests/platform_tests/test_reboot.py b/tests/platform_tests/test_reboot.py index 13b0cf8c08e..3571c000435 100644 --- a/tests/platform_tests/test_reboot.py +++ b/tests/platform_tests/test_reboot.py @@ -41,7 +41,7 @@ def set_max_time_for_interfaces(duthost): global MAX_WAIT_TIME_FOR_INTERFACES plt_reboot_ctrl = get_plt_reboot_ctrl(duthost, 'test_reboot.py', 'cold') if plt_reboot_ctrl: - MAX_WAIT_TIME_FOR_INTERFACES = plt_reboot_ctrl.get('timeout', 300) + MAX_WAIT_TIME_FOR_INTERFACES = plt_reboot_ctrl.get('timeout', MAX_WAIT_TIME_FOR_INTERFACES) @pytest.fixture(scope="module", autouse=True) @@ -96,7 +96,7 @@ def reboot_and_check(localhost, dut, interfaces, xcvr_skip_list, def check_interfaces_and_services(dut, interfaces, xcvr_skip_list, - interfaces_wait_time=MAX_WAIT_TIME_FOR_INTERFACES, reboot_type=None): + interfaces_wait_time=None, reboot_type=None): """ Perform a further check after reboot-cause, including transceiver status, interface status @param localhost: The Localhost object. @@ -106,6 +106,9 @@ def check_interfaces_and_services(dut, interfaces, xcvr_skip_list, logging.info("Wait until all critical services are fully started") wait_critical_processes(dut) + if interfaces_wait_time is None: + interfaces_wait_time = MAX_WAIT_TIME_FOR_INTERFACES + if dut.is_supervisor_node(): logging.info("skipping interfaces related check for supervisor") else: @@ -126,7 +129,11 @@ def check_interfaces_and_services(dut, interfaces, xcvr_skip_list, dut, asic_index, interfaces_per_asic, xcvr_skip_list) logging.info("Check pmon daemon status") - assert check_pmon_daemon_status(dut), "Not all pmon daemons running." 
+ if dut.facts["platform"] == "x86_64-cel_e1031-r0": + result = wait_until(300, 20, 0, check_pmon_daemon_status, dut) + else: + result = check_pmon_daemon_status(dut) + assert result, "Not all pmon daemons running." if dut.facts["asic_type"] in ["mellanox"]: diff --git a/tests/platform_tests/test_reload_config.py b/tests/platform_tests/test_reload_config.py index 8775090939c..9b1201a2deb 100644 --- a/tests/platform_tests/test_reload_config.py +++ b/tests/platform_tests/test_reload_config.py @@ -7,7 +7,6 @@ import logging import pytest -import re from tests.common.fixtures.conn_graph_facts import conn_graph_facts # noqa F401 from tests.common.utilities import wait_until @@ -70,8 +69,12 @@ def test_reload_configuration(duthosts, enum_rand_one_per_hwsku_hostname, wait_critical_processes(duthost) logging.info("Wait some time for all the transceivers to be detected") - assert wait_until(300, 20, 0, check_all_interface_information, duthost, interfaces, xcvr_skip_list), \ - "Not all transceivers are detected in 300 seconds" + max_wait_time_for_transceivers = 300 + if duthost.facts["platform"] == "x86_64-cel_e1031-r0": + max_wait_time_for_transceivers = 900 + assert wait_until(max_wait_time_for_transceivers, 20, 0, check_all_interface_information, + duthost, interfaces, xcvr_skip_list), "Not all transceivers are detected \ + in {} seconds".format(max_wait_time_for_transceivers) logging.info("Check transceiver status") for asic_index in duthost.get_frontend_asic_ids(): @@ -125,9 +128,6 @@ def test_reload_configuration_checks(duthosts, enum_rand_one_per_hwsku_hostname, # we must give it a little longer or else it may falsely fail the test. 
wait_until(360, 1, 0, check_database_status, duthost) - # Check if interfaces-config.service is exited - wait_until(60, 1, 0, check_interfaces_config_service_status, duthost) - logging.info("Reload configuration check") out = duthost.shell("sudo config reload -y", executable="/bin/bash", module_ignore_errors=True) @@ -145,8 +145,6 @@ def test_reload_configuration_checks(duthosts, enum_rand_one_per_hwsku_hostname, logging.info("Checking config reload after system is up") # Check if all database containers have started wait_until(60, 1, 0, check_database_status, duthost) - # Check if interfaces-config.service is exited - wait_until(60, 1, 0, check_interfaces_config_service_status, duthost) out = duthost.shell("sudo config reload -y", executable="/bin/bash", module_ignore_errors=True) assert "Retry later" in out['stdout'] @@ -170,11 +168,3 @@ def test_reload_configuration_checks(duthosts, enum_rand_one_per_hwsku_hostname, assert "Retry later" not in out['stdout'] assert wait_until(300, 20, 0, config_system_checks_passed, duthost, delayed_services) - - -def check_interfaces_config_service_status(duthost): - # check interfaces-config.service status - regx_interface_config_service_exit = r'.*Main PID: \d+ \(code=exited, status=0\/SUCCESS\).*' - interface_config_server_status = duthost.command( - 'systemctl status interfaces-config.service', module_ignore_errors=True)['stdout'] - return re.search(regx_interface_config_service_exit, interface_config_server_status) diff --git a/tests/platform_tests/test_sequential_restart.py b/tests/platform_tests/test_sequential_restart.py index 90951599425..1bdf9907fdb 100644 --- a/tests/platform_tests/test_sequential_restart.py +++ b/tests/platform_tests/test_sequential_restart.py @@ -70,9 +70,12 @@ def restart_service_and_check(localhost, dut, enum_frontend_asic_index, service, wait_critical_processes(dut) logging.info("Wait some time for all the transceivers to be detected") - pytest_assert(wait_until(300, 20, 0, 
check_interface_information, dut, + interface_wait_time = 300 + if dut.facts["platform"] == "x86_64-cel_e1031-r0": + interface_wait_time = 900 + pytest_assert(wait_until(interface_wait_time, 20, 0, check_interface_information, dut, enum_frontend_asic_index, interfaces, xcvr_skip_list), - "Not all interface information are detected within 300 seconds") + "Not all interface information are detected within {} seconds".format(interface_wait_time)) logging.info("Check transceiver status on asic %s" % enum_frontend_asic_index) check_transceiver_basic(dut, enum_frontend_asic_index, interfaces, xcvr_skip_list) diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py index 7d41c868e65..80f12e19c6a 100644 --- a/tests/ptf_runner.py +++ b/tests/ptf_runner.py @@ -8,7 +8,7 @@ logger = logging.getLogger(__name__) -def ptf_collect(host, log_file): +def ptf_collect(host, log_file, skip_pcap=False): pos = log_file.rfind('.') filename_prefix = log_file[0:pos] if pos > -1 else log_file @@ -18,6 +18,8 @@ def ptf_collect(host, log_file): filename_log = './logs/ptf_collect/' + rename_prefix + '.' 
+ suffix + '.log' host.fetch(src=log_file, dest=filename_log, flat=True, fail_on_missing=False) allure.attach.file(filename_log, 'ptf_log: ' + filename_log, allure.attachment_type.TEXT) + if skip_pcap: + return pcap_file = filename_prefix + '.pcap' output = host.shell("[ -f {} ] && echo exist || echo null".format(pcap_file))['stdout'] if output == 'exist': @@ -92,7 +94,8 @@ def ptf_runner(host, testdir, testname, platform_dir=None, params={}, try: result = host.shell(cmd, chdir="/root", module_ignore_errors=module_ignore_errors) if log_file: - ptf_collect(host, log_file) + # when ptf cmd execution result is 0 (success), we need to skip collecting pcap file + ptf_collect(host, log_file, result is not None and result.get("rc", -1) == 0) if result: allure.attach(json.dumps(result, indent=4), 'ptf_console_result', allure.attachment_type.TEXT) if module_ignore_errors: diff --git a/tests/qos/files/cisco/qos_param_generator.py b/tests/qos/files/cisco/qos_param_generator.py index 3e2e65c7ed5..c291d4bc495 100644 --- a/tests/qos/files/cisco/qos_param_generator.py +++ b/tests/qos/files/cisco/qos_param_generator.py @@ -99,12 +99,12 @@ def gr_get_mantissa_exp(self, thr): found = False exp = 1 mantissa = 0 - reduced_thr = thr >> 4 - further_reduced_thr = thr >> 5 + reduced_thr = int(thr) >> 4 + further_reduced_thr = int(thr) >> 5 for i in range(32): ith_bit = 1 << i if further_reduced_thr < ith_bit <= reduced_thr: - mantissa = thr // ith_bit + mantissa = int(thr) // ith_bit exp = i found = True break @@ -156,6 +156,7 @@ def should_autogen(self, parametrizations): return autogen def __mark_skip(self, testcase, reason): + self.qos_params[testcase] = {} self.qos_params[testcase]["skip"] = reason def __define_shared_reservation_size(self): @@ -213,8 +214,13 @@ def __define_shared_reservation_size(self): "dst_port_i": [7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13], "pkt_counts": [3527, 3527, 3527, 3527, 3527, 3527, 1798, 1798, 846, 687, 687, 328, 1], "shared_limit_bytes": 41943552} - 
self.qos_params["shared_res_size_1"].update(res_1) - self.qos_params["shared_res_size_2"].update(res_2) + try: + self.qos_params["shared_res_size_1"].update(res_1) + self.qos_params["shared_res_size_2"].update(res_2) + except KeyError: + skip_reason = "Shared Res Size Keys are not found, will be skipping test." + self.__mark_skip("shared_res_size_1", skip_reason) + self.__mark_skip("shared_res_size_2", skip_reason) def __define_pfc_xoff_limit(self): if not self.should_autogen(["xoff_1", "xoff_2"]): diff --git a/tests/qos/files/qos_params.td3.yaml b/tests/qos/files/qos_params.td3.yaml index 644d554b341..9a16dc77f13 100644 --- a/tests/qos/files/qos_params.td3.yaml +++ b/tests/qos/files/qos_params.td3.yaml @@ -736,6 +736,36 @@ qos_params: pkts_num_trig_pfc: 59784 pkts_num_trig_ingr_drp: 60410 pkts_num_margin: 4 + pcbb_xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 60204 + pkts_num_trig_ingr_drp: 60829 + pkts_num_margin: 4 + pcbb_xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 60204 + pkts_num_trig_ingr_drp: 60829 + pkts_num_margin: 4 + pcbb_xoff_3: + outer_dscp: 2 + dscp: 3 + ecn: 1 + pg: 2 + pkts_num_trig_pfc: 60204 + pkts_num_trig_ingr_drp: 60829 + pkts_num_margin: 4 + pcbb_xoff_4: + outer_dscp: 6 + dscp: 4 + ecn: 1 + pg: 6 + pkts_num_trig_pfc: 60204 + pkts_num_trig_ingr_drp: 60829 + pkts_num_margin: 4 hdrm_pool_size: dscps: [3, 4] ecn: 1 diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index 0bab888fa43..032d494c710 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -9,6 +9,7 @@ import sys import six import copy +import time from tests.common.fixtures.ptfhost_utils import ptf_portmap_file # noqa F401 from tests.common.helpers.assertions import pytest_assert, pytest_require @@ -120,7 +121,7 @@ def runPtfTest(self, ptfhost, testCase='', testParams={}, relax=False): Raises: RunAnsibleModuleFail if ptf test fails """ - custom_options = " --disable-ipv6 --disable-vxlan --disable-geneve" \ + custom_options = " 
--disable-vxlan --disable-geneve" \ " --disable-erspan --disable-mpls --disable-nvgre" ptf_runner( ptfhost, @@ -675,8 +676,8 @@ def get_src_dst_asic_and_duts(self, duthosts, tbinfo, select_src_dst_dut_and_asi rtn_dict.update(select_src_dst_dut_and_asic) yield rtn_dict - def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, - dst_port_ids, get_src_dst_asic_and_duts, uplinkPortIds): + def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, dst_port_ids, + get_src_dst_asic_and_duts, uplinkPortIds, sysPortMap=None): """ Build map of test ports index and IPs @@ -687,6 +688,7 @@ def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, Returns: testPorts (dict): Map of test ports index and IPs + sysPortMap (dict): Map of system port IDs and Qos SAI test port IDs """ dstPorts = request.config.getoption("--qos_dst_ports") srcPorts = request.config.getoption("--qos_src_ports") @@ -754,19 +756,42 @@ def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, dstVlan3 = dst_test_port_ips[dstPort3]['vlan_id'] if 'vlan_id' in dst_test_port_ips[dstPort3] else None srcPort = srcPorts[0] if src_port_ids else src_test_port_ids[srcPorts[0]] srcVlan = src_test_port_ips[srcPort]['vlan_id'] if 'vlan_id' in src_test_port_ips[srcPort] else None + + src_port_ip = src_test_port_ips[srcPorts[0] if src_port_ids else src_test_port_ids[srcPorts[0]]] + # collecting the system ports associated with dst ports + # In case of PortChannel as dst port, all lag ports will be added to the list + # ex. 
{dstPort: system_port, dstPort1:system_port1 ...} + dst_all_sys_port = {} + if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ + get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': + sysPorts = sysPortMap[get_src_dst_asic_and_duts['dst_dut_index']][ + get_src_dst_asic_and_duts['dst_asic_index']] + for port_id in [dstPort, dstPort2, dstPort3]: + if port_id in sysPorts and port_id not in dst_all_sys_port: + dst_all_sys_port.update({port_id: sysPorts[port_id]['system_port']}) + if 'PortChannel' in sysPorts[port_id]['port_type']: + for sport, sysMap in sysPorts.items(): + if sysMap['port_type'] == sysPorts[port_id]['port_type'] and sport != port_id: + dst_all_sys_port.update({sport: sysMap['system_port']}) + return { - "dst_port_id": dstPort, - "dst_port_ip": dst_test_port_ips[dstPort]['peer_addr'], - "dst_port_vlan": dstVlan, - "dst_port_2_id": dstPort2, - "dst_port_2_ip": dst_test_port_ips[dstPort2]['peer_addr'], - "dst_port_2_vlan": dstVlan2, - 'dst_port_3_id': dstPort3, - "dst_port_3_ip": dst_test_port_ips[dstPort3]['peer_addr'], - "dst_port_3_vlan": dstVlan3, - "src_port_id": srcPort, - "src_port_ip": src_test_port_ips[srcPorts[0] if src_port_ids else src_test_port_ids[srcPorts[0]]]["peer_addr"], - "src_port_vlan": srcVlan + "dst_port_id": dstPort, + "dst_port_ip": dst_test_port_ips[dstPort]['peer_addr'], + "dst_port_ipv6": dst_test_port_ips[dstPort]['peer_addr_ipv6'], + "dst_port_vlan": dstVlan, + "dst_port_2_id": dstPort2, + "dst_port_2_ip": dst_test_port_ips[dstPort2]['peer_addr'], + "dst_port_2_ipv6": dst_test_port_ips[dstPort2]['peer_addr_ipv6'], + "dst_port_2_vlan": dstVlan2, + 'dst_port_3_id': dstPort3, + "dst_port_3_ip": dst_test_port_ips[dstPort3]['peer_addr'], + "dst_port_3_ipv6": dst_test_port_ips[dstPort3]['peer_addr_ipv6'], + "dst_port_3_vlan": dstVlan3, + "src_port_id": srcPort, + "src_port_ip": src_port_ip["peer_addr"], + "src_port_ipv6": src_port_ip["peer_addr_ipv6"], + "src_port_vlan": srcVlan, + 
"dst_sys_ports": dst_all_sys_port } @pytest.fixture(scope='class', autouse=True) @@ -800,6 +825,7 @@ def dutConfig( downlinkPortIds = [] downlinkPortIps = [] downlinkPortNames = [] + sysPortMap = {} src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] @@ -866,6 +892,15 @@ def dutConfig( uplinkPortIds.append(portIndex) uplinkPortIps.append(portConfig["peer_addr"]) uplinkPortNames.append(intf) + elif ipaddress.ip_interface(portConfig['peer_addr']).ip.version == 6: + portIndex = src_mgFacts["minigraph_ptf_indices"][intf] + if portIndex in testPortIds[src_dut_index][src_asic_index]: + if portIndex in dutPortIps[src_dut_index][src_asic_index]: + dutPortIps[src_dut_index][src_asic_index][portIndex].update( + {'peer_addr_ipv6': portConfig['peer_addr']}) + else: + portIpMap = {'peer_addr_ipv6': portConfig['peer_addr']} + dutPortIps[src_dut_index][src_asic_index].update({portIndex: portIpMap}) testPortIps[src_dut_index] = {} testPortIps[src_dut_index][src_asic_index] = self.__assignTestPortIps(src_mgFacts, topo) @@ -882,14 +917,14 @@ def dutConfig( testPortIds[src_dut_index] = {} for dut_asic in get_src_dst_asic_and_duts['all_asics']: dutPortIps[src_dut_index][dut_asic.asic_index] = {} - for iface, addr in dut_asic.get_active_ip_interfaces(tbinfo).items(): + for iface, addr in dut_asic.get_active_ip_interfaces(tbinfo, include_ipv6=True).items(): vlan_id = None if iface.startswith("Ethernet"): portName = iface if "." 
in iface: portName, vlan_id = iface.split(".") portIndex = src_mgFacts["minigraph_ptf_indices"][portName] - portIpMap = {'peer_addr': addr["peer_ipv4"]} + portIpMap = {'peer_addr': addr["peer_ipv4"], 'peer_addr_ipv6': addr["peer_ipv6"]} if vlan_id is not None: portIpMap['vlan_id'] = vlan_id dutPortIps[src_dut_index][dut_asic.asic_index].update({portIndex: portIpMap}) @@ -898,7 +933,7 @@ def dutConfig( iter(src_mgFacts["minigraph_portchannels"][iface]["members"]) ) portIndex = src_mgFacts["minigraph_ptf_indices"][portName] - portIpMap = {'peer_addr': addr["peer_ipv4"]} + portIpMap = {'peer_addr': addr["peer_ipv4"], 'peer_addr_ipv6': addr["peer_ipv6"]} dutPortIps[src_dut_index][dut_asic.asic_index].update({portIndex: portIpMap}) # If the leaf router is using separated DSCP_TO_TC_MAP on uplink/downlink ports. # we also need to test them separately @@ -934,24 +969,52 @@ def dutConfig( src_asic = get_src_dst_asic_and_duts['src_asic'] dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] dst_asic = get_src_dst_asic_and_duts['dst_asic'] + src_system_port = {} + if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ + get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': + src_system_port = src_dut.config_facts(host=src_dut.hostname, source='running')['ansible_facts'][ + 'SYSTEM_PORT'][src_dut.hostname] # Lets get data for the src dut and src asic dutPortIps[src_dut_index] = {} + sysPortMap[src_dut_index] = {} testPortIds[src_dut_index] = {} dutPortIps[src_dut_index][src_asic_index] = {} - active_ips = src_asic.get_active_ip_interfaces(tbinfo) + sysPortMap[src_dut_index][src_asic_index] = {} + active_ips = src_asic.get_active_ip_interfaces(tbinfo, include_ipv6=True) for iface, addr in active_ips.items(): if iface.startswith("Ethernet") and ("Ethernet-Rec" not in iface): portIndex = src_mgFacts["minigraph_ptf_indices"][iface] - portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': iface} + portIpMap = {'peer_addr': 
addr["peer_ipv4"], 'peer_addr_ipv6': addr['peer_ipv6'], + 'port': iface} dutPortIps[src_dut_index][src_asic_index].update({portIndex: portIpMap}) + # Map port IDs to system port for dnx chassis + if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ + get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': + sys_key = src_asic.namespace + '|' + iface + if sys_key in src_system_port: + system_port = src_system_port[sys_key]['system_port_id'] + sysPort = {'port': iface, 'system_port': system_port, 'port_type': iface} + sysPortMap[src_dut_index][src_asic_index].update({portIndex: sysPort}) + elif iface.startswith("PortChannel"): portName = next( iter(src_mgFacts["minigraph_portchannels"][iface]["members"]) ) portIndex = src_mgFacts["minigraph_ptf_indices"][portName] - portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': portName} + portIpMap = {'peer_addr': addr["peer_ipv4"], 'peer_addr_ipv6': addr['peer_ipv6'], + 'port': portName} dutPortIps[src_dut_index][src_asic_index].update({portIndex: portIpMap}) + # Map lag port IDs to system port IDs for dnx chassis + if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ + get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': + for portName in src_mgFacts["minigraph_portchannels"][iface]["members"]: + sys_key = src_asic.namespace + '|' + portName + port_Index = src_mgFacts["minigraph_ptf_indices"][portName] + if sys_key in src_system_port: + system_port = src_system_port[sys_key]['system_port_id'] + sysPort = {'port': portName, 'system_port': system_port, 'port_type': iface} + sysPortMap[src_dut_index][src_asic_index].update({port_Index: sysPort}) testPortIds[src_dut_index][src_asic_index] = sorted(dutPortIps[src_dut_index][src_asic_index].keys()) @@ -963,22 +1026,50 @@ def dutConfig( dst_mgFacts = dst_dut.get_extended_minigraph_facts(tbinfo) dutPortIps[dst_dut_index] = {} testPortIds[dst_dut_index] = {} + sysPortMap[dst_dut_index] = {} + if 
'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ + get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': + dst_system_port = dst_dut.config_facts(host=dst_dut.hostname, source='running')[ + 'ansible_facts']['SYSTEM_PORT'][dst_dut.hostname] else: dst_mgFacts = src_mgFacts + dst_system_port = src_system_port dutPortIps[dst_dut_index][dst_asic_index] = {} - active_ips = dst_asic.get_active_ip_interfaces(tbinfo) + sysPortMap[dst_dut_index][dst_asic_index] = {} + active_ips = dst_asic.get_active_ip_interfaces(tbinfo, include_ipv6=True) for iface, addr in active_ips.items(): if iface.startswith("Ethernet") and ("Ethernet-Rec" not in iface): portIndex = dst_mgFacts["minigraph_ptf_indices"][iface] - portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': iface} + portIpMap = {'peer_addr': addr["peer_ipv4"], 'peer_addr_ipv6': addr['peer_ipv6'], + 'port': iface} dutPortIps[dst_dut_index][dst_asic_index].update({portIndex: portIpMap}) + # Map port IDs to system port IDs + if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ + get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': + sys_key = dst_asic.namespace + '|' + iface + if sys_key in dst_system_port: + system_port = dst_system_port[sys_key]['system_port_id'] + sysPort = {'port': iface, 'system_port': system_port, 'port_type': iface} + sysPortMap[dst_dut_index][dst_asic_index].update({portIndex: sysPort}) + elif iface.startswith("PortChannel"): portName = next( iter(dst_mgFacts["minigraph_portchannels"][iface]["members"]) ) portIndex = dst_mgFacts["minigraph_ptf_indices"][portName] - portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': portName} + portIpMap = {'peer_addr': addr["peer_ipv4"], 'peer_addr_ipv6': addr['peer_ipv6'], + 'port': portName} dutPortIps[dst_dut_index][dst_asic_index].update({portIndex: portIpMap}) + # Map lag port IDs to system port IDs + if 'platform_asic' in get_src_dst_asic_and_duts["src_dut"].facts and \ + 
get_src_dst_asic_and_duts["src_dut"].facts['platform_asic'] == 'broadcom-dnx': + for portName in dst_mgFacts["minigraph_portchannels"][iface]["members"]: + sys_key = dst_asic.namespace + '|' + portName + port_Index = dst_mgFacts["minigraph_ptf_indices"][portName] + if sys_key in dst_system_port: + system_port = dst_system_port[sys_key]['system_port_id'] + sysPort = {'port': portName, 'system_port': system_port, 'port_type': iface} + sysPortMap[dst_dut_index][dst_asic_index].update({port_Index: sysPort}) testPortIds[dst_dut_index][dst_asic_index] = sorted(dutPortIps[dst_dut_index][dst_asic_index].keys()) @@ -1047,8 +1138,8 @@ def dutConfig( if dualTor: testPortIds = dualTorPortIndexes - testPorts = self.__buildTestPorts(request, testPortIds, testPortIps, - src_port_ids, dst_port_ids, get_src_dst_asic_and_duts, uplinkPortIds) + testPorts = self.__buildTestPorts(request, testPortIds, testPortIps, src_port_ids, dst_port_ids, + get_src_dst_asic_and_duts, uplinkPortIds, sysPortMap) # Update the uplink/downlink ports to testPorts testPorts.update({ "uplink_port_ids": uplinkPortIds, @@ -1476,7 +1567,6 @@ def dutQosConfig( if sub_folder_dir not in sys.path: sys.path.append(sub_folder_dir) import qos_param_generator - dutTopo = "topo-any" if (get_src_dst_asic_and_duts['src_dut_index'] == get_src_dst_asic_and_duts['dst_dut_index'] and get_src_dst_asic_and_duts['src_asic_index'] == @@ -1609,14 +1699,23 @@ def populateArpEntries_T2( Raises: RunAnsibleModuleFail if ptf test fails """ + testParams = dict() + src_is_multi_asic = False + dst_is_multi_asic = False if ('platform_asic' in dutTestParams["basicParams"] and dutTestParams["basicParams"]["platform_asic"] == "broadcom-dnx"): - testParams = dutTestParams["basicParams"] + if get_src_dst_asic_and_duts['src_dut'].sonichost.is_multi_asic: + src_is_multi_asic = True + if get_src_dst_asic_and_duts['dst_dut'].sonichost.is_multi_asic: + dst_is_multi_asic = True + testParams.update(dutTestParams["basicParams"]) 
testParams.update(dutConfig["testPorts"]) testParams.update({ "testPortIds": dutConfig["testPortIds"], "testPortIps": dutConfig["testPortIps"], - "testbed_type": dutTestParams["topo"] + "testbed_type": dutTestParams["topo"], + "src_is_multi_asic": src_is_multi_asic, + "dst_is_multi_asic": dst_is_multi_asic }) self.runPtfTest( ptfhost, testCase="sai_qos_tests.ARPpopulate", testParams=testParams @@ -1960,7 +2059,9 @@ def resetWatermark( for dut_asic in get_src_dst_asic_and_duts['all_asics']: dut_asic.command("counterpoll watermark enable") dut_asic.command("counterpoll queue enable") - dut_asic.command("sleep 70") + + time.sleep(70) + for dut_asic in get_src_dst_asic_and_duts['all_asics']: dut_asic.command("counterpoll watermark disable") dut_asic.command("counterpoll queue disable") @@ -2202,22 +2303,12 @@ def skip_src_dst_different_asic(self, dutConfig): @pytest.fixture(scope="function", autouse=False) def skip_pacific_dst_asic(self, dutConfig): - if dutConfig['dstDutAsic'] == "pac": + if dutConfig.get('dstDutAsic', 'UnknownDstDutAsic') == "pac": pytest.skip( "This test is skipped since egress asic is cisco-8000 Q100.") yield return - @pytest.fixture(scope="function", autouse=False) - def skip_longlink(self, dutQosConfig): - portSpeedCableLength = dutQosConfig["portSpeedCableLength"] - match = re.search("_([0-9]*)m", portSpeedCableLength) - if match and int(match.group(1)) > 2000: - pytest.skip( - "This test is skipped for longlink.") - yield - return - def populate_arp_entries( self, get_src_dst_asic_and_duts, ptfhost, dutTestParams, dutConfig, releaseAllPorts, handleFdbAging, tbinfo, lower_tor_host # noqa F811 @@ -2253,13 +2344,3 @@ def populate_arp_entries( self.runPtfTest( ptfhost, testCase=saiQosTest, testParams=testParams ) - - @pytest.fixture(scope="function", autouse=False) - def skip_longlink(self, dutQosConfig): - portSpeedCableLength = dutQosConfig["portSpeedCableLength"] - match = re.search("_([0-9]*)m", portSpeedCableLength) - if match and 
int(match.group(1)) > 2000: - pytest.skip( - "This test is skipped for longlink.") - yield - return diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index 366de0b5c49..92f0740d331 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -69,6 +69,8 @@ def ignore_expected_loganalyzer_exception(get_src_dst_asic_and_duts, loganalyzer # The following error log is related to the bug of https://github.com/sonic-net/sonic-buildimage/issues/13265 ".*ERR lldp#lldpmgrd.*Command failed.*lldpcli.*configure.*ports.*lldp.*unknown command from argument" ".*configure.*command was failed.*times, disabling retry.*" + # Error related to syncd socket-timeout intermittenly + ".*ERR syncd[0-9]*#dsserve: _ds2tty broken pipe.*" ] if loganalyzer: @@ -375,7 +377,8 @@ def testQosSaiPfcXoffLimit( "pkts_num_leak_out": qosConfig["pkts_num_leak_out"], "pkts_num_trig_pfc": qosConfig[xoffProfile]["pkts_num_trig_pfc"], "pkts_num_trig_ingr_drp": qosConfig[xoffProfile]["pkts_num_trig_ingr_drp"], - "hwsku": dutTestParams['hwsku'] + "hwsku": dutTestParams['hwsku'], + "src_dst_asic_diff": (dutConfig['dutAsic'] != dutConfig['dstDutAsic']) }) if "platform_asic" in dutTestParams["basicParams"]: @@ -650,8 +653,8 @@ def testQosSaiPfcXonLimit( "pkts_num_dismiss_pfc": qosConfig[xonProfile]["pkts_num_dismiss_pfc"], "pkts_num_leak_out": dutQosConfig["param"][portSpeedCableLength]["pkts_num_leak_out"], "hwsku": dutTestParams['hwsku'], - "pkts_num_egr_mem": qosConfig[xonProfile].get('pkts_num_egr_mem', None) - + "pkts_num_egr_mem": qosConfig[xonProfile].get('pkts_num_egr_mem', None), + "src_dst_asic_diff": (dutConfig['dutAsic'] != dutConfig['dstDutAsic']) }) if "platform_asic" in dutTestParams["basicParams"]: @@ -1166,6 +1169,7 @@ def testQosSaiLossyQueue( "buffer_max_size": ingressLossyProfile["static_th"], "headroom_size": ingressLossyProfile["size"], "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_sys_ports": dutConfig["testPorts"]["dst_sys_ports"], "dst_port_ip": 
dutConfig["testPorts"]["dst_port_ip"], "dst_port_2_id": dutConfig["testPorts"]["dst_port_2_id"], "dst_port_2_ip": dutConfig["testPorts"]["dst_port_2_ip"], @@ -1289,8 +1293,9 @@ def testQosSaiLossyQueueVoq( except Exception: raise + @pytest.mark.parametrize("ip_version", ["ipv4", "ipv6"]) def testQosSaiDscpQueueMapping( - self, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dut_qos_maps # noqa F811 + self, ip_version, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dut_qos_maps # noqa F811 ): """ Test QoS SAI DSCP to queue mapping @@ -1318,11 +1323,22 @@ def testQosSaiDscpQueueMapping( testParams = dict() testParams.update(dutTestParams["basicParams"]) + + if ip_version == "ipv6": + testParams.update({ + "src_port_ip": dutConfig["testPorts"]["src_port_ipv6"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ipv6"], + "ipv6": True + }) + else: + testParams.update({ + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + }) + testParams.update({ "dst_port_id": dutConfig["testPorts"]["dst_port_id"], - "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], "src_port_id": dutConfig["testPorts"]["src_port_id"], - "src_port_ip": dutConfig["testPorts"]["src_port_ip"], "hwsku": dutTestParams['hwsku'], "dual_tor": dutConfig['dualTor'], "dual_tor_scenario": dutConfig['dualTorScenario'] @@ -1552,7 +1568,7 @@ def testQosSaiDwrr( @pytest.mark.parametrize("pgProfile", ["wm_pg_shared_lossless", "wm_pg_shared_lossy"]) def testQosSaiPgSharedWatermark( self, pgProfile, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dutQosConfig, - resetWatermark, _skip_watermark_multi_DUT + resetWatermark, _skip_watermark_multi_DUT, skip_src_dst_different_asic ): """ Test QoS SAI PG shared watermark test for lossless/lossy traffic @@ -1586,7 +1602,7 @@ def testQosSaiPgSharedWatermark( if "wm_pg_shared_lossless" in pgProfile: pktsNumFillShared = qosConfig[pgProfile]["pkts_num_trig_pfc"] elif 
"wm_pg_shared_lossy" in pgProfile: - if dutConfig['dstDutAsic'] == "pac": + if dutConfig.get('dstDutAsic', 'UnknownDstDutAsic') == "pac": pytest.skip( "PGSharedWatermark: Lossy test is not applicable in " "cisco-8000 Q100 platform.") diff --git a/tests/route/test_forced_mgmt_route.py b/tests/route/test_forced_mgmt_route.py new file mode 100644 index 00000000000..8d8c3a94c0e --- /dev/null +++ b/tests/route/test_forced_mgmt_route.py @@ -0,0 +1,281 @@ +import ipaddress +import json +import logging +import pytest + +from tests.common.helpers.assertions import pytest_assert +from tests.common.utilities import wait_until +from tests.override_config_table.utilities import backup_config, restore_config, \ + reload_minigraph_with_golden_config +from tests.syslog.syslog_utils import is_mgmt_vrf_enabled + +pytestmark = [ + pytest.mark.topology('t0'), + pytest.mark.device_type('vs') +] + +logger = logging.getLogger(__name__) + + +# forced mgmt route priority hardcoded to 32764 in following j2 template: +# https://github.com/sonic-net/sonic-buildimage/blob/master/files/image_config/interfaces/interfaces.j2#L82 +FORCED_MGMT_ROUTE_PRIORITY = 32764 + + +@pytest.fixture +def backup_restore_config(duthosts, enum_rand_one_per_hwsku_hostname): + """make sure tacacs server running after UT finish""" + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + + # Backup config before test + CONFIG_DB = "/etc/sonic/config_db.json" + CONFIG_DB_BACKUP = "/etc/sonic/config_db.json_before_override" + backup_config(duthost, CONFIG_DB, CONFIG_DB_BACKUP) + + yield + + # Restore config after test finish + restore_config(duthost, CONFIG_DB, CONFIG_DB_BACKUP) + + +def get_interface_reload_timestamp(duthost): + timestamp = duthost.command("sudo systemctl show --no-pager interfaces-config" + " -p ExecMainExitTimestamp --value")["stdout"] + logger.info("interfaces config timestamp {}".format(timestamp)) + + return timestamp + + +def get_file_hash(duthost, file): + hash = duthost.command("sha1sum 
{}".format(file))["stdout"] + logger.debug("file hash: {}".format(hash)) + + return hash + + +def wait_for_file_changed(duthost, file, action, *args, **kwargs): + original_hash = get_file_hash(duthost, file) + last_timestamp = get_interface_reload_timestamp(duthost) + + action(*args, **kwargs) + + def hash_and_timestamp_changed(duthost, file): + latest_hash = get_file_hash(duthost, file) + latest_timestamp = get_interface_reload_timestamp(duthost) + return latest_hash != original_hash and latest_timestamp != last_timestamp + + exist = wait_until(10, 1, 0, hash_and_timestamp_changed, duthost, file) + pytest_assert(exist, "File {} does not change after 10 seconds.".format(file)) + + +def address_type(address): + return type(ipaddress.ip_network(str(address), False)) + + +def check_ip_rule_exist(duthost, address, check_exist): + logging.debug("check_ip_rule_exist for ip:{} exist:{}".format(address, check_exist)) + rule_command = "ip --json rule list" + if address_type(address) is ipaddress.IPv6Network: + rule_command = "ip --json -6 rule list" + + ip_rules = json.loads(duthost.command(rule_command)["stdout"]) + logging.debug("ip rule list: {}".format(ip_rules)) + + exist = False + dst = address.split("/")[0] + dstlen = address.split("/")[1] + for ip_rule in ip_rules: + if (ip_rule.get("priority", "") == FORCED_MGMT_ROUTE_PRIORITY and + ip_rule.get("src", "") == 'all' and + ip_rule.get("dst", "") == dst and + ip_rule.get("dstlen", "") == int(dstlen) and + ip_rule.get("table", "") == 'default'): + exist = True + + return check_exist == exist + + +def test_forced_mgmt_route_add_and_remove_by_mgmt_port_status( + duthosts, + enum_rand_one_per_hwsku_hostname, + backup_restore_config): # noqa: F401 + """ + Check when mgmt. port is up, then forced mgmt route added to route table. + When mgmt. port is down (oper down), then forced mgmt route removed from route table. 
+ """ + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + + # When management-vrf enabled, IPV6 route of management interface will not add to 'default' route table + if is_mgmt_vrf_enabled(duthost): + logging.info("Ignore IPV6 default route table test because management-vrf enabled") + return + + # Skip multi-asic because override_config format are different. + if duthost.is_multi_asic: + pytest.skip("Skip test_forced_mgmt_route_add_and_remove_by_mgmt_port_status for multi-asic device") + + # get current mgmt interface data from config_db.json + config_db_data = duthost.shell("cat /etc/sonic/config_db.json")['stdout'] + config_db_json = json.loads(config_db_data) + config_db_mgmt_interface = config_db_json["MGMT_INTERFACE"] + config_db_port = config_db_json["MGMT_PORT"] + + # Skip multi-asic because override_config format are different. + if 'eth1' in config_db_port: + pytest.skip("Skip test_forced_mgmt_route_add_and_remove_by_mgmt_port_status for multi-mgmt device") + + # Add eth1 to mgmt interface and port + ipv4_forced_mgmt_address = "172.17.1.1/24" + ipv6_forced_mgmt_address = "fec1::fffe:afa:1/64" + config_db_mgmt_interface["eth1|10.250.1.101/24"] = { + "forced_mgmt_routes": [ + ipv4_forced_mgmt_address + ], + "gwaddr": "10.250.1.1" + } + config_db_mgmt_interface["eth1|fec1::ffff:afa:1/64"] = { + "forced_mgmt_routes": [ + ipv6_forced_mgmt_address + ], + "gwaddr": "fec1::1" + } + config_db_port["eth1"] = { + "admin_status": "up", + "alias": "eth1" + } + + override_config = {} + override_config["MGMT_INTERFACE"] = config_db_mgmt_interface + override_config["MGMT_PORT"] = config_db_port + logging.debug("override_config: {}".format(override_config)) + wait_for_file_changed( + duthost, + "/etc/network/interfaces", + reload_minigraph_with_golden_config, + duthost, + override_config) + + # Get interface and check config generate correct + interfaces = duthost.command("cat /etc/network/interfaces")['stdout'] + logging.debug("interfaces: {}".format(interfaces)) + 
pytest_assert("iface eth1 inet static" in interfaces) + pytest_assert("up ip -4 rule add pref {} to {} table default" + .format(FORCED_MGMT_ROUTE_PRIORITY, ipv4_forced_mgmt_address) in interfaces) + pytest_assert("pre-down ip -4 rule delete pref {} to {} table default" + .format(FORCED_MGMT_ROUTE_PRIORITY, ipv4_forced_mgmt_address) in interfaces) + pytest_assert("iface eth1 inet6 static" in interfaces) + pytest_assert("up ip -6 rule add pref {} to {} table default" + .format(FORCED_MGMT_ROUTE_PRIORITY, ipv6_forced_mgmt_address) in interfaces) + pytest_assert("pre-down ip -6 rule delete pref {} to {} table default" + .format(FORCED_MGMT_ROUTE_PRIORITY, ipv6_forced_mgmt_address) in interfaces) + + # startup eth1 and check forced mgmt route exist + duthost.command("sudo ifup eth1") + interfaces = duthost.command("show ip interfaces") + logging.debug("show ip interfaces: {}".format(interfaces)) + + # when eth1 up, forced mgmt route on this interface should exit + exist = wait_until(10, 1, 0, check_ip_rule_exist, duthost, ipv4_forced_mgmt_address, True) + pytest_assert(exist, "IP rule for {} does not exist.".format(ipv4_forced_mgmt_address)) + + exist = wait_until(10, 1, 0, check_ip_rule_exist, duthost, ipv6_forced_mgmt_address, True) + pytest_assert(exist, "IP rule for {} does not exist.".format(ipv6_forced_mgmt_address)) + + # shutdown eth1 and check forced mgmt route exist + duthost.command("sudo ifdown eth1") + interfaces = duthost.command("show ip interfaces") + logging.debug("show ip interfaces: {}".format(interfaces)) + + # when eth1 down, forced mgmt route on this interface should not exit + exist = wait_until(10, 1, 0, check_ip_rule_exist, duthost, ipv4_forced_mgmt_address, False) + pytest_assert(exist, "IP rule for {} should not exist.".format(ipv4_forced_mgmt_address)) + + exist = wait_until(10, 1, 0, check_ip_rule_exist, duthost, ipv6_forced_mgmt_address, False) + pytest_assert(exist, "IP rule for {} should not exist.".format(ipv6_forced_mgmt_address)) + + 
+def test_update_forced_mgmt( + duthosts, + enum_rand_one_per_hwsku_hostname, + backup_restore_config): # noqa: F401 + """ + Check when update forced mgmt in CONFIG_DB, interfaces and routes will be update automatically. + """ + duthost = duthosts[enum_rand_one_per_hwsku_hostname] + + # Get interface and check config generate correct + mgmt_interface_keys = duthost.command("sonic-db-cli CONFIG_DB keys 'MGMT_INTERFACE|eth0|*'")['stdout'] + logging.debug("mgmt_interface_keys: {}".format(mgmt_interface_keys)) + + for interface_key in mgmt_interface_keys.split('\n'): + logging.debug("interface_key: {}".format(interface_key)) + interface_address = interface_key.split('|')[2] + + # Get current forced mgmt routes + original_forced_mgmt_routes = duthost.command("sonic-db-cli CONFIG_DB HGET '{}' forced_mgmt_routes@" + .format(interface_key))['stdout'] + logging.debug("forced_mgmt_routes: {}, interface address: {}" + .format(original_forced_mgmt_routes, interface_address)) + + # Prepare new forced mgmt routes + test_route = "1::2:3:4/64" + ip_type = "-6" + if type(ipaddress.ip_network(interface_address, False)) == ipaddress.IPv4Network: + test_route = "1.2.3.4/24" + ip_type = "-4" + + updated_forced_mgmt_routes = original_forced_mgmt_routes + if original_forced_mgmt_routes != "": + updated_forced_mgmt_routes += "," + updated_forced_mgmt_routes += test_route + + # Update current forced mgmt routes + logging.debug("updated_forced_mgmt_routes: {}".format(updated_forced_mgmt_routes)) + command = "sonic-db-cli CONFIG_DB HSET '{}' forced_mgmt_routes@ '{}'"\ + .format(interface_key, updated_forced_mgmt_routes) + + def update_interface_config(duthost, command): + duthost.command(command) + + wait_for_file_changed( + duthost, + "/etc/network/interfaces", + update_interface_config, + duthost, + command) + + # Check /etc/network/interfaces generate correct + interfaces = duthost.command("cat /etc/network/interfaces")['stdout'] + logging.debug("interfaces: {}".format(interfaces)) + + 
pytest_assert("up ip {} rule add pref {} to {} table default" + .format(ip_type, FORCED_MGMT_ROUTE_PRIORITY, test_route) in interfaces) + pytest_assert("pre-down ip {} rule delete pref {} to {} table default" + .format(ip_type, FORCED_MGMT_ROUTE_PRIORITY, test_route) in interfaces) + + # Check forced mgmt route add to route table + exist = wait_until(10, 1, 0, check_ip_rule_exist, duthost, test_route, True) + pytest_assert(exist, "IP rule for {} does not exist.".format(test_route)) + + # Revert current forced mgmt routes + logging.debug("updated_forced_mgmt_routes: {}".format(original_forced_mgmt_routes)) + command = "sonic-db-cli CONFIG_DB HSET '{}' forced_mgmt_routes@ '{}'"\ + .format(interface_key, original_forced_mgmt_routes) + wait_for_file_changed( + duthost, + "/etc/network/interfaces", + update_interface_config, + duthost, + command) + + # Check /etc/network/interfaces generate correct + interfaces = duthost.command("cat /etc/network/interfaces")['stdout'] + logging.debug("interfaces: {}".format(interfaces)) + pytest_assert("up ip {} rule add pref {} to {} table default" + .format(ip_type, FORCED_MGMT_ROUTE_PRIORITY, test_route) not in interfaces) + pytest_assert("pre-down ip {} rule delete pref {} to {} table default" + .format(ip_type, FORCED_MGMT_ROUTE_PRIORITY, test_route) not in interfaces) + + # Check forced mgmt route removed from route table + exist = wait_until(10, 1, 0, check_ip_rule_exist, duthost, test_route, False) + pytest_assert(exist, "IP rule for {} should not exist.".format(test_route)) diff --git a/tests/route/test_route_flap.py b/tests/route/test_route_flap.py index 901fbd02bbb..cc92131a4ef 100644 --- a/tests/route/test_route_flap.py +++ b/tests/route/test_route_flap.py @@ -98,6 +98,96 @@ def get_ptf_recv_ports(duthost, tbinfo): return recv_ports +def get_neighbor_info(duthost, dev_port, tbinfo): + """ + This function returns the neighbor type of + the chosen dev_port based on route info + + Args: + duthost: DUT belong to the testbed. 
+ dev_port: Chosen dev_port based on route info + tbinfo: A fixture to gather information about the testbed. + """ + neighbor_type = '' + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + neighs = config_facts['BGP_NEIGHBOR'] + dev_neigh_mdata = config_facts['DEVICE_NEIGHBOR_METADATA'] if 'DEVICE_NEIGHBOR_METADATA' in config_facts else {} + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + nbr_port_map = mg_facts['minigraph_port_name_to_alias_map'] \ + if 'minigraph_port_name_to_alias_map' in mg_facts else {} + for neighbor in neighs: + local_ip = neighs[neighbor]['local_addr'] + nbr_port = get_port_by_ip(config_facts, local_ip) + if 'Ethernet' in nbr_port: + for p_key, p_value in nbr_port_map.items(): + if p_value == nbr_port: + nbr_port = p_key + if dev_port == nbr_port: + neighbor_name = neighs[neighbor]['name'] + for k, v in dev_neigh_mdata.items(): + if k == neighbor_name: + neighbor_type = v['type'] + return neighbor_type + + +def get_port_by_ip(config_facts, ipaddr): + """ + This function returns port name based on ip address + """ + if ':' in ipaddr: + iptype = "ipv6" + else: + iptype = "ipv4" + + intf = {} + intf.update(config_facts.get('INTERFACE', {})) + if "PORTCHANNEL_INTERFACE" in config_facts: + intf.update(config_facts['PORTCHANNEL_INTERFACE']) + for a_intf in intf: + for addrs in intf[a_intf]: + intf_ip = addrs.split('/') + if iptype == 'ipv6' and ':' in intf_ip[0] and intf_ip[0].lower() == ipaddr.lower(): + return a_intf + elif iptype == 'ipv4' and ':' not in intf_ip[0] and intf_ip[0] == ipaddr: + return a_intf + + raise Exception("Did not find port for IP %s" % ipaddr) + + +def get_all_ptf_recv_ports(duthosts, tbinfo, recv_neigh_list): + """ + This function returns all the ptf ports of + all the duts w.r.t received neighbors' list, even for multi dut chassis + """ + recv_ports = [] + for duthost in duthosts: + if duthost.is_supervisor_node(): + continue + mg_facts = 
duthost.get_extended_minigraph_facts(tbinfo) + for interface, neighbor in mg_facts["minigraph_neighbors"].items(): + if neighbor['name'] in recv_neigh_list and interface in mg_facts["minigraph_ptf_indices"]: + ptf_idx = mg_facts["minigraph_ptf_indices"][interface] + recv_ports.append(ptf_idx) + return recv_ports + + +def get_all_recv_neigh(duthosts, neigh_type): + """ + This function returns all the neighbors of + same type for dut, including multi dut chassis + """ + recv_neigh_list = [] + for duthost in duthosts: + if duthost.is_supervisor_node(): + continue + config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'] + device_neighbor_metadata = config_facts['DEVICE_NEIGHBOR_METADATA'] + for k, v in device_neighbor_metadata.items(): + if v['type'] == neigh_type: + recv_neigh_list.append(k) + return recv_neigh_list + + def get_ptf_send_ports(duthost, tbinfo, dev_port): if tbinfo['topo']['name'] in ['t0', 't1-lag', 'm0']: mg_facts = duthost.get_extended_minigraph_facts(tbinfo) @@ -132,7 +222,7 @@ def check_route(duthost, route, dev_port, operation): "Route {} was not announced {}".format(route, result)) -def send_recv_ping_packet(ptfadapter, ptf_send_port, ptf_recv_ports, dst_mac, exp_src_mac, src_ip, dst_ip): +def send_recv_ping_packet(ptfadapter, ptf_send_port, ptf_recv_ports, dst_mac, exp_src_mac, src_ip, dst_ip, tbinfo): # use ptf sender interface mac for easy identify testing packets src_mac = ptfadapter.dataplane.get_mac(0, ptf_send_port) pkt = testutils.simple_icmp_packet( @@ -142,6 +232,9 @@ def send_recv_ping_packet(ptfadapter, ptf_send_port, ptf_recv_ports, dst_mac, ex ext_pkt['Ether'].src = exp_src_mac masked_exp_pkt = Mask(ext_pkt) + # Mask src_mac for T2 multi-dut chassis, since the packet can be received on any of the dut's ptf-ports + if 't2' in tbinfo["topo"]["name"]: + masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") 
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "tos") masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "len") @@ -227,7 +320,7 @@ def get_internal_interfaces(duthost): # Then check for voq chassis: get voq inband interface for later filtering voq_inband_interfaces = duthost.get_voq_inband_interfaces() - internal_intfs.append([voq_inband_interfaces]) + internal_intfs += voq_inband_interfaces return internal_intfs @@ -339,7 +432,13 @@ def test_route_flap(duthosts, tbinfo, ptfhost, ptfadapter, # choose one ptf port to send msg ptf_send_port = get_ptf_send_ports(duthost, tbinfo, dev_port) - ptf_recv_ports = get_ptf_recv_ports(duthost, tbinfo) + + # Get the list of ptf ports to receive msg, even for multi-dut scenario + neighbor_type = get_neighbor_info(duthost, dev_port, tbinfo) + recv_neigh_list = get_all_recv_neigh(duthosts, neighbor_type) + logger.info("Receiving ports neighbor list : {}".format(recv_neigh_list)) + ptf_recv_ports = get_all_ptf_recv_ports(duthosts, tbinfo, recv_neigh_list) + logger.info("Receiving ptf ports list : {}".format(ptf_recv_ports)) exabgp_port = get_exabgp_port(duthost, tbinfo, dev_port) logger.info("exabgp_port = %d" % exabgp_port) @@ -367,7 +466,7 @@ def switch(x): # test link status send_recv_ping_packet( - ptfadapter, ptf_send_port, ptf_recv_ports, vlan_mac, dut_mac, ptf_ip, ping_ip) + ptfadapter, ptf_send_port, ptf_recv_ports, vlan_mac, dut_mac, ptf_ip, ping_ip, tbinfo) withdraw_route(ptf_ip, dst_prefix, nexthop, exabgp_port, aspath) # Check if route is withdraw with first 3 routes @@ -375,7 +474,7 @@ def switch(x): time.sleep(1) check_route(duthost, dst_prefix, dev_port, WITHDRAW) send_recv_ping_packet( - ptfadapter, ptf_send_port, ptf_recv_ports, vlan_mac, dut_mac, ptf_ip, ping_ip) + ptfadapter, ptf_send_port, ptf_recv_ports, vlan_mac, dut_mac, ptf_ip, ping_ip, tbinfo) announce_route(ptf_ip, dst_prefix, nexthop, exabgp_port, aspath) # Check if route is announced with first 3 routes @@ -383,7 +482,7 @@ def switch(x): time.sleep(1) 
check_route(duthost, dst_prefix, dev_port, ANNOUNCE) send_recv_ping_packet( - ptfadapter, ptf_send_port, ptf_recv_ports, vlan_mac, dut_mac, ptf_ip, ping_ip) + ptfadapter, ptf_send_port, ptf_recv_ports, vlan_mac, dut_mac, ptf_ip, ping_ip, tbinfo) route_index += 1 diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py old mode 100644 new mode 100755 index ad60f32a629..f1b23c78304 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -20,6 +20,7 @@ simple_qinq_tcp_packet, simple_ip_packet, simple_ipv4ip_packet, + simple_ipv6ip_packet, hex_dump_buffer, verify_packet_any_port) from ptf.mask import Mask @@ -35,7 +36,10 @@ sai_thrift_read_buffer_pool_watermark, sai_thrift_read_headroom_pool_watermark, sai_thrift_read_queue_occupancy, - sai_thrift_read_pg_occupancy) + sai_thrift_read_pg_occupancy, + sai_thrift_read_port_voq_counters, + sai_thrift_get_voq_port_id + ) from switch_sai_thrift.ttypes import (sai_thrift_attribute_value_t, sai_thrift_attribute_t) from switch_sai_thrift.sai_headers import SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID @@ -277,37 +281,60 @@ def construct_ip_pkt(pkt_len, dst_mac, src_mac, src_ip, dst_ip, dscp, src_vlan, ip_id = kwargs.get('ip_id', None) ttl = kwargs.get('ttl', None) exp_pkt = kwargs.get('exp_pkt', False) + ipv6 = kwargs.get('ipv6', False) tos = (dscp << 2) | ecn pkt_args = { 'pktlen': pkt_len, 'eth_dst': dst_mac, 'eth_src': src_mac, - 'ip_src': src_ip, - 'ip_dst': dst_ip, - 'ip_tos': tos } - if ip_id is not None: + + if ipv6: + pkt_args.update({ + 'ipv6_src': src_ip, + 'ipv6_dst': dst_ip, + 'ipv6_dscp': dscp, + 'ipv6_ecn': ecn + }) + else: + pkt_args.update({ + 'ip_src': src_ip, + 'ip_dst': dst_ip, + 'ip_tos': tos + }) + + if ip_id is not None and not ipv6: pkt_args['ip_id'] = ip_id if ttl is not None: - pkt_args['ip_ttl'] = ttl + if ipv6: + pkt_args['ipv6_hlim'] = ttl + else: + pkt_args['ip_ttl'] = ttl if src_vlan is not None: pkt_args['dl_vlan_enable'] = True 
pkt_args['vlan_vid'] = int(src_vlan) pkt_args['vlan_pcp'] = dscp - pkt = simple_ip_packet(**pkt_args) + if ipv6: + pkt = simple_ipv6ip_packet(**pkt_args) + else: + pkt = simple_ip_packet(**pkt_args) if exp_pkt: masked_exp_pkt = Mask(pkt, ignore_extra_bytes=True) masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src") - masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") - masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") - masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "len") - masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "len") + + if ipv6: + masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6, "hlim") + else: + masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") + masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") + masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "len") + if src_vlan is not None: masked_exp_pkt.set_do_not_care_scapy(scapy.Dot1Q, "vlan") return masked_exp_pkt @@ -334,11 +361,11 @@ def construct_arp_pkt(eth_dst, eth_src, arp_op, src_ip, dst_ip, hw_dst, src_vlan return pkt -def get_rx_port(dp, device_number, src_port_id, dst_mac, dst_ip, src_ip, src_vlan=None): +def get_rx_port(dp, device_number, src_port_id, dst_mac, dst_ip, src_ip, src_vlan=None, ipv6=False): ip_id = 0xBABE src_port_mac = dp.dataplane.get_mac(device_number, src_port_id) - pkt = construct_ip_pkt(64, dst_mac, src_port_mac, - src_ip, dst_ip, 0, src_vlan, ip_id=ip_id) + pkt = construct_ip_pkt(64, dst_mac, src_port_mac, src_ip, dst_ip, 0, src_vlan, ip_id=ip_id, ipv6=ipv6) + # Send initial packet for any potential ARP resolution, which may cause the LAG # destination to change. Can occur especially when running tests in isolation on a # first test attempt. 
@@ -348,7 +375,7 @@ def get_rx_port(dp, device_number, src_port_id, dst_mac, dst_ip, src_ip, src_vla send_packet(dp, src_port_id, pkt, 1) masked_exp_pkt = construct_ip_pkt( - 48, dst_mac, src_port_mac, src_ip, dst_ip, 0, src_vlan, ip_id=ip_id, exp_pkt=True) + 48, dst_mac, src_port_mac, src_ip, dst_ip, 0, src_vlan, ip_id=ip_id, ipv6=ipv6, exp_pkt=True) pre_result = dp.dataplane.poll( device_number=0, exp_pkt=masked_exp_pkt, timeout=3) @@ -409,70 +436,26 @@ def fill_leakout_plus_one( return False -def fill_egress_plus_one( - test_case, src_port_id, pkt, queue, asic_type, pkts_num_egr_mem=0, - dst_port_id=None, pkt2=None): - # Attempts to queue 1 packet while compensating for a varying packet leakout and egress queues. +def fill_egress_plus_one(test_case, src_port_id, pkt, queue, asic_type, pkts_num_egr_mem): + # Attempts to enqueue 1 packet while compensating for a varying packet leakout and egress queues. # pkts_num_egr_mem is the number of packets in full egress queues, to provide an initial filling boost - # Returns whether 1 packet was successfully enqueued. 
- # - # pkts_num_egr_mem=0 is not applicable for multi-src-port cases - # for multi-src-port case, get pkts_num_egr_mem via overflow_egress(), - # then call fill_egress_plus_one() with pkts_num_egr_mem - if asic_type in ['cisco-8000']: - # if pkts_num_egr_mem unknown, get estimated pkts_num_egr_mem - if pkts_num_egr_mem == 0: - if dst_port_id is None: - raise RuntimeError( - "fill_egress_plus_one: please input pkts_num_egr_mem or " - "dst_port_id", file=sys.stderr) - pkts_num_egr_mem, extra_bytes_occupied = overflow_egress( - test_case, src_port_id, pkt, queue, asic_type) - # tx enable - test_case.sai_thrift_port_tx_enable( - test_case.dst_client, asic_type, [dst_port_id]) - # tx disable - test_case.sai_thrift_port_tx_disable(test_case.dst_client, asic_type, [dst_port_id]) - - pkt_list = [pkt] - if pkt2: - pkt_list.append(pkt2) - for packet in pkt_list: - # send 1 packet, if pg occupancy increases, return - pg_cntrs_base = sai_thrift_read_pg_occupancy( - test_case.src_client, port_list['src'][src_port_id]) - send_packet(test_case, src_port_id, packet, 1) - pg_cntrs = sai_thrift_read_pg_occupancy( - test_case.src_client, port_list['src'][src_port_id]) - if pg_cntrs[queue] > pg_cntrs_base[queue]: - print("fill_egress_plus_one: Success, sent 1 packets, SQ occupancy bytes rose from %d to %d" % ( - pg_cntrs_base[queue], pg_cntrs[queue]), file=sys.stderr) - continue - - # fill egress plus one - pg_cntrs_base = sai_thrift_read_pg_occupancy( - test_case.src_client, port_list['src'][src_port_id]) - send_packet(test_case, src_port_id, packet, pkts_num_egr_mem) - max_packets = 1000 - for packet_i in range(max_packets): - send_packet(test_case, src_port_id, packet, 1) - pg_cntrs = sai_thrift_read_pg_occupancy( - test_case.src_client, port_list['src'][src_port_id]) - if pg_cntrs[queue] > pg_cntrs_base[queue]: - print("fill_egress_plus_one: Success, sent %d packets, SQ occupancy bytes rose from %d to %d" % ( - pkts_num_egr_mem + packet_i + 1, pg_cntrs_base[queue], 
pg_cntrs[queue]), file=sys.stderr) - break - if pg_cntrs[queue] <= pg_cntrs_base[queue]: - raise RuntimeError( - "fill_egress_plus_one: Failure, sent %d packets, SQ " - "occupancy bytes rose from %d to %d" % ( - pkts_num_egr_mem + max_packets, - pg_cntrs_base[queue], - pg_cntrs[queue]), - file=sys.stderr) - return False - - return True + # Returns whether 1 packet is successfully enqueued. + if asic_type not in ['cisco-8000']: + return False + pg_cntrs_base = sai_thrift_read_pg_occupancy( + test_case.src_client, port_list['src'][src_port_id]) + send_packet(test_case, src_port_id, pkt, pkts_num_egr_mem) + max_packets = 1000 + for packet_i in range(max_packets): + send_packet(test_case, src_port_id, pkt, 1) + pg_cntrs = sai_thrift_read_pg_occupancy( + test_case.src_client, port_list['src'][src_port_id]) + if pg_cntrs[queue] > pg_cntrs_base[queue]: + print("fill_egress_plus_one: Success, sent %d packets, SQ occupancy bytes rose from %d to %d" % ( + pkts_num_egr_mem + packet_i + 1, pg_cntrs_base[queue], pg_cntrs[queue]), file=sys.stderr) + return True + raise RuntimeError("fill_egress_plus_one: Failure, sent %d packets, SQ occupancy bytes rose from %d to %d" % ( + pkts_num_egr_mem + max_packets, pg_cntrs_base[queue], pg_cntrs[queue])) def overflow_egress(test_case, src_port_id, pkt, queue, asic_type): @@ -549,7 +532,6 @@ def setUp(self): self.dst_dut_index = self.test_params['dst_dut_index'] self.dst_asic_index = self.test_params.get('dst_asic_index', None) self.testbed_type = self.test_params['testbed_type'] - self.is_multi_asic = (self.clients['src'] != self.clients['dst']) def tearDown(self): sai_base_test.ThriftInterfaceDataPlane.tearDown(self) @@ -557,31 +539,39 @@ def tearDown(self): def runTest(self): # ARP Populate # Ping only required for testports - if 't2' in self.testbed_type and self.is_multi_asic: - stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.dst_server_ip, self.test_params['dut_username'], - self.test_params['dut_password'], - 'sudo ip netns 
exec asic{} ping -q -c 3 {}'.format( - self.dst_asic_index, self.dst_port_ip)) - assert ' 0% packet loss' in stdOut[3], "Ping failed for IP:'{}' on asic '{}' on Dut '{}'".format( - self.dst_port_ip, self.dst_asic_index, self.dst_server_ip) - stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.dst_server_ip, self.test_params['dut_username'], - self.test_params['dut_password'], - 'sudo ip netns exec asic{} ping -q -c 3 {}'.format( - self.dst_asic_index, self.dst_port_2_ip)) - assert ' 0% packet loss' in stdOut[3], "Ping failed for IP:'{}' on asic '{}' on Dut '{}'".format( - self.dst_port_2_ip, self.dst_asic_index, self.dst_server_ip) - stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.dst_server_ip, self.test_params['dut_username'], - self.test_params['dut_password'], - 'sudo ip netns exec asic{} ping -q -c 3 {}'.format( - self.dst_asic_index, self.dst_port_3_ip)) - assert ' 0% packet loss' in stdOut[3], "Ping failed for IP:'{}' on asic '{}' on Dut '{}'".format( - self.dst_port_3_ip, self.dst_asic_index, self.dst_server_ip) - stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.src_server_ip, self.test_params['dut_username'], - self.test_params['dut_password'], - 'sudo ip netns exec asic{} ping -q -c 3 {}'.format( - self.src_asic_index, self.src_port_ip)) - assert ' 0% packet loss' in stdOut[3], "Ping failed for IP:'{}' on asic '{}' on Dut '{}'".format( - self.src_port_ip, self.src_asic_index, self.src_server_ip) + if 't2' in self.testbed_type: + src_is_multi_asic = self.test_params['src_is_multi_asic'] + dst_is_multi_asic = self.test_params['dst_is_multi_asic'] + dst_port_ips = [self.dst_port_ip, self.dst_port_2_ip, self.dst_port_3_ip] + for ip in dst_port_ips: + if dst_is_multi_asic: + stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.dst_server_ip, + self.test_params['dut_username'], + self.test_params['dut_password'], + 'sudo ip netns exec asic{} ping -q -c 3 {}'.format( + self.dst_asic_index, ip)) + assert ' 0% packet loss' in stdOut[3], "Ping failed for 
IP:'{}' on asic '{}' on Dut '{}'".format( + ip, self.dst_asic_index, self.dst_server_ip) + else: + stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.dst_server_ip, + self.test_params['dut_username'], + self.test_params['dut_password'], + 'ping -q -c 3 {}'.format(ip)) + assert ' 0% packet loss' in stdOut[3], "Ping failed for IP:'{}' on Dut '{}'".format( + ip, self.dst_server_ip) + if src_is_multi_asic: + stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.src_server_ip, self.test_params['dut_username'], + self.test_params['dut_password'], + 'sudo ip netns exec asic{} ping -q -c 3 {}'.format( + self.src_asic_index, self.src_port_ip)) + assert ' 0% packet loss' in stdOut[3], "Ping failed for IP:'{}' on asic '{}' on Dut '{}'".format( + self.src_port_ip, self.src_asic_index, self.src_server_ip) + else: + stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.src_server_ip, self.test_params['dut_username'], + self.test_params['dut_password'], + 'ping -q -c 3 {}'.format(self.src_port_ip)) + assert ' 0% packet loss' in stdOut[3], "Ping failed for IP:'{}' on Dut '{}'".format( + self.src_port_ip, self.src_server_ip) else: arpreq_pkt = construct_arp_pkt('ff:ff:ff:ff:ff:ff', self.src_port_mac, 1, self.src_port_ip, '192.168.0.1', '00:00:00:00:00:00', self.src_vlan) @@ -672,6 +662,7 @@ def runTest(self): dual_tor = self.test_params.get('dual_tor', None) leaf_downstream = self.test_params.get('leaf_downstream', None) asic_type = self.test_params['sonic_asic_type'] + ipv6 = self.test_params.get('ipv6', False) exp_ip_id = 101 exp_ttl = 63 pkt_dst_mac = router_mac if router_mac != '' else dst_port_mac @@ -681,7 +672,7 @@ def runTest(self): # in case dst_port_id is part of LAG, find out the actual dst port # for given IP parameters dst_port_id = get_rx_port( - self, 0, src_port_id, pkt_dst_mac, dst_port_ip, src_port_ip + self, 0, src_port_id, pkt_dst_mac, dst_port_ip, src_port_ip, ipv6=ipv6 ) print("actual dst_port_id: %d" % (dst_port_id), file=sys.stderr) print("dst_port_mac: %s, 
src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % ( @@ -714,14 +705,24 @@ def runTest(self): for dscp in range(0, 64): tos = (dscp << 2) tos |= 1 - pkt = simple_ip_packet(pktlen=64, - eth_dst=pkt_dst_mac, - eth_src=src_port_mac, - ip_src=src_port_ip, - ip_dst=dst_port_ip, - ip_tos=tos, - ip_id=exp_ip_id, - ip_ttl=ip_ttl) + + if ipv6: + pkt = simple_ipv6ip_packet(pktlen=64, + eth_dst=pkt_dst_mac, + eth_src=src_port_mac, + ipv6_src=src_port_ip, + ipv6_dst=dst_port_ip, + ipv6_tc=tos, + ipv6_hlim=ip_ttl) + else: + pkt = simple_ip_packet(pktlen=64, + eth_dst=pkt_dst_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=ip_ttl) send_packet(self, src_port_id, pkt, 1) print("dscp: %d, calling send_packet()" % (tos >> 2), file=sys.stderr) @@ -740,11 +741,16 @@ def runTest(self): # Verify dscp flag try: - if (recv_pkt.payload.tos == tos and - recv_pkt.payload.src == src_port_ip and - recv_pkt.payload.dst == dst_port_ip and - recv_pkt.payload.ttl == exp_ttl and - recv_pkt.payload.id == exp_ip_id): + if ((recv_pkt.payload.src == src_port_ip and + recv_pkt.payload.dst == dst_port_ip) + and + ((ipv6 and + recv_pkt.payload.hlim == exp_ttl and + recv_pkt.payload.tc == tos) + or + (recv_pkt.payload.ttl == exp_ttl and + recv_pkt.payload.id == exp_ip_id and + recv_pkt.payload.tos == tos))): dscp_received = True print("dscp: %d, total received: %d" % (tos >> 2, cnt), file=sys.stderr) @@ -957,6 +963,7 @@ def runTest(self): dscp_to_pg_map = self.test_params.get('dscp_to_pg_map', None) pkt_dst_mac = router_mac if router_mac != '' else dst_port_mac asic_type = self.test_params.get("sonic_asic_type") + platform_asic = self.test_params['platform_asic'] print("dst_port_id: %d, src_port_id: %d" % (dst_port_id, src_port_id), file=sys.stderr) @@ -1025,21 +1032,26 @@ def runTest(self): print(list(map(operator.sub, pg_cntrs, pg_cntrs_base)), file=sys.stderr) for i in range(0, PG_NUM): - if i == pg: - if i == 0 or i == 4: - assert 
(pg_cntrs[pg] >= - pg_cntrs_base[pg] + len(dscps)) + # DNX/Chassis: + # pg = 0 => Some extra packets with unmarked TC + # pg = 4 => Extra packets for LACP/BGP packets + # pg = 7 => packets from cpu to front panel ports + if platform_asic and platform_asic == "broadcom-dnx": + if i == pg: + if i == 3: + assert (pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) + else: + assert (pg_cntrs[pg] >= pg_cntrs_base[pg] + len(dscps)) else: - assert (pg_cntrs[pg] == - pg_cntrs_base[pg] + len(dscps)) + if i in [0, 4, 7]: + assert (pg_cntrs[i] >= pg_cntrs_base[i]) + else: + assert (pg_cntrs[i] == pg_cntrs_base[i]) else: - # LACP packets are mapped to queue0 and tcp syn packets for BGP to queue4 - # So for those queues the count could be more - if i == 0 or i == 4: - assert (pg_cntrs[i] >= pg_cntrs_base[i]) + if i == pg: + assert (pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) else: assert (pg_cntrs[i] == pg_cntrs_base[i]) - # confirm that dscp pkts are received total_recv_cnt = 0 dscp_recv_cnt = 0 @@ -1500,6 +1512,7 @@ def runTest(self): self.test_params['pkts_num_trig_ingr_drp']) hwsku = self.test_params['hwsku'] platform_asic = self.test_params['platform_asic'] + src_dst_asic_diff = self.test_params['src_dst_asic_diff'] pkt_dst_mac = router_mac if router_mac != '' else dst_port_mac # get counter names to query @@ -1569,6 +1582,15 @@ def runTest(self): if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) + # generate pkts_num_egr_mem in runtime + if 'cisco-8000' in asic_type and src_dst_asic_diff: + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) + pkts_num_egr_mem, extra_bytes_occupied = overflow_egress(self, src_port_id, pkt, + int(self.test_params['pg']), + asic_type) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) + time.sleep(2) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) try: @@ -2156,6 +2178,7 @@ def runTest(self): else: 
hysteresis = 0 hwsku = self.test_params['hwsku'] + src_dst_asic_diff = self.test_params['src_dst_asic_diff'] self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id, dst_port_2_id, dst_port_3_id]) # get a snapshot of counter values at recv and transmit ports @@ -2296,6 +2319,18 @@ def runTest(self): pkts_num_egr_mem = int(pkts_num_egr_mem) is_multi_asic = (self.clients['src'] != self.clients['dst']) + # generate pkts_num_egr_mem in runtime + pkts_num_egr_mem2 = pkts_num_egr_mem3 = pkts_num_egr_mem + if 'cisco-8000' in asic_type and src_dst_asic_diff: + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id, dst_port_2_id, dst_port_3_id]) + pkts_num_egr_mem, _ = overflow_egress( + self, src_port_id, pkt, int(self.test_params['pg']), asic_type) + pkts_num_egr_mem2, _ = overflow_egress( + self, src_port_id, pkt2, int(self.test_params['pg']), asic_type) + pkts_num_egr_mem3, _ = overflow_egress( + self, src_port_id, pkt3, int(self.test_params['pg']), asic_type) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id, dst_port_2_id, dst_port_3_id]) + time.sleep(2) step_id = 1 step_desc = 'disable TX for dst_port_id, dst_port_2_id, dst_port_3_id' @@ -2392,7 +2427,7 @@ def runTest(self): else: fill_egress_plus_one( self, src_port_id, - pkt2, int(self.test_params['pg']), asic_type, pkts_num_egr_mem) + pkt2, int(self.test_params['pg']), asic_type, pkts_num_egr_mem2) send_packet( self, src_port_id, pkt2, (pkts_num_leak_out + pkts_num_dismiss_pfc + @@ -2429,7 +2464,7 @@ def runTest(self): else: fill_egress_plus_one( self, src_port_id, - pkt3, int(self.test_params['pg']), asic_type, pkts_num_egr_mem) + pkt3, int(self.test_params['pg']), asic_type, pkts_num_egr_mem3) send_packet(self, src_port_id, pkt3, pkts_num_leak_out + 1) else: send_packet(self, src_port_id, pkt3, pkts_num_leak_out + 1) @@ -2632,6 +2667,7 @@ def setUp(self): self.pgs = [pg + 2 for pg in self.test_params['pgs']] self.src_port_ids = 
self.test_params['src_port_ids'] self.src_port_ips = self.test_params['src_port_ips'] + self.platform_asic = self.test_params['platform_asic'] print(self.src_port_ips, file=sys.stderr) sys.stderr.flush() # get counter names to query @@ -2724,6 +2760,17 @@ def setUp(self): def_vlan_mac = self.test_params.get('def_vlan_mac', None) if is_dualtor and def_vlan_mac is not None: self.dst_port_mac = def_vlan_mac + self.pkt_dst_mac = self.router_mac if self.router_mac != '' else self.dst_port_mac + # Collect destination ports that may be in a lag + if self.platform_asic and self.platform_asic == "broadcom-dnx": + dst_port_ids = [] + self.src_dst = {} + for i in range(len(self.src_port_ids)): + dst_port = get_rx_port(self, 0, self.src_port_ids[i], self.pkt_dst_mac, + self.dst_port_ip, self.src_port_ips[i]) + dst_port_ids.append(dst_port) + self.src_dst.update({self.src_port_ids[i]: dst_port}) + self.uniq_dst_ports = list(set(dst_port_ids)) def tearDown(self): sai_base_test.ThriftInterfaceDataPlane.tearDown(self) @@ -2742,16 +2789,22 @@ def show_port_counter(self, asic_type, rx_base, tx_base, banner): rx_curr, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][srcPortId]) port_cnt_tbl.add_row([' src_port{}_id{}'.format(srcPortIdx, srcPortId)] + [rx_curr[fieldIdx] for fieldIdx in port_counter_indexes]) - port_cnt_tbl.add_row(['base dst_port_id{}'.format(self.dst_port_id)] + [tx_base[fieldIdx] - for fieldIdx in port_counter_indexes]) - tx_curr, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][self.dst_port_id]) - port_cnt_tbl.add_row([' dst_port_id{}'.format(self.dst_port_id)] + [tx_curr[fieldIdx] - for fieldIdx in port_counter_indexes]) + if self.platform_asic and self.platform_asic == "broadcom-dnx": + for dstPortIdx, dstPortId in enumerate(self.uniq_dst_ports): + port_cnt_tbl.add_row(['base dst_port{}_id{}'.format(dstPortIdx, dstPortId)] + + [tx_base[dstPortIdx][fieldIdx] for fieldIdx in port_counter_indexes]) + 
tx_curr, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dstPortId]) + port_cnt_tbl.add_row([' dst_port{}_id{}'.format(dstPortIdx, dstPortId)] + + [tx_curr[fieldIdx] for fieldIdx in port_counter_indexes]) + else: + port_cnt_tbl.add_row(['base dst_port_id{}'.format(self.dst_port_id)] + + [tx_base[fieldIdx] for fieldIdx in port_counter_indexes]) + tx_curr, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][self.dst_port_id]) + port_cnt_tbl.add_row([' dst_port_id{}'.format(self.dst_port_id)] + + [tx_curr[fieldIdx] for fieldIdx in port_counter_indexes]) sys.stderr.write('{}\n{}\n'.format(banner, port_cnt_tbl)) def runTest(self): - platform_asic = self.test_params['platform_asic'] - margin = self.test_params.get('margin') if not margin: margin = 0 @@ -2765,15 +2818,24 @@ def runTest(self): # queue_counters value is not of our interest here recv_counters_bases = [sai_thrift_read_port_counters(self.src_client, self.asic_type, port_list['src'][sid])[ 0] for sid in self.src_port_ids] - xmit_counters_base, _ = sai_thrift_read_port_counters( - self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id]) + if self.platform_asic and self.platform_asic == "broadcom-dnx": + xmit_counters_bases = [sai_thrift_read_port_counters(self.dst_client, self.asic_type, + port_list['dst'][did])[0] + for did in self.uniq_dst_ports] + else: + xmit_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, + self.asic_type, port_list['dst'][self.dst_port_id]) # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) # Pause egress of dut xmit port - self.sai_thrift_port_tx_disable(self.dst_client, self.asic_type, [self.dst_port_id]) + if self.platform_asic and self.platform_asic == "broadcom-dnx": + # Disable all dst ports + 
self.sai_thrift_port_tx_disable(self.dst_client, self.asic_type, self.uniq_dst_ports) + else: + self.sai_thrift_port_tx_disable(self.dst_client, self.asic_type, [self.dst_port_id]) try: # send packets to leak out @@ -2817,14 +2879,14 @@ def runTest(self): pkt_cnt = pkts_num_trig_pfc // self.pkt_size_factor send_packet( self, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, int(pkt_cnt)) - if platform_asic != "broadcom-dnx": + if self.platform_asic != "broadcom-dnx": time.sleep(8) # wait pfc counter refresh and show the counters self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, 'To fill service pool, send {} pkt with DSCP {} PG {} from src_port{}' ' to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) - if platform_asic and platform_asic == "broadcom-dnx": + if self.platform_asic and self.platform_asic == "broadcom-dnx": time.sleep(8) # wait pfc counter refresh and show the counters for i in range(0, self.pgs_num): if self.pkts_num_trig_pfc: @@ -2833,10 +2895,11 @@ def runTest(self): pkts_num_trig_pfc = self.pkts_num_trig_pfc_shp[i] pkt_cnt = pkts_num_trig_pfc // self.pkt_size_factor - self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, - 'To fill service pool, send {} pkt with DSCP {} PG {} from src_port{}' - ' to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], - sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) + self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_bases, + 'To fill service pool, send {} pkt with DSCP {} PG {} from' + ' src_port{} to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], + sidx_dscp_pg_tuples[i][2], + sidx_dscp_pg_tuples[i][0])) print("Service pool almost filled", file=sys.stderr) sys.stderr.flush() @@ -2873,18 +2936,24 @@ def runTest(self): recv_counters, _ = sai_thrift_read_port_counters( self.src_client, self.asic_type, port_list['src'][self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) 
- if platform_asic != "broadcom-dnx": + if self.platform_asic != "broadcom-dnx": time.sleep(8) # wait pfc counter refresh - self.show_port_counter( - self.asic_type, recv_counters_bases, xmit_counters_base, - 'To trigger PFC, send {} pkt with DSCP {} PG {} from src_port{} to dst_port' - .format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) + self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, + 'To trigger PFC, send {} pkt with DSCP {} PG {} from src_port{} to dst_port' + .format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], + sidx_dscp_pg_tuples[i][0])) + if self.platform_asic and self.platform_asic == "broadcom-dnx": + self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_bases, + 'To trigger PFC, send {} pkt with DSCP {} PG {} from src_port{} to dst_port' + .format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], + sidx_dscp_pg_tuples[i][0])) if pkt_cnt == 10: - sys.exit("Too many pkts needed to trigger pfc: %d" % - (pkt_cnt)) - assert (recv_counters[sidx_dscp_pg_tuples[i][2]] > - recv_counters_bases[sidx_dscp_pg_tuples[i][0]][sidx_dscp_pg_tuples[i][2]]) + if self.platform_asic and self.platform_asic == "broadcom-dnx": + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, self.uniq_dst_ports) + sys.exit("Too many pkts needed to trigger pfc: %d" % (pkt_cnt)) + assert(recv_counters[sidx_dscp_pg_tuples[i][2]] > + recv_counters_bases[sidx_dscp_pg_tuples[i][0]][sidx_dscp_pg_tuples[i][2]]) print("%d packets for sid: %d, pg: %d to trigger pfc" % ( pkt_cnt, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], sidx_dscp_pg_tuples[i][2] - 2), file=sys.stderr) @@ -2925,23 +2994,13 @@ def runTest(self): send_packet( self, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, pkt_cnt) # allow enough time for the dut to sync up the counter values in counters_db - if platform_asic != "broadcom-dnx": + if self.platform_asic != "broadcom-dnx": 
time.sleep(8) self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, 'To fill headroom pool, send {} pkt with DSCP {} PG {} from src_port{} ' 'to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) - if platform_asic and platform_asic == "broadcom-dnx": - time.sleep(8) - for i in range(0, self.pgs_num): - pkt_cnt = self.pkts_num_hdrm_full // self.pkt_size_factor if i != self.pgs_num - 1 \ - else self.pkts_num_hdrm_partial // self.pkt_size_factor - self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, - 'To fill headroom pool, send {} pkt with DSCP {} PG {} from src_port{}' - ' to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], - sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) - recv_counters, _ = sai_thrift_read_port_counters( self.src_client, self.asic_type, port_list['src'][self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) # assert no ingress drop @@ -2970,9 +3029,18 @@ def runTest(self): print("pkts sent: %d, lower bound: %d, actual headroom pool watermark: %d, upper_bound: %d" % ( wm_pkt_num, expected_wm, hdrm_pool_wm, upper_bound_wm), file=sys.stderr) if 'innovium' not in self.asic_type: - assert (expected_wm <= hdrm_pool_wm) - assert (hdrm_pool_wm <= upper_bound_wm) - + assert(expected_wm <= hdrm_pool_wm) + assert(hdrm_pool_wm <= upper_bound_wm) + if self.platform_asic and self.platform_asic == "broadcom-dnx": + time.sleep(8) + for i in range(0, self.pgs_num): + pkt_cnt = self.pkts_num_hdrm_full // self.pkt_size_factor if i != self.pgs_num - 1 \ + else self.pkts_num_hdrm_partial // self.pkt_size_factor + self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_bases, + 'To fill headroom pool, send {} pkt with DSCP {} PG {} from' + ' src_port{} to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], + sidx_dscp_pg_tuples[i][2], + sidx_dscp_pg_tuples[i][0])) print("all but the last pg hdrms filled", file=sys.stderr) 
sys.stderr.flush() @@ -2985,28 +3053,35 @@ def runTest(self): # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) - self.show_port_counter( - self.asic_type, recv_counters_bases, xmit_counters_base, - 'To fill last PG and trigger ingress drop, send {} pkt with DSCP {} PG {} from src_port{} to dst_port' - .format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) + if self.platform_asic and self.platform_asic == "broadcom-dnx": + self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_bases, + 'To fill last PG and trigger ingress drop, send {} pkt with DSCP {} PG {}' + ' from src_port{} to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], + sidx_dscp_pg_tuples[i][2], + sidx_dscp_pg_tuples[i][0])) + else: + self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, + 'To fill last PG and trigger ingress drop, send {} pkt with DSCP {} PG {}' + ' from src_port{} to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], + sidx_dscp_pg_tuples[i][2], + sidx_dscp_pg_tuples[i][0])) recv_counters, _ = sai_thrift_read_port_counters( self.src_client, self.asic_type, port_list['src'][self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) - if platform_asic and platform_asic == "broadcom-dnx": - logging.info( - "On J2C+ don't support port level drop counters - so ignoring this step for now") + if self.platform_asic and self.platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support port level drop counters - so ignoring this step for now") else: # assert ingress drop for cntr in self.ingress_counters: assert(recv_counters[cntr] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][cntr]) # assert no egress drop at the dut xmit port - xmit_counters, _ = sai_thrift_read_port_counters( - self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id]) + if self.platform_asic != "broadcom-dnx": + xmit_counters, _ = sai_thrift_read_port_counters(self.dst_client, 
self.asic_type, + port_list['dst'][self.dst_port_id]) - if platform_asic and platform_asic == "broadcom-dnx": - logging.info( - "On J2C+ don't support port level drop counters - so ignoring this step for now") + if self.platform_asic and self.platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support port level drop counters - so ignoring this step for now") else: for cntr in self.egress_counters: assert (xmit_counters[cntr] == xmit_counters_base[cntr]) @@ -3030,7 +3105,10 @@ def runTest(self): sys.stderr.flush() finally: - self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, [self.dst_port_id]) + if self.platform_asic and self.platform_asic == "broadcom-dnx": + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, self.uniq_dst_ports) + else: + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, [self.dst_port_id]) class SharedResSizeTest(sai_base_test.ThriftInterfaceDataPlane): @@ -3584,6 +3662,7 @@ def runTest(self): sonic_version = self.test_params['sonic_version'] router_mac = self.test_params['router_mac'] dst_port_id = int(self.test_params['dst_port_id']) + dst_sys_port_ids = self.test_params.get('dst_sys_ports', None) dst_port_ip = self.test_params['dst_port_ip'] dst_port_mac = self.dataplane.get_mac(0, dst_port_id) src_port_id = int(self.test_params['src_port_id']) @@ -3640,6 +3719,15 @@ def runTest(self): self.src_client, asic_type, port_list['src'][src_port_id]) xmit_counters_base, queue_counters = sai_thrift_read_port_counters( self.dst_client, asic_type, port_list['dst'][dst_port_id]) + # for t2 chassis + if platform_asic and platform_asic == "broadcom-dnx": + if dst_port_id in dst_sys_port_ids: + for port_id, sysport in dst_sys_port_ids.items(): + if dst_port_id == port_id: + dst_sys_port_id = int(sysport) + print("actual dst_sys_port_id: %d" % (dst_sys_port_id), file=sys.stderr) + voq_list = sai_thrift_get_voq_port_id(self.src_client, dst_sys_port_id) + voq_queue_counters_base = 
sai_thrift_read_port_voq_counters(self.src_client, voq_list) # add slight tolerance in threshold characterization to consider # the case that cpu puts packets in the egress queue after we pause the egress # or the leak out is simply less than expected as we have occasionally observed @@ -3707,6 +3795,9 @@ def runTest(self): self.src_client, asic_type, port_list['src'][src_port_id]) xmit_counters, queue_counters = sai_thrift_read_port_counters( self.dst_client, asic_type, port_list['dst'][dst_port_id]) + # for t2 chassis + if platform_asic and platform_asic == "broadcom-dnx": + voq_queue_counters = sai_thrift_read_port_voq_counters(self.src_client, voq_list) # recv port no pfc assert (recv_counters[pg] == recv_counters_base[pg]) # recv port no ingress drop @@ -3753,6 +3844,13 @@ def runTest(self): for cntr in egress_counters: assert (xmit_counters[cntr] > xmit_counters_base[cntr]) + # voq ingress drop + if platform_asic and platform_asic == "broadcom-dnx": + voq_index = pg - 2 + print("voq_counters_base: %d, voq_counters: %d " % (voq_queue_counters_base[voq_index], + voq_queue_counters[voq_index]), file=sys.stderr) + assert (voq_queue_counters[voq_index] > ( + voq_queue_counters_base[voq_index] + pkts_num_trig_egr_drp - margin)) finally: self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) @@ -4106,6 +4204,7 @@ def runTest(self): self.src_client, asic_type, port_list['src'][src_port_id]) dst_pg_shared_wm_res_base = sai_thrift_read_pg_shared_watermark( self.dst_client, asic_type, port_list['dst'][dst_port_id]) + print("Initial watermark:{}".format(pg_shared_wm_res_base)) # send packets try: @@ -4127,8 +4226,8 @@ def runTest(self): pkts_num_leak_out + pkts_num_fill_min + margin send_packet(self, src_port_id, pkt, pg_min_pkts_num) elif 'cisco-8000' in asic_type: - assert (fill_leakout_plus_one( - self, src_port_id, dst_port_id, pkt, pg, asic_type)) + fill_leakout_plus_one( + self, src_port_id, dst_port_id, pkt, pg, asic_type, pkts_num_egr_mem) 
else: pg_min_pkts_num = pkts_num_leak_out + pkts_num_fill_min send_packet(self, src_port_id, pkt, pg_min_pkts_num) @@ -4231,12 +4330,14 @@ def runTest(self): ((pkts_num_leak_out + pkts_num_fill_min + expected_wm + margin) * (packet_length + internal_hdr_size))) else: - print("lower bound: %d, actual value: %d, upper bound (+%d): %d" % ( - expected_wm * cell_size, pg_shared_wm_res[pg], margin, (expected_wm + margin) * cell_size), - file=sys.stderr) - assert (pg_shared_wm_res[pg] <= ( - expected_wm + margin) * cell_size) - assert (expected_wm * cell_size <= pg_shared_wm_res[pg]) + msg = "lower bound: %d, actual value: %d, upper bound (+%d): %d" % ( + expected_wm * cell_size, + pg_shared_wm_res[pg], + margin, + (expected_wm + margin) * cell_size) + assert pg_shared_wm_res[pg] <= ( + expected_wm + margin) * cell_size, msg + assert expected_wm * cell_size <= pg_shared_wm_res[pg], msg pkts_num = pkts_inc @@ -4522,10 +4623,10 @@ def runTest(self): # Send packets to trigger PFC print("Iteration {}/{}, sending {} packets to trigger PFC".format( test_i + 1, iterations, pkts_num_trig_pfc), file=sys.stderr) - send_packet(self, src_port_id, pkt, pkts_num_trig_pfc) + send_packet(self, src_port_id, pkt, pkt_num) # Account for leakout - if 'cisco-8000' in asic_type: + if 'cisco-8000' in asic_type and not is_multi_asic: queue_counters = sai_thrift_read_queue_occupancy( self.dst_client, "dst", dst_port_id) occ_pkts = queue_counters[queue] // (packet_length + 24) @@ -5029,7 +5130,7 @@ def runTest(self): self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) time.sleep(8) buffer_pool_wm = sai_thrift_read_buffer_pool_watermark( - self.src_client, buf_pool_roid) - buffer_pool_wm_base + client_to_use, buf_pool_roid) - buffer_pool_wm_base print("Init pkts num sent: %d, min: %d, actual watermark value to start: %d" % ( (pkts_num_leak_out + pkts_num_fill_min), pkts_num_fill_min, buffer_pool_wm), file=sys.stderr) if pkts_num_fill_min: @@ -5076,7 +5177,7 @@ def 
runTest(self): self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) time.sleep(8) buffer_pool_wm = sai_thrift_read_buffer_pool_watermark( - self.src_client, buf_pool_roid) - buffer_pool_wm_base + client_to_use, buf_pool_roid) - buffer_pool_wm_base print( "lower bound (-%d): %d, actual value: %d, upper bound (+%d): %d" % ( diff --git a/tests/saitests/py3/switch.py b/tests/saitests/py3/switch.py index 7ebde5f8034..225aadf12df 100644 --- a/tests/saitests/py3/switch.py +++ b/tests/saitests/py3/switch.py @@ -68,7 +68,7 @@ SAI_INGRESS_PRIORITY_GROUP_STAT_CURR_OCCUPANCY_BYTES -from switch_sai_thrift.sai_headers import SAI_SWITCH_ATTR_SRC_MAC_ADDRESS +from switch_sai_thrift.sai_headers import SAI_SWITCH_ATTR_SRC_MAC_ADDRESS, SAI_SYSTEM_PORT_ATTR_QOS_VOQ_LIST this_dir = os.path.dirname(os.path.abspath(__file__)) @@ -822,6 +822,33 @@ def sai_thrift_read_port_counters(client, asic_type, port): return (counters_results, queue_counters_results) +def sai_thrift_get_voq_port_id(client, system_port_id): + object_id = client.sai_thrift_get_sys_port_obj_id_by_port_id(system_port_id) + voq_list = [] + port_attr_list = client.sai_thrift_get_system_port_attribute(object_id) + attr_list = port_attr_list.attr_list + for attribute in attr_list: + if attribute.id == SAI_SYSTEM_PORT_ATTR_QOS_VOQ_LIST: + for voq_id in attribute.value.objlist.object_id_list: + voq_list.append(voq_id) + return (voq_list) + + +def sai_thrift_read_port_voq_counters(client, voq_list): + cnt_ids = [] + thrift_results = [] + voq_counters_results = [] + cnt_ids.append(SAI_QUEUE_STAT_PACKETS) + counter = 0 + for voq in voq_list: + if counter <= 7: + thrift_results = client.sai_thrift_get_queue_stats( + voq, cnt_ids, len(cnt_ids)) + voq_counters_results.append(thrift_results[0]) + counter += 1 + return (voq_counters_results) + + def sai_thrift_read_port_watermarks(client, port): q_wm_ids = [] q_wm_ids.append(SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES) diff --git 
a/tests/scripts/sai_qualify/DUTScript/tests/mlnx-saiserver-files/usr/lib/systemd/system/saiserver.service b/tests/scripts/sai_qualify/DUTScript/tests/mlnx-saiserver-files/usr/lib/systemd/system/saiserver.service index 7d46da42c4f..dcfd2e6ee20 100644 --- a/tests/scripts/sai_qualify/DUTScript/tests/mlnx-saiserver-files/usr/lib/systemd/system/saiserver.service +++ b/tests/scripts/sai_qualify/DUTScript/tests/mlnx-saiserver-files/usr/lib/systemd/system/saiserver.service @@ -5,8 +5,8 @@ After=database.service -Requires=updategraph.service -After=updategraph.service +Requires=config-setup.service +After=config-setup.service After=interfaces-config.service BindsTo=sonic.target After=sonic.target diff --git a/tests/scripts/sai_qualify/DUTScript/tests/mlnx-syncd-files/usr/lib/systemd/system/syncd.service b/tests/scripts/sai_qualify/DUTScript/tests/mlnx-syncd-files/usr/lib/systemd/system/syncd.service index 14c16d3d2a9..12b43cfaa48 100644 --- a/tests/scripts/sai_qualify/DUTScript/tests/mlnx-syncd-files/usr/lib/systemd/system/syncd.service +++ b/tests/scripts/sai_qualify/DUTScript/tests/mlnx-syncd-files/usr/lib/systemd/system/syncd.service @@ -5,8 +5,8 @@ After=database.service After=swss.service -Requires=updategraph.service -After=updategraph.service +Requires=config-setup.service +After=config-setup.service After=interfaces-config.service BindsTo=sonic.target After=sonic.target diff --git a/tests/show_techsupport/test_auto_techsupport.py b/tests/show_techsupport/test_auto_techsupport.py index 958a9795489..ff77a39327a 100644 --- a/tests/show_techsupport/test_auto_techsupport.py +++ b/tests/show_techsupport/test_auto_techsupport.py @@ -908,7 +908,8 @@ def validate_techsupport_generation(duthost, dut_cli, is_techsupport_expected, e expected_techsupport_files = False if expected_techsupport_files: - # techsupport file creation may took some time after generate dump process already finished + # ensure that creation of tar.gz file is complete by checking if the intermediate tar + 
# file generated is removed assert wait_until(600, 10, 0, is_new_techsupport_file_generated, duthost, available_tech_support_files), \ 'New expected techsupport file was not generated' @@ -952,21 +953,38 @@ def validate_techsupport_generation(duthost, dut_cli, is_techsupport_expected, e def is_new_techsupport_file_generated(duthost, available_tech_support_files): """ - Check if new techsupport dump created + Check if new techsupport dump is generated and complete by verifying intermediate tar file is removed :param duthost: duthost object :param available_tech_support_files: list of already available techsupport files - :return: True in case when new techsupport file created + :return: True in case when new techsupport tar.gz file created and intermediate tar file removed from /var/dump """ - logger.info('Checking that new techsupport file created') + logger.info('Checking that new techsupport "*.tar.gz" file created and intermediate "*.tar" file is removed') new_techsupport_files_list = get_new_techsupport_files_list(duthost, available_tech_support_files) + new_techsupport_tar_files_list = get_new_techsupport_tar_files(duthost) new_techsupport_files_num = len(new_techsupport_files_list) + new_techsupport_tar_files_num = len(new_techsupport_tar_files_list) - if new_techsupport_files_num == 1: + if new_techsupport_files_num == 1 and new_techsupport_tar_files_num == 0: return True return False +def get_new_techsupport_tar_files(duthost): + """ + Get list of tar files generated during techsupport collection + :param duthost: duthost object + :return: list of new tar files generated by the techsupport + """ + try: + duthost.shell('ls -lh /var/dump/') # print into logs full folder content(for debug purpose) + new_available_tech_support_tar_files = duthost.shell('ls /var/dump/*.tar')['stdout_lines'] + except RunAnsibleModuleFail: + new_available_tech_support_tar_files = [] + + return new_available_tech_support_tar_files + + def get_new_techsupport_files_list(duthost, 
available_tech_support_files): """ Get list of new created techsupport files diff --git a/tests/snappi_tests/ecn/files/helper.py b/tests/snappi_tests/ecn/files/helper.py index 739725dc79d..067afdf0700 100644 --- a/tests/snappi_tests/ecn/files/helper.py +++ b/tests/snappi_tests/ecn/files/helper.py @@ -56,7 +56,7 @@ def run_ecn_test(api, logger.info("Stopping PFC watchdog") stop_pfcwd(duthost) - logger.info("Disabling packet aging") + logger.info("Disabling packet aging if necessary") disable_packet_aging(duthost) # Configure WRED/ECN thresholds diff --git a/tests/snappi_tests/multidut/ecn/__init__.py b/tests/snappi_tests/multidut/ecn/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/snappi_tests/multidut/ecn/files/__init__.py b/tests/snappi_tests/multidut/ecn/files/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/snappi_tests/multidut/ecn/files/multidut_helper.py b/tests/snappi_tests/multidut/ecn/files/multidut_helper.py new file mode 100644 index 00000000000..a9bca730987 --- /dev/null +++ b/tests/snappi_tests/multidut/ecn/files/multidut_helper.py @@ -0,0 +1,180 @@ +import logging +from tests.common.helpers.assertions import pytest_assert +from tests.common.fixtures.conn_graph_facts import conn_graph_facts, fanout_graph_facts # noqa: F401 +from tests.common.snappi_tests.snappi_fixtures import snappi_api_serv_ip, snappi_api_serv_port, \ + snappi_api # noqa: F401 +from tests.common.snappi_tests.snappi_helpers import get_dut_port_id +from tests.common.snappi_tests.common_helpers import pfc_class_enable_vector, config_wred, \ + enable_ecn, config_ingress_lossless_buffer_alpha, stop_pfcwd, disable_packet_aging, \ + config_capture_pkt, traffic_flow_mode, calc_pfc_pause_flow_rate # noqa: F401 +from tests.common.snappi_tests.read_pcap import get_ip_pkts +from tests.common.snappi_tests.snappi_test_params import SnappiTestParams +from tests.common.snappi_tests.traffic_generation import 
setup_base_traffic_config, generate_test_flows, \ + generate_pause_flows, run_traffic # noqa: F401 + +logger = logging.getLogger(__name__) + +EXP_DURATION_SEC = 1 +DATA_START_DELAY_SEC = 0.1 +SNAPPI_POLL_DELAY_SEC = 2 +PAUSE_FLOW_NAME = 'Pause Storm' +DATA_FLOW_NAME = 'Data Flow' + + +def run_ecn_test(api, + testbed_config, + port_config_list, + conn_data, + fanout_data, + dut_port, + lossless_prio, + prio_dscp_map, + iters, + snappi_extra_params=None): + """ + Run multidut ECN test + + Args: + api (obj): SNAPPI session + testbed_config (obj): testbed L1/L2/L3 configuration + port_config_list (list): list of port configuration + conn_data (dict): the dictionary returned by conn_graph_fact. + fanout_data (dict): the dictionary returned by fanout_graph_fact. + dut_port (str): DUT port to test + lossless_prio (int): lossless priority + prio_dscp_map (dict): Priority vs. DSCP map (key = priority). + + Returns: + Return captured IP packets (list of list) + """ + + if snappi_extra_params is None: + snappi_extra_params = SnappiTestParams() + + duthost1 = snappi_extra_params.multi_dut_params.duthost1 + rx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[0] + duthost2 = snappi_extra_params.multi_dut_params.duthost2 + tx_port = snappi_extra_params.multi_dut_params.multi_dut_ports[1] + iters = snappi_extra_params.test_iterations + + pytest_assert(testbed_config is not None, 'Failed to get L2/3 testbed config') + + logger.info("Stopping PFC watchdog") + stop_pfcwd(duthost1, rx_port['asic_value']) + stop_pfcwd(duthost2, tx_port['asic_value']) + logger.info("Disabling packet aging if necessary") + disable_packet_aging(duthost1) + disable_packet_aging(duthost2) + + # Configure WRED/ECN thresholds + logger.info("Configuring WRED and ECN thresholds") + config_result = config_wred(host_ans=duthost1, + kmin=snappi_extra_params.ecn_params["kmin"], + kmax=snappi_extra_params.ecn_params["kmax"], + pmax=snappi_extra_params.ecn_params["pmax"]) + pytest_assert(config_result is 
True, 'Failed to configure WRED/ECN at the DUT') + config_result = config_wred(host_ans=duthost2, + kmin=snappi_extra_params.ecn_params["kmin"], + kmax=snappi_extra_params.ecn_params["kmax"], + pmax=snappi_extra_params.ecn_params["pmax"]) + pytest_assert(config_result is True, 'Failed to configure WRED/ECN at the DUT') + + # Enable ECN marking + logger.info("Enabling ECN markings") + pytest_assert(enable_ecn(host_ans=duthost1, prio=lossless_prio), 'Unable to enable ecn') + pytest_assert(enable_ecn(host_ans=duthost2, prio=lossless_prio), 'Unable to enable ecn') + + config_result = config_ingress_lossless_buffer_alpha(host_ans=duthost1, + alpha_log2=3) + + pytest_assert(config_result is True, 'Failed to configure PFC threshold to 8') + config_result = config_ingress_lossless_buffer_alpha(host_ans=duthost2, + alpha_log2=3) + + pytest_assert(config_result is True, 'Failed to configure PFC threshold to 8') + + # Get the ID of the port to test + port_id = get_dut_port_id(dut_hostname=duthost1.hostname, + dut_port=dut_port, + conn_data=conn_data, + fanout_data=fanout_data) + + pytest_assert(port_id is not None, + 'Failed to get ID for port {}'.format(dut_port)) + + speed_str = testbed_config.layer1[0].speed + speed_gbps = int(speed_str.split('_')[1]) + + # Generate base traffic config + port_id = 0 + logger.info("Generating base flow config") + snappi_extra_params.base_flow_config = setup_base_traffic_config(testbed_config=testbed_config, + port_config_list=port_config_list, + port_id=port_id) + + logger.info("Setting test flow config params") + snappi_extra_params.traffic_flow_config.data_flow_config.update({ + "flow_name": DATA_FLOW_NAME, + "flow_rate_percent": 100, + "flow_delay_sec": DATA_START_DELAY_SEC, + "flow_traffic_type": traffic_flow_mode.FIXED_PACKETS + }) + + logger.info("Setting pause flow config params") + snappi_extra_params.traffic_flow_config.pause_flow_config = { + "flow_name": PAUSE_FLOW_NAME, + "flow_dur_sec": EXP_DURATION_SEC, + "flow_rate_percent": 
None, + "flow_rate_pps": calc_pfc_pause_flow_rate(speed_gbps), + "flow_rate_bps": None, + "flow_pkt_size": 64, + "flow_pkt_count": None, + "flow_delay_sec": 0, + "flow_traffic_type": traffic_flow_mode.FIXED_DURATION + } + + # Generate traffic config of one test flow and one pause storm + logger.info("Generating test flows") + generate_test_flows(testbed_config=testbed_config, + test_flow_prio_list=[lossless_prio], + prio_dscp_map=prio_dscp_map, + snappi_extra_params=snappi_extra_params) + + logger.info("Generating pause flows") + generate_pause_flows(testbed_config=testbed_config, + pause_prio_list=[lossless_prio], + global_pause=False, + snappi_extra_params=snappi_extra_params) + + flows = testbed_config.flows + + all_flow_names = [flow.name for flow in flows] + data_flow_names = [flow.name for flow in flows if PAUSE_FLOW_NAME not in flow.name] + + logger.info("Setting packet capture port to {}".format(testbed_config.ports[port_id].name)) + snappi_extra_params.packet_capture_ports = [testbed_config.ports[port_id].name] + + result = [] + logger.info("Running {} iteration(s)".format(iters)) + for i in range(iters): + logger.info("Running iteration {}".format(i)) + snappi_extra_params.packet_capture_file = "ECN_cap-{}".format(i) + logger.info("Packet capture file: {}.pcapng".format(snappi_extra_params.packet_capture_file)) + + config_capture_pkt(testbed_config=testbed_config, + port_names=snappi_extra_params.packet_capture_ports, + capture_type=snappi_extra_params.packet_capture_type, + capture_name=snappi_extra_params.packet_capture_file) + + logger.info("Running traffic") + run_traffic(duthost=duthost1, + api=api, + config=testbed_config, + data_flow_names=data_flow_names, + all_flow_names=all_flow_names, + exp_dur_sec=EXP_DURATION_SEC, + snappi_extra_params=snappi_extra_params) + + result.append(get_ip_pkts(snappi_extra_params.packet_capture_file + ".pcapng")) + + return result diff --git a/tests/snappi_tests/multidut/ecn/test_multidut_dequeue_ecn_with_snappi.py 
@pytest.mark.parametrize('line_card_choice', [line_card_choice])
@pytest.mark.parametrize('linecard_configuration_set', [config_set])
def test_dequeue_ecn(request,
                     snappi_api,  # noqa: F811
                     conn_graph_facts,  # noqa: F811
                     fanout_graph_facts,  # noqa: F811
                     duthosts,
                     rand_one_dut_lossless_prio,
                     line_card_choice,
                     linecard_configuration_set,
                     get_multidut_snappi_ports,  # noqa: F811
                     prio_dscp_map):  # noqa: F811
    """
    Test if the device under test (DUT) performs ECN marking at the egress.

    The test configures WRED/ECN via run_ecn_test, injects a fixed number of
    packets on a lossless priority while the egress is paused, and then checks
    the captured packet stream: the first dequeued packet must carry an ECN
    mark and the last one must not.

    Args:
        request (pytest fixture): pytest request object
        snappi_api (pytest fixture): SNAPPI session
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        duthosts (pytest fixture): list of DUTs
        rand_one_dut_lossless_prio (str): name of lossless priority to test, e.g., 's6100-1|3'
        line_card_choice: Line card choice to be mentioned in the variable.py file
        linecard_configuration_set : Line card classification, (min 1 or max 2 hostnames and asics to be given)
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).

    Returns:
        N/A
    """

    if line_card_choice not in linecard_configuration_set.keys():
        pytest_require(False, "Invalid line_card_choice value passed in parameter")

    # Select the DUT(s): two random DUTs when the chosen line-card config names
    # two hosts; otherwise the single matching DUT plays both roles.
    if (len(linecard_configuration_set[line_card_choice]['hostname']) == 2):
        dut_list = random.sample(duthosts, 2)
        duthost1, duthost2 = dut_list
    elif (len(linecard_configuration_set[line_card_choice]['hostname']) == 1):
        dut_list = [dut for dut in duthosts if
                    linecard_configuration_set[line_card_choice]['hostname'] == [dut.hostname]]
        duthost1, duthost2 = dut_list[0], dut_list[0]
    else:
        pytest_require(False, "Hostname can't be an empty list")

    snappi_port_list = get_multidut_snappi_ports(line_card_choice=line_card_choice,
                                                 line_card_info=linecard_configuration_set[line_card_choice])
    if len(snappi_port_list) < 2:
        pytest_require(False, "Need Minimum of 2 ports for the test")

    # Pick a pair of traffic-generator peer ports and build the base testbed config.
    snappi_ports = get_multidut_tgen_peer_port_set(line_card_choice, snappi_port_list, config_set, 2)

    testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(dut_list,
                                                                            snappi_ports,
                                                                            snappi_api)

    # rand_one_dut_lossless_prio is '<dut>|<prio>'; only the priority is used here.
    _, lossless_prio = rand_one_dut_lossless_prio.split('|')
    # Skip on platforms where ECN testing is not supported (checked per DUT).
    skip_ecn_tests(duthost1)
    skip_ecn_tests(duthost2)
    lossless_prio = int(lossless_prio)
    snappi_extra_params = SnappiTestParams()
    snappi_extra_params.multi_dut_params.duthost1 = duthost1
    snappi_extra_params.multi_dut_params.duthost2 = duthost2
    snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports

    snappi_extra_params.packet_capture_type = packet_capture.IP_CAPTURE
    snappi_extra_params.is_snappi_ingress_port_cap = True
    # WRED thresholds in bytes; kmin/kmax chosen so that a burst of 101 x 1024B
    # packets drives the queue past kmax — presumably making the first dequeued
    # packet ECN-marked and the last one unmarked (verified by the asserts below).
    snappi_extra_params.ecn_params = {'kmin': 50000, 'kmax': 51000, 'pmax': 100}
    data_flow_pkt_size = 1024
    data_flow_pkt_count = 101
    logger.info("Running ECN dequeue test with params: {}".format(snappi_extra_params.ecn_params))

    snappi_extra_params.traffic_flow_config.data_flow_config = {
        "flow_pkt_size": data_flow_pkt_size,
        "flow_pkt_count": data_flow_pkt_count
    }

    # Single iteration; run_ecn_test returns one captured-packet list per iteration.
    ip_pkts = run_ecn_test(api=snappi_api,
                           testbed_config=testbed_config,
                           port_config_list=port_config_list,
                           conn_data=conn_graph_facts,
                           fanout_data=fanout_graph_facts,
                           dut_port=snappi_ports[0]['peer_port'],
                           lossless_prio=lossless_prio,
                           prio_dscp_map=prio_dscp_map,
                           iters=1,
                           snappi_extra_params=snappi_extra_params)[0]

    logger.info("Running verification for ECN dequeue test")
    # Check if all the packets are captured
    pytest_assert(len(ip_pkts) == data_flow_pkt_count,
                  'Only capture {}/{} IP packets'.format(len(ip_pkts), data_flow_pkt_count))

    # Check if the first packet is ECN marked
    pytest_assert(is_ecn_marked(ip_pkts[0]), "The first packet should be marked")

    # Check if the last packet is not ECN marked
    pytest_assert(not is_ecn_marked(ip_pkts[-1]),
                  "The last packet should not be marked")

    # Teardown ECN config through a reload
    logger.info("Reloading config to teardown ECN config")
    config_reload(sonic_host=duthost1, config_source='config_db', safe_reload=True)
    config_reload(sonic_host=duthost2, config_source='config_db', safe_reload=True)
@pytest.mark.parametrize('line_card_choice', [line_card_choice])
@pytest.mark.parametrize('linecard_configuration_set', [config_set])
def test_red_accuracy(request,
                      snappi_api,  # noqa: F811
                      conn_graph_facts,  # noqa: F811
                      fanout_graph_facts,  # noqa: F811
                      duthosts,
                      rand_one_dut_lossless_prio,
                      line_card_choice,
                      linecard_configuration_set,
                      get_multidut_snappi_ports,  # noqa: F811
                      prio_dscp_map):  # noqa: F811
    """
    Measure RED/ECN marking accuracy of the device under test (DUT).
    Dump queue length vs. ECN marking probability results into the log.

    Args:
        request (pytest fixture): pytest request object
        snappi_api (pytest fixture): SNAPPI session
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        duthosts (pytest fixture): list of DUTs
        rand_one_dut_lossless_prio (str): name of lossless priority to test, e.g., 's6100-1|3'
        line_card_choice: Line card choice to be mentioned in the variable.py file
        linecard_configuration_set : Line card classification, (min 1 or max 2 hostnames and asics to be given)
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).

    Returns:
        N/A
    """
    if line_card_choice not in linecard_configuration_set.keys():
        pytest_require(False, "Invalid line_card_choice value passed in parameter")

    # Select the DUT(s): two random DUTs when the chosen line-card config names
    # two hosts; otherwise the single matching DUT plays both roles.
    if (len(linecard_configuration_set[line_card_choice]['hostname']) == 2):
        dut_list = random.sample(duthosts, 2)
        duthost1, duthost2 = dut_list
    elif (len(linecard_configuration_set[line_card_choice]['hostname']) == 1):
        dut_list = [dut for dut in duthosts
                    if linecard_configuration_set[line_card_choice]['hostname'] == [dut.hostname]]
        duthost1, duthost2 = dut_list[0], dut_list[0]
    else:
        pytest_require(False, "Hostname can't be an empty list")

    snappi_port_list = get_multidut_snappi_ports(line_card_choice=line_card_choice,
                                                 line_card_info=linecard_configuration_set[line_card_choice])
    if len(snappi_port_list) < 2:
        pytest_require(False, "Need Minimum of 2 ports for the test")

    snappi_ports = get_multidut_tgen_peer_port_set(line_card_choice, snappi_port_list, config_set, 2)

    testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(dut_list,
                                                                            snappi_ports,
                                                                            snappi_api)

    _, lossless_prio = rand_one_dut_lossless_prio.split('|')
    # BUGFIX: the original chained these as `skip_ecn_tests(duthost1) or
    # skip_ecn_tests(duthost2)`, which would short-circuit and skip the second
    # platform check if the first ever returned a truthy value. Call both
    # unconditionally, consistent with test_dequeue_ecn.
    skip_ecn_tests(duthost1)
    skip_ecn_tests(duthost2)
    lossless_prio = int(lossless_prio)

    snappi_extra_params = SnappiTestParams()
    snappi_extra_params.multi_dut_params.duthost1 = duthost1
    snappi_extra_params.multi_dut_params.duthost2 = duthost2
    snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports

    snappi_extra_params.packet_capture_type = packet_capture.IP_CAPTURE
    snappi_extra_params.is_snappi_ingress_port_cap = True
    # WRED thresholds in bytes; the 2100-packet burst sweeps the queue depth
    # across the kmin..kmax marking region so marking probability can be sampled.
    snappi_extra_params.ecn_params = {'kmin': 500000, 'kmax': 2000000, 'pmax': 5}
    data_flow_pkt_size = 1024
    data_flow_pkt_count = 2100
    num_iterations = 1

    logger.info("Running ECN red accuracy test with ECN params: {}".format(snappi_extra_params.ecn_params))
    logger.info("Running ECN red accuracy test for {} iterations".format(num_iterations))

    snappi_extra_params.traffic_flow_config.data_flow_config = {
        "flow_pkt_size": data_flow_pkt_size,
        "flow_pkt_count": data_flow_pkt_count
    }

    # One captured-packet list per iteration.
    ip_pkts_list = run_ecn_test(api=snappi_api,
                                testbed_config=testbed_config,
                                port_config_list=port_config_list,
                                conn_data=conn_graph_facts,
                                fanout_data=fanout_graph_facts,
                                dut_port=snappi_ports[0]['peer_port'],
                                lossless_prio=lossless_prio,
                                prio_dscp_map=prio_dscp_map,
                                iters=num_iterations,
                                snappi_extra_params=snappi_extra_params)

    # Check if we capture packets of all the rounds
    pytest_assert(len(ip_pkts_list) == num_iterations,
                  'Only capture {}/{} rounds of packets'.format(len(ip_pkts_list), num_iterations))

    logger.info("Instantiating a queue length vs. ECN marking probability dictionary")
    # Key: inferred queue depth (bytes) when a packet was dequeued;
    # value: number of iterations in which that packet was ECN-marked.
    queue_mark_cnt = {}
    for i in range(data_flow_pkt_count):
        queue_len = (data_flow_pkt_count - i) * data_flow_pkt_size
        queue_mark_cnt[queue_len] = 0

    logger.info("Check that all packets are captured for each iteration")
    for i in range(num_iterations):
        ip_pkts = ip_pkts_list[i]
        # Check if we capture all the packets in each round
        pytest_assert(len(ip_pkts) == data_flow_pkt_count,
                      'Only capture {}/{} packets in round {}'.format(len(ip_pkts), data_flow_pkt_count, i))

        for j in range(data_flow_pkt_count):
            ip_pkt = ip_pkts[j]
            # The j-th dequeued packet left behind (count - j) still-queued packets.
            queue_len = (data_flow_pkt_count - j) * data_flow_pkt_size

            if is_ecn_marked(ip_pkt):
                queue_mark_cnt[queue_len] += 1

    # Dump queue length vs. ECN marking probability into logger file
    logger.info("------- Dumping queue length vs. ECN marking probability data ------")
    output_table = []
    queue_mark_cnt = collections.OrderedDict(sorted(queue_mark_cnt.items()))
    for queue, mark_cnt in list(queue_mark_cnt.items()):
        output_table.append([queue, float(mark_cnt) / num_iterations])
    logger.info(tabulate(output_table, headers=['Queue Length', 'ECN Marking Probability']))

    # Teardown ECN config through a reload
    logger.info("Reloading config to teardown ECN config")
    config_reload(sonic_host=duthost1, config_source='config_db', safe_reload=True)
    config_reload(sonic_host=duthost2, config_source='config_db', safe_reload=True)
@pytest.mark.parametrize('line_card_choice', [line_card_choice])
@pytest.mark.parametrize('linecard_configuration_set', [config_set])
def test_pfc_pause_single_lossy_prio(snappi_api,  # noqa: F811
                                     conn_graph_facts,  # noqa: F811
                                     fanout_graph_facts,  # noqa: F811
                                     duthosts,
                                     line_card_choice,
                                     linecard_configuration_set,
                                     get_multidut_snappi_ports,  # noqa: F811
                                     prio_dscp_map,  # noqa: F811
                                     lossless_prio_list):  # noqa: F811
    """
    Test if PFC will impact a single lossy priority

    Args:
        snappi_api (pytest fixture): SNAPPI session
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        duthosts (pytest fixture): list of DUTs
        line_card_choice: Line card choice to be mentioned in the variable.py file
        linecard_configuration_set : Line card classification, (min 1 or max 2 hostnames and asics to be given)
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
        lossless_prio_list (pytest fixture): list of lossless priorities

    Returns:
        N/A
    """
    # BUGFIX: prio_dscp_map and lossless_prio_list were used in the body but
    # not declared as parameters, so the names resolved to the module-level
    # imported fixture *functions* (e.g. `prio_dscp_map.keys()` on a function
    # object -> TypeError). They are now requested as pytest fixtures, which
    # is backward-compatible for all callers.
    if line_card_choice not in linecard_configuration_set.keys():
        pytest_require(False, "Invalid line_card_choice value passed in parameter")

    # Select the DUT(s): two random DUTs when the chosen line-card config names
    # two (or more) hosts; otherwise the single matching DUT plays both roles.
    if (len(linecard_configuration_set[line_card_choice]['hostname']) >= 2):
        dut_list = random.sample(duthosts, 2)
        duthost1, duthost2 = dut_list
    elif (len(linecard_configuration_set[line_card_choice]['hostname']) == 1):
        dut_list = [dut for dut in duthosts
                    if linecard_configuration_set[line_card_choice]['hostname'] == [dut.hostname]]
        duthost1, duthost2 = dut_list[0], dut_list[0]
    else:
        pytest_require(False, "Hostname can't be an empty list")

    snappi_port_list = get_multidut_snappi_ports(line_card_choice=line_card_choice,
                                                 line_card_info=linecard_configuration_set[line_card_choice])
    if len(snappi_port_list) < 2:
        pytest_require(False, "Need Minimum of 2 ports for the test")

    snappi_ports = get_multidut_tgen_peer_port_set(line_card_choice, snappi_port_list, config_set, 2)
    tgen_ports = [port['location'] for port in snappi_ports]
    testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(dut_list,
                                                                            tgen_ports,
                                                                            snappi_ports,
                                                                            snappi_api)
    # Pause one randomly-chosen lossy priority; all remaining priorities
    # carry background traffic.
    all_prio_list = prio_dscp_map.keys()
    lossy_prio_list = [x for x in all_prio_list if x not in lossless_prio_list]  # noqa: F811
    lossy_prio = int(random.sample(lossy_prio_list, 1)[0])
    pause_prio_list = [lossy_prio]
    test_prio_list = pause_prio_list
    bg_prio_list = [p for p in all_prio_list]
    bg_prio_list.remove(lossy_prio)

    snappi_extra_params = SnappiTestParams()
    snappi_extra_params.multi_dut_params.duthost1 = duthost1
    snappi_extra_params.multi_dut_params.duthost2 = duthost2
    snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports

    # test_traffic_pause=False: lossy traffic must NOT be paused by PFC frames.
    run_pfc_test(api=snappi_api,
                 testbed_config=testbed_config,
                 port_config_list=port_config_list,
                 conn_data=conn_graph_facts,
                 fanout_data=fanout_graph_facts,
                 global_pause=False,
                 pause_prio_list=pause_prio_list,
                 test_prio_list=test_prio_list,
                 bg_prio_list=bg_prio_list,
                 prio_dscp_map=prio_dscp_map,
                 test_traffic_pause=False,
                 snappi_extra_params=snappi_extra_params)

    cleanup_config(dut_list, snappi_ports)
@pytest.mark.parametrize('line_card_choice', [line_card_choice])
@pytest.mark.parametrize('linecard_configuration_set', [config_set])
def test_pfc_pause_multi_lossy_prio(snappi_api,  # noqa: F811
                                    conn_graph_facts,  # noqa: F811
                                    fanout_graph_facts,  # noqa: F811
                                    duthosts,
                                    line_card_choice,
                                    linecard_configuration_set,
                                    get_multidut_snappi_ports,  # noqa: F811
                                    prio_dscp_map,  # noqa: F811
                                    lossless_prio_list  # noqa: F811
                                    ):
    """
    Test if PFC will impact multiple lossy priorities

    Args:
        snappi_api (pytest fixture): SNAPPI session
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        duthosts (pytest fixture): list of DUTs
        line_card_choice: Line card choice to be mentioned in the variable.py file
        linecard_configuration_set : Line card classification, (min 1 or max 2 hostnames and asics to be given)
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
        lossless_prio_list (pytest fixture): list of lossless priorities

    Returns:
        N/A
    """
    # BUGFIX: prio_dscp_map and lossless_prio_list were used in the body but
    # not declared as parameters, so the names resolved to the module-level
    # imported fixture *functions* (TypeError at runtime). They are now
    # requested as pytest fixtures, which is backward-compatible for callers.
    if line_card_choice not in linecard_configuration_set.keys():
        pytest_require(False, "Invalid line_card_choice value passed in parameter")

    # Select the DUT(s): two random DUTs when the chosen line-card config names
    # two (or more) hosts; otherwise the single matching DUT plays both roles.
    if (len(linecard_configuration_set[line_card_choice]['hostname']) >= 2):
        dut_list = random.sample(duthosts, 2)
        duthost1, duthost2 = dut_list
    elif (len(linecard_configuration_set[line_card_choice]['hostname']) == 1):
        dut_list = [dut for dut in duthosts
                    if linecard_configuration_set[line_card_choice]['hostname'] == [dut.hostname]]
        duthost1, duthost2 = dut_list[0], dut_list[0]
    else:
        pytest_require(False, "Hostname can't be an empty list")

    snappi_port_list = get_multidut_snappi_ports(line_card_choice=line_card_choice,
                                                 line_card_info=linecard_configuration_set[line_card_choice])
    if len(snappi_port_list) < 2:
        pytest_require(False, "Need Minimum of 2 ports for the test")

    snappi_ports = get_multidut_tgen_peer_port_set(line_card_choice, snappi_port_list, config_set, 2)
    tgen_ports = [port['location'] for port in snappi_ports]
    testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(dut_list,
                                                                            tgen_ports,
                                                                            snappi_ports,
                                                                            snappi_api)

    # Pause every lossy priority; lossless priorities carry background traffic.
    all_prio_list = prio_dscp_map.keys()
    lossy_prio_list = [x for x in all_prio_list if x not in lossless_prio_list]  # noqa: F811
    pause_prio_list = lossy_prio_list
    test_prio_list = lossy_prio_list
    bg_prio_list = lossless_prio_list

    snappi_extra_params = SnappiTestParams()
    snappi_extra_params.multi_dut_params.duthost1 = duthost1
    snappi_extra_params.multi_dut_params.duthost2 = duthost2
    snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports

    # test_traffic_pause=False: lossy traffic must NOT be paused by PFC frames.
    run_pfc_test(api=snappi_api,
                 testbed_config=testbed_config,
                 port_config_list=port_config_list,
                 conn_data=conn_graph_facts,
                 fanout_data=fanout_graph_facts,
                 global_pause=False,
                 pause_prio_list=pause_prio_list,
                 test_prio_list=test_prio_list,
                 bg_prio_list=bg_prio_list,
                 prio_dscp_map=prio_dscp_map,
                 test_traffic_pause=False,
                 snappi_extra_params=snappi_extra_params)

    cleanup_config(dut_list, snappi_ports)
@pytest.mark.disable_loganalyzer
@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast'])
@pytest.mark.parametrize('line_card_choice', [line_card_choice])
@pytest.mark.parametrize('linecard_configuration_set', [config_set])
def test_pfc_pause_single_lossy_prio_reboot(snappi_api,  # noqa: F811
                                            conn_graph_facts,  # noqa: F811
                                            fanout_graph_facts,  # noqa: F811
                                            duthosts,
                                            localhost,
                                            line_card_choice,
                                            linecard_configuration_set,
                                            get_multidut_snappi_ports,  # noqa: F811
                                            reboot_type,
                                            prio_dscp_map,  # noqa: F811
                                            lossless_prio_list):  # noqa: F811
    """
    Test if PFC will impact a single lossy priority after various kinds of reboots

    Args:
        snappi_api (pytest fixture): SNAPPI session
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        duthosts (pytest fixture): list of DUTs
        localhost (pytest fixture): localhost handle
        line_card_choice: Line card choice to be mentioned in the variable.py file
        linecard_configuration_set : Line card classification, (min 1 or max 2 hostnames and asics to be given)
        reboot_type (str): reboot type to be issued on the DUT
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
        lossless_prio_list (pytest fixture): list of lossless priorities

    Returns:
        N/A
    """
    # BUGFIX: prio_dscp_map and lossless_prio_list were used in the body but
    # not declared as parameters, so the names resolved to the module-level
    # imported fixture *functions* (TypeError at runtime). They are now
    # requested as pytest fixtures, which is backward-compatible for callers.
    if line_card_choice not in linecard_configuration_set.keys():
        pytest_require(False, "Invalid line_card_choice value passed in parameter")

    # Select the DUT(s): two random DUTs when the chosen line-card config names
    # two (or more) hosts; otherwise the single matching DUT plays both roles.
    if (len(linecard_configuration_set[line_card_choice]['hostname']) >= 2):
        dut_list = random.sample(duthosts, 2)
        duthost1, duthost2 = dut_list
    elif (len(linecard_configuration_set[line_card_choice]['hostname']) == 1):
        dut_list = [dut for dut in duthosts
                    if linecard_configuration_set[line_card_choice]['hostname'] == [dut.hostname]]
        duthost1, duthost2 = dut_list[0], dut_list[0]
    else:
        pytest_require(False, "Hostname can't be an empty list")

    snappi_port_list = get_multidut_snappi_ports(line_card_choice=line_card_choice,
                                                 line_card_info=linecard_configuration_set[line_card_choice])
    if len(snappi_port_list) < 2:
        pytest_require(False, "Need Minimum of 2 ports for the test")

    snappi_ports = get_multidut_tgen_peer_port_set(line_card_choice, snappi_port_list, config_set, 2)
    tgen_ports = [port['location'] for port in snappi_ports]
    testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(dut_list,
                                                                            tgen_ports,
                                                                            snappi_ports,
                                                                            snappi_api)

    # Pause one randomly-chosen lossy priority; all remaining priorities
    # carry background traffic.
    all_prio_list = prio_dscp_map.keys()
    lossy_prio_list = [x for x in all_prio_list if x not in lossless_prio_list]  # noqa: F811
    lossy_prio = int(random.sample(lossy_prio_list, 1)[0])
    pause_prio_list = [lossy_prio]
    test_prio_list = pause_prio_list
    bg_prio_list = [p for p in all_prio_list]
    bg_prio_list.remove(lossy_prio)

    # Reboot one DUT and wait for it to stabilize before running traffic.
    logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost1.hostname))
    reboot(duthost1, localhost, reboot_type=reboot_type)
    logger.info("Wait until the system is stable")
    pytest_assert(wait_until(300, 20, 0, duthost1.critical_services_fully_started),
                  "Not all critical services are fully started")

    snappi_extra_params = SnappiTestParams()
    snappi_extra_params.multi_dut_params.duthost1 = duthost1
    snappi_extra_params.multi_dut_params.duthost2 = duthost2
    snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports

    # test_traffic_pause=False: lossy traffic must NOT be paused by PFC frames.
    run_pfc_test(api=snappi_api,
                 testbed_config=testbed_config,
                 port_config_list=port_config_list,
                 conn_data=conn_graph_facts,
                 fanout_data=fanout_graph_facts,
                 global_pause=False,
                 pause_prio_list=pause_prio_list,
                 test_prio_list=test_prio_list,
                 bg_prio_list=bg_prio_list,
                 prio_dscp_map=prio_dscp_map,
                 test_traffic_pause=False,
                 snappi_extra_params=snappi_extra_params)

    cleanup_config(dut_list, snappi_ports)
@pytest.mark.disable_loganalyzer
@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast'])
@pytest.mark.parametrize('line_card_choice', [line_card_choice])
@pytest.mark.parametrize('linecard_configuration_set', [config_set])
def test_pfc_pause_multi_lossy_prio_reboot(snappi_api,  # noqa: F811
                                           conn_graph_facts,  # noqa: F811
                                           fanout_graph_facts,  # noqa: F811
                                           duthosts,
                                           localhost,
                                           line_card_choice,
                                           linecard_configuration_set,
                                           get_multidut_snappi_ports,  # noqa: F811
                                           reboot_type,
                                           prio_dscp_map,  # noqa: F811
                                           lossless_prio_list):  # noqa: F811
    """
    Test if PFC will impact multiple lossy priorities after various kinds of reboots

    Args:
        snappi_api (pytest fixture): SNAPPI session
        conn_graph_facts (pytest fixture): connection graph
        fanout_graph_facts (pytest fixture): fanout graph
        localhost (pytest fixture): localhost handle
        duthosts (pytest fixture): list of DUTs
        line_card_choice: Line card choice to be mentioned in the variable.py file
        linecard_configuration_set : Line card classification, (min 1 or max 2 hostnames and asics to be given)
        reboot_type (str): reboot type to be issued on the DUT
        prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
        lossless_prio_list (pytest fixture): list of lossless priorities

    Returns:
        N/A
    """
    # BUGFIX: prio_dscp_map and lossless_prio_list were used in the body but
    # not declared as parameters, so the names resolved to the module-level
    # imported fixture *functions* (TypeError at runtime). They are now
    # requested as pytest fixtures, which is backward-compatible for callers.
    if line_card_choice not in linecard_configuration_set.keys():
        pytest_require(False, "Invalid line_card_choice value passed in parameter")

    # Select the DUT(s): two random DUTs when the chosen line-card config names
    # two (or more) hosts; otherwise the single matching DUT plays both roles.
    if (len(linecard_configuration_set[line_card_choice]['hostname']) >= 2):
        dut_list = random.sample(duthosts, 2)
        duthost1, duthost2 = dut_list
    elif (len(linecard_configuration_set[line_card_choice]['hostname']) == 1):
        dut_list = [dut for dut in duthosts
                    if linecard_configuration_set[line_card_choice]['hostname'] == [dut.hostname]]
        duthost1, duthost2 = dut_list[0], dut_list[0]
    else:
        pytest_require(False, "Hostname can't be an empty list")

    snappi_port_list = get_multidut_snappi_ports(line_card_choice=line_card_choice,
                                                 line_card_info=linecard_configuration_set[line_card_choice])
    if len(snappi_port_list) < 2:
        pytest_require(False, "Need Minimum of 2 ports for the test")

    snappi_ports = get_multidut_tgen_peer_port_set(line_card_choice, snappi_port_list, config_set, 2)
    tgen_ports = [port['location'] for port in snappi_ports]
    testbed_config, port_config_list, snappi_ports = snappi_dut_base_config(dut_list,
                                                                            tgen_ports,
                                                                            snappi_ports,
                                                                            snappi_api)

    # Pause every lossy priority; lossless priorities carry background traffic.
    all_prio_list = prio_dscp_map.keys()
    lossy_prio_list = [x for x in all_prio_list if x not in lossless_prio_list]  # noqa: F811
    pause_prio_list = lossy_prio_list
    test_prio_list = lossy_prio_list
    bg_prio_list = lossless_prio_list

    # Reboot one DUT and wait for it to stabilize before running traffic.
    logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost1.hostname))
    reboot(duthost1, localhost, reboot_type=reboot_type)
    logger.info("Wait until the system is stable")
    pytest_assert(wait_until(300, 20, 0, duthost1.critical_services_fully_started),
                  "Not all critical services are fully started")

    snappi_extra_params = SnappiTestParams()
    snappi_extra_params.multi_dut_params.duthost1 = duthost1
    snappi_extra_params.multi_dut_params.duthost2 = duthost2
    snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports

    # test_traffic_pause=False: lossy traffic must NOT be paused by PFC frames.
    run_pfc_test(api=snappi_api,
                 testbed_config=testbed_config,
                 port_config_list=port_config_list,
                 conn_data=conn_graph_facts,
                 fanout_data=fanout_graph_facts,
                 global_pause=False,
                 pause_prio_list=pause_prio_list,
                 test_prio_list=test_prio_list,
                 bg_prio_list=bg_prio_list,
                 prio_dscp_map=prio_dscp_map,
                 test_traffic_pause=False,
                 snappi_extra_params=snappi_extra_params)

    cleanup_config(dut_list, snappi_ports)
SnappiTestParams() + snappi_extra_params.multi_dut_params.duthost1 = duthost1 + snappi_extra_params.multi_dut_params.duthost2 = duthost2 + snappi_extra_params.multi_dut_params.multi_dut_ports = snappi_ports + + run_pfc_test(api=snappi_api, + testbed_config=testbed_config, + port_config_list=port_config_list, + conn_data=conn_graph_facts, + fanout_data=fanout_graph_facts, + global_pause=False, + pause_prio_list=pause_prio_list, + test_prio_list=test_prio_list, + bg_prio_list=bg_prio_list, + prio_dscp_map=prio_dscp_map, + test_traffic_pause=False, + snappi_extra_params=snappi_extra_params) + + cleanup_config(dut_list, snappi_ports) diff --git a/tests/snmp/test_snmp_cpu.py b/tests/snmp/test_snmp_cpu.py index 734ebb4e15d..e03c34d57ec 100644 --- a/tests/snmp/test_snmp_cpu.py +++ b/tests/snmp/test_snmp_cpu.py @@ -75,7 +75,7 @@ def test_snmp_cpu(duthosts, enum_rand_one_per_hwsku_hostname, localhost, creds_a pytest.fail("cpu diff large than 5%%, %d, %d" % ( int(snmp_facts['ansible_ChStackUnitCpuUtil5sec']), int(output['stdout']))) - duthost.shell("killall yes") except Exception: - duthost.shell("killall yes") raise + finally: + duthost.shell("killall yes") diff --git a/tests/snmp/test_snmp_fdb.py b/tests/snmp/test_snmp_fdb.py index ff8dcb7c326..dc9011e2e03 100644 --- a/tests/snmp/test_snmp_fdb.py +++ b/tests/snmp/test_snmp_fdb.py @@ -104,7 +104,7 @@ def test_snmp_fdb_send_tagged(ptfadapter, duthosts, rand_one_dut_hostname, # Flush dataplane ptfadapter.dataplane.flush() - time.sleep(10) + time.sleep(20) hostip = duthost.host.options['inventory_manager'].get_host( duthost.hostname).vars['ansible_host'] snmp_facts = get_snmp_facts( diff --git a/tests/snmp/test_snmp_memory.py b/tests/snmp/test_snmp_memory.py index d94dcb8fb89..5e7397d8b93 100644 --- a/tests/snmp/test_snmp_memory.py +++ b/tests/snmp/test_snmp_memory.py @@ -142,6 +142,9 @@ def test_snmp_memory_load(duthosts, enum_rand_one_per_hwsku_hostname, localhost, mem_free = int(outputs['results'][1]['stdout']) mem_total = 
int(outputs['results'][2]['stdout']) percentage = get_percentage_threshold(int(mem_total)) + # if total mem less than 2G + if mem_total <= 2 * 1024 * 1024: + pytest.skip("Total memory is too small for percentage.") logger.info("SNMP Free Memory: {}".format(snmp_free_memory)) logger.info("DUT Free Memory: {}".format(mem_free)) logger.info("Difference: {}".format( diff --git a/tests/stress/conftest.py b/tests/stress/conftest.py index bb230aca25a..b392113c8bf 100644 --- a/tests/stress/conftest.py +++ b/tests/stress/conftest.py @@ -3,6 +3,7 @@ import time from tests.common.utilities import wait_until from utils import get_crm_resources, check_queue_status, sleep_to_wait +from tests.common import config_reload CRM_POLLING_INTERVAL = 1 CRM_DEFAULT_POLL_INTERVAL = 300 @@ -54,3 +55,21 @@ def withdraw_and_announce_existing_routes(duthost, localhost, tbinfo): sleep_to_wait(CRM_POLLING_INTERVAL * 5) logger.info("ipv4 route used {}".format(get_crm_resources(duthost, "ipv4_route", "used"))) logger.info("ipv6 route used {}".format(get_crm_resources(duthost, "ipv6_route", "used"))) + + +@pytest.fixture(scope="module", autouse=True) +def check_system_memmory(duthost): + for index in range(1, 4): + cmd = 'echo {} > /proc/sys/vm/drop_caches'.format(index) + duthost.shell(cmd, module_ignore_errors=True) + + cmd = "show system-memory" + cmd_response = duthost.shell(cmd, module_ignore_errors=True) + logger.debug("CMD {}: before test {}".format(cmd, cmd_response.get('stdout', None))) + + yield + cmd = "show system-memory" + cmd_response = duthost.shell(cmd, module_ignore_errors=True) + logger.debug("CMD {}: after test {}".format(cmd, cmd_response.get('stdout', None))) + + config_reload(duthost, safe_reload=True, check_intf_up_ports=True) diff --git a/tests/stress/test_stress_routes.py b/tests/stress/test_stress_routes.py index d7d536629ce..49c3be0c7d0 100644 --- a/tests/stress/test_stress_routes.py +++ b/tests/stress/test_stress_routes.py @@ -45,8 +45,15 @@ def 
test_announce_withdraw_route(duthost, localhost, tbinfo, get_function_conple ignoreRegex = [ ".*ERR route_check.py:.*", ".*ERR.* \'routeCheck\' status failed.*", - ".*Process \'orchagent\' is stuck in namespace \'host\'.*" + ".*Process \'orchagent\' is stuck in namespace \'host\'.*", + ".*ERR rsyslogd: .*" ] + + hwsku = duthost.facts['hwsku'] + if hwsku in ['Arista-7050-QX-32S', 'Arista-7050QX32S-Q32', 'Arista-7050-QX32', 'Arista-7050QX-32S-S4Q31']: + ignoreRegex.append(".*ERR memory_threshold_check:.*") + ignoreRegex.append(".*ERR monit.*memory_check.*") + ignoreRegex.append(".*ERR monit.*mem usage of.*matches resource limit.*") loganalyzer[duthost.hostname].ignore_regex.extend(ignoreRegex) normalized_level = get_function_conpleteness_level diff --git a/tests/syslog/syslog_utils.py b/tests/syslog/syslog_utils.py index 44ffa4be605..d173c591ecf 100644 --- a/tests/syslog/syslog_utils.py +++ b/tests/syslog/syslog_utils.py @@ -2,6 +2,8 @@ Helpful utilities for writing tests for the syslog feature. 
""" import logging +import random +import string import time import os @@ -110,7 +112,7 @@ def replace_ip_neigh(dut, neighbour, neigh_mac_addr, dev): dev=dev)) -def capture_syslog_packets(dut, tcpdump_cmd): +def capture_syslog_packets(dut, tcpdump_cmd, logger_flags='', logger_msg=''): """ Capture syslog packets @@ -130,8 +132,10 @@ def capture_syslog_packets(dut, tcpdump_cmd): logging.debug("Generating log message from DUT") # Generate syslog msgs from the DUT logger_info_msg_count = 20 + default_priority = '--priority INFO' + random_msg = ''.join(random.choice(string.ascii_letters) for _ in range(logger_info_msg_count)) for i in range(logger_info_msg_count): - dut.shell("logger --priority INFO ....{}".format("i")) + dut.shell("logger {flags} ....{msg}".format(flags=default_priority+''+logger_flags, msg=random_msg+logger_msg)) time.sleep(0.2) # wait for stoping tcpdump diff --git a/tests/syslog/test_syslog.py b/tests/syslog/test_syslog.py index 3465f5775d2..7189c468226 100644 --- a/tests/syslog/test_syslog.py +++ b/tests/syslog/test_syslog.py @@ -97,10 +97,7 @@ def check_default_route(rand_selected_dut): yield ret -@pytest.mark.parametrize("dummy_syslog_server_ip_a, dummy_syslog_server_ip_b", - [("7.0.80.166", None), ("fd82:b34f:cc99::100", None), ("7.0.80.165", "7.0.80.166"), - ("fd82:b34f:cc99::100", "7.0.80.166"), ("fd82:b34f:cc99::100", "fd82:b34f:cc99::200")]) -def test_syslog(rand_selected_dut, dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, check_default_route): +def run_syslog(rand_selected_dut, dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, check_default_route): duthost = rand_selected_dut logger.info("Starting syslog tests") test_message = "Basic Test Message" @@ -188,3 +185,13 @@ def test_syslog(rand_selected_dut, dummy_syslog_server_ip_a, dummy_syslog_server logger.debug("DUT's syslog server IPs:\n%s" % syslog_config) pytest.fail("Dummy syslog server IP not seen in the pcap file") + + +@pytest.mark.parametrize("dummy_syslog_server_ip_a, 
dummy_syslog_server_ip_b", + [("7.0.80.166", None), + ("fd82:b34f:cc99::100", None), + ("7.0.80.165", "7.0.80.166"), + ("fd82:b34f:cc99::100", "7.0.80.166"), + ("fd82:b34f:cc99::100", "fd82:b34f:cc99::200")]) +def test_syslog(rand_selected_dut, dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, check_default_route): + run_syslog(rand_selected_dut, dummy_syslog_server_ip_a, dummy_syslog_server_ip_b, check_default_route) diff --git a/tests/syslog/test_syslog_source_ip.py b/tests/syslog/test_syslog_source_ip.py index 71d6193fba7..869a127f638 100644 --- a/tests/syslog/test_syslog_source_ip.py +++ b/tests/syslog/test_syslog_source_ip.py @@ -351,11 +351,13 @@ def check_syslog_config_nonexist(self, port, vrf_list, is_set_source, is_set_vrf "Syslog config: server_ip {}, source_ip {}, vrf {}, port {} still exist".format( v["syslog_server_ip"], source_ip, vrf, port)) - def check_syslog_msg_is_sent(self, routed_interfaces, mgmt_interface, port, vrf_list, is_set_source): + def check_syslog_msg_is_sent(self, routed_interfaces, mgmt_interface, port, vrf_list, is_set_source, + logger_flags='', logger_msg=''): thread_pool = [] for vrf in vrf_list: def check_syslog_one_vrf(routed_interfaces, port, vrf): - tcpdump_file = self.gen_tcpdump_cmd_and_capture_syslog_packets(routed_interfaces, port, vrf) + tcpdump_file = self.gen_tcpdump_cmd_and_capture_syslog_packets(routed_interfaces, port, vrf, + logger_flags, logger_msg) for k, v in list(SYSLOG_TEST_DATA[vrf].items()): if self.is_link_local_ip(v["source_ip"]): continue @@ -383,11 +385,13 @@ def is_link_local_ip(self, ip): return True return False - def check_syslog_msg_is_stopped(self, routed_interfaces, mgmt_interface, port, vrf_list, is_set_source): + def check_syslog_msg_is_stopped(self, routed_interfaces, mgmt_interface, port, vrf_list, is_set_source, + logger_flags='', logger_msg=''): thread_pool = [] for vrf in vrf_list: def check_no_syslog_one_vrf(routed_interfaces, port, vrf): - tcpdump_file = 
self.gen_tcpdump_cmd_and_capture_syslog_packets(routed_interfaces, port, vrf) + tcpdump_file = self.gen_tcpdump_cmd_and_capture_syslog_packets(routed_interfaces, port, vrf, + logger_flags, logger_msg) for k, v in list(SYSLOG_TEST_DATA[vrf].items()): source_ip = v["source_ip"].split("/")[0] if is_set_source else None pytest_assert( @@ -407,7 +411,7 @@ def check_no_syslog_one_vrf(routed_interfaces, port, vrf): for thread in thread_pool: thread.join(60) - def gen_tcpdump_cmd_and_capture_syslog_packets(self, routed_interfaces, port, vrf): + def gen_tcpdump_cmd_and_capture_syslog_packets(self, routed_interfaces, port, vrf, logger_flags='', logger_msg=''): if vrf == VRF_LIST[0]: tcpdump_interface = routed_interfaces[0] else: @@ -416,7 +420,7 @@ def gen_tcpdump_cmd_and_capture_syslog_packets(self, routed_interfaces, port, vr .format(tcpdump_capture_time=TCPDUMP_CAPTURE_TIME, interface=tcpdump_interface, port=port if port else SYSLOG_DEFAULT_PORT, dut_pcap_file=DUT_PCAP_FILEPATH.format(vrf=vrf)) - tcpdump_file = capture_syslog_packets(self.duthost, tcpdump_cmd) + tcpdump_file = capture_syslog_packets(self.duthost, tcpdump_cmd, logger_flags, logger_msg) return tcpdump_file @pytest.mark.parametrize("syslog_config_combination_case", SYSLOG_CONFIG_COMBINATION_CASE) @@ -595,3 +599,112 @@ def test_remove_vrf_exist_syslog_config(self, duthosts, enum_rand_one_per_hwsku_ logger.info("Check there is an error prompt:{}".format(err_msg)) pytest_assert(re.search(expected_msg, err_msg), "Error msg is not correct: Expectd msg:{}, actual msg:{}".format(expected_msg, err_msg)) + + def test_syslog_protocol_filter_severity(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, + enum_frontend_asic_index, routed_interfaces, mgmt_interface): + """ + Validates syslog protocol, filter and severity work + + 1. Add syslog config + 2. Check adding syslog config succeeds + 3. Add protocol tcp and verify changes + 4. Send message with tcp protocol and verify packet send + 5. 
Send message with udp protocol and verify packet not send + 6. Configure include filter + 7. Send message with include filter and verify packet send + 8. Send message without include filter and verify packet not send + 9. Remove include filter + 10. Configure exclude filter + 11. Send message with exclude regex and verify packet not send + 12. Send message without exclude filter and verify packet send + 13. Remove exclude filter + 14. Send message with not default syslog severity and verify it not sent + """ + syslog_config = {"is_set_vrf": False, "is_set_source": True, "port": 650, "vrf_list": 'default'} + default_vrf_rsyslog_ip = '100.100.100.1' + self.duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + self.asichost = self.duthost.asic_instance(enum_frontend_asic_index) + port = syslog_config["port"] + vrf_list = syslog_config["vrf_list"].split() + is_set_source = syslog_config["is_set_source"] + is_set_vrf = syslog_config["is_set_vrf"] + + with allure.step("Add syslog config"): + self.add_syslog_config(port, vrf_list=vrf_list, is_set_source=is_set_source, is_set_vrf=is_set_vrf) + + with allure.step("Check syslog config is configured successfully"): + self.check_syslog_config_exist( + port, vrf_list=vrf_list, is_set_source=is_set_source, is_set_vrf=is_set_vrf) + + with allure.step("Configure protocol and verify"): + self.duthost.shell('sonic-db-cli CONFIG_DB hset "SYSLOG_SERVER|{0}" "protocol" "tcp"' + .format(default_vrf_rsyslog_ip)) + + with allure.step("Check interface of {} send syslog msg ".format(routed_interfaces[0])): + logger_flags = '--protocol tcp' + self.check_syslog_msg_is_sent(routed_interfaces, mgmt_interface, port, vrf_list=vrf_list, + is_set_source=is_set_source, logger_flags=logger_flags) + + with allure.step("Check interface of {} will not send syslog msg ".format(routed_interfaces[0])): + logger_flags = '--protocol udp' + self.check_syslog_msg_is_stopped(routed_interfaces, mgmt_interface, port, vrf_list=vrf_list, + 
is_set_source=is_set_source, logger_flags=logger_flags) + + with allure.step("Configure include filter and verify"): + filter_regex = 'sonic' + self.duthost.shell('sonic-db-cli CONFIG_DB hset "SYSLOG_SERVER|{0}" ' + '"filter_type" "include" "filter_regex" {1}'.format( + default_vrf_rsyslog_ip, filter_regex)) + + with allure.step("Check interface of {} send syslog msg with include regex".format(routed_interfaces[0])): + self.check_syslog_msg_is_sent(routed_interfaces, mgmt_interface, port, vrf_list=vrf_list, + is_set_source=is_set_source, logger_flags=logger_flags, + logger_msg=filter_regex) + + with allure.step("Check interface of {} will not send without include msg ".format(routed_interfaces[0])): + self.check_syslog_msg_is_stopped(routed_interfaces, mgmt_interface, port, vrf_list=vrf_list, + is_set_source=is_set_source, logger_flags=logger_flags) + + with allure.step("Remove include filter and verify"): + self.duthost.shell('sonic-db-cli CONFIG_DB hdel ' + '"SYSLOG_SERVER|{0}" "filter_type"'.format(default_vrf_rsyslog_ip)) + + with allure.step("Configure exclude filter and verify"): + filter_regex = 'aa' + self.duthost.shell('sonic-db-cli CONFIG_DB hset' + ' "SYSLOG_SERVER|{0}" "filter_type" "exclude" "filter_regex" {1}'.format( + default_vrf_rsyslog_ip, filter_regex)) + + with allure.step("Check interface of {} will not send syslog msg with exclude".format(routed_interfaces[0])): + self.check_syslog_msg_is_stopped(routed_interfaces, mgmt_interface, port, vrf_list=vrf_list, + is_set_source=is_set_source, logger_flags=logger_flags, + logger_msg=filter_regex) + + with allure.step("Check interface of {} send syslog msg without exclude filter".format(routed_interfaces[0])): + self.check_syslog_msg_is_sent(routed_interfaces, mgmt_interface, port, vrf_list=vrf_list, + is_set_source=is_set_source, logger_flags=logger_flags, + logger_msg=filter_regex) + + with allure.step("Remove exclude filter and verify"): + self.duthost.shell('sonic-db-cli CONFIG_DB hdel ' + 
'"SYSLOG_SERVER|{0}" "filter_type"'.format(default_vrf_rsyslog_ip)) + + with allure.step("Change severity level to notice"): + self.duthost.shell('sonic-db-cli CONFIG_DB hset' + ' "SYSLOG_SERVER|{0}" "severity" "notice"'.format(default_vrf_rsyslog_ip)) + + with allure.step("Check interface of {} will not send syslog msg due to severity level".format( + routed_interfaces[0])): + self.check_syslog_msg_is_stopped(routed_interfaces, mgmt_interface, port, vrf_list=vrf_list, + is_set_source=is_set_source, logger_flags=logger_flags) + + with allure.step("Remove syslog config"): + self.remove_syslog_config(vrf_list=vrf_list) + + with allure.step("Check syslog config is removed"): + self.check_syslog_config_nonexist(port, vrf_list=vrf_list, is_set_source=is_set_source, + is_set_vrf=is_set_vrf) + + with allure.step("Check interface of {} will not send syslog msg ".format(routed_interfaces[0])): + self.check_syslog_msg_is_stopped(routed_interfaces, mgmt_interface, port, vrf_list=vrf_list, + is_set_source=is_set_source) diff --git a/tests/tacacs/utils.py b/tests/tacacs/utils.py index d18e631ede0..27f32534c67 100644 --- a/tests/tacacs/utils.py +++ b/tests/tacacs/utils.py @@ -315,7 +315,7 @@ def log_exist(ptfhost, sed_command): def get_auditd_config_reload_timestamp(duthost): - res = duthost.command("sudo service auditd status | grep 'audisp-tacplus re-initializing configuration'") + res = duthost.shell("sudo journalctl -u auditd --boot | grep 'audisp-tacplus re-initializing configuration'") logger.info("aaa config file timestamp {}".format(res["stdout_lines"])) if len(res["stdout_lines"]) == 0: diff --git a/tests/telemetry/events/bgp_events.py b/tests/telemetry/events/bgp_events.py index ce065e2ea0f..c407d89183b 100644 --- a/tests/telemetry/events/bgp_events.py +++ b/tests/telemetry/events/bgp_events.py @@ -17,6 +17,10 @@ def test_event(duthost, gnxi_path, ptfhost, data_dir, validate_yang): def drop_tcp_packets(duthost): + bgp_neighbor = 
list(duthost.get_bgp_neighbors().keys())[0] + + holdtime_timer_ms = duthost.get_bgp_neighbor_info(bgp_neighbor)["bgpTimerConfiguredHoldTimeMsecs"] + logger.info("Adding rule to drop TCP packets to test bgp-notification") ret = duthost.shell("iptables -I INPUT -p tcp --dport 179 -j DROP") @@ -28,7 +32,7 @@ def drop_tcp_packets(duthost): ret = duthost.shell("iptables -L") assert ret["rc"] == 0, "Unable to list iptables rules" - time.sleep(10) # Give time for hold timer expiry notif to fire, val from config db + time.sleep(holdtime_timer_ms / 1000) # Give time for hold timer expiry event, val from configured bgp neighbor info ret = duthost.shell("iptables -D INPUT -p tcp --dport 179 -j DROP") assert ret["rc"] == 0, "Unable to remove DROP rule from iptables" diff --git a/tests/telemetry/events/swss_events.py b/tests/telemetry/events/swss_events.py index 91d15a01e58..f7e8da1eebd 100644 --- a/tests/telemetry/events/swss_events.py +++ b/tests/telemetry/events/swss_events.py @@ -2,13 +2,16 @@ import logging import time +import random +import re from run_events_test import run_test +from tests.common.utilities import wait_until +random.seed(10) logger = logging.getLogger(__name__) tag = "sonic-events-swss" -PFC_STORM_TEST_PORT = "Ethernet4" PFC_STORM_TEST_QUEUE = "4" PFC_STORM_DETECTION_TIME = 100 PFC_STORM_RESTORATION_TIME = 100 @@ -26,6 +29,9 @@ def test_event(duthost, gnxi_path, ptfhost, data_dir, validate_yang): + if duthost.topo_type.lower() in ["m0", "mx"]: + logger.info("Skipping swss events test on MGFX topologies") + return logger.info("Beginning to test swss events") run_test(duthost, gnxi_path, ptfhost, data_dir, validate_yang, shutdown_interface, "if_state.json", "sonic-events-swss:if-state", tag) @@ -38,19 +44,38 @@ def test_event(duthost, gnxi_path, ptfhost, data_dir, validate_yang): def shutdown_interface(duthost): logger.info("Shutting down interface") interfaces = duthost.get_interfaces_status() - if_state_test_port = next((interface for interface, status 
in interfaces.items() - if status["oper"] == "up" and status["admin"] == "up"), None) + pattern = re.compile(r'^Ethernet[0-9]{1,2}$') + interface_list = [] + for interface, status in interfaces.items(): + if pattern.match(interface) and status["oper"] == "up" and status["admin"] == "up": + interface_list.append(interface) + if_state_test_port = random.choice(interface_list) assert if_state_test_port is not None, "Unable to find valid interface for test" ret = duthost.shell("config interface shutdown {}".format(if_state_test_port)) assert ret["rc"] == 0, "Failing to shutdown interface {}".format(if_state_test_port) + # Wait until port goes down + wait_until(15, 1, 0, verify_port_admin_oper_status, duthost, if_state_test_port, "down") + ret = duthost.shell("config interface startup {}".format(if_state_test_port)) assert ret["rc"] == 0, "Failing to startup interface {}".format(if_state_test_port) + # Wait until port comes back up + wait_until(15, 1, 0, verify_port_admin_oper_status, duthost, if_state_test_port, "up") + def generate_pfc_storm(duthost): logger.info("Generating pfc storm") + interfaces = duthost.get_interfaces_status() + pattern = re.compile(r'^Ethernet[0-9]{1,2}$') + interface_list = [] + for interface, status in interfaces.items(): + if pattern.match(interface) and status["oper"] == "up" and status["admin"] == "up": + interface_list.append(interface) + PFC_STORM_TEST_PORT = random.choice(interface_list) + assert PFC_STORM_TEST_PORT is not None, "Unable to find valid interface for test" + queue_oid = duthost.get_queue_oid(PFC_STORM_TEST_PORT, PFC_STORM_TEST_QUEUE) duthost.shell("sonic-db-cli COUNTERS_DB HSET \"COUNTERS:{}\" \"DEBUG_STORM\" \"enabled\"". 
format(queue_oid)) @@ -86,3 +111,10 @@ def trigger_crm_threshold_exceeded(duthost): duthost.shell("crm config thresholds ipv4 route type free") duthost.shell("crm config thresholds ipv4 route low {}".format(CRM_DEFAULT_IPV4_ROUTE_LOW)) duthost.shell("crm config thresholds ipv4 route high {}".format(CRM_DEFAULT_IPV4_ROUTE_HIGH)) + + +def verify_port_admin_oper_status(duthost, interface, state): + interface_facts = duthost.get_interfaces_status()[interface] + admin_status = interface_facts["admin"] + oper_status = interface_facts["oper"] + return admin_status == state and oper_status == state diff --git a/tests/telemetry/test_telemetry.py b/tests/telemetry/test_telemetry.py index a25593709e1..56da75e3f12 100644 --- a/tests/telemetry/test_telemetry.py +++ b/tests/telemetry/test_telemetry.py @@ -112,9 +112,9 @@ def test_osbuild_version(duthosts, enum_rand_one_per_hwsku_hostname, ptfhost, lo show_gnmi_out = ptfhost.shell(cmd)['stdout'] result = str(show_gnmi_out) - assert_equal(len(re.findall(r'"build_version": "sonic\.', result)), + assert_equal(len(re.findall(r'"build_version": "SONiC\.', result)), 1, "build_version value at {0}".format(result)) - assert_equal(len(re.findall(r'sonic\.NA', result, flags=re.IGNORECASE)), + assert_equal(len(re.findall(r'SONiC\.NA', result, flags=re.IGNORECASE)), 0, "invalid build_version value at {0}".format(result)) @@ -190,11 +190,10 @@ def test_virtualdb_table_streaming(duthosts, enum_rand_one_per_hwsku_hostname, p "Timestamp markers for each update message in:\n{0}".format(result)) -def invoke_py_cli_from_ptf(ptfhost, cmd, results): +def invoke_py_cli_from_ptf(ptfhost, cmd, callback): ret = ptfhost.shell(cmd) assert ret["rc"] == 0, "PTF docker did not get a response" - if results is not None and len(results) > 0: - results[0] = ret["stdout"] + callback(ret["stdout"]) def test_on_change_updates(duthosts, enum_rand_one_per_hwsku_hostname, ptfhost, localhost, gnxi_path): @@ -205,7 +204,6 @@ def test_on_change_updates(duthosts, 
enum_rand_one_per_hwsku_hostname, ptfhost, cmd = generate_client_cli(duthost=duthost, gnxi_path=gnxi_path, method=METHOD_SUBSCRIBE, submode=SUBMODE_ONCHANGE, update_count=2, xpath="NEIGH_STATE_TABLE", target="STATE_DB") - results = [""] bgp_nbrs = list(duthost.get_bgp_neighbors().keys()) bgp_neighbor = random.choice(bgp_nbrs) @@ -213,22 +211,23 @@ def test_on_change_updates(duthosts, enum_rand_one_per_hwsku_hostname, ptfhost, original_state = bgp_info["bgpState"] new_state = "Established" if original_state.lower() == "active" else "Active" - client_thread = threading.Thread(target=invoke_py_cli_from_ptf, args=(ptfhost, cmd, results,)) + def callback(result): + logger.info("Assert that ptf client output is non empty and contains on change update") + try: + assert result != "", "Did not get output from PTF client" + finally: + duthost.shell("sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, + original_state)) + ret = parse_gnmi_output(result, 1, bgp_neighbor) + assert ret is True, "Did not find key in update" + + client_thread = threading.Thread(target=invoke_py_cli_from_ptf, args=(ptfhost, cmd, callback,)) client_thread.start() wait_until(5, 1, 0, check_gnmi_cli_running, ptfhost) duthost.shell("sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, new_state)) - - client_thread.join(30) - - try: - assert results[0] != "", "Did not get output from PTF client" - finally: - duthost.shell("sonic-db-cli STATE_DB HSET \"NEIGH_STATE_TABLE|{}\" \"state\" {}".format(bgp_neighbor, - original_state)) - ret = parse_gnmi_output(results[0], 1, bgp_neighbor) - assert ret is True, "Did not find key in update" + client_thread.join(60) # max timeout of 60s, expect update to come in <=30s @pytest.mark.disable_loganalyzer diff --git a/tests/test_features.py b/tests/test_features.py index d964a79000a..8611e235667 100644 --- a/tests/test_features.py +++ b/tests/test_features.py @@ -8,7 +8,7 @@ # Test Functions -def 
test_show_features(duthosts, enum_dut_hostname): +def run_show_features(duthosts, enum_dut_hostname): """Verify show features command output against CONFIG_DB """ duthost = duthosts[enum_dut_hostname] @@ -19,3 +19,7 @@ def test_show_features(duthosts, enum_dut_hostname): .format(cmd_key), module_ignore_errors=False)['stdout'] pytest_assert(redis_value.lower() == cmd_value.lower(), "'{}' is '{}' which does not match with config_db".format(cmd_key, cmd_value)) + + +def test_show_features(duthosts, enum_dut_hostname): + run_show_features(duthosts, enum_dut_hostname) diff --git a/tests/vlan/test_autostate_disabled.py b/tests/vlan/test_autostate_disabled.py index d7dd1d73dec..91e0cd82c02 100644 --- a/tests/vlan/test_autostate_disabled.py +++ b/tests/vlan/test_autostate_disabled.py @@ -22,7 +22,7 @@ def ignore_expected_loganalyzer_exceptions(duthosts, rand_one_dut_hostname, loga duthost = duthosts[rand_one_dut_hostname] if loganalyzer and duthost.facts["platform"] == "x86_64-cel_e1031-r0": loganalyzer_ignore_regex = [ - ".*ERR swss#orchagent: :- doPortTask: .*: autoneg is not supported.*", + ".*ERR swss#orchagent:.*:- doPortTask: .*: autoneg is not supported.*", ] loganalyzer[duthost.hostname].ignore_regex.extend(loganalyzer_ignore_regex) diff --git a/tests/voq/test_voq_fabric_status_all.py b/tests/voq/test_voq_fabric_status_all.py index 5b0f08d79b2..0787845d3a0 100644 --- a/tests/voq/test_voq_fabric_status_all.py +++ b/tests/voq/test_voq_fabric_status_all.py @@ -87,6 +87,16 @@ def test_voq_fabric_link_status(duthosts, refData, fabricSlots): for i in range(totalAsics): keys.append('asic' + str(i)) supReferenceData = {key: {} for key in keys} + linecardModule = [] + localModule = 0 + asicPerSlot = 2 + + for duthost in duthosts.frontend_nodes: + num_asics = duthost.num_asics() + for asic in range(num_asics): + if localModule not in linecardModule: + linecardModule.append(localModule) + localModule += asicPerSlot # skip supervisors, on Linecards now: for duthost in 
duthosts.frontend_nodes: @@ -117,7 +127,10 @@ def test_voq_fabric_link_status(duthosts, refData, fabricSlots): if asic not in referenceData: pytest_assert(False, "{} is not expected to be up.".format(asic)) if lk not in referenceData[asic]: - pytest_assert(False, "link {} is not expected to be up.".format(lk)) + pytest_assert(status.lower() != 'up', + "link {} is not expected to be up.".format(lk)) + logger.info("Skip updating the information as this is designed to be down") + continue # update link information on suppervisor lkData = {'peer slot': slot, 'peer lk': lk, 'peer asic': asic} @@ -134,8 +147,9 @@ if status.lower() != 'up': if fabricSlot in fabricSlots: + logger.info("link {}. is expected to be up.".format(linkKey)) pytest_assert(status.lower() == 'up', - "link {}. is expected to be up.".format(lk)) + "link {}. is expected to be up.".format(linkKey)) else: logger.info("Header line {}".format(content)) @@ -169,6 +183,12 @@ continue else: # check link status + cmd = "sonic-db-cli -n {} STATE_DB hget 'FABRIC_PORT_TABLE|PORT{}' REMOTE_MOD".format(asic, lk) + cmd_output = duthost.shell(cmd, module_ignore_errors=True)["stdout"].split("\n") + logger.info(cmd_output) + module = cmd_output[0] + if module not in linecardModule: + continue pytest_assert(False, "link {} is not expected to be up.".format(lk)) pytest_assert(status.lower() == 'up', - "link {}. is expected to be up.".format(lk)) + "link {}. 
is expected to be up.".format(linkKey)) diff --git a/tests/voq/test_voq_nbr.py b/tests/voq/test_voq_nbr.py index 170030d2e27..6475248502c 100644 --- a/tests/voq/test_voq_nbr.py +++ b/tests/voq/test_voq_nbr.py @@ -25,6 +25,7 @@ from .voq_helpers import get_inband_info from .voq_helpers import get_ptf_port from .voq_helpers import get_vm_with_ip +from tests.common.devices.eos import EosHost from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # noqa F401 logger = logging.getLogger(__name__) @@ -127,16 +128,13 @@ def restore_bgp(duthosts, nbrhosts, all_cfg_facts): for peer in nbr['conf']['bgp']['peers']: for neighbor in nbr['conf']['bgp']['peers'][peer]: - nbr['host'].eos_config( - lines=["no neighbor %s shutdown" % neighbor], - parents=['router bgp {}'.format(nbr['conf']['bgp']['asn'])]) - - if ":" in address: + if isinstance(nbr['host'], EosHost): nbr['host'].eos_config( - lines=["no ipv6 route ::/0 %s " % neighbor]) + lines=["no neighbor %s shutdown" % neighbor], + parents=['router bgp {}'.format(nbr['conf']['bgp']['asn'])]) else: - nbr['host'].eos_config( - lines=["no ip route 0.0.0.0/0 %s " % neighbor]) + nbr['host'].shell("sudo vtysh -c 'configure terminal' -c 'router bgp " + str( + nbr['conf']['bgp']['asn']) + "' -c 'no neighbor {} shutdown'".format(neighbor)) @pytest.fixture(scope="module", autouse=True) @@ -219,21 +217,25 @@ def disable_nbr_bgp_neighs(node=None, results=None): 'disable neighbors {} on neighbor host {}'.format(node['conf']['bgp']['peers'], node['host'].hostname)) for peer in node['conf']['bgp']['peers']: for neighbor in node['conf']['bgp']['peers'][peer]: - node_results.append(node['host'].eos_config( - lines=["neighbor %s shutdown" % neighbor], - parents=['router bgp {}'.format(node['conf']['bgp']['asn'])], - module_ignore_errors=True) - ) - if ":" in neighbor: + if isinstance(node['host'], EosHost): node_results.append(node['host'].eos_config( - lines=["ipv6 route ::/0 %s " % neighbor], + lines=["neighbor %s shutdown" % 
neighbor], + parents=['router bgp {}'.format(node['conf']['bgp']['asn'])], module_ignore_errors=True) ) + if ":" in neighbor: + node_results.append(node['host'].eos_config( + lines=["ipv6 route ::/0 %s " % neighbor], + module_ignore_errors=True) + ) + else: + node_results.append(node['host'].eos_config( + lines=["ip route 0.0.0.0/0 %s " % neighbor], + module_ignore_errors=True) + ) else: - node_results.append(node['host'].eos_config( - lines=["ip route 0.0.0.0/0 %s " % neighbor], - module_ignore_errors=True) - ) + node_results.append(node['host'].shell("sudo vtysh -c 'configure terminal' -c 'router bgp " + str( + node['conf']['bgp']['asn']) + "' -c 'neighbor {} shutdown'".format(neighbor))) results[node['host'].hostname] = node_results @@ -310,36 +312,47 @@ def enable_nbr_bgp_neighs(node=None, results=None): for peer in node['conf']['bgp']['peers']: for neighbor in node['conf']['bgp']['peers'][peer]: try: - node_results.append(node['host'].eos_config( - lines=["no neighbor %s shutdown" % neighbor], - parents=['router bgp {}'.format(node['conf']['bgp']['asn'])]) - ) - if ":" in neighbor: + if isinstance(node['host'], EosHost): node_results.append(node['host'].eos_config( - lines=["no ipv6 route ::/0 %s " % neighbor]) + lines=["no neighbor %s shutdown" % neighbor], + parents=['router bgp {}'.format(node['conf']['bgp']['asn'])]) ) + if ":" in neighbor: + node_results.append(node['host'].eos_config( + lines=["no ipv6 route ::/0 %s " % neighbor]) + ) + else: + node_results.append(node['host'].eos_config( + lines=["no ip route 0.0.0.0/0 %s " % neighbor], + ) + ) else: - node_results.append(node['host'].eos_config( - lines=["no ip route 0.0.0.0/0 %s " % neighbor], - ) - ) + node_results.append(node['host'].shell( + "sudo vtysh -c 'configure terminal' -c 'router bgp " + str( + node['conf']['bgp']['asn']) + "' -c 'no neighbor {} shutdown'".format(neighbor))) + except Exception: logger.warning("Enable of neighbor on VM: %s failed, retrying", node['host'].hostname) 
time.sleep(10) - node_results.append(node['host'].eos_config( - lines=["no neighbor %s shutdown" % neighbor], - parents=['router bgp {}'.format(node['conf']['bgp']['asn'])]) - ) - if ":" in neighbor: + if isinstance(node['host'], EosHost): node_results.append(node['host'].eos_config( - lines=["no ipv6 route ::/0 %s " % neighbor], - ) + lines=["no neighbor %s shutdown" % neighbor], + parents=['router bgp {}'.format(node['conf']['bgp']['asn'])]) ) + if ":" in neighbor: + node_results.append(node['host'].eos_config( + lines=["no ipv6 route ::/0 %s " % neighbor], + ) + ) + else: + node_results.append(node['host'].eos_config( + lines=["no ip route 0.0.0.0/0 %s " % neighbor], + ) + ) else: - node_results.append(node['host'].eos_config( - lines=["no ip route 0.0.0.0/0 %s " % neighbor], - ) - ) + node_results.append(node['host'].shell( + "sudo vtysh -c 'configure terminal' -c 'router bgp " + str( + node['conf']['bgp']['asn']) + "' -c 'no neighbor {} shutdown'".format(neighbor))) results[node['host'].hostname] = node_results @@ -435,12 +448,26 @@ def _change_vm_interface_on_vm(nbrhosts, state="up", node=None, results=None): for eos_intf in list(nbr['conf']['interfaces'].keys()): if "Loopback" in eos_intf: continue + if state == "up": - logger.info("Startup EOS %s interface %s", node, eos_intf) - node_results.append(nbr['host'].eos_config(lines=["no shutdown"], parents=["interface %s" % eos_intf])) + logger.info("Startup Nbr %s interface %s", node, eos_intf) + if isinstance(nbr['host'], EosHost): + node_results.append( + nbr['host'].eos_config(lines=["no shutdown"], parents=["interface %s" % eos_intf])) + else: + if "port-channel" in eos_intf.lower(): + # convert PortChannel-1 to PortChannel1 + eos_intf = "PortChannel" + eos_intf[-1] + node_results.append(nbr['host'].shell("config interface startup {}".format(eos_intf))) else: - logger.info("Shutdown EOS %s interface %s", node, eos_intf) - node_results.append(nbr['host'].eos_config(lines=["shutdown"], parents=["interface %s" 
% eos_intf])) + logger.info("Shutdown Nbr %s interface %s", node, eos_intf) + if isinstance(nbr['host'], EosHost): + node_results.append(nbr['host'].eos_config(lines=["shutdown"], parents=["interface %s" % eos_intf])) + else: + if "port-channel" in eos_intf.lower(): + # convert PortChannel-1 to PortChannel1 + eos_intf = "PortChannel" + eos_intf[-1] + node_results.append(nbr['host'].shell("config interface shutdown {}".format(eos_intf))) results[node] = node_results diff --git a/tests/voq/voq_helpers.py b/tests/voq/voq_helpers.py index 922b5711942..89df1c231d3 100644 --- a/tests/voq/voq_helpers.py +++ b/tests/voq/voq_helpers.py @@ -6,6 +6,7 @@ from tests.common.utilities import wait_until from tests.common.helpers.assertions import pytest_assert from tests.common.helpers.sonic_db import AsicDbCli, AppDbCli, VoqDbCli, SonicDbKeyNotFound +from tests.common.devices.eos import EosHost logger = logging.getLogger(__name__) @@ -410,12 +411,20 @@ def get_eos_mac(nbr, nbr_intf): Returns: A dictionary with the mac address and shell interface name. """ - if "port-channel" in nbr_intf.lower(): - # convert Port-Channel1 to po1 - shell_intf = "po" + nbr_intf[-1] + + if isinstance(nbr['host'], EosHost): + if "port-channel" in nbr_intf.lower(): + # convert Port-Channel1 to po1 + shell_intf = "po" + nbr_intf[-1] + else: + # convert Ethernet1 to eth1 + shell_intf = "eth" + nbr_intf[-1] else: - # convert Ethernet1 to eth1 - shell_intf = "eth" + nbr_intf[-1] + if "port-channel" in nbr_intf.lower(): + # convert Port-Channel1 to PortChannel1 + shell_intf = "PortChannel" + nbr_intf[-1] + else: + shell_intf = nbr_intf output = nbr['host'].command("ip addr show dev %s" % shell_intf) # 8: Ethernet0: mtu 9100 ... 
diff --git a/tests/vxlan/test_vxlan_decap.py b/tests/vxlan/test_vxlan_decap.py index 9a166906534..01907a1c145 100644 --- a/tests/vxlan/test_vxlan_decap.py +++ b/tests/vxlan/test_vxlan_decap.py @@ -28,7 +28,7 @@ COUNT = 1 -def prepare_ptf(ptfhost, mg_facts, duthost): +def prepare_ptf(ptfhost, mg_facts, duthost, unslctd_mg_facts=None): """Prepare arp responder configuration and store temporary vxlan decap related information to PTF docker Args: @@ -55,6 +55,7 @@ def prepare_ptf(ptfhost, mg_facts, duthost): vxlan_decap = { "minigraph_port_indices": mg_facts["minigraph_ptf_indices"], + "mg_unslctd_port_idx": [] if unslctd_mg_facts is None else unslctd_mg_facts["mg_ptf_idx"], "minigraph_portchannel_interfaces": mg_facts["minigraph_portchannel_interfaces"], "minigraph_portchannels": mg_facts["minigraph_portchannels"], "minigraph_lo_interfaces": mg_facts["minigraph_lo_interfaces"], @@ -112,6 +113,19 @@ def setup(duthosts, rand_one_dut_hostname, ptfhost, tbinfo): logger.info("Gather some facts") mg_facts = duthost.get_extended_minigraph_facts(tbinfo) + if "dualtor-aa" in tbinfo["topo"]["name"]: + idx = duthosts.index(duthost) + unselected_duthost = duthosts[1 - idx] + unslctd_mg_facts = unselected_duthost.minigraph_facts(host=unselected_duthost.hostname)['ansible_facts'] + unslctd_mg_facts['mg_ptf_idx'] = unslctd_mg_facts['minigraph_port_indices'].copy() + try: + map = tbinfo['topo']['ptf_map'][str(1 - idx)] + if map: + for port, index in list(unslctd_mg_facts['minigraph_port_indices'].items()): + if str(index) in map: + unslctd_mg_facts['mg_ptf_idx'][port] = map[str(index)] + except (ValueError, KeyError): + pass logger.info("Copying vxlan_switch.json") render_template_to_host("vxlan_switch.j2", duthost, DUT_VXLAN_PORT_JSON) @@ -121,7 +135,10 @@ def setup(duthosts, rand_one_dut_hostname, ptfhost, tbinfo): sleep(3) logger.info("Prepare PTF") - prepare_ptf(ptfhost, mg_facts, duthost) + if "dualtor-aa" in tbinfo["topo"]["name"]: + prepare_ptf(ptfhost, mg_facts, duthost, 
unslctd_mg_facts) + else: + prepare_ptf(ptfhost, mg_facts, duthost) logger.info("Generate VxLAN config files") generate_vxlan_config_files(duthost, mg_facts) @@ -166,7 +183,7 @@ def vxlan_status(setup, request, duthosts, rand_one_dut_hostname): return False, request.param -def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, +def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, tbinfo, ptfhost, creds, toggle_all_simulator_ports_to_rand_selected_tor_m): # noqa F811 duthost = duthosts[rand_one_dut_hostname] @@ -174,6 +191,9 @@ def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, 'variable_manager']._hostvars[duthost.hostname].get("ansible_altpassword") vxlan_enabled, scenario = vxlan_status + is_active_active_dualtor = False + if "dualtor-aa" in tbinfo["topo"]["name"]: + is_active_active_dualtor = True logger.info("vxlan_enabled=%s, scenario=%s" % (vxlan_enabled, scenario)) log_file = "/tmp/vxlan-decap.Vxlan.{}.{}.log".format( scenario, datetime.now().strftime('%Y-%m-%d-%H:%M:%S')) @@ -187,6 +207,7 @@ def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, "sonic_admin_user": creds.get('sonicadmin_user'), "sonic_admin_password": creds.get('sonicadmin_password'), "sonic_admin_alt_password": sonic_admin_alt_password, + "is_active_active_dualtor": is_active_active_dualtor, "dut_hostname": duthost.host.options[ 'inventory_manager'].get_host(duthost.hostname).vars['ansible_host']}, qlen=10000,