diff --git a/ansible/devutils b/ansible/devutils
index d0564bb84a4..f306ffb878e 100755
--- a/ansible/devutils
+++ b/ansible/devutils
@@ -16,7 +16,7 @@ from devutil.task_runner import TaskRunner
import sys
sys.path.append("..")
from tests.common.connections import ConsoleHost
-from tests.common.plugins.pdu_controller.snmp_pdu_controllers import get_pdu_controller
+from tests.common.plugins.pdu_controller.pdu_manager import pdu_manager_factory
g_inv_mgr = None
g_task_runner = None
@@ -53,12 +53,12 @@ def get_pdu_info(pdu_host):
return g_pdu_dict[pdu_host]
hosts = retrieve_hosts('all', pdu_host)
- pdus=[]
+ pdus = {}
g_pdu_dict[pdu_host] = pdus
for ph in pdu_host.split(','):
if ph in hosts:
pdu = hosts[ph]
- pdus.append(pdu)
+ pdus[ph] = pdu
return pdus
@@ -79,7 +79,7 @@ def action_list(parameters):
data.append(dict(zip(header, (name, vars['ansible_host']))))
else:
for name, vars in hosts.items():
- data.append((name, vars['ansible_host']))
+ data.append((name, vars['ansible_host'] if 'ansible_host' in vars else 'not_available'))
show_data_output(header, data, parameters['json'])
@@ -92,7 +92,8 @@ def action_ping(parameters):
g_task_runner.submit_task(name + '|' + vars['ansible_host'], run_cmd, cmd=cmd)
if parameters['json']:
for name, result in g_task_runner.task_results():
- data.append(dict(zip(header, (name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))))
+ data.append(
+ dict(zip(header, (name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))))
else:
for name, result in g_task_runner.task_results():
data.append((name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))
@@ -104,7 +105,8 @@ def action_ping(parameters):
g_task_runner.submit_task(name + '|' + vars['ansible_hostv6'], run_cmd, cmd=cmd)
if parameters['json']:
for name, result in g_task_runner.task_results():
- data.append(dict(zip(header, (name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))))
+ data.append(
+ dict(zip(header, (name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))))
else:
for name, result in g_task_runner.task_results():
data.append((name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))
@@ -116,7 +118,8 @@ def action_ssh(parameters):
hosts = parameters['hosts']
for _, vars in hosts.items():
client = SSHClient()
- client.connect(hostname=vars['ansible_host'], username=vars['creds']['username'], passwords=vars['creds']['password'])
+ client.connect(hostname=vars['ansible_host'], username=vars[
+ 'creds']['username'], passwords=vars['creds']['password'])
client.posix_shell()
@@ -125,17 +128,17 @@ def action_console(parameters):
# Todo: Retrieve console vars from conn_graph_fact
for _, vars in hosts.items():
console_host = ConsoleHost(console_type=vars['console_type'],
- console_host=vars['console_host'],
- console_port=vars['console_port'],
- sonic_username=vars['creds']['username'],
- sonic_password=vars['creds']['password'],
- console_username=vars['creds']['console_user'][vars['console_type']],
- console_password=vars['creds']['console_password'][vars['console_type']])
+ console_host=vars['console_host'],
+ console_port=vars['console_port'],
+ sonic_username=vars['creds']['username'],
+ sonic_password=vars['creds']['password'],
+ console_username=vars['creds']['console_user'][vars['console_type']],
+ console_password=vars['creds']['console_password'][vars['console_type']])
console_host.posix_shell()
def pdu_action_on_dut(host, attrs, action):
- ret = { 'Host' : host, 'PDU status' : [], 'Summary' : [], 'Action' : action }
+ ret = {'Host': host, 'PDU status': [], 'Summary': [], 'Action': action}
pdu_name = attrs['pdu_host'] if 'pdu_host' in attrs else None
if not pdu_name:
ret['Summary'].append('DUT has no PDU configuration')
@@ -146,35 +149,27 @@ def pdu_action_on_dut(host, attrs, action):
ret['Summary'].append('PDU not found in inventory')
return ret
- for pdu_info in pdu_list:
- pdu_host = pdu_info['ansible_host'] if pdu_info and 'ansible_host' in pdu_info else None
- p_name = pdu_info['inventory_hostname'] if pdu_info and 'inventory_hostname' in pdu_info else None
- if not pdu_host or not p_name:
- ret['Summary'].append('No PDU IP or name')
- continue
-
- controller = get_pdu_controller(pdu_host, host, pdu_info)
-
- if not controller:
- ret['Summary'].append('Failed to communicate with controller {}'.format(p_name))
- continue
-
- status = controller.get_outlet_status()
- if action == 'off':
- for outlet in status:
- controller.turn_off_outlet(outlet['outlet_id'])
- status = controller.get_outlet_status()
- elif action == 'on':
- for outlet in status:
- controller.turn_on_outlet(outlet['outlet_id'])
- status = controller.get_outlet_status()
- elif action != 'status':
- ret['Summary'].append('Unsupported action {}.'.format(action))
- continue
-
- for outlet in status:
- outlet.update({ 'PDU' : p_name, 'PDU_IP' : pdu_host })
- ret['PDU status'].append(outlet)
+ # TODO: fake graph data to force building pdu manager from inventory until
+ # we add code to read and construct the conn_graph_facts
+ fake_graph = {'device_pdu_info': {}, 'device_pdu_links': {}}
+
+ pduman = pdu_manager_factory(host, pdu_list, fake_graph, pdu_list.values()[0])
+
+ if not pduman:
+ ret['Summary'].append('Failed to communicate with PDU controller {}'.format(pdu_name))
+ return ret
+
+ if action == 'off':
+ pduman.turn_off_outlet()
+ elif action == 'on':
+ pduman.turn_on_outlet()
+ elif action != 'status':
+ ret['Summary'].append('Unsupported action {}.'.format(action))
+ return ret
+
+ status = pduman.get_outlet_status()
+ for outlet in status:
+ ret['PDU status'].append(outlet)
return ret
@@ -182,7 +177,7 @@ def pdu_action_on_dut(host, attrs, action):
def action_pdu(parameters, action):
hosts = parameters['hosts']
data = []
- header = [ 'Host', 'Action', 'PDU status', 'Summary' ]
+ header = ['Host', 'Action', 'PDU status', 'Summary']
for host, attrs in hosts.items():
g_task_runner.submit_task(host, pdu_action_on_dut, host=host, attrs=attrs, action=action)
@@ -191,7 +186,7 @@ def action_pdu(parameters, action):
if parameters['json']:
data.append(status)
else:
- data.append([ status[x] for x in header ])
+ data.append([status[x] for x in header])
return header, data
@@ -246,17 +241,20 @@ def parallel_run(parameters):
for name, result in g_task_runner.task_results():
print("task result for {} ===============>\n{}".format(name, str(result['result'][1])))
+
def ssh_run_command(hostname, username, passwords, cmd):
client = SSHClient()
client.connect(hostname=hostname, username=username, passwords=passwords)
return client.run_command(cmd)
+
def validate_args(args):
if args.action == 'run' and args.cmd == '':
print("command is missing for run action")
return False
return True
+
def main():
parser = argparse.ArgumentParser(description='Device utilities')
parser.add_argument('-6', '--ipv6', help='Include IPv6', action='store_true',
@@ -275,7 +273,8 @@ def main():
type=str, required=False)
parser.add_argument('-u', '--user', help='User: user account to login to host with, default admin',
type=str, required=False, default='admin')
- parser.add_argument('-c', '--concurrency', help='Concurrency: the max concurrency for tasks that can run simultaneously, default 1',
+ parser.add_argument(
+ '-c', '--concurrency', help='Concurrency: the max concurrency for tasks that can run simultaneously, default 1',
type=int, required=False, default=1)
parser.add_argument('-j', '--json', help='json output', action='store_true',
required=False, default=False)
@@ -283,26 +282,26 @@ def main():
args = parser.parse_args()
if not validate_args(args):
return
- build_global_vars(args.concurrency, args.inventory);
+ build_global_vars(args.concurrency, args.inventory)
hosts = retrieve_hosts(args.group, args.limit)
- actions = { 'list' : action_list,
- 'ping' : action_ping,
- 'ssh' : action_ssh,
- 'console' : action_console,
- 'run' : ssh_run_command,
- 'pdu_status' : action_pdu_status,
- 'pdu_off' : action_pdu_off,
- 'pdu_on' : action_pdu_on,
- 'pdu_reboot' : action_pdu_reboot,
- }
- parameters = { 'hosts' : hosts,
- 'limit' : args.limit,
- 'action' : actions[args.action],
- 'user' : args.user,
- 'ipv6' : args.ipv6,
- 'cmd': args.cmd,
- 'json' : args.json,
- }
+ actions = {'list': action_list,
+ 'ping': action_ping,
+ 'ssh': action_ssh,
+ 'console': action_console,
+ 'run': ssh_run_command,
+ 'pdu_status': action_pdu_status,
+ 'pdu_off': action_pdu_off,
+ 'pdu_on': action_pdu_on,
+ 'pdu_reboot': action_pdu_reboot,
+ }
+ parameters = {'hosts': hosts,
+ 'limit': args.limit,
+ 'action': actions[args.action],
+ 'user': args.user,
+ 'ipv6': args.ipv6,
+ 'cmd': args.cmd,
+ 'json': args.json,
+ }
action_dispatcher(parameters)
diff --git a/ansible/files/creategraph.py b/ansible/files/creategraph.py
index 5db0a55b5e0..2068daeedf8 100755
--- a/ansible/files/creategraph.py
+++ b/ansible/files/creategraph.py
@@ -8,6 +8,8 @@
DEFAULT_DEVICECSV = 'sonic_lab_devices.csv'
DEFAULT_LINKCSV = 'sonic_lab_links.csv'
+DEFAULT_CONSOLECSV = 'sonic_lab_console_links.csv'
+DEFAULT_PDUCSV = 'sonic_lab_pdu_links.csv'
LAB_CONNECTION_GRAPH_ROOT_NAME = 'LabConnectionGraph'
LAB_CONNECTION_GRAPH_DPGL2_NAME = 'DevicesL2Info'
@@ -20,45 +22,87 @@ class LabGraph(object):
infrastucture for Sonic development and testing environment.
"""
- def __init__(self, dev_csvfile=None, link_csvfile=None, graph_xmlfile=None):
+ def __init__(self, dev_csvfile=None, link_csvfile=None, cons_csvfile=None, pdu_csvfile=None, graph_xmlfile=None):
#TODO:make generated xml file name as parameters in the future to make it more flexible
self.devices = []
self.links = []
+ self.consoles = []
+ self.pdus = []
self.devcsv = dev_csvfile
self.linkcsv = link_csvfile
+ self.conscsv = cons_csvfile
+ self.pducsv = pdu_csvfile
self.png_xmlfile = 'str_sonic_png.xml'
self.dpg_xmlfile = 'str_sonic_dpg.xml'
self.one_xmlfile = graph_xmlfile
self.pngroot = etree.Element('PhysicalNetworkGraphDeclaration')
self.dpgroot = etree.Element('DataPlaneGraph')
+ self.csgroot = etree.Element('ConsoleGraphDeclaration')
+ self.pcgroot = etree.Element('PowerControlGraphDeclaration')
def read_devices(self):
- csv_dev = open(self.devcsv)
- csv_devices = csv.DictReader(filter(lambda row: row[0]!='#' and len(row.strip())!=0, csv_dev))
- devices_root = etree.SubElement(self.pngroot, 'Devices')
- for row in csv_devices:
- attrs = {}
- self.devices.append(row)
- for key in row:
- if key.lower() != 'managementip':
- attrs[key]=row[key].decode('utf-8')
- prod = etree.SubElement(devices_root, 'Device', attrs)
- csv_dev.close()
+ with open(self.devcsv) as csv_dev:
+ csv_devices = csv.DictReader(filter(lambda row: row[0]!='#' and len(row.strip())!=0, csv_dev))
+ devices_root = etree.SubElement(self.pngroot, 'Devices')
+ pdus_root = etree.SubElement(self.pcgroot, 'DevicesPowerControlInfo')
+ cons_root = etree.SubElement(self.csgroot, 'DevicesConsoleInfo')
+ for row in csv_devices:
+ attrs = {}
+ self.devices.append(row)
+ devtype=row['Type'].lower()
+ if 'pdu' in devtype:
+ for key in row:
+ attrs[key]=row[key].decode('utf-8')
+ etree.SubElement(pdus_root, 'DevicePowerControlInfo', attrs)
+ elif 'consoleserver' in devtype:
+ for key in row:
+ attrs[key]=row[key].decode('utf-8')
+ etree.SubElement(cons_root, 'DeviceConsoleInfo', attrs)
+ else:
+ for key in row:
+ if key.lower() != 'managementip' and key.lower() !='protocol':
+ attrs[key]=row[key].decode('utf-8')
+ etree.SubElement(devices_root, 'Device', attrs)
def read_links(self):
- csv_file = open(self.linkcsv)
- csv_links = csv.DictReader(filter(lambda row: row[0]!='#' and len(row.strip())!=0, csv_file))
- links_root = etree.SubElement(self.pngroot, 'DeviceInterfaceLinks')
- for link in csv_links:
- attrs = {}
- for key in link:
- if key.lower() != 'vlanid' and key.lower() != 'vlanmode':
- attrs[key]=link[key].decode('utf-8')
- prod = etree.SubElement(links_root, 'DeviceInterfaceLink', attrs)
- self.links.append(link)
- csv_file.close()
+ with open(self.linkcsv) as csv_file:
+ csv_links = csv.DictReader(filter(lambda row: row[0]!='#' and len(row.strip())!=0, csv_file))
+ links_root = etree.SubElement(self.pngroot, 'DeviceInterfaceLinks')
+ for link in csv_links:
+ attrs = {}
+ for key in link:
+ if key.lower() != 'vlanid' and key.lower() != 'vlanmode':
+ attrs[key]=link[key].decode('utf-8')
+ etree.SubElement(links_root, 'DeviceInterfaceLink', attrs)
+ self.links.append(link)
+ def read_consolelinks(self):
+ if not os.path.exists(self.conscsv):
+ return
+ with open(self.conscsv) as csv_file:
+ csv_cons = csv.DictReader(csv_file)
+ conslinks_root = etree.SubElement(self.csgroot, 'ConsoleLinksInfo')
+ for cons in csv_cons:
+ attrs = {}
+ for key in cons:
+ attrs[key]=cons[key].decode('utf-8')
+ etree.SubElement(conslinks_root, 'ConsoleLinkInfo', attrs)
+ self.consoles.append(cons)
+
+ def read_pdulinks(self):
+ if not os.path.exists(self.pducsv):
+ return
+ with open(self.pducsv) as csv_file:
+ csv_pdus = csv.DictReader(csv_file)
+ pduslinks_root = etree.SubElement(self.pcgroot, 'PowerControlLinksInfo')
+ for pdu_link in csv_pdus:
+ attrs = {}
+ for key in pdu_link:
+ attrs[key]=pdu_link[key].decode('utf-8')
+ etree.SubElement(pduslinks_root, 'PowerControlLinkInfo', attrs)
+ self.pdus.append(pdu_link)
+
def generate_dpg(self):
for dev in self.devices:
hostname = dev.get('Hostname', '')
@@ -99,21 +143,40 @@ def create_xml(self):
root=etree.Element(LAB_CONNECTION_GRAPH_ROOT_NAME)
root.append(self.pngroot)
root.append(self.dpgroot)
+ root.append(self.csgroot)
+ root.append(self.pcgroot)
result = etree.tostring(root, pretty_print=True)
onexml.write(result)
+def get_file_names(args):
+ if not args.inventory:
+ device, links, console, pdu = args.device, args.links, args.console, args.pdu
+ else:
+ device = 'sonic_{}_devices.csv'.format(args.inventory)
+ links = 'sonic_{}_links.csv'.format(args.inventory)
+ console = 'sonic_{}_console_links.csv'.format(args.inventory)
+ pdu = 'sonic_{}_pdu_links.csv'.format(args.inventory)
+
+ return device, links, console, pdu
+
def main():
parser = argparse.ArgumentParser()
- parser.add_argument("-d", "--device", help="device file", default=DEFAULT_DEVICECSV)
- parser.add_argument("-l", "--links", help="link file", default=DEFAULT_LINKCSV)
+    parser.add_argument("-d", "--device", help="device file [deprecation warning: use -i instead]", default=DEFAULT_DEVICECSV)
+    parser.add_argument("-l", "--links", help="link file [deprecation warning: use -i instead]", default=DEFAULT_LINKCSV)
+    parser.add_argument("-c", "--console", help="console connection file [deprecation warning: use -i instead]", default=DEFAULT_CONSOLECSV)
+    parser.add_argument("-p", "--pdu", help="pdu connection file [deprecation warning: use -i instead]", default=DEFAULT_PDUCSV)
+    parser.add_argument("-i", "--inventory", help="specify inventory name to generate device/link/console/pdu file names, default none", default=None)
parser.add_argument("-o", "--output", help="output xml file", required=True)
args = parser.parse_args()
- mygraph = LabGraph(args.device, args.links, args.output)
+ device, links, console, pdu = get_file_names(args)
+ mygraph = LabGraph(device, links, console, pdu, args.output)
mygraph.read_devices()
mygraph.read_links()
+ mygraph.read_consolelinks()
+ mygraph.read_pdulinks()
mygraph.generate_dpg()
mygraph.create_xml()
diff --git a/ansible/files/lab_connection_graph.xml b/ansible/files/lab_connection_graph.xml
index 885b21a8fd8..a4f4e48afdf 100644
--- a/ansible/files/lab_connection_graph.xml
+++ b/ansible/files/lab_connection_graph.xml
@@ -90,4 +90,30 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ansible/files/sonic_lab_console_links.csv b/ansible/files/sonic_lab_console_links.csv
new file mode 100644
index 00000000000..c0914c54044
--- /dev/null
+++ b/ansible/files/sonic_lab_console_links.csv
@@ -0,0 +1,4 @@
+StartDevice,StartPort,EndDevice
+console-1,10,str-msn2700-01
+console-1,11,str-7260-10
+console-1,12,str-7260-11
\ No newline at end of file
diff --git a/ansible/files/sonic_lab_devices.csv b/ansible/files/sonic_lab_devices.csv
index b11a58d53d3..5ee6bf8f45e 100644
--- a/ansible/files/sonic_lab_devices.csv
+++ b/ansible/files/sonic_lab_devices.csv
@@ -1,5 +1,8 @@
-Hostname,ManagementIp,HwSku,Type
-str-msn2700-01,10.251.0.188/23,Mellanox-2700,DevSonic
-str-7260-10,10.251.0.13/23,Arista-7260QX-64,FanoutLeaf
-str-7260-11,10.251.0.234/23,Arista-7260QX-64,FanoutRoot
-str-acs-serv-01,10.251.0.245/23,TestServ,Server
+Hostname,ManagementIp,HwSku,Type,Protocol
+str-msn2700-01,10.251.0.188/23,Mellanox-2700,DevSonic,
+str-7260-10,10.251.0.13/23,Arista-7260QX-64,FanoutLeaf,
+str-7260-11,10.251.0.234/23,Arista-7260QX-64,FanoutRoot,
+str-acs-serv-01,10.251.0.245/23,TestServ,Server,
+pdu-1,192.168.9.2,Apc,Pdu,snmp
+pdu-2,192.168.9.3,Sentry,Pdu,snmp
+console-1,192.168.10.1,Cisco,ConsoleServer,ssh
\ No newline at end of file
diff --git a/ansible/files/sonic_lab_pdu_links.csv b/ansible/files/sonic_lab_pdu_links.csv
new file mode 100644
index 00000000000..8574ca0d5f5
--- /dev/null
+++ b/ansible/files/sonic_lab_pdu_links.csv
@@ -0,0 +1,9 @@
+StartDevice,StartPort,EndDevice,EndPort
+pdu-1,1,str-msn2700-01,PSU1
+pdu-1,3,str-msn2700-01,PSU2
+pdu-1,2,str-7260-10,PSU1
+pdu-1,4,str-7260-10,PSU2
+pdu-2,1,str-7260-11,PSU1
+pdu-2,3,str-7260-11,PSU2
+pdu-2,2,str-acs-serv-01,PSU1
+pdu-2,4,str-acs-serv-01,PSU2
\ No newline at end of file
diff --git a/ansible/lab b/ansible/lab
index 806ea9bd3b4..d1329755124 100644
--- a/ansible/lab
+++ b/ansible/lab
@@ -94,11 +94,25 @@ sonic_s6100:
sonic_a7260:
vars:
+ hwsku: Arista-7260CX3-C64
iface_speed: 100000
hosts:
lab-a7260-01:
ansible_host: 10.251.0.191
- hwsku: Arista-7260CX3-D108C8
+ model: DCS-7260CX3-64
+ serial: SSJ12345678
+ base_mac: 76:34:ab:08:cd:a0
+ syseeprom_info:
+ "0x21": "DCS-7260CX3-64"
+ "0x22": "ASY0250504B0"
+ "0x23": "SSJ12345678"
+ "0x24": "7634ab08cda0"
+ "0x25": "2017/06/07 12:36:05"
+ "0x26": "01"
+ "0x27": "03.00"
+ "0x28": "x86_64-arista_7260cx3_64"
+ "0x2d": "Arista Networks"
+ "0x2e": "Aboot-norcal7-7.2.3-pcie2x4-12345678"
sonic_multi_asic:
vars:
diff --git a/ansible/library/conn_graph_facts.py b/ansible/library/conn_graph_facts.py
index dbeca320a9b..8493801395f 100755
--- a/ansible/library/conn_graph_facts.py
+++ b/ansible/library/conn_graph_facts.py
@@ -50,6 +50,10 @@
device_vlan_range: all configured vlan range for the device(host)
device_port_vlans: detailed vlanids for each physical port and switchport mode
server_links: each server port vlan ids
+ device_console_info: The device's console server type, mgmtip, hwsku and protocol
+ device_console_link: The console server port connected to the device
+ device_pdu_info: The device's pdu server type, mgmtip, hwsku and protocol
+ device_pdu_links: The pdu server ports connected to the device
'''
@@ -94,6 +98,9 @@
'''
+debug_fname = None
+
+
class Parse_Lab_Graph():
"""
Parse the generated lab physical connection graph and insert Ansible fact of the graph
@@ -112,9 +119,13 @@ def __init__(self, xmlfile):
self.vlanport = {}
self.vlanrange = {}
self.links = {}
+ self.consolelinks = {}
+ self.pdulinks = {}
self.server = defaultdict(dict)
self.pngtag = 'PhysicalNetworkGraphDeclaration'
self.dpgtag = 'DataPlaneGraph'
+ self.pcgtag = 'PowerControlGraphDeclaration'
+ self.csgtag = 'ConsoleGraphDeclaration'
def port_vlanlist(self, vlanrange):
vlans = []
@@ -182,6 +193,75 @@ def parse_graph(self):
self.links[start_dev][link.attrib['StartPort']] = {'peerdevice':link.attrib['EndDevice'], 'peerport': link.attrib['EndPort'], 'speed': link.attrib['BandWidth']}
if end_dev:
self.links[end_dev][link.attrib['EndPort']] = {'peerdevice': link.attrib['StartDevice'], 'peerport': link.attrib['StartPort'], 'speed': link.attrib['BandWidth']}
+ console_root = self.root.find(self.csgtag)
+ if console_root:
+ devicecsgroot = console_root.find('DevicesConsoleInfo')
+ devicescsg = devicecsgroot.findall('DeviceConsoleInfo')
+ if devicescsg is not None:
+ for dev in devicescsg:
+ hostname = dev.attrib['Hostname']
+ if hostname is not None:
+ deviceinfo[hostname] = {}
+ hwsku = dev.attrib['HwSku']
+ devtype = dev.attrib['Type']
+ protocol = dev.attrib['Protocol']
+ mgmt_ip = dev.attrib['ManagementIp']
+ deviceinfo[hostname]['HwSku'] = hwsku
+ deviceinfo[hostname]['Type'] = devtype
+ deviceinfo[hostname]['Protocol'] = protocol
+ deviceinfo[hostname]['ManagementIp'] = mgmt_ip
+ self.consolelinks[hostname] = {}
+ console_link_root = console_root.find('ConsoleLinksInfo')
+ if console_link_root:
+ allconsolelinks = console_link_root.findall('ConsoleLinkInfo')
+ if allconsolelinks is not None:
+ for consolelink in allconsolelinks:
+ start_dev = consolelink.attrib['StartDevice']
+ end_dev = consolelink.attrib['EndDevice']
+ if start_dev:
+ if start_dev not in self.consolelinks:
+ self.consolelinks.update({start_dev : {}})
+ self.consolelinks[start_dev][consolelink.attrib['StartPort']] = {'peerdevice':consolelink.attrib['EndDevice'], 'peerport': 'ConsolePort'}
+ if end_dev:
+ if end_dev not in self.consolelinks:
+ self.consolelinks.update({end_dev : {}})
+ self.consolelinks[end_dev]['ConsolePort'] = {'peerdevice': consolelink.attrib['StartDevice'], 'peerport': consolelink.attrib['StartPort']}
+
+ pdu_root = self.root.find(self.pcgtag)
+ if pdu_root:
+ devicepcgroot = pdu_root.find('DevicesPowerControlInfo')
+ devicespcsg = devicepcgroot.findall('DevicePowerControlInfo')
+ if devicespcsg is not None:
+ for dev in devicespcsg:
+ hostname = dev.attrib['Hostname']
+ if hostname is not None:
+ deviceinfo[hostname] = {}
+ hwsku = dev.attrib['HwSku']
+ devtype = dev.attrib['Type']
+ protocol = dev.attrib['Protocol']
+ mgmt_ip = dev.attrib['ManagementIp']
+ deviceinfo[hostname]['HwSku'] = hwsku
+ deviceinfo[hostname]['Type'] = devtype
+ deviceinfo[hostname]['Protocol'] = protocol
+ deviceinfo[hostname]['ManagementIp'] = mgmt_ip
+ self.pdulinks[hostname] = {}
+ pdu_link_root = pdu_root.find('PowerControlLinksInfo')
+ if pdu_link_root:
+ allpdulinks = pdu_link_root.findall('PowerControlLinkInfo')
+ if allpdulinks is not None:
+ for pdulink in allpdulinks:
+ start_dev = pdulink.attrib['StartDevice']
+ end_dev = pdulink.attrib['EndDevice']
+ print_debug_msg(debug_fname, "pdulink {}".format(pdulink.attrib))
+ print_debug_msg(debug_fname, "self.pdulinks {}".format(self.pdulinks))
+ if start_dev:
+ if start_dev not in self.pdulinks:
+ self.pdulinks.update({start_dev : {}})
+ self.pdulinks[start_dev][pdulink.attrib['StartPort']] = {'peerdevice':pdulink.attrib['EndDevice'], 'peerport': pdulink.attrib['EndPort']}
+ if end_dev:
+ if end_dev not in self.pdulinks:
+ self.pdulinks.update({end_dev : {}})
+ self.pdulinks[end_dev][pdulink.attrib['EndPort']] = {'peerdevice': pdulink.attrib['StartDevice'], 'peerport': pdulink.attrib['StartPort']}
self.devices = deviceinfo
self.vlanport = devicel2info
@@ -249,6 +329,53 @@ def contains_hosts(self, hostnames):
return set(hostnames) <= set(self.devices)
+ def get_host_console_info(self, hostname):
+ """
+ return the given hostname console info of mgmtip, protocol, hwsku and type
+ """
+ if hostname in self.devices:
+ try:
+ ret = self.devices[self.consolelinks[hostname]['ConsolePort']['peerdevice']]
+ except KeyError:
+ ret = {}
+ return ret
+ else:
+ return self.devices
+
+ def get_host_console_link(self, hostname):
+ """
+ return the given hostname console link info of console server and port
+ """
+ if hostname in self.consolelinks:
+ return self.consolelinks[hostname]
+ else:
+ return self.consolelinks
+
+ def get_host_pdu_info(self, hostname):
+ """
+ return the given hostname pdu info of mgmtip, protocol, hwsku and type
+ """
+ if hostname in self.devices:
+ ret = {}
+ for key in ['PSU1', 'PSU2']:
+ try:
+ ret.update({key : self.devices[self.pdulinks[hostname][key]['peerdevice']]})
+ except KeyError:
+ pass
+ return ret
+ else:
+ return self.devices
+
+ def get_host_pdu_links(self, hostname):
+ """
+ return the given hostname pdu links info of pdu servers and ports
+ """
+ if hostname in self.pdulinks:
+ return self.pdulinks[hostname]
+ else:
+ return self.pdulinks
+
+
LAB_CONNECTION_GRAPH_FILE = 'graph_files.yml'
EMPTY_GRAPH_FILE = 'empty_graph.xml'
LAB_GRAPHFILE_PATH = 'files/'
@@ -262,6 +389,7 @@ def find_graph(hostnames):
Parameters:
hostnames: list of duts in the target testbed.
"""
+ global debug_fname
filename = os.path.join(LAB_GRAPHFILE_PATH, LAB_CONNECTION_GRAPH_FILE)
with open(filename) as fd:
file_list = yaml.safe_load(fd)
@@ -300,9 +428,6 @@ def get_port_name_list(hwsku):
return port_name_list_sorted
-debug_fname = None
-
-
def main():
module = AnsibleModule(
argument_spec=dict(
@@ -364,6 +489,10 @@ def main():
device_vlan_range = {}
device_vlan_list = {}
device_vlan_map_list = {}
+ device_console_info = {}
+ device_console_link = {}
+ device_pdu_info = {}
+ device_pdu_links = {}
for hostname in hostnames:
dev = lab_graph.get_host_device_info(hostname)
if dev is None:
@@ -398,6 +527,10 @@ def main():
if not found_port_for_vlan:
module.fail_json(msg="Did not find corresponding link for vlan %d in %s for host %s" % (a_host_vlan, port_vlans, hostname))
device_port_vlans[hostname] = port_vlans
+ device_console_info[hostname] = lab_graph.get_host_console_info(hostname)
+ device_console_link[hostname] = lab_graph.get_host_console_link(hostname)
+ device_pdu_info[hostname] = lab_graph.get_host_pdu_info(hostname)
+ device_pdu_links[hostname] = lab_graph.get_host_pdu_links(hostname)
results = {k: v for k, v in locals().items()
if (k.startswith("device_") and v)}
diff --git a/ansible/library/test_facts.py b/ansible/library/test_facts.py
index c50d02ff599..e2c7ddf26a9 100644
--- a/ansible/library/test_facts.py
+++ b/ansible/library/test_facts.py
@@ -28,13 +28,20 @@
'''
EXAMPLES = '''
- Testbed CSV file example:
+ Testbed CSV file example - deprecated:
# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
ptf1-m,ptf1,ptf32,docker-ptf,ptf-1,10.255.0.188/24,,server_1,,str-msn2700-01,Tests ptf
vms-t1,vms1-1,t1,docker-ptf,ptf-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests vms
vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests vms
...
+ Testbed CSV file example - recommended:
+ # conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_file,auto_recover,comment
+ ptf1-m,ptf1,ptf32,docker-ptf,ptf-1,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,Tests ptf
+ vms-t1,vms1-1,t1,docker-ptf,ptf-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests vms
+ vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests vms
+ ...
+
Testcases YAML File example:
testcases:
acl:
@@ -98,7 +105,8 @@
class ParseTestbedTopoinfo():
"""Parse the testbed file used to describe whole testbed info"""
- TESTBED_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment')
+ TESTBED_FIELDS_DEPRECATED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment')
+ TESTBED_FIELDS_RECOMMENDED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'inv_name', 'auto_recover', 'comment')
def __init__(self, testbed_file):
self.testbed_filename = testbed_file
@@ -113,13 +121,17 @@ def _cidr_to_ip_mask(network):
def _read_testbed_topo_from_csv():
"""Read csv testbed info file."""
with open(self.testbed_filename) as f:
- topo = csv.DictReader(f, fieldnames=self.TESTBED_FIELDS,
- delimiter=',')
-
- # Validate all field are in the same order and are present
- header = next(topo)
- for field in self.TESTBED_FIELDS:
- assert header[field].replace('#', '').strip() == field
+ header = [field.strip(' #') for field in f.readline().strip().split(',')]
+ if len(header) == len(self.TESTBED_FIELDS_DEPRECATED):
+ testbed_fields = self.TESTBED_FIELDS_DEPRECATED
+ elif len(header) == len(self.TESTBED_FIELDS_RECOMMENDED):
+ testbed_fields = self.TESTBED_FIELDS_RECOMMENDED
+ else:
+ raise ValueError('Unsupported testbed fields %s' % str(header))
+ for header_field, expect_field in zip(header, testbed_fields):
+ assert header_field == expect_field
+
+ topo = csv.DictReader(f, fieldnames=testbed_fields, delimiter=',')
for line in topo:
if line['conf-name'].lstrip().startswith('#'):
@@ -133,7 +145,7 @@ def _read_testbed_topo_from_csv():
_cidr_to_ip_mask(line["ptf_ipv6"])
line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';')
- line['duts_map'] = {dut:line['duts'].index(dut) for dut in line['duts']}
+ line['duts_map'] = {dut: line['duts'].index(dut) for dut in line['duts']}
del line['dut']
self.testbed_topo[line['conf-name']] = line
diff --git a/ansible/roles/test/files/ptftests/advanced-reboot.py b/ansible/roles/test/files/ptftests/advanced-reboot.py
index 75aa5fe2bb3..a3a4da42a36 100644
--- a/ansible/roles/test/files/ptftests/advanced-reboot.py
+++ b/ansible/roles/test/files/ptftests/advanced-reboot.py
@@ -155,6 +155,7 @@ def __init__(self):
self.check_param('vnet', False, required=False)
self.check_param('vnet_pkts', None, required=False)
self.check_param('target_version', '', required=False)
+ self.check_param('bgp_v4_v6_time_diff', 40, required=False)
if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None':
self.test_params['preboot_oper'] = None
if not self.test_params['inboot_oper'] or self.test_params['inboot_oper'] == 'None':
@@ -871,6 +872,7 @@ def handle_post_reboot_health_check(self):
def wait_for_ssh_threads(signal):
while any(thr.is_alive() for thr, _ in self.ssh_jobs) and not signal.is_set():
+ self.log('Waiting till SSH threads stop')
time.sleep(self.TIMEOUT)
for thr, _ in self.ssh_jobs:
@@ -1147,8 +1149,10 @@ def cmd(self, cmds):
return stdout, stderr, return_code
def peer_state_check(self, ip, queue):
- ssh = Arista(ip, queue, self.test_params)
+ self.log('SSH thread for VM {} started'.format(ip))
+ ssh = Arista(ip, queue, self.test_params, log_cb=self.log)
self.fails[ip], self.info[ip], self.cli_info[ip], self.logs_info[ip] = ssh.run()
+ self.log('SSH thread for VM {} finished'.format(ip))
def wait_until_cpu_port_down(self, signal):
while not signal.is_set():
diff --git a/ansible/roles/test/files/ptftests/arista.py b/ansible/roles/test/files/ptftests/arista.py
index 53885eb62f0..dfd47555a25 100644
--- a/ansible/roles/test/files/ptftests/arista.py
+++ b/ansible/roles/test/files/ptftests/arista.py
@@ -35,9 +35,10 @@
class Arista(object):
DEBUG = False
- def __init__(self, ip, queue, test_params, login='admin', password='123456'):
+ def __init__(self, ip, queue, test_params, log_cb=None, login='admin', password='123456'):
self.ip = ip
self.queue = queue
+ self.log_cb = log_cb
self.login = login
self.password = password
self.conn = None
@@ -48,10 +49,15 @@ def __init__(self, ip, queue, test_params, login='admin', password='123456'):
self.info = set()
self.min_bgp_gr_timeout = int(test_params['min_bgp_gr_timeout'])
self.reboot_type = test_params['reboot_type']
+ self.bgp_v4_v6_time_diff = test_params['bgp_v4_v6_time_diff']
def __del__(self):
self.disconnect()
+ def log(self, msg):
+ if self.log_cb is not None:
+ self.log_cb('SSH thread VM={}: {}'.format(self.ip, msg))
+
def connect(self):
self.conn = paramiko.SSHClient()
self.conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
@@ -117,6 +123,7 @@ def run(self):
while not (quit_enabled and v4_routing_ok and v6_routing_ok):
cmd = self.queue.get()
if cmd == 'quit':
+ self.log('quit command received')
quit_enabled = True
continue
cur_time = time.time()
@@ -159,20 +166,29 @@ def run(self):
attempts = 60
log_present = False
- for _ in range(attempts):
+ log_data = {}
+ for attempt in range(attempts):
+ self.log('Collecting logs for attempt {}'.format(attempt))
log_output = self.do_cmd("show log | begin %s" % log_first_line)
+ self.log('Log output "{}"'.format(log_output))
log_lines = log_output.split("\r\n")[1:-1]
- log_data = self.parse_logs(log_lines)
- if (self.reboot_type == 'fast-reboot' and \
- any(k.startswith('BGP') for k in log_data) and any(k.startswith('PortChannel') for k in log_data)) \
- or (self.reboot_type == 'warm-reboot' and any(k.startswith('BGP') for k in log_data)):
- log_present = True
- break
- time.sleep(1) # wait until logs are populated
+ try:
+ log_data = self.parse_logs(log_lines)
+ if (self.reboot_type == 'fast-reboot' and \
+ any(k.startswith('BGP') for k in log_data) and any(k.startswith('PortChannel') for k in log_data)) \
+ or (self.reboot_type == 'warm-reboot' and any(k.startswith('BGP') for k in log_data)):
+ log_present = True
+ break
+ time.sleep(1) # wait until logs are populated
+ except Exception as err:
+ msg = 'Exception occured when parsing logs from VM: msg={} type={}'.format(err, type(err))
+ self.log(msg)
+ self.fails.add(msg)
if not log_present:
log_data['error'] = 'Incomplete output'
+ self.log('Disconnecting from VM')
self.disconnect()
# save data for troubleshooting
@@ -186,6 +202,7 @@ def run(self):
with open("/tmp/%s.logging" % self.ip, "w") as fp:
fp.write("\n".join(log_lines))
+ self.log('Checking BGP GR peer status on VM')
self.check_gr_peer_status(data)
cli_data = {}
cli_data['lacp'] = self.check_series_status(data, "lacp", "LACP session")
@@ -193,15 +210,17 @@ def run(self):
cli_data['bgp_v6'] = self.check_series_status(data, "bgp_route_v6", "BGP v6 routes")
cli_data['po'] = self.check_change_time(samples, "po_changetime", "PortChannel interface")
- route_timeout = log_data['route_timeout']
- cli_data['route_timeout'] = route_timeout
+ if 'route_timeout' in log_data:
+ route_timeout = log_data['route_timeout']
+ cli_data['route_timeout'] = route_timeout
- # {'10.0.0.38': [(0, '4200065100)')], 'fc00::2d': [(0, '4200065100)')]}
- for nei in route_timeout.keys():
- asn = route_timeout[nei][0][-1]
- msg = 'BGP route GR timeout: neighbor %s (ASN %s' % (nei, asn)
- self.fails.add(msg)
+ # {'10.0.0.38': [(0, '4200065100)')], 'fc00::2d': [(0, '4200065100)')]}
+ for nei in route_timeout.keys():
+ asn = route_timeout[nei][0][-1]
+ msg = 'BGP route GR timeout: neighbor %s (ASN %s' % (nei, asn)
+ self.fails.add(msg)
+ self.log('Finishing run()')
return self.fails, self.info, cli_data, log_data
def extract_from_logs(self, regexp, data):
@@ -248,24 +267,29 @@ def parse_logs(self, data):
# first state is Idle, last state is Established
for events in result_bgp.values():
if len(events) > 1:
- assert(events[0][1] != 'Established')
+ first_state = events[0][1]
+ assert first_state != 'Established', 'First BGP state should not be Established, it was {}'.format(first_state)
- assert(events[-1][1] == 'Established')
+ last_state = events[-1][1]
+ assert last_state == 'Established', 'Last BGP state is not Established, it was {}'.format(last_state)
- # verify BGP establishment time between v4 and v6 peer is not more than 20s
+ # verify BGP establishment time between v4 and v6 peer is not more than self.bgp_v4_v6_time_diff
if self.reboot_type == 'warm-reboot':
estab_time = 0
for ip in result_bgp:
if estab_time > 0:
diff = abs(result_bgp[ip][-1][0] - estab_time)
- assert(diff <= 20)
+ assert diff <= self.bgp_v4_v6_time_diff, \
+ 'BGP establishement time between v4 and v6 peer is longer than {} sec, it was {}'.format(self.bgp_v4_v6_time_diff, diff)
break
estab_time = result_bgp[ip][-1][0]
# first state is down, last state is up
for events in result_if.values():
- assert(events[0][1] == 'down')
- assert(events[-1][1] == 'up')
+ first_state = events[0][1]
+ last_state = events[-1][1]
+ assert first_state == 'down', 'First PO state should be down, it was {}'.format(first_state)
+ assert last_state == 'up', 'Last PO state should be up, it was {}'.format(last_state)
neigh_ipv4 = [neig_ip for neig_ip in result_bgp.keys() if '.' in neig_ip][0]
for neig_ip in result_bgp.keys():
diff --git a/ansible/roles/test/files/ptftests/dualtor_sniffer.py b/ansible/roles/test/files/ptftests/dualtor_sniffer.py
new file mode 100644
index 00000000000..56285e8a676
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/dualtor_sniffer.py
@@ -0,0 +1,57 @@
+"""
+PTF test script to be used by dualtor dataplane utilities.
+This PTF test uses Scapy to sniff packets based on the filter and timeout provided.
+Captured packets are dumped into a pcap file which later can be extracted from ptf.
+"""
+
+import ptf
+from ptf.base_tests import BaseTest
+import ptf.testutils as testutils
+import scapy.all as scapyall
+import socket
+import logging
+
+from ptf import config # lgtm[py/unused-import]
+
+SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
+
+
+class Sniff(BaseTest):
+ def __init__(self):
+ BaseTest.__init__(self)
+ self.sniff_timeout = testutils.test_params_get().get("sniff_timeout")
+ self.sniff_filter = testutils.test_params_get().get("sniff_filter")
+ self.capture_pcap = testutils.test_params_get().get("capture_pcap")
+ self.sniffer_log = testutils.test_params_get().get("sniffer_logs")
+ self.port_filter_expression = testutils.test_params_get().get("port_filter_expression")
+
+
+ def setUp(self):
+ self.dataplane = ptf.dataplane_instance
+ logging.info("Setting socket configuration and filters")
+ for p in self.dataplane.ports.values():
+ port = p.get_packet_source()
+ port.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, SOCKET_RECV_BUFFER_SIZE)
+ #scapyall.attach_filter(port.socket, self.port_filter_expression)
+ logging.info("Socket configuration and filters complete")
+
+
+ def runTest(self):
+ """
+ @summary: Sniff packets based on given filters and timeout
+ """
+ logging.info("Scappy sniffer started with wait {} and filter: {}".format(self.sniff_timeout, self.sniff_filter))
+ self.packets = scapyall.sniff(timeout=self.sniff_timeout, filter=self.sniff_filter)
+ logging.info("Scappy sniffer ended")
+ self.save_sniffed_packets()
+
+
+ def save_sniffed_packets(self):
+ """
+ @summary: Dump all the captured packets into a pcap file
+ """
+ if self.packets:
+ scapyall.wrpcap(self.capture_pcap, self.packets)
+ logging.info("Pcap file dumped to {}".format(self.capture_pcap))
+ else:
+ logging.info("Pcap file is empty")
diff --git a/ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py b/ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py
new file mode 100644
index 00000000000..db0906e378c
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py
@@ -0,0 +1,162 @@
+'''
+Description: This file contains the IPinIP test for dualtor testbed
+
+Usage: Examples of how to start this script
+ /usr/bin/ptf --test-dir ptftests ip_in_ip_tunnel_test.IpinIPTunnelTest --platform-dir ptftests --qlen=2000 --platform remote -t hash_key_list=['src-port', 'dst-port', 'src-mac', 'dst-mac', 'src-ip'];server_ip='192.168.0.2';active_tor_ip='10.1.0.33';standby_tor_mac='d4:af:f7:4d:af:18';standby_tor_ip='10.1.0.32';active_tor_mac='d4:af:f7:4d:a4:44';ptf_portchannel_indices={u'PortChannel0001': [29], u'PortChannel0003': [33], u'PortChannel0002': [31], u'PortChannel0004': [35]} --relax --debug info --log-file /tmp/ip_in_ip_tunnel_test.2021-02-10-07:14:46.log --socket-recv-size 16384
+
+'''
+#---------------------------------------------------------------------
+# Global imports
+#---------------------------------------------------------------------
+import logging
+import random
+from ipaddress import ip_address
+import ptf
+import ptf.packet as scapy
+from ptf.base_tests import BaseTest
+from ptf.mask import Mask
+from ptf.testutils import *
+
+PACKET_NUM = 10000
+DIFF = 0.25 # The valid range for balance check
+SRC_IP_RANGE = [unicode('8.0.0.0'), unicode('8.255.255.255')]
+TIMEOUT = 1
+
+class IpinIPTunnelTest(BaseTest):
+ '''
+ @summary: Overview of functionality
+    This script sends traffic to standby ToR, and captures traffic
+ on all portchannel interfaces to check balance.
+ '''
+ def __init__(self):
+ '''
+ @summary: constructor
+ '''
+ BaseTest.__init__(self)
+ self.test_params = test_params_get()
+
+ def setUp(self):
+ self.server_ip = self.test_params['server_ip']
+ self.active_tor_mac = self.test_params['active_tor_mac']
+ self.standby_tor_mac = self.test_params['standby_tor_mac']
+ self.active_tor_ip = self.test_params['active_tor_ip']
+ self.standby_tor_ip = self.test_params['standby_tor_ip']
+ self.ptf_portchannel_indices = self.test_params['ptf_portchannel_indices']
+ self.indice_to_portchannel = {}
+ for port_channel, indices in self.ptf_portchannel_indices.items():
+ for indice in indices:
+ self.indice_to_portchannel[indice] = port_channel
+
+ self.hash_key_list = self.test_params['hash_key_list']
+ self.dataplane = ptf.dataplane_instance
+
+ def runTest(self):
+ """
+ Entrypoint of test script.
+ """
+ self.send_and_verify_packets()
+
+ def random_ip(self, begin, end):
+ """
+ Generate a random IP from given ip range
+ """
+ length = int(ip_address(end)) - int(ip_address(begin))
+ return str(ip_address(begin) + random.randint(0, length))
+
+ def generate_packet_to_server(self, hash_key):
+ """
+        Generate a packet to server. The values of fields in the packet are filled with random values according to hash_key
+ """
+ base_src_mac = self.dataplane.get_mac(0, 0)
+ ip_src = self.random_ip(SRC_IP_RANGE[0], SRC_IP_RANGE[1]) if hash_key == 'src-ip' else SRC_IP_RANGE[0]
+ ip_dst = self.server_ip
+ sport = random.randint(1, 65535) if hash_key == 'src-port' else 1234
+ dport = random.randint(1, 65535) if hash_key == 'dst-port' else 80
+ src_mac = (base_src_mac[:-5] + "%02x" % random.randint(0, 255) + ":" + "%02x" % random.randint(0, 255)) if hash_key == 'src-mac' else base_src_mac
+ dst_mac = self.standby_tor_mac
+ vlan_id = random.randint(1, 4094) if hash_key == 'vlan-id' else 0
+ pkt = simple_tcp_packet(pktlen=128 if vlan_id == 0 else 132,
+ eth_dst=dst_mac,
+ eth_src=src_mac,
+ dl_vlan_enable=False if vlan_id == 0 else True,
+ vlan_vid=vlan_id,
+ vlan_pcp=0,
+ ip_src=ip_src,
+ ip_dst=ip_dst,
+ tcp_sport=sport,
+ tcp_dport=dport,
+ ip_ttl=64)
+ return pkt
+
+ def generate_expected_packet(self, inner_pkt):
+ """
+ Generate ip_in_ip packet for verifying.
+ """
+ inner_pkt = inner_pkt.copy()
+ inner_pkt.ttl = inner_pkt.ttl - 1
+ pkt = scapy.Ether(dst=self.active_tor_mac, src=self.standby_tor_mac) / \
+ scapy.IP(src=self.standby_tor_ip, dst=self.active_tor_ip) / inner_pkt['IP']
+ exp_pkt = Mask(pkt)
+ exp_pkt.set_do_not_care_scapy(scapy.Ether, 'dst')
+
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "ihl")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "tos")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "len")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "id")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "flags")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "frag")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "proto")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
+
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "sport")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "seq")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "ack")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "reserved")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "dataofs")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "window")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "chksum")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "urgptr")
+ exp_pkt.set_ignore_extra_bytes()
+
+ return exp_pkt
+
+
+ def check_balance(self, pkt_distribution, hash_key):
+ portchannel_num = len(self.ptf_portchannel_indices)
+ expect_packet_num = PACKET_NUM / portchannel_num
+ pkt_num_lo = expect_packet_num * (1.0 - DIFF)
+ pkt_num_hi = expect_packet_num * (1.0 + DIFF)
+ logging.info("hash key = {}".format(hash_key))
+ logging.info("%-10s \t %10s \t %10s \t" % ("port(s)", "exp_cnt", "act_cnt"))
+ balance = True
+ for portchannel, count in pkt_distribution.items():
+ logging.info("%-10s \t %10s \t %10s \t" % (portchannel, str(expect_packet_num), str(count)))
+ if count < pkt_num_lo or count > pkt_num_hi:
+ balance = False
+ if not balance:
+ print("Check balance failed for {}".format(hash_key))
+ assert(balance)
+
+ def send_and_verify_packets(self):
+ """
+ Send packet from ptf (T1) to standby ToR, and verify
+ """
+ dst_ports = self.indice_to_portchannel.keys()
+ # Select the first ptf indice as src port
+ src_port = dst_ports[0]
+ for hash_key in self.hash_key_list:
+ pkt_distribution = {}
+ for i in range(0, PACKET_NUM):
+ inner_pkt = self.generate_packet_to_server(hash_key)
+ tunnel_pkt = self.generate_expected_packet(inner_pkt)
+ send_packet(self, src_port, inner_pkt)
+ idx, count = verify_packet_any_port(test=self,
+ pkt=tunnel_pkt,
+ ports=dst_ports,
+ device_number=0,
+ timeout=TIMEOUT)
+ pkt_distribution[self.indice_to_portchannel[dst_ports[idx]]] = pkt_distribution.get(self.indice_to_portchannel[dst_ports[idx]], 0) + 1
+ self.check_balance(pkt_distribution, hash_key)
+
+
diff --git a/ansible/roles/test/files/ptftests/pfc_pause_test.py b/ansible/roles/test/files/ptftests/pfc_pause_test.py
index 7fdf5a156d2..ea41dfb22d3 100755
--- a/ansible/roles/test/files/ptftests/pfc_pause_test.py
+++ b/ansible/roles/test/files/ptftests/pfc_pause_test.py
@@ -1,5 +1,8 @@
+import datetime
+import glob
import ipaddress
import logging
+import os
import random
import socket
import sys
@@ -10,6 +13,7 @@
import ptf
import ptf.packet as scapy
import ptf.dataplane as dataplane
+import scapy as sc
from ptf import config
from ptf.base_tests import BaseTest
@@ -31,7 +35,7 @@ def capture_matched_packets(test, exp_packet, port, device_number=0, timeout=1):
"""
if timeout <= 0:
raise Exception("%s() requires positive timeout value." % sys._getframe().f_code.co_name)
-
+
pkts = list()
while True:
result = dp_poll(test, device_number=device_number, port_number=port, timeout=timeout)
@@ -40,9 +44,9 @@ def capture_matched_packets(test, exp_packet, port, device_number=0, timeout=1):
pkts.append(result.packet)
else:
break
-
- return pkts
-
+
+ return pkts
+
class PfcPauseTest(BaseTest):
def __init__(self):
BaseTest.__init__(self)
@@ -54,7 +58,7 @@ def setUp(self):
self.mac_src = self.test_params['mac_src']
self.mac_dst = self.test_params['mac_dst']
self.pkt_count = int(self.test_params['pkt_count'])
- self.pkt_intvl = float(self.test_params['pkt_intvl'])
+ self.pkt_intvl = float(self.test_params['pkt_intvl'])
self.port_src = int(self.test_params['port_src'])
self.port_dst = self.test_params['port_dst']
self.ip_src = self.test_params['ip_src']
@@ -65,34 +69,42 @@ def setUp(self):
self.queue_paused = self.test_params['queue_paused']
""" if DUT has MAC information """
self.dut_has_mac = self.test_params['dut_has_mac']
-
+ self.debug = self.test_params.get('debug', False)
+
def runTest(self):
pass_cnt = 0
tos = self.dscp<<2
tos_bg = self.dscp_bg<<2
-
+ if self.debug:
+ # remove previous debug files
+ files = glob.glob("/tmp/pfc_pause_{}*".format(self.dscp))
+ for file in files:
+ os.remove(file)
+ current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
+ log_file = open("/tmp/pfc_pause_{}_{}".format(self.dscp, current_time), "w")
+
""" If DUT needs to learn MAC addresses """
- if not self.dut_has_mac:
+ if not self.dut_has_mac:
pkt = simple_udp_packet(
eth_dst=self.mac_dst,
eth_src=self.mac_src,
ip_src=self.ip_src,
ip_dst=self.ip_dst)
-
+
send_packet(self, self.port_src, pkt, 5)
-
+
pkt = simple_udp_packet(
eth_dst=self.mac_src,
eth_src=self.mac_dst,
ip_src=self.ip_dst,
ip_dst=self.ip_src)
-
+
send_packet(self, self.port_dst, pkt, 5)
-
+
for x in range(self.pkt_count):
sport = random.randint(0, 65535)
dport = random.randint(0, 65535)
-
+
pkt = simple_udp_packet(
eth_dst=self.mac_dst,
eth_src=self.mac_src,
@@ -102,7 +114,7 @@ def runTest(self):
udp_sport=sport,
udp_dport=dport,
ip_ttl=64)
-
+
pkt_bg = simple_udp_packet(
eth_dst=self.mac_dst,
eth_src=self.mac_src,
@@ -112,7 +124,7 @@ def runTest(self):
udp_sport=sport,
udp_dport=dport,
ip_ttl=64)
-
+
exp_pkt = simple_udp_packet(
ip_src=self.ip_src,
ip_dst=self.ip_dst,
@@ -120,30 +132,37 @@ def runTest(self):
udp_sport=sport,
udp_dport=dport,
ip_ttl=63)
-
+
masked_exp_pkt = Mask(exp_pkt)
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "tos")
-
+
send_packet(self, self.port_src, pkt, 1)
send_packet(self, self.port_src, pkt_bg, 1)
-
+
pkts = capture_matched_packets(self, masked_exp_pkt, self.port_dst)
-
+
+ if self.debug:
+ for i, pkt in enumerate(pkts):
+ dump_msg = "Iteration {}:\n Pkt num {}:\n Hex dump: {}\n\n".format(x, i, sc.utils.hexstr(pkt))
+ log_file.write(dump_msg)
+
time.sleep(self.pkt_intvl)
-
+
""" If the queue is paused, we should only receive the background packet """
if self.queue_paused:
pass_cnt += int(len(pkts) == 1 and scapy.Ether(pkts[0])[scapy.IP].tos == tos_bg)
else:
pass_cnt += int(len(pkts) == 2)
-
+
+ if self.debug:
+ log_file.close()
print "Passes: %d / %d" % (pass_cnt, self.pkt_count)
-
- def tearDown(self):
+
+ def tearDown(self):
reset_filters()
BaseTest.tearDown(self)
diff --git a/ansible/roles/test/files/ptftests/vxlan-decap.py b/ansible/roles/test/files/ptftests/vxlan-decap.py
index 21ff40fcc18..848a1047f62 100644
--- a/ansible/roles/test/files/ptftests/vxlan-decap.py
+++ b/ansible/roles/test/files/ptftests/vxlan-decap.py
@@ -10,9 +10,10 @@
# 1. 'config_file' is a filename of a file which contains all necessary information to run the test. The file is populated by ansible. This parameter is mandatory.
# 2. 'vxlan_enabled' is a boolean parameter. When the parameter is true the test will fail if vxlan test failing. When the parameter is false the test will not fail. By default this parameter is false.
# 3. 'count' is an integer parameter. It defines how many packets are sent for each combination of ingress/egress interfaces. By default the parameter equal to 1
-# 4. 'dut_host' is the ip address of dut.
+# 4. 'dut_hostname' is the name of dut.
# 5. 'sonic_admin_user': User name to login dut
# 6. 'sonic_admin_password': Password for sonic_admin_user to login dut
+# 7. 'sonic_admin_alt_password': Alternate Password for sonic_admin_user to login dut
import sys
import os.path
@@ -170,9 +171,9 @@ def setUp(self):
raise Exception("required parameter 'config_file' is not present")
config = self.test_params['config_file']
- if 'dut_host' not in self.test_params:
- raise Exception("required parameter 'dut_host' is not present")
- self.dut_host = self.test_params['dut_host']
+ if 'dut_hostname' not in self.test_params:
+ raise Exception("required parameter 'dut_hostname' is not present")
+ self.dut_hostname = self.test_params['dut_hostname']
if 'sonic_admin_user' not in self.test_params:
raise Exception("required parameter 'sonic_admin_user' is not present")
@@ -182,6 +183,10 @@ def setUp(self):
raise Exception("required parameter 'sonic_admin_password' is not present")
self.sonic_admin_password = self.test_params['sonic_admin_password']
+ if 'sonic_admin_alt_password' not in self.test_params:
+ raise Exception("required parameter 'sonic_admin_alt_password' is not present")
+ self.sonic_admin_alt_password = self.test_params['sonic_admin_alt_password']
+
if not os.path.isfile(config):
raise Exception("the config file %s doesn't exist" % config)
@@ -252,9 +257,10 @@ def setUp(self):
time.sleep(10)
self.dataplane.flush()
self.dut_connection = DeviceConnection(
- self.dut_host,
+ self.dut_hostname,
self.sonic_admin_user,
- password=self.sonic_admin_password
+ password=self.sonic_admin_password,
+ alt_password=self.sonic_admin_alt_password
)
return
diff --git a/ansible/roles/vm_set/files/mux_simulator.md b/ansible/roles/vm_set/files/mux_simulator.md
index 0eb91c78349..3e851c70770 100644
--- a/ansible/roles/vm_set/files/mux_simulator.md
+++ b/ansible/roles/vm_set/files/mux_simulator.md
@@ -62,6 +62,28 @@ sudo systemctl restart mux-simulator
The mux-simulator service is shared by multiple dualtor test setups using the same test server. Any dualtor test setups using it is recorded in a persistent file on test server `{{ root_path }}/mux_simulator.setups.txt`. During `testbed-cli.sh add-topo`, the vm set name of current setup will be added into it. During `testbed-cli.sh remove-topo`, the vm set name of current setup will be removed from it. When the file is empty, the mux-simulator service will be stopped.
+## How to troubleshoot mux simulator
+By default, the mux-simulator service outputs its logs to `/tmp/mux_simulator.log`. Default debug level is INFO. If DEBUG level logging is needed for troubleshooting, please follow below steps:
+
+1. Stop the mux-simulator service.
+```
+sudo systemctl stop mux-simulator
+```
+2. Find out path of the mux_simulator.py script from the mux-simulator systemd service file.
+```
+cat /etc/systemd/system/mux-simulator.service
+```
+3. Manually run the mux_simulator.py script with `-v` option to **turn on DEBUG level logging**.
+```
+ sudo /usr/bin/env python /home/azure/veos-vm/mux_simulator.py 8080 -v
+```
+4. Try to call the mux simulator HTTP APIs and check the log file `/tmp/mux_simulator.log` for detailed logging.
+5. After troubleshooting is done, stop the manually started mux_simulator.py script (for example: Ctrl+C).
+6. Start the mux-simulator service again.
+```
+sudo systemctl start mux-simulator
+```
+
## APIs
The APIs using json for data exchange.
diff --git a/ansible/roles/vm_set/files/mux_simulator.py b/ansible/roles/vm_set/files/mux_simulator.py
index 2103b60fb03..a34608f7f18 100644
--- a/ansible/roles/vm_set/files/mux_simulator.py
+++ b/ansible/roles/vm_set/files/mux_simulator.py
@@ -15,9 +15,11 @@
import subprocess
import sys
+from logging.handlers import RotatingFileHandler
from collections import defaultdict
from flask import Flask, request, jsonify
+from flask.logging import default_handler
app = Flask(__name__)
@@ -27,12 +29,6 @@
NIC = 'nic'
-logging.basicConfig(
- filename='/tmp/mux_simulator.log',
- level=logging.INFO,
- format='%(asctime)s %(levelname)s %(message)s')
-
-
def run_cmd(cmdline):
"""Use subprocess to run a command line with shell=True
@@ -402,8 +398,13 @@ def get_mux_bridges(vm_set):
"""
bridge_prefix = 'mbr-{}-'.format(vm_set)
mux_bridges = [intf for intf in os.listdir('/sys/class/net') if intf.startswith(bridge_prefix)]
+ valid_mux_bridges = []
+ for mux_bridge in mux_bridges:
+ out = run_cmd('ovs-vsctl list-ports {}'.format(mux_bridge))
+ if len(out.splitlines()) ==3:
+ valid_mux_bridges.append(mux_bridge)
- return mux_bridges
+ return valid_mux_bridges
def get_all_mux_status(vm_set):
@@ -606,13 +607,29 @@ def mux_cable_flow_update(vm_set, port_index, action):
return jsonify({'err_msg': err_msg}), 500
+def config_logging():
+ rfh = RotatingFileHandler(
+ '/tmp/mux_simulator.log',
+ maxBytes=1024*1024,
+ backupCount=5)
+ fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+ rfh.setFormatter(fmt)
+ rfh.setLevel(logging.INFO)
+ app.logger.addHandler(rfh)
+ app.logger.removeHandler(default_handler)
+
+
if __name__ == '__main__':
usage = '''
Start mux simulator server at specified port.
$ sudo python
'''
+ config_logging()
+
if '-v' in sys.argv:
app.logger.setLevel(logging.DEBUG)
+ for handler in app.logger.handlers:
+ handler.setLevel(logging.DEBUG)
if len(sys.argv) < 2:
app.logger.error(usage)
diff --git a/ansible/testbed.csv b/ansible/testbed.csv
index 3d0c7faf50e..53723f7746d 100644
--- a/ansible/testbed.csv
+++ b/ansible/testbed.csv
@@ -1,13 +1,13 @@
-# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
-ptf1-m,ptf1,ptf32,docker-ptf,ptf-unknown,10.255.0.188/24,,server_1,,str-msn2700-01,Test ptf Mellanox
-ptf2-b,ptf2,ptf64,docker-ptf,ptf-unknown,10.255.0.189/24,,server_1,,lab-s6100-01,Test ptf Broadcom
-vms-sn2700-t1,vms1-1,t1,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms
-vms-sn2700-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms
-vms-sn2700-t0,vms1-1,t0,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms
-vms-s6000-t0,vms2-1,t0,docker-ptf,ptf-unknown,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,Tests Dell S6000 vms
-vms-a7260-t0,vms3-1,t0-116,docker-ptf,ptf-unknown,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,Tests Arista A7260 vms
-vms-s6100-t0,vms4-1,t0-64,docker-ptf,ptf-unknown,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms
-vms-s6100-t1,vms4-1,t1-64,docker-ptf,ptf-unknown,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms
-vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf,ptf-unknown,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms
-vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf-unknown,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],Example Multi DUTs testbed
-vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,superman
+# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment
+ptf1-m,ptf1,ptf32,docker-ptf,ptf-unknown,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,Test ptf Mellanox
+ptf2-b,ptf2,ptf64,docker-ptf,ptf-unknown,10.255.0.189/24,,server_1,,lab-s6100-01,lab,False,Test ptf Broadcom
+vms-sn2700-t1,vms1-1,t1,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms
+vms-sn2700-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms
+vms-sn2700-t0,vms1-1,t0,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms
+vms-s6000-t0,vms2-1,t0,docker-ptf,ptf-unknown,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,lab,True,Tests Dell S6000 vms
+vms-a7260-t0,vms3-1,t0-116,docker-ptf,ptf-unknown,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,lab,True,Tests Arista A7260 vms
+vms-s6100-t0,vms4-1,t0-64,docker-ptf,ptf-unknown,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms
+vms-s6100-t1,vms4-1,t1-64,docker-ptf,ptf-unknown,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms
+vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf,ptf-unknown,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms
+vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf-unknown,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],lab,True,Example Multi DUTs testbed
+vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,lab,True,superman
diff --git a/ansible/vars/topo_dualtor-56.yml b/ansible/vars/topo_dualtor-56.yml
index a8cfcb5c0d4..ab474ebb613 100644
--- a/ansible/vars/topo_dualtor-56.yml
+++ b/ansible/vars/topo_dualtor-56.yml
@@ -54,7 +54,7 @@ topology:
- 0.3@3,1.3@3
- 0.5@5,1.5@5
- 0.7@7,1.7@7
- - 0.9@9,1.5@9
+ - 0.9@9,1.9@9
- 0.11@11,1.11@11
- 0.17@17,1.17@17
- 0.19@19,1.19@19
diff --git a/ansible/veos_vtb b/ansible/veos_vtb
index 4ab6b40ffce..48be6f91739 100644
--- a/ansible/veos_vtb
+++ b/ansible/veos_vtb
@@ -91,6 +91,8 @@ all:
type: kvm
hwsku: Force10-S6000
serial_port: 9002
+ ansible_password: password
+ ansible_user: admin
vlab-05:
ansible_host: 10.250.0.110
ansible_hostv6: fec0::ffff:afa:a
diff --git a/ansible/vtestbed.csv b/ansible/vtestbed.csv
index 93a9f97c9b8..46c75c55a90 100644
--- a/ansible/vtestbed.csv
+++ b/ansible/vtestbed.csv
@@ -1,7 +1,7 @@
-# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
-vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-01],Tests virtual switch vm
-vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-02],Tests virtual switch vm
-vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,fec0::ffff:afa:6/64,server_1,VM0104,[vlab-03],Tests virtual switch vm
-vms-kvm-t0-2,vms6-3,t0,docker-ptf,ptf-03,10.250.0.108/24,fec0::ffff:afa:8/64,server_1,VM0104,[vlab-04],Tests virtual switch vm
-vms-kvm-dual-t0,vms6-4,dualtor,docker-ptf,ptf-04,10.250.0.109/24,fec0::ffff:afa:9/64,server_1,VM0108,[vlab-05;vlab-06],Dual-TOR testbed
-vms-kvm-multi-asic-t1-lag,vms6-4,t1-64-lag,docker-ptf,ptf-05,10.250.0.110/24,fec0::ffff:afa:a/64,server_1,VM0104,[vlab-07],Tests multi-asic virtual switch vm
+# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment
+vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-01],lab,False,Tests virtual switch vm
+vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-02],lab,False,Tests virtual switch vm
+vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,fec0::ffff:afa:6/64,server_1,VM0104,[vlab-03],lab,False,Tests virtual switch vm
+vms-kvm-t0-2,vms6-3,t0,docker-ptf,ptf-03,10.250.0.108/24,fec0::ffff:afa:8/64,server_1,VM0104,[vlab-04],lab,False,Tests virtual switch vm
+vms-kvm-dual-t0,vms6-4,dualtor,docker-ptf,ptf-04,10.250.0.109/24,fec0::ffff:afa:9/64,server_1,VM0108,[vlab-05;vlab-06],lab,False,Dual-TOR testbed
+vms-kvm-multi-asic-t1-lag,vms6-4,t1-64-lag,docker-ptf,ptf-05,10.250.0.110/24,fec0::ffff:afa:a/64,server_1,VM0104,[vlab-07],lab,False,Tests multi-asic virtual switch vm
diff --git a/docs/testbed/README.new.testbed.Configuration.md b/docs/testbed/README.new.testbed.Configuration.md
index eb6b0571f91..49e7c0681ae 100644
--- a/docs/testbed/README.new.testbed.Configuration.md
+++ b/docs/testbed/README.new.testbed.Configuration.md
@@ -49,12 +49,12 @@ The devices section is a dictionary that contains all devices and hosts. This se
For each device that you add, add the following:
-| Hostname | ansible_host | ansible_ssh_user | ansible_ssh_pass | HwSKU | device_type |
-| ------ | ------ | ------ | ------ | ------ | ------ |
-| str-msn2700-01 | [IP Address] | [username] | [password] | DevSonic | DevSonic |
-| str-7260-10 | [IP Address] | [username] | [password] |Arista-7260QX-64 | FanoutRoot |
-| str-7260-10 | [IP Address] | [username] | [password] |Arista-7260QX-64 | FanoutLeaf |
-| str-acs-serv-01 | [IP Address] | [username] | [password] | TestServ | Server |
+| Hostname | ansible_host | ansible_ssh_user | ansible_ssh_pass | HwSKU | device_type |
+| --------------- | ------------ | ---------------- | ---------------- | ---------------- | ----------- |
+| str-msn2700-01 | [IP Address] | [username] | [password] | DevSonic | DevSonic |
+| str-7260-10 | [IP Address] | [username] | [password] | Arista-7260QX-64 | FanoutRoot |
+| str-7260-10 | [IP Address] | [username] | [password] | Arista-7260QX-64 | FanoutLeaf |
+| str-acs-serv-01 | [IP Address] | [username] | [password] | TestServ | Server |
- hostname - names the devices you will use
- ansible_host - this is the managementIP where you can connect to to the device
@@ -111,10 +111,10 @@ Define:
This is where the topology configuration file for the testbed will collect information from when running TestbedProcessing.py.
-| #conf-name | group-name | topo | ptf_image_name | ptf_ip | server | vm_base | dut | comment |
-| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
-| [ptf32 conf-name] | [ptf32 group-name] | [ptf32] | [docker-ptf] | [ip address] | [server group] | [vm_base] | [dut] | [comment] |
-| [t0 conf-name] | [t0 group-name] | [t0] | [docker-ptf] | [ip address] | [server group] | [vm_base] | [dut] | [comment] |
+| #conf-name | group-name | topo | ptf_image_name | ptf_ip | server | vm_base | dut | inv_name | auto_recover | comment |
+| ----------------- | ------------------ | ------- | -------------- | ------------ | -------------- | --------- | ----- | ---------- | -------------- | --------- |
+| [ptf32 conf-name] | [ptf32 group-name] | [ptf32] | [docker-ptf] | [ip address] | [server group] | [vm_base] | [dut] | [inv_name] | [auto_recover] | [comment] |
+| [t0 conf-name] | [t0 group-name] | [t0] | [docker-ptf] | [ip address] | [server group] | [vm_base] | [dut] | [inv_name] | [auto_recover] | [comment] |
For each topology you use in your testbed environment, define the following:
@@ -129,6 +129,8 @@ For each topology you use in your testbed environment, define the following:
- server - server where the testbed resides. Choose a veos_group to use that contains both the lab server and virtual machines
- vm_base - enter in the lowest ID value for the VMs you will be using to run the test cases. The lowest VM ID value can be found under the veos section of the testbed configuration file. IF empty, no VMs are used
- dut - enter in the target DUT that is used in the testbed environment
+- inv_name - inventory file name that contains the definition of the target DUTs
+- auto_recover - (`yes`|`True`|`true`) to recover this testbed when running the server recovery script, (`no`|`False`|`false`) otherwise
- comment - make a little note here
- ansible
- ansible_host - IP address with port number
diff --git a/docs/testbed/README.testbed.Cli.md b/docs/testbed/README.testbed.Cli.md
index 22d65128c80..e9d61dd27e7 100644
--- a/docs/testbed/README.testbed.Cli.md
+++ b/docs/testbed/README.testbed.Cli.md
@@ -12,9 +12,9 @@
## Add/Remove topo
```
-# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
-vms1-1-t1,vms1-1,t1,docker-ptf,ptf-1,10.0.10.5/23,,server_1,VM0100,str-msn2700-11,t1 tests
-vms1-1-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-2,10.0.10.5/23,,server_1,VM0100,str-msn2700-11,t1-lag tests
+# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment
+vms1-1-t1,vms1-1,t1,docker-ptf,ptf-1,10.0.10.5/23,,server_1,VM0100,str-msn2700-11,lab,True,t1 tests
+vms1-1-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-2,10.0.10.5/23,,server_1,VM0100,str-msn2700-11,lab,False,t1-lag tests
```
Goal is to use one VM with different topologies
diff --git a/docs/testbed/README.testbed.Config.md b/docs/testbed/README.testbed.Config.md
index 19d32950464..316c470f851 100644
--- a/docs/testbed/README.testbed.Config.md
+++ b/docs/testbed/README.testbed.Config.md
@@ -25,10 +25,10 @@
### ```testbed.csv``` format
```
-# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
-ptf1-m,ptf1,ptf32,docker-ptf,ptf-1,10.255.0.188/24,,server_1,,str-msn2700-01,Tests ptf
-vms-t1,vms1-1,t1,docker-ptf,ptf-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests vms
-vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests vms
+# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment
+ptf1-m,ptf1,ptf32,docker-ptf,ptf-1,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,Tests ptf
+vms-t1,vms1-1,t1,docker-ptf,ptf-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests vms
+vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests vms
```
@@ -40,6 +40,8 @@ vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-m
- server – server where the testbed resides
- vm_base – first VM for the testbed. If empty, no VMs are used
- dut – target dut name
+- inv_name - inventory file name that contains the definition of the target DUTs
+- auto_recover - (`yes`|`True`|`true`) to recover this testbed when running the server recovery script, (`no`|`False`|`false`) otherwise
- comment – any text here
### ```testbed.csv``` consistency rules
diff --git a/docs/testplan/BGP Convergence Testplan for single DUT.md b/docs/testplan/BGP Convergence Testplan for single DUT.md
new file mode 100644
index 00000000000..cc654192573
--- /dev/null
+++ b/docs/testplan/BGP Convergence Testplan for single DUT.md
@@ -0,0 +1,124 @@
+# BGP convergence test plan for benchmark performance
+
+- [BGP convergence test plan for benchmark performance](#bgp-convergence-test-plan-for-benchmark-performance)
+ - [Overview](#Overview)
+ - [Scope](#Scope)
+ - [Testbed](#Keysight-Testbed)
+ - [Topology](#Topology)
+ - [SONiC switch as ToR](#SONiC-switch-as-ToR)
+ - [SONiC switch as Leaf](#SONiC-switch-as-Leaf)
+ - [Setup configuration](#Setup-configuration)
+ - [Test methodology](#Test-methodology)
+ - [Test cases](#Test-cases)
+ - [Test case # 1 – Convergence performance when remote link fails (route withdraw)](#test-case--1--convergence-performance-when-remote-link-fails-route-withdraw)
+ - [Test objective](#Test-objective)
+ - [Test steps](#Test-steps)
+ - [Test results](#Test-results)
+ - [Test case # 2 – RIB-IN Convergence](#Test-case--2--RIB-IN-Convergence)
+ - [Test objective](#Test-objective-1)
+ - [Test steps](#Test-steps-1)
+ - [Test results](#Test-results-1)
+ - [Call for action](#Call-for-action)
+
+## Overview
+The purpose of these tests is to test the overall convergence of a data center network by simulating multiple network devices such as ToR/Leafs and using SONiC switch DUT as one of the ToR/Leaf, closely resembling production environment.
+
+### Scope
+These tests are targeted on a fully functioning SONiC system. The purpose of these tests is to measure convergence when some unexpected failures such as remote link failure, local link failure, node failure or link faults etc occur and some expected failures such as maintenance or upgrade of devices occur in the SONiC system.
+
+### Keysight Testbed
+The tests will run on following testbeds:
+* t0
+
+![Single DUT Topology ](Img/Single_DUT_Topology.png)
+
+## Topology
+### SONiC switch as ToR
+
+![SONiC DUT as ToR ](Img/Switch_as_ToR.png)
+
+### SONiC switch as Leaf
+
+![SONiC DUT as ToR ](Img/Switch_acting_as_leaf.png)
+
+## Setup configuration
+IPv4 EBGP neighborship will be configured between SONiC DUT and directly connected test ports. Test ports in turn will simulate the ToRs and Leafs by advertising IPv4/IPv6, dual-stack routes.
+
+## Test Methodology
+Following test methodologies will be used for measuring convergence.
+* Traffic generator will be used to configure ebgp peering between chassis ports and SONiC DUT by advertising IPv4/IPv6, dual-stack routes.
+* Receiving ports will be advertising the same VIP(virtual IP) addresses.
+* Data traffic will be sent from server to these VIP addresses.
+* Depending on the test case, the faults will be generated. Local link failures can be simulated on the port by "simulating link down" event.
+* Remote link failures can be simulated by withdrawing the routes.
+* Control to data plane convergence will be measured by noting down the precise time of the control plane event and the data plane event. Convergence will be measured by taking the difference between control and data plane events. Traffic generator will create those events and provide us with the control to data plane convergence value under statistics.
+* RIB-IN Convergence is the time it takes to install the routes in its RIB and then in its FIB to forward the traffic without any loss. In order to measure RIB-IN convergence, initially IPv4/IPv6 routes will not be advertised. Once traffic is sent, IPv4/IPv6 routes will be advertised and the timestamp will be noted. Once the traffic received rate goes above the configured threshold value, it will note down the data plane above threshold timestamp. The difference between these two event timestamps will provide us with the RIB-IN convergence value.
+* Route capacity can be measured by advertising routes in a linear search fashion. By doing this we can figure out the maximum routes a switch can learn and install in its RIB and then in its FIB to forward traffic without any loss.
+
+## Test cases
+### Test case # 1 – Convergence performance when remote link fails (route withdraw)
+#### Test objective
+Measure the convergence time when remote link failure event happens with in the network.
+
+
+
+
+
+
+
+#### Test steps
+* Configure IPv4 EBGP sessions between Keysight ports and the SONiC switch.
+* Advertise IPv4 routes along with AS number via configured IPv4 BGP sessions.
+* Configure and advertise same IPv4 routes from both the test ports.
+* Configure another IPv4 session to send the traffic. This is the server port from which traffic will be sent to the VIP addresses.
+* Start all protocols and verify that IPv4 BGP neighborship is established.
+* Create a data traffic between the server port and receiver ports where the same VIP addresses are configured and enable tracking by "Destination Endpoint" and by "Destination session description".
+* Set the desired threshold value for receiving traffic. By default it will be set to 90% of the expected receiving rate.
+* Apply and start the data traffic.
+* Verify that traffic is equally distributed between the receiving ports without any loss.
+* Simulate remote link failure by withdrawing the routes from one receiving port.
+* Verify that the traffic is re-balanced and use the other available path to route the traffic.
+* Drill down by "Destination Endpoint" under traffic statistics to get the control plane to data plane convergence value.
+* In general the convergence value will fall in certain range. In order to achieve proper results, run the test multiple times and average out the test results.
+* Set it back to default configuration.
+#### Test results
+![Single remote link failure](Img/Single_Remote_Link_Failure.png)
+
+For above test case, below are the test results when multiple remote link fails.
+
+![Multiple link failure](Img/Multi_link_failure.png)
+
+![Multiple remote link failure](Img/Multiple_Remote_Link_Failure.png)
+
+### Test case # 2 – RIB-IN Convergence
+#### Test objective
+Measure the convergence time to install the routes in its RIB and then in its FIB to forward the packets after the routes are advertised.
+
+
+
+
+
+
+#### Test steps
+* Configure IPv4 EBGP sessions between Keysight ports and the SONiC switch.
+* Configure IPv4 routes via configured IPv4 BGP sessions. Initially disable the routes so that they don't get advertised after starting the protocols.
+* Configure the same IPv4 routes from both the test receiving ports.
+* Configure another IPv4 session to send the traffic. This is the server port from which traffic will be sent to the VIP addresses.
+* Start all protocols and verify that IPv4 BGP neighborship is established.
+* Create a data traffic between the server port and receiver ports where the same VIP addresses are configured and enable tracking by "Destination Endpoint" and by "Destination session description".
+* Set the desired threshold value for receiving traffic. By default it will be set to 90% of the expected receiving rate.
+* Apply and start the data traffic.
+* Verify that no traffic is being forwarded.
+* Enable/advertise the routes which are already configured.
+* Control plane event timestamp will be noted down and once the receiving traffic rate goes above the configured threshold value, it will note down the data plane threshold timestamp.
+* The difference between these two event timestamp will provide us with the RIB-IN convergence time.
+* In general the convergence value will fall in certain range. In order to achieve proper results, run the test multiple times and average out the test results.
+* Set it back to default configuration.
+#### Test results
+![RIB-IN Convergence](Img/RIB-IN_convergence_test.png)
+
+In order to measure RIB-IN capacity of the switch, we can follow the same test methodology as RIB-IN convergence test. Below are the results for RIB-IN capacity test.
+
+![RIB-IN Capacity Test](Img/RIB-IN_Capacity_Test.png)
+### Call for action
+* Solicit experience in multi-DUT system test scenarios.
diff --git a/docs/testplan/Distributed-VoQ-Arch-test-plan.md b/docs/testplan/Distributed-VoQ-Arch-test-plan.md
new file mode 100644
index 00000000000..a9480b0edba
--- /dev/null
+++ b/docs/testplan/Distributed-VoQ-Arch-test-plan.md
@@ -0,0 +1,800 @@
+# **Distributed VoQ Architecture Test Plan**
+
+ - [Introduction](#intro)
+ - [References](#reference)
+ - [Debuggability](#debug)
+ - [Test Setup](#test-setup)
+ - [Test Cases](#test-cases)
+ - [System Initialization](#sys_init)
+ - [Neighbors](#arp)
+ - [Router Interfaces](#ri)
+ - [Host IP Connectivity](#ipfwd)
+ - [Inband VLAN](#inbandvlan)
+
+
+# Introduction
+
+This is the test plan for SONIC Distributed VOQ support, as described in the [Distributed VOQ HLD](https://github.com/Azure/SONiC/blob/master/doc/voq/voq_hld.md).
+
+The associated PRs covered in this test plan are:
+
+1. [Distributed VOQ PR 380](https://github.com/Azure/sonic-swss-common/pull/380)
+2. [Distributed VOQ PR 657](https://github.com/Azure/sonic-sairedis/pull/657)
+3. [Distributed VOQ PR 1431](https://github.com/Azure/sonic-swss/pull/1431)
+
+Redis CLI commands will be used for some validation until SONIC CLI commands are available for system port information.
+
+## Scope
+
+The functionality covered in this test plan is:
+* system ports,
+* router interfaces, when configured on multiple cards, and
+* neighbors, when learned on local and remote ports.
+
+Other HLDs in the [Chassis Subgroup feature list](https://github.com/Azure/SONiC/wiki/SONiC-Chassis-Subgroup) will be covered in other test plans.
+
+## Debuggability
+The following are useful commands for validating the testcases that follow.
+
+1. Keys from redis in container when no redis-dump exists:
+
+`docker exec database1 redis-cli -h -n 6 KEYS "*"`
+
+2. Values from redis in container when no redis-dump exists:
+
+`docker exec database1 redis-cli -h -n 6 hgetall "SYSTEM_NEIGH_TABLE|Inband4|3.3.3.5"`
+
+3. Chassis App Database on Supervisor card:
+
+`redis-dump -h -p 6380 -d 12 -y -k "*SYSTEM_INT*"`
+
+
+# Test Setup
+
+These test cases will be run in the proposed [T2 topology](https://github.com/Azure/sonic-mgmt/pull/2638/). It is assumed that such a configuration is deployed on the chassis.
+
+# Test Cases
+
+## System Initialization
+
+#### Test Case 1. System Bringup
+
+##### Test Objective
+Verify VoQ system initializes correctly on startup.
+
+##### Test Steps
+* Configure a VoQ system with valid configuration files and verify the system comes up.
+* Verify supervisor card is up, and all required containers and processes are running.
+* Verify redis on supervisor is running and Chassis AppDB is reachable.
+* Verify line cards are up and reachable from supervisor.
+
+#### Test Case 2. Switch Creation
+##### Test Objective
+Verify ASIC Switch object is correct on all line cards.
+
+##### Test Steps
+* Verify ASIC_DB gets switch object created on all asics and linecards (redis-dump -h -d 1 on each linecard)
+* Verify switch ID, cores, port list in ASIC DB have the same values as the config_db.json file.
+* Verify switch type is voq.
+
+##### Sample output
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:oid:0x21000000000000": {
+ "expireat": 1550863898.649604,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "NULL": "NULL",
+ "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_SEED": "0",
+ "SAI_SWITCH_ATTR_FDB_AGING_TIME": "600",
+ "SAI_SWITCH_ATTR_FDB_EVENT_NOTIFY": "0x55df0bc54540",
+ "SAI_SWITCH_ATTR_INIT_SWITCH": "true",
+ "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_SEED": "0",
+ "SAI_SWITCH_ATTR_MAX_SYSTEM_CORES": "48",
+ "SAI_SWITCH_ATTR_PORT_STATE_CHANGE_NOTIFY": "0x55df0bc54550",
+ "SAI_SWITCH_ATTR_SRC_MAC_ADDRESS": "14:7B:AC:3A:C9:7F",
+ "SAI_SWITCH_ATTR_SWITCH_HARDWARE_INFO": "8:48,52,58,48,48,46,48,0",
+ "SAI_SWITCH_ATTR_SWITCH_ID": "36",
+ "SAI_SWITCH_ATTR_SWITCH_SHUTDOWN_REQUEST_NOTIFY": "0x55df0bc54560",
+ "SAI_SWITCH_ATTR_SYSTEM_PORT_CONFIG_LIST": "{\"count\":304,\"list\":[{\"attached_core_index\":\"0\", etc
+```
+
+#### Test Case 3. System port creation.
+##### Test Objective
+Verify system ports are created on all line cards.
+
+##### Test Steps
+* Verify ASIC_DB gets all system ports referenced in config_db.json created on all hosts and ASICs.
+* Verify object creation and values of port attributes.
+
+##### Sample output
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_SYSTEM_PORT:oid:0x5d0000000000e4": {
+ "expireat": 1550863898.617927,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "NULL": "NULL",
+ "SAI_SYSTEM_PORT_ATTR_CONFIG_INFO": "{\"attached_core_index\":\"0\",\"attached_core_port_index\":\"20\",\"attached_switch_id\":\"18\",\"num_voq\":\"8\",\"port_id\":\"596\",\"speed\":\"400000\"}"
+ }
+ },
+```
+
+
+#### Test Case 4. Local Ports
+##### Test Objective
+Verify local ports are created on all line cards.
+
+##### Test Steps
+* Verify ASIC_DB has host interface information for all local ports on all cards and ASICs.
+* Verify host interfaces exist on host CLI (ifconfig).
+* Verify interfaces exist in show interfaces on the linecard.
+
+##### Sample output
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd00000000126b": {
+ "expireat": 1550863898.591804,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_HOSTIF_ATTR_NAME": "Ethernet0",
+ "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x1000000000002",
+ "SAI_HOSTIF_ATTR_OPER_STATUS": "false",
+ "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV"
+ }
+ },
+```
+```
+admin@dut1-imm2:~$ sudo ifconfig Ethernet0
+Ethernet0: flags=4098 mtu 9100
+ ether 14:7b:ac:3a:c9:7f txqueuelen 1000 (Ethernet)
+ RX packets 0 bytes 0 (0.0 B)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 0 bytes 0 (0.0 B)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+```
+
+#### Test Case 5. Router Interface Creation
+##### Test Objective
+Verify router interfaces are created on all line cards and present in Chassis App Db.
+
+##### Test Steps
+* Verify router interface creation on local ports in ASIC DB.
+* PORT_ID should match system port table and traced back to config_db.json, mac and MTU should match as well.
+* Verify SYSTEM_INTERFACE table in Chassis AppDb (redis-dump -h -p 6380 -d 12 on supervisor).
+* Verify creation interfaces with different MTUs in config_db.json.
+* Verify creation of different subnet masks in config_db.json.
+* Repeat with IPv4, IPv6, dual-stack.
+
+##### Sample output
+ASIC:
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x60000000012b3": {
+ "expireat": 1550863898.6557322,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100",
+ "SAI_ROUTER_INTERFACE_ATTR_PORT_ID": "oid:0x5d00000000015a",
+ "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": "14:7B:AC:3A:C9:7F",
+ "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_PORT",
+ "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000027"
+ }
+ },
+```
+
+Chassis AppDB:
+
+```
+ "SYSTEM_INTERFACE|Slot7|Asic0|Ethernet24": {
+ "expireat": 1605628181.7629092,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "rif_id": "oid:0x19000600001499"
+ }
+ },
+```
+
+#### Test Case 6. Inband Configuration Type
+
+##### Test Objective
+Verify inband ports, neighbors, and routes are setup as in device configuration.
+
+##### Test Steps
+
+* Configure system in inband port mode.
+```
+"VOQ_INBAND_INTERFACE": {
+ "Inband3": {
+ "inband_type": "port"
+ },
+ "Inband3|133.133.133.4/32": {}
+},
+```
+* On each linecard, verify inband ports are present in ASICDB.
+* On each linecard, verify inband router interfaces are present in ASICDB
+* On supervisor card, verify inband router interfaces are present in Chassis App DB
+* On each linecard, verify permanent neighbors for all inband ports.
+* On each linecard, verify kernel routes for all inband ports.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+#### Test Case 7. Local Neighbors
+
+##### Test Objective
+Verify neighbor entries are created on linecards for locally adjacent VMS.
+
+##### Test Steps
+* ARP/NDP should be resolved when BGP to adjacent VMs is established.
+* On local linecard, verify ASIC DB entries.
+ * MAC address matches MAC of neighbor VM.
+ * Router interface OID matches back to the correct interface and port the neighbor was learned on.
+* On local linecard, verify show arp/ndp, ip neigh commands.
+ * MAC address matches MAC of neighbor VM.
+* On local linecard, verify neighbor table in appDB.
+ * MAC address matches MAC of neighbor VM.
+* On supervisor card, verify SYSTEM_NEIGH table in Chassis AppDB (redis-dump -h -p 6380 -d 12 on supervisor).
+ * Verify encap index and MAC address match between ASICDB and the Chassis AppDB
+* Repeat with IPv4, IPv6, dual-stack.
+
+##### Sample output
+* Asic:
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"102.0.0.1\",\"rif\":\"oid:0x6000000001290\",\"switch_id\":\"oid:0x21000000000000\"}": {
+ "expireat": 1550863898.638045,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS": "6E:3A:88:CF:C6:2A",
+ "SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX": "1074790407"
+ }
+ },
+
+ "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP:oid:0x40000000012c2": {
+ "expireat": 1550863898.637784,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_NEXT_HOP_ATTR_IP": "102.0.0.1",
+ "SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID": "oid:0x6000000001290",
+ "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_IP"
+ }
+ },
+```
+* AppDb:
+```
+ "NEIGH_TABLE:Ethernet8:102.0.0.1": {
+ "expireat": 1550863889.965874,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "family": "IPv4",
+ "neigh": "6e:3a:88:cf:c6:2a"
+ }
+ },
+```
+* Chassis AppDb:
+```
+ "SYSTEM_NEIGH|Slot7|Asic0|Ethernet8:102.0.0.1": {
+ "expireat": 1605628181.762964,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "encap_index": "1074790407",
+ "neigh": "6e:3a:88:cf:c6:2a"
+ }
+ },
+```
+
+#### Test Case 8. Remote Neighbors
+
+##### Test Objective
+Verify when local neighbors are established on a linecard, other linecards in the VoQ system will be programmed with neighbor entries.
+
+##### Test Steps
+* When local neighbors are established as in the Local Neighbor testcase, corresponding entries will be established on all other line cards. On each remote card, verify:
+* Verify ASIC DB entries on remote linecards.
+ * Verify impose index=True in ASIC DB.
+ * Verify MAC address in ASIC DB is the remote neighbor mac.
+ * Verify encap index for ASIC DB entry matches Chassis App DB.
+ * Verify router interface OID matches the interface the neighbor was learned on.
+* Verify on linecard CLI, show arp/ndp, ip neigh commands.
+ * For inband port, MAC should be inband port mac in kernel table and LC appDb.
+ * For inband vlan mode, MAC will be remote ASIC mac in kernel table and LC appdb.
+* Verify neighbor table in linecard appdb.
+* Verify static route is installed in kernel routing table with /32 (or /128 for IPv6) for neighbor entry.
+* Repeat with IPv4, IPv6, dual-stack.
+
+##### Sample Output
+* Asic DB
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"101.0.0.1\",\"rif\":\"oid:0x60000000012b3\",\"switch_id\":\"oid:0x21000000000000\"}": {
+ "expireat": 1550863898.651915,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS": "4E:49:E4:62:ED:88",
+ "SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_IMPOSE_INDEX": "true",
+ "SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX": "1074790407",
+ "SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL": "false"
+ }
+ },
+
+ "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP:oid:0x40000000012c0": {
+ "expireat": 1550863898.6276,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_NEXT_HOP_ATTR_IP": "101.0.0.1",
+ "SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID": "oid:0x60000000012b3",
+ "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_IP"
+ }
+ },
+```
+* App DB
+```
+ "NEIGH_TABLE:Inband18:101.0.0.1": {
+ "expireat": 1550863889.96545,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "family": "IPv4",
+ "neigh": "14:7b:ac:3a:c9:7f"
+ }
+ },
+```
+* Chassis App DB
+```
+"SYSTEM_NEIGH|Slot8|Asic0|Ethernet23:101.0.0.1": {
+"expireat": 1605628181.7629762,
+"ttl": -0.001,
+"type": "hash",
+"value": {
+ "encap_index": "1074790407",
+ "neigh": "4e:49:e4:62:ed:88"
+}
+},
+```
+
+* Host
+```
+show ip route
+K>* 101.0.0.1/32 [0/0] is directly connected, Inband18, 20:55:26
+
+netstat -rn
+101.0.0.1 0.0.0.0 255.255.255.255 UH 0 0 0 Inband18
+```
+
+
+
+## Neighbor Lifecycle
+
+### Preconditions
+
+In order to verify neighbor behaviors, BGP sessions on the DUT and attached VMs will be temporarily shutdown. This
+will allow the tests to validate the various table deletes before the entries are recreated.
+
+
+### Test cases
+
+#### Test Case 1. Neighbor established from a remote card.
+
+##### Test Objective
+Verify local neighbor behavior is correct when ARP/NDP request is triggered by a packet from a remote line card.
+
+##### Test Steps
+* Send ping to from linecard 1 to a VM attached to linecard 2. This will establish a local neighbor on linecard 2 and a remote neighbor on linecard 1.
+
+* On linecard 2:
+ * Verify ASIC DB entries on local linecard.
+ * MAC address matches MAC of neighbor VM.
+ * Router interface OID matches back to the correct interface and port the neighbor was learned on.
+ * Verify on local linecard CLI, show arp/ndp, ip neigh commands.
+ * MAC address matches MAC of neighbor VM.
+ * Verify neighbor table in linecard appDB.
+ * MAC address matches MAC of neighbor VM.
+
+* On supervisor card:
+ * Verify SYSTEM_NEIGH table in Chassis AppDB (redis-dump -h -p 6380 -d 12 on supervisor).
+ * Verify encap index and MAC address match between ASICDB and the Chassis AppDB
+
+* On linecard 1:
+ * Verify ASIC DB entries as a remote neighbor.
+ * Verify impose index=True in ASIC DB.
+ * Verify MAC address in ASIC DB is the remote neighbor mac.
+ * Verify encap index for ASIC DB entry matches Chassis App DB.
+ * Verify router interface OID matches the interface the neighbor was learned on.
+ * Verify on linecard CLI, show arp/ndp, ip neigh commands.
+ * For inband port, MAC should be inband port mac in kernel table and LC appDb.
+ * For inband vlan mode, MAC will be remote ASIC mac in kernel table and LC appdb.
+ * Verify neighbor table in linecard appdb.
+ * Verify static route in kernel with /32 for entry.
+
+#### Test Case 2. Clear ARP, single address.
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly deleted when a single neighbor adjacency is cleared.
+##### Test Steps
+* On local linecard:
+ * Clear single address with command: `ip neigh flush to "addr"`.
+ * Verify ARP/NDP entry removed from CLI.
+ * Verify table entries in ASIC, AppDb are removed for only the cleared address.
+* On Supervisor card:
+ * Verify Chassis App DB entry are removed for only the cleared address.
+* On remote linecards:
+ * Verify table entries in ASICDB, APPDB, and host ARP table are removed.
+ * Verify kernel route for cleared address is deleted.
+* Restart traffic, verify relearn.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+#### Test Case 3. Clear ARP table via sonic command.
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly deleted when the entire neighbor table is cleared.
+##### Test Steps
+* On local linecard:
+ * Issue `sonic-clear arp` command, and verify all addresses are removed and kernel routes are deleted on all hosts and ASICs.
+ * Verify ARP/NDP entries are removed from CLI.
+ * Verify table entries in ASIC, AppDb are removed for all cleared addresses.
+* On Supervisor card:
+ * Verify Chassis App DB entry are removed for only the cleared address. Entries for addresses on other line cards
+ should still be present.
+* On remote linecards:
+ * Verify table entries in ASICDB, APPDB, and host ARP table are removed for cleared addresses.
+ * Verify kernel routes for cleared address are deleted.
+* Send full mesh traffic and verify relearn and DB.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+#### Test Case 4. Front panel port link flap
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly deleted when the front panel port flaps.
+##### Test Steps
+* Admin down interface on fanout to cause LOS on DUT.
+* On local linecard:
+ * Verify ARP/NDP entries are removed from CLI for neighbors on down port.
+ * Verify table entries in ASIC, AppDb are removed for addresses on down port.
+* On Supervisor card:
+ * Verify Chassis App DB entry are removed for only the cleared address. Entries for addresses on other line cards
+ should still be present.
+* On remote linecards:
+ * Verify table entries in ASICDB, APPDB, and host ARP table are removed for cleared addresses.
+ * Verify kernel routes for cleared address are deleted.
+* Admin interface up, verify recreation after restarting traffic.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+#### Test Case 5. Gratuitous ARP - Previously Known IP
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly updated when receiving an unsolicited ARP packet for a previously known IP address.
+##### Test Steps
+* Clear ARP table on a line card.
+* Send unsolicited ARP packet into linecard for an IP that was known on that card and is now stale.
+* On local linecard:
+ * Verify table entries in local ASIC, APP, host arp table are recreated.
+* On supervisor card:
+ * Verify Chassis App DB entry is correct for the relearned address.
+* On remote linecards:
+ * Verify table entries in remote hosts/ASICs in ASICDB, APPDB, and host ARP table are recreated.
+ * Verify kernel routes in remote hosts are still present.
+* Verify that packets can be sent from local and remote linecards to learned address.
+* Repeat with IPv4, IPv6, dual-stack.
+
+#### Test Case 6. Gratuitous ARP - Known IP - Mac change
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly updated when a unsolicited ARP packet changes the MAC address of learned neighbor.
+##### Test Steps
+* Send unsolicited ARP packet into DUT for an IP known by DUT with a different MAC address for the neighbor.
+* Change the MAC address of the neighbor VM.
+* On local linecard:
+ * Verify table entries in local ASIC, APP, and host ARP table are updated with new MAC.
+* On supervisor card:
+ * Verify Chassis App DB entry is correct for with the updated MAC address.
+* On remote linecards:
+ * Verify table entries in remote hosts/ASICs in APPDB, and host ARP table are still present with inband MAC address
+ * Verify ASIC DB is updated with new MAC.
+ * Verify kernel route in remote hosts are still present to inband port.
+* Verify that packets can be sent from local and remote linecards to learned address.
+* Repeat with IPv4, IPv6, dual-stack.
+
+#### Test Case 7. ARP Request/Reply - Mac change
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly updated when the MAC address of a neighbor changes and is updated via request/reply exchange.
+##### Test Steps
+* Change the MAC address on a remote host that is already present in the ARP table.
+* Without clearing the entry in the DUT, allow the existing entry to time out and the new reply to have the new MAC address.
+* On local linecard:
+ * Verify table entries in local ASIC, APP, and host ARP table are updated with new MAC.
+* On supervisor card:
+ * Verify Chassis App DB entry is correct for with the updated MAC address.
+* On remote linecards:
+ * Verify table entries in remote hosts/ASICs in APPDB, and host ARP table are still present with inband MAC address
+ * Verify ASIC DB is updated with new MAC.
+ * Verify kernel route in remote hosts are still present to inband port.
+* Verify that packets can be sent from local and remote linecards to the learned address.
+* Repeat with IPv4, IPv6, dual-stack.
+
+#### Test Case 8. Disruptive Events
+##### Test Objective
+Verify port, router interface, and neighbor recovery after disruptive events.
+##### Test Steps
+* After the following events:
+ * chassis power cycle,
+ * supervisor reboot,
+* Verify, as in the previous test cases:
+ * Local neighbor learning,
+ * remote neighbor learning and route creation
+ * timeout and clear of neighbors
+
+
+## Router Interface Lifecycle
+
+#### Test Case 1. IP Interface Creation
+##### Test Objective
+Verify Chassis App DB is updated with new interface entry when a new IP Interface is added.
+##### Test Steps
+* Add IP to a previously unconfigured port by adding minigraph configuration to that linecard.
+* Reload the new minigraph and line card.
+* On the line card:
+ * Verify address state in CLI.
+ * Verify interface in ASIC DB
+* On the supervisor card:
+ * Verify the interface is present in the SYSTEM_INTERFACE table of the Chassis App DB.
+ * Verify the OID is unique, and matches the router interface ID in the ASIC DB.
+ * Verify the slot and port are correct.
+* Verify bidirectional traffic to an attached host on the newly created port from local and remote linecards.
+* Repeat with IPv4, IPv6, dual-stack.
+
+#### Test Case 2. Interface Deletion
+##### Test Objective
+Verify Chassis App DB is updated with new interface entry when an IP interface is removed from a port.
+##### Test Steps
+* Remove IP configuration from a previously configured port by removing the minigraph configuration for that port
+on the linecard minigraph.
+* Reload the new minigraph and line card.
+* On the line card:
+ * Verify address is removed from CLI.
+ * Verify interface is removed from ASIC DB.
+* On the supervisor card:
+ * Verify the interface is removed from the SYSTEM_INTERFACE table of the Chassis App DB.
+* Verify bidirectional traffic to attached host on the port from local and remote ASICs is dropped.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+## Host IP Forwarding
+
+
+### Configuration
+
+Please reference the [T2 topology](https://github.com/Azure/sonic-mgmt/pull/2638/) files topo_t2.yml and testbed-t2.png for network topology and sample IP addresses. The addresses and VMS below are taken from that example topology.
+
+VMs attached to line card 1 and line card 2 will be used for this test.
+DUT Port A&B are on line card 1, D is on line card 2.
+```
+ ---------- DUT ----------
+ |--- LC1 ---|--- LC2 ---|
+VM01T3 -------------|A | |
+ | F0|F1 D|------------- VM01T1
+VM02T3 -------------|B LB1 | LB2 |
+```
+
+_VM01T3_
+* Loopbacks:
+ * ipv4: `100.1.0.1/32`
+ * ipv6: `2064:100::1/128`
+* Ethernet:
+ * ipv4: `10.0.0.1/31`
+ * ipv6: `FC00:2/126`
+
+
+_VM02T3_
+* Loopbacks:
+ * ipv4: `100.1.0.2/32`
+ * ipv6: `2064:100::2/128`
+* Ethernet:
+ * ipv4: `10.0.0.3/31`
+ * ipv6: `FC00:6/126`
+
+_VM01T1_
+* Loopbacks:
+ * ipv4: `100.1.0.33/32`
+ * ipv6: `2064:100::21/128`
+* Ethernet:
+ * ipv4: `10.0.0.65/31`
+ * ipv6: `FC00:82/126`
+
+_DUT_
+
+* Linecard 1
+ * Port A (to VM01T3)
+ * `10.0.0.0/31`
+ * `FC00:1/126`
+ * Port B (to VM02T3)
+ * `10.0.0.2/31`
+ * `FC00:5/126`
+ * Inband IP ( Port F0)
+ * `133.133.133.1`
+ * `2064:133::1`
+ * Loopback LB1
+ * `11.1.0.1/32`
+ * `2064:111::1/128`
+* Linecard 2
+ * Port D (to VM01T1)
+ * `10.0.0.64/31`
+ * `FC00:81/126`
+ * Inband IP (Port F1)
+ * `133.133.133.5`
+ * `2064:133::5`
+ * Loopback LB2
+ * `11.1.0.2/32`
+ * `2064:111::2/128`
+
+#### Test Case 1. Table Verification
+##### Test Objective
+Verify the kernel route table is correct based on the topology.
+##### Test Steps
+* Verify routes for local addresses on both line cards are directly connected.
+* Verify routes for local inband interfaces are directly connected.
+* Verify BGP established between line cards.
+* Verify routes of remote linecard inband interfaces are connected via local linecard inband interface.
+* Verify IP interface addresses on remote network ports have a next hop of their inband IP. On linecard 1, route 10.0.0.64/31 next hop is 133.133.133.5.
+* Verify all learned prefixes from neighbors have their neighbors as next hop.
+* Repeat for IPv4 only, IPv6 only, dual-stack.
+
+#### Test Case 2. Router Interface to Router Interface
+##### Test Objective
+Verify Host IP forwarding for IPv4 and IPv6 for various packet sizes and ttls to local line card interfaces.
+##### Test Steps
+* On linecard 1, send ping from:
+ * DUT IP interface A to DUT IP Interface B. (10.0.0.0 to 10.0.0.2)
+ * DUT IP interface A to DUT IP Interface D. (10.0.0.0 to 10.0.0.64)
+* On linecard 2, send ping from:
+ * DUT IP interface D to DUT IP Interface A.
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 3. Router Interface to neighbor addresses
+##### Test Objective
+Verify Host IP forwarding for IPv4 and IPv6 for various packet sizes and ttls to neighbor addresses.
+##### Test Steps
+* On linecard 1, send ping from:
+ * DUT IP Interface on port A to directly connected neighbor address. (10.0.0.0 to 10.0.0.1)
+ * DUT IP Interface A to neighbor address on port B. (10.0.0.0 to 10.0.0.3)
+ * DUT IP Interface A to neighbor address on port D. (10.0.0.0 to 10.0.0.65)
+* On linecard 2, send ping from:
+ * DUT IP interface D to neighbor address on port A. (10.0.0.64 to 10.0.0.1)
+* On Router 01T3, send ping from:
+ * Router IP interface to DUT address on port A. (10.0.0.1 to 10.0.0.0)
+ * Router IP interface to DUT address on port D. (10.0.0.1 to 10.0.0.64)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 4. Router Interface to routed addresses.
+##### Test Objective
+Verify Host IP forwarding for IPv4 and IPv6 for various packet sizes and ttls to learned route addresses.
+##### Test Steps
+* On linecard 1, send ping from:
+ * DUT IP Interface A to routed loopback address from router 01T3. (10.0.0.0 to 100.1.0.1)
+ * DUT IP Interface A to routed loopback address from router 02T3. (10.0.0.0 to 100.1.0.2)
+ * DUT IP Interface A to routed loopback address from router 01T1. (10.0.0.0 to 100.1.0.33)
+* On linecard 2, send ping from:
+ * DUT IP interface D to routed loopback address from router 01T3. (10.0.0.64 to 100.1.0.1)
+* On Router 01T3, send ping from:
+ * Router loopback interface to DUT address on port A. (100.1.0.1 to 10.0.0.0)
+ * Router loopback interface to DUT address on port D. (100.1.0.1 to 10.0.0.64)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 5. Inband Router Interface connectivity
+##### Test Objective
+Verify IP connectivity over inband interfaces.
+##### Test Steps
+* On linecard 1 send ping from:
+ * Inband interface F0 to inband interface F1 (133.133.133.1 to 133.133.133.5)
+ * Inband interface F0 to interface D (133.133.133.1 to 10.0.0.64)
+ * Inband interface F0 to neighbor on port A (133.133.133.1 to 10.0.0.1)
+ * Inband interface F0 to neighbor on port D (133.133.133.1 to 10.0.0.65)
+ * Inband interface F0 to routed loopback from router 01T3 (133.133.133.1 to 100.1.0.1)
+ * Inband interface F0 to routed loopback from router 01T1 (133.133.133.1 to 100.1.0.33)
+* On linecard 2, send ping from:
+ * Inband interface F1 to inband interface F0 (133.133.133.5 to 133.133.133.1)
+ * Inband interface F1 to interface D (133.133.133.5 to 10.0.0.64)
+ * Inband interface F1 to neighbor on port A (133.133.133.5 to 10.0.0.1)
+ * Inband interface F1 to neighbor on port D (133.133.133.5 to 10.0.0.65)
+ * Inband interface F1 to routed loopback from router 01T3 (133.133.133.5 to 100.1.0.1)
+ * Inband interface F1 to routed loopback from router 01T1 (133.133.133.5 to 100.1.0.33)
+* On Router 01T3, send ping from:
+ * Router loopback interface to DUT inband address on linecard 1. (100.1.0.1 to 133.133.133.1)
+ * Router loopback interface to DUT inband address on linecard 2. (100.1.0.1 to 133.133.133.5)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 6. Line card loopback interface connectivity
+##### Test Objective
+Verify IP Connectivity to DUT loopback addresses.
+##### Test Steps
+* On linecard 1 send ping from:
+ * Loopback to IP interface of port D (11.1.0.1 to 10.0.0.64)
+ * Loopback to neighbor on port D (11.1.0.1 to 10.0.0.65)
+ * Loopback to routed loopback address (11.1.0.1 to 100.1.0.1)
+ * Loopback to routed loopback address (11.1.0.1 to 100.1.0.33)
+* On Router 01T3, send ping from:
+ * Router loopback interface to DUT loopback address on linecard 1. (100.1.0.1 to 11.1.0.1)
+ * Router loopback interface to DUT loopback address on linecard 2. (100.1.0.1 to 11.1.0.2)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 7. End to End traffic.
+##### Test Objective
+Verify end to end routing IPv4/v6, packet sizes, ttl(0,1,2,255)
+##### Test Steps
+* On Router 01T3, send ping from:
+ * End to end port A to B, ports on same linecard. (100.1.0.1 to 100.1.0.2)
+ * End to end port A to D, ports across multiple linecards. (100.1.0.1 to 100.1.0.33)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 8. Front Panel port link flap
+##### Test Objective
+Traffic to Sonic host interfaces recovers after the front panel port flaps.
+##### Test Steps
+* Admin down interface on fanout connected to DUT port A to cause LOS on DUT.
+* On linecard 1 verify ping is successful from:
+ * DUT IP Interface B to DUT Interface D
+ * DUT Neighbor IP B to DUT Neighbor IP D
+* On Router 02T3, verify ping is successful from Router Interface to DUT IP Interface B and D.
+* On linecard 1, verify ping fails from:
+ * DUT IP Interface A to DUT IP interface B and D.
+ * DUT IP Interface A to attached neighbor.
+* On Router 01T3, verify ping fails to all DUT addresses.
+* On fanout switch, admin up the downed interface.
+* Validate all traffic flows are correct as in test cases 2-7.
+* Retry traffic with TTL 0,1,2,255
+* Retry traffic with 64, 1500, 9100B packets
+* Retry traffic with IPv6
+
+## VLAN Inband Mode
+
+#### Test Case 1. Inband VLAN mode configuration.
+##### Test Objective
+Verify system initialization in Inband VLAN mode.
+##### Test Steps
+* Verify vlan inband interface is used when in this mode.
+* Verify correct VLAN ID is used on all nodes.
+* On each linecard, verify inband VLAN router interfaces are present in ASICDB
+* On supervisor card, verify inband VLAN router interfaces are present in Chassis App DB
+
+#### Test Case 2. Inband VLAN neighbors
+##### Test Objective
+Verify neighbor adjacency as in [arp](#arp). Inband port will be replaced with VLAN interface as neighbor interface.
+##### Test Steps
+* Repeat tests for:
+ * Local neighbor learning,
+ * remote neighbor learning and route creation
+ * timeout and clearing of neighbors
+
+#### Test Case 3. Inband VLAN host connectivity
+##### Test Objective
+Verify host reachability as in [Host IP Connectivity](#ipfwd). VLAN interface will replace inband port as next hop.
+##### Test Steps
+* Repeat traffic tests for:
+ * router interface to remote ports,
+ * router interface to local and remote neighbors,
+ * router interface to learned routes.
+ * inband interface to all addresses.
+ * DUT loopback interface to all addresses.
+
+#### Test Case 4. Mode Switch.
+##### Test Objective
+Verify VoQ system can be switched between modes when configuration is replaced.
+##### Test Steps
+* Regenerate configuration of VoQ system, switching device from inband port to inband VLAN.
+* Reboot the chassis.
+* Verify system is stable in new mode.
+* Restore to inband port mode.
diff --git a/docs/testplan/Img/Failover_convergence.png b/docs/testplan/Img/Failover_convergence.png
new file mode 100644
index 00000000000..9d710dfd3e4
Binary files /dev/null and b/docs/testplan/Img/Failover_convergence.png differ
diff --git a/docs/testplan/Img/Multi_link_failure.png b/docs/testplan/Img/Multi_link_failure.png
new file mode 100644
index 00000000000..c560171b644
Binary files /dev/null and b/docs/testplan/Img/Multi_link_failure.png differ
diff --git a/docs/testplan/Img/Multiple_Remote_Link_Failure.png b/docs/testplan/Img/Multiple_Remote_Link_Failure.png
new file mode 100644
index 00000000000..c6c0583dd42
Binary files /dev/null and b/docs/testplan/Img/Multiple_Remote_Link_Failure.png differ
diff --git a/docs/testplan/Img/RIB-IN-Convergence_Topology.png b/docs/testplan/Img/RIB-IN-Convergence_Topology.png
new file mode 100644
index 00000000000..d822b2a43cf
Binary files /dev/null and b/docs/testplan/Img/RIB-IN-Convergence_Topology.png differ
diff --git a/docs/testplan/Img/RIB-IN_Capacity_Test.png b/docs/testplan/Img/RIB-IN_Capacity_Test.png
new file mode 100644
index 00000000000..133adb324c9
Binary files /dev/null and b/docs/testplan/Img/RIB-IN_Capacity_Test.png differ
diff --git a/docs/testplan/Img/RIB-IN_Convergence_graph.png b/docs/testplan/Img/RIB-IN_Convergence_graph.png
new file mode 100644
index 00000000000..6c57bcad7b0
Binary files /dev/null and b/docs/testplan/Img/RIB-IN_Convergence_graph.png differ
diff --git a/docs/testplan/Img/RIB-IN_convergence_test.png b/docs/testplan/Img/RIB-IN_convergence_test.png
new file mode 100644
index 00000000000..a0237853e7e
Binary files /dev/null and b/docs/testplan/Img/RIB-IN_convergence_test.png differ
diff --git a/docs/testplan/Img/Single_DUT_Topology.png b/docs/testplan/Img/Single_DUT_Topology.png
new file mode 100644
index 00000000000..d27866ebfb0
Binary files /dev/null and b/docs/testplan/Img/Single_DUT_Topology.png differ
diff --git a/docs/testplan/Img/Single_Link_Failure.png b/docs/testplan/Img/Single_Link_Failure.png
new file mode 100644
index 00000000000..7cfc87ea038
Binary files /dev/null and b/docs/testplan/Img/Single_Link_Failure.png differ
diff --git a/docs/testplan/Img/Single_Remote_Link_Failure.png b/docs/testplan/Img/Single_Remote_Link_Failure.png
new file mode 100644
index 00000000000..1303c68b7d1
Binary files /dev/null and b/docs/testplan/Img/Single_Remote_Link_Failure.png differ
diff --git a/docs/testplan/Img/Switch_acting_as_leaf.png b/docs/testplan/Img/Switch_acting_as_leaf.png
new file mode 100644
index 00000000000..4461e3cf5cd
Binary files /dev/null and b/docs/testplan/Img/Switch_acting_as_leaf.png differ
diff --git a/docs/testplan/Img/Switch_as_ToR.png b/docs/testplan/Img/Switch_as_ToR.png
new file mode 100644
index 00000000000..dc15218b7e7
Binary files /dev/null and b/docs/testplan/Img/Switch_as_ToR.png differ
diff --git a/docs/testplan/SNMP-v2mib-test-plan.md b/docs/testplan/SNMP-v2mib-test-plan.md
new file mode 100644
index 00000000000..d04fc4ddbdd
--- /dev/null
+++ b/docs/testplan/SNMP-v2mib-test-plan.md
@@ -0,0 +1,29 @@
+# SNMP-v2mib test plan
+
+* [Overview](#overview)
+  * [Scope](#scope)
+  * [Testbed](#testbed)
+* [Setup configuration](#setup-configuration)
+* [Test cases](#test-cases)
+
+## Overview
+The purpose is to test that SNMPv2-MIB objects are functioning properly on the SONIC switch DUT.
+
+### Scope
+The test is targeting a running SONIC system with fully functioning configuration. The purpose of the test is not to test specific API, but functional testing of SNMP on SONIC system.
+
+### Testbed
+The test will run on any testbed.
+
+## Setup configuration
+This test requires no specific setup.
+
+## Test
+Retrieve facts for a device using SNMP, and compare it to system values.
+
+## Test cases
+### Test case test_snmp_v2mib
+#### Test steps
+* Retrieve facts for a device using SNMP
+* Get expected values for a device from system.
+* Compare that facts received by SNMP are equal to values received from system.
diff --git a/tests/bgp/bgp_helpers.py b/tests/bgp/bgp_helpers.py
index 1e6e07e477d..3f5e4217f67 100644
--- a/tests/bgp/bgp_helpers.py
+++ b/tests/bgp/bgp_helpers.py
@@ -1,6 +1,7 @@
import os
import re
import time
+import json
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = os.path.join('tmp', os.path.basename(BASE_DIR))
@@ -9,6 +10,14 @@
BGP_NO_EXPORT_TEMPLATE = 'bgp_no_export.j2'
BGP_CONFIG_BACKUP = 'backup_bgpd.conf.j2'
DEFAULT_BGP_CONFIG = 'bgp:/usr/share/sonic/templates/bgpd/bgpd.conf.j2'
+DUMP_FILE = "/tmp/bgp_monitor_dump.log"
+CUSTOM_DUMP_SCRIPT = "bgp/bgp_monitor_dump.py"
+CUSTOM_DUMP_SCRIPT_DEST = "/usr/share/exabgp/bgp_monitor_dump.py"
+BGPMON_TEMPLATE_FILE = 'bgp/templates/bgp_template.j2'
+BGPMON_CONFIG_FILE = '/tmp/bgpmon.json'
+BGP_MONITOR_NAME = "bgp_monitor"
+BGP_MONITOR_PORT = 7000
+BGP_ANNOUNCE_TIME = 30 #should be enough to receive and parse bgp updates
def apply_bgp_config(duthost, template_name):
@@ -74,3 +83,38 @@ def apply_default_bgp_config(duthost, copy=False):
# Skip 'start-limit-hit' threshold
duthost.shell('systemctl reset-failed bgp')
restart_bgp(duthost)
+
+def parse_exabgp_dump(host):
+ """
+ Parse the dump file of exabgp, and build a set for checking routes
+ """
+ routes = set()
+ output_lines = host.shell("cat {}".format(DUMP_FILE))['stdout_lines']
+ for line in output_lines:
+ routes.add(line)
+ return routes
+
+def parse_rib(host, ip_ver):
+ """
+ Parse output of 'show bgp ipv4/6' and parse into a dict for checking routes
+ """
+ routes = {}
+ cmd = "vtysh -c \"show bgp ipv%d json\"" % ip_ver
+ route_data = json.loads(host.shell(cmd)['stdout'])
+ for ip, nexthops in route_data['routes'].iteritems():
+ aspath = set()
+ for nexthop in nexthops:
+ aspath.add(nexthop['path'])
+ routes[ip] = aspath
+ return routes
+
+def verify_all_routes_announce_to_bgpmon(duthost, ptfhost):
+ time.sleep(BGP_ANNOUNCE_TIME)
+ bgpmon_routes = parse_exabgp_dump(ptfhost)
+ rib_v4 = parse_rib(duthost, 4)
+ rib_v6 = parse_rib(duthost, 6)
+ routes_dut = dict(rib_v4.items() + rib_v6.items())
+ for route in routes_dut.keys():
+ if route not in bgpmon_routes:
+ return False
+ return True
diff --git a/tests/bgp/conftest.py b/tests/bgp/conftest.py
index 2bd19432f42..3bb2dbba83c 100644
--- a/tests/bgp/conftest.py
+++ b/tests/bgp/conftest.py
@@ -7,11 +7,13 @@
import pytest
import random
+from jinja2 import Template
from tests.common.helpers.assertions import pytest_assert as pt_assert
from tests.common.helpers.generators import generate_ips
from tests.common.helpers.parallel import parallel_run
from tests.common.helpers.parallel import reset_ansible_local_tmp
from tests.common.utilities import wait_until
+from tests.common.utilities import wait_tcp_connection
from tests.common import config_reload
from bgp_helpers import define_config
from bgp_helpers import apply_default_bgp_config
@@ -19,6 +21,7 @@
from bgp_helpers import TEMPLATE_DIR
from bgp_helpers import BGP_PLAIN_TEMPLATE
from bgp_helpers import BGP_NO_EXPORT_TEMPLATE
+from bgp_helpers import DUMP_FILE, CUSTOM_DUMP_SCRIPT, CUSTOM_DUMP_SCRIPT_DEST, BGPMON_TEMPLATE_FILE, BGPMON_CONFIG_FILE, BGP_MONITOR_NAME, BGP_MONITOR_PORT
logger = logging.getLogger(__name__)
@@ -294,3 +297,47 @@ def backup_bgp_config(duthost):
except Exception:
config_reload(duthost)
apply_default_bgp_config(duthost)
+
+@pytest.fixture(scope="module")
+def bgpmon_setup_teardown(ptfhost, duthost, localhost, setup_interfaces):
+ connection = setup_interfaces[0]
+ dut_lo_addr = connection['local_addr'].split("/")[0]
+ peer_addr = connection['neighbor_addr'].split("/")[0]
+ mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
+ asn = mg_facts['minigraph_bgp_asn']
+ # TODO: Add a common method to load BGPMON config for test_bgpmon and test_traffic_shift
+ logger.info("Configuring bgp monitor session on DUT")
+ bgpmon_args = {
+ 'db_table_name': 'BGP_MONITORS',
+ 'peer_addr': peer_addr,
+ 'asn': asn,
+ 'local_addr': dut_lo_addr,
+ 'peer_name': BGP_MONITOR_NAME
+ }
+ bgpmon_template = Template(open(BGPMON_TEMPLATE_FILE).read())
+ duthost.copy(content=bgpmon_template.render(**bgpmon_args),
+ dest=BGPMON_CONFIG_FILE)
+ # Start bgpmon on DUT
+ logger.info("Starting bgpmon on DUT")
+ duthost.command("sonic-cfggen -j {} -w".format(BGPMON_CONFIG_FILE))
+
+ logger.info("Starting bgp monitor session on PTF")
+ ptfhost.file(path=DUMP_FILE, state="absent")
+ ptfhost.copy(src=CUSTOM_DUMP_SCRIPT, dest=CUSTOM_DUMP_SCRIPT_DEST)
+ ptfhost.exabgp(name=BGP_MONITOR_NAME,
+ state="started",
+ local_ip=peer_addr,
+ router_id=peer_addr,
+ peer_ip=dut_lo_addr,
+ local_asn=asn,
+ peer_asn=asn,
+ port=BGP_MONITOR_PORT,
+ dump_script=CUSTOM_DUMP_SCRIPT_DEST)
+ pt_assert(wait_tcp_connection(localhost, ptfhost.mgmt_ip, BGP_MONITOR_PORT),
+ "Failed to start bgp monitor session on PTF")
+ yield
+ # Cleanup bgp monitor
+ duthost.shell("redis-cli -n 4 -c DEL 'BGP_MONITORS|{}'".format(peer_addr))
+ ptfhost.exabgp(name=BGP_MONITOR_NAME, state="absent")
+ ptfhost.file(path=CUSTOM_DUMP_SCRIPT_DEST, state="absent")
+ ptfhost.file(path=DUMP_FILE, state="absent")
diff --git a/tests/bgp/test_bgp_allow_list.py b/tests/bgp/test_bgp_allow_list.py
index 78f294d88cd..d496ebc352f 100644
--- a/tests/bgp/test_bgp_allow_list.py
+++ b/tests/bgp/test_bgp_allow_list.py
@@ -14,6 +14,7 @@
from tests.common.helpers.assertions import pytest_assert
from tests.common.helpers.parallel import reset_ansible_local_tmp
from tests.common.helpers.parallel import parallel_run
+from bgp_helpers import verify_all_routes_announce_to_bgpmon
pytestmark = [
pytest.mark.topology('t1'),
@@ -355,20 +356,24 @@ def check_other_neigh(nbrhosts, permit, node=None, results=None):
results = parallel_run(check_other_neigh, (nbrhosts, permit), {}, other_neighbors, timeout=180)
self.check_results(results)
- def test_default_allow_list_preconfig(self, duthosts, rand_one_dut_hostname, setup, nbrhosts):
+ def test_default_allow_list_preconfig(self, duthosts, rand_one_dut_hostname, setup, nbrhosts, ptfhost, bgpmon_setup_teardown):
permit = True if DEFAULT_ACTION == "permit" else False
duthost = duthosts[rand_one_dut_hostname]
self.check_routes_on_tor1(setup, nbrhosts)
self.check_routes_on_dut(duthost)
self.check_routes_on_neighbors_empty_allow_list(nbrhosts, setup, permit)
-
+ pytest_assert(verify_all_routes_announce_to_bgpmon(duthost, ptfhost),
+ "Not all routes are announced to bgpmon")
+
@pytest.mark.parametrize('load_remove_allow_list', ["permit", "deny"], indirect=['load_remove_allow_list'])
- def test_allow_list(self, duthosts, rand_one_dut_hostname, setup, nbrhosts, load_remove_allow_list):
+ def test_allow_list(self, duthosts, rand_one_dut_hostname, setup, nbrhosts, load_remove_allow_list, ptfhost, bgpmon_setup_teardown):
permit = True if load_remove_allow_list == "permit" else False
duthost = duthosts[rand_one_dut_hostname]
self.check_routes_on_tor1(setup, nbrhosts)
self.check_routes_on_dut(duthost)
self.check_routes_on_neighbors(nbrhosts, setup, permit)
+ pytest_assert(verify_all_routes_announce_to_bgpmon(duthost, ptfhost),
+ "Not all routes are announced to bgpmon")
- def test_default_allow_list_postconfig(self, duthosts, rand_one_dut_hostname, setup, nbrhosts):
- self.test_default_allow_list_preconfig(duthosts, rand_one_dut_hostname, setup, nbrhosts)
+ def test_default_allow_list_postconfig(self, duthosts, rand_one_dut_hostname, setup, nbrhosts, ptfhost, bgpmon_setup_teardown):
+ self.test_default_allow_list_preconfig(duthosts, rand_one_dut_hostname, setup, nbrhosts, ptfhost, bgpmon_setup_teardown)
diff --git a/tests/bgp/test_bgpmon.py b/tests/bgp/test_bgpmon.py
index 4caa569d85e..b946f9f45b7 100644
--- a/tests/bgp/test_bgpmon.py
+++ b/tests/bgp/test_bgpmon.py
@@ -11,18 +11,14 @@
from tests.common.helpers.generators import generate_ips as generate_ips
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait_until
-
+from bgp_helpers import BGPMON_TEMPLATE_FILE, BGPMON_CONFIG_FILE, BGP_MONITOR_NAME, BGP_MONITOR_PORT
pytestmark = [
pytest.mark.topology('any'),
]
-BGPMON_TEMPLATE_FILE = 'bgp/templates/bgp_template.j2'
-BGPMON_CONFIG_FILE = '/tmp/bgpmon.json'
BGP_PORT = 179
BGP_CONNECT_TIMEOUT = 121
ZERO_ADDR = r'0.0.0.0/0'
-BGP_MONITOR_NAME = "bgp_monitor"
-BGP_MONITOR_PORT = 7000
logger = logging.getLogger(__name__)
def route_through_default_routes(host, ip_addr):
diff --git a/tests/bgp/test_traffic_shift.py b/tests/bgp/test_traffic_shift.py
index aaf9dcc94cb..ac1a0036864 100644
--- a/tests/bgp/test_traffic_shift.py
+++ b/tests/bgp/test_traffic_shift.py
@@ -1,11 +1,8 @@
import pytest
import logging
-import json
-import time
import ipaddr as ipaddress
+from bgp_helpers import parse_rib, verify_all_routes_announce_to_bgpmon
from tests.common.helpers.assertions import pytest_assert
-from tests.common.utilities import wait_tcp_connection
-from jinja2 import Template
import re
pytestmark = [
@@ -18,65 +15,11 @@
TS_MAINTENANCE = "System Mode: Maintenance"
TS_INCONSISTENT = "System Mode: Not consistent"
-DUMP_FILE = "/tmp/bgp_monitor_dump.log"
-CUSTOM_DUMP_SCRIPT = "bgp/bgp_monitor_dump.py"
-CUSTOM_DUMP_SCRIPT_DEST = "/usr/share/exabgp/bgp_monitor_dump.py"
-BGP_MONITOR_PORT = 7000
-BGP_MONITOR_NAME = "bgp_monitor"
-BGP_ANNOUNCE_TIME = 30 #should be enough to receive and parse bgp updates
-
-# TODO: remove me
-BGPMON_TEMPLATE_FILE = 'bgp/templates/bgp_template.j2'
-BGPMON_CONFIG_FILE = '/tmp/bgpmon.json'
-
-PEER_COUNT = 1
-
@pytest.fixture
def traffic_shift_community(duthost):
community = duthost.shell('sonic-cfggen -y /etc/sonic/constants.yml -v constants.bgp.traffic_shift_community')['stdout']
return community
-@pytest.fixture
-def common_setup_teardown(ptfhost, duthost, localhost, setup_interfaces):
- connection = setup_interfaces[0]
- dut_lo_addr = connection['local_addr'].split("/")[0]
- peer_addr = connection['neighbor_addr'].split("/")[0]
- mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
- asn = mg_facts['minigraph_bgp_asn']
- # TODO: Add a common method to load BGPMON config for test_bgpmon and test_traffic_shift
- logger.info("Configuring bgp monitor session on DUT")
- bgpmon_args = {
- 'db_table_name': 'BGP_MONITORS',
- 'peer_addr': peer_addr,
- 'asn': asn,
- 'local_addr': dut_lo_addr,
- 'peer_name': BGP_MONITOR_NAME
- }
- bgpmon_template = Template(open(BGPMON_TEMPLATE_FILE).read())
- duthost.copy(content=bgpmon_template.render(**bgpmon_args),
- dest=BGPMON_CONFIG_FILE)
-
- logger.info("Starting bgp monitor session on PTF")
- ptfhost.file(path=DUMP_FILE, state="absent")
- ptfhost.copy(src=CUSTOM_DUMP_SCRIPT, dest=CUSTOM_DUMP_SCRIPT_DEST)
- ptfhost.exabgp(name=BGP_MONITOR_NAME,
- state="started",
- local_ip=peer_addr,
- router_id=peer_addr,
- peer_ip=dut_lo_addr,
- local_asn=asn,
- peer_asn=asn,
- port=BGP_MONITOR_PORT,
- dump_script=CUSTOM_DUMP_SCRIPT_DEST)
- pytest_assert(wait_tcp_connection(localhost, ptfhost.mgmt_ip, BGP_MONITOR_PORT),
- "Failed to start bgp monitor session on PTF")
- yield
- # Cleanup bgp monitor
- duthost.shell("redis-cli -n 4 -c DEL 'BGP_MONITORS|{}'".format(peer_addr))
- ptfhost.exabgp(name=BGP_MONITOR_NAME, state="absent")
- ptfhost.file(path=CUSTOM_DUMP_SCRIPT_DEST, state="absent")
- ptfhost.file(path=DUMP_FILE, state="absent")
-
def get_traffic_shift_state(host):
outputs = host.shell('TSC')['stdout_lines']
for out in outputs:
@@ -88,37 +31,6 @@ def get_traffic_shift_state(host):
return TS_INCONSISTENT
pytest.fail("TSC return unexpected state {}".format(out))
-def parse_exabgp_dump(host):
- """
- Parse the dump file of exabgp, and build a set for checking routes
- """
- routes = set()
- output_lines = host.shell("cat {}".format(DUMP_FILE))['stdout_lines']
- for line in output_lines:
- routes.add(line)
- return routes
-
-def parse_rib(host, ip_ver):
- """
- Parse output of 'show bgp ipv4/6' and parse into a dict for checking routes
- """
- routes = {}
- cmd = "vtysh -c \"show bgp ipv%d json\"" % ip_ver
- route_data = json.loads(host.shell(cmd)['stdout'])
- for ip, nexthops in route_data['routes'].iteritems():
- aspath = set()
- for nexthop in nexthops:
- aspath.add(nexthop['path'])
- routes[ip] = aspath
- return routes
-
-def verify_all_routes_announce_to_bgpmon(routes_bgpmon, routes_dut):
- logger.info("Verifying all routes are announced to BGPMON")
- for route in routes_dut.keys():
- if route not in routes_bgpmon:
- return False
- return True
-
def parse_routes_on_eos(dut_host, neigh_hosts, ip_ver):
"""
Parse the output of 'show ip bgp neigh received-routes' on eos, and store in a dict
@@ -225,7 +137,7 @@ def verify_only_loopback_routes_are_announced_to_neighs(dut_host, neigh_hosts, c
return verify_loopback_route_with_community(dut_host, neigh_hosts, 4, community) and \
verify_loopback_route_with_community(dut_host, neigh_hosts, 6, community)
-def test_TSA(duthost, ptfhost, nbrhosts, common_setup_teardown, traffic_shift_community):
+def test_TSA(duthost, ptfhost, nbrhosts, bgpmon_setup_teardown, traffic_shift_community):
"""
Test TSA
Verify all routes are announced to bgp monitor, and only loopback routes are announced to neighs
@@ -236,14 +148,7 @@ def test_TSA(duthost, ptfhost, nbrhosts, common_setup_teardown, traffic_shift_co
# Verify DUT is in maintenance state.
pytest_assert(TS_MAINTENANCE == get_traffic_shift_state(duthost),
"DUT is not in maintenance state")
- # Start bgpmon on DUT
- logger.info("Starting bgpmon on DUT")
- duthost.command("sonic-cfggen -j {} -w".format(BGPMON_CONFIG_FILE))
- time.sleep(BGP_ANNOUNCE_TIME)
- bgpmon_routes = parse_exabgp_dump(ptfhost)
- rib_v4 = parse_rib(duthost, 4)
- rib_v6 = parse_rib(duthost, 6)
- pytest_assert(verify_all_routes_announce_to_bgpmon(bgpmon_routes, dict(rib_v4.items() + rib_v6.items())),
+ pytest_assert(verify_all_routes_announce_to_bgpmon(duthost, ptfhost),
"Not all routes are announced to bgpmon")
pytest_assert(verify_only_loopback_routes_are_announced_to_neighs(duthost, nbrhosts, traffic_shift_community),
"Failed to verify routes on eos in TSA")
@@ -251,7 +156,7 @@ def test_TSA(duthost, ptfhost, nbrhosts, common_setup_teardown, traffic_shift_co
# Recover to Normal state
duthost.shell("TSB")
-def test_TSB(duthost, ptfhost, nbrhosts, common_setup_teardown):
+def test_TSB(duthost, ptfhost, nbrhosts, bgpmon_setup_teardown):
"""
Test TSB.
Establish BGP session between PTF and DUT, and verify all routes are announced to bgp monitor,
@@ -262,16 +167,9 @@ def test_TSB(duthost, ptfhost, nbrhosts, common_setup_teardown):
# Verify DUT is in normal state.
pytest_assert(TS_NORMAL == get_traffic_shift_state(duthost),
"DUT is not in normal state")
- # Start bgpmon on DUT
- logger.info("Starting bgpmon on DUT")
- duthost.command("sonic-cfggen -j {} -w".format(BGPMON_CONFIG_FILE))
- time.sleep(BGP_ANNOUNCE_TIME)
- bgpmon_routes = parse_exabgp_dump(ptfhost)
- rib_v4 = parse_rib(duthost, 4)
- rib_v6 = parse_rib(duthost, 6)
- pytest_assert(verify_all_routes_announce_to_bgpmon(bgpmon_routes, dict(rib_v4.items() + rib_v6.items())),
+ pytest_assert(verify_all_routes_announce_to_bgpmon(duthost, ptfhost),
"Not all routes are announced to bgpmon")
- pytest_assert(verify_all_routes_announce_to_neighs(duthost, nbrhosts, rib_v4, 4),
+ pytest_assert(verify_all_routes_announce_to_neighs(duthost, nbrhosts, parse_rib(duthost, 4), 4),
"Not all ipv4 routes are announced to neighbors")
- pytest_assert(verify_all_routes_announce_to_neighs(duthost, nbrhosts, rib_v6, 6),
+ pytest_assert(verify_all_routes_announce_to_neighs(duthost, nbrhosts, parse_rib(duthost, 6), 6),
"Not all ipv6 routes are announced to neighbors")
diff --git a/tests/common/devices.py b/tests/common/devices.py
index 42607f0be18..27aebe81b71 100644
--- a/tests/common/devices.py
+++ b/tests/common/devices.py
@@ -1362,6 +1362,20 @@ def remove_ssh_tunnel_sai_rpc(self):
for pid in pid_list:
self.shell("kill {}".format(pid))
+ def get_up_ip_ports(self):
+ """
+ Get a list for all up ip interfaces
+ """
+ up_ip_ports = []
+ ip_intf_facts = self.show_ip_interface()['ansible_facts']['ip_interfaces']
+ for intf in ip_intf_facts:
+ try:
+ if ip_intf_facts[intf]['oper_state'] == 'up':
+ up_ip_ports.append(intf)
+ except KeyError:
+ pass
+ return up_ip_ports
+
class K8sMasterHost(AnsibleHostBase):
"""
diff --git a/tests/common/dualtor/data_plane_utils.py b/tests/common/dualtor/data_plane_utils.py
new file mode 100644
index 00000000000..387a91d83c1
--- /dev/null
+++ b/tests/common/dualtor/data_plane_utils.py
@@ -0,0 +1,154 @@
+import pytest
+from tests.common.dualtor.dual_tor_io import DualTorIO
+from tests.common.helpers.assertions import pytest_assert
+import threading
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def arp_setup(ptfhost):
+ logger.info('Copy ARP responder to the PTF container {}'.format(ptfhost.hostname))
+ ptfhost.copy(src='scripts/arp_responder.py', dest='/opt')
+ ptfhost.host.options["variable_manager"].extra_vars.update({"arp_responder_args": "-e"})
+ ptfhost.template(src="templates/arp_responder.conf.j2",
+ dest="/etc/supervisor/conf.d/arp_responder.conf")
+ logging.info("Refreshing supervisorctl")
+ ptfhost.shell("supervisorctl reread && supervisorctl update")
+
+
+def validate_IO_results(tor_IO, allowed_disruption, delay):
+ received_counter = tor_IO.get_total_received_packets()
+ total_disruptions = tor_IO.get_total_disruptions()
+ longest_disruption = tor_IO.get_longest_disruption()
+ total_lost_packets = tor_IO.get_total_dropped_packets()
+
+ if received_counter:
+        pytest_assert(total_disruptions <= allowed_disruption, "Traffic was disrupted {} times. Allowed number of disruption: {}"\
+ .format(total_disruptions, allowed_disruption))
+
+ pytest_assert(longest_disruption <= delay, "Traffic was disrupted for {}s. Maximum allowed disruption: {}s".\
+ format(longest_disruption, delay))
+ else:
+ pytest_assert(received_counter > 0, "Test failed to capture any meaningful received packet")
+
+ if total_lost_packets:
+        logging.warning("Packets were lost during the test. Total lost count: {}".format(total_lost_packets))
+
+
+@pytest.fixture
+def send_t1_to_server_after_action(ptfhost, ptfadapter, tbinfo):
+ """
+ Starts IO test from T1 router to server.
+ As part of IO test the background thread sends and sniffs packets.
+ As soon as sender and sniffer threads are in running state, a callback action is performed.
+ When action is finished, the sender and sniffer threads are given time to complete.
+ Finally, the collected packets are sniffed, and the disruptions are measured.
+
+ As part of teardown, the ARP table is cleared and ptf dataplane is flushed.
+ Args:
+ ptfhost (fixture): Fixture for PTF instance to be used during the test
+ ptfadapter (fixture): Fixture which provides helper utility to use ptf ptf testutils
+        tbinfo (fixture): Fixture for testbed inventory information
+
+ Yields:
+ function: A helper function to run and monitor the IO test
+ """
+ arp_setup(ptfhost)
+
+ duthosts = []
+ def t1_to_server_io_test(duthost, server_port=None, tor_port=None, delay=1, timeout=5, action=None):
+ """
+ Helper method for `send_t1_to_server_after_action`.
+ Starts sender and sniffer before performing the action on the tor host.
+
+ Args:
+ server_port: The port intended to receive the packet
+ tor_port: The T1 port through which to send the packet. Connected to either the upper or lower ToR.
+ default - None. If set to None, the test chooses random portchannel member port for this test.
+ delay: Maximum acceptable delay for traffic to continue flowing again
+ timeout: Time to wait for packet to be transmitted
+ action: Some function (with args) which performs the desired action, or `None` if no action/delay is desired
+ """
+ duthosts.append(duthost)
+ io_ready = threading.Event()
+ tor_IO = DualTorIO(duthost, ptfhost, ptfadapter, tbinfo, server_port, tor_port, delay, timeout, io_ready)
+ send_and_sniff = threading.Thread(target=tor_IO.start_io_test, kwargs={'traffic_generator': tor_IO.generate_from_t1_to_server})
+ send_and_sniff.start()
+ if action:
+ # do not perform the provided action until IO threads (sender and sniffer) are ready
+ io_ready.wait()
+ logger.info("Sender and sniffer threads started, ready to execute the callback action")
+ action()
+
+ # Wait for the IO to complete before doing checks
+ logger.info("Waiting for sender and sniffer threads to finish..")
+ send_and_sniff.join()
+ validate_IO_results(tor_IO, allowed_disruption=1, delay=delay)
+
+ yield t1_to_server_io_test
+
+ # cleanup torIO
+ ptfadapter.dataplane.flush()
+ for duthost in duthosts:
+ logger.info('Clearing arp entries on DUT {}'.format(duthost.hostname))
+ duthost.shell('sonic-clear arp')
+
+
+@pytest.fixture
+def send_server_to_t1_after_action(ptfhost, ptfadapter, tbinfo):
+ """
+ Starts IO test from server to T1 router.
+ As part of IO test the background thread sends and sniffs packets.
+ As soon as sender and sniffer threads are in running state, a callback action is performed.
+ When action is finished, the sender and sniffer threads are given time to complete.
+ Finally, the collected packets are sniffed, and the disruptions are measured.
+
+ As part of teardown, the ARP, FDB tables are cleared and ptf dataplane is flushed.
+ Args:
+ ptfhost (fixture): Fixture for PTF instance to be used during the test
+ ptfadapter (fixture): Fixture which provides helper utility to use ptf testutils
+        tbinfo (fixture): Fixture for testbed inventory information
+
+ Yields:
+ function: A helper function to run and monitor the IO test
+ """
+ arp_setup(ptfhost)
+
+ duthosts = []
+ def server_to_t1_io_test(duthost, server_port=None, tor_port=None, delay=1, timeout=5, action=None):
+ """
+ Helper method for `send_server_to_t1_after_action`.
+ Starts sender and sniffer before performing the action on the tor host.
+
+ Args:
+ server_port: The port intended to receive the packet
+ tor_port: The port through which to send the packet. Connected to either the upper or lower ToR.
+ default - None. If set to None, the test chooses random portchannel member port for this test.
+ delay: Maximum acceptable delay for traffic to continue flowing again
+ timeout: Time to wait for packet to be transmitted
+ action: Some function (with args) which performs the desired action, or `None` if no action/delay is desired
+ """
+ duthosts.append(duthost)
+ io_ready = threading.Event()
+ tor_IO = DualTorIO(duthost, ptfhost, ptfadapter, tbinfo, server_port, tor_port, delay, timeout, io_ready)
+ send_and_sniff = threading.Thread(target=tor_IO.start_io_test, kwargs={'traffic_generator': tor_IO.generate_from_server_to_t1})
+ send_and_sniff.start()
+
+ if action:
+ # do not perform the provided action until IO threads (sender and sniffer) are ready
+ io_ready.wait()
+ logger.info("Sender and sniffer threads started, ready to execute the callback action")
+ action()
+
+ # Wait for the IO to complete before doing checks
+ send_and_sniff.join()
+ validate_IO_results(tor_IO, allowed_disruption=1, delay=delay)
+
+ yield server_to_t1_io_test
+
+ # cleanup torIO
+ ptfadapter.dataplane.flush()
+ for duthost in duthosts:
+ logger.info('Clearing arp entries on DUT {}'.format(duthost.hostname))
+ duthost.shell('sonic-clear arp')
diff --git a/tests/common/dualtor/dual_tor_io.py b/tests/common/dualtor/dual_tor_io.py
new file mode 100644
index 00000000000..98212f666db
--- /dev/null
+++ b/tests/common/dualtor/dual_tor_io.py
@@ -0,0 +1,484 @@
+import datetime
+import threading
+import time
+import socket
+import random
+import struct
+import ipaddress
+import logging
+import json
+from netaddr import IPNetwork
+from collections import defaultdict
+
+import scapy.all as scapyall
+import ptf.testutils as testutils
+from tests.ptf_runner import ptf_runner
+
+DOWNSTREAM_DST_IP = "192.168.0.2"
+UPSTREAM_DST_IP = "192.168.128.1"
+TCP_DST_PORT = 5000
+SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
+PTFRUNNER_QLEN = 1000
+VLAN_INDEX = 0
+VLAN_HOSTS = 100
+VLAN_BASE_MAC_PATTERN = "72060001{:04}"
+LAG_BASE_MAC_PATTERN = '5c010203{:04}'
+
+logger = logging.getLogger(__name__)
+
+
+class DualTorIO:
+ def __init__(self, duthost, ptfhost, ptfadapter, tbinfo, server_port, tor_port, delay, timeout, io_ready):
+ self.tor_port = tor_port
+ self.server_port = server_port
+ self.duthost = duthost
+ self.ptfadapter = ptfadapter
+ self.ptfhost = ptfhost
+ self.tbinfo = tbinfo
+ self.io_ready_event = io_ready
+ self.dut_mac = self.duthost.facts["router_mac"]
+
+ self.time_to_listen = 180.0
+ self.sniff_time_incr = 60
+ self.send_interval = 0.0035 # Inter-packet interval
+ self.packets_to_send = min(int(self.time_to_listen / (self.send_interval + 0.0015)), 45000) # How many packets to be sent by sender thread
+
+ self.dataplane = self.ptfadapter.dataplane
+ self.dataplane.flush()
+ self.total_disrupt_time = None
+ self.disrupts_count = None
+ self.total_disrupt_packets = None
+ self.max_lost_id = None
+ self.max_disrupt_time = None
+ self.received_counter = 0
+ self.total_lost_packets = None
+ # This list will contain all unique Payload ID, to filter out received floods.
+ self.unique_id = set()
+
+ mg_facts = self.duthost.get_extended_minigraph_facts(self.tbinfo)
+ prefix_len = mg_facts['minigraph_vlan_interfaces'][VLAN_INDEX]['prefixlen'] - 3
+ test_network = ipaddress.ip_address(mg_facts['minigraph_vlan_interfaces'][VLAN_INDEX]['addr']) + (1 << (32 - prefix_len))
+ self.default_ip_range = str(ipaddress.ip_interface(unicode(str(test_network) + '/{0}'.format(prefix_len))).network)
+ self.src_addr, mask = self.default_ip_range.split('/')
+ self.n_hosts = 2**(32 - int(mask))
+ self.port_indices = mg_facts['minigraph_ptf_indices']
+ portchannel_info = mg_facts['minigraph_portchannels']
+ self.port_channel_ports = []
+ for pc in portchannel_info.values():
+ self.port_channel_ports.extend([self.port_indices[member] for member in pc['members']])
+
+ self.vlan_interfaces = mg_facts["minigraph_vlan_interfaces"][VLAN_INDEX]
+ self.vlan_network = self.vlan_interfaces["subnet"]
+ self.vlan_ports = [self.port_indices[ifname] for ifname in mg_facts["minigraph_vlans"].values()[VLAN_INDEX]["members"]]
+ self.vlan_host_map = self._generate_vlan_servers()
+ self.__configure_arp_responder()
+
+ vlan_table = self.duthost.get_running_config_facts()['VLAN']
+ vlan_name = list(vlan_table.keys())[0]
+ self.vlan_mac = vlan_table[vlan_name]['mac']
+
+ logger.debug("VLAN ports: {}".format(str(self.vlan_ports)))
+ logger.debug("PORTCHANNEL ports: {}".format(str(self.port_channel_ports)))
+
+
+ def _generate_vlan_servers(self):
+ """
+ @summary: Generates physical port maps which is a set of IP address and their associated MAC addresses
+ - MACs are generated sequentially as offsets from VLAN_BASE_MAC_PATTERN
+ - IP addresses are randomly selected from the given VLAN network
+ - "Hosts" (IP/MAC pairs) are distributed evenly amongst the ports in the VLAN
+ """
+ vlan_host_map = defaultdict(dict)
+
+ addr_list = list(IPNetwork(self.vlan_network))
+ for counter, i in enumerate(range(2, VLAN_HOSTS + 2)):
+ mac = VLAN_BASE_MAC_PATTERN.format(counter)
+ port = self.vlan_ports[i % len(self.vlan_ports)]
+ addr = random.choice(addr_list)
+ # Ensure that we won't get a duplicate ip address
+ addr_list.remove(addr)
+
+ vlan_host_map[port][str(addr)] = mac
+
+ return vlan_host_map
+
+
+ def __configure_arp_responder(self):
+ """
+ @summary: Generate ARP responder configuration using vlan_host_map.
+ Copy this configuration to PTF and restart arp_responder
+ """
+ arp_responder_conf = {}
+ for port in self.vlan_host_map:
+ arp_responder_conf['eth{}'.format(port)] = self.vlan_host_map[port]
+ with open("/tmp/from_t1.json", "w") as fp:
+ json.dump(arp_responder_conf, fp)
+ self.ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
+ self.ptfhost.shell("supervisorctl reread && supervisorctl update")
+ self.ptfhost.shell("supervisorctl restart arp_responder")
+ logger.info("arp_responder restarted")
+
+
+ def start_io_test(self, traffic_generator=None):
+ """
+ @summary: The entry point to start the TOR dataplane I/O test.
+ Args:
+ traffic_generator (function): A callback function to decide the traffic direction (T1 to server / server to T1)
+ Allowed values: self.generate_from_t1_to_server or self.generate_from_server_to_t1
+ """
+ # Check in a conditional for better readability
+ if traffic_generator == self.generate_from_t1_to_server:
+ self.generate_from_t1_to_server()
+ elif traffic_generator == self.generate_from_server_to_t1:
+ self.generate_from_server_to_t1()
+ else:
+ logger.error("Traffic generator not provided or invalid")
+ return
+ # start and later join the sender and sniffer threads
+ self.send_and_sniff(sender=self.traffic_sender_thread, sniffer=self.traffic_sniffer_thread)
+
+ # Sender and sniffer have finished the job. Start examining the collected flow
+ self.examine_flow()
+ if self.lost_packets:
+ self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start)
+ logger.error("The longest disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
+ logger.error("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \
+ (self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
+
+
+ def generate_from_t1_to_server(self):
+ """
+ @summary: Generate (not send) the packets to be sent from T1 to server
+ """
+ eth_dst = self.dut_mac
+ eth_src = self.ptfadapter.dataplane.get_mac(0, 0)
+ ip_dst = DOWNSTREAM_DST_IP
+ ip_ttl = 255
+ tcp_dport = TCP_DST_PORT
+
+ if self.tor_port:
+ self.from_tor_src_port = self.tor_port
+ else:
+ self.from_tor_src_port = random.choice(self.port_channel_ports)
+
+ logger.info("-"*20 + "T1 to server packet" + "-"*20)
+ logger.info("Ethernet address: dst: {} src: {}".format(eth_dst, eth_src))
+ logger.info("IP address: dst: {} src: random".format(ip_dst))
+ logger.info("TCP port: dst: {}".format(tcp_dport))
+ logger.info("DUT mac: {}".format(self.dut_mac))
+ logger.info("VLAN mac: {}".format(self.vlan_mac))
+ logger.info("-"*50)
+
+ self.packets_list = []
+ for i in range(self.packets_to_send):
+ tcp_tx_packet = testutils.simple_tcp_packet(
+ eth_dst=eth_dst,
+ eth_src=eth_src,
+ ip_dst=ip_dst,
+ ip_src=self.random_host_ip(),
+ ip_ttl=ip_ttl,
+ tcp_dport=tcp_dport)
+ payload = str(i) + 'X' * 60
+ packet = scapyall.Ether(str(tcp_tx_packet))
+ packet.load = payload
+ self.packets_list.append((self.from_tor_src_port, str(packet)))
+
+ self.sent_pkt_dst_mac = self.dut_mac
+ self.received_pkt_src_mac = self.vlan_mac
+
+
+ def generate_from_server_to_t1(self):
+ """
+ @summary: Generate (not send) the packets to be sent from server to T1
+ """
+ if self.server_port:
+ self.from_server_src_port = self.server_port
+ else:
+ self.from_server_src_port = random.choice(self.vlan_ports)
+ self.from_server_src_addr = random.choice(self.vlan_host_map[self.from_server_src_port].keys())
+ self.from_server_dst_addr = self.random_host_ip()
+ tcp_dport = TCP_DST_PORT
+ tcp_tx_packet = testutils.simple_tcp_packet(
+ eth_dst=self.vlan_mac,
+ ip_src=self.from_server_src_addr,
+ ip_dst=self.from_server_dst_addr,
+ tcp_dport=tcp_dport
+ )
+
+ self.packets_list = []
+ for i in range(self.packets_to_send):
+ payload = str(i) + 'X' * 60
+ packet = scapyall.Ether(str(tcp_tx_packet))
+ packet.load = payload
+ self.packets_list.append((self.from_server_src_port, str(packet)))
+
+ self.sent_pkt_dst_mac = self.vlan_mac
+ self.received_pkt_src_mac = self.dut_mac
+
+
+ def random_host_ip(self):
+ """
+ @summary: Helper method to find a random host IP for generating a random src/dst IP address
+ Returns:
+ host_ip (str): Random IP address
+ """
+ host_number = random.randint(2, self.n_hosts - 2)
+ if host_number > (self.n_hosts - 2):
+ raise Exception("host number {} is greater than number of hosts {} in the network {}".format(host_number, self.n_hosts - 2, self.default_ip_range))
+ src_addr_n = struct.unpack(">I", socket.inet_aton(self.src_addr))[0]
+ net_addr_n = src_addr_n & (2**32 - self.n_hosts)
+ host_addr_n = net_addr_n + host_number
+ host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
+
+ return host_ip
+
+
+ def send_and_sniff(self, sender, sniffer):
+ """
+ @summary: This method starts and joins two background threads in parallel: sender and sniffer
+ """
+ self.sender_thr = threading.Thread(target=sender)
+ self.sniff_thr = threading.Thread(target=sniffer)
+ self.sniffer_started = threading.Event()
+ self.sniff_thr.start()
+ self.sender_thr.start()
+ self.sniff_thr.join()
+ self.sender_thr.join()
+
+
+ def traffic_sender_thread(self):
+ """
+ @summary: Generalized Sender thread (to be used for traffic in both directions)
+ Waits for a signal from the `traffic_sniffer_thread` before actually starting.
+        This is to make sure that packets are not sent before they are ready to be captured.
+ """
+
+ logger.info("Sender waiting to send {} packets".format(len(self.packets_list)))
+
+ self.sniffer_started.wait(timeout=10)
+ sender_start = datetime.datetime.now()
+ logger.info("Sender started at {}".format(str(sender_start)))
+
+ # Signal data_plane_utils that sender and sniffer threads have begun
+ self.io_ready_event.set()
+
+ for entry in self.packets_list:
+ time.sleep(self.send_interval)
+ testutils.send_packet(self.ptfadapter, *entry)
+
+ logger.info("Sender has been running for {}".format(str(datetime.datetime.now() - sender_start)))
+
+
+ def traffic_sniffer_thread(self):
+ """
+ @summary: Generalized sniffer thread (to be used for traffic in both directions)
+ Starts `scapy_sniff` thread, and waits for its setup before signalling the sender thread to start
+ """
+ wait = self.time_to_listen + self.sniff_time_incr
+ sniffer_start = datetime.datetime.now()
+ logger.info("Sniffer started at {}".format(str(sniffer_start)))
+ sniff_filter = "tcp and tcp dst port {} and tcp src port 1234 and not icmp".format(TCP_DST_PORT)
+
+ scapy_sniffer = threading.Thread(target=self.scapy_sniff, kwargs={'sniff_timeout': wait, 'sniff_filter': sniff_filter})
+ scapy_sniffer.start()
+ time.sleep(2) # Let the scapy sniff initialize completely.
+        self.sniffer_started.set() # Unblock the waiting sender thread (traffic_sender_thread).
+ scapy_sniffer.join()
+ logger.info("Sniffer has been running for {}".format(str(datetime.datetime.now() - sniffer_start)))
+ self.sniffer_started.clear()
+
+
+ def scapy_sniff(self, sniff_timeout=180, sniff_filter=''):
+ """
+ @summary: PTF runner - runs a sniffer in PTF container.
+ Running sniffer in sonic-mgmt container has missing SOCKET problem
+ and permission issues (scapy and tcpdump require root user)
+ The remote function listens on all ports. Once found, all packets are dumped to local pcap file,
+ and all packets are saved to self.all_packets as scapy type.
+
+ Args:
+ sniff_timeout (int): Duration in seconds to sniff the traffic
+ sniff_filter (str): Filter that Scapy will use to collect only relevant packets
+ """
+ capture_pcap = '/tmp/capture.pcap'
+ sniffer_log = '/tmp/dualtor-sniffer.log'
+ result = ptf_runner(
+ self.ptfhost,
+ "ptftests",
+ "dualtor_sniffer.Sniff",
+ qlen=PTFRUNNER_QLEN,
+ platform_dir="ptftests",
+ platform="remote",
+ params={
+ "sniff_timeout" : sniff_timeout,
+ "sniff_filter" : sniff_filter,
+ "capture_pcap": capture_pcap,
+ "sniffer_log": sniffer_log,
+ "port_filter_expression": 'not (arp and ether src {}) and not tcp'.format(self.dut_mac)
+ },
+ log_file=sniffer_log,
+ module_ignore_errors=False
+ )
+ logger.debug("Ptf_runner result: {}".format(result))
+
+ logger.info('Fetching log files from ptf and dut hosts')
+ logs_list = [
+ {'src': sniffer_log, 'dest': '/tmp/', 'flat': True, 'fail_on_missing': False},
+ {'src': capture_pcap, 'dest': '/tmp/', 'flat': True, 'fail_on_missing': False}
+ ]
+
+ for log_item in logs_list:
+ self.ptfhost.fetch(**log_item)
+
+ self.all_packets = scapyall.rdpcap(capture_pcap)
+ logger.info("Number of all packets captured: {}".format(len(self.all_packets)))
+
+
+ def get_total_disruptions(self):
+ return self.disrupts_count
+
+
+ def get_longest_disruption(self):
+ return self.max_disrupt_time
+
+
+ def get_total_disrupted_packets(self):
+ return self.total_disrupt_packets
+
+
+ def get_total_received_packets(self):
+ return self.received_counter
+
+
+ def get_total_dropped_packets(self):
+ return self.total_lost_packets
+
+
+ def no_flood(self, packet):
+ """
+ @summary: This method filters packets which are unique (i.e. no floods).
+ """
+ if (not int(str(packet[scapyall.TCP].payload).replace('X','')) in self.unique_id) and (packet[scapyall.Ether].src == self.received_pkt_src_mac):
+ # This is a unique (no flooded) received packet.
+ self.unique_id.add(int(str(packet[scapyall.TCP].payload).replace('X','')))
+ return True
+ elif packet[scapyall.Ether].dst == self.sent_pkt_dst_mac:
+ # This is a sent packet.
+ return True
+ else:
+ return False
+
+
+ def examine_flow(self):
+ """
+ @summary: This method examines packets collected by sniffer thread
+ The method compares TCP payloads of the packets one by one (assuming all payloads are consecutive integers),
+ and the losses if found - are treated as disruptions in Dataplane forwarding.
+ All disruptions are saved to self.lost_packets dictionary, in format:
+ disrupt_start_id = (missing_packets_count, disrupt_time, disrupt_start_timestamp, disrupt_stop_timestamp)
+ """
+ def examine_each_packet(packets):
+ lost_packets = dict()
+ sent_packets = dict()
+ prev_payload, prev_time = 0, 0
+ sent_payload = 0
+ disruption_start, disruption_stop = None, None
+ received_counter = 0 # Counts packets from dut.
+ for packet in packets:
+ if packet[scapyall.Ether].dst == self.sent_pkt_dst_mac:
+ # This is a sent packet - keep track of it as payload_id:timestamp.
+ sent_payload = int(str(packet[scapyall.TCP].payload).replace('X',''))
+ sent_packets[sent_payload] = packet.time
+ continue
+ if packet[scapyall.Ether].src == self.received_pkt_src_mac:
+ # This is a received packet.
+ received_time = packet.time
+ received_payload = int(str(packet[scapyall.TCP].payload).replace('X',''))
+ received_counter += 1
+ if not (received_payload and received_time):
+ # This is the first valid received packet.
+ prev_payload = received_payload
+ prev_time = received_time
+ continue
+ if received_payload - prev_payload > 1:
+ # Packets in a row are missing, a disruption.
+ lost_id = (received_payload - 1) - prev_payload # How many packets lost in a row.
+ disrupt = (sent_packets[received_payload] - sent_packets[prev_payload + 1]) # How long disrupt lasted.
+ # Add disruption to the lost_packets dict:
+ lost_packets[prev_payload] = (lost_id, disrupt, received_time - disrupt, received_time)
+ logger.info("Disruption between packet ID %d and %d. For %.4f " % (prev_payload, received_payload, disrupt))
+ if not disruption_start:
+ disruption_start = datetime.datetime.fromtimestamp(prev_time)
+ disruption_stop = datetime.datetime.fromtimestamp(received_time)
+ prev_payload = received_payload
+ prev_time = received_time
+ if received_counter == 0:
+ logger.error("Sniffer failed to filter any traffic from DUT")
+ else:
+ logger.info("Total number of filtered incoming packets captured {}".format(received_counter))
+ if lost_packets:
+ logger.info("Disruptions happen between {} and {}.".format(str(disruption_start), str(disruption_stop)))
+ elif len(sent_packets) != received_counter:
+ logger.info("Number of packets lost: {}".format(len(sent_packets) - received_counter))
+ self.total_lost_packets = len(sent_packets) - received_counter
+
+ return received_counter, lost_packets
+
+ def check_tcp_payload(packet, packets_to_send):
+ """
+ @summary: Helper method
+
+ Returns: Bool: True if a packet is not corrupted and has a valid TCP sequential TCP Payload
+ """
+ try:
+                in_range = int(str(packet[scapyall.TCP].payload).replace('X','')) in range(packets_to_send)
+                return in_range
+ except Exception as err:
+ return False
+
+ examine_start = datetime.datetime.now()
+ logger.info("Packet flow examine started {}".format(str(examine_start)))
+
+ if not self.all_packets:
+ logger.error("self.all_packets not defined.")
+ return None
+ # Filter out packets and remove floods:
+ filtered_packets = [ pkt for pkt in self.all_packets if
+ scapyall.TCP in pkt and
+ not scapyall.ICMP in pkt and
+ pkt[scapyall.TCP].sport == 1234 and
+ pkt[scapyall.TCP].dport == TCP_DST_PORT and
+ check_tcp_payload(pkt, self.packets_to_send) and
+ self.no_flood(pkt)
+ ]
+ logger.info("Number of filtered packets captured: {}".format(len(filtered_packets)))
+
+ # Re-arrange packets, if delayed, by Payload ID and Timestamp:
+ packets = sorted(filtered_packets, key = lambda packet: (int(str(packet[scapyall.TCP].payload).replace('X','')), packet.time ))
+ self.max_disrupt, self.total_disruption = 0, 0
+
+ if not packets or len(packets) == 0:
+ logger.error("Sniffer failed to capture any traffic")
+ return
+ else:
+ logger.info("Measuring traffic disruptions..")
+ filename = '/tmp/capture_filtered.pcap'
+ scapyall.wrpcap(filename, packets)
+ logger.info("Filtered pcap dumped to {}".format(filename))
+
+ self.received_counter, self.lost_packets = examine_each_packet(packets)
+
+ self.disrupts_count = len(self.lost_packets) # Total disrupt counter.
+ if self.lost_packets:
+ # Find the longest loss with the longest time:
+ _, (self.max_lost_id, self.max_disrupt_time, self.no_routing_start, self.no_routing_stop) = \
+ max(self.lost_packets.items(), key = lambda item:item[1][0:2])
+ self.total_disrupt_packets = sum([item[0] for item in self.lost_packets.values()])
+ self.total_disrupt_time = sum([item[1] for item in self.lost_packets.values()])
+ elif self.received_counter > 0:
+ self.max_lost_id = 0
+ self.max_disrupt_time = 0
+ self.total_disrupt_packets = 0
+ self.total_disrupt_time = 0
+ logger.info("Gaps in forwarding not found.")
+ logger.info("Packet flow examine finished after {}".format(str(datetime.datetime.now() - examine_start)))
diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py
index 136f0e526bf..b08029efdd7 100644
--- a/tests/common/dualtor/dual_tor_utils.py
+++ b/tests/common/dualtor/dual_tor_utils.py
@@ -1,9 +1,9 @@
import logging
import pytest
import json
-import ptf.testutils as testutils
+from datetime import datetime
+from tests.ptf_runner import ptf_runner
-from ipaddress import ip_interface
from natsort import natsorted
from tests.common.config_reload import config_reload
from tests.common.helpers.assertions import pytest_assert
@@ -95,27 +95,57 @@ def map_hostname_to_tor_side(tbinfo, hostname):
return None
+def get_t1_ptf_pc_ports(dut, tbinfo):
+    """Gets the PTF portchannel ports connected to the T1 switches."""
+ config_facts = dut.get_running_config_facts()
+ mg_facts = dut.get_extended_minigraph_facts(tbinfo)
+
+ pc_ports = {}
+ for pc in config_facts['PORTCHANNEL'].keys():
+ pc_ports[pc] = []
+ for intf in config_facts["PORTCHANNEL"][pc]["members"]:
+ ptf_port_index = mg_facts["minigraph_ptf_indices"][intf]
+ intf_name = "eth{}".format(ptf_port_index)
+ pc_ports[pc].append(intf_name)
+
+ return pc_ports
+
+
def get_t1_ptf_ports(dut, tbinfo):
'''
Gets the PTF ports connected to a given DUT for the first T1
'''
- config_facts = dut.get_running_config_facts()
- mg_facts = dut.get_extended_minigraph_facts(tbinfo)
+ pc_ports = get_t1_ptf_pc_ports(dut, tbinfo)
# Always choose the first portchannel
- portchannel = sorted(config_facts['PORTCHANNEL'].keys())[0]
- dut_portchannel_members = config_facts['PORTCHANNEL'][portchannel]['members']
+ portchannel = sorted(pc_ports.keys())[0]
+ ptf_portchannel_intfs = pc_ports[portchannel]
- ptf_portchannel_intfs = []
+ logger.info("Using portchannel ports {} on PTF for DUT {}".format(ptf_portchannel_intfs, dut.hostname))
+ return ptf_portchannel_intfs
- for intf in dut_portchannel_members:
- member = mg_facts['minigraph_ptf_indices'][intf]
- intf_name = 'eth{}'.format(member)
- ptf_portchannel_intfs.append(intf_name)
- logger.info("Using portchannel ports {} on PTF for DUT {}".format(ptf_portchannel_intfs, dut.hostname))
+def get_t1_active_ptf_ports(dut, tbinfo):
+ """
+ @summary: Get ptf port indices for active PortChannels on DUT
+ @param dut: The DUT we are testing against
+ @param tbinfo: The fixture tbinfo
+ @return: A dict { "PortChannel0001": [0, 1], ...}
+ """
+ config_facts = dut.get_running_config_facts()
+ mg_facts = dut.get_extended_minigraph_facts(tbinfo)
+
+ up_portchannels = dut.get_up_ip_ports()
+ ptf_portchannel_intfs = {}
+ for k, v in config_facts['PORTCHANNEL'].items():
+ if k in up_portchannels:
+ ptf_portchannel_intfs[k] = []
+ for member in v['members']:
+ ptf_portchannel_intfs[k].append(mg_facts['minigraph_ptf_indices'][member])
+
return ptf_portchannel_intfs
+
def update_mux_configs_and_config_reload(dut, state):
"""
@summary: Update config_db.json, and then load with 'config reload'
@@ -133,7 +163,7 @@ def update_mux_configs_and_config_reload(dut, state):
# Update mux_cable state and dump to a temp file
mux_cable_config_json = json.loads(mux_cable_config)
for _, config in mux_cable_config_json.items():
- config['state'] = state
+ config['state'] = state
mux_cable_config_json = {"MUX_CABLE": mux_cable_config_json}
TMP_FILE = "/tmp/mux_config.json"
with open(TMP_FILE, "w") as f:
@@ -165,6 +195,7 @@ def force_active_tor(dut, intf):
cmds.append("config muxcable mode active {}".format(i))
dut.shell_cmds(cmds=cmds)
+
def _get_tor_fanouthosts(tor_host, fanouthosts):
"""Helper function to get the fanout host objects that the current tor_host connected to.
@@ -475,29 +506,54 @@ def shutdown(vm_names=None, upper=False, lower=False):
eos_host.no_shutdown(vm_intf)
-@pytest.fixture(scope='function', autouse=True)
-def start_linkmgrd_heartbeat(ptfadapter, duthost, tbinfo):
- '''
- Send a GARP from from PTF->ToR from each PTF port connected to a mux cable
-
- This is needed since linkmgrd will not start sending heartbeats until the PTF MAC is learned in the DUT neighbor table
- '''
- garp_pkts = {}
+def mux_cable_server_ip(dut):
+ """Function for retrieving all ip of servers connected to mux_cable
- ptf_indices = duthost.get_extended_minigraph_facts(tbinfo)["minigraph_ptf_indices"]
- mux_cable_table = duthost.get_running_config_facts()['MUX_CABLE']
+ Args:
+ dut: The host object
- for vlan_intf, config in mux_cable_table.items():
- ptf_port_index = ptf_indices[vlan_intf]
- server_ip = ip_interface(config['server_ipv4'])
- ptf_mac = ptfadapter.dataplane.ports[(0, ptf_port_index)].mac()
+ Returns:
+ A dict: {"Ethernet12" : {"server_ipv4":"192.168.0.4/32", "server_ipv6":"fc02:1000::4/128"}, ....}
+ """
+ mux_cable_config = dut.shell("sonic-cfggen -d --var-json 'MUX_CABLE'")['stdout']
+ return json.loads(mux_cable_config)
- garp_pkt = testutils.simple_arp_packet(eth_src=ptf_mac,
- hw_snd=ptf_mac,
- ip_snd=str(server_ip.ip),
- ip_tgt=str(server_ip.ip), # Re-use server IP as target IP, since it is within the subnet of the VLAN IP
- arp_op=2)
- garp_pkts[ptf_port_index] = garp_pkt
- for port, pkt in garp_pkts.items():
- testutils.send_packet(ptfadapter, port, pkt)
+def check_tunnel_balance(ptfhost, active_tor_mac, standby_tor_mac, active_tor_ip, standby_tor_ip, targer_server_ip, ptf_portchannel_indices):
+ """
+    Function for testing traffic distribution among all active T1.
+ A test script will be running on ptf to generate traffic to standby interface, and the traffic will be forwarded to
+ active ToR. The running script will capture all traffic and verify if these packets are distributed evenly.
+ Args:
+        ptfhost: The ptf host connected to current testbed
+ active_tor_mac: MAC address of active ToR
+ standby_tor_mac: MAC address of the standby ToR
+ active_tor_ip: IP Address of Loopback0 of active ToR (For verifying packet)
+ standby_tor_ip: IP Address of Loopback0 of standby ToR (For verifying packet)
+        targer_server_ip: The IP address of server for testing. The mux cable connected to this server must be standby
+ ptf_portchannel_indices: A dict, the mapping from portchannel to ptf port indices
+ Returns:
+ None.
+ """
+ HASH_KEYS = ["src-port", "dst-port", "src-ip"]
+ params = {
+ "server_ip": targer_server_ip,
+ "active_tor_mac": active_tor_mac,
+ "standby_tor_mac": standby_tor_mac,
+ "active_tor_ip": active_tor_ip,
+ "standby_tor_ip": standby_tor_ip,
+ "ptf_portchannel_indices": ptf_portchannel_indices,
+ "hash_key_list": HASH_KEYS
+ }
+ logging.info("run ptf test for verifying IPinIP tunnel balance")
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
+ log_file = "/tmp/ip_in_ip_tunnel_test.{}.log".format(timestamp)
+ logging.info("PTF log file: %s" % log_file)
+ ptf_runner(ptfhost,
+ "ptftests",
+ "ip_in_ip_tunnel_test.IpinIPTunnelTest",
+ platform_dir="ptftests",
+ params=params,
+ log_file=log_file,
+ qlen=2000,
+ socket_recv_size=16384)
diff --git a/tests/common/dualtor/tunnel_traffic_utils.py b/tests/common/dualtor/tunnel_traffic_utils.py
new file mode 100644
index 00000000000..793c843d479
--- /dev/null
+++ b/tests/common/dualtor/tunnel_traffic_utils.py
@@ -0,0 +1,152 @@
+"""Tunnel traffic verification utilities."""
+import ipaddress
+import logging
+import operator
+import pytest
+import sys
+
+from io import BytesIO
+from ptf import mask, testutils
+from scapy.all import IP, Ether
+from tests.common.dualtor import dual_tor_utils
+
+
+@pytest.fixture(scope="function")
+def tunnel_traffic_monitor(ptfadapter, tbinfo):
+ """Return TunnelTrafficMonitor to verify inter-ToR tunnel traffic."""
+
+ class TunnelTrafficMonitor(object):
+        """Monitor tunnel traffic from standby ToR to active ToR."""
+
+ @staticmethod
+ def _get_t1_ptf_port_indexes(dut, tbinfo):
+ """Get the port indexes of those ptf port connecting to T1 switches."""
+ pc_ports = dual_tor_utils.get_t1_ptf_pc_ports(dut, tbinfo)
+ return [int(_.strip("eth")) for _ in reduce(operator.add, pc_ports.values(), [])]
+
+ @staticmethod
+ def _find_ipv4_lo_addr(config_facts):
+ """Find the ipv4 Loopback0 address."""
+ for addr in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]:
+ if isinstance(ipaddress.ip_network(addr), ipaddress.IPv4Network):
+ return addr.split("/")[0]
+
+ @staticmethod
+ def _build_tunnel_packet(outer_src_ip, outer_dst_ip):
+ """Build the expected tunnel packet."""
+ exp_pkt = testutils.simple_ip_packet(
+ ip_src=outer_src_ip,
+ ip_dst=outer_dst_ip,
+ pktlen=20
+ )
+ exp_pkt = mask.Mask(exp_pkt)
+ exp_pkt.set_do_not_care_scapy(Ether, "dst")
+ exp_pkt.set_do_not_care_scapy(Ether, "src")
+ exp_pkt.set_do_not_care_scapy(IP, "ihl")
+ exp_pkt.set_do_not_care_scapy(IP, "tos")
+ exp_pkt.set_do_not_care_scapy(IP, "len")
+ exp_pkt.set_do_not_care_scapy(IP, "id")
+ exp_pkt.set_do_not_care_scapy(IP, "flags")
+ exp_pkt.set_do_not_care_scapy(IP, "frag")
+ exp_pkt.set_do_not_care_scapy(IP, "ttl")
+ exp_pkt.set_do_not_care_scapy(IP, "proto")
+ exp_pkt.set_do_not_care_scapy(IP, "chksum")
+ exp_pkt.set_ignore_extra_bytes()
+ return exp_pkt
+
+ @staticmethod
+ def _dump_show_str(packet):
+ """Dump packet show output to string."""
+ _stdout, sys.stdout = sys.stdout, BytesIO()
+ try:
+ packet.show()
+ return sys.stdout.getvalue()
+ finally:
+ sys.stdout = _stdout
+
+ @staticmethod
+ def _check_ttl(packet):
+ """Check ttl field in the packet."""
+ outer_ttl, inner_ttl = packet[IP].ttl, packet[IP].payload[IP].ttl
+ logging.debug("Outer packet TTL: %s, inner packet TTL: %s", outer_ttl, inner_ttl)
+ if outer_ttl != 255:
+ return "outer packet's TTL expected TTL 255, actual %s" % outer_ttl
+ return ""
+
+ @staticmethod
+ def _check_tos(packet):
+ """Check ToS field in the packet."""
+
+ def _disassemble_ip_tos(tos):
+ return tos >> 2, tos & 0x3
+
+ outer_tos, inner_tos = packet[IP].tos, packet[IP].payload[IP].tos
+ outer_dscp, outer_ecn = _disassemble_ip_tos(outer_tos)
+ inner_dscp, inner_ecn = _disassemble_ip_tos(inner_tos)
+ logging.debug("Outer packet DSCP: {0:06b}, inner packet DSCP: {1:06b}".format(outer_dscp, inner_dscp))
+ logging.debug("Outer packet ECN: {0:02b}, inner packet ECN: {0:02b}".format(outer_ecn, inner_ecn))
+ check_res = []
+ if outer_dscp != inner_ecn:
+ check_res.append("outer packet DSCP not same as inner packet DSCP")
+ if outer_ecn != inner_ecn:
+ check_res.append("outer packet ECN not same as inner packet ECN")
+ return " ,".join(check_res)
+
+ def __init__(self, active_tor, standby_tor, existing=True):
+ """
+ Init the tunnel traffic monitor.
+
+ @param active_tor: active ToR that decaps the tunnel traffic.
+ @param standby_tor: standby ToR that does the encap.
+ """
+ self.active_tor = active_tor
+ self.standby_tor = standby_tor
+ self.listen_ports = sorted(self._get_t1_ptf_port_indexes(standby_tor, tbinfo))
+ self.ptfadapter = ptfadapter
+ active_tor_cfg_facts = self.active_tor.config_facts(
+ host=self.active_tor.hostname, source="persistent"
+ )["ansible_facts"]
+ standby_tor_cfg_facts = self.standby_tor.config_facts(
+ host=self.standby_tor.hostname, source="persistent"
+ )["ansible_facts"]
+ self.active_tor_lo_addr = self._find_ipv4_lo_addr(active_tor_cfg_facts)
+ self.standby_tor_lo_addr = self._find_ipv4_lo_addr(standby_tor_cfg_facts)
+ self.exp_pkt = self._build_tunnel_packet(self.standby_tor_lo_addr, self.active_tor_lo_addr)
+ self.rec_pkt = None
+ self.existing = existing
+
+ def __enter__(self):
+ self.ptfadapter.dataplane.flush()
+
+ def __exit__(self, *exc_info):
+ if exc_info[0]:
+ return
+ try:
+ port_index, rec_pkt = testutils.verify_packet_any_port(
+ ptfadapter,
+ self.exp_pkt,
+ ports=self.listen_ports
+ )
+ except AssertionError as detail:
+ logging.debug("Error occurred in polling for tunnel traffic", exc_info=True)
+ if "Did not receive expected packet on any of ports" in str(detail):
+ if self.existing:
+ raise detail
+ else:
+ raise detail
+ else:
+ self.rec_pkt = Ether(rec_pkt)
+ rec_port = self.listen_ports[port_index]
+ logging.debug("Receive encap packet from PTF interface %s", "eth%s" % rec_port)
+ logging.debug("Encapsulated packet:\n%s", self._dump_show_str(self.rec_pkt))
+ ttl_check_res = self._check_ttl(self.rec_pkt)
+ tos_check_res = self._check_tos(self.rec_pkt)
+ check_res = []
+ if ttl_check_res:
+ check_res.append(ttl_check_res)
+ if tos_check_res:
+ check_res.append(tos_check_res)
+ if check_res:
+ raise ValueError(", ".join(check_res) + ".")
+
+ return TunnelTrafficMonitor
diff --git a/tests/common/fixtures/advanced_reboot.py b/tests/common/fixtures/advanced_reboot.py
index dae77f4a90e..8d0432564f9 100644
--- a/tests/common/fixtures/advanced_reboot.py
+++ b/tests/common/fixtures/advanced_reboot.py
@@ -79,6 +79,7 @@ def __extractTestParam(self):
self.readyTimeout = self.request.config.getoption("--ready_timeout")
self.replaceFastRebootScript = self.request.config.getoption("--replace_fast_reboot_script")
self.postRebootCheckScript = self.request.config.getoption("--post_reboot_check_script")
+ self.bgpV4V6TimeDiff = self.request.config.getoption("--bgp_v4_v6_time_diff")
# Set default reboot limit if it is not given
if self.rebootLimit is None:
@@ -482,6 +483,7 @@ def __runPtfRunner(self, rebootOper=None):
"setup_fdb_before_test" : True,
"vnet" : self.vnet,
"vnet_pkts" : self.vnetPkts,
+ "bgp_v4_v6_time_diff": self.bgpV4V6TimeDiff
},
log_file=u'/tmp/advanced-reboot.ReloadTest.log',
module_ignore_errors=self.moduleIgnoreErrors
diff --git a/tests/common/fixtures/ptfhost_utils.py b/tests/common/fixtures/ptfhost_utils.py
index 068a904c5db..922ba39933f 100644
--- a/tests/common/fixtures/ptfhost_utils.py
+++ b/tests/common/fixtures/ptfhost_utils.py
@@ -1,7 +1,9 @@
+import json
import os
import pytest
import logging
+from ipaddress import ip_interface
from jinja2 import Template
from natsort import natsorted
@@ -10,6 +12,7 @@
ROOT_DIR = "/root"
OPT_DIR = "/opt"
+TMP_DIR = '/tmp'
SUPERVISOR_CONFIG_DIR = "/etc/supervisor/conf.d/"
SCRIPTS_SRC_DIR = "scripts/"
TEMPLATES_DIR = "templates/"
@@ -21,6 +24,8 @@
ICMP_RESPONDER_CONF_TEMPL = "icmp_responder.conf.j2"
CHANGE_MAC_ADDRESS_SCRIPT = "scripts/change_mac.sh"
REMOVE_IP_ADDRESS_SCRIPT = "scripts/remove_ip.sh"
+GARP_SERVICE_PY = 'garp_service.py'
+GARP_SERVICE_CONF_TEMPL = 'garp_service.conf.j2'
@pytest.fixture(scope="session", autouse=True)
@@ -173,7 +178,7 @@ def run_icmp_responder(duthost, ptfhost, tbinfo):
logger.debug("Copy icmp_responder.py to ptfhost '{0}'".format(ptfhost.hostname))
ptfhost.copy(src=os.path.join(SCRIPTS_SRC_DIR, ICMP_RESPONDER_PY), dest=OPT_DIR)
- logging.debug("Start running icmp_responder")
+ logging.info("Start running icmp_responder")
templ = Template(open(os.path.join(TEMPLATES_DIR, ICMP_RESPONDER_CONF_TEMPL)).read())
ptf_indices = duthost.get_extended_minigraph_facts(tbinfo)["minigraph_ptf_indices"]
vlan_intfs = duthost.get_vlan_intfs()
@@ -191,5 +196,39 @@ def run_icmp_responder(duthost, ptfhost, tbinfo):
yield
- logging.debug("Stop running icmp_responder")
+ logging.info("Stop running icmp_responder")
ptfhost.shell("supervisorctl stop icmp_responder")
+
+
+@pytest.fixture(scope='session', autouse=True)
+def run_garp_service(duthost, ptfhost, tbinfo, change_mac_addresses):
+
+ garp_config = {}
+
+ ptf_indices = duthost.get_extended_minigraph_facts(tbinfo)["minigraph_ptf_indices"]
+ mux_cable_table = duthost.get_running_config_facts()['MUX_CABLE']
+
+ logger.info("Generating GARP service config file")
+
+ for vlan_intf, config in mux_cable_table.items():
+ ptf_port_index = ptf_indices[vlan_intf]
+ server_ip = ip_interface(config['server_ipv4']).ip
+
+ garp_config[ptf_port_index] = {
+ 'target_ip': '{}'.format(server_ip)
+ }
+
+ ptfhost.copy(src=os.path.join(SCRIPTS_SRC_DIR, GARP_SERVICE_PY), dest=OPT_DIR)
+
+ with open(os.path.join(TEMPLATES_DIR, GARP_SERVICE_CONF_TEMPL)) as f:
+ template = Template(f.read())
+
+ ptfhost.copy(content=json.dumps(garp_config, indent=4, sort_keys=True), dest=os.path.join(TMP_DIR, 'garp_conf.json'))
+ ptfhost.copy(content=template.render(garp_service_args = '--interval 1'), dest=os.path.join(SUPERVISOR_CONFIG_DIR, 'garp_service.conf'))
+ logger.info("Starting GARP Service on PTF host")
+ ptfhost.shell('supervisorctl update')
+ ptfhost.shell('supervisorctl start garp_service')
+
+ yield
+
+ ptfhost.shell('supervisorctl stop garp_service')
diff --git a/tests/common/helpers/redis.py b/tests/common/helpers/redis.py
new file mode 100644
index 00000000000..9ce7f4505fb
--- /dev/null
+++ b/tests/common/helpers/redis.py
@@ -0,0 +1,392 @@
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class RedisCli(object):
+ """Base class for interface to RedisDb using redis-cli command.
+
+ Attributes:
+ host: a SonicHost or SonicAsic. Commands will be run on this shell.
+ database: Redis database number.
+ pid: Port number of redis db.
+ """
+
+ def __init__(self, host, database=1, pid=6379):
+ """Initializes base class with defaults"""
+ self.host = host
+ self.database = database
+ self.pid = pid
+
+    def _cli_prefix(self):
+        """Builds opening of redis CLI command for other methods."""
+        # NOTE(review): only the port/database arguments are built here; the redis-cli
+        # invocation (and any docker-exec wrapping) is presumably supplied by host.run_redis_cli_cmd.
+        return " -p {pid} --raw -n {db} ".format(db=self.database, pid=self.pid)
+
+ def _run_and_check(self, cmd):
+ """
+ Executes a redis CLI command and checks the output for empty string.
+
+ Args:
+ cmd: Full CLI command to run.
+
+ Returns:
+ Ansible CLI output dictionary with stdout and stdout_lines keys on success.
+ Empty dictionary on error.
+
+ """
+ result = self.host.run_redis_cli_cmd(cmd)
+
+ if len(result["stdout_lines"]) == 0:
+ logger.error("No command response: %s" % cmd)
+ return {}
+
+ return result
+
+ def _run_and_raise(self, cmd):
+ """
+ Executes a redis CLI command and checks the output for empty string.
+
+ Args:
+ cmd: Full CLI command to run.
+
+ Returns:
+ Ansible CLI output dictionary with stdout and stdout_lines keys on success.
+
+ Raises:
+ Exception: If the command had no output.
+
+ """
+ result = self.host.run_redis_cli_cmd(cmd)
+
+ if len(result["stdout_lines"]) == 0:
+ logger.error("No command response: %s" % cmd)
+ raise Exception("Command: %s returned no response." % cmd)
+
+ return result
+
+ def get_key_value(self, key):
+ """
+ Executes a redis CLI get command.
+
+ Args:
+ key: full name of the key to get.
+
+ Returns:
+ The corresponding value of the key.
+
+ Raises:
+ RedisKeyNotFound: If the key has no value or is not present.
+
+ """
+ cmd = self._cli_prefix() + "get " + key
+ result = self._run_and_check(cmd)
+ if result == {}:
+ raise RedisKeyNotFound("Key: %s not found in rediscmd: %s" % (key, cmd))
+ else:
+ return result['stdout']
+
+ def hget_key_value(self, key, field):
+ """
+ Executes a redis CLI hget command.
+
+ Args:
+ key: full name of the key to get.
+ field: Name of the hash field to get.
+
+ Returns:
+ The corresponding value of the key.
+
+ Raises:
+ RedisKeyNotFound: If the key or field has no value or is not present.
+
+ """
+ cmd = self._cli_prefix() + "hget {} {}".format(key, field)
+ result = self._run_and_check(cmd)
+ if result == {}:
+ raise RedisKeyNotFound("Key: %s, field: %s not found in rediscmd: %s" % (key, field, cmd))
+ else:
+ return result['stdout']
+
+ def get_and_check_key_value(self, key, value, field=None):
+ """
+ Executes a redis CLI get or hget and validates the response against a provided field.
+
+ Args:
+ key: full name of the key to get.
+ value: expected value to test against.
+ field: Optional; Name of the hash field to use with hget.
+
+ Returns:
+ True if the validation succeeds.
+
+ Raises:
+ RedisKeyNotFound: If the key or field has no value or is not present.
+ AssertionError: If the fetched value from redis does not match the provided value.
+
+ """
+ if field is None:
+ result = self.get_key_value(key)
+ else:
+ result = self.hget_key_value(key, field)
+
+ if str(result).lower() == str(value).lower():
+ logger.info("Value {val} matches output {out}".format(val=value, out=result))
+ return True
+ else:
+ raise AssertionError("redis value error: %s != %s key was: %s" % (result, value, key))
+
+
+class AsicDbCli(RedisCli):
+ """
+ Class to interface with the ASICDB on a host.
+
+ Attributes:
+ host: a SonicHost or SonicAsic. Commands will be run on this shell.
+
+ """
+ ASIC_SWITCH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH"
+ ASIC_SYSPORT_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SYSTEM_PORT"
+ ASIC_PORT_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_PORT"
+ ASIC_HOSTIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF"
+ ASIC_ROUTERINTF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE"
+ ASIC_NEIGH_ENTRY_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY"
+
+ def __init__(self, host):
+ """
+ Initializes a connection to the ASIC DB (database 1)
+ """
+ super(AsicDbCli, self).__init__(host, 1)
+ # cache this to improve speed
+ self.hostif_portidlist = []
+
+ def get_switch_key(self):
+ """Returns a list of keys in the switch table"""
+ cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_SWITCH_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"][0]
+
+ def get_system_port_key_list(self):
+ """Returns a list of keys in the system port table"""
+ cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_SYSPORT_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"]
+
+ def get_port_key_list(self):
+ """Returns a list of keys in the local port table"""
+ cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_PORT_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"]
+
+ def get_hostif_list(self):
+ """Returns a list of keys in the host interface table"""
+ cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_HOSTIF_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"]
+
+ def get_router_if_list(self):
+ """Returns a list of keys in the router interface table"""
+ cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_ROUTERINTF_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"]
+
+ def get_neighbor_key_by_ip(self, ipaddr):
+ """Returns the key in the neighbor table that is for a specific IP neighbor
+
+ Args:
+ ipaddr: The IP address to search for in the neighbor table.
+
+ """
+ result = self._run_and_raise(self._cli_prefix() + "KEYS %s*%s*" % (AsicDbCli.ASIC_NEIGH_ENTRY_TABLE, ipaddr))
+ neighbor_key = None
+ match_str = '"ip":"%s"' % ipaddr
+ for key in result["stdout_lines"]:
+ if match_str in key:
+ neighbor_key = key
+ break
+
+ return neighbor_key
+
+ def get_neighbor_value(self, neighbor_key, field):
+ """
+ Returns a value of a field in the neighbor table.
+
+ Note:
+ The structure of the keys in this table cause the command() method to fail, so this function uses shell() to
+ retrieve the command output.
+
+ Args:
+ neighbor_key: The full key of the neighbor table.
+ field: The field to get in the neighbor hash table.
+ """
+ cmd = ["/usr/bin/redis-cli", "-n", "1", "HGET", neighbor_key, field]
+ if self.host.namespace is not None:
+ cmd = ["sudo", "ip", "netns", "exec"] + cmd
+ result = self.host.sonichost.shell(argv=cmd)
+ logger.debug("neigh result: %s", result['stdout'])
+ return result['stdout']
+
+ def get_hostif_portid_oidlist(self, refresh=False):
+ """
+ Returns a list of portids associated with the hostif entries on the asics.
+
+ Walks through the HOSTIF table getting each port ID from the cache and returns the list. The list
+ is saved so it can be returned directly in subsequent calls.
+
+ Args:
+ refresh: Forces the redis DB to be requeried after the first time.
+
+
+ """
+ if self.hostif_portidlist != [] and refresh is False:
+ return self.hostif_portidlist
+
+ hostif_keylist = self.get_hostif_list()
+ return_list = []
+ for hostif_key in hostif_keylist:
+ hostif_portid = self.hget_key_value(hostif_key, 'SAI_HOSTIF_ATTR_OBJ_ID')
+ return_list.append(hostif_portid)
+ self.hostif_portidlist = return_list
+ return return_list
+
+ def find_hostif_by_portid(self, portid):
+ """
+ Returns an HOSTIF table key for the port specified.
+
+ Args:
+ portid: A port OID (oid:0x1000000000004)
+
+ Raises:
+ RedisKeyNotFound: If no hostif exists with the portid provided.
+ """
+ hostif_keylist = self.get_hostif_list()
+ for hostif_key in hostif_keylist:
+ hostif_portid = self.hget_key_value(hostif_key, 'SAI_HOSTIF_ATTR_OBJ_ID')
+ if hostif_portid == portid:
+ return hostif_key
+
+ raise RedisKeyNotFound("Can't find hostif in asicdb with portid: %s", portid)
+
+ def get_rif_porttype(self, portid):
+ """
+ Determines whether a specific port OID referenced in a router interface entry is a local port or a system port.
+
+ Args:
+ portid: the port oid from SAI_ROUTER_INTERFACE_ATTR_PORT_ID (oid:0x6000000000c4d)
+
+ Returns:
+ "hostif" if the port ID has a host interface
+ "sysport" if it is a system port.
+ "port" if the port ID is in local port table but has no hostif
+ "other" if it is not found in any port table
+ """
+ # could be a localport
+ if "%s:%s" % (
+ AsicDbCli.ASIC_PORT_TABLE,
+ portid) in self.get_port_key_list() and portid in self.get_hostif_portid_oidlist():
+ return "hostif"
+ # could be a system port
+ elif "%s:%s" % (AsicDbCli.ASIC_SYSPORT_TABLE, portid) in self.get_system_port_key_list():
+ return "sysport"
+ # could be something else
+ elif "%s:%s" % (AsicDbCli.ASIC_PORT_TABLE, portid) in self.get_port_key_list():
+ return "port"
+ else:
+ return "other"
+
+
+class AppDbCli(RedisCli):
+ """
+ Class to interface with the APPDB on a host.
+
+ Attributes:
+ host: a SonicHost or SonicAsic. Commands will be run on this shell.
+
+ """
+ APP_NEIGH_TABLE = "NEIGH_TABLE"
+
+ def __init__(self, host):
+ super(AppDbCli, self).__init__(host, 0)
+
+ def get_neighbor_key_by_ip(self, ipaddr):
+ """Returns the key in the neighbor table that is for a specific IP neighbor
+
+ Args:
+ ipaddr: The IP address to search for in the neighbor table.
+
+ """
+ result = self._run_and_raise(self._cli_prefix() + "KEYS %s:*%s*" % (AppDbCli.APP_NEIGH_TABLE, ipaddr))
+ neighbor_key = None
+ for key in result["stdout_lines"]:
+ if key.endswith(ipaddr):
+ neighbor_key = key
+ break
+
+ return neighbor_key
+
+
+class VoqDbCli(RedisCli):
+ """
+ Class to interface with the Chassis VOQ DB on a supervisor.
+
+ Attributes:
+ host: a SonicHost instance for a supervisor card. Commands will be run on this shell.
+
+ """
+
+ def __init__(self, host):
+ """Initializes the class with the database parameters and finds the IP address of the database"""
+ super(VoqDbCli, self).__init__(host, 12, 6380)
+ output = host.command("grep chassis_db_address /etc/sonic/chassisdb.conf")
+ # chassis_db_address=10.0.0.16
+ self.ip = output['stdout'].split("=")[1]
+
+ def _cli_prefix(self):
+ """Builds opening of redis CLI command for other methods."""
+ return "-h {ip} -p {pid} --raw -n {db} ".format(
+ ip=self.ip, db=self.database, pid=self.pid)
+
+ def get_neighbor_key_by_ip(self, ipaddr):
+ """Returns the key in the neighbor table that is for a specific IP neighbor
+
+ Args:
+ ipaddr: The IP address to search for in the neighbor table.
+
+ """
+ cmd = self._cli_prefix() + 'KEYS "SYSTEM_NEIGH|*%s*"' % ipaddr
+ result = self._run_and_raise(cmd)
+ neighbor_key = None
+ for key in result["stdout_lines"]:
+ if key.endswith(ipaddr):
+ neighbor_key = key
+ break
+
+ return neighbor_key
+
+    def get_router_interface_id(self, slot, asic, port):
+        """Returns the router OID stored in the router interface table entry for the provided entry.
+
+        Args:
+            slot: slot of the router interface in either numeric or text. (3 or Linecard3; "Linecard" is prefixed to bare numbers)
+            asic: ASIC number of the router interface in either numeric or text (0 or Asic0)
+            port: Full text of port (Ethernet17)
+
+
+        """
+        slot = str(slot)
+        if slot.isdigit():
+            slot_str = "Linecard" + slot
+        else:
+            slot_str = slot
+
+        asic = str(asic)
+        if asic.isdigit():
+            asic_str = "Asic" + asic
+        else:
+            asic_str = asic
+
+        key = "SYSTEM_INTERFACE|{}|{}|{}".format(slot_str, asic_str, port)
+        return self.hget_key_value(key, "rif_id")
+
+
+class RedisKeyNotFound(KeyError):
+ """
+ Raised when requested keys or fields are not found in the redis db.
+ """
+ pass
diff --git a/tests/common/plugins/pdu_controller/__init__.py b/tests/common/plugins/pdu_controller/__init__.py
index 6a2cc651557..35d1d846eb4 100644
--- a/tests/common/plugins/pdu_controller/__init__.py
+++ b/tests/common/plugins/pdu_controller/__init__.py
@@ -1,23 +1,14 @@
import logging
import pytest
+from pdu_manager import pdu_manager_factory
-def pdu_controller_factory(controller_ip, controller_protocol, dut_hostname, pdu):
- """
- @summary: Factory function for creating PDU controller according to different management protocol.
- @param controller_ip: IP address of the PDU controller host.
- @param controller_protocol: Management protocol supported by the PDU controller host.
- @param dut_hostname: Hostname of the DUT to be controlled by the PDU controller.
- """
- logging.info("Creating pdu controller object")
- if controller_protocol == "snmp":
- import snmp_pdu_controllers
- return snmp_pdu_controllers.get_pdu_controller(controller_ip, dut_hostname, pdu)
+logger = logging.getLogger(__name__)
@pytest.fixture(scope="module")
-def pdu_controller(duthosts, rand_one_dut_hostname, pdu):
+def pdu_controller(duthosts, rand_one_dut_hostname, conn_graph_facts, pdu):
"""
@summary: Fixture for controlling power supply to PSUs of DUT
@param duthost: Fixture duthost defined in sonic-mgmt/tests/conftest.py
@@ -25,39 +16,18 @@ def pdu_controller(duthosts, rand_one_dut_hostname, pdu):
controller_base.py.
"""
duthost = duthosts[rand_one_dut_hostname]
-
- logging.info("Creating pdu_controller fixture")
inv_mgr = duthost.host.options["inventory_manager"]
- pdu_host = inv_mgr.get_host(duthost.hostname).get_vars().get("pdu_host")
- if not pdu_host:
- logging.info("No 'pdu_host' is defined in inventory file for '%s'. Unable to create pdu_controller" %
- duthost.hostname)
- yield None
- return
-
- controller_vars = inv_mgr.get_host(pdu_host).get_vars()
-
- controller_ip = controller_vars.get("ansible_host")
- if not controller_ip:
- logging.info("No 'ansible_host' is defined in inventory file for '%s'" % pdu_host)
- logging.info("Unable to create pdu_controller for %s" % duthost.hostname)
- yield None
- return
-
- controller_protocol = controller_vars.get("protocol")
- if not controller_protocol:
- logging.info("No protocol is defined in inventory file for '%s'. Try to use default 'snmp'" % pdu_host)
- controller_protocol = "snmp"
+ pdu_host_list = inv_mgr.get_host(duthost.hostname).get_vars().get("pdu_host")
+ pdu_hosts = {}
+ for ph in pdu_host_list.split(','):
+ var_list = inv_mgr.get_host(ph).get_vars()
+ pdu_hosts[ph] = var_list
- controller = pdu_controller_factory(controller_ip, controller_protocol, duthost.hostname, pdu)
+ controller = pdu_manager_factory(duthost.hostname, pdu_hosts, conn_graph_facts, pdu)
yield controller
- logging.info("pdu_controller fixture teardown, ensure that all PDU outlets are turned on after test")
+ logger.info("pdu_controller fixture teardown, ensure that all PDU outlets are turned on after test")
if controller:
- outlet_status = controller.get_outlet_status()
- if outlet_status:
- for outlet in outlet_status:
- if not outlet["outlet_on"]:
- controller.turn_on_outlet(outlet["outlet_id"])
+ controller.turn_on_outlet()
controller.close()
diff --git a/tests/common/plugins/pdu_controller/pdu_manager.py b/tests/common/plugins/pdu_controller/pdu_manager.py
new file mode 100644
index 00000000000..5ecf706aa32
--- /dev/null
+++ b/tests/common/plugins/pdu_controller/pdu_manager.py
@@ -0,0 +1,258 @@
+"""
+ PduManager is intended to solve the issue where DUT connects to
+ multiple PDU controllers.
+
+    It is also intended to hide the dependency on the fake outlet_id,
+    and to reference an outlet by its outlet dictionary directly. With this,
+ we could enable different way to identify outlet, e.g. with the
+ outlet number from graph.
+
+    It is also intended to create a smooth transition from defining
+ PDU in inventory to defining PDU in connection graph. Data in
+ graph is preferred, but if graph data is missing, existing
+ inventory data will be used.
+
+ PDU manager implements the same base PDU controller APIs and
+ collect status from and distribute operations to individual PDU
+ controllers.
+"""
+
+import logging
+import copy
+from snmp_pdu_controllers import get_pdu_controller
+
+logger = logging.getLogger(__name__)
+
+
+class PduManager():
+
+ def __init__(self, dut_hostname):
+ """
+ dut_hostname is the target DUT host name. The dut
+ defines which PDU(s) and outlet(s) it connected to.
+
+ It is NOT the PDU host name. PDU host name is defined
+ either in graph or in inventory and associated with
+ the DUT.
+ """
+ self.dut_hostname = dut_hostname
+ """
+        controllers is an array of controller dictionaries with
+ following information:
+ {
+ 'psu_name' : name of the PSU on DUT,
+ 'host' : controller_IP_address,
+ 'controller' : controller instance,
+ 'outlets' : cached outlet status,
+ 'psu_peer' : psu peer information,
+ }
+ """
+ self.controllers = []
+
+ def _update_outlets(self, outlets, pdu_index):
+ for outlet in outlets:
+ outlet['pdu_index'] = pdu_index
+ outlet['pdu_name'] = self.controllers[pdu_index]['psu_peer']['peerdevice']
+
+ def add_controller(self, psu_name, psu_peer, pdu_vars):
+ """
+ Add a controller to be managed.
+ Sampel psu_peer:
+ {
+ "peerdevice": "pdu-107",
+ "HwSku": "Sentry",
+ "Protocol": "snmp",
+ "ManagementIp": "10.0.0.107",
+ "Type": "Pdu",
+ "peerport": "39"
+ }
+ """
+ if 'Protocol' not in psu_peer or 'ManagementIp' not in psu_peer:
+ logger.info('psu_peer {} missing critical inforamtion'.format(psu_peer))
+ return
+
+ if psu_peer['Protocol'] != 'snmp':
+ logger.warning('Controller protocol {} is not supported'.format(protocol))
+ return
+
+ controller = None
+ pdu_ip = psu_peer['ManagementIp']
+ shared_pdu = False
+ for pdu in self.controllers:
+ if psu_name in pdu:
+ logger.warning('PSU {} already has a pdu definition'.format(psu_name))
+ return
+ if pdu_ip == pdu['host']:
+ shared_pdu = True # Sharing controller with another outlet
+ controller = pdu['controller']
+
+ outlets = []
+ pdu = {
+ 'psu_name': psu_name,
+ 'host': pdu_ip,
+ 'controller': controller,
+ 'outlets': outlets,
+ 'psu_peer': psu_peer,
+ }
+ next_index = len(self.controllers)
+ self.controllers.append(pdu)
+ if not shared_pdu:
+ controller = get_pdu_controller(pdu_ip, self.dut_hostname, pdu_vars)
+ if not controller:
+ logger.warning('Failed creating pdu controller: {}'.format(psu_peer))
+ return
+ outlets = controller.get_outlet_status()
+ self._update_outlets(outlets, next_index)
+ pdu['outlets'] = outlets
+ pdu['controller'] = controller
+
+ def _get_pdu_controller(self, pdu_index):
+ pdu = self.controllers[pdu_index]
+ return pdu['controller']
+
+    def turn_on_outlet(self, outlet=None):
+        """
+        Turning on an outlet. The outlet contains enough information
+        to identify the pdu controller + outlet ID.
+        When outlet is None, all outlets will be turned on.
+        """
+        if outlet is not None:
+            controller = self._get_pdu_controller(outlet['pdu_index'])
+            return controller.turn_on_outlet(outlet['outlet_id'])
+        else:
+            # turn on all outlets
+            ret = True
+            for controller in self.controllers:
+                for outlet in controller['outlets']:
+                    rc = controller['controller'].turn_on_outlet(outlet['outlet_id'])
+                    ret = ret and rc
+
+            return ret
+
+    def turn_off_outlet(self, outlet=None):
+        """
+        Turning off an outlet. The outlet contains enough information
+        to identify the pdu controller + outlet ID.
+        When outlet is None, all outlets will be turned off.
+        """
+        if outlet is not None:
+            controller = self._get_pdu_controller(outlet['pdu_index'])
+            return controller.turn_off_outlet(outlet['outlet_id'])
+        else:
+            # turn off all outlets
+            ret = True
+            for controller in self.controllers:
+                for outlet in controller['outlets']:
+                    rc = controller['controller'].turn_off_outlet(outlet['outlet_id'])
+                    ret = ret and rc
+
+            return ret
+
+ def get_outlet_status(self, outlet=None):
+ """
+ Getting outlet status. The outlet contains enough information
+ to identify the pdu controller + outlet ID.
+ when outlet is None, status of all outlets will be returned.
+ """
+ status = []
+ if outlet is not None:
+ pdu_index = outlet['pdu_index']
+ controller = self._get_pdu_controller(pdu_index)
+ outlets = controller.get_outlet_status(outlet['outlet_id'])
+ self._update_outlets(outlets, pdu_index)
+ status = status + outlets
+ else:
+ # collect all status
+ for pdu_index, controller in enumerate(self.controllers):
+ if len(controller['outlets']) > 0:
+ outlets = controller['controller'].get_outlet_status()
+ self._update_outlets(outlets, pdu_index)
+ status = status + outlets
+
+ return status
+
+ def close(self):
+ for controller in self.controllers:
+ if len(controller['outlets']) > 0:
+ controller['controller'].close()
+
+
+def _merge_dev_link(devs, links):
+ ret = copy.deepcopy(devs)
+ for host, info in links.items():
+ if host not in ret:
+ ret[host] = {}
+
+ for key, val in info.items():
+ if key not in ret[host]:
+ ret[host][key] = {}
+ ret[host][key].update(val)
+
+ return ret
+
+
+def _build_pdu_manager_from_graph(pduman, dut_hostname, conn_graph_facts, pdu_vars):
+ logger.info('Creating pdu manager from graph information')
+ pdu_devs = conn_graph_facts['device_pdu_info']
+ pdu_links = conn_graph_facts['device_pdu_links']
+ pdu_info = _merge_dev_link(pdu_devs, pdu_links)
+ if dut_hostname not in pdu_info or not pdu_info[dut_hostname]:
+ # No PDU information in graph
+ logger.info('PDU informatin for {} is not found in graph'.format(dut_hostname))
+ return False
+
+ for psu_name, psu_peer in pdu_info[dut_hostname].items():
+ pduman.add_controller(psu_name, psu_peer, pdu_vars)
+
+ return len(pduman.controllers) > 0
+
+
+def _build_pdu_manager_from_inventory(pduman, dut_hostname, pdu_hosts, pdu_vars):
+ logger.info('Creating pdu manager from inventory information')
+ if not pdu_hosts:
+ logger.info('Do not have sufficient PDU information to create PDU manager for host {}'.format(dut_hostname))
+ return False
+
+ for ph, var_list in pdu_hosts.items():
+ controller_ip = var_list.get("ansible_host")
+ if not controller_ip:
+ logger.info('No "ansible_host" is defined in inventory file for "{}"'.format(pdu_hosts))
+ logger.info('Unable to create pdu_controller for {}'.format(dut_hostname))
+ continue
+
+ controller_protocol = var_list.get("protocol")
+ if not controller_protocol:
+ logger.info(
+ 'No protocol is defined in inventory file for "{}". Try to use default "snmp"'.format(pdu_hosts))
+ controller_protocol = 'snmp'
+
+ psu_peer = {
+ 'peerdevice': ph,
+ 'HwSku': 'unknown',
+ 'Protocol': controller_protocol,
+ 'ManagementIp': controller_ip,
+ 'Type': 'Pdu',
+ 'peerport': 'probing',
+ }
+ pduman.add_controller(ph, psu_peer, pdu_vars)
+
+ return len(pduman.controllers) > 0
+
+
+def pdu_manager_factory(dut_hostname, pdu_hosts, conn_graph_facts, pdu_vars):
+    """
+    @summary: Factory function for creating PDU manager instance.
+    @param dut_hostname: DUT host name.
+    @param pdu_hosts: dict mapping each PDU host name to its inventory variables.
+    @param conn_graph_facts: connection graph facts.
+    @param pdu_vars: pdu community strings
+    """
+    logger.info('Creating pdu manager object')
+    pduman = PduManager(dut_hostname)
+    if _build_pdu_manager_from_graph(pduman, dut_hostname, conn_graph_facts, pdu_vars):
+        return pduman
+
+    if _build_pdu_manager_from_inventory(pduman, dut_hostname, pdu_hosts, pdu_vars):
+        return pduman
+
+    return None
diff --git a/tests/common/plugins/pdu_controller/snmp_pdu_controllers.py b/tests/common/plugins/pdu_controller/snmp_pdu_controllers.py
index 56d437ba604..63f9132b071 100644
--- a/tests/common/plugins/pdu_controller/snmp_pdu_controllers.py
+++ b/tests/common/plugins/pdu_controller/snmp_pdu_controllers.py
@@ -118,7 +118,7 @@ def _get_pdu_ports(self):
This method depends on this configuration to find out the PDU ports connected to PSUs of specific DUT.
"""
if not self.pduType:
- logging.info('PDU type is unknown')
+ logging.info('PDU type is unknown: pdu_ip {} dut {}'.format(self.controller, self.hostname))
return
max_lane = 5
@@ -149,7 +149,7 @@ def _get_pdu_ports(self):
self.map_host_to_lane(lane_id)
break
else:
- logging.error("{} device is not attached to any of PDU port".format(self.hostname.lower()))
+ logging.error("{} device is not attached to any outlet of PDU {}".format(self.hostname.lower(), self.controller))
def map_host_to_lane(self, lane_id):
"""
@@ -198,7 +198,7 @@ def turn_on_outlet(self, outlet):
@return: Return true if successfully execute the command for turning on power. Otherwise return False.
"""
if not self.pduType:
- logging.error('Unable to turn on: PDU type is unknown')
+ logging.error('Unable to turn on: PDU type is unknown: pdu_ip {} dut {}'.format(self.controller, self.hostname))
return False
port_oid = self.pPORT_CONTROL_BASE_OID + self.pdu_ports[rfc1902.Integer(outlet)]
@@ -231,7 +231,7 @@ def turn_off_outlet(self, outlet):
@return: Return true if successfully execute the command for turning off power. Otherwise return False.
"""
if not self.pduType:
- logging.error('Unable to turn off: PDU type is unknown')
+ logging.error('Unable to turn off: PDU type is unknown: pdu_ip {} dut {}'.format(self.controller, self.hostname))
return False
port_oid = self.pPORT_CONTROL_BASE_OID + self.pdu_ports[rfc1902.Integer(outlet)]
@@ -268,7 +268,7 @@ def get_outlet_status(self, outlet=None):
"""
results = []
if not self.pduType:
- logging.error('Unable to retrieve status: PDU type is unknown')
+ logging.error('Unable to retrieve status: PDU type is unknown: pdu_ip {} dut {}'.format(self.controller, self.hostname))
return results
cmdGen = cmdgen.CommandGenerator()
diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py
index 81ff0df9466..8b1fff29f09 100644
--- a/tests/common/plugins/sanity_check/checks.py
+++ b/tests/common/plugins/sanity_check/checks.py
@@ -106,7 +106,7 @@ def _find_down_ports(dut, phy_interfaces, ip_interfaces):
def check_interfaces(duthosts):
def _check():
check_results = []
- for dut in duthosts:
+ for dut in duthosts.frontend_nodes:
logger.info("Checking interfaces status on %s..." % dut.hostname)
networking_uptime = dut.get_networking_uptime().seconds
@@ -161,7 +161,7 @@ def _check():
def check_bgp(duthosts):
def _check():
check_results = []
- for dut in duthosts:
+ for dut in duthosts.frontend_nodes:
def _check_bgp_status_helper():
asic_check_results = []
bgp_facts = dut.bgp_facts(asic_index='all')
diff --git a/tests/common/reboot.py b/tests/common/reboot.py
index 09183f7cb6c..1c1cbd50515 100644
--- a/tests/common/reboot.py
+++ b/tests/common/reboot.py
@@ -161,7 +161,7 @@ def execute_reboot_helper():
logger.info('waiting for warmboot-finalizer service to become activating')
finalizer_state = get_warmboot_finalizer_state(duthost)
while finalizer_state != 'activating':
- dut_datetime_after_ssh = duthost.get_up_time()
+ dut_datetime_after_ssh = duthost.get_now_time()
time_passed = float(dut_datetime_after_ssh.strftime("%s")) - float(dut_datetime.strftime("%s"))
if time_passed > wait:
raise Exception('warmboot-finalizer never reached state "activating"')
diff --git a/tests/common/testbed.py b/tests/common/testbed.py
index 83bab7cecb3..55c45de7b91 100644
--- a/tests/common/testbed.py
+++ b/tests/common/testbed.py
@@ -21,7 +21,8 @@
class TestbedInfo(object):
"""Parse the testbed file used to describe whole testbed info."""
- TESTBED_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment')
+ TESTBED_FIELDS_DEPRECATED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment')
+ TESTBED_FIELDS_RECOMMENDED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'inv_name', 'auto_recover', 'comment')
def __init__(self, testbed_file):
if testbed_file.endswith(".csv"):
@@ -69,13 +70,18 @@ def _ip_mask_to_cidr(self, ip_address, netmask):
def _read_testbed_topo_from_csv(self):
"""Read csv testbed info file."""
with open(self.testbed_filename) as f:
- topo = csv.DictReader(f, fieldnames=self.TESTBED_FIELDS,
- delimiter=',')
+ header = [field.strip(' #') for field in f.readline().strip().split(',')]
+            # header now holds the stripped field names parsed from the first CSV line
+ if len(header) == len(self.TESTBED_FIELDS_DEPRECATED):
+ self.testbed_fields = self.TESTBED_FIELDS_DEPRECATED
+ elif len(header) == len(self.TESTBED_FIELDS_RECOMMENDED):
+ self.testbed_fields = self.TESTBED_FIELDS_RECOMMENDED
+ else:
+ raise ValueError('Unsupported testbed fields %s' % str(header))
+ for header_field, expect_field in zip(header, self.testbed_fields):
+ assert header_field == expect_field
- # Validate all field are in the same order and are present
- header = next(topo)
- for field in self.TESTBED_FIELDS:
- assert header[field].replace('#', '').strip() == field
+ topo = csv.DictReader(f, fieldnames=self.testbed_fields, delimiter=',')
for line in topo:
if line['conf-name'].lstrip().startswith('#'):
@@ -89,7 +95,7 @@ def _read_testbed_topo_from_csv(self):
self._cidr_to_ip_mask(line['ptf_ipv6'])
line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';')
- line['duts_map'] = {dut:line['duts'].index(dut) for dut in line['duts']}
+ line['duts_map'] = {dut: line['duts'].index(dut) for dut in line['duts']}
del line['dut']
self.testbed_topo[line['conf-name']] = line
@@ -135,6 +141,7 @@ class IncIndentDumper(yaml.Dumper):
[1]: https://web.archive.org/web/20170903201521/https://pyyaml.org/ticket/64
[2]: https://github.com/yaml/pyyaml/issues/127
"""
+
def increase_indent(self, flow=False, indentless=False):
return yaml.Dumper.increase_indent(self, flow, False)
@@ -153,7 +160,7 @@ def write_line_break(self, data=None):
ptf_ipv6 = self._ip_mask_to_cidr(tb_dict["ptf_ipv6"],
tb_dict["ptf_netmask_v6"])
testbed_mapping = zip(
- self.TESTBED_FIELDS,
+ self.testbed_fields,
[
tb_name,
tb_dict["group-name"],
diff --git a/tests/conftest.py b/tests/conftest.py
index 739bf6be6d8..55f959c1169 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -880,3 +880,19 @@ def duthost_console(localhost, creds, request):
console_password=creds['console_password'][vars['console_type']])
yield host
host.disconnect()
+
+@pytest.fixture(scope='session')
+def cleanup_cache_for_session(request):
+ """
+ This fixture allows developers to cleanup the cached data for all DUTs in the testbed before test.
+ Use cases:
+ - Running tests where some 'facts' about the DUT that get cached are changed.
+ - Running tests/regression without running test_pretest which has a test to clean up cache (PR#2978)
+ - Test case development phase to work out testbed information changes.
+
+ This fixture is not automatically applied, if you want to use it, you have to add a call to it in your tests.
+ """
+ tbname, tbinfo = get_tbinfo(request)
+ cache.cleanup(zone=tbname)
+ for a_dut in tbinfo['duts']:
+ cache.cleanup(zone=a_dut)
diff --git a/tests/ipfwd/test_dir_bcast.py b/tests/ipfwd/test_dir_bcast.py
index 4976d466ff5..0a33a6ebd96 100644
--- a/tests/ipfwd/test_dir_bcast.py
+++ b/tests/ipfwd/test_dir_bcast.py
@@ -8,7 +8,7 @@
pytest.mark.topology('t0')
]
-def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo, fib):
+def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
support_testbed_types = frozenset(['t0', 't0-16', 't0-52', 't0-56', 't0-64', 't0-64-32', 't0-116'])
testbed_type = tbinfo['topo']['name']
diff --git a/tests/ixia/ecn/files/helper.py b/tests/ixia/ecn/files/helper.py
index a252d77f02a..a57823899c0 100644
--- a/tests/ixia/ecn/files/helper.py
+++ b/tests/ixia/ecn/files/helper.py
@@ -308,6 +308,9 @@ def __run_traffic(api,
time.sleep(1)
attempts += 1
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
""" Dump captured packets """
pcap_bytes = api.get_capture_results(CaptureRequest(port_name=capture_port_name))
with open(pcap_file_name, 'wb') as fid:
diff --git a/tests/ixia/pfc/files/helper.py b/tests/ixia/pfc/files/helper.py
index 5e4d49321f1..5d000ada58c 100644
--- a/tests/ixia/pfc/files/helper.py
+++ b/tests/ixia/pfc/files/helper.py
@@ -326,6 +326,9 @@ def __run_traffic(api,
time.sleep(1)
attempts += 1
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
""" Dump per-flow statistics """
rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
api.set_state(State(FlowTransmitState(state='stop')))
diff --git a/tests/ixia/pfcwd/files/pfcwd_basic_helper.py b/tests/ixia/pfcwd/files/pfcwd_basic_helper.py
index 161ba4debae..fb9d309d3ba 100644
--- a/tests/ixia/pfcwd/files/pfcwd_basic_helper.py
+++ b/tests/ixia/pfcwd/files/pfcwd_basic_helper.py
@@ -275,6 +275,9 @@ def __run_traffic(api, config, all_flow_names, exp_dur_sec):
time.sleep(1)
attempts += 1
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
""" Dump per-flow statistics """
rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
api.set_state(State(FlowTransmitState(state='stop')))
diff --git a/tests/ixia/pfcwd/files/pfcwd_runtime_traffic_helper.py b/tests/ixia/pfcwd/files/pfcwd_runtime_traffic_helper.py
new file mode 100644
index 00000000000..a3c1260624e
--- /dev/null
+++ b/tests/ixia/pfcwd/files/pfcwd_runtime_traffic_helper.py
@@ -0,0 +1,226 @@
+import time
+
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
+ ixia_api_serv_user, ixia_api_serv_passwd, ixia_api
+from tests.common.ixia.ixia_helpers import get_dut_port_id
+from tests.common.ixia.common_helpers import start_pfcwd, stop_pfcwd
+
+from abstract_open_traffic_generator.flow import DeviceTxRx, TxRx, Flow, Header,\
+ Size, Rate, Duration, FixedSeconds
+from abstract_open_traffic_generator.flow_ipv4 import Priority, Dscp
+from abstract_open_traffic_generator.flow import Pattern as FieldPattern
+from abstract_open_traffic_generator.flow import Ipv4 as Ipv4Header
+from abstract_open_traffic_generator.flow import Ethernet as EthernetHeader
+from abstract_open_traffic_generator.control import State, ConfigState,\
+ FlowTransmitState
+from abstract_open_traffic_generator.result import FlowRequest
+
+DATA_FLOW_NAME = "Data Flow"
+DATA_PKT_SIZE = 1024
+DATA_FLOW_DURATION_SEC = 15
+PFCWD_START_DELAY_SEC = 3
+IXIA_POLL_DELAY_SEC = 2
+TOLERANCE_THRESHOLD = 0.05
+
+def run_pfcwd_runtime_traffic_test(api,
+ testbed_config,
+ conn_data,
+ fanout_data,
+ duthost,
+ dut_port,
+ prio_list,
+ prio_dscp_map):
+ """
+ Test PFC watchdog's impact on runtime traffic
+
+ Args:
+ api (obj): IXIA session
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ conn_data (dict): the dictionary returned by conn_graph_fact.
+ fanout_data (dict): the dictionary returned by fanout_graph_fact.
+ duthost (Ansible host instance): device under test
+ dut_port (str): DUT port to test
+ prio_list (list): priorities of data flows
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+
+ Returns:
+ N/A
+ """
+ pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config')
+
+ stop_pfcwd(duthost)
+
+ """ Get the ID of the port to test """
+ port_id = get_dut_port_id(dut_hostname=duthost.hostname,
+ dut_port=dut_port,
+ conn_data=conn_data,
+ fanout_data=fanout_data)
+
+ pytest_assert(port_id is not None,
+ 'Fail to get ID for port {}'.format(dut_port))
+
+ flows = __gen_traffic(testbed_config=testbed_config,
+ port_id=port_id,
+ data_flow_name=DATA_FLOW_NAME,
+ data_flow_dur_sec=DATA_FLOW_DURATION_SEC,
+ data_pkt_size=DATA_PKT_SIZE,
+ prio_list=prio_list,
+ prio_dscp_map=prio_dscp_map)
+
+ """ Tgen config = testbed config + flow config """
+ config = testbed_config
+ config.flows = flows
+
+ all_flow_names = [flow.name for flow in flows]
+
+ flow_stats = __run_traffic(api=api,
+ config=config,
+ duthost=duthost,
+ all_flow_names=all_flow_names,
+ pfcwd_start_delay_sec=PFCWD_START_DELAY_SEC,
+ exp_dur_sec=DATA_FLOW_DURATION_SEC)
+
+ speed_str = config.layer1[0].speed
+ speed_gbps = int(speed_str.split('_')[1])
+
+ __verify_results(rows=flow_stats,
+ speed_gbps=speed_gbps,
+ data_flow_dur_sec=DATA_FLOW_DURATION_SEC,
+ data_pkt_size=DATA_PKT_SIZE,
+ tolerance=TOLERANCE_THRESHOLD)
+
+def __gen_traffic(testbed_config,
+ port_id,
+ data_flow_name,
+ data_flow_dur_sec,
+ data_pkt_size,
+ prio_list,
+ prio_dscp_map):
+ """
+ Generate configurations of flows
+
+ Args:
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ port_id (int): ID of DUT port to test.
+ data_flow_name (str): data flow name
+ data_flow_dur_sec (int): duration of data flows in second
+ data_pkt_size (int): size of data packets in byte
+ prio_list (list): priorities of data flows
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+
+ Returns:
+ flows configurations (list): the list should have configurations of
+ len(prio_list) data flows
+ """
+ rx_port_id = port_id
+ tx_port_id = (port_id + 1) % len(testbed_config.devices)
+
+ data_endpoint = DeviceTxRx(
+ tx_device_names=[testbed_config.devices[tx_port_id].name],
+ rx_device_names=[testbed_config.devices[rx_port_id].name],
+ )
+
+ result = list()
+ data_flow_rate_percent = int(100 / len(prio_list))
+
+ """ For each priority """
+ for prio in prio_list:
+ ip_prio = Priority(Dscp(phb=FieldPattern(choice=prio_dscp_map[prio]),
+ ecn=FieldPattern(choice=Dscp.ECN_CAPABLE_TRANSPORT_1)))
+ pfc_queue = FieldPattern([prio])
+
+ data_flow = Flow(
+ name='{} Prio {}'.format(data_flow_name, prio),
+ tx_rx=TxRx(data_endpoint),
+ packet=[
+ Header(choice=EthernetHeader(pfc_queue=pfc_queue)),
+ Header(choice=Ipv4Header(priority=ip_prio))
+ ],
+ size=Size(data_pkt_size),
+ rate=Rate('line', data_flow_rate_percent),
+ duration=Duration(FixedSeconds(seconds=data_flow_dur_sec))
+ )
+
+ result.append(data_flow)
+
+ return result
+
+def __run_traffic(api, config, duthost, all_flow_names, pfcwd_start_delay_sec, exp_dur_sec):
+ """
+ Start traffic at time 0 and enable PFC watchdog at pfcwd_start_delay_sec
+
+ Args:
+ api (obj): IXIA session
+ config (obj): experiment config (testbed config + flow config)
+ duthost (Ansible host instance): device under test
+ all_flow_names (list): list of names of all the flows
+ pfcwd_start_delay_sec (int): PFC watchdog start delay in second
+ exp_dur_sec (int): experiment duration in second
+
+ Returns:
+ per-flow statistics (list)
+ """
+
+ api.set_state(State(ConfigState(config=config, state='set')))
+ api.set_state(State(FlowTransmitState(state='start')))
+
+ time.sleep(pfcwd_start_delay_sec)
+ start_pfcwd(duthost)
+ time.sleep(exp_dur_sec - pfcwd_start_delay_sec)
+
+ attempts = 0
+ max_attempts = 20
+
+ while attempts < max_attempts:
+ rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
+ """ If all the flows have stopped """
+ transmit_states = [row['transmit'] for row in rows]
+ if len(rows) == len(all_flow_names) and\
+ list(set(transmit_states)) == ['stopped']:
+ time.sleep(IXIA_POLL_DELAY_SEC)
+ break
+ else:
+ time.sleep(1)
+ attempts += 1
+
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
+ """ Dump per-flow statistics """
+ rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
+ api.set_state(State(FlowTransmitState(state='stop')))
+
+ return rows
+
+def __verify_results(rows, speed_gbps, data_flow_dur_sec, data_pkt_size, tolerance):
+ """
+ Verify if we get expected experiment results
+
+ Args:
+ rows (list): per-flow statistics
+ speed_gbps (int): link speed in Gbps
+ data_flow_dur_sec (int): duration of data flows in second
+ data_pkt_size (int): size of data packets in byte
+ tolerance (float): maximum allowable deviation
+
+ Returns:
+ N/A
+ """
+ data_flow_rate_percent = int(100 / len(rows))
+
+ for row in rows:
+ flow_name = row['name']
+ tx_frames = row['frames_tx']
+ rx_frames = row['frames_rx']
+
+ pytest_assert(tx_frames == rx_frames, "{} packets of {} are dropped".\
+ format(tx_frames-rx_frames, flow_name))
+
+ exp_rx_pkts = data_flow_rate_percent / 100.0 * speed_gbps \
+ * 1e9 * data_flow_dur_sec / 8.0 / data_pkt_size
+
+ deviation = (rx_frames - exp_rx_pkts) / float(exp_rx_pkts)
+ pytest_assert(abs(deviation) < tolerance,
+ "{} should receive {} packets (actual {})".\
+ format(flow_name, exp_rx_pkts, rx_frames))
diff --git a/tests/ixia/pfcwd/test_pfcwd_basic.py b/tests/ixia/pfcwd/test_pfcwd_basic.py
index 6716f0017ae..0c4123f90ef 100644
--- a/tests/ixia/pfcwd/test_pfcwd_basic.py
+++ b/tests/ixia/pfcwd/test_pfcwd_basic.py
@@ -92,7 +92,7 @@ def test_pfcwd_basic_multi_lossless_prio(ixia_api,
Returns:
N/A
"""
- dut_hostname, dut_port = enum_dut_portname_oper_up.split('|')
+ dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname,
"Port is not mapped to the expected DUT")
diff --git a/tests/ixia/pfcwd/test_pfcwd_runtime_traffic.py b/tests/ixia/pfcwd/test_pfcwd_runtime_traffic.py
new file mode 100644
index 00000000000..6c7063bb0ce
--- /dev/null
+++ b/tests/ixia/pfcwd/test_pfcwd_runtime_traffic.py
@@ -0,0 +1,53 @@
+import pytest
+
+from tests.common.helpers.assertions import pytest_require, pytest_assert
+from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
+ fanout_graph_facts
+from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
+ ixia_api_serv_user, ixia_api_serv_passwd, ixia_api, ixia_testbed
+from tests.common.ixia.qos_fixtures import prio_dscp_map, all_prio_list
+
+from files.pfcwd_runtime_traffic_helper import run_pfcwd_runtime_traffic_test
+
+@pytest.mark.topology("tgen")
+
+def test_pfcwd_runtime_traffic(ixia_api,
+ ixia_testbed,
+ conn_graph_facts,
+ fanout_graph_facts,
+ duthosts,
+ rand_one_dut_hostname,
+ rand_one_dut_portname_oper_up,
+ all_prio_list,
+ prio_dscp_map):
+ """
+ Test PFC watchdog's impact on runtime traffic
+
+ Args:
+ ixia_api (pytest fixture): IXIA session
+ ixia_testbed (pytest fixture): L2/L3 config of a T0 testbed
+ conn_graph_facts (pytest fixture): connection graph
+ fanout_graph_facts (pytest fixture): fanout graph
+ duthosts (pytest fixture): list of DUTs
+ rand_one_dut_hostname (str): hostname of DUT
+ rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
+ all_prio_list (pytest fixture): list of all the priorities
+ prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority)
+
+ Returns:
+ N/A
+ """
+ dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
+ pytest_require(rand_one_dut_hostname == dut_hostname,
+ "Port is not mapped to the expected DUT")
+
+ duthost = duthosts[rand_one_dut_hostname]
+
+ run_pfcwd_runtime_traffic_test(api=ixia_api,
+ testbed_config=ixia_testbed,
+ conn_data=conn_graph_facts,
+ fanout_data=fanout_graph_facts,
+ duthost=duthost,
+ dut_port=dut_port,
+ prio_list=all_prio_list,
+ prio_dscp_map=prio_dscp_map)
diff --git a/tests/kvmtest.sh b/tests/kvmtest.sh
index b6bb03cb0c5..50a4a42a608 100755
--- a/tests/kvmtest.sh
+++ b/tests/kvmtest.sh
@@ -97,6 +97,7 @@ test_t0() {
monit/test_monit_status.py \
platform_tests/test_advanced_reboot.py \
test_interfaces.py \
+ arp/test_arp_dualtor.py \
bgp/test_bgp_fact.py \
bgp/test_bgp_gr_helper.py \
bgp/test_bgp_speaker.py \
diff --git a/tests/nat/conftest.py b/tests/nat/conftest.py
index bed9dc6f838..a911c0c4dc5 100644
--- a/tests/nat/conftest.py
+++ b/tests/nat/conftest.py
@@ -151,6 +151,10 @@ def apply_global_nat_config(duthost):
after test run cleanup DUT's NAT configration
:param duthost: DUT host object
"""
+ status, _ = duthost.get_feature_status()
+ if 'nat' not in status:
+ pytest.skip('nat feature is not enabled with image version {}'.format(duthost.os_version))
+
nat_global_config(duthost)
yield
# reload config on teardown
diff --git a/tests/pfcwd/conftest.py b/tests/pfcwd/conftest.py
index 1d8ed9d936b..49e71bb9258 100644
--- a/tests/pfcwd/conftest.py
+++ b/tests/pfcwd/conftest.py
@@ -4,6 +4,7 @@
from tests.common.fixtures.conn_graph_facts import conn_graph_facts
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
+from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice
from .files.pfcwd_helper import TrafficPorts, set_pfc_timers, select_test_ports
logger = logging.getLogger(__name__)
@@ -27,6 +28,22 @@ def pytest_addoption(parser):
parser.addoption('--fake-storm', action='store', type=bool, default=True,
help='Fake storm for most ports instead of using pfc gen')
+@pytest.fixture(scope="module")
+def fake_storm(request, duthosts, rand_one_dut_hostname):
+ """
+ Enable/disable fake storm based on platform and input parameters
+
+ Args:
+ request: pytest request object
+ duthosts: AnsibleHost instance for multi DUT
+ rand_one_dut_hostname: hostname of DUT
+
+ Returns:
+ fake_storm: False/True
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ return request.config.getoption('--fake-storm') if not isMellanoxDevice(duthost) else False
+
@pytest.fixture(scope="module")
def setup_pfc_test(duthosts, rand_one_dut_hostname, ptfhost, conn_graph_facts, tbinfo):
"""
diff --git a/tests/pfcwd/test_pfcwd_function.py b/tests/pfcwd/test_pfcwd_function.py
index 58dd34809b5..244b1468c89 100644
--- a/tests/pfcwd/test_pfcwd_function.py
+++ b/tests/pfcwd/test_pfcwd_function.py
@@ -499,12 +499,13 @@ def run_test(self, dut, port, action):
logger.info("--- Verify PFCwd counters for port {} ---".format(port))
self.stats.verify_pkt_cnts(self.pfc_wd['port_type'], self.pfc_wd['test_pkt_count'])
- def test_pfcwd_actions(self, request, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, fanouthosts):
+ def test_pfcwd_actions(self, request, fake_storm, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, fanouthosts):
"""
PFCwd functional test
Args:
request(object) : pytest request object
+ fake_storm(fixture) : Module scoped fixture for enable/disable fake storm
setup_pfc_test(fixture) : Module scoped autouse fixture for PFCwd
fanout_graph_facts(fixture) : fanout graph info
ptfhost(AnsibleHost) : ptf host instance
@@ -522,10 +523,10 @@ def test_pfcwd_actions(self, request, setup_pfc_test, fanout_graph_facts, ptfhos
self.neighbors = setup_info['neighbors']
dut_facts = self.dut.facts
self.peer_dev_list = dict()
- self.fake_storm = request.config.getoption("--fake-storm")
+ self.fake_storm = fake_storm
+ self.storm_hndle = None
for idx, port in enumerate(self.ports):
- self.storm_hndle = None
logger.info("")
logger.info("--- Testing various Pfcwd actions on {} ---".format(port))
self.setup_test_params(port, setup_info['vlan'], init=not idx)
diff --git a/tests/pfcwd/test_pfcwd_warm_reboot.py b/tests/pfcwd/test_pfcwd_warm_reboot.py
index 6410e171a1a..22f65c675d9 100644
--- a/tests/pfcwd/test_pfcwd_warm_reboot.py
+++ b/tests/pfcwd/test_pfcwd_warm_reboot.py
@@ -430,12 +430,13 @@ def stop_all_storm(self):
logger.info("--- Disabling fake storm on port {} queue {}".format(port, queue))
PfcCmd.set_storm_status(self.dut, self.oid_map[(port, queue)], "disabled")
- def pfcwd_wb_helper(self, request, testcase_actions, setup_pfc_test, fanout_graph_facts, ptfhost,
+ def pfcwd_wb_helper(self, fake_storm, testcase_actions, setup_pfc_test, fanout_graph_facts, ptfhost,
duthost, localhost, fanouthosts):
"""
Helper method that initializes the vars and starts the test execution
Args:
+ fake_storm(bool): if fake storm is enabled or disabled
testcase_actions(list): list of actions that the test will go through
setup_pfc_test(fixture): module scoped autouse fixture
fanout_graph_facts(fixture): fanout info
@@ -460,7 +461,7 @@ def pfcwd_wb_helper(self, request, testcase_actions, setup_pfc_test, fanout_grap
storm_deferred = 0
storm_restored = 0
self.max_wait = 0
- self.fake_storm = request.config.getoption("--fake-storm")
+ self.fake_storm = fake_storm
self.oid_map = dict()
self.storm_threads = []
@@ -519,11 +520,12 @@ def testcase_action(self, request):
"""
yield request.param
- def test_pfcwd_wb(self, request, testcase_action, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, localhost, fanouthosts):
+ def test_pfcwd_wb(self, fake_storm, testcase_action, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, localhost, fanouthosts):
"""
Tests PFCwd warm reboot with various testcase actions
Args:
+ fake_storm(fixture): fake storm status
testcase_action(fixture): testcase to execute (values: 'no_storm', 'storm', 'async_storm')
'no_storm' : PFCwd storm detection/restore before and after warm reboot
@@ -543,5 +545,5 @@ def test_pfcwd_wb(self, request, testcase_action, setup_pfc_test, fanout_graph_f
"""
duthost = duthosts[rand_one_dut_hostname]
logger.info("--- {} ---".format(TESTCASE_INFO[testcase_action]['desc']))
- self.pfcwd_wb_helper(request, TESTCASE_INFO[testcase_action]['test_sequence'], setup_pfc_test,
+ self.pfcwd_wb_helper(fake_storm, TESTCASE_INFO[testcase_action]['test_sequence'], setup_pfc_test,
fanout_graph_facts, ptfhost, duthost, localhost, fanouthosts)
diff --git a/tests/platform_tests/api/test_thermal.py b/tests/platform_tests/api/test_thermal.py
index 3c960949512..839391718c4 100644
--- a/tests/platform_tests/api/test_thermal.py
+++ b/tests/platform_tests/api/test_thermal.py
@@ -142,7 +142,7 @@ def test_get_temperature(self, duthost, localhost, platform_api_conn):
if self.expect(temperature is not None, "Unable to retrieve Thermal {} temperature".format(i)):
if self.expect(isinstance(temperature, float), "Thermal {} temperature appears incorrect".format(i)):
- self.expect(temperature > 0 and temperature <= 100,
+ self.expect(temperature >= 0 and temperature <= 100,
"Thermal {} temperature {} reading is not within range".format(i, temperature))
self.assert_expectations()
@@ -153,7 +153,7 @@ def test_get_low_threshold(self, duthost, localhost, platform_api_conn):
if self.expect(low_threshold is not None, "Unable to retrieve Thermal {} low threshold".format(i)):
if self.expect(isinstance(low_threshold, float), "Thermal {} low threshold appears incorrect".format(i)):
- self.expect(low_threshold > 0 and low_threshold <= 100,
+ self.expect(low_threshold >= 0 and low_threshold <= 100,
"Thermal {} low threshold {} reading is not within range".format(i, low_threshold))
self.assert_expectations()
@@ -175,7 +175,7 @@ def test_get_low_critical_threshold(self, duthost, localhost, platform_api_conn)
if self.expect(low_critical_threshold is not None, "Unable to retrieve Thermal {} low critical threshold".format(i)):
if self.expect(isinstance(low_critical_threshold, float), "Thermal {} low threshold appears incorrect".format(i)):
- self.expect(low_critical_threshold > 0 and low_critical_threshold <= 110,
+ self.expect(low_critical_threshold >= 0 and low_critical_threshold <= 110,
"Thermal {} low critical threshold {} reading is not within range".format(i, low_critical_threshold))
self.assert_expectations()
diff --git a/tests/platform_tests/args/advanced_reboot_args.py b/tests/platform_tests/args/advanced_reboot_args.py
index 9cf2fdfb0de..7f9d7a739e8 100644
--- a/tests/platform_tests/args/advanced_reboot_args.py
+++ b/tests/platform_tests/args/advanced_reboot_args.py
@@ -91,3 +91,11 @@ def add_advanced_reboot_args(parser):
default=None,
help="Script for checking additional states on DUT"
)
+
+ parser.addoption(
+ "--bgp_v4_v6_time_diff",
+ action="store",
+ type=int,
+ default=40,
+ help="Time difference (in sec) between BGP V4 and V6 establishment time"
+ )
diff --git a/tests/platform_tests/test_platform_info.py b/tests/platform_tests/test_platform_info.py
index 8e2a5e5737d..0a2a1bfafb8 100644
--- a/tests/platform_tests/test_platform_info.py
+++ b/tests/platform_tests/test_platform_info.py
@@ -158,7 +158,7 @@ def turn_all_outlets_on(pdu_ctrl):
pytest_require(all_outlet_status and len(all_outlet_status) >= 2, 'Skip the test, cannot to get at least 2 outlet status: {}'.format(all_outlet_status))
for outlet in all_outlet_status:
if not outlet["outlet_on"]:
- pdu_ctrl.turn_on_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_on_outlet(outlet)
time.sleep(5)
@@ -210,8 +210,8 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, rand_one_dut_hostname, pd
for outlet in all_outlet_status:
psu_under_test = None
- logging.info("Turn off outlet %s" % str(outlet["outlet_id"]))
- pdu_ctrl.turn_off_outlet(outlet["outlet_id"])
+ logging.info("Turn off outlet {}".format(outlet))
+ pdu_ctrl.turn_off_outlet(outlet)
time.sleep(5)
cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS)
@@ -223,8 +223,8 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, rand_one_dut_hostname, pd
check_vendor_specific_psustatus(duthost, line)
pytest_assert(psu_under_test is not None, "No PSU is turned off")
- logging.info("Turn on outlet %s" % str(outlet["outlet_id"]))
- pdu_ctrl.turn_on_outlet(outlet["outlet_id"])
+ logging.info("Turn on outlet {}".format(outlet))
+ pdu_ctrl.turn_on_outlet(outlet)
time.sleep(5)
cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS)
@@ -378,7 +378,7 @@ def turn_off_outlet_and_check_thermal_control(dut, pdu_ctrl, outlet, mocker):
control policy file.
"""
logging.info("Turn off outlet %s" % str(outlet["psu_id"]))
- pdu_ctrl.turn_off_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_off_outlet(outlet)
time.sleep(5)
psu_under_test = None
@@ -397,7 +397,7 @@ def turn_off_outlet_and_check_thermal_control(dut, pdu_ctrl, outlet, mocker):
mocker.check_all_fan_speed,
100), 'FAN speed not turn to 100% after PSU off')
- pdu_ctrl.turn_on_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_on_outlet(outlet)
time.sleep(5)
diff --git a/tests/platform_tests/test_reboot.py b/tests/platform_tests/test_reboot.py
index 6deebc2b9f0..7ba625b4fe2 100644
--- a/tests/platform_tests/test_reboot.py
+++ b/tests/platform_tests/test_reboot.py
@@ -161,12 +161,12 @@ def _power_off_reboot_helper(kwargs):
for outlet in all_outlets:
logging.debug("turning off {}".format(outlet))
- pdu_ctrl.turn_off_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_off_outlet(outlet)
time.sleep(delay_time)
logging.info("Power on {}".format(power_on_seq))
for outlet in power_on_seq:
logging.debug("turning on {}".format(outlet))
- pdu_ctrl.turn_on_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_on_outlet(outlet)
def test_power_off_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list, pdu_controller, power_off_delay):
diff --git a/tests/qos/test_pfc_pause.py b/tests/qos/test_pfc_pause.py
index 2a607024172..719cd99f027 100644
--- a/tests/qos/test_pfc_pause.py
+++ b/tests/qos/test_pfc_pause.py
@@ -1,21 +1,23 @@
-
-import pytest
+import logging
import os
-import time
-import re
-import struct
+import pytest
import random
-from tests.common.fixtures.conn_graph_facts import conn_graph_facts
-from qos_fixtures import lossless_prio_dscp_map, leaf_fanouts
-from qos_helpers import ansible_stdout_to_str, eos_to_linux_intf, start_pause, stop_pause, setup_testbed, gen_testbed_t0, PFC_GEN_FILE, PFC_GEN_REMOTE_PATH
+import time
+from qos_fixtures import lossless_prio_dscp_map
+from qos_helpers import ansible_stdout_to_str, get_phy_intfs, get_addrs_in_subnet, get_active_vlan_members, get_vlan_subnet, natural_keys
+from tests.common.fixtures.conn_graph_facts import conn_graph_facts, fanout_graph_facts
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.helpers.pfc_storm import PFCStorm
pytestmark = [
pytest.mark.topology('t0')
]
+logger = logging.getLogger(__name__)
+
PFC_PKT_COUNT = 1000000000
PTF_FILE_REMOTE_PATH = '~/ptftests/pfc_pause_test.py'
@@ -26,45 +28,90 @@
""" Maximum number of interfaces to test on a DUT """
MAX_TEST_INTFS_COUNT = 4
-def run_test_t0(fanouthosts,
- duthost,
- ptfhost,
- conn_graph_facts,
- leaf_fanouts,
- dscp,
- dscp_bg,
- queue_paused,
- send_pause,
- pfc_pause,
- pause_prio,
- pause_time=65535,
- max_test_intfs_count=128):
+@pytest.fixture(scope="module", autouse=True)
+def pfc_test_setup(duthosts, rand_one_dut_hostname):
"""
- @Summary: Run a series of tests on a T0 topology.
- For the T0 topology, we only test Vlan (server-faced) interfaces.
- @param conn_graph_facts: Testbed topology
- @param leaf_fanouts: Leaf fanout switches
- @param dscp: DSCP value of test data packets
- @param dscp_bg: DSCP value of background data packets
- @param queue_paused: if the queue is expected to be paused
- @param send_pause: send pause frames or not
- @param pfc_pause: send PFC pause frames or not
- @param pause_prio: priority of PFC franme
- @param pause_time: pause time quanta. It is 65535 (maximum pause time quanta) by default.
- @param max_test_intfs_count: maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces.
- return: Return # of iterations and # of passed iterations for each tested interface.
+ Generate configurations for the tests
+
+ Args:
+ duthosts(AnsibleHost) : multi dut instance
+        rand_one_dut_hostname(string) : hostname of one of the dut instances from the multi dut
+
+ Yields:
+ setup(dict): DUT interfaces, PTF interfaces, PTF IP addresses, and PTF MAC addresses
"""
- """ Clear DUT's PFC counters """
- duthost.sonic_pfc_counters(method="clear")
+ """ Get all the active physical interfaces enslaved to the Vlan """
+ """ These interfaces are actually server-faced interfaces at T0 """
+ duthost = duthosts[rand_one_dut_hostname]
+ vlan_members = get_active_vlan_members(duthost)
+
+ """ Get Vlan subnet """
+ vlan_subnet = get_vlan_subnet(duthost)
+
+ """ Generate IP addresses for servers in the Vlan """
+ vlan_ip_addrs = get_addrs_in_subnet(vlan_subnet, len(vlan_members))
+
+ """ Generate MAC addresses 00:00:00:00:00:XX for servers in the Vlan """
+ vlan_mac_addrs = [5 * '00:' + format(k, '02x') for k in random.sample(range(1, 256), len(vlan_members))]
+
+ """ Find correspoinding interfaces on PTF """
+ phy_intfs = get_phy_intfs(duthost)
+ phy_intfs.sort(key=natural_keys)
+ vlan_members.sort(key=natural_keys)
+ vlan_members_index = [phy_intfs.index(intf) for intf in vlan_members]
+ ptf_intfs = ['eth' + str(i) for i in vlan_members_index]
""" Disable DUT's PFC wd """
duthost.shell('sudo pfcwd stop')
- """ Generate a T0 testbed configuration """
- dut_intfs, ptf_intfs, ptf_ip_addrs, ptf_mac_addrs = gen_testbed_t0(duthost)
+ yield {
+ 'vlan_members': vlan_members,
+ 'ptf_intfs': ptf_intfs,
+ 'vlan_ip_addrs': vlan_ip_addrs,
+ 'vlan_mac_addrs': vlan_mac_addrs
+ }
+
+ """ Enable DUT's PFC wd """
+ duthost.shell('sudo pfcwd start_default')
+
+def run_test(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts,
+ fanout_info, traffic_params, pause_prio=None, queue_paused=True,
+ send_pause=True, pfc_pause=True, max_test_intfs_count=128):
+ """
+ Run the test
+
+ Args:
+ pfc_test_setup(fixture) : setup fixture
+ fanouthosts(AnsibleHost) : fanout instance
+ duthost(AnsibleHost) : dut instance
+ ptfhost(AnsibleHost) : ptf instance
+ conn_graph_facts(fixture) : Testbed topology
+ fanout_info(fixture) : fanout graph info
+ traffic_params(dict) : dict containing the dscp of test dscp and background dscp
+        pause_prio(string) : priority of PFC frame
+ queue_paused(bool) : if the queue is expected to be paused
+ send_pause(bool) : send pause frames or not
+ pfc_pause(bool) : send PFC pause frames or not
+ max_test_intfs_count(int) : maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces
+
+ Return:
+ Number of iterations and number of passed iterations for each tested interface.
+ """
+
+ setup = pfc_test_setup
+ dut_intfs = setup['vlan_members']
+ ptf_intfs = setup['ptf_intfs']
+ ptf_ip_addrs = setup['vlan_ip_addrs']
+ ptf_mac_addrs = setup['vlan_mac_addrs']
+ """ Clear DUT's PFC counters """
+ duthost.sonic_pfc_counters(method="clear")
+
+
results = dict()
+ all_peer_dev = set()
+ storm_handle = None
for i in range(min(max_test_intfs_count, len(ptf_intfs))):
src_index = i
dst_index = (i + 1) % len(ptf_intfs)
@@ -88,31 +135,37 @@ def run_test_t0(fanouthosts,
if send_pause:
peer_device = conn_graph_facts['device_conn'][duthost.hostname][dut_intf_paused]['peerdevice']
peer_port = conn_graph_facts['device_conn'][duthost.hostname][dut_intf_paused]['peerport']
- peer_port_name = eos_to_linux_intf(peer_port)
- peerdev_ans = fanouthosts[peer_device]
+ peer_info = { 'peerdevice': peer_device,
+ 'pfc_fanout_interface': peer_port
+ }
if not pfc_pause:
pause_prio = None
- start_pause(host_ans=peerdev_ans,
- pkt_gen_path=PFC_GEN_REMOTE_PATH,
- intf=peer_port_name,
- pkt_count=PFC_PKT_COUNT,
- pause_duration=pause_time,
- pause_priority=pause_prio)
-
+ if not storm_handle:
+ storm_handle = PFCStorm(duthost, fanout_info, fanouthosts,
+ pfc_queue_idx=pause_prio,
+ pfc_frames_number=PFC_PKT_COUNT,
+ peer_info=peer_info)
+
+ storm_handle.update_peer_info(peer_info)
+ if not all_peer_dev or peer_device not in all_peer_dev:
+ storm_handle.deploy_pfc_gen()
+ all_peer_dev.add(peer_device)
+ storm_handle.start_storm()
""" Wait for PFC pause frame generation """
time.sleep(1)
""" Run PTF test """
+ logger.info("Running test: src intf: {} dest intf: {}".format(dut_intfs[src_index], dut_intfs[dst_index]))
intf_info = '--interface %d@%s --interface %d@%s' % (src_index, src_intf, dst_index, dst_intf)
test_params = ("mac_src=\'%s\';" % src_mac
+ "mac_dst=\'%s\';" % dst_mac
+ "ip_src=\'%s\';" % src_ip
+ "ip_dst=\'%s\';" % dst_ip
- + "dscp=%d;" % dscp
- + "dscp_bg=%d;" % dscp_bg
+ + "dscp=%d;" % traffic_params['dscp']
+ + "dscp_bg=%d;" % traffic_params['dscp_bg']
+ "pkt_count=%d;" % PTF_PKT_COUNT
+ "pkt_intvl=%f;" % PTF_PKT_INTVL_SEC
+ "port_src=%d;" % src_index
@@ -139,197 +192,151 @@ def run_test_t0(fanouthosts,
if send_pause:
""" Stop PFC / FC storm """
- stop_pause(peerdev_ans, PFC_GEN_FILE)
+ storm_handle.stop_storm()
time.sleep(1)
return results
+def test_pfc_pause_lossless(pfc_test_setup, fanouthosts, duthost, ptfhost,
+ conn_graph_facts, fanout_graph_facts,
+ lossless_prio_dscp_map, enum_dut_lossless_prio):
-def run_test(fanouthosts,
- duthost,
- ptfhost,
- tbinfo,
- conn_graph_facts,
- leaf_fanouts,
- dscp,
- dscp_bg,
- queue_paused,
- send_pause,
- pfc_pause,
- pause_prio,
- pause_time=65535,
- max_test_intfs_count=128):
"""
- @Summary: Run a series of tests (only support T0 topology)
- @param tbinfo: Testbed information
- @param conn_graph_facts: Testbed topology
- @param leaf_fanouts: Leaf fanout switches
- @param dscp: DSCP value of test data packets
- @param dscp_bg: DSCP value of background data packets
- @param queue_paused: if the queue is expected to be paused
- @param send_pause: send pause frames or not
- @param pfc_pause: send PFC pause frames or not
- @param pause_prio: priority of PFC franme
- @param pause_time: pause time quanta. It is 65535 (maximum pause time quanta) by default.
- @param max_test_intfs_count: maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces.
- return: Return # of iterations and # of passed iterations for each tested interface.
+ Test if PFC pause frames can pause a lossless priority without affecting the other priorities
+
+ Args:
+ pfc_test_setup(fixture) : setup fixture
+ fanouthosts(AnsibleHost) : fanout instance
+ duthost(AnsibleHost) : dut instance
+ ptfhost(AnsibleHost) : ptf instance
+ conn_graph_facts(fixture) : Testbed topology
+ fanout_graph_facts(fixture) : fanout graph info
+ lossless_prio_dscp_map(dict) : lossless priorities and their DSCP values
+ enum_dut_lossless_prio (str): name of lossless priority to test
"""
- print tbinfo
- if tbinfo['topo']['name'].startswith('t0'):
- return run_test_t0(fanouthosts=fanouthosts,
- duthost=duthost,
- ptfhost=ptfhost,
- conn_graph_facts=conn_graph_facts, leaf_fanouts=leaf_fanouts,
- dscp=dscp,
- dscp_bg=dscp_bg,
- queue_paused=queue_paused,
- send_pause=send_pause,
- pfc_pause=pfc_pause,
- pause_prio=pause_prio,
- pause_time=pause_time,
- max_test_intfs_count=max_test_intfs_count)
-
- else:
- return None
-
-def test_pfc_pause_lossless(fanouthosts,
- duthost,
- ptfhost,
- tbinfo,
- conn_graph_facts,
- leaf_fanouts,
- lossless_prio_dscp_map):
-
- """
- @Summary: Test if PFC pause frames can pause a lossless priority without affecting the other priorities
- @param tbinfo: Testbed information
- @param conn_graph_facts: Testbed topology
- @param lossless_prio_dscp_map: lossless priorities and their DSCP values
- """
- setup_testbed(fanouthosts=fanouthosts,
- ptfhost=ptfhost,
- leaf_fanouts=leaf_fanouts)
-
+ test_errors = ""
errors = []
+ prio = int(enum_dut_lossless_prio.split('|')[-1])
+ dscp = lossless_prio_dscp_map[prio]
+ other_lossless_prio = 4 if prio == 3 else 3
+
- """ DSCP vlaues for lossless priorities """
- lossless_dscps = [int(dscp) for prio in lossless_prio_dscp_map for dscp in lossless_prio_dscp_map[prio]]
+ """ DSCP values for other lossless priority """
+ other_lossless_dscps = lossless_prio_dscp_map[other_lossless_prio]
""" DSCP values for lossy priorities """
- lossy_dscps = list(set(range(64)) - set(lossless_dscps))
-
- for prio in lossless_prio_dscp_map:
- """ DSCP values of the other lossless priorities """
- other_lossless_dscps = list(set(lossless_dscps) - set(lossless_prio_dscp_map[prio]))
- """ We also need to test some DSCP values for lossy priorities """
- other_dscps = other_lossless_dscps + lossy_dscps[0:2]
-
- for dscp in lossless_prio_dscp_map[prio]:
- for dscp_bg in other_dscps:
- results = run_test(fanouthosts=fanouthosts,
- duthost=duthost,
- ptfhost=ptfhost,
- tbinfo=tbinfo,
- conn_graph_facts=conn_graph_facts,
- leaf_fanouts=leaf_fanouts,
- dscp=dscp,
- dscp_bg=dscp_bg,
- queue_paused=True,
- send_pause=True,
- pfc_pause=True,
- pause_prio=prio,
- pause_time=65535,
- max_test_intfs_count=MAX_TEST_INTFS_COUNT)
-
- """ results should not be none """
- if results is None:
- assert 0
-
- errors = dict()
- for intf in results:
- if len(results[intf]) != 2:
- continue
-
- pass_count = results[intf][0]
- total_count = results[intf][1]
-
- if total_count == 0:
- continue
-
- if pass_count < total_count * PTF_PASS_RATIO_THRESH:
- errors[intf] = results[intf]
-
- if len(errors) > 0:
- print "errors occured:\n{}".format("\n".join(errors))
- assert 0
-
-def test_no_pfc(fanouthosts,
- duthost,
- ptfhost,
- tbinfo,
- conn_graph_facts,
- leaf_fanouts,
- lossless_prio_dscp_map):
+ lossy_dscps = list(set(range(64)) - set(other_lossless_dscps) - set(dscp))
+
+ """ We also need to test some DSCP values for lossy priorities """
+ other_dscps = other_lossless_dscps + lossy_dscps[0:2]
+
+ for dscp_bg in other_dscps:
+ logger.info("Testing dscp: {} and background dscp: {}".format(dscp, dscp_bg))
+ traffic_params = {'dscp': dscp[0], 'dscp_bg': dscp_bg}
+ results = run_test(pfc_test_setup,
+ fanouthosts,
+ duthost,
+ ptfhost,
+ conn_graph_facts,
+ fanout_graph_facts,
+ traffic_params,
+ queue_paused=True,
+ send_pause=True,
+ pfc_pause=True,
+ pause_prio=prio,
+ max_test_intfs_count=MAX_TEST_INTFS_COUNT)
+
+ """ results should not be none """
+ if results is None:
+ test_errors += "Dscp: {}, Background Dscp: {}, Result is empty\n".format(dscp, dscp_bg)
+
+ errors = dict()
+ for intf in results:
+ if len(results[intf]) != 2:
+ continue
+
+ pass_count = results[intf][0]
+ total_count = results[intf][1]
+
+ if total_count == 0:
+ continue
+
+ if pass_count < total_count * PTF_PASS_RATIO_THRESH:
+ errors[intf] = results[intf]
+
+ if len(errors) > 0:
+ test_errors += "Dscp: {}, Background Dscp: {}, errors occured: {}\n"\
+ .format(dscp, dscp_bg, " ".join(["{}:{}".format(k,v) for k, v in errors.items()]))
+
+ pytest_assert(len(test_errors) == 0, test_errors)
+
+def test_no_pfc(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts,
+ fanout_graph_facts, lossless_prio_dscp_map, enum_dut_lossless_prio):
"""
- @Summary: Test if lossless and lossy priorities can forward packets in the absence of PFC pause frames
- @param fanouthosts: Fixture for fanout hosts
- @param tbinfo: Testbed information
- @param conn_graph_facts: Testbed topology
- @param lossless_prio_dscp_map: lossless priorities and their DSCP values
+ Test if lossless and lossy priorities can forward packets in the absence of PFC pause frames
+
+ Args:
+ pfc_test_setup(fixture) : setup fixture
+ fanouthosts(AnsibleHost) : fanout instance
+ duthost(AnsibleHost) : dut instance
+ ptfhost(AnsibleHost) : ptf instance
+ conn_graph_facts(fixture) : Testbed topology
+ fanout_graph_facts(fixture) : fanout graph info
+ lossless_prio_dscp_map(dict) : lossless priorities and their DSCP values
+ enum_dut_lossless_prio (str): name of lossless priority to test
"""
- setup_testbed(fanouthosts=fanouthosts,
- ptfhost=ptfhost,
- leaf_fanouts=leaf_fanouts)
+ test_errors = ""
errors = []
+ prio = int(enum_dut_lossless_prio.split('|')[-1])
+ dscp = lossless_prio_dscp_map[prio]
+ other_lossless_prio = 4 if prio == 3 else 3
- """ DSCP vlaues for lossless priorities """
- lossless_dscps = [int(dscp) for prio in lossless_prio_dscp_map for dscp in lossless_prio_dscp_map[prio]]
+ """ DSCP values for other lossless priority """
+ other_lossless_dscps = lossless_prio_dscp_map[other_lossless_prio]
""" DSCP values for lossy priorities """
- lossy_dscps = list(set(range(64)) - set(lossless_dscps))
-
- for prio in lossless_prio_dscp_map:
- """ DSCP values of the other lossless priorities """
- other_lossless_dscps = list(set(lossless_dscps) - set(lossless_prio_dscp_map[prio]))
- """ We also need to test some DSCP values for lossy priorities """
- other_dscps = other_lossless_dscps + lossy_dscps[0:2]
-
- for dscp in lossless_prio_dscp_map[prio]:
- for dscp_bg in other_dscps:
- results = run_test(fanouthosts=fanouthosts,
- duthost=duthost,
- ptfhost=ptfhost,
- tbinfo=tbinfo,
- conn_graph_facts=conn_graph_facts,
- leaf_fanouts=leaf_fanouts,
- dscp=dscp,
- dscp_bg=dscp_bg,
- queue_paused=False,
- send_pause=False,
- pfc_pause=None,
- pause_prio=None,
- pause_time=None,
- max_test_intfs_count=MAX_TEST_INTFS_COUNT)
-
- """ results should not be none """
- if results is None:
- assert 0
-
- errors = dict()
- for intf in results:
- if len(results[intf]) != 2:
- continue
-
- pass_count = results[intf][0]
- total_count = results[intf][1]
-
- if total_count == 0:
- continue
-
- if pass_count < total_count * PTF_PASS_RATIO_THRESH:
- errors[intf] = results[intf]
-
- if len(errors) > 0:
- print "errors occured:\n{}".format("\n".join(errors))
- assert 0
+ lossy_dscps = list(set(range(64)) - set(other_lossless_dscps) - set(dscp))
+
+ """ We also need to test some DSCP values for lossy priorities """
+ other_dscps = other_lossless_dscps + lossy_dscps[0:2]
+
+ for dscp_bg in other_dscps:
+ logger.info("Testing dscp: {} and background dscp: {}".format(dscp, dscp_bg))
+ traffic_params = {'dscp': dscp[0], 'dscp_bg': dscp_bg}
+ results = run_test(pfc_test_setup,
+ fanouthosts,
+ duthost,
+ ptfhost,
+ conn_graph_facts,
+ fanout_graph_facts,
+ traffic_params,
+ queue_paused=False,
+ send_pause=False,
+ pfc_pause=None,
+ pause_prio=None,
+ max_test_intfs_count=MAX_TEST_INTFS_COUNT)
+
+ """ results should not be none """
+ if results is None:
+ test_errors += "Dscp: {}, Background Dscp: {}, Result is empty\n".format(dscp, dscp_bg)
+
+ errors = dict()
+ for intf in results:
+ if len(results[intf]) != 2:
+ continue
+
+ pass_count = results[intf][0]
+ total_count = results[intf][1]
+
+ if total_count == 0:
+ continue
+
+ if pass_count < total_count * PTF_PASS_RATIO_THRESH:
+ errors[intf] = results[intf]
+
+ if len(errors) > 0:
+ test_errors += "Dscp: {}, Background Dscp: {}, errors occured: {}\n"\
+ .format(dscp, dscp_bg, " ".join(["{}:{}".format(k,v) for k, v in errors.items()]))
+
+ pytest_assert(len(test_errors) == 0, test_errors)
diff --git a/tests/scripts/garp_service.py b/tests/scripts/garp_service.py
new file mode 100644
index 00000000000..3a52fd6730d
--- /dev/null
+++ b/tests/scripts/garp_service.py
@@ -0,0 +1,79 @@
+import argparse
+import json
+import ptf
+import ptf.testutils as testutils
+import time
+
+from ipaddress import ip_interface
+from scapy.all import conf
+from scapy.arch import get_if_hwaddr
+
+class GarpService:
+
+ def __init__(self, garp_config_file, interval):
+ self.garp_config_file = garp_config_file
+ self.interval = interval
+ self.packets = {}
+ self.dataplane = ptf.dataplane_instance
+
+ def gen_garp_packets(self):
+ '''
+ Read the config file and generate a GARP packet for each configured interface
+ '''
+
+ with open(self.garp_config_file) as f:
+ garp_config = json.load(f)
+
+ for port, config in garp_config.items():
+ intf_name = 'eth{}'.format(port)
+ source_mac = get_if_hwaddr(intf_name)
+ source_ip_str = config['target_ip']
+ source_ip = str(ip_interface(source_ip_str).ip)
+
+ # PTF uses Scapy to create packets, so this is ok to create
+ # packets through PTF even though we are using Scapy to send the packets
+ garp_pkt = testutils.simple_arp_packet(eth_src=source_mac,
+ hw_snd=source_mac,
+ ip_snd=source_ip,
+ ip_tgt=source_ip, # Re-use server IP as target IP, since it is within the subnet of the VLAN IP
+ arp_op=2)
+ self.packets[intf_name] = garp_pkt
+
+ def send_garp_packets(self):
+ '''
+ For each created GARP packet/interface pair, create an L2 socket.
+ Then send every packet through its associated socket according to the self.interval
+ '''
+ self.gen_garp_packets()
+
+ sockets = {}
+
+ for intf, packet in self.packets.items():
+ socket = conf.L2socket(iface=intf)
+ sockets[socket] = packet
+
+ try:
+ while True:
+ for socket, packet in sockets.items():
+ socket.send(packet)
+
+ if self.interval is None:
+ break
+
+ time.sleep(self.interval)
+
+ finally:
+ for socket in sockets.keys():
+ socket.close()
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='GARP Service')
+ parser.add_argument('--conf', '-c', dest='conf_file', required=False, default='/tmp/garp_conf.json', action='store', help='The configuration file for GARP Service (default "/tmp/garp_conf.json")')
+ parser.add_argument('--interval', '-i', dest='interval', required=False, type=int, default=None, action='store', help='The interval at which to re-send GARP messages. If None or not specified, messages will only be set once at service startup')
+ args = parser.parse_args()
+ conf_file = args.conf_file
+ interval = args.interval
+
+ garp_service = GarpService(conf_file, interval)
+ garp_service.send_garp_packets()
diff --git a/tests/scripts/getbuild.py b/tests/scripts/getbuild.py
new file mode 100755
index 00000000000..ff0eed2d8b7
--- /dev/null
+++ b/tests/scripts/getbuild.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+
+import json
+import time
+import sys
+import argparse
+from urllib.request import urlopen, urlretrieve
+
+_start_time = None
+_last_time = None
+artifact_size = 0
+def reporthook(count, block_size, total_size):
+ global _start_time, _last_time, artifact_size
+ cur_time = int(time.time())
+ if count == 0:
+ _start_time = cur_time
+ _last_time = cur_time
+ return
+
+ if cur_time == _last_time:
+ return
+
+ _last_time = cur_time
+
+ duration = cur_time - _start_time
+ progress_size = int(count * block_size)
+ speed = int(progress_size / (1024 * duration))
+ if total_size < 0 and artifact_size > 0:
+ total_size = artifact_size
+ if total_size > 0:
+ percent = int(count * block_size * 100 / total_size)
+ time_left = (total_size - progress_size) / speed / 1024
+ sys.stdout.write("\r...%d%%, %d(%d) MB, %d KB/s, %d seconds left..." %
+ (percent, progress_size / (1024 * 1024), total_size / (1024 * 1024), speed, time_left))
+ else:
+ sys.stdout.write("\r...%d MB, %d KB/s, ..." %
+ (progress_size / (1024 * 1024), speed))
+ sys.stdout.flush()
+
+def validate_url_or_abort(url):
+ # Attempt to retrieve HTTP response code
+ try:
+ urlfile = urlopen(url)
+ response_code = urlfile.getcode()
+ urlfile.close()
+ except IOError:
+ response_code = None
+
+ if not response_code:
+ print("Did not receive a response from remote machine. Aborting...")
+ sys.exit(1)
+ else:
+ # Check for a 4xx response code which indicates a nonexistent URL
+ if response_code / 100 == 4:
+ print("Image file not found on remote machine. Aborting...")
+ sys.exit(1)
+
+def get_download_url(buildid, artifact_name):
+ """get download url"""
+
+ artifact_url = "https://dev.azure.com/mssonic/build/_apis/build/builds/{}/artifacts?artifactName={}&api-version=5.0".format(buildid, artifact_name)
+
+ resp = urlopen(artifact_url)
+
+ j = json.loads(resp.read().decode('utf-8'))
+
+ download_url = j['resource']['downloadUrl']
+ artifact_size = int(j['resource']['properties']['artifactsize'])
+
+ return (download_url, artifact_size)
+
+
+def download_artifacts(url, content_type, platform, buildid):
+ """find latest successful build id for a branch"""
+
+ if content_type == 'image':
+ if platform == 'kvm':
+ filename = 'sonic-vs.img.gz'
+ else:
+ filename = "sonic-{}.bin".format(platform)
+
+ url = url.replace('zip', 'file')
+ url += "&subPath=%2Ftarget%2F{}".format(filename)
+ else:
+ filename = "{}.zip".format(platform)
+
+ if url.startswith('http://') or url.startswith('https://'):
+ print('Downloading {} from build {}...'.format(filename, buildid))
+ validate_url_or_abort(url)
+ try:
+ urlretrieve(url, filename, reporthook)
+ except Exception as e:
+ print("Download error", e)
+ sys.exit(1)
+
+def find_latest_build_id(branch):
+ """find latest successful build id for a branch"""
+
+ builds_url = "https://dev.azure.com/mssonic/build/_apis/build/builds?definitions=1&branchName=refs/heads/{}&resultFilter=succeeded&statusFilter=completed&api-version=6.0".format(branch)
+
+ resp = urlopen(builds_url)
+
+ j = json.loads(resp.read().decode('utf-8'))
+
+ latest_build_id = int(j['value'][0]['id'])
+
+ return latest_build_id
+
+def main():
+ global artifact_size
+
+ parser = argparse.ArgumentParser(description='Download artifacts from sonic azure devops.')
+ parser.add_argument('--buildid', metavar='buildid', type=int, help='build id')
+ parser.add_argument('--branch', metavar='branch', type=str, help='branch name')
+ parser.add_argument('--platform', metavar='platform', type=str,
+ choices=['broadcom', 'mellanox', 'kvm'],
+ help='platform to download')
+ parser.add_argument('--content', metavar='content', type=str,
+ choices=['all', 'image'], default='image',
+ help='download content type [all|image(default)]')
+ args = parser.parse_args()
+
+ if args.buildid is None:
+ buildid = find_latest_build_id(args.branch)
+ else:
+ buildid = int(args.buildid)
+
+ artifact_name = "sonic-buildimage.{}".format(args.platform)
+
+ (dl_url, artifact_size) = get_download_url(buildid, artifact_name)
+
+ download_artifacts(dl_url, args.content, args.platform, buildid)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/icmp_responder.py b/tests/scripts/icmp_responder.py
index 5dca3cf4c80..d68f369f5ee 100644
--- a/tests/scripts/icmp_responder.py
+++ b/tests/scripts/icmp_responder.py
@@ -52,7 +52,7 @@ def __call__(self):
for s in sel[0]:
packet = s.recv()
if packet is not None:
- if packet[ICMP].type == self.TYPE_ECHO_REQUEST and self.request_handler:
+ if ICMP in packet and packet[ICMP].type == self.TYPE_ECHO_REQUEST and self.request_handler:
self.request_handler(s, packet, self.dst_mac)
finally:
for s in self.sniff_sockets:
diff --git a/tests/snmp/test_snmp_phy_entity.py b/tests/snmp/test_snmp_phy_entity.py
index 7a88d62bf28..ea39839f1e3 100644
--- a/tests/snmp/test_snmp_phy_entity.py
+++ b/tests/snmp/test_snmp_phy_entity.py
@@ -444,20 +444,20 @@ def test_turn_off_pdu_and_check_psu_info(duthost, localhost, creds, pdu_controll
pytest.skip('At least 2 outlets required for rest of the testing in this case')
# turn on all PSU
- for item in outlet_status:
- if not item['outlet_on']:
- pdu_controller.turn_on_outlet(item["outlet_id"])
+ for outlet in outlet_status:
+ if not outlet['outlet_on']:
+ pdu_controller.turn_on_outlet(outlet)
time.sleep(5)
outlet_status = pdu_controller.get_outlet_status()
- for item in outlet_status:
- if not item['outlet_on']:
+ for outlet in outlet_status:
+ if not outlet['outlet_on']:
pytest.skip('Not all outlet are powered on, skip rest of the testing in this case')
# turn off the first PSU
- first_outlet_id = outlet_status[0]['outlet_id']
- pdu_controller.turn_off_outlet(first_outlet_id)
- assert wait_until(30, 5, check_outlet_status, pdu_controller, first_outlet_id, False)
+ first_outlet = outlet_status[0]
+ pdu_controller.turn_off_outlet(first_outlet)
+ assert wait_until(30, 5, check_outlet_status, pdu_controller, first_outlet, False)
# wait for psud update the database
assert wait_until(120, 20, _check_psu_status_after_power_off, duthost, localhost, creds)
@@ -581,13 +581,13 @@ def is_null_str(value):
return not value or value == str(None) or value == 'N/A'
-def check_outlet_status(pdu_controller, outlet_id, expect_status):
+def check_outlet_status(pdu_controller, outlet, expect_status):
"""
Check if a given PSU is at expect status
:param pdu_controller: PDU controller
- :param outlet_id: outlet id
+ :param outlet: PDU outlet
:param expect_status: Expect bool status, True means on, False means off
:return: True if a given PSU is at expect status
"""
- status = pdu_controller.get_outlet_status(outlet_id)
+ status = pdu_controller.get_outlet_status(outlet)
return 'outlet_on' in status[0] and status[0]['outlet_on'] == expect_status
diff --git a/tests/snmp/test_snmp_v2mib.py b/tests/snmp/test_snmp_v2mib.py
new file mode 100644
index 00000000000..cdc41ecd3dd
--- /dev/null
+++ b/tests/snmp/test_snmp_v2mib.py
@@ -0,0 +1,46 @@
+"""
+Test SNMPv2MIB in SONiC.
+"""
+
+import pytest
+from tests.common.helpers.assertions import pytest_assert # pylint: disable=import-error
+
+pytestmark = [
+ pytest.mark.topology('any')
+]
+
+
+def test_snmp_v2mib(duthosts, rand_one_dut_hostname, localhost, creds):
+ """
+ Verify SNMPv2-MIB objects are functioning properly
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ host_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
+ snmp_facts = localhost.snmp_facts(host=host_ip, version="v2c",
+ community=creds["snmp_rocommunity"])['ansible_facts']
+ dut_facts = duthost.setup()['ansible_facts']
+ debian_ver = duthost.shell('cat /etc/debian_version')['stdout']
+ cmd = 'docker exec snmp grep "sysContact" /etc/snmp/snmpd.conf'
+ sys_contact = " ".join(duthost.shell(cmd)['stdout'].split()[1:])
+ sys_location = duthost.shell("grep 'snmp_location' /etc/sonic/snmp.yml")['stdout'].split()[-1]
+
+ expected_res = {'kernel_version': dut_facts['ansible_kernel'],
+ 'hwsku': duthost.facts['hwsku'],
+ 'os_version': 'SONiC.{}'.format(duthost.os_version),
+ 'debian_version': '{} {}'.format(dut_facts['ansible_distribution'], debian_ver)}
+
+ #Verify that sysName, sysLocation and sysContact MIB objects functions properly
+ pytest_assert(snmp_facts['ansible_sysname'] == duthost.hostname,
+ "Unexpected MIB result {}".format(snmp_facts['ansible_sysname']))
+ pytest_assert(snmp_facts['ansible_syslocation'] == sys_location,
+ "Unexpected MIB result {}".format(snmp_facts['ansible_syslocation']))
+ pytest_assert(snmp_facts['ansible_syscontact'] == sys_contact,
+ "Unexpected MIB result {}".format(snmp_facts['ansible_syscontact']))
+
+ #Verify that sysDescr MIB object functions properly
+ missed_values = []
+ for system_value in expected_res:
+ if expected_res[system_value] not in snmp_facts['ansible_sysdescr']:
+ missed_values.append(expected_res[system_value])
+ pytest_assert(not missed_values, "System values {} was not found in SNMP facts: {}"
+ .format(missed_values, snmp_facts['ansible_sysdescr']))
diff --git a/tests/templates/garp_service.conf.j2 b/tests/templates/garp_service.conf.j2
new file mode 100644
index 00000000000..c15d7967ccb
--- /dev/null
+++ b/tests/templates/garp_service.conf.j2
@@ -0,0 +1,10 @@
+[program:garp_service]
+command=/usr/bin/python /opt/garp_service.py {{ garp_service_args }}
+process_name=garp_service
+stdout_logfile=/tmp/garp_service.out.log
+stderr_logfile=/tmp/garp_service.err.log
+redirect_stderr=false
+autostart=false
+autorestart=false
+startsecs=1
+numprocs=1
diff --git a/tests/templates/y_cable_simulator_client.j2 b/tests/templates/y_cable_simulator_client.j2
index 5847feee84d..909b6e6836c 100644
--- a/tests/templates/y_cable_simulator_client.j2
+++ b/tests/templates/y_cable_simulator_client.j2
@@ -1,6 +1,11 @@
from urllib import request, error
import json
-from sonic_py_common import logger
+import os
+import re
+from sonic_py_common import logger, device_info
+from portconfig import get_port_config
+from natsort import natsorted
+from sonic_py_common.interface import backplane_prefix
DUTS_MAP = {{ duts_map }}
@@ -16,6 +21,118 @@ helper_logger = logger.Logger(SYSLOG_IDENTIFIER)
UPPER_TOR = "upper_tor"
LOWER_TOR = "lower_tor"
+PLATFORM_JSON = 'platform.json'
+PORT_CONFIG_INI = 'port_config.ini'
+
+# A dict for mapping physical port to host interface
+g_physical_to_host_port_mapping = {}
+
+def _physical_port_to_host_port(physical_port):
+ """
+ Convert physical port to host interface index.
+    The physical port index got from sfputil is different from the host interface index when the interface is split.
+ However, the mux bridges on vm_host are named according to host interface index (vm_topology.py).
+    So a conversion is needed.
+
+ @arg physical_port: The physical port index got from sfputil
+ @returns: The host interface index (0-based)
+ """
+ global g_physical_to_host_port_mapping
+
+ if not g_physical_to_host_port_mapping:
+ _load_port_info()
+ # Return physical_port - 1 in case loading port config file failure
+ return g_physical_to_host_port_mapping.get(physical_port, physical_port - 1)
+
+def _load_port_info():
+ """
+ Parse platform.json or port_config.ini to get the mapping between physical_port and host interface index
+ and physical_port and logical port name. The mapping is saved in two global variables.
+ """
+ porttabfile = device_info.get_path_to_port_config_file()
+ parse_fmt_platform_json = (os.path.basename(porttabfile) == PLATFORM_JSON)
+
+ if parse_fmt_platform_json:
+ _load_port_config_json()
+ else:
+ _load_port_config_ini(porttabfile)
+
+def _load_port_config_json():
+ """
+ A helper function for loading port config from 'platform.json'
+ """
+ global g_physical_to_host_port_mapping
+
+ (platform, hwsku) = device_info.get_platform_and_hwsku()
+ ports, _, _ = get_port_config(hwsku, platform)
+ if not ports:
+ helper_logger.log_warning('Failed to get port config')
+ return
+ else:
+ logical_list = []
+ for intf in ports.keys():
+ logical_list.append(intf)
+
+ logical = natsorted(logical_list, key=lambda y: y.lower())
+ host_intf_index = 0
+ for intf_name in logical:
+ fp_port_index = -1
+ if 'index' in ports[intf_name].keys():
+ fp_port_index = int(ports[intf_name]['index'])
+ if fp_port_index not in g_physical_to_host_port_mapping:
+ g_physical_to_host_port_mapping[fp_port_index] = host_intf_index;
+ host_intf_index += 1
+
+def _load_port_config_ini(porttabfile):
+ """
+ A helper function for loading port config from 'port_config.ini'
+ """
+ global g_physical_to_host_port_mapping
+
+ parse_fmt_port_config_ini = (os.path.basename(porttabfile) == PORT_CONFIG_INI)
+ host_intf_index = 0
+ with open(porttabfile, "r") as f:
+ # Read the porttab file and generate dicts
+ # with mapping for future reference.
+ title = []
+ for line in f:
+ line.strip()
+ if re.search("^#", line) is not None:
+ # The current format is: # name lanes alias index speed
+ # Where the ordering of the columns can vary
+ title = line.lstrip('#').strip().split()
+ continue
+ # Parsing logic for 'port_config.ini' file
+ if (parse_fmt_port_config_ini):
+ # bcm_port is not explicitly listed in port_config.ini format
+ # Currently we assume ports are listed in numerical order according to bcm_port
+ # so we use the port's position in the file (zero-based) as bcm_port
+ portname = line.split()[0]
+
+ # Ignore if this is an internal backplane interface
+ if portname.startswith(backplane_prefix()):
+ continue
+
+ if "index" in title:
+ fp_port_index = int(line.split()[title.index("index")])
+ # Leave the old code for backward compatibility
+ elif "asic_port_name" not in title and len(line.split()) >= 4:
+ fp_port_index = int(line.split()[3])
+ else:
+ fp_port_index = portname.split("Ethernet").pop()
+ fp_port_index = int(fp_port_index.split("s").pop(0))/4
+ else:
+ # Parsing logic for older 'portmap.ini' file
+ (portname, bcm_port) = line.split("=")[1].split(",")[:2]
+
+ fp_port_index = portname.split("Ethernet").pop()
+ fp_port_index = int(fp_port_index.split("s").pop(0))/4
+ if fp_port_index not in g_physical_to_host_port_mapping:
+ g_physical_to_host_port_mapping[fp_port_index] = host_intf_index
+
+ # Next line, next host index
+ host_intf_index += 1
+
def _url(physical_port):
"""
Helper function to build an url for given physical_port
@@ -25,7 +142,8 @@ def _url(physical_port):
Returns:
str: The url for post/get.
"""
- return BASE_URL + "/mux/{}/{}".format(VM_SET, physical_port - 1)
+ host_intf_index = _physical_port_to_host_port(physical_port)
+ return BASE_URL + "/mux/{}/{}".format(VM_SET, host_intf_index)
def _post(physical_port, data):
"""
diff --git a/tests/voq/__init__.py b/tests/voq/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/voq/test_voq_init.py b/tests/voq/test_voq_init.py
new file mode 100644
index 00000000000..17eec6d8c9d
--- /dev/null
+++ b/tests/voq/test_voq_init.py
@@ -0,0 +1,436 @@
+"""Test initialization of VoQ objects, switch, system ports, router interfaces, neighbors, inband port."""
+import json
+import logging
+import pytest
+from tests.common.helpers.assertions import pytest_assert
+
+from tests.common.helpers.redis import AsicDbCli, RedisKeyNotFound
+from tests.common.errors import RunAnsibleModuleFail
+from voq_helpers import check_local_neighbor, check_voq_remote_neighbor, get_sonic_mac, get_neighbor_mac
+from voq_helpers import check_local_neighbor_asicdb, get_device_system_ports, get_inband_info, get_port_by_ip
+from voq_helpers import check_rif_on_sup, check_voq_neighbor_on_sup, find_system_port
+
+pytestmark = [
+ pytest.mark.topology('t2')
+]
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="module", autouse=True)
+def chassis_facts(duthosts):
+ """
+ Fixture to add some items to host facts from inventory file.
+ """
+ for a_host in duthosts.nodes:
+
+ if len(duthosts.supervisor_nodes) > 0:
+ out = a_host.command("cat /etc/sonic/card_details.json")
+ card_details = json.loads(out['stdout'])
+ if 'slot_num' in card_details:
+ a_host.facts['slot_num'] = card_details['slot_num']
+
+
+@pytest.fixture(scope="module")
+def nbrhosts_facts(nbrhosts):
+ nbrhosts_facts = {}
+ for a_vm in nbrhosts:
+ try:
+ vm_facts = nbrhosts[a_vm]['host'].eos_facts()
+ except RunAnsibleModuleFail:
+ logger.error("VM: %s is down, skipping config fetching.", a_vm)
+ continue
+ logger.debug("vm facts: {}".format(json.dumps(vm_facts, indent=4)))
+ nbrhosts_facts[a_vm] = vm_facts
+ return nbrhosts_facts
+
+
+def test_voq_switch_create(duthosts):
+ """Compare the config facts with the asic db for switch:
+ * Verify ASIC_DB get all system ports referenced in configDB created on all hosts and ASICs.
+ * Verify object creation and values of port attributes.
+ """
+
+ switch_id_list = []
+ for per_host in duthosts.frontend_nodes:
+
+ for asic in per_host.asics:
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_facts = cfg_facts['DEVICE_METADATA']['localhost']
+ asicdb = AsicDbCli(asic)
+
+ switchkey = asicdb.get_switch_key()
+ logger.info("Checking switch %s", switchkey)
+ check_list = {
+ "max_cores": "SAI_SWITCH_ATTR_MAX_SYSTEM_CORES",
+ "switch_id": "SAI_SWITCH_ATTR_SWITCH_ID"}
+ for k in check_list:
+ asicdb.get_and_check_key_value(switchkey, dev_facts[k], field=check_list[k])
+
+ pytest_assert(dev_facts["switch_id"] not in switch_id_list,
+ "Switch ID: %s has been used more than once" % dev_facts["switch_id"])
+ switch_id_list.append(dev_facts["switch_id"])
+
+ asicdb.get_and_check_key_value(switchkey, "SAI_SWITCH_TYPE_VOQ", field="SAI_SWITCH_ATTR_TYPE")
+
+
+def test_voq_system_port_create(duthosts):
+ """Compare the config facts with the asic db for system ports
+
+ * Verify ASIC_DB get all system ports referenced in configDB created on all hosts and ASICs.
+ * Verify object creation and values of port attributes.
+
+ """
+
+ for per_host in duthosts.frontend_nodes:
+
+ for asic in per_host.asics:
+ logger.info("Checking system ports on host: %s, asic: %s", per_host.hostname, asic.asic_index)
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_ports = get_device_system_ports(cfg_facts)
+ asicdb = AsicDbCli(asic)
+ keylist = asicdb.get_system_port_key_list()
+ pytest_assert(len(keylist) == len(dev_ports.keys()),
+ "Found %d system port keys, %d entries in cfg_facts, not matching" % (
+ len(keylist), len(dev_ports.keys())))
+ logger.info("Found %d system port keys, %d entries in cfg_facts, checking each.",
+ len(keylist), len(dev_ports.keys()))
+ for portkey in keylist:
+ try:
+ port_output = asicdb.hget_key_value(portkey, field="SAI_SYSTEM_PORT_ATTR_CONFIG_INFO")
+ except RedisKeyNotFound:
+ # TODO: Need to check on behavior here.
+ logger.warning("System port: %s had no SAI_SYSTEM_PORT_ATTR_CONFIG_INFO", portkey)
+ continue
+ port_data = json.loads(port_output)
+ for cfg_port in dev_ports:
+ if dev_ports[cfg_port]['system_port_id'] == port_data['port_id']:
+ # "switch_id": "0",
+ # "core_index": "1",
+ # "core_port_index": "6",
+ # "speed": "400000"
+ pytest_assert(dev_ports[cfg_port]['switch_id'] == port_data[
+ 'attached_switch_id'], "switch IDs do not match for port: %s" % portkey)
+ pytest_assert(dev_ports[cfg_port]['core_index'] == port_data[
+ 'attached_core_index'], "switch IDs do not match for port: %s" % portkey)
+ pytest_assert(dev_ports[cfg_port]['core_port_index'] == port_data[
+ 'attached_core_port_index'], "switch IDs do not match for port: %s" % portkey)
+ pytest_assert(dev_ports[cfg_port]['speed'] == port_data[
+ 'speed'], "switch IDs do not match for port: %s" % portkey)
+ break
+ else:
+ logger.error("Could not find config entry for portkey: %s" % portkey)
+
+ logger.info("Host: %s, Asic: %s all ports match all parameters", per_host.hostname, asic.asic_index)
+
+
+def test_voq_local_port_create(duthosts):
+ """Compare the config facts with the asic db for local ports
+
+ * Verify ASIC_DB has host interface information for all local ports on all cards and ASICs.
+ * Verify host interfaces exist on host CLI (ifconfig).
+ * Verify interfaces exist in show interfaces on the linecard.
+ """
+
+ for per_host in duthosts.frontend_nodes:
+
+ for asic in per_host.asics:
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_ports = cfg_facts['PORT']
+
+ asicdb = AsicDbCli(asic)
+
+ keylist = asicdb.get_hostif_list()
+ pytest_assert(len(keylist) == len(dev_ports.keys()),
+ "Found %d hostif keys, %d entries in cfg_facts" % (len(keylist), len(dev_ports.keys())))
+ logger.info("Found %s ports to check on host:%s, asic: %s.", len(dev_ports.keys()), per_host.hostname,
+ asic.asic_index)
+
+ show_intf = asic.show_interface(command="status")['ansible_facts']
+ for portkey in keylist:
+ port_name = asicdb.hget_key_value(portkey, "SAI_HOSTIF_ATTR_NAME")
+ port_state = asicdb.hget_key_value(portkey, "SAI_HOSTIF_ATTR_OPER_STATUS")
+ port_type = asicdb.hget_key_value(portkey, "SAI_HOSTIF_ATTR_TYPE")
+ logger.info("Checking port: %s, state: %s", port_name, port_state)
+ # "SAI_HOSTIF_ATTR_NAME": "Ethernet0",
+ # "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x1000000000002",
+ # "SAI_HOSTIF_ATTR_OPER_STATUS": "false",
+ # "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV"
+ pytest_assert(port_type == "SAI_HOSTIF_TYPE_NETDEV", "Port %s is not type netdev" % portkey)
+ if port_state == "true":
+ pytest_assert(show_intf['int_status'][port_name]['oper_state'] == "up",
+ "Show interface state is down when it should be up")
+ if port_state == "false":
+ pytest_assert(show_intf['int_status'][port_name]['oper_state'] == "down",
+ "Show interface state is up when it should be down")
+
+ if asic.namespace is None:
+ cmd = "sudo ifconfig %s" % port_name
+ else:
+ cmd = "sudo ip netns exec %s ifconfig %s" % (asic.namespace, port_name)
+ ifout = per_host.command(cmd)
+ assert "not found" not in ifout['stdout_lines'][0], "Interface %s not found" % port_name
+ if port_state == "true" and "RUNNING" in ifout['stdout_lines'][0]:
+ logger.debug("Interface state is up and matches")
+ elif port_state == "false" and "RUNNING" not in ifout['stdout_lines'][0]:
+ logger.debug("Interface state is down and matches")
+ else:
+ raise AssertionError("Interface state does not match: %s %s", port_state, ifout['stdout_lines'][0])
+
+
+def test_voq_interface_create(duthosts):
+ """
+ Verify router interfaces are created on all line cards and present in Chassis App Db.
+
+ * Verify router interface creation on local ports in ASIC DB.
+ * PORT_ID should match system port table and traced back to config_db.json, mac and MTU should match as well.
+ * Verify SYSTEM_INTERFACE table in Chassis AppDb (redis-dump -h -p 6380 -d 12 on supervisor).
+ * Verify creation interfaces with different MTUs in configdb.json.
+ * Verify creation of different subnet masks in configdb.json.
+ * Repeat with IPv4, IPv6, dual-stack.
+
+ """
+ for per_host in duthosts.frontend_nodes:
+ logger.info("Check router interfaces on node: %s", per_host.hostname)
+
+ for asic in per_host.asics:
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_intfs = cfg_facts['INTERFACE']
+ dev_sysports = get_device_system_ports(cfg_facts)
+
+ slot = per_host.facts['slot_num']
+ rif_ports_in_asicdb = []
+
+ # intf_list = get_router_interface_list(dev_intfs)
+ asicdb = AsicDbCli(asic)
+
+ asicdb_intf_key_list = asicdb.get_router_if_list()
+ # Check each rif in the asicdb, if it is local port, check VOQ DB for correct RIF.
+ # If it is on system port, verify slot/asic/port and OID match a RIF in VoQDB
+ for rif in asicdb_intf_key_list:
+ rif_type = asicdb.hget_key_value(rif, "SAI_ROUTER_INTERFACE_ATTR_TYPE")
+ if rif_type != "SAI_ROUTER_INTERFACE_TYPE_PORT":
+ logger.info("Skip this rif: %s, it is not on a port: %s", rif, rif_type)
+ continue
+ else:
+ portid = asicdb.hget_key_value(rif, "SAI_ROUTER_INTERFACE_ATTR_PORT_ID")
+ logger.info("Process RIF %s, Find port with ID: %s", rif, portid)
+
+ porttype = asicdb.get_rif_porttype(portid)
+ logger.info("RIF: %s is of type: %s", rif, porttype)
+ if porttype == 'hostif':
+ # find the hostif entry to get the physical port the router interface is on.
+ hostifkey = asicdb.find_hostif_by_portid(portid)
+ hostif = asicdb.hget_key_value(hostifkey, 'SAI_HOSTIF_ATTR_NAME')
+ logger.info("RIF: %s is on local port: %s", rif, hostif)
+ rif_ports_in_asicdb.append(hostif)
+ if hostif not in dev_intfs:
+ pytest.fail("Port: %s has a router interface, but it isn't in configdb." % portid)
+
+ # check MTU and ethernet address
+ asicdb.get_and_check_key_value(rif, cfg_facts['PORT'][hostif]['mtu'],
+ field="SAI_ROUTER_INTERFACE_ATTR_MTU")
+ intf_mac = get_sonic_mac(per_host, asic.asic_index, hostif)
+ asicdb.get_and_check_key_value(rif, intf_mac, field="SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS")
+
+ sup_rif = asicdb.hget_key_value("VIDTORID", "oid:" + rif.split(":")[3])
+ sysport_info = find_system_port(dev_sysports, slot, asic.asic_index, hostif)
+ for sup in duthosts.supervisor_nodes:
+ check_rif_on_sup(sup, sup_rif, sysport_info['slot'], sysport_info['asic'], hostif)
+
+ elif porttype == 'sysport':
+ try:
+ port_output = asicdb.hget_key_value("ASIC_STATE:SAI_OBJECT_TYPE_SYSTEM_PORT:" + portid,
+ field="SAI_SYSTEM_PORT_ATTR_CONFIG_INFO")
+ except RedisKeyNotFound:
+ # not a hostif or system port, log error and continue
+ logger.error("Did not find OID %s in local or system tables" % portid)
+ continue
+ port_data = json.loads(port_output)
+ for cfg_port in dev_sysports:
+ if dev_sysports[cfg_port]['system_port_id'] == port_data['port_id']:
+ logger.info("RIF: %s is on remote port: %s", rif, cfg_port)
+ break
+ else:
+ raise AssertionError("Did not find OID %s in local or system tables" % portid)
+
+ sys_slot, sys_asic, sys_port = cfg_port.split("|")
+ sup_rif = asicdb.hget_key_value("VIDTORID", "oid:" + rif.split(":")[3])
+ for sup in duthosts.supervisor_nodes:
+ check_rif_on_sup(sup, sup_rif, sys_slot, sys_asic, sys_port)
+
+ elif porttype == 'port':
+ # this is the RIF on the inband port.
+ inband = get_inband_info(cfg_facts)
+ logger.info("RIF: %s is on local port: %s", rif, inband['port'])
+
+ # check MTU and ethernet address
+ asicdb.get_and_check_key_value(rif, cfg_facts['PORT'][inband['port']]['mtu'],
+ field="SAI_ROUTER_INTERFACE_ATTR_MTU")
+ intf_mac = get_sonic_mac(per_host, asic.asic_index, inband['port'])
+ asicdb.get_and_check_key_value(rif, intf_mac, field="SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS")
+
+ sup_rif = asicdb.hget_key_value("VIDTORID", "oid:" + rif.split(":")[3])
+ sysport_info = find_system_port(dev_sysports, slot, asic.asic_index, inband['port'])
+ for sup in duthosts.supervisor_nodes:
+ check_rif_on_sup(sup, sup_rif, sysport_info['slot'], sysport_info['asic'], inband['port'])
+
+ # Verify each RIF in config had a corresponding local port RIF in the asicDB.
+ for rif in dev_intfs:
+ pytest_assert(rif in rif_ports_in_asicdb, "Interface %s is in configdb.json but not in asicdb" % rif)
+ logger.info("Interfaces %s are present in configdb.json and asicdb" % str(dev_intfs.keys()))
+
+
+def test_voq_neighbor_create(duthosts, nbrhosts, nbrhosts_facts):
+ """
+ Verify neighbor entries are created on linecards for local and remote VMS.
+
+ For local neighbors:
+ * ARP/NDP should be resolved when BGP to adjacent VMs is established.
+ * On local linecard, verify ASIC DB entries.
+ * MAC address matches MAC of neighbor VM.
+ * Router interface OID matches back to the correct interface and port the neighbor was learned on.
+ * On local linecard, verify show arp/ndp, ip neigh commands.
+ * MAC address matches MAC of neighbor VM.
+ * On local linecard. verify neighbor table in appDB.
+ * MAC address matches MAC of neighbor VM.
+ * On supervisor card, verify SYSTEM_NEIGH table in Chassis AppDB (redis-dump -h -p 6380 -d 12 on supervisor).
+ * Verify encap index and MAC address match between ASICDB the Chassis AppDB
+ * Repeat with IPv4, IPv6, dual-stack.
+
+ For remote neighbors:
+ * When local neighbors are established as in the Local Neighbor testcase, corresponding entries will be established
+ on all other line cards. On each remote card, verify:
+ * Verify ASIC DB entries on remote linecards.
+ * Verify impose index=True in ASIC DB.
+ * Verify MAC address in ASIC DB is the remote neighbor mac.
+ * Verify encap index for ASIC DB entry matches Chassis App DB.
+ * Verify router interface OID matches the interface the neighbor was learned on.
+ * Verify on linecard CLI, show arp/ndp, ip neigh commands.
+ * For inband port, MAC should be inband port mac in kernel table and LC appDb.
+ * For inband vlan mode, MAC will be remote ASIC mac in kernel table and LC appdb.
+ * Verify neighbor table in linecard appdb.
+ * Verify static route is installed in kernel routing table with /32 (or /128 for IPv6) for neighbor entry.
+ * Repeat with IPv4, IPv6, dual-stack.
+
+ """
+
+ for per_host in duthosts.frontend_nodes:
+
+ for asic in per_host.asics:
+ logger.info("Checking local neighbors on host: %s, asic: %s", per_host.hostname, asic.asic_index)
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_sysports = get_device_system_ports(cfg_facts)
+ neighs = cfg_facts['BGP_NEIGHBOR']
+ inband_info = get_inband_info(cfg_facts)
+
+ # Check each neighbor in table
+ for neighbor in neighs:
+ local_ip = neighs[neighbor]['local_addr']
+ if local_ip == inband_info['ipv4_addr'] or local_ip == inband_info['ipv6_addr']:
+ # skip inband neighbors
+ continue
+
+ # Check neighbor on local linecard
+ local_port = get_port_by_ip(cfg_facts, local_ip)
+ show_intf = asic.show_interface(command="status")['ansible_facts']
+ if local_port is None:
+ logger.error("Did not find port for this neighbor %s, must skip", local_ip)
+ continue
+ elif "portchannel" in local_port.lower():
+ # TODO: LAG support
+ logger.info("Port channel is not supported yet by this test, skip port: %s", local_port)
+ continue
+ if show_intf['int_status'][local_port]['oper_state'] == "down":
+ logger.error("Port is down, must skip interface: %s, IP: %s", local_port, local_ip)
+ continue
+
+ neigh_mac = get_neighbor_mac(neighbor, nbrhosts, nbrhosts_facts)
+ if neigh_mac is None:
+ logger.error("Could not find neighbor MAC, must skip. IP: %s, port: %s", local_ip, local_port)
+
+ local_dict = check_local_neighbor(per_host, asic, neighbor, neigh_mac, local_port)
+ logger.info("Local_dict: %s", local_dict)
+
+ # Check the same neighbor entry on the supervisor nodes
+ sysport_info = find_system_port(dev_sysports, per_host.facts['slot_num'], asic.asic_index, local_port)
+ for sup in duthosts.supervisor_nodes:
+ check_voq_neighbor_on_sup(sup, sysport_info['slot'], sysport_info['asic'], local_port,
+ neighbor, local_dict['encap_index'], neigh_mac)
+
+ # Check the neighbor entry on each remote linecard
+ for rem_host in duthosts.frontend_nodes:
+
+ for rem_asic in rem_host.asics:
+ if rem_host == per_host and rem_asic == asic:
+ # skip remote check on local host
+ continue
+ rem_cfg_facts = rem_asic.config_facts(source="persistent")['ansible_facts']
+ remote_inband_info = get_inband_info(rem_cfg_facts)
+ remote_inband_mac = get_sonic_mac(rem_host, rem_asic.asic_index, remote_inband_info['port'])
+ check_voq_remote_neighbor(rem_host, rem_asic, neighbor, neigh_mac, remote_inband_info['port'],
+ local_dict['encap_index'], remote_inband_mac)
+
+
+def test_voq_inband_port_create(duthosts):
+ """
+ Test inband port creation.
+
+ These steps are covered by previous test cases:
+ * On each linecard, verify inband ports are present in ASICDB.
+ * On each linecard, verify inband router interfaces are present in ASICDB
+ * On supervisor card, verify inband router interfaces are present in Chassis App DB
+
+ This test function will cover:
+ * On each linecard, verify permanent neighbors for all inband ports.
+ * On each linecard, verify kernel routes for all inband ports.
+ * Repeat with IPv4, IPv6, dual-stack.
+
+
+ """
+ for per_host in duthosts.frontend_nodes:
+
+ for asic in per_host.asics:
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_sysports = get_device_system_ports(cfg_facts)
+ inband_info = get_inband_info(cfg_facts)
+ inband_mac = get_sonic_mac(per_host, asic.asic_index, inband_info['port'])
+
+ inband_ips = []
+ if 'ipv6_addr' in inband_info:
+ inband_ips.append(inband_info['ipv6_addr'])
+ if 'ipv4_addr' in inband_info:
+ inband_ips.append(inband_info['ipv4_addr'])
+
+ for neighbor_ip in inband_ips:
+
+ host = per_host
+ neighbor_mac = inband_mac
+ interface = inband_info['port']
+
+ logger.info("Check local neighbor on host %s, asic %s for %s/%s via port: %s", host.hostname,
+ str(asic.asic_index),
+ neighbor_ip, neighbor_mac, interface)
+
+ asic_dict = check_local_neighbor_asicdb(asic, neighbor_ip, neighbor_mac)
+ encap_idx = asic_dict['encap_index']
+
+ # Check the inband neighbor entry on the supervisor nodes
+ sysport_info = find_system_port(dev_sysports, per_host.facts['slot_num'], asic.asic_index, interface)
+ for sup in duthosts.supervisor_nodes:
+ check_voq_neighbor_on_sup(sup, sysport_info['slot'], sysport_info['asic'], interface, neighbor_ip,
+ encap_idx, inband_mac)
+
+ # Check the neighbor entry on each remote linecard
+ for rem_host in duthosts.frontend_nodes:
+
+ for rem_asic in rem_host.asics:
+ if rem_host == per_host and rem_asic == asic:
+ # skip remote check on local host
+ continue
+ rem_cfg_facts = rem_asic.config_facts(source="persistent")['ansible_facts']
+ remote_inband_info = get_inband_info(rem_cfg_facts)
+ remote_inband_mac = get_sonic_mac(rem_host, rem_asic.asic_index, remote_inband_info['port'])
+ check_voq_remote_neighbor(rem_host, rem_asic, neighbor_ip, inband_mac,
+ remote_inband_info['port'],
+ encap_idx, remote_inband_mac)
diff --git a/tests/voq/voq_helpers.py b/tests/voq/voq_helpers.py
new file mode 100644
index 00000000000..c2d0b3a8268
--- /dev/null
+++ b/tests/voq/voq_helpers.py
@@ -0,0 +1,484 @@
+import json
+import logging
+import re
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.helpers.redis import AsicDbCli, AppDbCli, VoqDbCli
+
+logger = logging.getLogger(__name__)
+
+
+def check_host_arp_table(host, neighbor_ip, neighbor_mac, interface, state):
+ """
+ Validates the ARP table of a host by running ip neigh for a single neighbor.
+
+ Args:
+ host: instance of SonicHost to run the arp show.
+ neighbor_ip: IP address of the neighbor to verify.
+ neighbor_mac: MAC address expected in the show command output.
+ interface: Port expected in the show command output.
+ state: ARP entry state expected in the show command output.
+
+ """
+ arptable = host.switch_arptable()['ansible_facts']
+ logger.debug("ARP: %s", arptable)
+ if ':' in neighbor_ip:
+ table = arptable['arptable']['v6']
+ else:
+ table = arptable['arptable']['v4']
+ pytest_assert(neighbor_ip in table, "IP %s not in arp list: %s" % (neighbor_ip, table.keys()))
+ pytest_assert(table[neighbor_ip]['macaddress'] == neighbor_mac,
+ "table MAC %s does not match neighbor mac: %s" % (table[neighbor_ip]['macaddress'], neighbor_mac))
+ pytest_assert(table[neighbor_ip]['interface'] == interface,
+ "table interface %s does not match interface: %s" % (table[neighbor_ip]['interface'], interface))
+ pytest_assert(table[neighbor_ip]['state'].lower() == state.lower(),
+ "table state %s is not %s" % (table[neighbor_ip]['state'].lower(), state.lower()))
+
+
+def check_local_neighbor_asicdb(asic, neighbor_ip, neighbor_mac):
+ """
+ Verifies the neighbor information of a sonic host in the asicdb for a locally attached neighbor.
+
+ Args:
+ asic: The SonicAsic instance to be checked.
+ neighbor_ip: The IP address of the neighbor.
+ neighbor_mac: The MAC address of the neighbor.
+
+ Returns:
+ A dictionary with the encap ID from the ASIC neighbor table.
+
+ Raises:
+ Pytest Failed exception when assertions fail.
+
+ """
+ asicdb = AsicDbCli(asic)
+ neighbor_key = asicdb.get_neighbor_key_by_ip(neighbor_ip)
+ pytest_assert(neighbor_key is not None, "Did not find neighbor in asictable for IP: %s" % neighbor_ip)
+ asic_mac = asicdb.get_neighbor_value(neighbor_key, 'SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS')
+ pytest_assert(asic_mac.lower() == neighbor_mac.lower(),
+ "MAC does not match in asicDB, asic %s, device %s" % (asic_mac.lower(), neighbor_mac.lower()))
+ encap_idx = asicdb.get_neighbor_value(neighbor_key, 'SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX')
+ return {"encap_index": encap_idx}
+
+
+def check_local_neighbor(host, asic, neighbor_ip, neighbor_mac, interface):
+ """
+ Verifies the neighbor information of a sonic host for a locally attached neighbor.
+
+ The ASIC DB, APP DB, and host ARP table are checked.
+
+ Args:
+ host: Instance of SonicHost to check.
+ asic: Instance of SonicAsic to check.
+        neighbor_ip: IP address of the neighbor to check.
+ neighbor_mac: Expected ethernet MAC address of the neighbor.
+ interface: Expected interface the neighbor was learned on.
+
+ Returns:
+ A dictionary with the key into the LC APP DB neighbor table and the encap ID from the ASIC DB neighbor table.
+ {'encap_index': u'1074790408',
+ 'neighbor_key': u'NEIGH_TABLE:Ethernet10:2064:103::1'}
+
+ Raises:
+ Pytest Failed exception when assertions fail.
+
+ """
+ logger.info("Check local neighbor on host %s, asic %s for %s/%s via port: %s", host.hostname, str(asic.asic_index),
+ neighbor_ip, neighbor_mac, interface)
+
+ # verify asic db
+ asic_dict = check_local_neighbor_asicdb(asic, neighbor_ip, neighbor_mac)
+
+ # verify LC appdb
+ appdb = AppDbCli(asic)
+ neighbor_key = appdb.get_neighbor_key_by_ip(neighbor_ip)
+ appdb.get_and_check_key_value(neighbor_key, neighbor_mac, field="neigh")
+ pytest_assert(":{}:".format(interface) in neighbor_key, "Port for %s does not match" % neighbor_key)
+
+ # verify linux arp table
+ check_host_arp_table(host, neighbor_ip, neighbor_mac, interface, 'REACHABLE')
+
+ return {'neighbor_key': neighbor_key, 'encap_index': asic_dict['encap_index']}
+
+
+def check_bgp_kernel_route(host, asicnum, prefix, ipver, interface, present=True):
+ """
+ Checks the kernel route is installed from the bgp container.
+
+ Args:
+ host: sonic duthost instance to check.
+ asicnum: asic index to check.
+ prefix: IP address plus mask to check in routing table.
+ ipver: ip or ipv6.
+ interface: Attached interface for the neighbor route.
+ present: Optional; Check whether route is installed or removed.
+
+ Raises:
+ Pytest Failed exception when assertions fail.
+
+ """
+ docker = "bgp"
+ if host.facts["num_asic"] > 1:
+ docker = "bgp" + str(asicnum)
+
+ output = host.command("docker exec " + docker + " vtysh -c \"show {} route {} json\"".format(ipver, prefix))
+ parsed = json.loads(output["stdout"])
+ if present is True:
+ pytest_assert(prefix in parsed.keys(), "Prefix: %s not in route list: %s" % (prefix, parsed.keys()))
+ for route in parsed[prefix]:
+ if route['distance'] != 0:
+ found = False
+ continue
+ pytest_assert(route['protocol'] == "kernel", "Prefix: %s not kernel route" % prefix)
+ pytest_assert(route['nexthops'][0]['directlyConnected'] is True,
+ "Prefix: %s not directly connected" % prefix)
+ pytest_assert(route['nexthops'][0]['active'] is True, "Prefix: %s not active" % prefix)
+ pytest_assert(route['nexthops'][0]['interfaceName'] == interface,
+ "Prefix: %s out interface is not correct" % prefix)
+
+ found = True
+ break
+ pytest_assert(found, "Kernel route is not present in bgp output: %s" % parsed[prefix])
+ logger.info("Route %s is present in remote neighbor: %s/%s", prefix, host.hostname, str(asicnum))
+
+
+def check_host_kernel_route(host, asicnum, ipaddr, ipver, interface, present=True):
+ """
+ Checks the kernel route on the host OS.
+
+ Args:
+ host: sonic duthost instance to check.
+ asicnum: asic index to check.
+ ipaddr: IP address to check in routing table.
+ ipver: ip or ipv6.
+ interface: Attached interface for the neighbor route.
+ present: Optional; Check whether route is installed or removed.
+
+ Raises:
+ Pytest Failed exception when assertions fail.
+
+ """
+ ver = "-4" if ipver == "ip" else "-6"
+ if host.facts["num_asic"] == 1:
+ cmd = "ip {} route show exact {}".format(ver, ipaddr)
+ else:
+ cmd = "ip netns exec asic{} ip {} route show exact {}".format(asicnum, ver, ipaddr)
+ logger.debug("Kernel rt cmd: %s", cmd)
+ output = host.command(cmd)['stdout']
+ if present is True:
+ logger.info("host ip route output: %s", output)
+ pytest_assert(output.startswith(ipaddr), "Address: %s not in netstat output list: %s" % (ipaddr, output))
+ pytest_assert("dev %s" % interface in output, "Interface is not %s: %s" % (interface, output))
+
+
+def check_neighbor_kernel_route(host, asicnum, ipaddr, interface, present=True):
+ """
+ Verifies if a neighbor kernel route is installed or not.
+
+ Checks BGP docker and linux kernel route tables.
+
+ Args:
+ host: sonic duthost instance to check.
+ asicnum: asic index to check.
+ ipaddr: IP address to check in routing table. Mask will be applied by this function.
+ interface: Attached interface for the neighbor route.
+ present: Optional; Check whether route is installed or removed.
+ """
+ if ":" in ipaddr:
+ ipver = "ipv6"
+ prefix = ipaddr + "/128"
+ else:
+ ipver = "ip"
+ prefix = ipaddr + "/32"
+ check_bgp_kernel_route(host, asicnum, prefix, ipver, interface, present)
+ check_host_kernel_route(host, asicnum, ipaddr, ipver, interface, present)
+
+
+def check_voq_remote_neighbor(host, asic, neighbor_ip, neighbor_mac, interface, encap_idx, inband_mac):
+ """
+ Verifies the neighbor information of a neighbor learned on a different host.
+
+    The ASIC DB, APP DB, and host ARP table are checked. The host kernel route is verified. The encap ID from the
+ local neighbor is provided as a parameter and verified that it is imposed.
+
+ Args:
+ host: Instance of SonicHost to check.
+ asic: Instance of SonicAsic to check.
+        neighbor_ip: IP address of the neighbor to check.
+ neighbor_mac: Expected ethernet MAC address of the neighbor.
+ interface: Expected interface the neighbor was learned on.
+ encap_idx: The encap index from the SONIC host the neighbor is directly attached to.
+ inband_mac: The MAC of the inband port of the remote host.
+
+ Raises:
+ Pytest Failed exception when assertions fail.
+ """
+ logger.info("Check remote neighbor on host %s, asic: %s for %s/%s via port: %s", host.hostname,
+ str(asic.asic_index), neighbor_ip, neighbor_mac, interface)
+
+ # asic db
+ asicdb = AsicDbCli(asic)
+ neighbor_key = asicdb.get_neighbor_key_by_ip(neighbor_ip)
+ pytest_assert(neighbor_key is not None, "Did not find neighbor in asic table for IP: %s" % neighbor_ip)
+ pytest_assert(asicdb.get_neighbor_value(neighbor_key,
+ 'SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS').lower() == neighbor_mac.lower(),
+ "MAC does not match in asicDB")
+ pytest_assert(asicdb.get_neighbor_value(neighbor_key,
+ 'SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX') == encap_idx,
+ "Encap index does not match in asicDB")
+ pytest_assert(asicdb.get_neighbor_value(neighbor_key,
+ 'SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_IMPOSE_INDEX') == "true",
+ "Encap impose is not true in asicDB")
+ pytest_assert(asicdb.get_neighbor_value(neighbor_key,
+ 'SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL') == "false",
+ "is local is not false in asicDB")
+
+ # LC app db
+ appdb = AppDbCli(asic)
+ neighbor_key = appdb.get_neighbor_key_by_ip(neighbor_ip)
+ pytest_assert(":{}:".format(interface) in neighbor_key, "Port for %s does not match" % neighbor_key)
+ appdb.get_and_check_key_value(neighbor_key, inband_mac, field="neigh")
+
+ # verify linux arp table
+ check_host_arp_table(host, neighbor_ip, inband_mac, interface, 'PERMANENT')
+
+ # verify linux route entry
+ check_neighbor_kernel_route(host, asic.asic_index, neighbor_ip, interface)
+
+
+def check_rif_on_sup(sup, rif, slot, asic, port):
+ """
+ Checks the router interface entry on the supervisor card.
+
+ Args:
+ sup: duthost for the supervisor card
+ rif: OID of the router interface to check for.
+ slot: The slot number the router interface is on.
+ asic: The asic number the asic is on, or 0 if a single asic card.
+ port: the name of the port (Ethernet1)
+
+ """
+ voqdb = VoqDbCli(sup)
+
+ rif_oid = voqdb.get_router_interface_id(slot, asic, port)
+
+ if rif_oid == rif:
+ logger.info("RIF on sup: %s = %s", rif_oid, rif)
+ elif rif_oid[-10:-1] == rif[-10:-1]:
+ logger.warning("RIF on sup is a partial match: %s != %s", rif_oid, rif)
+ else:
+ logger.error("RIF on sup does not match: %s != %s" % (rif_oid, rif))
+
+
+def check_voq_neighbor_on_sup(sup, slot, asic, port, neighbor, encap_index, mac):
+ """
+ Checks the neighbor entry on the supervisor card.
+
+ Args:
+ sup: duthost for the supervisor card
+ slot: The slot the router interface is on, as in system port table (Slot2).
+        asic: The asic the router interface is on, as in the system port table (Asic0).
+ port: the name of the port (Ethernet1)
+ neighbor: The IP of the neighbor
+ encap_index: The encap ID of the neighbor from the local asic db
+ mac: The MAC address of the neighbor
+
+ Raises:
+ Pytest Failed exception when assertions fail.
+
+ """
+ voqdb = VoqDbCli(sup)
+ neigh_key = voqdb.get_neighbor_key_by_ip(neighbor)
+ logger.info("Neigh key: %s, slotnum: %s", neigh_key, slot)
+ pytest_assert("|%s|" % slot in neigh_key,
+ "Slot for %s does not match %s" % (neigh_key, slot))
+ pytest_assert("|%s:" % port in neigh_key,
+ "Port for %s does not match %s" % (neigh_key, port))
+ pytest_assert("|%s|" % asic in neigh_key,
+ "Asic for %s does not match %s" % (neigh_key, asic))
+
+ voqdb.get_and_check_key_value(neigh_key, mac, field="neigh")
+ voqdb.get_and_check_key_value(neigh_key, encap_index, field="encap_index")
+
+
def get_neighbor_mac(neigh_ip, nbrhosts, nbrhosts_facts):
    """
    Gets the MAC address of a neighbor IP on an EOS host.

    We need to get the MAC of the VM out of the linux shell, not from the EOS CLI. The MAC used for punt/inject
    on the EOS seems to be the linux one. Find the interface name on the VM that is associated with the IP address,
    then look on the linux OS shell for the MAC address of that interface.

    Args:
        neigh_ip: The IP address of the neighbor (v4 or v6).
        nbrhosts: dictionary provided by the nbrhosts fixture.
        nbrhosts_facts: per-VM ansible facts with an 'ansible_net_interfaces' table for each neighbor VM.

    Returns:
        A string with the MAC address, or None if no VM owns neigh_ip.
    """
    nbr_vm = ""
    nbr_intf = ""

    for a_vm in nbrhosts_facts:

        intfs = nbrhosts_facts[a_vm]['ansible_facts']['ansible_net_interfaces']
        for intf in intfs:
            # Guard with .get(): not every interface entry carries an 'ipv4'/'ipv6' key,
            # and either one may be present but empty.
            ipv4 = intfs[intf].get('ipv4') or {}
            if ipv4.get('address') == neigh_ip:
                nbr_vm = a_vm
                nbr_intf = intf
                break
            ipv6 = intfs[intf].get('ipv6') or {}
            if ipv6.get('address', '').lower() == neigh_ip.lower():
                nbr_vm = a_vm
                nbr_intf = intf
                break
        if nbr_vm != "":
            break
    else:
        logger.error("Could not find port for neighbor IP: %s", neigh_ip)
        logger.info("vm facts: {}".format(json.dumps(nbrhosts_facts, indent=4)))
        return None
    # convert Ethernet1 to eth1 - keep every digit, not just the last one,
    # so Ethernet12 maps to eth12 rather than eth2.
    shell_intf = "eth" + "".join(ch for ch in nbr_intf if ch.isdigit())
    nbrhosts[nbr_vm]['host'].eos_command(commands=["enable"])
    output = nbrhosts[nbr_vm]['host'].eos_command(commands=["bash ip addr show dev %s" % shell_intf])
    # 8: Ethernet0: mtu 9100 ...
    # link/ether a6:69:05:fd:da:5f brd ff:ff:ff:ff:ff:ff
    mac = output['stdout_lines'][0][1].split()[1]
    logger.info("mac: %s", mac)
    return mac
+
+
def get_sonic_mac(host, asicnum, port):
    """Gets the MAC address of a SONIC port.

    Args:
        host: a duthost instance
        asicnum: The asic number to run on, or empty string.
        port: The name of the port to get the MAC

    Returns:
        A string with the MAC address.
    """
    if host.facts["num_asic"] == 1:
        # Single-asic platforms have no network namespaces.
        cmd = "sudo ip link show {}".format(port)
    else:
        ns = "asic" + str(asicnum)
        cmd = "sudo ip netns exec {} ip link show {}".format(ns, port)
    output = host.command(cmd)
    # Second output line is "link/ether <mac> brd ...".
    mac = output['stdout_lines'][1].split()[1]
    # Use %s for asicnum: it may be an empty string (see Args), which %d cannot format.
    logger.info("host: %s, asic: %s, port: %s, mac: %s", host.hostname, asicnum, port, mac)
    return mac
+
+
def get_device_system_ports(cfg_facts):
    """Returns the system ports from the config facts as a single dictionary, instead of a nested dictionary.

    The ansible module for config facts automatically makes a 2 level nested dictionary when the keys are in the form
    of part1|part2|part3 or part1|part2. The first dictionary is keyed as "part1" and the nested dictionary is the
    remainder of the key with the value. This function returns a flat dictionary with the keys restored to their values
    from the files.

    Args:
        cfg_facts: The "ansible_facts" output from the duthost "config_facts" module.

    Returns:
        The system port config facts in a single layer dictionary.

    """
    nested = cfg_facts['SYSTEM_PORT']
    # Rejoin the two key levels with "|" to restore the original flat keys.
    return {
        "{}|{}".format(outer_key, inner_key): value
        for outer_key, inner in nested.items()
        for inner_key, value in inner.items()
    }
+
+
def get_inband_info(cfg_facts):
    """
    Returns the inband port and IP addresses present in the configdb.json.

    Args:
        cfg_facts: The "ansible_facts" output from the duthost "config_facts" module.

    Returns:
        A dictionary with the inband port and IP addresses.
    """
    inband = {}
    for port_name, addr_list in cfg_facts['VOQ_INBAND_INTERFACE'].items():
        for cidr in addr_list:
            inband['port'] = port_name
            addr, mask = cidr.split('/')
            # A colon distinguishes an IPv6 address from an IPv4 one.
            if ':' in addr:
                inband['ipv6_addr'] = addr
                inband['ipv6_mask'] = mask
            else:
                inband['ipv4_addr'] = addr
                inband['ipv4_mask'] = mask
    return inband
+
+
def get_port_by_ip(cfg_facts, ipaddr):
    """
    Returns the port which has a given IP address from the dut config.

    Args:
        cfg_facts: The "ansible_facts" output from the duthost "config_facts" module.
        ipaddr: The IP address to search for.

    Returns:
        A string with the port name. ("Ethernet12")

    Raises:
        Exception if no interface owns the given IP address.

    """
    if ':' in ipaddr:
        iptype = "ipv6"
    else:
        iptype = "ipv4"

    intf = {}
    intf.update(cfg_facts['INTERFACE'])
    if "PORTCHANNEL_INTERFACE" in cfg_facts:
        intf.update(cfg_facts['PORTCHANNEL_INTERFACE'])
    for a_intf in intf:
        for addrs in intf[a_intf]:
            intf_ip = addrs.split('/')
            # IPv6 compares case-insensitively; IPv4 compares exactly.
            if iptype == 'ipv6' and ':' in intf_ip[0] and intf_ip[0].lower() == ipaddr.lower():
                return a_intf
            elif iptype == 'ipv4' and ':' not in intf_ip[0] and intf_ip[0] == ipaddr:
                return a_intf

    raise Exception("Did not find port for IP %s" % ipaddr)
+
+
def find_system_port(dev_sysports, slot, asic_index, hostif):
    """
    System key string can be arbitrary text with slot, asic, and port, so try to find the match
    and return the correct string. ex. "Slot1|asic3|Ethernet12" or "Linecard4|Asic1|Portchannel23"

    Args:
        dev_sysports: dictionary from config_facts with all of the system ports on the system.
        slot: The slot number of the system port to find.
        asic_index: The asic number of ths system port to find.
        hostif: The interface of the system port to find.

    Returns:
        A dictionary with the system port text strings.

    Raises:
        KeyError if the system port can't be found in the dictionary.

    """

    # re.escape(hostif) keeps any regex metacharacters in the port name literal, and
    # fullmatch anchors the pattern so "Ethernet1" cannot match the key for "Ethernet12".
    sys_re = re.compile(r'([a-zA-Z]+{})\|([a-zA-Z]+{})\|{}'.format(slot, asic_index, re.escape(str(hostif))))
    sys_info = {}

    for sysport in dev_sysports:
        match = sys_re.fullmatch(sysport)
        if match:
            sys_info['slot'] = match.group(1)
            sys_info['asic'] = match.group(2)
            sys_info['key'] = sysport
            return sys_info

    raise KeyError("Could not find system port for {}/{}/{}".format(slot, asic_index, hostif))
diff --git a/tests/vxlan/test_vxlan_decap.py b/tests/vxlan/test_vxlan_decap.py
index 0cdc3a14a59..7a9f757ca02 100644
--- a/tests/vxlan/test_vxlan_decap.py
+++ b/tests/vxlan/test_vxlan_decap.py
@@ -149,6 +149,8 @@ def vxlan_status(setup, request, duthosts, rand_one_dut_hostname):
def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, ptfhost, creds):
duthost = duthosts[rand_one_dut_hostname]
+ sonic_admin_alt_password = duthost.host.options['variable_manager']._hostvars[duthost.hostname].get("ansible_altpassword")
+
vxlan_enabled, scenario = vxlan_status
logger.info("vxlan_enabled=%s, scenario=%s" % (vxlan_enabled, scenario))
log_file = "/tmp/vxlan-decap.Vxlan.{}.{}.log".format(scenario, datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
@@ -161,6 +163,7 @@ def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, ptfho
"count": COUNT,
"sonic_admin_user": creds.get('sonicadmin_user'),
"sonic_admin_password": creds.get('sonicadmin_password'),
- "dut_host": duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']},
+ "sonic_admin_alt_password": sonic_admin_alt_password,
+ "dut_hostname": duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']},
qlen=10000,
log_file=log_file)