diff --git a/acl_loader/main.py b/acl_loader/main.py index 7b7e480f6b..c719100fd9 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -718,21 +718,30 @@ def show_session(self, session_name): :param session_name: Optional. Mirror session name. Filter sessions by specified name. :return: """ - header = ("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue", "Policer", "Monitor Port") + erspan_header = ("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue", + "Policer", "Monitor Port", "SRC Port", "Direction") + span_header = ("Name", "Status", "DST Port", "SRC Port", "Direction", "Queue", "Policer") - data = [] + erspan_data = [] + span_data = [] for key, val in self.get_sessions_db_info().iteritems(): if session_name and key != session_name: continue - # For multi-mpu platform status and monitor port will be dict() - # of 'asic-x':value - data.append([key, val["status"], val["src_ip"], val["dst_ip"], - val.get("gre_type", ""), val.get("dscp", ""), - val.get("ttl", ""), val.get("queue", ""), val.get("policer", ""), - val.get("monitor_port", "")]) - - print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval="")) + if val.get("type") == "SPAN": + span_data.append([key, val.get("status", ""), val.get("dst_port", ""), + val.get("src_port", ""), val.get("direction", "").lower(), + val.get("queue", ""), val.get("policer", "")]) + else: + erspan_data.append([key, val.get("status", ""), val.get("src_ip", ""), + val.get("dst_ip", ""), val.get("gre_type", ""), val.get("dscp", ""), + val.get("ttl", ""), val.get("queue", ""), val.get("policer", ""), + val.get("monitor_port", ""), val.get("src_port", ""), val.get("direction", "").lower()]) + + print("ERSPAN Sessions") + print(tabulate.tabulate(erspan_data, headers=erspan_header, tablefmt="simple", missingval="")) + print("\nSPAN Sessions") + print(tabulate.tabulate(span_data, headers=span_header, tablefmt="simple", missingval="")) def show_policer(self, policer_name): """ 
diff --git a/config/config_mgmt.py b/config/config_mgmt.py new file mode 100644 index 0000000000..c9db79ea90 --- /dev/null +++ b/config/config_mgmt.py @@ -0,0 +1,840 @@ +''' +config_mgmt.py provides classes for configuration validation and for Dynamic +Port Breakout. +''' +try: + import re + import syslog + + from json import load + from time import sleep as tsleep + from imp import load_source + from jsondiff import diff + from sys import flags + + # SONiC specific imports + import sonic_yang + from swsssdk import ConfigDBConnector, SonicV2Connector, port_util + + # Using load_source to 'import /usr/local/bin/sonic-cfggen as sonic_cfggen' + # since /usr/local/bin/sonic-cfggen does not have .py extension. + load_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + from sonic_cfggen import deep_update, FormatConverter, sort_data + +except ImportError as e: + raise ImportError("%s - required module not found" % str(e)) + +# Globals +YANG_DIR = "/usr/local/yang-models" +CONFIG_DB_JSON_FILE = '/etc/sonic/confib_db.json' +# TODO: Find a place for it on sonic switch. +DEFAULT_CONFIG_DB_JSON_FILE = '/etc/sonic/port_breakout_config_db.json' + +class ConfigMgmt(): + ''' + Class to handle config managment for SONIC, this class will use sonic_yang + to verify config for the commands which are capable of change in config DB. + ''' + + def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True): + ''' + Initialise the class, --read the config, --load in data tree. + + Parameters: + source (str): source for input config, default configDb else file. + debug (bool): verbose mode. + allowTablesWithoutYang (bool): allow tables without yang model in + config or not. 
+ + Returns: + void + ''' + try: + self.configdbJsonIn = None + self.configdbJsonOut = None + self.allowTablesWithoutYang = allowTablesWithoutYang + + # logging vars + self.SYSLOG_IDENTIFIER = "ConfigMgmt" + self.DEBUG = debug + + self.sy = sonic_yang.SonicYang(YANG_DIR, debug=debug) + # load yang models + self.sy.loadYangModel() + # load jIn from config DB or from config DB json file. + if source.lower() == 'configdb': + self.readConfigDB() + # treat any other source as file input + else: + self.readConfigDBJson(source) + # this will crop config, xlate and load. + self.sy.loadData(self.configdbJsonIn) + + # Raise if tables without YANG models are not allowed but exist. + if not allowTablesWithoutYang and len(self.sy.tablesWithOutYang): + raise Exception('Config has tables without YANG models') + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise(Exception('ConfigMgmt Class creation failed')) + + return + + def __del__(self): + pass + + def tablesWithoutYang(self): + ''' + Return tables loaded in config for which YANG model does not exist. + + Parameters: + void + + Returns: + tablesWithoutYang (list): list of tables. + ''' + return self.sy.tablesWithOutYang + + def loadData(self, configdbJson): + ''' + Explicit function to load config data in Yang Data Tree. + + Parameters: + configdbJson (dict): dict similar to configDb. + + Returns: + void + ''' + self.sy.loadData(configdbJson) + # Raise if tables without YANG models are not allowed but exist. + if not self.allowTablesWithoutYang and len(self.sy.tablesWithOutYang): + raise Exception('Config has tables without YANG models') + + return + + def validateConfigData(self): + ''' + Validate current config data Tree. 
+ + Parameters: + void + + Returns: + bool + ''' + try: + self.sy.validate_data_tree() + except Exception as e: + self.sysLog(msg='Data Validation Failed') + return False + + self.sysLog(msg='Data Validation successful', doPrint=True) + return True + + def sysLog(self, logLevel=syslog.LOG_INFO, msg=None, doPrint=False): + ''' + Log the msg in syslog file. + + Parameters: + debug : syslog level + msg (str): msg to be logged. + + Returns: + void + ''' + # log debug only if enabled + if self.DEBUG == False and logLevel == syslog.LOG_DEBUG: + return + if flags.interactive !=0 and doPrint == True: + print("{}".format(msg)) + syslog.openlog(self.SYSLOG_IDENTIFIER) + syslog.syslog(logLevel, msg) + syslog.closelog() + + return + + def readConfigDBJson(self, source=CONFIG_DB_JSON_FILE): + ''' + Read the config from a Config File. + + Parameters: + source(str): config file name. + + Returns: + (void) + ''' + self.sysLog(msg='Reading data from {}'.format(source)) + self.configdbJsonIn = readJsonFile(source) + #self.sysLog(msg=type(self.configdbJsonIn)) + if not self.configdbJsonIn: + raise(Exception("Can not load config from config DB json file")) + self.sysLog(msg='Reading Input {}'.format(self.configdbJsonIn)) + + return + + """ + Get config from redis config DB + """ + def readConfigDB(self): + ''' + Read the config in Config DB. Assign it in self.configdbJsonIn. + + Parameters: + (void) + + Returns: + (void) + ''' + self.sysLog(doPrint=True, msg='Reading data from Redis configDb') + # Read from config DB on sonic switch + db_kwargs = dict(); data = dict() + configdb = ConfigDBConnector(**db_kwargs) + configdb.connect() + deep_update(data, FormatConverter.db_to_output(configdb.get_config())) + self.configdbJsonIn = FormatConverter.to_serialized(data) + self.sysLog(syslog.LOG_DEBUG, 'Reading Input from ConfigDB {}'.\ + format(self.configdbJsonIn)) + + return + + def writeConfigDB(self, jDiff): + ''' + Write the diff in Config DB. 
+ + Parameters: + jDiff (dict): config to push in config DB. + + Returns: + void + ''' + self.sysLog(doPrint=True, msg='Writing in Config DB') + db_kwargs = dict(); data = dict() + configdb = ConfigDBConnector(**db_kwargs) + configdb.connect(False) + deep_update(data, FormatConverter.to_deserialized(jDiff)) + data = sort_data(data) + self.sysLog(msg="Write in DB: {}".format(data)) + configdb.mod_config(FormatConverter.output_to_db(data)) + + return + +# End of Class ConfigMgmt + +class ConfigMgmtDPB(ConfigMgmt): + ''' + Config MGMT class for Dynamic Port Breakout(DPB). This is derived from + ConfigMgmt. + ''' + + def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True): + ''' + Initialise the class + + Parameters: + source (str): source for input config, default configDb else file. + debug (bool): verbose mode. + allowTablesWithoutYang (bool): allow tables without yang model in + config or not. + + Returns: + void + ''' + try: + ConfigMgmt.__init__(self, source=source, debug=debug, \ + allowTablesWithoutYang=allowTablesWithoutYang) + self.oidKey = 'ASIC_STATE:SAI_OBJECT_TYPE_PORT:oid:0x' + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise(Exception('ConfigMgmtDPB Class creation failed')) + + return + + def __del__(self): + pass + + def _checkKeyinAsicDB(self, key, db): + ''' + Check if a key exists in ASIC DB or not. + + Parameters: + db (SonicV2Connector): database. + key (str): key in ASIC DB, with table Seperator if applicable. + + Returns: + (bool): True, if given key is present. + ''' + self.sysLog(msg='Check Key in Asic DB: {}'.format(key)) + try: + # chk key in ASIC DB + if db.exists('ASIC_DB', key): + return True + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise(e) + + return False + + def _checkNoPortsInAsicDb(self, db, ports, portMap): + ''' + Check ASIC DB for PORTs in port List + + Parameters: + db (SonicV2Connector): database. 
+ ports (list): List of ports + portMap (dict): port to OID map. + + Returns: + (bool): True, if all ports are not present. + ''' + try: + # connect to ASIC DB, + db.connect(db.ASIC_DB) + for port in ports: + key = self.oidKey + portMap[port] + if self._checkKeyinAsicDB(key, db) == True: + return False + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + return False + + return True + + def _verifyAsicDB(self, db, ports, portMap, timeout): + ''' + Verify in the Asic DB that port are deleted, Keep on trying till timeout + period. + + Parameters: + db (SonicV2Connector): database. + ports (list): port list to check in ASIC DB. + portMap (dict): oid<->port map. + timeout (int): timeout period + + Returns: + (bool) + ''' + self.sysLog(doPrint=True, msg="Verify Port Deletion from Asic DB, Wait...") + try: + for waitTime in range(timeout): + self.sysLog(logLevel=syslog.LOG_DEBUG, msg='Check Asic DB: {} \ + try'.format(waitTime+1)) + # checkNoPortsInAsicDb will return True if all ports are not + # present in ASIC DB + if self._checkNoPortsInAsicDb(db, ports, portMap): + break + tsleep(1) + + # raise if timer expired + if waitTime + 1 == timeout: + self.sysLog(syslog.LOG_CRIT, "!!! Critical Failure, Ports \ + are not Deleted from ASIC DB, Bail Out !!!", doPrint=True) + raise(Exception("Ports are present in ASIC DB after {} secs".\ + format(timeout))) + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise e + + return True + + def breakOutPort(self, delPorts=list(), portJson=dict(), force=False, \ + loadDefConfig=True): + ''' + This is the main function for port breakout. Exposed to caller. + + Parameters: + delPorts (list): ports to be deleted. + portJson (dict): Config DB json Part of all Ports, generated from + platform.json. + force (bool): if false return dependecies, else delete dependencies. + loadDefConfig: If loadDefConfig, add default config for ports as well. 
+ + Returns: + (deps, ret) (tuple)[list, bool]: dependecies and success/failure. + ''' + MAX_WAIT = 60 + try: + # delete Port and get the Config diff, deps and True/False + delConfigToLoad, deps, ret = self._deletePorts(ports=delPorts, \ + force=force) + # return dependencies if delete port fails + if ret == False: + return deps, ret + + # add Ports and get the config diff and True/False + addConfigtoLoad, ret = self._addPorts(portJson=portJson, \ + loadDefConfig=loadDefConfig) + # return if ret is False, Great thing, no change is done in Config + if ret == False: + return None, ret + + # Save Port OIDs Mapping Before Deleting Port + dataBase = SonicV2Connector(host="127.0.0.1") + if_name_map, if_oid_map = port_util.get_interface_oid_map(dataBase) + self.sysLog(syslog.LOG_DEBUG, 'if_name_map {}'.format(if_name_map)) + + # If we are here, then get ready to update the Config DB, Update + # deletion of Config first, then verify in Asic DB for port deletion, + # then update addition of ports in config DB. + self.writeConfigDB(delConfigToLoad) + # Verify in Asic DB, + self._verifyAsicDB(db=dataBase, ports=delPorts, portMap=if_name_map, \ + timeout=MAX_WAIT) + self.writeConfigDB(addConfigtoLoad) + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + return None, False + + return None, True + + def _deletePorts(self, ports=list(), force=False): + ''' + Delete ports and dependecies from data tree, validate and return resultant + config. + + Parameters: + ports (list): list of ports + force (bool): if false return dependecies, else delete dependencies. + + Returns: + (configToLoad, deps, ret) (tuple)[dict, list, bool]: config, dependecies + and success/fail. 
+ ''' + configToLoad = None; deps = None + try: + self.sysLog(msg="delPorts ports:{} force:{}".format(ports, force)) + + self.sysLog(doPrint=True, msg='Start Port Deletion') + deps = list() + + # Get all dependecies for ports + for port in ports: + xPathPort = self.sy.findXpathPortLeaf(port) + self.sysLog(doPrint=True, msg='Find dependecies for port {}'.\ + format(port)) + dep = self.sy.find_data_dependencies(str(xPathPort)) + if dep: + deps.extend(dep) + + # No further action with no force and deps exist + if force == False and deps: + return configToLoad, deps, False; + + # delets all deps, No topological sort is needed as of now, if deletion + # of deps fails, return immediately + elif deps: + for dep in deps: + self.sysLog(msg='Deleting {}'.format(dep)) + self.sy.deleteNode(str(dep)) + # mark deps as None now, + deps = None + + # all deps are deleted now, delete all ports now + for port in ports: + xPathPort = self.sy.findXpathPort(port) + self.sysLog(doPrint=True, msg="Deleting Port: " + port) + self.sy.deleteNode(str(xPathPort)) + + # Let`s Validate the tree now + if self.validateConfigData()==False: + return configToLoad, deps, False; + + # All great if we are here, Lets get the diff + self.configdbJsonOut = self.sy.getData() + # Update configToLoad + configToLoad = self._updateDiffConfigDB() + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Port Deletion Failed") + return configToLoad, deps, False + + return configToLoad, deps, True + + def _addPorts(self, portJson=dict(), loadDefConfig=True): + ''' + Add ports and default confug in data tree, validate and return resultant + config. + + Parameters: + portJson (dict): Config DB json Part of all Ports, generated from + platform.json. + loadDefConfig: If loadDefConfig, add default config for ports as well. 
+ + Returns: + (configToLoad, ret) (tuple)[dict, bool] + ''' + configToLoad = None + ports = portJson['PORT'].keys() + try: + self.sysLog(doPrint=True, msg='Start Port Addition') + self.sysLog(msg="addPorts Args portjson: {} loadDefConfig: {}".\ + format(portJson, loadDefConfig)) + + if loadDefConfig: + defConfig = self._getDefaultConfig(ports) + self.sysLog(msg='Default Config: {}'.format(defConfig)) + + # get the latest Data Tree, save this in input config, since this + # is our starting point now + self.configdbJsonIn = self.sy.getData() + + # Get the out dict as well, if not done already + if self.configdbJsonOut is None: + self.configdbJsonOut = self.sy.getData() + + # update portJson in configdbJsonOut PORT part + self.configdbJsonOut['PORT'].update(portJson['PORT']) + # merge new config with data tree, this is json level merge. + # We do not allow new table merge while adding default config. + if loadDefConfig: + self.sysLog(doPrint=True, msg="Merge Default Config for {}".\ + format(ports)) + self._mergeConfigs(self.configdbJsonOut, defConfig, True) + + # create a tree with merged config and validate, if validation is + # sucessful, then configdbJsonOut contains final and valid config. + self.sy.loadData(self.configdbJsonOut) + if self.validateConfigData()==False: + return configToLoad, False + + # All great if we are here, Let`s get the diff and update COnfig + configToLoad = self._updateDiffConfigDB() + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Port Addition Failed") + return configToLoad, False + + return configToLoad, True + + def _mergeConfigs(self, D1, D2, uniqueKeys=True): + ''' + Merge D2 dict in D1 dict, Note both first and second dict will change. + First Dict will have merged part D1 + D2. Second dict will have D2 - D1 + i.e [unique keys in D2]. Unique keys in D2 will be merged in D1 only + if uniqueKeys=True. 
+ Usage: This function can be used with 'config load' command to merge + new config with old. + + Parameters: + D1 (dict): Partial Config 1. + D2 (dict): Partial Config 2. + uniqueKeys (bool) + + Returns: + bool + ''' + try: + def _mergeItems(it1, it2): + if isinstance(it1, list) and isinstance(it2, list): + it1.extend(it2) + elif isinstance(it1, dict) and isinstance(it2, dict): + self._mergeConfigs(it1, it2) + elif isinstance(it1, list) or isinstance(it2, list): + raise Exception("Can not merge Configs, List problem") + elif isinstance(it1, dict) or isinstance(it2, dict): + raise Exception("Can not merge Configs, Dict problem") + else: + # First Dict takes priority + pass + return + + for it in D1.keys(): + # D2 has the key + if D2.get(it): + _mergeItems(D1[it], D2[it]) + del D2[it] + + # if uniqueKeys are needed, merge rest of the keys of D2 in D1 + if uniqueKeys: + D1.update(D2) + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Merge Config failed") + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise e + + return D1 + + def _searchKeysInConfig(self, In, Out, skeys): + ''' + Search Relevant Keys in Input Config using DFS, This function is mainly + used to search ports related config in Default ConfigDbJson file. + + Parameters: + In (dict): Input Config to be searched + skeys (list): Keys to be searched in Input Config i.e. search Keys. + Out (dict): Contains the search result, i.e. Output Config with skeys. + + Returns: + found (bool): True if any of skeys is found else False. + ''' + found = False + if isinstance(In, dict): + for key in In.keys(): + for skey in skeys: + # pattern is very specific to current primary keys in + # config DB, may need to be updated later.
+ pattern = '^' + skey + '\|' + '|' + skey + '$' + \ + '|' + '^' + skey + '$' + reg = re.compile(pattern) + if reg.search(key): + # In primary key, only 1 match can be found, so return + Out[key] = In[key] + found = True + break + # Put the key in Out by default, if not added already. + # Remove later, if subelements does not contain any port. + if Out.get(key) is None: + Out[key] = type(In[key])() + if self._searchKeysInConfig(In[key], Out[key], skeys) == False: + del Out[key] + else: + found = True + + elif isinstance(In, list): + for skey in skeys: + if skey in In: + found = True + Out.append(skey) + + else: + # nothing for other keys + pass + + return found + + def configWithKeys(self, configIn=dict(), keys=list()): + ''' + This function returns the config with relavant keys in Input Config. + It calls _searchKeysInConfig. + + Parameters: + configIn (dict): Input Config + keys (list): Key list. + + Returns: + configOut (dict): Output Config containing only key related config. + ''' + configOut = dict() + try: + if len(configIn) and len(keys): + self._searchKeysInConfig(configIn, configOut, skeys=keys) + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="configWithKeys Failed, Error: {}".format(str(e))) + raise e + + return configOut + + def _getDefaultConfig(self, ports=list()): + ''' + Create a default Config for given Port list from Default Config File. + It calls _searchKeysInConfig. + + Parameters: + ports (list): list of ports, for which default config must be fetched. + + Returns: + defConfigOut (dict): default Config for given Ports. 
+ ''' + # function code + try: + self.sysLog(doPrint=True, msg="Generating default config for {}".format(ports)) + defConfigIn = readJsonFile(DEFAULT_CONFIG_DB_JSON_FILE) + defConfigOut = dict() + self._searchKeysInConfig(defConfigIn, defConfigOut, skeys=ports) + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="getDefaultConfig Failed, Error: {}".format(str(e))) + raise e + + return defConfigOut + + def _updateDiffConfigDB(self): + ''' + Return ConfigDb format Diff b/w self.configdbJsonIn, self.configdbJsonOut + + Parameters: + void + + Returns: + configToLoad (dict): ConfigDb format Diff + ''' + try: + # Get the Diff + self.sysLog(msg='Generate Final Config to write in DB') + configDBdiff = self._diffJson() + # Process diff and create Config which can be updated in Config DB + configToLoad = self._createConfigToLoad(configDBdiff, \ + self.configdbJsonIn, self.configdbJsonOut) + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Config Diff Generation failed") + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise e + + return configToLoad + + def _createConfigToLoad(self, diff, inp, outp): + ''' + Create the config to write in Config DB, i.e. compitible with mod_config() + This functions has 3 inner functions: + -- _deleteHandler: to handle delete in diff. See example below. + -- _insertHandler: to handle insert in diff. See example below. + -- _recurCreateConfig: recursively create this config. + + Parameters: + diff: jsondiff b/w 2 configs. + Example: + {u'VLAN': {u'Vlan100': {'members': {delete: [(95, 'Ethernet1')]}}, + u'Vlan777': {u'members': {insert: [(92, 'Ethernet2')]}}}, + 'PORT': {delete: {u'Ethernet1': {...}}}} + + inp: input config before delete/add ports, i.e. current config Db. + outp: output config after delete/add ports. i.e. config DB once diff + is applied. + + Returns: + configToLoad (dict): config in a format compitible with mod_Config(). 
+ ''' + + ### Internal Functions ### + def _deleteHandler(diff, inp, outp, config): + ''' + Handle deletions in diff dict + ''' + if isinstance(inp, dict): + # Example Case: diff = PORT': {delete: {u'Ethernet1': {...}}}} + for key in diff: + # make sure keys from diff are present in inp but not in outp + if key in inp and key not in outp: + # assign key to None(null), redis will delete entire key + config[key] = None + else: + # should not happen + raise Exception('Invalid deletion of {} in diff'.format(key)) + + elif isinstance(inp, list): + # Example case: {u'VLAN': {u'Vlan100': {'members': {delete: [(95, 'Ethernet1')]}} + # just take list from outputs + config.extend(outp) + return + + def _insertHandler(diff, inp, outp, config): + ''' + Handle inserts in diff dict + ''' + if isinstance(outp, dict): + # Example Case: diff = PORT': {insert: {u'Ethernet1': {...}}}} + for key in diff: + # make sure keys are only in outp + if key not in inp and key in outp: + # assign key in config same as outp + config[key] = outp[key] + else: + # should not happen + raise Exception('Invalid insertion of {} in diff'.format(key)) + + elif isinstance(outp, list): + # just take list from output + # Example case: {u'VLAN': {u'Vlan100': {'members': {insert: [(95, 'Ethernet1')]}} + config.extend(outp) + return + + def _recurCreateConfig(diff, inp, outp, config): + ''' + Recursively iterate diff to generate config to write in configDB + ''' + changed = False + # updates are represented by list in diff and as dict in outp\inp + # we do not allow updates right now + if isinstance(diff, list) and isinstance(outp, dict): + return changed + + idx = -1 + for key in diff: + idx = idx + 1 + if str(key) == '$delete': + _deleteHandler(diff[key], inp, outp, config) + changed = True + elif str(key) == '$insert': + _insertHandler(diff[key], inp, outp, config) + changed = True + else: + # insert in config by default, remove later if not needed + if isinstance(diff, dict): + # config should match type 
of outp + config[key] = type(outp[key])() + if _recurCreateConfig(diff[key], inp[key], outp[key], \ + config[key]) == False: + del config[key] + else: + changed = True + elif isinstance(diff, list): + config.append(key) + if _recurCreateConfig(diff[idx], inp[idx], outp[idx], \ + config[-1]) == False: + del config[-1] + else: + changed = True + + return changed + + ### Function Code ### + try: + configToLoad = dict() + _recurCreateConfig(diff, inp, outp, configToLoad) + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Create Config to load in DB, Failed") + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise e + + return configToLoad + + def _diffJson(self): + ''' + Return json diff between self.configdbJsonIn, self.configdbJsonOut dicts. + + Parameters: + void + + Returns: + (dict): json diff between self.configdbJsonIn, self.configdbJsonOut + dicts. + Example: + {u'VLAN': {u'Vlan100': {'members': {delete: [(95, 'Ethernet1')]}}, + u'Vlan777': {u'members': {insert: [(92, 'Ethernet2')]}}}, + 'PORT': {delete: {u'Ethernet1': {...}}}} + ''' + return diff(self.configdbJsonIn, self.configdbJsonOut, syntax='symmetric') + +# end of class ConfigMgmtDPB + +# Helper Functions +def readJsonFile(fileName): + ''' + Read Json file. 
+ + Parameters: + fileName (str): file + + Returns: + result (dict): json --> dict + ''' + try: + with open(fileName) as f: + result = load(f) + except Exception as e: + raise Exception(e) + + return result diff --git a/config/main.py b/config/main.py index 731043a668..bd37962005 100755 --- a/config/main.py +++ b/config/main.py @@ -114,7 +114,7 @@ def get_command(self, ctx, cmd_name): try: version_info = sonic_device_util.get_sonic_version_info() asic_type = version_info['asic_type'] -except KeyError, TypeError: +except (KeyError, TypeError): raise click.Abort() # @@ -515,8 +515,8 @@ def _get_disabled_services_list(): log_warning("Status of feature '{}' is None".format(feature_name)) continue - if status == "disabled": - disabled_services_list.append(feature_name) + if status == "disabled": + disabled_services_list.append(feature_name) else: log_warning("Unable to retreive FEATURE table") @@ -604,6 +604,99 @@ def is_ipaddress(val): return False return True +def interface_is_in_vlan(vlan_member_table, interface_name): + """ Check if an interface is in a vlan """ + for _,intf in vlan_member_table.keys(): + if intf == interface_name: + return True + + return False + +def interface_is_in_portchannel(portchannel_member_table, interface_name): + """ Check if an interface is part of portchannel """ + for _,intf in portchannel_member_table.keys(): + if intf == interface_name: + return True + + return False + +def interface_is_router_port(interface_table, interface_name): + """ Check if an interface has router config """ + for intf in interface_table.keys(): + if (interface_name == intf[0]): + return True + + return False + +def interface_is_mirror_dst_port(config_db, interface_name): + """ Check if port is already configured as mirror destination port """ + mirror_table = config_db.get_table('MIRROR_SESSION') + for _,v in mirror_table.items(): + if 'dst_port' in v and v['dst_port'] == interface_name: + return True + + return False + +def 
interface_has_mirror_config(mirror_table, interface_name): + """ Check if port is already configured with mirror config """ + for _,v in mirror_table.items(): + if 'src_port' in v and v['src_port'] == interface_name: + return True + if 'dst_port' in v and v['dst_port'] == interface_name: + return True + + return False + +def validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction): + """ Check if SPAN mirror-session config is valid """ + if len(config_db.get_entry('MIRROR_SESSION', session_name)) != 0: + click.echo("Error: {} already exists".format(session_name)) + return False + + vlan_member_table = config_db.get_table('VLAN_MEMBER') + mirror_table = config_db.get_table('MIRROR_SESSION') + portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') + interface_table = config_db.get_table('INTERFACE') + + if dst_port: + if not interface_name_is_valid(dst_port): + click.echo("Error: Destination Interface {} is invalid".format(dst_port)) + return False + + if interface_is_in_vlan(vlan_member_table, dst_port): + click.echo("Error: Destination Interface {} has vlan config".format(dst_port)) + return False + + if interface_has_mirror_config(mirror_table, dst_port): + click.echo("Error: Destination Interface {} already has mirror config".format(dst_port)) + return False + + if interface_is_in_portchannel(portchannel_member_table, dst_port): + click.echo("Error: Destination Interface {} has portchannel config".format(dst_port)) + return False + + if interface_is_router_port(interface_table, dst_port): + click.echo("Error: Destination Interface {} is a L3 interface".format(dst_port)) + return False + + if src_port: + for port in src_port.split(","): + if not interface_name_is_valid(port): + click.echo("Error: Source Interface {} is invalid".format(port)) + return False + if dst_port and dst_port == port: + click.echo("Error: Destination Interface cant be same as Source Interface") + return False + if 
interface_has_mirror_config(mirror_table, port): + click.echo("Error: Source Interface {} already has mirror config".format(port)) + return False + + if direction: + if direction not in ['rx', 'tx', 'both']: + click.echo("Error: Direction {} is invalid".format(direction)) + return False + + return True # This is our main entrypoint - the main 'config' command @click.group(cls=AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @@ -1030,6 +1123,8 @@ def portchannel_member(ctx): def add_portchannel_member(ctx, portchannel_name, port_name): """Add member to port channel""" db = ctx.obj['db'] + if interface_is_mirror_dst_port(db, port_name): + ctx.fail("{} is configured as mirror destination port".format(port_name)) db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name), {'NULL': 'NULL'}) @@ -1051,7 +1146,11 @@ def del_portchannel_member(ctx, portchannel_name, port_name): def mirror_session(): pass -@mirror_session.command() +# +# 'add' subgroup ('config mirror_session add ...') +# + +@mirror_session.command('add') @click.argument('session_name', metavar='', required=True) @click.argument('src_ip', metavar='', required=True) @click.argument('dst_ip', metavar='', required=True) @@ -1061,25 +1160,70 @@ def mirror_session(): @click.argument('queue', metavar='[queue]', required=False) @click.option('--policer') def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer): - """ - Add mirror session - """ + """ Add ERSPAN mirror session.(Legacy support) """ + add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer) + +@mirror_session.group(cls=AbbreviationGroup, name='erspan') +@click.pass_context +def erspan(ctx): + """ ERSPAN mirror_session """ + pass + + +# +# 'add' subcommand +# + +@erspan.command('add') +@click.argument('session_name', metavar='', required=True) +@click.argument('src_ip', metavar='', required=True) +@click.argument('dst_ip', metavar='', required=True) +@click.argument('dscp', metavar='', required=True) 
+@click.argument('ttl', metavar='', required=True) +@click.argument('gre_type', metavar='[gre_type]', required=False) +@click.argument('queue', metavar='[queue]', required=False) +@click.argument('src_port', metavar='[src_port]', required=False) +@click.argument('direction', metavar='[direction]', required=False) +@click.option('--policer') +def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction): + """ Add ERSPAN mirror session """ + add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction) + +def gather_session_info(session_info, policer, queue, src_port, direction): + if policer: + session_info['policer'] = policer + + if queue: + session_info['queue'] = queue + + if src_port: + if get_interface_naming_mode() == "alias": + src_port_list = [] + for port in src_port.split(","): + src_port_list.append(interface_alias_to_name(port)) + src_port=",".join(src_port_list) + + session_info['src_port'] = src_port + if not direction: + direction = "both" + session_info['direction'] = direction.upper() + + return session_info + +def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port=None, direction=None): session_info = { + "type" : "ERSPAN", "src_ip": src_ip, "dst_ip": dst_ip, "dscp": dscp, "ttl": ttl } - if policer is not None: - session_info['policer'] = policer - - if gre_type is not None: + if gre_type: session_info['gre_type'] = gre_type - if queue is not None: - session_info['queue'] = queue - + session_info = gather_session_info(session_info, policer, queue, src_port, direction) + """ For multi-npu platforms we need to program all front asic namespaces """ @@ -1087,20 +1231,73 @@ def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer): if not namespaces['front_ns']: config_db = ConfigDBConnector() config_db.connect() + if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False: + return 
config_db.set_entry("MIRROR_SESSION", session_name, session_info) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) per_npu_configdb[front_asic_namespaces].connect() + if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False: + return per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) -@mirror_session.command() +@mirror_session.group(cls=AbbreviationGroup, name='span') +@click.pass_context +def span(ctx): + """ SPAN mirror session """ + pass + +@span.command('add') @click.argument('session_name', metavar='', required=True) -def remove(session_name): +@click.argument('dst_port', metavar='', required=True) +@click.argument('src_port', metavar='[src_port]', required=False) +@click.argument('direction', metavar='[direction]', required=False) +@click.argument('queue', metavar='[queue]', required=False) +@click.option('--policer') +def add(session_name, dst_port, src_port, direction, queue, policer): + """ Add SPAN mirror session """ + add_span(session_name, dst_port, src_port, direction, queue, policer) + +def add_span(session_name, dst_port, src_port, direction, queue, policer): + if get_interface_naming_mode() == "alias": + dst_port = interface_alias_to_name(dst_port) + if dst_port is None: + click.echo("Error: Destination Interface {} is invalid".format(dst_port)) + return + + session_info = { + "type" : "SPAN", + "dst_port": dst_port, + } + + session_info = gather_session_info(session_info, policer, queue, src_port, direction) + """ - Delete mirror session + For multi-npu platforms we need to program all front asic namespaces """ + namespaces = sonic_device_util.get_all_namespaces() + if not namespaces['front_ns']: + config_db = ConfigDBConnector() + config_db.connect() + if 
validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False: + return + config_db.set_entry("MIRROR_SESSION", session_name, session_info) + else: + per_npu_configdb = {} + for front_asic_namespaces in namespaces['front_ns']: + per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces].connect() + if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False: + return + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + + +@mirror_session.command() +@click.argument('session_name', metavar='', required=True) +def remove(session_name): + """ Delete mirror session """ """ For multi-npu platforms we need to program all front asic namespaces @@ -1116,6 +1313,7 @@ def remove(session_name): per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) per_npu_configdb[front_asic_namespaces].connect() per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None) + # # 'pfcwd' group ('config pfcwd ...') # @@ -1390,6 +1588,9 @@ def add_vlan_member(ctx, vid, interface_name, untagged): if len(vlan) == 0: ctx.fail("{} doesn't exist".format(vlan_name)) + if interface_is_mirror_dst_port(db, interface_name): + ctx.fail("{} is configured as mirror destination port".format(interface_name)) + members = vlan.get('members', []) if interface_name in members: if get_interface_naming_mode() == "alias": @@ -1404,7 +1605,7 @@ def add_vlan_member(ctx, vid, interface_name, untagged): for entry in interface_table: if (interface_name == entry[0]): ctx.fail("{} is a L3 interface!".format(interface_name)) - + members.append(interface_name) vlan['members'] = members db.set_entry('VLAN', vlan_name, vlan) @@ -2045,7 +2246,9 @@ def add(ctx, interface_name, ip_addr, gw): 
ctx.fail("'interface_name' is None!") try: - ipaddress.ip_network(unicode(ip_addr), strict=False) + net = ipaddress.ip_network(unicode(ip_addr), strict=False) + if '/' not in ip_addr: + ip_addr = str(net) if interface_name == 'eth0': @@ -2102,7 +2305,9 @@ def remove(ctx, interface_name, ip_addr): ctx.fail("'interface_name' is None!") try: - ipaddress.ip_network(unicode(ip_addr), strict=False) + net = ipaddress.ip_network(unicode(ip_addr), strict=False) + if '/' not in ip_addr: + ip_addr = str(net) if interface_name == 'eth0': config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), None) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 9ee91a88cd..8971445418 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2345,6 +2345,7 @@ Subsequent pages explain each of these commands in detail. -?, -h, --help Show this message and exit. Commands: + breakout Show Breakout Mode information by interfaces counters Show interface counters description Show interface status, protocol and... naming_mode Show interface naming_mode status @@ -2354,6 +2355,56 @@ Subsequent pages explain each of these commands in detail. transceiver Show SFP Transceiver information ``` +**show interfaces breakout** + +This show command displays the port capability for all interfaces i.e. index, lanes, default_brkout_mode, breakout_modes(i.e. all the available breakout modes) and brkout_mode (i.e. current breakout mode). To display current breakout mode, "current-mode" subcommand can be used.For a single interface, provide the interface name with the sub-command. 
+ +- Usage: + ``` + show interfaces breakout + show interfaces breakout current-mode + show interfaces breakout current-mode + ``` + +- Example: + ``` + admin@lnos-x1-a-fab01:~$ show interfaces breakout + { + "Ethernet0": { + "index": "1,1,1,1", + "default_brkout_mode": "1x100G[40G]", + "child ports": "Ethernet0", + "child port speed": "100G", + "breakout_modes": "1x100G[40G],2x50G,4x25G[10G]", + "Current Breakout Mode": "1x100G[40G]", + "lanes": "65,66,67,68", + "alias_at_lanes": "Eth1/1, Eth1/2, Eth1/3, Eth1/4" + },... continue + } + +The "current-mode" subcommand is used to display current breakout mode for all interfaces. + + admin@lnos-x1-a-fab01:~$ show interfaces breakout current-mode + +-------------+-------------------------+ + | Interface | Current Breakout Mode | + +=============+=========================+ + | Ethernet0 | 4x25G[10G] | + +-------------+-------------------------+ + | Ethernet4 | 4x25G[10G] | + +-------------+-------------------------+ + | Ethernet8 | 4x25G[10G] | + +-------------+-------------------------+ + | Ethernet12 | 4x25G[10G] | + +-------------+-------------------------+ + + admin@lnos-x1-a-fab01:~$ show interfaces breakout current-mode Ethernet0 + +-------------+-------------------------+ + | Interface | Current Breakout Mode | + +=============+=========================+ + | Ethernet0 | 4x25G[10G] | + +-------------+-------------------------+ + ``` + **show interfaces counters** This show command displays packet counters for all interfaces since the last time the counters were cleared. To display l3 counters "rif" subcommand can be used. There is no facility to display counters for one specific l2 interface. For l3 interfaces a single interface output mode is present. Optional argument "-a" provides two additional columns - RX-PPS and TX_PPS. 
@@ -3828,7 +3879,6 @@ This command deletes the SNMP Trap server IP address to which SNMP agent is expe Go Back To [Beginning of the document](#) or [Beginning of this section](#management-vrf) - ## Mirroring ### Mirroring Show commands @@ -3844,10 +3894,16 @@ This command displays all the mirror sessions that are configured. - Example: ``` - admin@sonic:~$ show mirror session - Name Status SRC IP DST IP GRE DSCP TTL Queue - --------- -------- --------- -------- ----- ------ ----- ------- + admin@sonic:~$ show mirror_session + ERSPAN Sessions + Name Status SRC IP DST IP GRE DSCP TTL Queue Policer Monitor Port SRC Port Direction + ------ -------- -------- -------- ----- ------ ----- ------- --------- -------------- ---------- ----------- everflow0 active 10.1.0.32 10.0.0.7 + + SPAN Sessions + Name Status DST Port SRC Port Direction + ------ -------- ---------- ------------- ----------- + port0 active Ethernet0 PortChannel10 rx ``` ### Mirroring Config commands @@ -3855,7 +3911,12 @@ This command displays all the mirror sessions that are configured. **config mirror_session** This command is used to add or remove mirroring sessions. Mirror session is identified by "session_name". -While adding a new session, users need to configure the following fields that are used while forwarding the mirrored packets. +This command supports configuring both SPAN/ERSPAN sessions. +In SPAN user can configure mirroring of list of source ports/LAG to destination port in ingress/egress/both directions. +In ERSPAN user can configure mirroring of list of source ports/LAG to a destination IP. +Both SPAN/ERSPAN support ACL based mirroring and can be used in ACL configurations. + +While adding a new ERSPAN session, users need to configure the following fields that are used while forwarding the mirrored packets. 
1) source IP address, 2) destination IP address, @@ -3863,19 +3924,65 @@ While adding a new session, users need to configure the following fields that ar 4) TTL value 5) optional - GRE Type in case if user wants to send the packet via GRE tunnel. GRE type could be anything; it could also be left as empty; by default, it is 0x8949 for Mellanox; and 0x88be for the rest of the chips. 6) optional - Queue in which packets shall be sent out of the device. Valid values 0 to 7 for most of the devices. Users need to know their device and the number of queues supported in that device. +7) optional - Policer which will be used to control the rate at which frames are mirrored. +8) optional - List of source ports which can have both Ethernet and LAG ports. +9) optional - Direction - Mirror session direction when configured along with Source port. (Supported rx/tx/both. default direction is both) - Usage: + ``` + config mirror_session erspan add [gre_type] [queue] [policer ] [source-port-list] [direction] + ``` + + The following command is also supported to be backward compatible. + This command will be deprecated in future releases. 
``` config mirror_session add [gre_type] [queue] ``` - Example: ``` - admin@sonic:~$ sudo config mirror_session add mrr_abcd 1.2.3.4 20.21.22.23 8 100 0x6558 0 - admin@sonic:~$ show mirror_session - Name Status SRC IP DST IP GRE DSCP TTL Queue - --------- -------- ----------- ----------- ------ ------ ----- ------- - mrr_abcd inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 + root@T1-2:~# config mirror_session add mrr_legacy 1.2.3.4 20.21.22.23 8 100 0x6558 0 + root@T1-2:~# show mirror_session + Name Status SRC IP DST IP GRE DSCP TTL Queue Policer Monitor Port SRC Port Direction + --------- -------- -------- ----------- ------ ------ ----- ------- --------- -------------- ---------- ----------- + mrr_legacy inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 + + + root@T1-2:~# config mirror_session erspan add mrr_abcd 1.2.3.4 20.21.22.23 8 100 0x6558 0 + root@T1-2:~# show mirror_session + Name Status SRC IP DST IP GRE DSCP TTL Queue Policer Monitor Port SRC Port Direction + --------- -------- -------- ----------- ------ ------ ----- ------- --------- -------------- ---------- ----------- + mrr_abcd inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 + root@T1-2:~# + + root@T1-2:~# config mirror_session erspan add mrr_port 1.2.3.4 20.21.22.23 8 100 0x6558 0 Ethernet0 + root@T1-2:~# show mirror_session + Name Status SRC IP DST IP GRE DSCP TTL Queue Policer Monitor Port SRC Port Direction + --------- -------- -------- ----------- ------ ------ ----- ------- --------- -------------- ---------- ----------- + mrr_port inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 Ethernet0 both + root@T1-2:~# + ``` + +While adding a new SPAN session, users need to configure the following fields that are used while forwarding the mirrored packets. +1) destination port, +2) optional - List of source ports- List of source ports which can have both Ethernet and LAG ports. +3) optional - Direction - Mirror session direction when configured along with Source port. (Supported rx/tx/both. 
default direction is both) +4) optional - Queue in which packets shall be sent out of the device. Valid values 0 to 7 for most of the devices. Users need to know their device and the number of queues supported in that device. +5) optional - Policer which will be used to control the rate at which frames are mirrored. + +- Usage: + ``` + config mirror_session span add [source-port-list] [direction] [queue] [policer ] + ``` + +- Example: + ``` + root@T1-2:~# config mirror_session span add port0 Ethernet0 Ethernet4,PortChannel001,Ethernet8 + root@T1-2:~# show mirror_session + Name Status DST Port SRC Port Direction + ------ -------- ---------- --------------------------------- ----------- + port0 active Ethernet0 Ethernet4,PortChannel10,Ethernet8 both + root@T1-2:~# ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#mirroring) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index d251246776..0d874e7f14 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -415,6 +415,7 @@ if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then # Dump the ARP and FDB tables to files also as default routes for both IPv4 and IPv6 # into /host/fast-reboot DUMP_DIR=/host/fast-reboot + CONFIG_DB_FILE=/etc/sonic/config_db.json mkdir -p $DUMP_DIR FAST_REBOOT_DUMP_RC=0 /usr/bin/fast-reboot-dump.py -t $DUMP_DIR || FAST_REBOOT_DUMP_RC=$? @@ -426,7 +427,7 @@ if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then FILTER_FDB_ENTRIES_RC=0 # Filter FDB entries using MAC addresses from ARP table - /usr/bin/filter_fdb_entries.py -f $DUMP_DIR/fdb.json -a $DUMP_DIR/arp.json || FILTER_FDB_ENTRIES_RC=$? + /usr/bin/filter_fdb_entries.py -f $DUMP_DIR/fdb.json -a $DUMP_DIR/arp.json -c $CONFIG_DB_FILE || FILTER_FDB_ENTRIES_RC=$? if [[ FILTER_FDB_ENTRIES_RC -ne 0 ]]; then error "Failed to filter FDb entries. 
Exit code: $FILTER_FDB_ENTRIES_RC"
        unload_kernel
diff --git a/scripts/filter_fdb_entries.py b/scripts/filter_fdb_entries.py
index 1efe30ebe4..d7f93d3e1e 100755
--- a/scripts/filter_fdb_entries.py
+++ b/scripts/filter_fdb_entries.py
@@ -8,9 +8,36 @@
 import traceback
 import time
 
+from ipaddress import ip_address, ip_network, ip_interface
 from collections import defaultdict
 
-def get_arp_entries_map(filename):
+def get_vlan_cidr_map(filename):
+    """
+    Generate Vlan CIDR information from Config DB file
+
+    fdb entries could be contaminated with foreign Vlan entries as seen in the case of
+    FTOS fast conversion. SONiC Vlan CIDR configuration will be used to filter out
+    those invalid Vlan entries.
+
+    Args:
+        filename(str): Config DB data file
+
+    Returns:
+        vlan_cidr(dict) map of Vlan CIDR configuration for SONiC device
+    """
+    with open(filename, 'r') as fp:
+        config_db_entries = json.load(fp)
+
+    vlan_cidr = defaultdict()
+    if "VLAN_INTERFACE" in config_db_entries.keys() and "VLAN" in config_db_entries.keys():
+        for vlan_key in config_db_entries["VLAN_INTERFACE"].keys():
+            vlan, cidr = tuple(vlan_key.split('|'))
+            if vlan in config_db_entries["VLAN"]:
+                vlan_cidr[vlan] = ip_interface(cidr).network
+
+    return vlan_cidr
+
+def get_arp_entries_map(arp_filename, config_db_filename):
     """
     Generate map for ARP entries
 
@@ -18,23 +45,30 @@ def get_arp_entries_map(filename):
     to match FDB table formatting
 
     Args:
-        filename(str): ARP entry file name
+        arp_filename(str): ARP entry file name
+        config_db_filename(str): Config DB file name
 
     Returns:
         arp_map(dict) map of ARP entries using MAC as key.
""" - with open(filename, 'r') as fp: + vlan_cidr = get_vlan_cidr_map(config_db_filename) + + with open(arp_filename, 'r') as fp: arp_entries = json.load(fp) arp_map = defaultdict() for arp in arp_entries: for key, config in arp.items(): - if 'NEIGH_TABLE' in key: + if "NEIGH_TABLE" not in key: + continue + table, vlan, ip = tuple(key.split(':')) + if "NEIGH_TABLE" in table and vlan in vlan_cidr.keys() \ + and ip_address(ip) in ip_network(vlan_cidr[vlan]) and "neigh" in config.keys(): arp_map[config["neigh"].replace(':', '-')] = "" return arp_map -def filter_fdb_entries(fdb_filename, arp_filename, backup_file): +def filter_fdb_entries(fdb_filename, arp_filename, config_db_filename, backup_file): """ Filter FDB entries based on MAC presence into ARP entries @@ -44,12 +78,13 @@ def filter_fdb_entries(fdb_filename, arp_filename, backup_file): Args: fdb_filename(str): FDB entries file name arp_filename(str): ARP entry file name + config_db_filename(str): Config DB file name backup_file(bool): Create backup copy of FDB file before creating new one Returns: None """ - arp_map = get_arp_entries_map(arp_filename) + arp_map = get_arp_entries_map(arp_filename, config_db_filename) with open(fdb_filename, 'r') as fp: fdb_entries = json.load(fp) @@ -91,20 +126,23 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument('-f', '--fdb', type=str, default='/tmp/fdb.json', help='fdb file name') parser.add_argument('-a', '--arp', type=str, default='/tmp/arp.json', help='arp file name') + parser.add_argument('-c', '--config_db', type=str, default='/tmp/config_db.json', help='config db file name') parser.add_argument('-b', '--backup_file', type=bool, default=True, help='Back up old fdb entries file') args = parser.parse_args() fdb_filename = args.fdb arp_filename = args.arp + config_db_filename = args.config_db backup_file = args.backup_file try: file_exists_or_raise(fdb_filename) file_exists_or_raise(arp_filename) + file_exists_or_raise(config_db_filename) except 
Exception as e: syslog.syslog(syslog.LOG_ERR, "Got an exception %s: Traceback: %s" % (str(e), traceback.format_exc())) else: - filter_fdb_entries(fdb_filename, arp_filename, backup_file) + filter_fdb_entries(fdb_filename, arp_filename, config_db_filename, backup_file) return 0 diff --git a/scripts/sfpshow b/scripts/sfpshow index 01970b3191..bf0b90408a 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -24,16 +24,21 @@ try: except KeyError: pass -qsfp_data_map = {'modelname': 'Vendor PN', 'vendor_oui': 'Vendor OUI', +qsfp_data_map = {'model': 'Vendor PN', + 'vendor_oui': 'Vendor OUI', 'vendor_date': 'Vendor Date Code(YYYY-MM-DD Lot)', - 'manufacturename': 'Vendor Name', - 'hardwarerev': 'Vendor Rev', 'serialnum': 'Vendor SN', - 'type': 'Identifier', 'ext_identifier': 'Extended Identifier', + 'manufacturer': 'Vendor Name', + 'hardware_rev': 'Vendor Rev', + 'serial': 'Vendor SN', + 'type': 'Identifier', + 'ext_identifier': 'Extended Identifier', 'ext_rateselect_compliance': 'Extended RateSelect Compliance', - 'cable_length': 'cable_length', 'cable_type': 'Length', + 'cable_length': 'cable_length', + 'cable_type': 'Length', 'nominal_bit_rate': 'Nominal Bit Rate(100Mbs)', - 'specification_compliance':'Specification compliance', - 'encoding': 'Encoding', 'Connector': 'Connector' + 'specification_compliance': 'Specification compliance', + 'encoding': 'Encoding', + 'connector': 'Connector' } sfp_dom_channel_monitor_map = {'rx1power': 'RXPower', @@ -141,7 +146,7 @@ class SFPShow(object): ident = ' ' seperator = ": " for key in sorted_key_table: - if dom_info_dict is not None and dom_info_dict[key] != 'N/A': + if dom_info_dict is not None and key in dom_info_dict and dom_info_dict[key] != 'N/A': current_val = (ident + ident + dom_value_map[key]) current_val = (current_val + seperator.rjust(len(seperator) + diff --git a/setup.py b/setup.py index edffa77cd0..8bccb843f1 100644 --- a/setup.py +++ b/setup.py @@ -49,6 +49,7 @@ 'pddf_ledutil', 'show', 'sonic_installer', + 
'sonic_installer.bootloader', 'sonic-utilities-tests', 'undebug', 'utilities_common', @@ -148,7 +149,8 @@ # - tabulate install_requires=[ 'click', - 'natsort' + 'natsort', + 'm2crypto' ], setup_requires= [ 'pytest-runner' diff --git a/show/main.py b/show/main.py index 6e2ea82c8c..720f1cdc00 100755 --- a/show/main.py +++ b/show/main.py @@ -9,6 +9,7 @@ import sys import ipaddress from pkg_resources import parse_version +from collections import OrderedDict import click from natsort import natsorted @@ -17,10 +18,16 @@ import sonic_device_util from swsssdk import ConfigDBConnector from swsssdk import SonicV2Connector +from portconfig import get_child_ports import mlnx +# Global Variable +PLATFORM_ROOT_PATH = "/usr/share/sonic/device" +PLATFORM_JSON = 'platform.json' +HWSKU_JSON = 'hwsku.json' SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen' +PORT_STR = "Ethernet" VLAN_SUB_INTERFACE_SEPARATOR = '.' @@ -181,6 +188,15 @@ def get_routing_stack(): # Global Routing-Stack variable routing_stack = get_routing_stack() +# Read given JSON file +def readJsonFile(fileName): + try: + with open(fileName) as f: + result = json.load(f) + except Exception as e: + click.echo(str(e)) + raise click.Abort() + return result def run_command(command, display_cmd=False, return_cmd=False): if display_cmd: @@ -789,6 +805,101 @@ def alias(interfacename): click.echo(tabulate(body, header)) + +# +# 'breakout' group ### +# +@interfaces.group(invoke_without_command=True) +@click.pass_context +def breakout(ctx): + """Show Breakout Mode information by interfaces""" + # Reading data from Redis configDb + config_db = ConfigDBConnector() + config_db.connect() + ctx.obj = {'db': config_db} + + try: + curBrkout_tbl = config_db.get_table('BREAKOUT_CFG') + except Exception as e: + click.echo("Breakout table is not present in Config DB") + raise click.Abort() + + if ctx.invoked_subcommand is None: + + # Get HWSKU and Platform information + hw_info_dict = get_hw_info_dict() + platform = hw_info_dict['platform'] 
+        hwsku = hw_info_dict['hwsku']
+
+        # Get port capability from platform and hwsku related files
+        platformFile = "{}/{}/{}".format(PLATFORM_ROOT_PATH, platform, PLATFORM_JSON)
+        platformDict = readJsonFile(platformFile)['interfaces']
+        hwskuDict = readJsonFile("{}/{}/{}/{}".format(PLATFORM_ROOT_PATH, platform, hwsku, HWSKU_JSON))['interfaces']
+
+        if not platformDict or not hwskuDict:
+            click.echo("Can not load port config from {} or {} file".format(PLATFORM_JSON, HWSKU_JSON))
+            raise click.Abort()
+
+        for port_name in platformDict.keys():
+            curBrkout_mode = curBrkout_tbl[port_name]["brkout_mode"]
+
+            # Update default breakout mode and current breakout mode to platformDict
+            platformDict[port_name].update(hwskuDict[port_name])
+            platformDict[port_name]["Current Breakout Mode"] = curBrkout_mode
+
+            # List all the child ports if present
+            child_portDict = get_child_ports(port_name, curBrkout_mode, platformFile)
+            if not child_portDict:
+                click.echo("Cannot find ports from {} file ".format(PLATFORM_JSON))
+                raise click.Abort()
+
+            child_ports = natsorted(child_portDict.keys())
+
+            children, speeds = [], []
+            # Update portname and speed of child ports if present
+            for port in child_ports:
+                speed = config_db.get_entry('PORT', port).get('speed')
+                if speed is not None:
+                    speeds.append(str(int(speed)//1000)+'G')
+                    children.append(port)
+
+            platformDict[port_name]["child ports"] = ",".join(children)
+            platformDict[port_name]["child port speeds"] = ",".join(speeds)
+
+        # Sort keys by name in natural sort order for human readability
+        parsed = OrderedDict((k, platformDict[k]) for k in natsorted(platformDict.keys()))
+        click.echo(json.dumps(parsed, indent=4))
+
+# 'breakout current-mode' subcommand ("show interfaces breakout current-mode")
+@breakout.command('current-mode')
+@click.argument('interface', metavar='', required=False, type=str)
+@click.pass_context
+def currrent_mode(ctx, interface):
+    """Show current Breakout mode Info by interface(s)"""
+
+    config_db =
ctx.obj['db'] + + header = ['Interface', 'Current Breakout Mode'] + body = [] + + try: + curBrkout_tbl = config_db.get_table('BREAKOUT_CFG') + except Exception as e: + click.echo("Breakout table is not present in Config DB") + raise click.Abort() + + # Show current Breakout Mode of user prompted interface + if interface is not None: + body.append([interface, str(curBrkout_tbl[interface]['brkout_mode'])]) + click.echo(tabulate(body, header, tablefmt="grid")) + return + + # Show current Breakout Mode for all interfaces + for name in natsorted(curBrkout_tbl.keys()): + body.append([name, str(curBrkout_tbl[name]['brkout_mode'])]) + click.echo(tabulate(body, header, tablefmt="grid")) + + # # 'neighbor' group ### # @@ -1546,6 +1657,7 @@ def interfaces(): if netifaces.AF_INET6 in ipaddresses: ifaddresses = [] + neighbor_info = [] for ipaddr in ipaddresses[netifaces.AF_INET6]: neighbor_name = 'N/A' neighbor_ip = 'N/A' @@ -1557,6 +1669,7 @@ def interfaces(): neighbor_ip = bgp_peer[local_ip][1] except Exception: pass + neighbor_info.append([neighbor_name, neighbor_ip]) if len(ifaddresses) > 0: admin = get_if_admin_state(iface) @@ -1567,9 +1680,11 @@ def interfaces(): master = get_if_master(iface) if get_interface_mode() == "alias": iface = iface_alias_converter.name_to_alias(iface) - data.append([iface, master, ifaddresses[0][1], admin + "/" + oper, neighbor_name, neighbor_ip]) - for ifaddr in ifaddresses[1:]: - data.append(["", "", ifaddr[1], ""]) + data.append([iface, master, ifaddresses[0][1], admin + "/" + oper, neighbor_info[0][0], neighbor_info[0][1]]) + neighbor_info.pop(0) + for ifaddr in ifaddresses[1:]: + data.append(["", "", ifaddr[1], admin + "/" + oper, neighbor_info[0][0], neighbor_info[0][1]]) + neighbor_info.pop(0) print tabulate(data, header, tablefmt="simple", stralign='left', missingval="") @@ -2646,7 +2761,8 @@ def reboot_cause(): # 'line' command ("show line") # @cli.command('line') -def line(): +@click.option('--verbose', is_flag=True, help="Enable 
verbose output") +def line(verbose): """Show all /dev/ttyUSB lines and their info""" cmd = "consutil show" run_command(cmd, display_cmd=verbose) diff --git a/sonic-utilities-tests/config_mgmt_test.py b/sonic-utilities-tests/config_mgmt_test.py new file mode 100644 index 0000000000..aec7f75e30 --- /dev/null +++ b/sonic-utilities-tests/config_mgmt_test.py @@ -0,0 +1,721 @@ +import imp +import os +# import file under test i.e. config_mgmt.py +imp.load_source('config_mgmt', \ + os.path.join(os.path.dirname(__file__), '..', 'config', 'config_mgmt.py')) +import config_mgmt + +from unittest import TestCase +from mock import MagicMock, call +from json import dump + +class TestConfigMgmt(TestCase): + ''' + Test Class for config_mgmt.py + ''' + + def setUp(self): + config_mgmt.CONFIG_DB_JSON_FILE = "startConfigDb.json" + config_mgmt.DEFAULT_CONFIG_DB_JSON_FILE = "portBreakOutConfigDb.json" + return + + def test_config_validation(self): + curConfig = dict(configDbJson) + self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) + cm = config_mgmt.ConfigMgmt(source=config_mgmt.CONFIG_DB_JSON_FILE) + assert cm.validateConfigData() == True + return + + def test_table_without_yang(self): + curConfig = dict(configDbJson) + unknown = {"unknown_table": {"ukey": "uvalue"}} + self.updateConfig(curConfig, unknown) + self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) + cm = config_mgmt.ConfigMgmt(source=config_mgmt.CONFIG_DB_JSON_FILE) + #assert "unknown_table" in cm.tablesWithoutYang() + return + + def test_search_keys(self): + curConfig = dict(configDbJson) + self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) + cmdpb = config_mgmt.ConfigMgmtDPB(source=config_mgmt.CONFIG_DB_JSON_FILE) + out = cmdpb.configWithKeys(portBreakOutConfigDbJson, \ + ["Ethernet8","Ethernet9"]) + assert "VLAN" not in out.keys() + assert "INTERFACE" not in out.keys() + for k in out['ACL_TABLE'].keys(): + # only ports must be chosen + len(out['ACL_TABLE'][k]) == 1 + out = 
cmdpb.configWithKeys(portBreakOutConfigDbJson, \
+            ["Ethernet10","Ethernet11"])
+        assert "INTERFACE" in out.keys()
+        for k in out['ACL_TABLE'].keys():
+            # only ports must be chosen
+            len(out['ACL_TABLE'][k]) == 1
+        return
+
+    def test_break_out(self):
+        # prepare default config
+        self.writeJson(portBreakOutConfigDbJson, \
+            config_mgmt.DEFAULT_CONFIG_DB_JSON_FILE)
+        # prepare config db json to start with
+        curConfig = dict(configDbJson)
+        #Ethernet8: start from 4x25G-->2x50G with -f -l
+        self.dpb_port8_4x25G_2x50G_f_l(curConfig)
+        #Ethernet8: move from 2x50G-->1x100G without force, list deps
+        self.dpb_port8_2x50G_1x100G(curConfig)
+        # Ethernet8: move from 2x50G-->1x100G with force, where deps exists
+        self.dpb_port8_2x50G_1x100G_f(curConfig)
+        # Ethernet8: move from 1x100G-->4x25G without force, no deps
+        self.dpb_port8_1x100G_4x25G(curConfig)
+        # Ethernet8: move from 4x25G-->1x100G with force, no deps
+        self.dpb_port8_4x25G_1x100G_f(curConfig)
+        # Ethernet8: move from 1x100G-->1x50G(2)+2x25G(2) with -f -l,
+        self.dpb_port8_1x100G_1x50G_2x25G_f_l(curConfig)
+        # Ethernet4: breakout from 4x25G to 2x50G with -f -l
+        self.dpb_port4_4x25G_2x50G_f_l(curConfig)
+        return
+
+    def tearDown(self):
+        try:
+            os.remove(config_mgmt.CONFIG_DB_JSON_FILE)
+            os.remove(config_mgmt.DEFAULT_CONFIG_DB_JSON_FILE)
+        except Exception as e:
+            pass
+        return
+
+    ########### HELPER FUNCS #####################################
+    def writeJson(self, d, file):
+        with open(file, 'w') as f:
+            dump(d, f, indent=4)
+        return
+
+    def config_mgmt_dpb(self, curConfig):
+        '''
+        config_mgmt.ConfigMgmtDPB class instance with mocked functions. Not using
+        pytest fixture, because it is used in non test funcs.
+
+        Parameter:
+            curConfig (dict): Config to start with.
+
+        Return:
+            cmdpb (ConfigMgmtDPB): Class instance of ConfigMgmtDPB.
+ ''' + # create object + self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) + cmdpb = config_mgmt.ConfigMgmtDPB(source=config_mgmt.CONFIG_DB_JSON_FILE) + # mock funcs + cmdpb.writeConfigDB = MagicMock(return_value=True) + cmdpb._verifyAsicDB = MagicMock(return_value=True) + import mock_tables.dbconnector + return cmdpb + + def generate_args(self, portIdx, laneIdx, curMode, newMode): + ''' + Generate port to deleted, added and {lanes, speed} setting based on + current and new mode. + Example: + For generate_args(8, 73, '4x25G', '2x50G'): + output: + ( + ['Ethernet8', 'Ethernet9', 'Ethernet10', 'Ethernet11'], + ['Ethernet8', 'Ethernet10'], + {'Ethernet8': {'lanes': '73,74', 'speed': '50000'}, + 'Ethernet10': {'lanes': '75,76', 'speed': '50000'}}) + + Parameters: + portIdx (int): Port Index. + laneIdx (int): Lane Index. + curMode (str): current breakout mode of Port. + newMode (str): new breakout mode of Port. + + Return: + dPorts, pJson (tuple)[list, dict] + ''' + # default params + pre = "Ethernet" + laneMap = {"4x25G": [1,1,1,1], "2x50G": [2,2], "1x100G":[4], \ + "1x50G(2)+2x25G(2)":[2,1,1], "2x25G(2)+1x50G(2)":[1,1,2]} + laneSpeed = 25000 + # generate dPorts + l = list(laneMap[curMode]); l.insert(0, 0); id = portIdx; dPorts = list() + for i in l[:-1]: + id = id + i + portName = portName = "{}{}".format(pre, id) + dPorts.append(portName) + # generate aPorts + l = list(laneMap[newMode]); l.insert(0, 0); id = portIdx; aPorts = list() + for i in l[:-1]: + id = id + i + portName = portName = "{}{}".format(pre, id) + aPorts.append(portName) + # generate pJson + l = laneMap[newMode]; pJson = {"PORT": {}}; li = laneIdx; pi = 0 + for i in l: + speed = laneSpeed*i + lanes = [str(li+j) for j in range(i)]; lanes = ','.join(lanes) + pJson['PORT'][aPorts[pi]] = {"speed": str(speed), "lanes": str(lanes)} + li = li+i; pi = pi + 1 + return dPorts, pJson + + def updateConfig(self, conf, uconf): + ''' + update the config to emulate continous breakingout a single port. 
+
+        Parameters:
+            conf (dict): current config in config DB.
+            uconf (dict): config Diff to be pushed in config DB.
+
+        Return:
+            void
+            conf will be updated with uconf, i.e. config diff.
+        '''
+        try:
+            for it in uconf.keys():
+                # if conf has the key
+                if conf.get(it):
+                    # if marked for deletion
+                    if uconf[it] == None:
+                        del conf[it]
+                    else:
+                        if isinstance(conf[it], list) and isinstance(uconf[it], list):
+                            conf[it] = list(uconf[it])
+                        elif isinstance(conf[it], dict) and isinstance(uconf[it], dict):
+                            self.updateConfig(conf[it], uconf[it])
+                        else:
+                            conf[it] = uconf[it]
+                    del uconf[it]
+            # update new keys in conf
+            conf.update(uconf)
+        except Exception as e:
+            print("update Config failed")
+            print(e)
+            raise e
+        return
+
+    def checkResult(self, cmdpb, delConfig, addConfig):
+        '''
+        Usual result check in many tests is: Make sure delConfig and addConfig are
+        pushed in order to configDb
+
+        Parameters:
+            cmdpb (ConfigMgmtDPB): Class instance of ConfigMgmtDPB.
+            delConfig (dict): config Diff to be pushed in config DB while deletion
+                of ports.
+            addConfig (dict): config Diff to be pushed in config DB while addition
+                of ports.
+
+        Return:
+            void
+        '''
+        calls = [call(delConfig), call(addConfig)]
+        assert cmdpb.writeConfigDB.call_count == 2
+        cmdpb.writeConfigDB.assert_has_calls(calls, any_order=False)
+        return
+
+    def postUpdateConfig(self, curConfig, delConfig, addConfig):
+        '''
+        After breakout, update the config to emulate continuous breakout of a
+        single port.
+
+        Parameters:
+            curConfig (dict): current Config in config DB.
+            delConfig (dict): config Diff to be pushed in config DB while deletion
+                of ports.
+            addConfig (dict): config Diff to be pushed in config DB while addition
+                of ports.
+
+        Return:
+            void
+            curConfig will be updated with delConfig and addConfig.
        '''
        # update the curConfig with change
        self.updateConfig(curConfig, delConfig)
        self.updateConfig(curConfig, addConfig)
        return

    def dpb_port8_1x100G_1x50G_2x25G_f_l(self, curConfig):
        '''
        Breakout Port 8 1x100G->1x50G(2)+2x25G(2) with -f -l.

        Parameters:
            curConfig (dict): current Config in config DB.

        Return:
            void
            assert for success and failure.
        '''
        cmdpb = self.config_mgmt_dpb(curConfig)
        # create ARGS
        dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \
            curMode='1x100G', newMode='1x50G(2)+2x25G(2)')
        # deps/ret are not asserted here; success is verified via checkResult
        deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson,
            force=True, loadDefConfig=True)
        # Expected Result delConfig and addConfig is pushed in order
        delConfig = {
            u'PORT': {
                u'Ethernet8': None
            }
        }
        # with loadDefConfig=True the default port config (ACL/INTERFACE/VLAN
        # membership) is added back alongside the new PORT entries
        addConfig = {
            u'ACL_TABLE': {
                u'NO-NSW-PACL-V4': {
                    u'ports': ['Ethernet0', 'Ethernet4', 'Ethernet8', 'Ethernet10']
                },
                u'NO-NSW-PACL-TEST': {
                    u'ports': ['Ethernet11']
                }
            },
            u'INTERFACE': {
                u'Ethernet11|2a04:1111:40:a709::1/126': {
                    u'scope': u'global',
                    u'family': u'IPv6'
                },
                u'Ethernet11': {}
            },
            u'VLAN_MEMBER': {
                u'Vlan100|Ethernet8': {
                    u'tagging_mode': u'untagged'
                },
                u'Vlan100|Ethernet11': {
                    u'tagging_mode': u'untagged'
                }
            },
            u'PORT': {
                'Ethernet8': {
                    'speed': '50000',
                    'lanes': '73,74'
                },
                'Ethernet10': {
                    'speed': '25000',
                    'lanes': '75'
                },
                'Ethernet11': {
                    'speed': '25000',
                    'lanes': '76'
                }
            }
        }
        self.checkResult(cmdpb, delConfig, addConfig)
        self.postUpdateConfig(curConfig, delConfig, addConfig)
        return

    def dpb_port8_4x25G_1x100G_f(self, curConfig):
        '''
        Breakout Port 8 4x25G->1x100G with -f.

        Parameters:
            curConfig (dict): current Config in config DB.

        Return:
            void
            assert for success and failure.
        '''
        cmdpb = self.config_mgmt_dpb(curConfig)
        # create ARGS
        dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \
            curMode='4x25G', newMode='1x100G')
        # NOTE(review): method name suggests -f (force) but force=False is
        # passed here -- confirm against the intended scenario
        deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson,
            force=False, loadDefConfig=False)
        # Expected Result delConfig and addConfig is pushed in order
        delConfig = {
            u'PORT': {
                u'Ethernet8': None,
                u'Ethernet9': None,
                u'Ethernet10': None,
                u'Ethernet11': None
            }
        }
        # without loadDefConfig only the generated PORT entries are added
        addConfig = pJson
        self.checkResult(cmdpb, delConfig, addConfig)
        self.postUpdateConfig(curConfig, delConfig, addConfig)
        return

    def dpb_port8_1x100G_4x25G(self, curConfig):
        '''
        Breakout Port 8 1x100G->4x25G.

        Parameters:
            curConfig (dict): current Config in config DB.

        Return:
            void
            assert for success and failure.
        '''
        cmdpb = self.config_mgmt_dpb(curConfig)
        dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \
            curMode='1x100G', newMode='4x25G')
        deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson,
            force=False, loadDefConfig=False)
        # Expected Result delConfig and addConfig is pushed in order
        delConfig = {
            u'PORT': {
                u'Ethernet8': None
            }
        }
        addConfig = pJson
        self.checkResult(cmdpb, delConfig, addConfig)
        self.postUpdateConfig(curConfig, delConfig, addConfig)
        return

    def dpb_port8_2x50G_1x100G_f(self, curConfig):
        '''
        Breakout Port 8 2x50G->1x100G with -f.

        Parameters:
            curConfig (dict): current Config in config DB.

        Return:
            void
            assert for success and failure.
        '''
        cmdpb = self.config_mgmt_dpb(curConfig)
        # create ARGS
        dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \
            curMode='2x50G', newMode='1x100G')
        # force=True: dependent config (ACL/VLAN membership) is deleted too
        deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson,
            force=True, loadDefConfig=False)
        # Expected Result delConfig and addConfig is pushed in order
        delConfig = {
            u'ACL_TABLE': {
                u'NO-NSW-PACL-V4': {
                    u'ports': ['Ethernet0', 'Ethernet4']
                }
            },
            u'VLAN_MEMBER': {
                u'Vlan100|Ethernet8': None
            },
            u'PORT': {
                u'Ethernet8': None,
                u'Ethernet10': None
            }
        }
        addConfig = pJson
        self.checkResult(cmdpb, delConfig, addConfig)
        self.postUpdateConfig(curConfig, delConfig, addConfig)

    def dpb_port8_2x50G_1x100G(self, curConfig):
        '''
        Breakout Port 8 2x50G->1x100G (no force: must be refused).

        Parameters:
            curConfig (dict): current Config in config DB.

        Return:
            void
            assert for success and failure.
        '''
        cmdpb = self.config_mgmt_dpb(curConfig)
        # create ARGS
        dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \
            curMode='2x50G', newMode='1x100G')
        deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson,
            force=False, loadDefConfig=False)
        # Expected Result: breakout rejected with 3 dependencies reported
        # and no config DB writes at all
        assert ret == False and len(deps) == 3
        assert cmdpb.writeConfigDB.call_count == 0
        return

    def dpb_port8_4x25G_2x50G_f_l(self, curConfig):
        '''
        Breakout Port 8 4x25G->2x50G with -f -l.

        Parameters:
            curConfig (dict): current Config in config DB.

        Return:
            void
            assert for success and failure.
        '''
        cmdpb = self.config_mgmt_dpb(curConfig)
        # create ARGS
        dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \
            curMode='4x25G', newMode='2x50G')
        cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, force=True, \
            loadDefConfig=True)
        # Expected Result delConfig and addConfig is pushed in order
        delConfig = {
            u'ACL_TABLE': {
                u'NO-NSW-PACL-V4': {
                    u'ports': ['Ethernet0', 'Ethernet4']
                },
                u'NO-NSW-PACL-TEST': {
                    u'ports': None
                }
            },
            u'INTERFACE': None,
            u'VLAN_MEMBER': {
                u'Vlan100|Ethernet8': None,
                u'Vlan100|Ethernet11': None
            },
            u'PORT': {
                u'Ethernet8': None,
                u'Ethernet9': None,
                u'Ethernet10': None,
                u'Ethernet11': None
            }
        }
        addConfig = {
            u'ACL_TABLE': {
                u'NO-NSW-PACL-V4': {
                    u'ports': ['Ethernet0', 'Ethernet4', 'Ethernet8', 'Ethernet10']
                }
            },
            u'VLAN_MEMBER': {
                u'Vlan100|Ethernet8': {
                    u'tagging_mode': u'untagged'
                }
            },
            u'PORT': {
                'Ethernet8': {
                    'speed': '50000',
                    'lanes': '73,74'
                },
                'Ethernet10': {
                    'speed': '50000',
                    'lanes': '75,76'
                }
            }
        }
        # (call_count is also asserted inside checkResult below)
        assert cmdpb.writeConfigDB.call_count == 2
        self.checkResult(cmdpb, delConfig, addConfig)
        self.postUpdateConfig(curConfig, delConfig, addConfig)
        return

    def dpb_port4_4x25G_2x50G_f_l(self, curConfig):
        '''
        Breakout Port 4 4x25G->2x50G with -f -l.

        Parameters:
            curConfig (dict): current Config in config DB.

        Return:
            void
            assert for success and failure.
        '''
        cmdpb = self.config_mgmt_dpb(curConfig)
        # create ARGS
        dPorts, pJson = self.generate_args(portIdx=4, laneIdx=69, \
            curMode='4x25G', newMode='2x50G')
        cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, force=True, \
            loadDefConfig=True)
        # Expected Result delConfig and addConfig is pushed in order
        delConfig = {
            u'ACL_TABLE': {
                u'NO-NSW-PACL-V4': {
                    u'ports': ['Ethernet0', 'Ethernet8', 'Ethernet10']
                }
            },
            u'PORT': {
                u'Ethernet4': None,
                u'Ethernet5': None,
                u'Ethernet6': None,
                u'Ethernet7': None
            }
        }
        addConfig = {
            u'ACL_TABLE': {
                u'NO-NSW-PACL-V4': {
                    u'ports': ['Ethernet0', 'Ethernet8', 'Ethernet10', 'Ethernet4']
                }
            },
            u'PORT': {
                'Ethernet4': {
                    'speed': '50000',
                    'lanes': '69,70'
                },
                'Ethernet6': {
                    'speed': '50000',
                    'lanes': '71,72'
                }
            }
        }
        self.checkResult(cmdpb, delConfig, addConfig)
        self.postUpdateConfig(curConfig, delConfig, addConfig)
        return

###########GLOBAL Configs#####################################
# Fixture data: initial config-DB-style content shared by the DPB tests above.
configDbJson = {
    "ACL_TABLE": {
        "NO-NSW-PACL-TEST": {
            "policy_desc": "NO-NSW-PACL-TEST",
            "type": "L3",
            "stage": "INGRESS",
            "ports": [
                "Ethernet9",
                "Ethernet11",
            ]
        },
        "NO-NSW-PACL-V4": {
            "policy_desc": "NO-NSW-PACL-V4",
            "type": "L3",
            "stage": "INGRESS",
            "ports": [
                "Ethernet0",
                "Ethernet4",
                "Ethernet8",
                "Ethernet10"
            ]
        }
    },
    "VLAN": {
        "Vlan100": {
            "admin_status": "up",
            "description": "server_vlan",
            "dhcp_servers": [
                "10.186.72.116"
            ]
        },
    },
    "VLAN_MEMBER": {
        "Vlan100|Ethernet0": {
            "tagging_mode": "untagged"
        },
        "Vlan100|Ethernet2": {
            "tagging_mode": "untagged"
        },
        "Vlan100|Ethernet8": {
            "tagging_mode": "untagged"
        },
        "Vlan100|Ethernet11": {
            "tagging_mode": "untagged"
        },
    },
    "INTERFACE": {
        "Ethernet10": {},
        "Ethernet10|2a04:0000:40:a709::1/126": {
            "scope": "global",
            "family": "IPv6"
        }
    },
    "PORT": {
        "Ethernet0": {
            "alias": "Eth1/1",
            "lanes": "65",
            "description": "",
            "speed": "25000",
"admin_status": "up" + }, + "Ethernet1": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet2": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet3": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet4": { + "alias": "Eth2/1", + "lanes": "69", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet5": { + "alias": "Eth2/2", + "lanes": "70", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet6": { + "alias": "Eth2/3", + "lanes": "71", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet7": { + "alias": "Eth2/4", + "lanes": "72", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet8": { + "alias": "Eth3/1", + "lanes": "73", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet9": { + "alias": "Eth3/2", + "lanes": "74", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet10": { + "alias": "Eth3/3", + "lanes": "75", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet11": { + "alias": "Eth3/4", + "lanes": "76", + "description": "", + "speed": "25000", + "admin_status": "up" + } + } +} + +portBreakOutConfigDbJson = { + "ACL_TABLE": { + "NO-NSW-PACL-TEST": { + "ports": [ + "Ethernet9", + "Ethernet11", + ] + }, + "NO-NSW-PACL-V4": { + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "Ethernet4", + "Ethernet8", + "Ethernet10" + ] + } + }, + "VLAN": { + "Vlan100": { + "admin_status": "up", + "description": "server_vlan", + "dhcp_servers": [ + "10.186.72.116" + ] + } + }, + "VLAN_MEMBER": { + "Vlan100|Ethernet8": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet11": { + "tagging_mode": "untagged" + } + }, + "INTERFACE": { + "Ethernet11": {}, + 
"Ethernet11|2a04:1111:40:a709::1/126": { + "scope": "global", + "family": "IPv6" + } + } +} diff --git a/sonic-utilities-tests/filter_fdb_entries_test.py b/sonic-utilities-tests/filter_fdb_entries_test.py index 22abeb1f28..af1f7712c3 100644 --- a/sonic-utilities-tests/filter_fdb_entries_test.py +++ b/sonic-utilities-tests/filter_fdb_entries_test.py @@ -14,6 +14,7 @@ class TestFilterFdbEntries(object): """ ARP_FILENAME = "/tmp/arp.json" FDB_FILENAME = "/tmp/fdb.json" + CONFIG_DB_FILENAME = "/tmp/config_db.json" EXPECTED_FDB_FILENAME = "/tmp/expected_fdb.json" def __setUp(self, testData): @@ -45,16 +46,17 @@ def create_file_or_raise(data, filename): Raises: Exception if data type is not supported """ - if isinstance(data, list): + if isinstance(data, list) or isinstance(data, dict): with open(filename, 'w') as fp: json.dump(data, fp, indent=2, separators=(',', ': ')) elif isinstance(data, str): shutil.copyfile(data, filename) else: - raise Exception("Unknown test data type: {0}".format(type(test_data))) + raise Exception("Unknown test data type: {0}".format(type(data))) create_file_or_raise(testData["arp"], self.ARP_FILENAME) create_file_or_raise(testData["fdb"], self.FDB_FILENAME) + create_file_or_raise(testData["config_db"], self.CONFIG_DB_FILENAME) create_file_or_raise(testData["expected_fdb"], self.EXPECTED_FDB_FILENAME) def __tearDown(self): @@ -72,6 +74,7 @@ def __tearDown(self): fdbFiles = glob.glob(self.FDB_FILENAME + '*') for file in fdbFiles: os.remove(file) + os.remove(self.CONFIG_DB_FILENAME) def __runCommand(self, cmds): """ @@ -166,8 +169,10 @@ def testFilterFdbEntries(self, testData): self.ARP_FILENAME, "-f", self.FDB_FILENAME, + "-c", + self.CONFIG_DB_FILENAME, ]) - assert rc == 0, "CFilter_fbd_entries.py failed with '{0}'".format(stderr) + assert rc == 0, "Filter_fdb_entries.py failed with '{0}'".format(stderr) assert self.__verifyOutput(), "Test failed for test data: {0}".format(testData) finally: self.__tearDown() diff --git 
a/sonic-utilities-tests/filter_fdb_input/config_db.json b/sonic-utilities-tests/filter_fdb_input/config_db.json new file mode 100644 index 0000000000..8c34fcc5b6 --- /dev/null +++ b/sonic-utilities-tests/filter_fdb_input/config_db.json @@ -0,0 +1,2517 @@ +{ + "NTP_SERVER": { + "10.20.8.129": {}, + "10.20.8.130": {} + }, + "TACPLUS_SERVER": { + "100.127.20.21": { + "priority": "1", + "tcp_port": "49" + } + }, + "DEVICE_METADATA": { + "localhost": { + "hwsku": "Force10-S6000", + "default_bgp_status": "down", + "type": "ToRRouter", + "hostname": "str-s6000-acs-14", + "platform": "x86_64-dell_s6000_s1220-r0", + "mac": "f4:8e:38:16:bc:8d", + "default_pfcwd_status": "enable", + "bgp_asn": "65100", + "deployment_id": "1", + "docker_routing_config_mode": "unified" + } + }, + "BGP_PEER_RANGE": { + "BGPSLBPassive": { + "src_address": "10.1.0.32", + "name": "BGPSLBPassive", + "ip_range": [ + "10.255.0.0/25" + ] + }, + "BGPVac": { + "src_address": "10.1.0.32", + "name": "BGPVac", + "ip_range": [ + "192.168.0.0/21" + ] + } + }, + "VLAN": { + "Vlan1000": { + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4", + "192.0.0.5", + "192.0.0.6", + "192.0.0.7", + "192.0.0.8", + "192.0.0.9", + "192.0.0.10", + "192.0.0.11", + "192.0.0.12", + "192.0.0.13", + "192.0.0.14", + "192.0.0.15", + "192.0.0.16", + "192.0.0.17", + "192.0.0.18", + "192.0.0.19", + "192.0.0.20", + "192.0.0.21", + "192.0.0.22", + "192.0.0.23", + "192.0.0.24", + "192.0.0.25", + "192.0.0.26", + "192.0.0.27", + "192.0.0.28", + "192.0.0.29", + "192.0.0.30", + "192.0.0.31", + "192.0.0.32", + "192.0.0.33", + "192.0.0.34", + "192.0.0.35", + "192.0.0.36", + "192.0.0.37", + "192.0.0.38", + "192.0.0.39", + "192.0.0.40", + "192.0.0.41", + "192.0.0.42", + "192.0.0.43", + "192.0.0.44", + "192.0.0.45", + "192.0.0.46", + "192.0.0.47", + "192.0.0.48" + ], + "vlanid": "1000" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "1": "1", + "0": "0", + "3": "3", + "2": "2", + "5": "5", + "4": "4", + "7": "7", 
+ "6": "6" + } + }, + "QUEUE": { + "Ethernet4|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet4|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet4|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet4|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet4|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet4|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet4|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet8|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet8|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet12|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet12|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": 
"[SCHEDULER|scheduler.1]" + }, + "Ethernet16|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet16|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet20|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet20|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet24|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet24|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet28|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet28|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|0": { + 
"scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet32|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet32|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet36|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet36|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet40|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet40|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + 
"Ethernet44|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet44|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet48|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet48|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet52|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet52|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet56|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet56|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|0": { + "scheduler": "[SCHEDULER|scheduler.0]" 
+ }, + "Ethernet60|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet60|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet60|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet64|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet64|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet68|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet68|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet72|4": { + "wred_profile": 
"[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet72|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet76|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet76|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet80|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet80|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet84|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet84|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|1": { + 
"scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet88|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet88|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet92|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet92|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet96|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet96|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet112|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + 
"scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet112|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet116|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet116|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet120|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet120|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet124|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet124|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + } + }, + "PORTCHANNEL_MEMBER": { + "PortChannel0001|Ethernet112": {}, + "PortChannel0002|Ethernet116": {}, + 
"PortChannel0003|Ethernet120": {}, + "PortChannel0004|Ethernet124": {} + }, + "PORT": { + "Ethernet0": { + "index": "0", + "lanes": "29,30,31,32", + "description": "fortyGigE0/0", + "mtu": "9100", + "alias": "fortyGigE0/0", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet4": { + "index": "1", + "lanes": "25,26,27,28", + "description": "Servers0:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/4", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet8": { + "index": "2", + "lanes": "37,38,39,40", + "description": "Servers1:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/8", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet12": { + "index": "3", + "lanes": "33,34,35,36", + "description": "Servers2:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/12", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet16": { + "index": "4", + "lanes": "41,42,43,44", + "description": "Servers3:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/16", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet20": { + "index": "5", + "lanes": "45,46,47,48", + "description": "Servers4:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/20", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet24": { + "index": "6", + "lanes": "5,6,7,8", + "description": "Servers5:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/24", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet28": { + "index": "7", + "lanes": "1,2,3,4", + "description": "Servers6:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/28", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet32": { + "index": "8", + "lanes": "9,10,11,12", + "description": "Servers7:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/32", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet36": { + "index": "9", + "lanes": "13,14,15,16", + "description": 
"Servers8:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/36", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet40": { + "index": "10", + "lanes": "21,22,23,24", + "description": "Servers9:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/40", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet44": { + "index": "11", + "lanes": "17,18,19,20", + "description": "Servers10:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/44", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet48": { + "index": "12", + "lanes": "49,50,51,52", + "description": "Servers11:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/48", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet52": { + "index": "13", + "lanes": "53,54,55,56", + "description": "Servers12:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/52", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet56": { + "index": "14", + "lanes": "61,62,63,64", + "description": "Servers13:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/56", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet60": { + "index": "15", + "lanes": "57,58,59,60", + "description": "Servers14:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/60", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet64": { + "index": "16", + "lanes": "65,66,67,68", + "description": "Servers15:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/64", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet68": { + "index": "17", + "lanes": "69,70,71,72", + "description": "Servers16:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/68", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet72": { + "index": "18", + "lanes": "77,78,79,80", + "description": "Servers17:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/72", + "admin_status": "up", + 
"speed": "40000" + }, + "Ethernet76": { + "index": "19", + "lanes": "73,74,75,76", + "description": "Servers18:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/76", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet80": { + "index": "20", + "lanes": "105,106,107,108", + "description": "Servers19:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/80", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet84": { + "index": "21", + "lanes": "109,110,111,112", + "description": "Servers20:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/84", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet88": { + "index": "22", + "lanes": "117,118,119,120", + "description": "Servers21:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/88", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet92": { + "index": "23", + "lanes": "113,114,115,116", + "description": "Servers22:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/92", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet96": { + "index": "24", + "lanes": "121,122,123,124", + "description": "Servers23:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/96", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet100": { + "index": "25", + "lanes": "125,126,127,128", + "description": "fortyGigE0/100", + "mtu": "9100", + "alias": "fortyGigE0/100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet104": { + "index": "26", + "lanes": "85,86,87,88", + "description": "fortyGigE0/104", + "mtu": "9100", + "alias": "fortyGigE0/104", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet108": { + "index": "27", + "lanes": "81,82,83,84", + "description": "fortyGigE0/108", + "mtu": "9100", + "alias": "fortyGigE0/108", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet112": { + "index": "28", + "lanes": "89,90,91,92", + "description": "ARISTA01T1:Ethernet1", + "pfc_asym": "off", + 
"mtu": "9100", + "alias": "fortyGigE0/112", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet116": { + "index": "29", + "lanes": "93,94,95,96", + "description": "ARISTA02T1:Ethernet1", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/116", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet120": { + "index": "30", + "lanes": "97,98,99,100", + "description": "ARISTA03T1:Ethernet1", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/120", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet124": { + "index": "31", + "lanes": "101,102,103,104", + "description": "ARISTA04T1:Ethernet1", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/124", + "admin_status": "up", + "speed": "40000" + } + }, + "SYSLOG_SERVER": { + "10.3.145.8": {}, + "100.127.20.21": {} + }, + "CRM": { + "Config": { + "acl_table_threshold_type": "percentage", + "nexthop_group_threshold_type": "percentage", + "fdb_entry_high_threshold": "85", + "acl_entry_threshold_type": "percentage", + "ipv6_neighbor_low_threshold": "70", + "nexthop_group_member_low_threshold": "70", + "acl_group_high_threshold": "85", + "ipv4_route_high_threshold": "85", + "acl_counter_high_threshold": "85", + "ipv4_route_low_threshold": "70", + "ipv4_route_threshold_type": "percentage", + "ipv4_neighbor_low_threshold": "70", + "acl_group_threshold_type": "percentage", + "ipv4_nexthop_high_threshold": "85", + "ipv6_route_threshold_type": "percentage", + "nexthop_group_low_threshold": "70", + "ipv4_neighbor_high_threshold": "85", + "ipv6_route_high_threshold": "85", + "ipv6_nexthop_threshold_type": "percentage", + "polling_interval": "300", + "ipv4_nexthop_threshold_type": "percentage", + "acl_group_low_threshold": "70", + "acl_entry_low_threshold": "70", + "nexthop_group_member_threshold_type": "percentage", + "ipv4_nexthop_low_threshold": "70", + "acl_counter_threshold_type": "percentage", + "ipv6_neighbor_high_threshold": "85", + "nexthop_group_member_high_threshold": "85", 
+ "acl_table_low_threshold": "70", + "fdb_entry_threshold_type": "percentage", + "ipv6_neighbor_threshold_type": "percentage", + "acl_table_high_threshold": "85", + "ipv6_nexthop_low_threshold": "70", + "acl_counter_low_threshold": "70", + "ipv4_neighbor_threshold_type": "percentage", + "nexthop_group_high_threshold": "85", + "ipv6_route_low_threshold": "70", + "acl_entry_high_threshold": "85", + "fdb_entry_low_threshold": "70", + "ipv6_nexthop_high_threshold": "85" + } + }, + "VLAN_INTERFACE": { + "Vlan1000|192.168.0.1/21": {} + }, + "BUFFER_PG": { + "Ethernet4|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet8|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet12|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet16|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet20|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet24|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet28|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet32|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet36|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet40|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet44|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet48|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet52|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet56|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet60|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet64|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet68|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet72|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet76|0": { + 
"profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet80|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet84|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet88|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet92|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet96|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet112|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet116|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet120|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet124|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + } + }, + "BGP_NEIGHBOR": { + "10.0.0.57": { + "rrclient": "0", + "name": "ARISTA01T1", + "local_addr": "10.0.0.56", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "10.0.0.59": { + "rrclient": "0", + "name": "ARISTA02T1", + "local_addr": "10.0.0.58", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "10.0.0.61": { + "rrclient": "0", + "name": "ARISTA03T1", + "local_addr": "10.0.0.60", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "10.0.0.63": { + "rrclient": "0", + "name": "ARISTA04T1", + "local_addr": "10.0.0.62", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "fc00::7a": { + "rrclient": "0", + "name": "ARISTA03T1", + "local_addr": "fc00::79", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "fc00::7e": { + "rrclient": "0", + "name": "ARISTA04T1", + "local_addr": "fc00::7d", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "fc00::72": { + "rrclient": "0", + "name": "ARISTA01T1", + "local_addr": "fc00::71", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "fc00::76": { + 
"rrclient": "0", + "name": "ARISTA02T1", + "local_addr": "fc00::75", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + } + }, + "PORTCHANNEL_INTERFACE": { + "PortChannel0001|10.0.0.56/31": {}, + "PortChannel0001|FC00::71/126": {}, + "PortChannel0002|10.0.0.58/31": {}, + "PortChannel0002|FC00::75/126": {}, + "PortChannel0003|10.0.0.60/31": {}, + "PortChannel0003|FC00::79/126": {}, + "PortChannel0004|10.0.0.62/31": {}, + "PortChannel0004|FC00::7D/126": {} + }, + "PFC_WD": { + "Ethernet4": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet8": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet12": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet16": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet20": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet24": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet28": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet32": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet36": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet40": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet44": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet48": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet52": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet56": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet60": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet64": { + 
"action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet68": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet72": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet76": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet80": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet84": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet88": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet92": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet96": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet112": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet116": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet120": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet124": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "GLOBAL": { + "POLL_INTERVAL": "200" + } + }, + "PORTCHANNEL": { + "PortChannel0001": { + "admin_status": "up", + "min_links": "1", + "members": [ + "Ethernet112" + ], + "mtu": "9100" + }, + "PortChannel0002": { + "admin_status": "up", + "min_links": "1", + "members": [ + "Ethernet116" + ], + "mtu": "9100" + }, + "PortChannel0003": { + "admin_status": "up", + "min_links": "1", + "members": [ + "Ethernet120" + ], + "mtu": "9100" + }, + "PortChannel0004": { + "admin_status": "up", + "min_links": "1", + "members": [ + "Ethernet124" + ], + "mtu": "9100" + } + }, + "LOOPBACK_INTERFACE": { + "Loopback0|10.1.0.32/32": {}, + "Loopback0|FC00:1::32/128": {} + }, + "PORT_QOS_MAP": { + "Ethernet4": { + "tc_to_pg_map": 
"[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet8": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet12": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet16": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet20": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet24": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet28": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet32": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet36": { + 
"tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet40": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet44": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet48": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet52": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet56": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet60": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet64": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + 
"Ethernet68": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet72": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet76": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet80": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet84": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet88": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet92": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet96": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": 
"[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet112": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet116": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet120": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet124": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + } + }, + "DHCP_SERVER": { + "192.0.0.1": {}, + "192.0.0.2": {}, + "192.0.0.3": {}, + "192.0.0.4": {}, + "192.0.0.5": {}, + "192.0.0.6": {}, + "192.0.0.7": {}, + "192.0.0.8": {}, + "192.0.0.9": {}, + "192.0.0.10": {}, + "192.0.0.11": {}, + "192.0.0.12": {}, + "192.0.0.13": {}, + "192.0.0.14": {}, + "192.0.0.15": {}, + "192.0.0.16": {}, + "192.0.0.17": {}, + "192.0.0.18": {}, + "192.0.0.19": {}, + "192.0.0.20": {}, + "192.0.0.21": {}, + "192.0.0.22": {}, + "192.0.0.23": {}, + "192.0.0.24": {}, + "192.0.0.25": {}, + "192.0.0.26": {}, + "192.0.0.27": {}, + "192.0.0.28": {}, + "192.0.0.29": {}, + "192.0.0.30": {}, + "192.0.0.31": {}, + "192.0.0.32": {}, + "192.0.0.33": {}, + "192.0.0.34": {}, + "192.0.0.35": {}, + "192.0.0.36": {}, + "192.0.0.37": {}, + "192.0.0.38": {}, + "192.0.0.39": {}, + "192.0.0.40": {}, + "192.0.0.41": {}, + "192.0.0.42": {}, + "192.0.0.43": {}, + "192.0.0.44": {}, + "192.0.0.45": {}, + "192.0.0.46": {}, + "192.0.0.47": {}, + 
"192.0.0.48": {} + }, + "VLAN_MEMBER": { + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet12": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet16": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet20": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet24": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet28": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet32": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet36": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet40": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet44": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet48": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet52": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet56": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet60": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet64": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet68": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet72": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet76": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet80": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet84": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet88": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet92": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet96": { + "tagging_mode": "untagged" + } + }, + "BUFFER_QUEUE": { + "Ethernet4|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet4|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet4|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet8|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet8|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet8|5-6": { + "profile": 
"[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet12|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet12|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet12|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet16|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet16|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet16|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet20|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet20|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet20|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet24|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet24|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet24|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet28|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet28|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet28|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet32|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet32|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet32|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet36|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet36|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet36|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet40|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet40|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet40|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + 
"Ethernet44|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet44|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet44|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet48|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet48|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet48|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet52|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet52|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet52|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet56|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet56|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet56|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet60|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet60|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet60|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet64|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet64|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet64|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet68|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet68|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet68|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet72|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet72|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet72|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet76|0-2": { + "profile": 
"[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet76|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet76|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet80|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet80|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet80|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet84|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet84|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet84|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet88|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet88|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet88|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet92|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet92|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet92|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet96|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet96|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet96|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet112|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet112|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet112|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet116|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet116|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet116|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet120|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + 
"Ethernet120|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet120|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet124|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet124|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet124|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + } + }, + "WRED_PROFILE": { + "AZURE_LOSSLESS": { + "red_max_threshold": "2097152", + "wred_green_enable": "true", + "ecn": "ecn_all", + "green_min_threshold": "1048576", + "red_min_threshold": "1048576", + "wred_yellow_enable": "true", + "yellow_min_threshold": "1048576", + "green_max_threshold": "2097152", + "green_drop_probability": "5", + "yellow_max_threshold": "2097152", + "wred_red_enable": "true", + "yellow_drop_probability": "5", + "red_drop_probability": "5" + } + }, + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "1": "0", + "0": "0", + "3": "3", + "2": "0", + "5": "0", + "4": "4", + "7": "7", + "6": "0" + } + }, + "DEVICE_NEIGHBOR_METADATA": { + "ARISTA01T1": { + "lo_addr": "None", + "mgmt_addr": "172.16.131.112", + "hwsku": "Arista-VM", + "type": "LeafRouter" + }, + "ARISTA02T1": { + "lo_addr": "None", + "mgmt_addr": "172.16.131.113", + "hwsku": "Arista-VM", + "type": "LeafRouter" + }, + "ARISTA03T1": { + "lo_addr": "None", + "mgmt_addr": "172.16.131.114", + "hwsku": "Arista-VM", + "type": "LeafRouter" + }, + "ARISTA04T1": { + "lo_addr": "None", + "mgmt_addr": "172.16.131.115", + "hwsku": "Arista-VM", + "type": "LeafRouter" + } + }, + "DEVICE_NEIGHBOR": { + "Ethernet4": { + "name": "Servers0", + "port": "eth0" + }, + "Ethernet8": { + "name": "Servers1", + "port": "eth0" + }, + "Ethernet12": { + "name": "Servers2", + "port": "eth0" + }, + "Ethernet16": { + "name": "Servers3", + "port": "eth0" + }, + "Ethernet20": { + "name": "Servers4", + "port": "eth0" + }, + "Ethernet24": { + "name": "Servers5", + "port": "eth0" + }, + "Ethernet28": { + "name": 
"Servers6", + "port": "eth0" + }, + "Ethernet32": { + "name": "Servers7", + "port": "eth0" + }, + "Ethernet36": { + "name": "Servers8", + "port": "eth0" + }, + "Ethernet40": { + "name": "Servers9", + "port": "eth0" + }, + "Ethernet44": { + "name": "Servers10", + "port": "eth0" + }, + "Ethernet48": { + "name": "Servers11", + "port": "eth0" + }, + "Ethernet52": { + "name": "Servers12", + "port": "eth0" + }, + "Ethernet56": { + "name": "Servers13", + "port": "eth0" + }, + "Ethernet60": { + "name": "Servers14", + "port": "eth0" + }, + "Ethernet64": { + "name": "Servers15", + "port": "eth0" + }, + "Ethernet68": { + "name": "Servers16", + "port": "eth0" + }, + "Ethernet72": { + "name": "Servers17", + "port": "eth0" + }, + "Ethernet76": { + "name": "Servers18", + "port": "eth0" + }, + "Ethernet80": { + "name": "Servers19", + "port": "eth0" + }, + "Ethernet84": { + "name": "Servers20", + "port": "eth0" + }, + "Ethernet88": { + "name": "Servers21", + "port": "eth0" + }, + "Ethernet92": { + "name": "Servers22", + "port": "eth0" + }, + "Ethernet96": { + "name": "Servers23", + "port": "eth0" + }, + "Ethernet112": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "Ethernet116": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "Ethernet120": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "Ethernet124": { + "name": "ARISTA04T1", + "port": "Ethernet1" + } + }, + "DSCP_TO_TC_MAP": { + "AZURE": { + "56": "1", + "54": "1", + "28": "1", + "48": "6", + "29": "1", + "60": "1", + "61": "1", + "62": "1", + "63": "1", + "49": "1", + "34": "1", + "24": "1", + "25": "1", + "26": "1", + "27": "1", + "20": "1", + "21": "1", + "22": "1", + "23": "1", + "46": "5", + "47": "1", + "44": "1", + "45": "1", + "42": "1", + "43": "1", + "40": "1", + "41": "1", + "1": "1", + "0": "1", + "3": "3", + "2": "1", + "5": "2", + "4": "4", + "7": "1", + "6": "1", + "9": "1", + "8": "0", + "35": "1", + "13": "1", + "12": "1", + "15": "1", + "58": "1", + "11": "1", + "10": "1", + "39": "1", + 
"38": "1", + "59": "1", + "14": "1", + "17": "1", + "16": "1", + "19": "1", + "18": "1", + "31": "1", + "30": "1", + "51": "1", + "36": "1", + "53": "1", + "52": "1", + "33": "1", + "55": "1", + "37": "1", + "32": "1", + "57": "1", + "50": "1" + } + }, + "MGMT_INTERFACE": { + "eth0|10.3.147.17/23": { + "gwaddr": "10.3.146.1" + }, + "eth0|FC00:2::32/64": { + "forced_mgmt_routes": [ + "10.3.145.98/31", + "10.3.145.8", + "100.127.20.16/28", + "10.3.149.170/31", + "40.122.216.24", + "13.91.48.226", + "10.3.145.14", + "10.64.246.0/24", + "10.64.247.0/24" + ], + "gwaddr": "fc00:2::1" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "1": "1", + "0": "0", + "3": "3", + "2": "2", + "5": "5", + "4": "4", + "7": "7", + "6": "6" + } + }, + "MGMT_PORT": { + "eth0": { + "alias": "eth0", + "admin_status": "up" + } + }, + "VERSIONS": { + "DATABASE": { + "VERSION": "version_1_0_1" + } + }, + "ACL_TABLE": { + "DATAACL": { + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0004" + ], + "type": "L3", + "policy_desc": "DATAACL", + "stage": "ingress" + }, + "EVERFLOW": { + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0004", + "Ethernet24", + "Ethernet40", + "Ethernet20", + "Ethernet44", + "Ethernet48", + "Ethernet28", + "Ethernet96", + "Ethernet92", + "Ethernet76", + "Ethernet72", + "Ethernet52", + "Ethernet80", + "Ethernet56", + "Ethernet32", + "Ethernet16", + "Ethernet36", + "Ethernet12", + "Ethernet60", + "Ethernet8", + "Ethernet4", + "Ethernet64", + "Ethernet68", + "Ethernet84", + "Ethernet88" + ], + "type": "MIRROR", + "policy_desc": "EVERFLOW", + "stage": "ingress" + }, + "EVERFLOWV6": { + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0004", + "Ethernet24", + "Ethernet40", + "Ethernet20", + "Ethernet44", + "Ethernet48", + "Ethernet28", + "Ethernet96", + "Ethernet92", + "Ethernet76", + "Ethernet72", + "Ethernet52", + "Ethernet80", + "Ethernet56", + "Ethernet32", 
+ "Ethernet16", + "Ethernet36", + "Ethernet12", + "Ethernet60", + "Ethernet8", + "Ethernet4", + "Ethernet64", + "Ethernet68", + "Ethernet84", + "Ethernet88" + ], + "type": "MIRRORV6", + "policy_desc": "EVERFLOWV6", + "stage": "ingress" + }, + "SNMP_ACL": { + "services": [ + "SNMP" + ], + "type": "CTRLPLANE", + "policy_desc": "SNMP_ACL", + "stage": "ingress" + }, + "SSH_ONLY": { + "services": [ + "SSH" + ], + "type": "CTRLPLANE", + "policy_desc": "SSH_ONLY", + "stage": "ingress" + } + }, + "CABLE_LENGTH": { + "AZURE": { + "Ethernet8": "5m", + "Ethernet0": "300m", + "Ethernet4": "5m", + "Ethernet108": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet68": "5m", + "Ethernet96": "5m", + "Ethernet124": "40m", + "Ethernet92": "5m", + "Ethernet120": "40m", + "Ethernet52": "5m", + "Ethernet56": "5m", + "Ethernet76": "5m", + "Ethernet72": "5m", + "Ethernet64": "5m", + "Ethernet32": "5m", + "Ethernet16": "5m", + "Ethernet36": "5m", + "Ethernet12": "5m", + "Ethernet88": "5m", + "Ethernet116": "40m", + "Ethernet80": "5m", + "Ethernet112": "40m", + "Ethernet84": "5m", + "Ethernet48": "5m", + "Ethernet44": "5m", + "Ethernet40": "5m", + "Ethernet28": "5m", + "Ethernet60": "5m", + "Ethernet20": "5m", + "Ethernet24": "5m" + } + }, + "SCHEDULER": { + "scheduler.0": { + "type": "DWRR", + "weight": "14" + }, + "scheduler.1": { + "type": "DWRR", + "weight": "15" + } + }, + "BUFFER_POOL": { + "egress_lossless_pool": { + "type": "egress", + "mode": "static", + "size": "12766208" + }, + "egress_lossy_pool": { + "type": "egress", + "mode": "dynamic", + "size": "7326924" + }, + "ingress_lossless_pool": { + "type": "ingress", + "mode": "dynamic", + "size": "12766208" + } + }, + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "static_th": "12766208", + "pool": "[BUFFER_POOL|egress_lossless_pool]", + "size": "0" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "[BUFFER_POOL|egress_lossy_pool]", + "size": "1518" + }, + "ingress_lossy_profile": { + 
"dynamic_th": "3", + "pool": "[BUFFER_POOL|ingress_lossless_pool]", + "size": "0" + } + } +} diff --git a/sonic-utilities-tests/filter_fdb_input/test_vectors.py b/sonic-utilities-tests/filter_fdb_input/test_vectors.py index 55d6c136de..cd1592a0a4 100644 --- a/sonic-utilities-tests/filter_fdb_input/test_vectors.py +++ b/sonic-utilities-tests/filter_fdb_input/test_vectors.py @@ -7,6 +7,8 @@ ], "fdb": [ ], + "config_db": { + }, "expected_fdb": [ ], }, @@ -19,6 +21,13 @@ }, "OP": "SET" }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "50:2f:a8:cb:76:7c", + "family": "IPv4" + }, + "OP": "SET" + }, ], "fdb": [ { @@ -29,6 +38,14 @@ "OP": "SET" }, ], + "config_db": { + "VLAN": { + "Vlan1000": {} + }, + "VLAN_INTERFACE": { + "Vlan1000|192.168.0.1/21": {} + }, + }, "expected_fdb": [ ], }, @@ -41,6 +58,13 @@ }, "OP": "SET" }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, ], "fdb": [ { @@ -51,7 +75,116 @@ "OP": "SET" }, ], + "config_db": { + "VLAN": { + "Vlan1000": {} + }, + "VLAN_INTERFACE": { + "Vlan1000|192.168.0.1/21": {} + }, + }, + "expected_fdb": [ + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + "config_db": { + "VLAN": { + "Vlan1": {} + }, + "VLAN_INTERFACE": { + "Vlan1|192.168.0.1/21": {} + }, + }, + "expected_fdb": [ + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "50:2f:a8:cb:76:7c", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ + { + "FDB_TABLE:Vlan1:50-2f-a8-cb-76-7c": { + 
"type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + "config_db": { + "VLAN": { + "Vlan1": {} + }, + "VLAN_INTERFACE": { + "Vlan1|25.103.178.1/21": {} + }, + }, "expected_fdb": [ + { + "FDB_TABLE:Vlan1:50-2f-a8-cb-76-7c": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ { "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { "type": "dynamic", @@ -60,10 +193,80 @@ "OP": "SET" }, ], + "config_db": { + "VLAN": { + "Vlan1000": {} + }, + "VLAN_INTERFACE": { + "Vlan1000|192.168.128.1/21": {} + }, + }, + "expected_fdb": [ + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "50:2f:a8:cb:76:7c", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ + { + "FDB_TABLE:Vlan1:50-2f-a8-cb-76-7c": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + "config_db": { + "VLAN": { + "Vlan1": {} + }, + "VLAN_INTERFACE": { + "Vlan1|25.103.0.1/21": {} + }, + }, + "expected_fdb": [ + ], }, { "arp": "sonic-utilities-tests/filter_fdb_input/arp.json", "fdb": "sonic-utilities-tests/filter_fdb_input/fdb.json", + "config_db": "sonic-utilities-tests/filter_fdb_input/config_db.json", "expected_fdb": "sonic-utilities-tests/filter_fdb_input/expected_fdb.json" }, + { + "arp": "sonic-utilities-tests/filter_fdb_input/arp.json", + "fdb": "sonic-utilities-tests/filter_fdb_input/fdb.json", + "config_db": { + "VLAN": { + "Vlan1": {} + }, + "VLAN_INTERFACE": { + "Vlan1|192.168.0.1/21": {} + }, + }, + "expected_fdb": [ + ], + }, + { + "arp": "sonic-utilities-tests/filter_fdb_input/arp.json", + "fdb": 
"sonic-utilities-tests/filter_fdb_input/fdb.json", + "config_db": { + }, + "expected_fdb": [ + ], + }, ] diff --git a/sonic-utilities-tests/mock_tables/config_db.json b/sonic-utilities-tests/mock_tables/config_db.json index 3061c3015e..ec278c3450 100644 --- a/sonic-utilities-tests/mock_tables/config_db.json +++ b/sonic-utilities-tests/mock_tables/config_db.json @@ -1,4 +1,13 @@ { + "BREAKOUT_CFG|Ethernet0": { + "brkout_mode": "4x25G[10G]" + }, + "BREAKOUT_CFG|Ethernet4": { + "brkout_mode": "2x50G" + }, + "BREAKOUT_CFG|Ethernet8": { + "brkout_mode": "1x100G[40G]" + }, "PORT|Ethernet0": { "alias": "etp1", "lanes": "0,1,2,3", diff --git a/sonic-utilities-tests/mock_tables/counters_db.json b/sonic-utilities-tests/mock_tables/counters_db.json index 2476837d71..2b2b600280 100644 --- a/sonic-utilities-tests/mock_tables/counters_db.json +++ b/sonic-utilities-tests/mock_tables/counters_db.json @@ -145,6 +145,12 @@ "Ethernet4": "oid:0x1000000000004", "Ethernet8": "oid:0x1000000000006" }, + "COUNTERS_LAG_NAME_MAP": { + "PortChannel0001": "oid:0x60000000005a1", + "PortChannel0002": "oid:0x60000000005a2", + "PortChannel0003": "oid:0x600000000063c", + "PortChannel0004": "oid:0x600000000063d" + }, "COUNTERS_DEBUG_NAME_PORT_STAT_MAP": { "DEBUG_0": "SAI_PORT_STAT_IN_DROP_REASON_RANGE_BASE", "DEBUG_2": "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" diff --git a/sonic-utilities-tests/mock_tables/state_db.json b/sonic-utilities-tests/mock_tables/state_db.json index 924600ae4b..b44b60df9f 100644 --- a/sonic-utilities-tests/mock_tables/state_db.json +++ b/sonic-utilities-tests/mock_tables/state_db.json @@ -1,13 +1,13 @@ { "TRANSCEIVER_INFO|Ethernet0": { "type": "QSFP28 or later", - "hardwarerev": "AC", - "serialnum": "MT1706FT02064", - "manufacturename": "Mellanox", - "modelname": "MFA1A00-C003", + "hardware_rev": "AC", + "serial": "MT1706FT02064", + "manufacturer": "Mellanox", + "model": "MFA1A00-C003", "vendor_oui": "00-02-c9", "vendor_date": "2017-01-13 ", - "Connector": 
"No separable connector", + "connector": "No separable connector", "encoding": "64B66B", "ext_identifier": "Power Class 3(2.5W max), CDR present in Rx Tx", "ext_rateselect_compliance": "QSFP+ Rate Select Version 1", diff --git a/sonic-utilities-tests/show_breakout_test.py b/sonic-utilities-tests/show_breakout_test.py new file mode 100644 index 0000000000..f3636e9907 --- /dev/null +++ b/sonic-utilities-tests/show_breakout_test.py @@ -0,0 +1,65 @@ +import os +import sys +from click.testing import CliRunner +from unittest import TestCase +from swsssdk import ConfigDBConnector + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +import mock_tables.dbconnector +import show.main as show + +# Expected output for 'show breakout current-mode' +current_mode_all_output = ''+ \ +"""+-------------+-------------------------+ +| Interface | Current Breakout Mode | ++=============+=========================+ +| Ethernet0 | 4x25G[10G] | ++-------------+-------------------------+ +| Ethernet4 | 2x50G | ++-------------+-------------------------+ +| Ethernet8 | 1x100G[40G] | ++-------------+-------------------------+ +""" + +# Expected output for 'show breakout current-mode Ethernet0' +current_mode_intf_output = ''+ \ +"""+-------------+-------------------------+ +| Interface | Current Breakout Mode | ++=============+=========================+ +| Ethernet0 | 4x25G[10G] | ++-------------+-------------------------+ +""" + +class TestBreakout(TestCase): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def setUp(self): + self.runner = CliRunner() + self.config_db = ConfigDBConnector() + self.config_db.connect() + self.obj = {'db': self.config_db} + + # Test 'show interfaces breakout current-mode' + def test_all_intf_current_mode(self): + result = 
self.runner.invoke(show.cli.commands["interfaces"].commands["breakout"].commands["current-mode"], [], obj=self.obj) + sys.stderr.write(result.output) + assert result.output == current_mode_all_output + + # Test 'show interfaces breakout current-mode Ethernet0' + def test_single_intf_current_mode(self): + result = self.runner.invoke(show.cli.commands["interfaces"].commands["breakout"].commands["current-mode"], ["Ethernet0"], obj=self.obj) + sys.stderr.write(result.output) + assert result.output == current_mode_intf_output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" diff --git a/sonic_installer/bootloader/__init__.py b/sonic_installer/bootloader/__init__.py new file mode 100644 index 0000000000..d2872eb7d0 --- /dev/null +++ b/sonic_installer/bootloader/__init__.py @@ -0,0 +1,16 @@ + +from .aboot import AbootBootloader +from .grub import GrubBootloader +from .uboot import UbootBootloader + +BOOTLOADERS = [ + AbootBootloader, + GrubBootloader, + UbootBootloader, +] + +def get_bootloader(): + for bootloaderCls in BOOTLOADERS: + if bootloaderCls.detect(): + return bootloaderCls() + raise RuntimeError('Bootloader could not be detected') diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py new file mode 100644 index 0000000000..1933921512 --- /dev/null +++ b/sonic_installer/bootloader/aboot.py @@ -0,0 +1,173 @@ +""" +Bootloader implementation for Aboot used on Arista devices +""" + +import base64 +import collections +import os +import re +import subprocess +import sys +import zipfile + +import click + +from M2Crypto import X509 + +from ..common import ( + HOST_PATH, + IMAGE_DIR_PREFIX, + IMAGE_PREFIX, + run_command, +) +from .bootloader import Bootloader + +_secureboot = None + +# For the signature format, see: https://github.com/aristanetworks/swi-tools/tree/master/switools +SWI_SIG_FILE_NAME
= 'swi-signature' +SWIX_SIG_FILE_NAME = 'swix-signature' +ISSUERCERT = 'IssuerCert' + +def isSecureboot(): + global _secureboot + if _secureboot is None: + with open('/proc/cmdline') as f: + m = re.search(r"secure_boot_enable=[y1]", f.read()) + _secureboot = bool(m) + return _secureboot + +class AbootBootloader(Bootloader): + + NAME = 'aboot' + BOOT_CONFIG_PATH = os.path.join(HOST_PATH, 'boot-config') + DEFAULT_IMAGE_PATH = '/tmp/sonic_image.swi' + + def _boot_config_read(self, path=BOOT_CONFIG_PATH): + config = collections.OrderedDict() + with open(path) as f: + for line in f.readlines(): + line = line.strip() + if not line or line.startswith('#') or '=' not in line: + continue + key, value = line.split('=', 1) + config[key] = value + return config + + def _boot_config_write(self, config, path=BOOT_CONFIG_PATH): + with open(path, 'w') as f: + f.write(''.join('%s=%s\n' % (k, v) for k, v in config.items())) + + def _boot_config_set(self, **kwargs): + path = kwargs.pop('path', self.BOOT_CONFIG_PATH) + config = self._boot_config_read(path=path) + for key, value in kwargs.items(): + config[key] = value + self._boot_config_write(config, path=path) + + def _swi_image_path(self, image): + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + if isSecureboot(): + return 'flash:%s/sonic.swi' % image_dir + return 'flash:%s/.sonic-boot.swi' % image_dir + + def get_current_image(self): + with open('/proc/cmdline') as f: + current = re.search(r"loop=/*(\S+)/", f.read()).group(1) + return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + + def get_installed_images(self): + images = [] + for filename in os.listdir(HOST_PATH): + if filename.startswith(IMAGE_DIR_PREFIX): + images.append(filename.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX)) + return images + + def get_next_image(self): + config = self._boot_config_read() + match = re.search(r"flash:/*(\S+)/", config['SWI']) + return match.group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + + def set_default_image(self, image): + 
image_path = self._swi_image_path(image) + self._boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) + return True + + def set_next_image(self, image): + image_path = self._swi_image_path(image) + self._boot_config_set(SWI=image_path) + return True + + def install_image(self, image_path): + run_command("/usr/bin/unzip -od /tmp %s boot0" % image_path) + run_command("swipath=%s target_path=/host sonic_upgrade=1 . /tmp/boot0" % image_path) + + def remove_image(self, image): + nextimage = self.get_next_image() + current = self.get_current_image() + if image == nextimage: + image_path = self._swi_image_path(current) + self._boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) + click.echo("Set next and default boot to current image %s" % current) + + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + click.echo('Removing image root filesystem...') + subprocess.call(['rm','-rf', os.path.join(HOST_PATH, image_dir)]) + click.echo('Image removed') + + def get_binary_image_version(self, image_path): + try: + version = subprocess.check_output(['/usr/bin/unzip', '-qop', image_path, '.imagehash']) + except subprocess.CalledProcessError: + return None + return IMAGE_PREFIX + version.strip() + + def verify_binary_image(self, image_path): + try: + subprocess.check_call(['/usr/bin/unzip', '-tq', image_path]) + return self._verify_secureboot_image(image_path) + except subprocess.CalledProcessError: + return False + + def _verify_secureboot_image(self, image_path): + if isSecureboot(): + cert = self.getCert(image_path) + return cert is not None + return True + + @classmethod + def getCert(cls, swiFile): + with zipfile.ZipFile(swiFile, 'r') as swi: + try: + sigInfo = swi.getinfo(cls.getSigFileName(swiFile)) + except KeyError: + # Occurs if SIG_FILE_NAME is not in the swi (the SWI is not signed properly) + return None + with swi.open(sigInfo, 'r') as sigFile: + for line in sigFile: + data = line.split(':') + if len(data) == 2: + if data[0] == ISSUERCERT: + try: + 
base64_cert = cls.base64Decode(data[1].strip()) + return X509.load_cert_string(base64_cert) + except TypeError: + return None + else: + sys.stderr.write('Unexpected format for line in swi[x]-signature file: %s\n' % line) + return None + + @classmethod + def getSigFileName(cls, swiFile): + if swiFile.lower().endswith(".swix"): + return SWIX_SIG_FILE_NAME + return SWI_SIG_FILE_NAME + + @classmethod + def base64Decode(cls, text): + return base64.standard_b64decode(text) + + @classmethod + def detect(cls): + with open('/proc/cmdline') as f: + return 'Aboot=' in f.read() diff --git a/sonic_installer/bootloader/bootloader.py b/sonic_installer/bootloader/bootloader.py new file mode 100644 index 0000000000..78bd05c61c --- /dev/null +++ b/sonic_installer/bootloader/bootloader.py @@ -0,0 +1,50 @@ +""" +Abstract Bootloader class +""" + +class Bootloader(object): + + NAME = None + DEFAULT_IMAGE_PATH = None + + def get_current_image(self): + """returns name of the current image""" + raise NotImplementedError + + def get_next_image(self): + """returns name of the next image""" + raise NotImplementedError + + def get_installed_images(self): + """returns list of installed images""" + raise NotImplementedError + + def set_default_image(self, image): + """set default image to boot from""" + raise NotImplementedError + + def set_next_image(self, image): + """set next image to boot from""" + raise NotImplementedError + + def install_image(self, image_path): + """install new image""" + raise NotImplementedError + + def remove_image(self, image): + """remove existing image""" + raise NotImplementedError + + def get_binary_image_version(self, image_path): + """returns the version of the image""" + raise NotImplementedError + + def verify_binary_image(self, image_path): + """verify that the image is supported by the bootloader""" + raise NotImplementedError + + @classmethod + def detect(cls): + """returns True if the bootloader is in use""" + return False + diff --git 
a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py new file mode 100644 index 0000000000..1d111f4191 --- /dev/null +++ b/sonic_installer/bootloader/grub.py @@ -0,0 +1,86 @@ +""" +Bootloader implementation for grub based platforms +""" + +import os +import re +import subprocess + +import click + +from ..common import ( + HOST_PATH, + IMAGE_DIR_PREFIX, + IMAGE_PREFIX, + run_command, +) +from .onie import OnieInstallerBootloader + +class GrubBootloader(OnieInstallerBootloader): + + NAME = 'grub' + + def get_installed_images(self): + images = [] + config = open(HOST_PATH + '/grub/grub.cfg', 'r') + for line in config: + if line.startswith('menuentry'): + image = line.split()[1].strip("'") + if IMAGE_PREFIX in image: + images.append(image) + config.close() + return images + + def get_next_image(self): + images = self.get_installed_images() + grubenv = subprocess.check_output(["/usr/bin/grub-editenv", HOST_PATH + "/grub/grubenv", "list"]) + m = re.search(r"next_entry=(\d+)", grubenv) + if m: + next_image_index = int(m.group(1)) + else: + m = re.search(r"saved_entry=(\d+)", grubenv) + if m: + next_image_index = int(m.group(1)) + else: + next_image_index = 0 + return images[next_image_index] + + def set_default_image(self, image): + images = self.get_installed_images() + command = 'grub-set-default --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) + run_command(command) + return True + + def set_next_image(self, image): + images = self.get_installed_images() + command = 'grub-reboot --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) + run_command(command) + return True + + def install_image(self, image_path): + run_command("bash " + image_path) + run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + + def remove_image(self, image): + click.echo('Updating GRUB...') + config = open(HOST_PATH + '/grub/grub.cfg', 'r') + old_config = config.read() + menuentry = re.search("menuentry '" + image + "[^}]*}", 
old_config).group() + config.close() + config = open(HOST_PATH + '/grub/grub.cfg', 'w') + # remove menuentry of the image in grub.cfg + config.write(old_config.replace(menuentry, "")) + config.close() + click.echo('Done') + + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + click.echo('Removing image root filesystem...') + subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) + click.echo('Done') + + run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + click.echo('Image removed') + + @classmethod + def detect(cls): + return os.path.isfile(os.path.join(HOST_PATH, 'grub/grub.cfg')) diff --git a/sonic_installer/bootloader/onie.py b/sonic_installer/bootloader/onie.py new file mode 100644 index 0000000000..ca16172efa --- /dev/null +++ b/sonic_installer/bootloader/onie.py @@ -0,0 +1,48 @@ +""" +Common logic for bootloaders using an ONIE installer image +""" + +import os +import re +import signal +import subprocess + +from ..common import ( + IMAGE_DIR_PREFIX, + IMAGE_PREFIX, +) +from .bootloader import Bootloader + +# Needed to prevent "broken pipe" error messages when piping +# output of multiple commands using subprocess.Popen() +def default_sigpipe(): + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + +class OnieInstallerBootloader(Bootloader): # pylint: disable=abstract-method + + DEFAULT_IMAGE_PATH = '/tmp/sonic_image' + + def get_current_image(self): + cmdline = open('/proc/cmdline', 'r') + current = re.search(r"loop=(\S+)/fs.squashfs", cmdline.read()).group(1) + cmdline.close() + return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + + def get_binary_image_version(self, image_path): + """returns the version of the image""" + p1 = subprocess.Popen(["cat", "-v", image_path], stdout=subprocess.PIPE, preexec_fn=default_sigpipe) + p2 = subprocess.Popen(["grep", "-m 1", "^image_version"], stdin=p1.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) + p3 = subprocess.Popen(["sed", "-n", r"s/^image_version=\"\(.*\)\"$/\1/p"], 
stdin=p2.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) + + stdout = p3.communicate()[0] + p3.wait() + version_num = stdout.rstrip('\n') + + # If we didn't read a version number, this doesn't appear to be a valid SONiC image file + if not version_num: + return None + + return IMAGE_PREFIX + version_num + + def verify_binary_image(self, image_path): + return os.path.isfile(image_path) diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py new file mode 100644 index 0000000000..47252dd6af --- /dev/null +++ b/sonic_installer/bootloader/uboot.py @@ -0,0 +1,83 @@ +""" +Bootloader implementation for uboot based platforms +""" + +import platform +import subprocess + +import click + +from ..common import ( + HOST_PATH, + IMAGE_DIR_PREFIX, + IMAGE_PREFIX, + run_command, +) +from .onie import OnieInstallerBootloader + +class UbootBootloader(OnieInstallerBootloader): + + NAME = 'uboot' + + def get_installed_images(self): + images = [] + proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_1", shell=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + image = out.rstrip() + if IMAGE_PREFIX in image: + images.append(image) + proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_2", shell=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + image = out.rstrip() + if IMAGE_PREFIX in image: + images.append(image) + return images + + def get_next_image(self): + images = self.get_installed_images() + proc = subprocess.Popen("/usr/bin/fw_printenv -n boot_next", shell=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + image = out.rstrip() + if "sonic_image_2" in image: + next_image_index = 1 + else: + next_image_index = 0 + return images[next_image_index] + + def set_default_image(self, image): + images = self.get_installed_images() + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv 
boot_next "run sonic_image_2"') + return True + + def set_next_image(self, image): + images = self.get_installed_images() + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_once "run sonic_image_1"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv boot_once "run sonic_image_2"') + return True + + def install_image(self, image_path): + run_command("bash " + image_path) + + def remove_image(self, image): + click.echo('Updating next boot ...') + images = self.get_installed_images() + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') + run_command('/usr/bin/fw_setenv sonic_version_1 "NONE"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') + run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"') + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + click.echo('Removing image root filesystem...') + subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) + click.echo('Done') + + @classmethod + def detect(cls): + arch = platform.machine() + return ("arm" in arch) or ("aarch64" in arch) diff --git a/sonic_installer/common.py b/sonic_installer/common.py new file mode 100644 index 0000000000..f12454042a --- /dev/null +++ b/sonic_installer/common.py @@ -0,0 +1,25 @@ +""" +Module holding common functions and constants used by sonic_installer and its +subpackages. 
+""" + +import subprocess +import sys + +import click + +HOST_PATH = '/host' +IMAGE_PREFIX = 'SONiC-OS-' +IMAGE_DIR_PREFIX = 'image-' + +# Run bash command and print output to stdout +def run_command(command): + click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) + + proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + + click.echo(out) + + if proc.returncode != 0: + sys.exit(proc.returncode) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index fb8179c9c6..3c68bcd843 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -1,8 +1,6 @@ #! /usr/bin/python -u import os -import re -import signal import sys import time import click @@ -10,45 +8,30 @@ import syslog import subprocess from swsssdk import SonicV2Connector -import collections -import platform - -HOST_PATH = '/host' -IMAGE_PREFIX = 'SONiC-OS-' -IMAGE_DIR_PREFIX = 'image-' -ONIE_DEFAULT_IMAGE_PATH = '/tmp/sonic_image' -ABOOT_DEFAULT_IMAGE_PATH = '/tmp/sonic_image.swi' -IMAGE_TYPE_ABOOT = 'aboot' -IMAGE_TYPE_ONIE = 'onie' -ABOOT_BOOT_CONFIG = '/boot-config' -BOOTLOADER_TYPE_GRUB = 'grub' -BOOTLOADER_TYPE_UBOOT = 'uboot' -ARCH = platform.machine() -BOOTLOADER = BOOTLOADER_TYPE_UBOOT if ("arm" in ARCH) or ("aarch64" in ARCH) else BOOTLOADER_TYPE_GRUB + +from .bootloader import get_bootloader +from .common import run_command # # Helper functions # -# Needed to prevent "broken pipe" error messages when piping -# output of multiple commands using subprocess.Popen() -def default_sigpipe(): - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - +_start_time = None +_last_time = None def reporthook(count, block_size, total_size): - global start_time, last_time + global _start_time, _last_time cur_time = int(time.time()) if count == 0: - start_time = cur_time - last_time = cur_time + _start_time = cur_time + _last_time = cur_time return - if cur_time == last_time: + if cur_time == _last_time: return - last_time = 
cur_time + _last_time = cur_time - duration = cur_time - start_time + duration = cur_time - _start_time progress_size = int(count * block_size) speed = int(progress_size / (1024 * duration)) percent = int(count * block_size * 100 / total_size) @@ -57,226 +40,13 @@ def reporthook(count, block_size, total_size): (percent, progress_size / (1024 * 1024), speed, time_left)) sys.stdout.flush() -def get_running_image_type(): - """ Attempt to determine whether we are running an ONIE or Aboot image """ - cmdline = open('/proc/cmdline', 'r') - if "Aboot=" in cmdline.read(): - return IMAGE_TYPE_ABOOT - return IMAGE_TYPE_ONIE - -# Returns None if image doesn't exist or isn't a regular file -def get_binary_image_type(binary_image_path): - """ Attempt to determine whether this is an ONIE or Aboot image file """ - if not os.path.isfile(binary_image_path): - return None - - with open(binary_image_path) as f: - # Aboot file is a zip archive; check the start of the file for the zip magic number - if f.read(4) == "\x50\x4b\x03\x04": - return IMAGE_TYPE_ABOOT - return IMAGE_TYPE_ONIE - -# Returns None if image doesn't exist or doesn't appear to be a valid SONiC image file -def get_binary_image_version(binary_image_path): - binary_type = get_binary_image_type(binary_image_path) - if not binary_type: - return None - elif binary_type == IMAGE_TYPE_ABOOT: - p1 = subprocess.Popen(["unzip", "-p", binary_image_path, "boot0"], stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - p2 = subprocess.Popen(["grep", "-m 1", "^image_name"], stdin=p1.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - p3 = subprocess.Popen(["sed", "-n", r"s/^image_name=\"\image-\(.*\)\"$/\1/p"], stdin=p2.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - else: - p1 = subprocess.Popen(["cat", "-v", binary_image_path], stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - p2 = subprocess.Popen(["grep", "-m 1", "^image_version"], stdin=p1.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - 
p3 = subprocess.Popen(["sed", "-n", r"s/^image_version=\"\(.*\)\"$/\1/p"], stdin=p2.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - - stdout = p3.communicate()[0] - p3.wait() - version_num = stdout.rstrip('\n') - - # If we didn't read a version number, this doesn't appear to be a valid SONiC image file - if len(version_num) == 0: - return None - - return IMAGE_PREFIX + version_num - -# Sets specified image as default image to boot from -def set_default_image(image): - images = get_installed_images() - if image not in images: - return False - - if get_running_image_type() == IMAGE_TYPE_ABOOT: - image_path = aboot_image_path(image) - aboot_boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - command = 'grub-set-default --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) - run_command(command) - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - if image in images[0]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') - elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') - - return True - -def aboot_read_boot_config(path): - config = collections.OrderedDict() - with open(path) as f: - for line in f.readlines(): - line = line.strip() - if not line or line.startswith('#') or '=' not in line: - continue - key, value = line.split('=', 1) - config[key] = value - return config - -def aboot_write_boot_config(path, config): - with open(path, 'w') as f: - f.write(''.join( '%s=%s\n' % (k, v) for k, v in config.items())) - -def aboot_boot_config_set(**kwargs): - path = kwargs.get('path', HOST_PATH + ABOOT_BOOT_CONFIG) - config = aboot_read_boot_config(path) - for key, value in kwargs.items(): - config[key] = value - aboot_write_boot_config(path, config) - -def aboot_image_path(image): - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) - return 'flash:%s/.sonic-boot.swi' % image_dir - -# Run bash command and print output to stdout -def 
run_command(command): - click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) - - proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) - (out, err) = proc.communicate() - - click.echo(out) - - if proc.returncode != 0: - sys.exit(proc.returncode) - -# Returns list of installed images -def get_installed_images(): - images = [] - if get_running_image_type() == IMAGE_TYPE_ABOOT: - for filename in os.listdir(HOST_PATH): - if filename.startswith(IMAGE_DIR_PREFIX): - images.append(filename.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX)) - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - config = open(HOST_PATH + '/grub/grub.cfg', 'r') - for line in config: - if line.startswith('menuentry'): - image = line.split()[1].strip("'") - if IMAGE_PREFIX in image: - images.append(image) - config.close() - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_1", shell=True, stdout=subprocess.PIPE) - (out, err) = proc.communicate() - image = out.rstrip() - if IMAGE_PREFIX in image: - images.append(image) - proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_2", shell=True, stdout=subprocess.PIPE) - (out, err) = proc.communicate() - image = out.rstrip() - if IMAGE_PREFIX in image: - images.append(image) - return images - -# Returns name of current image -def get_current_image(): - cmdline = open('/proc/cmdline', 'r') - current = re.search("loop=(\S+)/fs.squashfs", cmdline.read()).group(1) - cmdline.close() - return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) - -# Returns name of next boot image -def get_next_image(): - if get_running_image_type() == IMAGE_TYPE_ABOOT: - config = open(HOST_PATH + ABOOT_BOOT_CONFIG, 'r') - next_image = re.search("SWI=flash:(\S+)/", config.read()).group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) - config.close() - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - images = get_installed_images() - grubenv = subprocess.check_output(["/usr/bin/grub-editenv", 
HOST_PATH + "/grub/grubenv", "list"]) - m = re.search("next_entry=(\d+)", grubenv) - if m: - next_image_index = int(m.group(1)) - else: - m = re.search("saved_entry=(\d+)", grubenv) - if m: - next_image_index = int(m.group(1)) - else: - next_image_index = 0 - next_image = images[next_image_index] - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - images = get_installed_images() - proc = subprocess.Popen("/usr/bin/fw_printenv -n boot_next", shell=True, stdout=subprocess.PIPE) - (out, err) = proc.communicate() - image = out.rstrip() - if "sonic_image_2" in image: - next_image_index = 1 - else: - next_image_index = 0 - next_image = images[next_image_index] - return next_image - -def remove_image(image): - if get_running_image_type() == IMAGE_TYPE_ABOOT: - nextimage = get_next_image() - current = get_current_image() - if image == nextimage: - image_path = aboot_image_path(current) - aboot_boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) - click.echo("Set next and default boot to current image %s" % current) - - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) - click.echo('Removing image root filesystem...') - subprocess.call(['rm','-rf', os.path.join(HOST_PATH, image_dir)]) - click.echo('Image removed') - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - click.echo('Updating GRUB...') - config = open(HOST_PATH + '/grub/grub.cfg', 'r') - old_config = config.read() - menuentry = re.search("menuentry '" + image + "[^}]*}", old_config).group() - config.close() - config = open(HOST_PATH + '/grub/grub.cfg', 'w') - # remove menuentry of the image in grub.cfg - config.write(old_config.replace(menuentry, "")) - config.close() - click.echo('Done') - - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) - click.echo('Removing image root filesystem...') - subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) - click.echo('Done') - - run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') - click.echo('Image removed') - elif BOOTLOADER == 
BOOTLOADER_TYPE_UBOOT: - click.echo('Updating next boot ...') - images = get_installed_images() - if image in images[0]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') - run_command('/usr/bin/fw_setenv sonic_version_1 "NONE"') - elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') - run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"') - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) - click.echo('Removing image root filesystem...') - subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) - click.echo('Done') - # TODO: Embed tag name info into docker image meta data at build time, # and extract tag name from docker image file. def get_docker_tag_name(image): # Try to get tag name from label metadata cmd = "docker inspect --format '{{.ContainerConfig.Labels.Tag}}' " + image proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) - (out, err) = proc.communicate() + (out, _) = proc.communicate() if proc.returncode != 0: return "unknown" tag = out.rstrip() @@ -292,7 +62,7 @@ def validate_url_or_abort(url): urlfile = urllib.urlopen(url) response_code = urlfile.getcode() urlfile.close() - except IOError, err: + except IOError: response_code = None if not response_code: @@ -313,7 +83,7 @@ def get_container_image_name(container_name): # example image: docker-lldp-sv2:latest cmd = "docker inspect --format '{{.Config.Image}}' " + container_name proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) - (out, err) = proc.communicate() + (out, _) = proc.communicate() if proc.returncode != 0: sys.exit(proc.returncode) image_latest = out.rstrip() @@ -374,52 +144,42 @@ def cli(): @click.argument('url') def install(url, force, skip_migration=False): """ Install image from local binary or URL""" - if get_running_image_type() == IMAGE_TYPE_ABOOT: - DEFAULT_IMAGE_PATH = ABOOT_DEFAULT_IMAGE_PATH - else: - DEFAULT_IMAGE_PATH = ONIE_DEFAULT_IMAGE_PATH + bootloader = get_bootloader() if 
url.startswith('http://') or url.startswith('https://'): click.echo('Downloading image...') validate_url_or_abort(url) try: - urllib.urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook) + urllib.urlretrieve(url, bootloader.DEFAULT_IMAGE_PATH, reporthook) + click.echo('') except Exception as e: click.echo("Download error", e) raise click.Abort() - image_path = DEFAULT_IMAGE_PATH + image_path = bootloader.DEFAULT_IMAGE_PATH else: image_path = os.path.join("./", url) - running_image_type = get_running_image_type() - binary_image_type = get_binary_image_type(image_path) - binary_image_version = get_binary_image_version(image_path) - if not binary_image_type or not binary_image_version: + binary_image_version = bootloader.get_binary_image_version(image_path) + if not binary_image_version: click.echo("Image file does not exist or is not a valid SONiC image file") raise click.Abort() # Is this version already installed? - if binary_image_version in get_installed_images(): + if binary_image_version in bootloader.get_installed_images(): click.echo("Image {} is already installed. 
Setting it as default...".format(binary_image_version)) - if not set_default_image(binary_image_version): + if not bootloader.set_default_image(binary_image_version): click.echo('Error: Failed to set image as default') raise click.Abort() else: # Verify that the binary image is of the same type as the running image - if (binary_image_type != running_image_type) and not force: - click.echo("Image file '{}' is of a different type than running image.\n" + - "If you are sure you want to install this image, use -f|--force.\n" + + if not bootloader.verify_binary_image(image_path) and not force: + click.echo("Image file '{}' is of a different type than running image.\n" + "If you are sure you want to install this image, use -f|--force.\n" "Aborting...".format(image_path)) raise click.Abort() click.echo("Installing image {} and setting it as default...".format(binary_image_version)) - if running_image_type == IMAGE_TYPE_ABOOT: - run_command("/usr/bin/unzip -od /tmp %s boot0" % image_path) - run_command("swipath=%s target_path=/host sonic_upgrade=1 . 
/tmp/boot0" % image_path) - else: - run_command("bash " + image_path) - if BOOTLOADER == BOOTLOADER_TYPE_GRUB: - run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + bootloader.install_image(image_path) # Take a backup of current configuration if skip_migration: click.echo("Skipping configuration migration as requested in the command option.") @@ -433,12 +193,13 @@ def install(url, force, skip_migration=False): # List installed images -@cli.command() -def list(): +@cli.command('list') +def list_command(): """ Print installed images """ - images = get_installed_images() - curimage = get_current_image() - nextimage = get_next_image() + bootloader = get_bootloader() + images = bootloader.get_installed_images() + curimage = bootloader.get_current_image() + nextimage = bootloader.get_next_image() click.echo("Current: " + curimage) click.echo("Next: " + nextimage) click.echo("Available: ") @@ -450,32 +211,22 @@ def list(): @click.argument('image') def set_default(image): """ Choose image to boot from by default """ - if not set_default_image(image): + bootloader = get_bootloader() + if image not in bootloader.get_installed_images(): click.echo('Error: Image does not exist') raise click.Abort() - + bootloader.set_default_image(image) # Set image for next boot @cli.command('set_next_boot') @click.argument('image') def set_next_boot(image): """ Choose image for next reboot (one time action) """ - images = get_installed_images() - if image not in images: - click.echo('Image does not exist') + bootloader = get_bootloader() + if image not in bootloader.get_installed_images(): + click.echo('Error: Image does not exist') sys.exit(1) - if get_running_image_type() == IMAGE_TYPE_ABOOT: - image_path = aboot_image_path(image) - aboot_boot_config_set(SWI=image_path) - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - command = 'grub-reboot --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) - run_command(command) - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - if 
image in images[0]: - run_command('/usr/bin/fw_setenv boot_once "run sonic_image_1"') - elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_once "run sonic_image_2"') - + bootloader.set_next_image(image) # Uninstall image @cli.command() @@ -484,28 +235,30 @@ def set_next_boot(image): @click.argument('image') def remove(image): """ Uninstall image """ - images = get_installed_images() - current = get_current_image() + bootloader = get_bootloader() + images = bootloader.get_installed_images() + current = bootloader.get_current_image() if image not in images: click.echo('Image does not exist') sys.exit(1) if image == current: click.echo('Cannot remove current image') sys.exit(1) - - remove_image(image) + # TODO: check if image is next boot or default boot and fix these + bootloader.remove_image(image) # Retrieve version from binary image file and print to screen @cli.command('binary_version') @click.argument('binary_image_path') def binary_version(binary_image_path): """ Get version from local binary image file """ - binary_version = get_binary_image_version(binary_image_path) - if not binary_version: + bootloader = get_bootloader() + version = bootloader.get_binary_image_version(binary_image_path) + if not version: click.echo("Image file does not exist or is not a valid SONiC image file") sys.exit(1) else: - click.echo(binary_version) + click.echo(version) # Remove installed images which are not current and next @cli.command() @@ -513,14 +266,15 @@ def binary_version(binary_image_path): expose_value=False, prompt='Remove images which are not current and next, continue?') def cleanup(): """ Remove installed images which are not current and next """ - images = get_installed_images() - curimage = get_current_image() - nextimage = get_next_image() + bootloader = get_bootloader() + images = bootloader.get_installed_images() + curimage = bootloader.get_current_image() + nextimage = bootloader.get_next_image() image_removed = 0 for image in images: if image != 
curimage and image != nextimage: click.echo("Removing image %s" % image) - remove_image(image) + bootloader.remove_image(image) image_removed += 1 if image_removed == 0: