diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml index 3d7bead8..7bb5e5a3 100644 --- a/.github/workflows/build-container.yml +++ b/.github/workflows/build-container.yml @@ -31,6 +31,12 @@ jobs: with: submodules: recursive + - name: Install Flake8 + run: pip install flake8 + + - name: Verify Python source files + run: make verify + - name: Build container images - spdk run: make build SVC="spdk" SPDK_TARGET_ARCH=x86-64-v2 diff --git a/Makefile b/Makefile index 0b44bdda..149f5eab 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,10 @@ include mk/autohelp.mk .DEFAULT_GOAL := all all: setup $(ALL) +verify: ## Run Python source files through flake8 + @echo Verifying Python source files + flake8 control/*.py + setup: ## Configure huge-pages (requires sudo/root password) @echo Setup core dump pattern as /tmp/coredump/core.* diff --git a/control/cephutils.py b/control/cephutils.py index 7452a008..be0be408 100644 --- a/control/cephutils.py +++ b/control/cephutils.py @@ -13,6 +13,7 @@ import json from .utils import GatewayLogger + class CephUtils: """Miscellaneous functions which connect to Ceph """ @@ -27,11 +28,12 @@ def __init__(self, config): self.last_sent = time.time() def execute_ceph_monitor_command(self, cmd): - self.logger.debug(f"Execute monitor command: {cmd}") - with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster: + self.logger.debug(f"Execute monitor command: {cmd}") + with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster: rply = cluster.mon_command(cmd, b'') self.logger.debug(f"Monitor reply: {rply}") return rply + def get_gw_id_owner_ana_group(self, pool, group, anagrp): str = '{' + f'"prefix":"nvme-gw show", "pool":"{pool}", "group":"{group}"' + '}' self.logger.debug(f"nvme-show string: {str}") @@ -45,23 +47,23 @@ def get_gw_id_owner_ana_group(self, pool, group, anagrp): comp_str = f"{anagrp}: ACTIVE" for gateway in data["Created Gateways:"]: if comp_str in gateway["ana states"]: - gw_id = gateway["gw-id"] - self.logger.debug(f"found gw owner of anagrp {anagrp}: gw {gw_id}") - break + gw_id = gateway["gw-id"] + self.logger.debug(f"found gw owner of anagrp {anagrp}: gw {gw_id}") + break return gw_id def is_rebalance_supported(self): - return self.rebalance_supported + return self.rebalance_supported def get_rebalance_ana_group(self): - return self.rebalance_ana_group + return self.rebalance_ana_group def get_number_created_gateways(self, pool, group): now = time.time() - if (now - self.last_sent) < 10 and self.anagroup_list : - self.logger.info(f"Caching response of the monitor: {self.anagroup_list}") - return self.anagroup_list - else : + if (now - self.last_sent) < 10 and self.anagroup_list: + self.logger.info(f"Caching response of the monitor: {self.anagroup_list}") + return self.anagroup_list + else: try: self.anagroup_list = [] self.last_sent = now @@ -76,12 +78,12 @@ def get_number_created_gateways(self, pool, group): self.rebalance_supported = True self.rebalance_ana_group = data.get("rebalance_ana_group", None) self.logger.debug(f"Rebalance ana_group: {self.rebalance_ana_group}") - else : + else: self.rebalance_supported = False pos = conv_str.find("[") if pos != -1: - new_str = conv_str[pos + len("[") :] - pos = new_str.find("]") + new_str = conv_str[pos + len("["):] + pos = new_str.find("]") new_str = new_str[: pos].strip() int_str_list = new_str.split(' ') self.logger.debug(f"new_str : {new_str}") @@ -92,7 +94,7 @@ def get_number_created_gateways(self, pool, group): 
self.logger.warning("GWs not found") except Exception: - self.logger.exception(f"Failure get number created gateways:") + self.logger.exception("Failure get number created gateways") self.anagroup_list = [] return self.anagroup_list @@ -104,7 +106,7 @@ def fetch_and_display_ceph_version(self): ceph_ver = ceph_ver.removeprefix("ceph version ") self.logger.info(f"Connected to Ceph with version \"{ceph_ver}\"") except Exception: - self.logger.exception(f"Failure fetching Ceph version:") + self.logger.exception("Failure fetching Ceph version") pass def fetch_ceph_fsid(self) -> str: @@ -113,7 +115,7 @@ def fetch_ceph_fsid(self) -> str: with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster: fsid = cluster.get_fsid() except Exception: - self.logger.exception(f"Failure fetching Ceph fsid:") + self.logger.exception("Failure fetching Ceph fsid") return fsid @@ -130,24 +132,24 @@ def pool_exists(self, pool) -> bool: def service_daemon_register(self, cluster, metadata): try: - if cluster: # rados client + if cluster: # rados client daemon_name = metadata['id'] cluster.service_daemon_register("nvmeof", daemon_name, metadata) self.logger.info(f"Registered {daemon_name} to service_map!") except Exception: - self.logger.exception(f"Can't register daemon to service_map!") + self.logger.exception("Can't register daemon to service_map!") def service_daemon_update(self, cluster, status_buffer): try: if cluster and status_buffer: cluster.service_daemon_update(status_buffer) except Exception: - self.logger.exception(f"Can't update daemon status to service_map!") + self.logger.exception("Can't update daemon status to service_map!") def create_image(self, pool_name, image_name, size) -> bool: # Check for pool existence in advance as we don't create it if it's not there if not self.pool_exists(pool_name): - raise rbd.ImageNotFound(f"Pool {pool_name} doesn't exist", errno = errno.ENODEV) + raise rbd.ImageNotFound(f"Pool {pool_name} doesn't exist", errno=errno.ENODEV) image_exists = False try: @@ -160,7 +162,7 @@ def create_image(self, pool_name, image_name, size) -> bool: if image_exists: if image_size != size: raise rbd.ImageExists(f"Image {pool_name}/{image_name} already exists with a size of {image_size} bytes which differs from the requested size of {size} bytes", - errno = errno.EEXIST) + errno=errno.EEXIST) return False # Image exists with an idetical size, there is nothing to do here with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster: @@ -168,29 +170,29 @@ def create_image(self, pool_name, image_name, size) -> bool: rbd_inst = rbd.RBD() try: rbd_inst.create(ioctx, image_name, size) - except rbd.ImageExists as ex: + except rbd.ImageExists: self.logger.exception(f"Image {pool_name}/{image_name} was created just now") raise rbd.ImageExists(f"Image {pool_name}/{image_name} was just created by someone else, please retry", - errno = errno.EAGAIN) - except Exception as ex: + errno=errno.EAGAIN) + except Exception: self.logger.exception(f"Can't create image {pool_name}/{image_name}") - raise ex + raise return True def get_image_size(self, pool_name, image_name) -> int: image_size = 0 if not self.pool_exists(pool_name): - raise rbd.ImageNotFound(f"Pool {pool_name} doesn't exist", errno = errno.ENODEV) + raise rbd.ImageNotFound(f"Pool {pool_name} doesn't exist", errno=errno.ENODEV) with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster: with cluster.open_ioctx(pool_name) as ioctx: - rbd_inst = rbd.RBD() + rbd.RBD() try: with rbd.Image(ioctx, 
image_name) as img: image_size = img.size() except rbd.ImageNotFound: - raise rbd.ImageNotFound(f"Image {pool_name}/{image_name} doesn't exist", errno = errno.ENODEV) + raise rbd.ImageNotFound(f"Image {pool_name}/{image_name} doesn't exist", errno=errno.ENODEV) except Exception as ex: self.logger.exception(f"Error while trying to get the size of image {pool_name}/{image_name}") raise ex @@ -205,6 +207,6 @@ def get_rbd_exception_details(self, ex): if msg.startswith("["): pos = msg.find("]") if pos >= 0: - msg = msg[pos + 1 :].strip() + msg = msg[pos + 1:].strip() ex_details = (ex.errno, msg) return ex_details diff --git a/control/cli.py b/control/cli.py index 12284baa..8274ee70 100644 --- a/control/cli.py +++ b/control/cli.py @@ -25,16 +25,19 @@ from .utils import GatewayUtils from .utils import GatewayEnumUtils -BASE_GATEWAY_VERSION="1.1.0" +BASE_GATEWAY_VERSION = "1.1.0" + def errprint(msg): - print(msg, file = sys.stderr) + print(msg, file=sys.stderr) + def argument(*name_or_flags, **kwargs): """Helper function to format arguments for argparse command decorator.""" return (list(name_or_flags), kwargs) -def get_enum_keys_list(e_type, include_first = True): + +def get_enum_keys_list(e_type, include_first=True): k_list = [] for k in e_type.keys(): k_list.append(k.lower()) @@ -44,6 +47,7 @@ def get_enum_keys_list(e_type, include_first = True): return k_list + def break_string(s, delim, count): start = 0 for i in range(count): @@ -53,12 +57,13 @@ def break_string(s, delim, count): start = ind + 1 return s[0:ind + 1] + "\n" + s[ind + 1:] + class ErrorCatchingArgumentParser(argparse.ArgumentParser): def __init__(self, *args, **kwargs): self.logger = logging.getLogger(__name__) super(ErrorCatchingArgumentParser, self).__init__(*args, **kwargs) - def exit(self, status = 0, message = None): + def exit(self, status=0, message=None): if status != 0: if message: self.logger.error(message) @@ -73,6 +78,7 @@ def error(self, message): self.logger.error(f"error: {message}") exit(2) + class Parser: """Class to simplify creation of client CLI. @@ -321,9 +327,9 @@ def gw_get_info(self): gw_info = self.stub.get_gateway_info(req) if gw_info.status == 0: base_ver = self.parse_version_string(BASE_GATEWAY_VERSION) - assert base_ver != None + assert base_ver is not None gw_ver = self.parse_version_string(gw_info.version) - if gw_ver == None: + if gw_ver is None: gw_info.status = errno.EINVAL gw_info.bool_status = False gw_info.error_message = f"Can't parse gateway version \"{gw_info.version}\"." 
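For context on the "!= None" / "== None" comparisons that the hunk above rewrites as "is not None" / "is None" (flake8 E711), here is a minimal standalone illustration, not taken from this codebase, of why identity checks are preferred:

    # Equality with None goes through __eq__, which a class may override;
    # "is None" always tests object identity and cannot be fooled.
    class Permissive:
        def __eq__(self, other):
            return True   # hypothetical class, for illustration only

    obj = Permissive()
    print(obj == None)    # True  -- looks like "no value" even though obj exists
    print(obj is None)    # False -- the result the CLI checks actually want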
@@ -340,7 +346,7 @@ def gw_info(self, args): try: gw_info = self.gw_get_info() except Exception as ex: - gw_info = pb2.gateway_info(status = errno.EINVAL, error_message = f"Failure getting gateway's information:\n{ex}") + gw_info = pb2.gateway_info(status=errno.EINVAL, error_message=f"Failure getting gateway's information:\n{ex}") if args.format == "text" or args.format == "plain": if gw_info.status == 0: @@ -368,19 +374,17 @@ def gw_info(self, args): if gw_info.spdk_version: out_func(f"SPDK version: {gw_info.spdk_version}") if not gw_info.bool_status: - err_func(f"Getting gateway's information returned status mismatch") + err_func("Getting gateway's information returned status mismatch") else: - err_func(f"{gw_info.error_message}") + err_func(gw_info.error_message) if gw_info.bool_status: - err_func(f"Getting gateway's information returned status mismatch") + err_func("Getting gateway's information returned status mismatch") elif args.format == "json" or args.format == "yaml": - gw_info_str = json_format.MessageToJson( - gw_info, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + gw_info_str = json_format.MessageToJson(gw_info, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{gw_info_str}") + out_func(gw_info_str) elif args.format == "yaml": obj = json.loads(gw_info_str) out_func(yaml.dump(obj)) @@ -398,7 +402,7 @@ def gw_version(self, args): try: gw_info = self.gw_get_info() except Exception as ex: - gw_info = pb2.gateway_info(status = errno.EINVAL, error_message = f"Failure getting gateway's version:\n{ex}") + gw_info = pb2.gateway_info(status=errno.EINVAL, error_message=f"Failure getting gateway's version:\n{ex}") if args.format == "text" or args.format == "plain": if gw_info.status == 0: @@ -431,7 +435,7 @@ def gw_get_log_level(self, args): try: ret = self.stub.get_gateway_log_level(req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure getting gateway log level:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure getting gateway log level:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -440,12 +444,11 @@ def gw_get_log_level(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - out_log_level = json_format.MessageToJson(ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + out_log_level = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{out_log_level}") + out_func(out_log_level) elif args.format == "yaml": obj = json.loads(out_log_level) out_func(yaml.dump(obj)) @@ -473,7 +476,7 @@ def gw_set_log_level(self, args): try: ret = self.stub.set_gateway_log_level(req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure setting gateway log level:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure setting gateway log level:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -481,13 +484,11 @@ def gw_set_log_level(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = 
json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -501,10 +502,10 @@ def gw_set_log_level(self, args): type=str, choices=get_enum_keys_list(pb2.GwLogLevel, False)), ] gw_actions = [] - gw_actions.append({"name" : "version", "args" : [], "help" : "Display gateway's version"}) - gw_actions.append({"name" : "info", "args" : [], "help" : "Display gateway's information"}) - gw_actions.append({"name" : "get_log_level", "args" : [], "help" : "Get gateway's log level"}) - gw_actions.append({"name" : "set_log_level", "args" : gw_set_log_level_args, "help" : "Set gateway's log level"}) + gw_actions.append({"name": "version", "args": [], "help": "Display gateway's version"}) + gw_actions.append({"name": "info", "args": [], "help": "Display gateway's information"}) + gw_actions.append({"name": "get_log_level", "args": [], "help": "Get gateway's log level"}) + gw_actions.append({"name": "set_log_level", "args": gw_set_log_level_args, "help": "Set gateway's log level"}) gw_choices = get_actions(gw_actions) @cli.cmd(gw_actions) @@ -531,21 +532,19 @@ def spdk_log_level_disable(self, args): try: ret = self.stub.disable_spdk_nvmf_logs(req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure disabling SPDK nvmf log flags:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure disabling SPDK nvmf log flags:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: - out_func(f"Disable SPDK nvmf log flags: Successful") + out_func("Disable SPDK nvmf log flags: Successful") else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -565,7 +564,7 @@ def spdk_log_level_get(self, args): try: ret = self.stub.get_spdk_nvmf_log_flags_and_level(req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure getting SPDK log levels and nvmf log flags:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure getting SPDK log levels and nvmf log flags:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -579,12 +578,10 @@ def spdk_log_level_get(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - out_log_level = json_format.MessageToJson(ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + out_log_level = json_format.MessageToJson(ret, indent=4, including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{out_log_level}") + out_func(out_log_level) elif args.format == "yaml": obj = json.loads(out_log_level) out_func(yaml.dump(obj)) @@ -616,21 +613,19 @@ def spdk_log_level_set(self, args): try: ret = self.stub.set_spdk_nvmf_logs(req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure setting SPDK log levels and 
nvmf log flags:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure setting SPDK log levels and nvmf log flags:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: - out_func(f"Set SPDK log levels and nvmf log flags: Successful") + out_func("Set SPDK log levels and nvmf log flags: Successful") else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -650,9 +645,9 @@ def spdk_log_level_set(self, args): ] spdk_log_disable_args = [] spdk_log_actions = [] - spdk_log_actions.append({"name" : "get", "args" : spdk_log_get_args, "help" : "Get SPDK log levels and nvmf log flags"}) - spdk_log_actions.append({"name" : "set", "args" : spdk_log_set_args, "help" : "Set SPDK log levels and nvmf log flags"}) - spdk_log_actions.append({"name" : "disable", "args" : spdk_log_disable_args, "help" : "Disable SPDK nvmf log flags"}) + spdk_log_actions.append({"name": "get", "args": spdk_log_get_args, "help": "Get SPDK log levels and nvmf log flags"}) + spdk_log_actions.append({"name": "set", "args": spdk_log_set_args, "help": "Set SPDK log levels and nvmf log flags"}) + spdk_log_actions.append({"name": "disable", "args": spdk_log_disable_args, "help": "Disable SPDK nvmf log flags"}) spdk_log_choices = get_actions(spdk_log_actions) @cli.cmd(spdk_log_actions) @@ -671,28 +666,28 @@ def subsystem_add(self, args): """Create a subsystem""" out_func, err_func = self.get_output_functions(args) - if args.max_namespaces != None and args.max_namespaces <= 0: + if args.max_namespaces is not None and args.max_namespaces <= 0: self.cli.parser.error("--max-namespaces value must be positive") if args.subsystem == GatewayUtils.DISCOVERY_NQN: self.cli.parser.error("Can't add a discovery subsystem") req = pb2.create_subsystem_req(subsystem_nqn=args.subsystem, - serial_number=args.serial_number, - max_namespaces=args.max_namespaces, - enable_ha=True, - no_group_append=args.no_group_append, - dhchap_key=args.dhchap_key) + serial_number=args.serial_number, + max_namespaces=args.max_namespaces, + enable_ha=True, + no_group_append=args.no_group_append, + dhchap_key=args.dhchap_key) try: ret = self.stub.create_subsystem(req) except Exception as ex: - ret = pb2.subsys_status(status = errno.EINVAL, error_message = f"Failure adding subsystem {args.subsystem}:\n{ex}", - nqn = args.subsystem) + ret = pb2.subsys_status(status=errno.EINVAL, error_message=f"Failure adding subsystem {args.subsystem}:\n{ex}", + nqn=args.subsystem) new_nqn = "" try: new_nqn = ret.nqn except Exception: # In case of an old gateway the returned value wouldn't have the nqn field - pass + pass if not new_nqn: new_nqn = args.subsystem @@ -702,13 +697,11 @@ def subsystem_add(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - 
out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -730,7 +723,7 @@ def subsystem_del(self, args): try: ret = self.stub.delete_subsystem(req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure deleting subsystem {args.subsystem}:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure deleting subsystem {args.subsystem}:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -738,13 +731,11 @@ def subsystem_del(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -764,17 +755,17 @@ def subsystem_list(self, args): try: subsystems = self.stub.list_subsystems(pb2.list_subsystems_req(subsystem_nqn=args.subsystem, serial_number=args.serial_number)) except Exception as ex: - subsystems = pb2.subsystems_info_cli(status = errno.EINVAL, error_message = f"Failure listing subsystems:\n{ex}") + subsystems = pb2.subsystems_info_cli(status=errno.EINVAL, error_message=f"Failure listing subsystems:\n{ex}") if args.format == "text" or args.format == "plain": if subsystems.status == 0: subsys_list = [] for s in subsystems.subsystems: if args.subsystem and args.subsystem != s.nqn: - err_func("Failure listing subsystem {args.subsystem}: Got subsystem {s.nqn} instead") + err_func(f"Failure listing subsystem {args.subsystem}: Got subsystem {s.nqn} instead") return errno.ENODEV if args.serial_number and args.serial_number != s.serial_number: - err_func("Failure listing subsystem with serial number {args.serial_number}: Got serial number {s.serial_number} instead") + err_func(f"Failure listing subsystem with serial number {args.serial_number}: Got serial number {s.serial_number} instead") return errno.ENODEV ctrls_id = f"{s.min_cntlid}-{s.max_cntlid}" has_dhchap = "Yes" if s.has_dhchap_key else "No" @@ -787,9 +778,9 @@ def subsystem_list(self, args): else: table_format = "plain" subsys_out = tabulate(subsys_list, - headers = ["Subtype", "NQN", "Serial\nNumber", "Controller IDs", - "Namespace\nCount", "Max\nNamespaces", "Allow\nAny Host", "DHCHAP\nKey"], - tablefmt=table_format) + headers=["Subtype", "NQN", "Serial\nNumber", "Controller IDs", + "Namespace\nCount", "Max\nNamespaces", "Allow\nAny Host", "DHCHAP\nKey"], + tablefmt=table_format) prefix = "Subsystems" if args.subsystem: prefix = f"Subsystem {args.subsystem}" @@ -802,17 +793,15 @@ def subsystem_list(self, args): elif args.serial_number: out_func(f"No subsystem with serial number {args.serial_number}") else: - out_func(f"No subsystems") + out_func("No subsystems") else: err_func(f"{subsystems.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - subsystems, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(subsystems, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = 
json.loads(ret_str) out_func(yaml.dump(obj)) @@ -833,7 +822,7 @@ def subsystem_change_key(self, args): ret = self.stub.change_subsystem_key(req) except Exception as ex: errmsg = f"Failure changing key for subsystem {args.subsystem}" - ret = pb2.req_status(status = errno.EINVAL, error_message = f"{errmsg}:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"{errmsg}:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -841,13 +830,11 @@ def subsystem_change_key(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -878,10 +865,10 @@ def subsystem_change_key(self, args): argument("--dhchap-key", "-k", help="Subsystem DH-HMAC-CHAP key", required=False), ] subsystem_actions = [] - subsystem_actions.append({"name" : "add", "args" : subsys_add_args, "help" : "Create a subsystem"}) - subsystem_actions.append({"name" : "del", "args" : subsys_del_args, "help" : "Delete a subsystem"}) - subsystem_actions.append({"name" : "list", "args" : subsys_list_args, "help" : "List subsystems"}) - subsystem_actions.append({"name" : "change_key", "args" : subsys_change_key_args, "help" : "Change subsystem key"}) + subsystem_actions.append({"name": "add", "args": subsys_add_args, "help": "Create a subsystem"}) + subsystem_actions.append({"name": "del", "args": subsys_del_args, "help": "Delete a subsystem"}) + subsystem_actions.append({"name": "list", "args": subsys_list_args, "help": "List subsystems"}) + subsystem_actions.append({"name": "change_key", "args": subsys_change_key_args, "help": "Change subsystem key"}) subsystem_choices = get_actions(subsystem_actions) @cli.cmd(subsystem_actions) @@ -903,7 +890,7 @@ def listener_add(self, args): out_func, err_func = self.get_output_functions(args) - if args.trsvcid == None: + if args.trsvcid is None: args.trsvcid = 4420 elif args.trsvcid <= 0: self.cli.parser.error("trsvcid value must be positive") @@ -929,8 +916,8 @@ def listener_add(self, args): try: ret = self.stub.create_listener(req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, - error_message = f"Failure adding {args.subsystem} listener at {traddr}:{args.trsvcid}:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, + error_message=f"Failure adding {args.subsystem} listener at {traddr}:{args.trsvcid}:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -938,13 +925,11 @@ def listener_add(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -986,8 +971,8 @@ def listener_del(self, args): try: ret = self.stub.delete_listener(req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, - 
error_message = f"Failure deleting listener {traddr}:{args.trsvcid} from {args.subsystem}:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, + error_message=f"Failure deleting listener {traddr}:{args.trsvcid} from {args.subsystem}:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -996,13 +981,11 @@ def listener_del(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1021,7 +1004,7 @@ def listener_list(self, args): try: listeners_info = self.stub.list_listeners(pb2.list_listeners_req(subsystem=args.subsystem)) except Exception as ex: - listeners_info = pb2.listeners_info(status = errno.EINVAL, error_message = f"Failure listing listeners:\n{ex}", listeners=[]) + listeners_info = pb2.listeners_info(status=errno.EINVAL, error_message=f"Failure listing listeners:\n{ex}", listeners=[]) if args.format == "text" or args.format == "plain": if listeners_info.status == 0: @@ -1037,21 +1020,19 @@ def listener_list(self, args): else: table_format = "plain" listeners_out = tabulate(listeners_list, - headers = ["Host", "Transport", "Address Family", "Address", "Secure"], - tablefmt=table_format) + headers=["Host", "Transport", "Address Family", "Address", "Secure"], + tablefmt=table_format) out_func(f"Listeners for {args.subsystem}:\n{listeners_out}") else: out_func(f"No listeners for {args.subsystem}") else: err_func(f"{listeners_info.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - listeners_info, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(listeners_info, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1082,9 +1063,9 @@ def listener_list(self, args): listener_list_args = listener_common_args + [ ] listener_actions = [] - listener_actions.append({"name" : "add", "args" : listener_add_args, "help" : "Create a listener"}) - listener_actions.append({"name" : "del", "args" : listener_del_args, "help" : "Delete a listener"}) - listener_actions.append({"name" : "list", "args" : listener_list_args, "help" : "List listeners"}) + listener_actions.append({"name": "add", "args": listener_add_args, "help": "Create a listener"}) + listener_actions.append({"name": "del", "args": listener_del_args, "help": "Delete a listener"}) + listener_actions.append({"name": "list", "args": listener_list_args, "help": "List listeners"}) listener_choices = get_actions(listener_actions) @cli.cmd(listener_actions) @@ -1108,18 +1089,18 @@ def host_add(self, args): if args.psk: if len(args.host_nqn) > 1: - self.cli.parser.error(f"Can't have more than one host NQN when PSK keys are used") + self.cli.parser.error("Can't have more than one host NQN when PSK keys are used") if args.dhchap_key: if len(args.host_nqn) > 1: - self.cli.parser.error(f"Can't have more than one host NQN when DH-HMAC-CHAP keys are used") + 
self.cli.parser.error("Can't have more than one host NQN when DH-HMAC-CHAP keys are used") for one_host_nqn in args.host_nqn: if one_host_nqn == "*" and args.psk: - self.cli.parser.error(f"PSK key is only allowed for specific hosts") + self.cli.parser.error("PSK key is only allowed for specific hosts") if one_host_nqn == "*" and args.dhchap_key: - self.cli.parser.error(f"DH-HMAC-CHAP key is only allowed for specific hosts") + self.cli.parser.error("DH-HMAC-CHAP key is only allowed for specific hosts") req = pb2.add_host_req(subsystem_nqn=args.subsystem, host_nqn=one_host_nqn, psk=args.psk, dhchap_key=args.dhchap_key) @@ -1130,7 +1111,7 @@ def host_add(self, args): errmsg = f"Failure allowing open host access to {args.subsystem}" else: errmsg = f"Failure adding host {one_host_nqn} to {args.subsystem}" - ret = pb2.req_status(status = errno.EINVAL, error_message = f"{errmsg}:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"{errmsg}:\n{ex}") if not rc: rc = ret.status @@ -1144,13 +1125,11 @@ def host_add(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1180,7 +1159,7 @@ def host_del(self, args): errmsg = f"Failure disabling open host access to {args.subsystem}" else: errmsg = f"Failure removing host {one_host_nqn} access to {args.subsystem}" - ret = pb2.req_status(status = errno.EINVAL, error_message = f"{errmsg}:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"{errmsg}:\n{ex}") if not rc: rc = ret.status @@ -1194,13 +1173,11 @@ def host_del(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1220,7 +1197,7 @@ def host_change_key(self, args): out_func, err_func = self.get_output_functions(args) if args.host_nqn == "*": - self.cli.parser.error(f"Can't change keys for host NQN '*', please use a real NQN") + self.cli.parser.error("Can't change keys for host NQN '*', please use a real NQN") req = pb2.change_host_key_req(subsystem_nqn=args.subsystem, host_nqn=args.host_nqn, dhchap_key=args.dhchap_key) @@ -1228,21 +1205,19 @@ def host_change_key(self, args): ret = self.stub.change_host_key(req) except Exception as ex: errmsg = f"Failure changing key for host {args.host_nqn} on subsystem {args.subsystem}" - ret = pb2.req_status(status = errno.EINVAL, error_message = f"{errmsg}:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"{errmsg}:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: out_func(f"Changing key for host {args.host_nqn} on subsystem {args.subsystem}: Successful") else: - err_func(f"{ret.error_message}") + err_func(ret.error_message) elif args.format == "json" or args.format == 
"yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1262,7 +1237,7 @@ def host_list(self, args): try: hosts_info = self.stub.list_hosts(pb2.list_hosts_req(subsystem=args.subsystem)) except Exception as ex: - hosts_info = pb2.hosts_info(status = errno.EINVAL, error_message = f"Failure listing hosts:\n{ex}", hosts=[]) + hosts_info = pb2.hosts_info(status=errno.EINVAL, error_message=f"Failure listing hosts:\n{ex}", hosts=[]) if args.format == "text" or args.format == "plain": if hosts_info.status == 0: @@ -1279,21 +1254,19 @@ def host_list(self, args): else: table_format = "plain" hosts_out = tabulate(hosts_list, - headers = ["Host NQN", "Uses PSK", "Uses DHCHAP"], - tablefmt=table_format, stralign="center") + headers=["Host NQN", "Uses PSK", "Uses DHCHAP"], + tablefmt=table_format, stralign="center") out_func(f"Hosts allowed to access {args.subsystem}:\n{hosts_out}") else: out_func(f"No hosts are allowed to access {args.subsystem}") else: err_func(f"{hosts_info.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - hosts_info, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(hosts_info, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1322,10 +1295,10 @@ def host_list(self, args): argument("--dhchap-key", "-k", help="Host DH-HMAC-CHAP key", required=False), ] host_actions = [] - host_actions.append({"name" : "add", "args" : host_add_args, "help" : "Add host access to a subsystem"}) - host_actions.append({"name" : "del", "args" : host_del_args, "help" : "Remove host access from a subsystem"}) - host_actions.append({"name" : "list", "args" : host_list_args, "help" : "List subsystem's host access"}) - host_actions.append({"name" : "change_key", "args" : host_change_key_args, "help" : "Change host's inband authentication keys"}) + host_actions.append({"name": "add", "args": host_add_args, "help": "Add host access to a subsystem"}) + host_actions.append({"name": "del", "args": host_del_args, "help": "Remove host access from a subsystem"}) + host_actions.append({"name": "list", "args": host_list_args, "help": "List subsystem's host access"}) + host_actions.append({"name": "change_key", "args": host_change_key_args, "help": "Change host's inband authentication keys"}) host_choices = get_actions(host_actions) @cli.cmd(host_actions) @@ -1350,8 +1323,8 @@ def connection_list(self, args): try: connections_info = self.stub.list_connections(pb2.list_connections_req(subsystem=args.subsystem)) except Exception as ex: - connections_info = pb2.connections_info(status = errno.EINVAL, - error_message = f"Failure listing hosts:\n{ex}", connections=[]) + connections_info = pb2.connections_info(status=errno.EINVAL, + error_message=f"Failure listing hosts:\n{ex}", connections=[]) if args.format == "text" or args.format == "plain": if connections_info.status == 0: @@ -1363,34 +1336,33 @@ def connection_list(self, args): if 
conn.connected: conn_secure = "Yes" if conn.secure else "No" connections_list.append([conn.nqn, - f"{conn.traddr}:{conn.trsvcid}" if conn.connected else "", - "Yes" if conn.connected else "No", - conn.qpairs_count if conn.connected else "", - conn.controller_id if conn.connected else "", - conn_secure, - conn_psk, - conn_dhchap]) + f"{conn.traddr}:{conn.trsvcid}" if conn.connected else "", + "Yes" if conn.connected else "No", + conn.qpairs_count if conn.connected else "", + conn.controller_id if conn.connected else "", + conn_secure, + conn_psk, + conn_dhchap]) if len(connections_list) > 0: if args.format == "text": table_format = "fancy_grid" else: table_format = "plain" connections_out = tabulate(connections_list, - headers = ["Host NQN", "Address", "Connected", "QPairs Count", "Controller ID", "Secure", "Uses\nPSK", "Uses\nDHCHAP"], - tablefmt=table_format) + headers=["Host NQN", "Address", "Connected", "QPairs Count", + "Controller ID", "Secure", "Uses\nPSK", "Uses\nDHCHAP"], + tablefmt=table_format) out_func(f"Connections for {args.subsystem}:\n{connections_out}") else: out_func(f"No connections for {args.subsystem}") else: err_func(f"{connections_info.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - connections_info, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(connections_info, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1405,7 +1377,7 @@ def connection_list(self, args): argument("--subsystem", "-n", help="Subsystem NQN", required=True), ] connection_actions = [] - connection_actions.append({"name" : "list", "args" : connection_list_args, "help" : "List active connections"}) + connection_actions.append({"name": "list", "args": connection_list_args, "help": "List active connections"}) connection_choices = get_actions(connection_actions) @cli.cmd(connection_actions) @@ -1421,17 +1393,17 @@ def ns_add(self, args): img_size = 0 out_func, err_func = self.get_output_functions(args) - if args.block_size == None: + if args.block_size is None: args.block_size = 512 if args.block_size <= 0: self.cli.parser.error("block-size value must be positive") if args.load_balancing_group < 0: - self.cli.parser.error("load-balancing-group value must be positive") - if args.nsid != None and args.nsid <= 0: + self.cli.parser.error("load-balancing-group value must be positive") + if args.nsid is not None and args.nsid <= 0: self.cli.parser.error("nsid value must be positive") if args.rbd_create_image: - if args.size == None: + if args.size is None: self.cli.parser.error("--size argument is mandatory for add command when RBD image creation is enabled") img_size = self.get_size_in_bytes(args.size) if img_size <= 0: @@ -1440,20 +1412,20 @@ def ns_add(self, args): if img_size % mib: self.cli.parser.error("size value must be aligned to MiBs") else: - if args.size != None: + if args.size is not None: self.cli.parser.error("--size argument is not allowed for add command when RBD image creation is disabled") req = pb2.namespace_add_req(rbd_pool_name=args.rbd_pool, - rbd_image_name=args.rbd_image, - subsystem_nqn=args.subsystem, - nsid=args.nsid, - block_size=args.block_size, - uuid=args.uuid, - anagrpid=args.load_balancing_group, - create_image=args.rbd_create_image, - size=img_size, 
- force=args.force, - no_auto_visible=args.no_auto_visible) + rbd_image_name=args.rbd_image, + subsystem_nqn=args.subsystem, + nsid=args.nsid, + block_size=args.block_size, + uuid=args.uuid, + anagrpid=args.load_balancing_group, + create_image=args.rbd_create_image, + size=img_size, + force=args.force, + no_auto_visible=args.no_auto_visible) try: ret = self.stub.namespace_add(req) except Exception as ex: @@ -1461,21 +1433,19 @@ def ns_add(self, args): if args.nsid: nsid_msg = f"using NSID {args.nsid} " errmsg = f"Failure adding namespace {nsid_msg}to {args.subsystem}" - ret = pb2.req_status(status = errno.EINVAL, error_message = f"{errmsg}:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"{errmsg}:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: out_func(f"Adding namespace {ret.nsid} to {args.subsystem}: Successful") else: - err_func(f"{ret.error_message}") + err_func(ret.error_message) elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1496,7 +1466,7 @@ def ns_del(self, args): try: ret = self.stub.namespace_delete(pb2.namespace_delete_req(subsystem_nqn=args.subsystem, nsid=args.nsid)) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure deleting namespace:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure deleting namespace:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -1504,13 +1474,11 @@ def ns_del(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1539,7 +1507,7 @@ def ns_resize(self, args): try: ret = self.stub.namespace_resize(pb2.namespace_resize_req(subsystem_nqn=args.subsystem, nsid=args.nsid, new_size=ns_size)) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure resizing namespace:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure resizing namespace:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -1548,13 +1516,11 @@ def ns_resize(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1613,14 +1579,14 @@ def ns_list(self, args): """Lists namespaces on a subsystem.""" out_func, err_func = 
self.get_output_functions(args) - if args.nsid != None and args.nsid <= 0: + if args.nsid is not None and args.nsid <= 0: self.cli.parser.error("nsid value must be positive") try: namespaces_info = self.stub.list_namespaces(pb2.list_namespaces_req(subsystem=args.subsystem, nsid=args.nsid, uuid=args.uuid)) except Exception as ex: - namespaces_info = pb2.namespaces_info(status = errno.EINVAL, error_message = f"Failure listing namespaces:\n{ex}") + namespaces_info = pb2.namespaces_info(status=errno.EINVAL, error_message=f"Failure listing namespaces:\n{ex}") if args.format == "text" or args.format == "plain": if namespaces_info.status == 0: @@ -1669,11 +1635,11 @@ def ns_list(self, args): else: table_format = "plain" namespaces_out = tabulate(namespaces_list, - headers = ["NSID", "Bdev\nName", "RBD\nImage", - "Image\nSize", "Block\nSize", "UUID", "Load\nBalancing\nGroup", "Visibility", - "R/W IOs\nper\nsecond", "R/W MBs\nper\nsecond", - "Read MBs\nper\nsecond", "Write MBs\nper\nsecond"], - tablefmt=table_format) + headers=["NSID", "Bdev\nName", "RBD\nImage", + "Image\nSize", "Block\nSize", "UUID", "Load\nBalancing\nGroup", "Visibility", + "R/W IOs\nper\nsecond", "R/W MBs\nper\nsecond", + "Read MBs\nper\nsecond", "Write MBs\nper\nsecond"], + tablefmt=table_format) if args.nsid: prefix = f"Namespace {args.nsid} in" elif args.uuid: @@ -1691,13 +1657,11 @@ def ns_list(self, args): else: err_func(f"{namespaces_info.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - namespaces_info, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(namespaces_info, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1719,7 +1683,7 @@ def ns_get_io_stats(self, args): get_stats_req = pb2.namespace_get_io_stats_req(subsystem_nqn=args.subsystem, nsid=args.nsid) ns_io_stats = self.stub.namespace_get_io_stats(get_stats_req) except Exception as ex: - ns_io_stats = pb2.namespace_io_stats_info(status = errno.EINVAL, error_message = f"Failure getting namespace's IO stats:\n{ex}") + ns_io_stats = pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=f"Failure getting namespace's IO stats:\n{ex}") if ns_io_stats.status == 0: if ns_io_stats.subsystem_nqn != args.subsystem: @@ -1731,32 +1695,32 @@ def ns_get_io_stats(self, args): # only show IO errors in verbose mode if not args.verbose: - io_stats = pb2.namespace_io_stats_info(status = ns_io_stats.status, - error_message = ns_io_stats.error_message, - subsystem_nqn = ns_io_stats.subsystem_nqn, - nsid = ns_io_stats.nsid, - uuid = ns_io_stats.uuid, - bdev_name = ns_io_stats.bdev_name, - tick_rate = ns_io_stats.tick_rate, - ticks = ns_io_stats.ticks, - bytes_read = ns_io_stats.bytes_read, - num_read_ops = ns_io_stats.num_read_ops, - bytes_written = ns_io_stats.bytes_written, - num_write_ops = ns_io_stats.num_write_ops, - bytes_unmapped = ns_io_stats.bytes_unmapped, - num_unmap_ops = ns_io_stats.num_unmap_ops, - read_latency_ticks = ns_io_stats.read_latency_ticks, - max_read_latency_ticks = ns_io_stats.max_read_latency_ticks, - min_read_latency_ticks = ns_io_stats.min_read_latency_ticks, - write_latency_ticks = ns_io_stats.write_latency_ticks, - max_write_latency_ticks = ns_io_stats.max_write_latency_ticks, - min_write_latency_ticks = 
ns_io_stats.min_write_latency_ticks, - unmap_latency_ticks = ns_io_stats.unmap_latency_ticks, - max_unmap_latency_ticks = ns_io_stats.max_unmap_latency_ticks, - min_unmap_latency_ticks = ns_io_stats.min_unmap_latency_ticks, - copy_latency_ticks = ns_io_stats.copy_latency_ticks, - max_copy_latency_ticks = ns_io_stats.max_copy_latency_ticks, - min_copy_latency_ticks = ns_io_stats.min_copy_latency_ticks) + io_stats = pb2.namespace_io_stats_info(status=ns_io_stats.status, + error_message=ns_io_stats.error_message, + subsystem_nqn=ns_io_stats.subsystem_nqn, + nsid=ns_io_stats.nsid, + uuid=ns_io_stats.uuid, + bdev_name=ns_io_stats.bdev_name, + tick_rate=ns_io_stats.tick_rate, + ticks=ns_io_stats.ticks, + bytes_read=ns_io_stats.bytes_read, + num_read_ops=ns_io_stats.num_read_ops, + bytes_written=ns_io_stats.bytes_written, + num_write_ops=ns_io_stats.num_write_ops, + bytes_unmapped=ns_io_stats.bytes_unmapped, + num_unmap_ops=ns_io_stats.num_unmap_ops, + read_latency_ticks=ns_io_stats.read_latency_ticks, + max_read_latency_ticks=ns_io_stats.max_read_latency_ticks, + min_read_latency_ticks=ns_io_stats.min_read_latency_ticks, + write_latency_ticks=ns_io_stats.write_latency_ticks, + max_write_latency_ticks=ns_io_stats.max_write_latency_ticks, + min_write_latency_ticks=ns_io_stats.min_write_latency_ticks, + unmap_latency_ticks=ns_io_stats.unmap_latency_ticks, + max_unmap_latency_ticks=ns_io_stats.max_unmap_latency_ticks, + min_unmap_latency_ticks=ns_io_stats.min_unmap_latency_ticks, + copy_latency_ticks=ns_io_stats.copy_latency_ticks, + max_copy_latency_ticks=ns_io_stats.max_copy_latency_ticks, + min_copy_latency_ticks=ns_io_stats.min_copy_latency_ticks) ns_io_stats = io_stats if args.format == "text" or args.format == "plain": @@ -1790,18 +1754,16 @@ def ns_get_io_stats(self, args): table_format = "fancy_grid" else: table_format = "plain" - stats_out = tabulate(stats_list, headers = ["Stat", "Value"], tablefmt=table_format) + stats_out = tabulate(stats_list, headers=["Stat", "Value"], tablefmt=table_format) out_func(f"IO statistics for namespace {args.nsid} in {args.subsystem}, bdev {ns_io_stats.bdev_name}:\n{stats_out}") else: err_func(f"{ns_io_stats.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ns_io_stats, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ns_io_stats, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1826,7 +1788,7 @@ def ns_change_load_balancing_group(self, args): nsid=args.nsid, anagrpid=args.load_balancing_group) ret = self.stub.namespace_change_load_balancing_group(change_lb_group_req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure changing namespace load balancing group:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure changing namespace load balancing group:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -1834,13 +1796,11 @@ def ns_change_load_balancing_group(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, 
indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1863,7 +1823,7 @@ def ns_set_qos(self, args): out_func, err_func = self.get_output_functions(args) if args.nsid <= 0: self.cli.parser.error("nsid value must be positive") - if args.rw_ios_per_second == None and args.rw_megabytes_per_second == None and args.r_megabytes_per_second == None and args.w_megabytes_per_second == None: + if args.rw_ios_per_second is None and args.rw_megabytes_per_second is None and args.r_megabytes_per_second is None and args.w_megabytes_per_second is None: self.cli.parser.error("At least one QOS limit should be set") if args.format == "text" or args.format == "plain": @@ -1875,19 +1835,19 @@ def ns_set_qos(self, args): qos_args["subsystem_nqn"] = args.subsystem if args.nsid: qos_args["nsid"] = args.nsid - if args.rw_ios_per_second != None: + if args.rw_ios_per_second is not None: qos_args["rw_ios_per_second"] = args.rw_ios_per_second - if args.rw_megabytes_per_second != None: + if args.rw_megabytes_per_second is not None: qos_args["rw_mbytes_per_second"] = args.rw_megabytes_per_second - if args.r_megabytes_per_second != None: + if args.r_megabytes_per_second is not None: qos_args["r_mbytes_per_second"] = args.r_megabytes_per_second - if args.w_megabytes_per_second != None: + if args.w_megabytes_per_second is not None: qos_args["w_mbytes_per_second"] = args.w_megabytes_per_second try: set_qos_req = pb2.namespace_set_qos_req(**qos_args) ret = self.stub.namespace_set_qos_limits(set_qos_req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure setting namespaces QOS limits:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure setting namespaces QOS limits:\n{ex}") if args.format == "text" or args.format == "plain": if ret.status == 0: @@ -1895,13 +1855,11 @@ def ns_set_qos(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -1927,7 +1885,7 @@ def ns_add_host(self, args): add_host_req = pb2.namespace_add_host_req(subsystem_nqn=args.subsystem, nsid=args.nsid, host_nqn=one_host_nqn) ret = self.stub.namespace_add_host(add_host_req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure adding host to namespace:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure adding host to namespace:\n{ex}") if not rc: rc = ret.status @@ -1938,13 +1896,11 @@ def ns_add_host(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) 
out_func(yaml.dump(obj)) @@ -1973,7 +1929,7 @@ def ns_del_host(self, args): del_host_req = pb2.namespace_delete_host_req(subsystem_nqn=args.subsystem, nsid=args.nsid, host_nqn=one_host_nqn) ret = self.stub.namespace_delete_host(del_host_req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure deleting host from namespace:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure deleting host from namespace:\n{ex}") if not rc: rc = ret.status @@ -1984,13 +1940,11 @@ def ns_del_host(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -2026,11 +1980,11 @@ def ns_change_visibility(self, args): try: change_visibility_req = pb2.namespace_change_visibility_req(subsystem_nqn=args.subsystem, - nsid=args.nsid, auto_visible=auto_visible, - force=args.force) + nsid=args.nsid, auto_visible=auto_visible, + force=args.force) ret = self.stub.namespace_change_visibility(change_visibility_req) except Exception as ex: - ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure changing namespace visibility:\n{ex}") + ret = pb2.req_status(status=errno.EINVAL, error_message=f"Failure changing namespace visibility:\n{ex}") if auto_visible: vis_text = "\"visible to all hosts\"" @@ -2042,13 +1996,11 @@ def ns_change_visibility(self, args): else: err_func(f"{ret.error_message}") elif args.format == "json" or args.format == "yaml": - ret_str = json_format.MessageToJson( - ret, - indent=4, - including_default_value_fields=True, - preserving_proto_field_name=True) + ret_str = json_format.MessageToJson(ret, indent=4, + including_default_value_fields=True, + preserving_proto_field_name=True) if args.format == "json": - out_func(f"{ret_str}") + out_func(ret_str) elif args.format == "yaml": obj = json.loads(ret_str) out_func(yaml.dump(obj)) @@ -2114,16 +2066,16 @@ def ns_change_visibility(self, args): argument("--host-nqn", "-t", help="Host NQN list", nargs="+", required=True), ] ns_actions = [] - ns_actions.append({"name" : "add", "args" : ns_add_args_list, "help" : "Create a namespace"}) - ns_actions.append({"name" : "del", "args" : ns_del_args_list, "help" : "Delete a namespace"}) - ns_actions.append({"name" : "resize", "args" : ns_resize_args_list, "help" : "Resize a namespace"}) - ns_actions.append({"name" : "list", "args" : ns_list_args_list, "help" : "List namespaces"}) - ns_actions.append({"name" : "get_io_stats", "args" : ns_get_io_stats_args_list, "help" : "Get I/O stats for a namespace"}) - ns_actions.append({"name" : "change_load_balancing_group", "args" : ns_change_load_balancing_group_args_list, "help" : "Change load balancing group for a namespace"}) - ns_actions.append({"name" : "set_qos", "args" : ns_set_qos_args_list, "help" : "Set QOS limits for a namespace"}) - ns_actions.append({"name" : "add_host", "args" : ns_add_host_args_list, "help" : "Add a host to a namespace"}) - ns_actions.append({"name" : "del_host", "args" : ns_del_host_args_list, "help" : "Delete a host from a namespace"}) - ns_actions.append({"name" : "change_visibility", "args" : 
ns_change_visibility_args_list, "help" : "Change visibility for a namespace"}) + ns_actions.append({"name": "add", "args": ns_add_args_list, "help": "Create a namespace"}) + ns_actions.append({"name": "del", "args": ns_del_args_list, "help": "Delete a namespace"}) + ns_actions.append({"name": "resize", "args": ns_resize_args_list, "help": "Resize a namespace"}) + ns_actions.append({"name": "list", "args": ns_list_args_list, "help": "List namespaces"}) + ns_actions.append({"name": "get_io_stats", "args": ns_get_io_stats_args_list, "help": "Get I/O stats for a namespace"}) + ns_actions.append({"name": "change_load_balancing_group", "args": ns_change_load_balancing_group_args_list, "help": "Change load balancing group for a namespace"}) + ns_actions.append({"name": "set_qos", "args": ns_set_qos_args_list, "help": "Set QOS limits for a namespace"}) + ns_actions.append({"name": "add_host", "args": ns_add_host_args_list, "help": "Add a host to a namespace"}) + ns_actions.append({"name": "del_host", "args": ns_del_host_args_list, "help": "Delete a host from a namespace"}) + ns_actions.append({"name": "change_visibility", "args": ns_change_visibility_args_list, "help": "Change visibility for a namespace"}) ns_choices = get_actions(ns_actions) @cli.cmd(ns_actions, ["ns"]) @@ -2160,12 +2112,12 @@ def get_subsystems(self, args): subsystems = self.stub.get_subsystems(pb2.get_subsystems_req()) if args.format == "python": return subsystems - subsystems_out = json_format.MessageToJson( - subsystems, - indent=4, including_default_value_fields=True, - preserving_proto_field_name=True) + subsystems_out = json_format.MessageToJson(subsystems, + indent=4, including_default_value_fields=True, + preserving_proto_field_name=True) out_func(f"Get subsystems:\n{subsystems_out}") + def main_common(client, args): client.logger.setLevel(GatewayEnumUtils.get_value_from_key(pb2.GwLogLevel, args.log_level.lower())) server_address = args.server_address @@ -2178,6 +2130,7 @@ def main_common(client, args): rc = call_function(args) return rc + def main_test(args): if not args: return None @@ -2194,6 +2147,7 @@ def main_test(args): return main_common(client, parsed_args) + def main(args=None) -> int: client = GatewayClient() parsed_args = client.cli.parser.parse_args(args) diff --git a/control/config.py b/control/config.py index 947ad87c..8aecb004 100644 --- a/control/config.py +++ b/control/config.py @@ -10,6 +10,7 @@ import configparser import os + class GatewayConfig: """Loads and returns config file settings. 
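The `ns_actions` table and the surrounding CLI handlers are being normalized to flake8's defaults: no whitespace before `:` in dict literals (E203), no spaces around `=` in keyword arguments (E251), and identity comparisons with `None` (E711). A tiny before/after illustration with made-up names:

```python
# Illustrative only; the names below are hypothetical, not from the gateway code.

# Before (flagged by flake8):
#   actions.append({"name" : "add", "args" : add_args})        # E203
#   req = make_request(status = 0, error_message = "ok")       # E251
#   if rw_ios_per_second == None:                               # E711
#       ...

# After (the style this patch applies):
actions = []
add_args = []


def make_request(status, error_message):
    return {"status": status, "error_message": error_message}


actions.append({"name": "add", "args": add_args})
req = make_request(status=0, error_message="ok")
rw_ios_per_second = None
if rw_ios_per_second is None:
    pass
```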
@@ -59,12 +60,12 @@ def dump_config_file(self, logger): logger.info(f"Using configuration file {self.filepath}") with open(self.filepath) as f: logger.info( - f"====================================== Configuration file content ======================================") + "====================================== Configuration file content ======================================") for line in f: line = line.rstrip() logger.info(f"{line}") logger.info( - f"========================================================================================================") + "========================================================================================================") self.conffile_logged = True except Exception: pass diff --git a/control/discovery.py b/control/discovery.py index eae1a504..29ec34e7 100644 --- a/control/discovery.py +++ b/control/discovery.py @@ -8,15 +8,12 @@ # import argparse -import grpc import json from .config import GatewayConfig from .state import GatewayState, LocalGatewayState, OmapGatewayState, GatewayStateHandler from .utils import GatewayLogger -from .proto import gateway_pb2 as pb2 -import rados -from typing import Dict, Optional +from typing import Dict import socket import threading @@ -27,8 +24,8 @@ import selectors import os from dataclasses import dataclass, field -from ctypes import Structure, LittleEndianStructure, c_bool, c_ubyte, c_uint8, c_uint16, c_uint32, c_uint64, c_float -from google.protobuf import json_format +from ctypes import LittleEndianStructure, c_ubyte, c_uint8, c_uint16, c_uint32, c_uint64 + # NVMe tcp pdu type class NVME_TCP_PDU(enum.IntFlag): @@ -42,6 +39,7 @@ class NVME_TCP_PDU(enum.IntFlag): C2H_DATA = 0x7 TCP_R2T = 0x9 + # NVMe tcp opcode class NVME_TCP_OPC(enum.IntFlag): DELETE_SQ = 0x0 @@ -61,6 +59,7 @@ class NVME_TCP_OPC(enum.IntFlag): KEEP_ALIVE = 0x18 FABRIC_TYPE = 0x7F + # NVMe tcp fabric command type class NVME_TCP_FCTYPE(enum.IntFlag): PROP_SET = 0x0 @@ -70,6 +69,7 @@ class NVME_TCP_FCTYPE(enum.IntFlag): AUTH_RECV = 0x6 DISCONNECT = 0x8 + # NVMe controller register space offsets class NVME_CTL(enum.IntFlag): CAPABILITIES = 0x0 @@ -85,6 +85,7 @@ class NVMF_SUBTYPE(enum.IntFlag): # NVMe type for NVM subsystem NVME = 0x2 + # NVMe over Fabrics transport types class TRANSPORT_TYPES(enum.IntFlag): RDMA = 0x1 @@ -92,6 +93,7 @@ class TRANSPORT_TYPES(enum.IntFlag): TCP = 0x3 INTRA_HOST = 0xfe + # Address family types class ADRFAM_TYPES(enum.IntFlag): ipv4 = 0x1 @@ -100,6 +102,7 @@ class ADRFAM_TYPES(enum.IntFlag): fc = 0x4 intra_host = 0xfe + # Transport requirement, secure channel requirements # Connections shall be made over a fabric secure channel class NVMF_TREQ_SECURE_CHANNEL(enum.IntFlag): @@ -107,6 +110,7 @@ class NVMF_TREQ_SECURE_CHANNEL(enum.IntFlag): REQUIRED = 0x1 NOT_REQUIRED = 0x2 + # maximum number of connections MAX_CONNECTION = 10240 @@ -116,6 +120,7 @@ class NVMF_TREQ_SECURE_CHANNEL(enum.IntFlag): # Max SQ head pointer SQ_HEAD_MAX = 128 + @dataclass class Connection: """Data used multiple times in each connection.""" @@ -124,15 +129,15 @@ class Connection: allow_listeners: list = field(default_factory=list) log_page: bytearray = field(default_factory=bytearray) recv_buffer: bytearray = field(default_factory=bytearray) - nvmeof_connect_data_hostid: tuple = tuple((c_ubyte *16)()) + nvmeof_connect_data_hostid: tuple = tuple((c_ubyte * 16)()) nvmeof_connect_data_cntlid: int = 0 - nvmeof_connect_data_subnqn: tuple = tuple((c_ubyte *256)()) - nvmeof_connect_data_hostnqn: tuple = tuple((c_ubyte *256)()) + 
nvmeof_connect_data_subnqn: tuple = tuple((c_ubyte * 256)()) + nvmeof_connect_data_hostnqn: tuple = tuple((c_ubyte * 256)()) sq_head_ptr: int = 0 unsent_log_page_len: int = 0 # NVM ExpressTM Revision 1.4, page 47 # see Figure 78: Offset 14h: CC – Controller Configuration - property_configuration: tuple = tuple((c_ubyte *8)()) + property_configuration: tuple = tuple((c_ubyte * 8)()) shutdown_now: bool = False controller_id: uuid = None gen_cnt: int = 0 @@ -141,6 +146,7 @@ class Connection: keep_alive_time: float = 0.0 keep_alive_timeout: int = 0 + class AutoSerializableStructure(LittleEndianStructure): def __add__(self, other): if isinstance(other, LittleEndianStructure): @@ -150,6 +156,7 @@ def __add__(self, other): else: raise ValueError("error message format.") + class Pdu(AutoSerializableStructure): _fields_ = [ ("type", c_uint8), @@ -159,6 +166,7 @@ class Pdu(AutoSerializableStructure): ("packet_length", c_uint32), ] + class ICResp(AutoSerializableStructure): _fields_ = [ # pdu version format @@ -171,6 +179,7 @@ class ICResp(AutoSerializableStructure): ("maximum_data_capsules", c_uint32) ] + class CqeConnect(AutoSerializableStructure): _fields_ = [ ("controller_id", c_uint16), @@ -182,6 +191,7 @@ class CqeConnect(AutoSerializableStructure): ("status", c_uint16) ] + class CqePropertyGetSet(AutoSerializableStructure): _fields_ = [ # property data for property get, reserved for property set @@ -192,6 +202,7 @@ class CqePropertyGetSet(AutoSerializableStructure): ("status", c_uint16) ] + class NVMeTcpDataPdu(AutoSerializableStructure): _fields_ = [ ("cmd_id", c_uint16), @@ -201,6 +212,7 @@ class NVMeTcpDataPdu(AutoSerializableStructure): ("reserved", c_uint32) ] + class NVMeIdentify(AutoSerializableStructure): _fields_ = [ # skip some fields, include VID, SSVID, SN, MN @@ -251,8 +263,9 @@ class NVMeIdentify(AutoSerializableStructure): ("vendor_specific", c_ubyte * 1024) ] + # for set feature, keep alive and async -class CqeNVMe(AutoSerializableStructure): +class CqeNVMe(AutoSerializableStructure): _fields_ = [ ("dword0", c_uint32), ("dword1", c_uint32), @@ -262,17 +275,19 @@ class CqeNVMe(AutoSerializableStructure): ("status", c_uint16) ] + class NVMeGetLogPage(AutoSerializableStructure): _fields_ = [ # generation counter ("genctr", c_uint64), # number of records ("numrec", c_uint64), - #record format + # record format ("recfmt", c_uint16), ("reserved", c_ubyte * 1006) ] + class DiscoveryLogEntry(AutoSerializableStructure): _fields_ = [ ("trtype", c_uint8), @@ -292,6 +307,7 @@ class DiscoveryLogEntry(AutoSerializableStructure): ("tsas", c_ubyte * 256) ] + class DiscoveryService: """Implements discovery controller. 
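All of the PDU and CQE classes above inherit from AutoSerializableStructure, whose `__add__` lets a header structure and a payload structure be concatenated into one byte string before being written to the socket. A self-contained sketch of that mechanism, using only the standard library; the Header/Payload field layouts are invented for illustration and are not the NVMe/TCP wire format:

```python
# Sketch of the bytes-concatenation trick used by AutoSerializableStructure.
# The Header/Payload layouts here are invented for illustration only.
from ctypes import LittleEndianStructure, c_uint8, c_uint16, c_uint32


class Serializable(LittleEndianStructure):
    _pack_ = 1

    def __add__(self, other):
        if isinstance(other, LittleEndianStructure):
            # ctypes structures expose the buffer protocol, so bytes() yields
            # their packed little-endian representation.
            return bytes(self) + bytes(other)
        raise ValueError("can only concatenate LittleEndianStructure instances")


class Header(Serializable):
    _fields_ = [("type", c_uint8), ("flags", c_uint8), ("length", c_uint16)]


class Payload(Serializable):
    _fields_ = [("value", c_uint32)]


if __name__ == "__main__":
    reply = Header(type=0x7, flags=0, length=8) + Payload(value=0xDEADBEEF)
    print(reply.hex())  # 8 bytes: 4-byte header followed by 4-byte payload
```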
@@ -345,28 +361,28 @@ def __exit__(self, exc_type, exc_value, traceback): for key in self.conn_vals: try: self.selector.unregister(self.conn_vals[key].connection) - except Except as ex: + except Exception: pass try: self.conn_vals[key].connection.close() - except Except as ex: + except Exception: pass self.conn_vals = {} if self.sock: try: self.selector.unregister(self.sock) - except Exception as ex: + except Exception: pass try: self.sock.close() - except Exception as ex: + except Exception: pass self.sock = None try: self.selector.close() - except Exception as ex: + except Exception: pass self.selector = None @@ -379,8 +395,7 @@ def _read_all(self) -> Dict[str, str]: def _get_vals(self, omap_dict, prefix): """Read values from the OMAP dict.""" - return [json.loads(val.decode('utf-8')) for (key, val) in omap_dict.items() - if key.startswith(prefix)] + return [json.loads(val.decode('utf-8')) for (key, val) in omap_dict.items() if key.startswith(prefix)] def reply_initialize(self, conn): """Reply initialize request.""" @@ -407,21 +422,21 @@ def reply_fc_cmd_connect(self, conn, data, cmd_id): self.logger.debug("handle connect request.") self_conn = self.conn_vals[conn.fileno()] - hf_nvmeof_cmd_connect_rsvd1 = struct.unpack_from('<19B', data, 13) + hf_nvmeof_cmd_connect_rsvd1 = struct.unpack_from('<19B', data, 13) # noqa: F841 SIGL1 = struct.unpack_from('> 8) & 0x1F - get_logpage_lsi = nvme_get_logpage_dword11 >> 16 - get_logpage_uid_idx = nvme_get_logpage_dword14 & 0x3F + get_logpage_lsp = (nvme_get_logpage_dword10 >> 8) & 0x1F # noqa: F841 + get_logpage_lsi = nvme_get_logpage_dword11 >> 16 # noqa: F841 + get_logpage_uid_idx = nvme_get_logpage_dword14 & 0x3F # noqa: F841 if get_logpage_lid != 0x70: self.logger.error("request type error, not discovery request.") @@ -747,7 +755,6 @@ def reply_get_log_page(self, conn, data, cmd_id): allow_listeners = self_conn.allow_listeners if len(allow_listeners) == 0: for host in hosts: - a = host["host_nqn"] if host["host_nqn"] == '*' or host["host_nqn"] == hostnqn: for listener in listeners: # TODO: It is better to change nqn in the "listener" @@ -784,23 +791,22 @@ def reply_get_log_page(self, conn, data, cmd_id): log_entry.asqsz = 128 # transport service indentifier str_trsvcid = str(allow_listeners[log_entry_counter]["trsvcid"]) - log_entry.trsvcid = (c_ubyte * 32)(*[c_ubyte(x) for x \ - in str_trsvcid.encode()]) + log_entry.trsvcid = (c_ubyte * 32)(*[c_ubyte(x) for x in str_trsvcid.encode()]) log_entry.trsvcid[len(str_trsvcid):] = \ [c_ubyte(0x20)] * (32 - len(str_trsvcid)) # NVM subsystem qualified name - log_entry.subnqn = (c_ubyte * 256)(*[c_ubyte(x) for x \ - in allow_listeners[log_entry_counter]["nqn"].encode()]) + log_entry.subnqn = (c_ubyte * 256)(*[c_ubyte(x) for x + in allow_listeners[log_entry_counter]["nqn"].encode()]) log_entry.subnqn[len(allow_listeners[log_entry_counter]["nqn"]):] = \ [c_ubyte(0x00)] * (256 - len(allow_listeners[log_entry_counter]["nqn"])) # Transport address - log_entry.traddr = (c_ubyte * 256)(*[c_ubyte(x) for x \ - in allow_listeners[log_entry_counter]["traddr"].encode()]) + log_entry.traddr = (c_ubyte * 256)(*[c_ubyte(x) for x + in allow_listeners[log_entry_counter]["traddr"].encode()]) log_entry.traddr[len(allow_listeners[log_entry_counter]["traddr"]):] = \ [c_ubyte(0x20)] * (256 - len(allow_listeners[log_entry_counter]["traddr"])) - self_conn.log_page[1024*(log_entry_counter+1): \ - 1024*(log_entry_counter+2)] = log_entry + self_conn.log_page[1024 * (log_entry_counter + 1): + 1024 * (log_entry_counter + 2)] = 
log_entry log_entry_counter += 1 else: self.logger.debug("in the process of sending log pages...") @@ -828,7 +834,7 @@ def reply_get_log_page(self, conn, data, cmd_id): elif nvme_data_len % 1024 == 0: # reply log pages reply = pdu_reply + nvme_tcp_data_pdu + \ - self_conn.log_page[nvme_logpage_offset:nvme_logpage_offset+nvme_data_len] + self_conn.log_page[nvme_logpage_offset:nvme_logpage_offset + nvme_data_len] self_conn.unsent_log_page_len -= nvme_data_len if self_conn.unsent_log_page_len == 0: self_conn.log_page = b'' @@ -850,21 +856,21 @@ def reply_keep_alive(self, conn, data, cmd_id): self.logger.debug("handle keep alive request.") self_conn = self.conn_vals[conn.fileno()] nvme_sgl = struct.unpack_from('<16B', data, 32) - nvme_sgl_desc_type = nvme_sgl[15] & 0xF0 - nvme_sgl_desc_sub_type = nvme_sgl[15] & 0x0F - nvme_keep_alive_dword10 = struct.unpack_from('= \ - self.conn_vals[key].keep_alive_timeout / 1000: + time.time() - self.conn_vals[key].keep_alive_time >= \ + self.conn_vals[key].keep_alive_timeout / 1000: # Adding locks to prevent another thread from processing sudden requests. # Is there a better way? with self.lock: @@ -982,6 +987,7 @@ def reply_fabric_request(self, conn, data, cmd_id): NVME_TCP_FCTYPE.PROP_GET: self.reply_fc_cmd_prop_get, NVME_TCP_FCTYPE.PROP_SET: self.reply_fc_cmd_prop_set } + class UnknownFabricType(BaseException): def __init__(self, fabric_type): super().__init__(f"unsupported opcode: {fabric_type}") @@ -1015,9 +1021,9 @@ def nvmeof_tcp_connection(self, conn, mask): return pdu = struct.unpack_from(' None: self.set_group_id = set_group_id - def group_id(self, request: monitor_pb2.group_id_req, context = None) -> Empty: + def group_id(self, request: monitor_pb2.group_id_req, context=None) -> Empty: self.set_group_id(request.id) return Empty() + class SubsystemHostAuth: MAX_PSK_KEY_NAME_LENGTH = 200 # taken from SPDK SPDK_TLS_PSK_MAX_LEN @@ -162,6 +166,7 @@ def get_subsystem_dhchap_key(self, subsys) -> str: key = self.subsys_dhchap_key[subsys] return key + class NamespaceInfo: def __init__(self, nsid, bdev, uuid, anagrpid, auto_visible): self.nsid = nsid @@ -204,6 +209,7 @@ def host_count(self): def set_ana_group_id(self, anagrpid): self.anagrpid = anagrpid + class NamespacesLocalList: EMPTY_NAMESPACE = NamespaceInfo(None, None, None, 0, False) @@ -225,7 +231,7 @@ def add_namespace(self, nqn, nsid, bdev, uuid, anagrpid, auto_visible): bdev = GatewayService.find_unique_bdev_name(uuid) self.namespace_list[nqn][nsid] = NamespaceInfo(nsid, bdev, uuid, anagrpid, auto_visible) - def find_namespace(self, nqn, nsid, uuid = None) -> NamespaceInfo: + def find_namespace(self, nqn, nsid, uuid=None) -> NamespaceInfo: if nqn not in self.namespace_list: return NamespacesLocalList.EMPTY_NAMESPACE @@ -242,7 +248,7 @@ def find_namespace(self, nqn, nsid, uuid = None) -> NamespaceInfo: return NamespacesLocalList.EMPTY_NAMESPACE - def get_namespace_count(self, nqn, auto_visible = None, min_hosts = 0) -> int: + def get_namespace_count(self, nqn, auto_visible=None, min_hosts=0) -> int: if nqn and nqn not in self.namespace_list: return 0 @@ -282,7 +288,7 @@ def get_all_namespaces_by_ana_group_id(self, anagrpid): if ns.empty(): continue if ns.anagrpid == anagrpid: - ns_list.append((nsid, nqn))#list of tupples + ns_list.append((nsid, nqn)) # list of tupples return ns_list def get_ana_group_id_by_nsid_subsys(self, nqn, nsid): @@ -295,7 +301,6 @@ def get_ana_group_id_by_nsid_subsys(self, nqn, nsid): return 0 return ns.anagrpid - def get_subsys_namespaces_by_ana_group_id(self, nqn, 
anagrpid): ns_list = [] if nqn not in self.namespace_list: @@ -310,6 +315,7 @@ def get_subsys_namespaces_by_ana_group_id(self, nqn, anagrpid): return ns_list + class GatewayService(pb2_grpc.GatewayServicer): """Implements gateway service interface. @@ -384,7 +390,7 @@ def __init__(self, config: GatewayConfig, gateway_state: GatewayStateHandler, rp self.ana_grp_state[i] = pb2.ana_state.INACCESSIBLE self.cluster_nonce = {} self.bdev_cluster = {} - self.bdev_params = {} + self.bdev_params = {} self.subsystem_nsid_bdev_and_uuid = NamespacesLocalList() self.subsystem_listeners = defaultdict(set) self._init_cluster_context() @@ -393,7 +399,7 @@ def __init__(self, config: GatewayConfig, gateway_state: GatewayStateHandler, rp self.up_and_running = True self.rebalance = Rebalance(self) - def get_directories_for_key_file(self, key_type : str, subsysnqn : str, create_dir : bool = False) -> []: + def get_directories_for_key_file(self, key_type: str, subsysnqn: str, create_dir: bool = False) -> []: tmp_dirs = [] dir_prefix = f"{key_type}_{subsysnqn}_" @@ -419,13 +425,13 @@ def get_directories_for_key_file(self, key_type : str, subsysnqn : str, create_d return None return [tmp_dir_name] - def create_host_key_file(self, key_type : str, subsysnqn : str, hostnqn : str, key_value : str) -> str: + def create_host_key_file(self, key_type: str, subsysnqn: str, hostnqn: str, key_value: str) -> str: assert subsysnqn, "Subsystem NQN can't be empty" assert hostnqn, "Host NQN can't be empty" assert key_type, "Key type can't be empty" assert key_value, "Key value can't be empty" - tmp_dir_names = self.get_directories_for_key_file(key_type, subsysnqn, create_dir = True) + tmp_dir_names = self.get_directories_for_key_file(key_type, subsysnqn, create_dir=True) if not tmp_dir_names: return None @@ -443,7 +449,7 @@ def create_host_key_file(self, key_type : str, subsysnqn : str, hostnqn : str, k with open(file_fd, "wt") as f: f.write(key_value) except Exception: - self.logger.exception(f"Error creating file") + self.logger.exception("Error creating file") try: os.remove(filepath) except Exception: @@ -451,17 +457,17 @@ def create_host_key_file(self, key_type : str, subsysnqn : str, hostnqn : str, k return None return filepath - def create_host_psk_file(self, subsysnqn : str, hostnqn : str, key_value : str) -> str: + def create_host_psk_file(self, subsysnqn: str, hostnqn: str, key_value: str) -> str: return self.create_host_key_file(self.PSK_PREFIX, subsysnqn, hostnqn, key_value) - def create_host_dhchap_file(self, subsysnqn : str, hostnqn : str, key_value : str) -> str: + def create_host_dhchap_file(self, subsysnqn: str, hostnqn: str, key_value: str) -> str: return self.create_host_key_file(self.DHCHAP_PREFIX, subsysnqn, hostnqn, key_value) - def remove_host_key_file(self, key_type : str, subsysnqn : str, hostnqn : str) -> None: + def remove_host_key_file(self, key_type: str, subsysnqn: str, hostnqn: str) -> None: assert key_type, "Key type can't be empty" assert subsysnqn, "Subsystem NQN can't be empty" - tmp_dir_names = self.get_directories_for_key_file(key_type, subsysnqn, create_dir = False) + tmp_dir_names = self.get_directories_for_key_file(key_type, subsysnqn, create_dir=False) if not tmp_dir_names: return @@ -469,7 +475,7 @@ def remove_host_key_file(self, key_type : str, subsysnqn : str, hostnqn : str) - if not hostnqn: for one_tmp_dir in tmp_dir_names: try: - shutil.rmtree(one_tmp_dir, ignore_errors = True) + shutil.rmtree(one_tmp_dir, ignore_errors=True) except Exception: pass return @@ -484,27 +490,27 @@ def 
remove_host_key_file(self, key_type : str, subsysnqn : str, hostnqn : str) - self.logger.exception(f"Error deleting file {f.name}") pass - def remove_host_psk_file(self, subsysnqn : str, hostnqn : str) -> None: + def remove_host_psk_file(self, subsysnqn: str, hostnqn: str) -> None: self.remove_host_key_file(self.PSK_PREFIX, subsysnqn, hostnqn) - def remove_host_dhchap_file(self, subsysnqn : str, hostnqn : str) -> None: + def remove_host_dhchap_file(self, subsysnqn: str, hostnqn: str) -> None: self.remove_host_key_file(self.DHCHAP_PREFIX, subsysnqn, hostnqn) - def remove_all_host_key_files(self, subsysnqn : str, hostnqn : str) -> None: + def remove_all_host_key_files(self, subsysnqn: str, hostnqn: str) -> None: self.remove_host_psk_file(subsysnqn, hostnqn) self.remove_host_dhchap_file(subsysnqn, hostnqn) - def remove_all_subsystem_key_files(self, subsysnqn : str) -> None: + def remove_all_subsystem_key_files(self, subsysnqn: str) -> None: self.remove_all_host_key_files(subsysnqn, None) @staticmethod - def construct_key_name_for_keyring(subsysnqn : str, hostnqn : str, prefix : str = None) -> str: + def construct_key_name_for_keyring(subsysnqn: str, hostnqn: str, prefix: str = None) -> str: key_name = hashlib.sha256(subsysnqn.encode()).hexdigest() + "_" + hashlib.sha256(hostnqn.encode()).hexdigest() if prefix: key_name = prefix + "_" + key_name return key_name - def remove_key_from_keyring(self, key_type : str, subsysnqn : str, hostnqn : str) -> None: + def remove_key_from_keyring(self, key_type: str, subsysnqn: str, hostnqn: str) -> None: assert self.rpc_lock.locked(), "RPC is unlocked when calling remove_key_from_keyring()" key_name = GatewayService.construct_key_name_for_keyring(subsysnqn, hostnqn, key_type) try: @@ -512,21 +518,21 @@ def remove_key_from_keyring(self, key_type : str, subsysnqn : str, hostnqn : str except Exception: pass - def remove_psk_key_from_keyring(self, subsysnqn : str, hostnqn : str) -> None: + def remove_psk_key_from_keyring(self, subsysnqn: str, hostnqn: str) -> None: self.remove_key_from_keyring(self.PSK_PREFIX, subsysnqn, hostnqn) - def remove_dhchap_key_from_keyring(self, subsysnqn : str, hostnqn : str) -> None: + def remove_dhchap_key_from_keyring(self, subsysnqn: str, hostnqn: str) -> None: self.remove_key_from_keyring(self.DHCHAP_PREFIX, subsysnqn, hostnqn) - def remove_dhchap_controller_key_from_keyring(self, subsysnqn : str, hostnqn : str) -> None: + def remove_dhchap_controller_key_from_keyring(self, subsysnqn: str, hostnqn: str) -> None: self.remove_key_from_keyring(self.DHCHAP_CONTROLLER_PREFIX, subsysnqn, hostnqn) - def remove_all_host_keys_from_keyring(self, subsysnqn : str, hostnqn : str) -> None: + def remove_all_host_keys_from_keyring(self, subsysnqn: str, hostnqn: str) -> None: self.remove_psk_key_from_keyring(subsysnqn, hostnqn) self.remove_dhchap_key_from_keyring(subsysnqn, hostnqn) self.remove_dhchap_controller_key_from_keyring(subsysnqn, hostnqn) - def remove_all_subsystem_keys_from_keyring(self, subsysnqn : str) -> None: + def remove_all_subsystem_keys_from_keyring(self, subsysnqn: str) -> None: assert self.rpc_lock.locked(), "RPC is unlocked when calling remove_all_subsystem_keys_from_keyring()" try: key_list = rpc_keyring.keyring_get_keys(self.spdk_rpc_client) @@ -544,8 +550,14 @@ def remove_all_subsystem_keys_from_keyring(self, subsysnqn : str) -> None: continue if not key_name or not key_path: continue - if (key_path.startswith(f"{self.KEYS_DIR}/{self.PSK_PREFIX}_{subsysnqn}_") or - 
key_path.startswith(f"{self.KEYS_DIR}/{self.DHCHAP_PREFIX}_{subsysnqn}_")): + + should_remove = False + if key_path.startswith(f"{self.KEYS_DIR}/{self.PSK_PREFIX}_{subsysnqn}_"): + should_remove = True + elif key_path.startswith(f"{self.KEYS_DIR}/{self.DHCHAP_PREFIX}_{subsysnqn}_"): + should_remove = True + + if should_remove: try: rpc_keyring.keyring_file_remove_key(self.spdk_rpc_client, key_name) except Exception: @@ -567,19 +579,19 @@ def parse_json_exeption(self, ex): try: resp_index = ex.message.find(json_error_text) if resp_index >= 0: - resp_str = ex.message[resp_index + len(json_error_text) :] + resp_str = ex.message[resp_index + len(json_error_text):] resp_index = resp_str.find("response:") if resp_index >= 0: - resp_str = resp_str[resp_index + len("response:") :] + resp_str = resp_str[resp_index + len("response:"):] resp = json.loads(resp_str) except Exception: - self.logger.exception(f"Got exception parsing JSON exception") + self.logger.exception("Got exception parsing JSON exception") pass if resp: if resp["code"] < 0: resp["code"] = -resp["code"] else: - resp={} + resp = {} if "timeout" in ex.message.lower(): resp["code"] = errno.ETIMEDOUT else: @@ -625,13 +637,13 @@ def _put_cluster(self, name: str) -> None: if self.clusters[anagrp][name] == 0: ret = rpc_bdev.bdev_rbd_unregister_cluster( self.spdk_rpc_client, - name = name + name=name ) self.logger.info(f"Free cluster {name=} {ret=}") assert ret self.clusters[anagrp].pop(name) - else : - self.logger.info(f"put_cluster {name=} number bdevs: {self.clusters[anagrp][name]}") + else: + self.logger.info(f"put_cluster {name=} number bdevs: {self.clusters[anagrp][name]}") return assert False, f"Cluster {name} is not found" # we should find the cluster in our state @@ -650,9 +662,9 @@ def _alloc_cluster(self, anagrp: int) -> str: name = self._alloc_cluster_name(anagrp) nonce = rpc_bdev.bdev_rbd_register_cluster( self.spdk_rpc_client, - name = name, - user_id = self.rados_id, - core_mask = self.librbd_core_mask, + name=name, + user_id=self.rados_id, + core_mask=self.librbd_core_mask, ) with self.shared_state_lock: self.logger.info(f"Allocated cluster {name=} {nonce=} {anagrp=}") @@ -680,7 +692,8 @@ def execute_grpc_function(self, func, request, context): return self.omap_lock.execute_omap_locking_function(self._grpc_function_with_lock, func, request, context) - def create_bdev(self, anagrp: int, name, uuid, rbd_pool_name, rbd_image_name, block_size, create_image, rbd_image_size, context, peer_msg = ""): + def create_bdev(self, anagrp: int, name, uuid, rbd_pool_name, rbd_image_name, + block_size, create_image, rbd_image_size, context, peer_msg=""): """Creates a bdev from an RBD image.""" if create_image: @@ -694,7 +707,7 @@ def create_bdev(self, anagrp: int, name, uuid, rbd_pool_name, rbd_image_name, bl if block_size == 0: return BdevStatus(status=errno.EINVAL, - error_message=f"Failure creating bdev {name}: block size can't be zero") + error_message=f"Failure creating bdev {name}: block size can't be zero") if create_image: if rbd_image_size <= 0: @@ -706,7 +719,7 @@ def create_bdev(self, anagrp: int, name, uuid, rbd_pool_name, rbd_image_name, bl rc = self.ceph_utils.pool_exists(rbd_pool_name) if not rc: return BdevStatus(status=errno.ENODEV, - error_message=f"Failure creating bdev {name}: RBD pool {rbd_pool_name} doesn't exist") + error_message=f"Failure creating bdev {name}: RBD pool {rbd_pool_name} doesn't exist") try: rc = self.ceph_utils.create_image(rbd_pool_name, rbd_image_name, rbd_image_size) @@ -730,7 +743,7 @@ def 
create_bdev(self, anagrp: int, name, uuid, rbd_pool_name, rbd_image_name, bl return BdevStatus(status=errcode, error_message=f"Failure creating bdev {name}: {errmsg}") try: - cluster_name=self._get_cluster(anagrp) + cluster_name = self._get_cluster(anagrp) bdev_name = rpc_bdev.bdev_rbd_create( self.spdk_rpc_client, name=name, @@ -742,7 +755,8 @@ def create_bdev(self, anagrp: int, name, uuid, rbd_pool_name, rbd_image_name, bl ) with self.shared_state_lock: self.bdev_cluster[name] = cluster_name - self.bdev_params[name] = {'uuid':uuid, 'pool_name':rbd_pool_name, 'image_name':rbd_image_name, 'image_size':rbd_image_size, 'block_size': block_size} + self.bdev_params[name] = {'uuid': uuid, 'pool_name': rbd_pool_name, 'image_name': rbd_image_name, + 'image_size': rbd_image_size, 'block_size': block_size} self.logger.debug(f"bdev_rbd_create: {bdev_name}, cluster_name {cluster_name}") except Exception as ex: @@ -767,7 +781,7 @@ def create_bdev(self, anagrp: int, name, uuid, rbd_pool_name, rbd_image_name, bl return BdevStatus(status=0, error_message=os.strerror(0), bdev_name=name) - def resize_bdev(self, bdev_name, new_size, peer_msg = ""): + def resize_bdev(self, bdev_name, new_size, peer_msg=""): """Resizes a bdev.""" self.logger.info(f"Received request to resize bdev {bdev_name} to {new_size} MiB{peer_msg}") @@ -913,7 +927,7 @@ def get_peer_message(self, context) -> str: addr_fam = "" return f", client address: {addr_fam} {addr}" except Exception: - self.logger.exception(f"Got exception trying to get peer's address") + self.logger.exception("Got exception trying to get peer's address") return "" @@ -928,13 +942,13 @@ def create_subsystem_safe(self, request, context): if not request.enable_ha: errmsg = f"{create_subsystem_error_prefix}: HA must be enabled for subsystems" - self.logger.error(f"{errmsg}") - return pb2.subsys_status(status = errno.EINVAL, error_message = errmsg, nqn = request.subsystem_nqn) + self.logger.error(errmsg) + return pb2.subsys_status(status=errno.EINVAL, error_message=errmsg, nqn=request.subsystem_nqn) if not request.subsystem_nqn: - errmsg = f"Failure creating subsystem, missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.subsys_status(status = errno.EINVAL, error_message = errmsg, nqn = request.subsystem_nqn) + errmsg = "Failure creating subsystem, missing subsystem NQN" + self.logger.error(errmsg) + return pb2.subsys_status(status=errno.EINVAL, error_message=errmsg, nqn=request.subsystem_nqn) if not request.max_namespaces: request.max_namespaces = self.max_namespaces_per_subsystem @@ -947,29 +961,29 @@ def create_subsystem_safe(self, request, context): errmsg = "" if not GatewayState.is_key_element_valid(request.subsystem_nqn): errmsg = f"{create_subsystem_error_prefix}: Invalid NQN \"{request.subsystem_nqn}\", contains invalid characters" - self.logger.error(f"{errmsg}") - return pb2.subsys_status(status = errno.EINVAL, error_message = errmsg, nqn = request.subsystem_nqn) + self.logger.error(errmsg) + return pb2.subsys_status(status=errno.EINVAL, error_message=errmsg, nqn=request.subsystem_nqn) if self.verify_nqns: rc = GatewayUtils.is_valid_nqn(request.subsystem_nqn) if rc[0] != 0: errmsg = f"{create_subsystem_error_prefix}: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.subsys_status(status = rc[0], error_message = errmsg, nqn = request.subsystem_nqn) + self.logger.error(errmsg) + return pb2.subsys_status(status=rc[0], error_message=errmsg, nqn=request.subsystem_nqn) if GatewayUtils.is_discovery_nqn(request.subsystem_nqn): errmsg = 
f"{create_subsystem_error_prefix}: Can't create a discovery subsystem" - self.logger.error(f"{errmsg}") - return pb2.subsys_status(status = errno.EINVAL, error_message = errmsg, nqn = request.subsystem_nqn) + self.logger.error(errmsg) + return pb2.subsys_status(status=errno.EINVAL, error_message=errmsg, nqn=request.subsystem_nqn) if len(self.subsys_max_ns) >= self.max_subsystems: errmsg = f"{create_subsystem_error_prefix}: Maximal number of subsystems ({self.max_subsystems}) has already been reached" - self.logger.error(f"{errmsg}") - return pb2.subsys_status(status = errno.E2BIG, error_message = errmsg, nqn = request.subsystem_nqn) + self.logger.error(errmsg) + return pb2.subsys_status(status=errno.E2BIG, error_message=errmsg, nqn=request.subsystem_nqn) if context: if request.no_group_append or not self.gateway_group: - self.logger.info(f"Subsystem NQN will not be changed") + self.logger.info("Subsystem NQN will not be changed") else: group_name_to_use = self.gateway_group.replace(GatewayState.OMAP_KEY_DELIMITER, "-") request.subsystem_nqn += f".{group_name_to_use}" @@ -994,15 +1008,15 @@ def create_subsystem_safe(self, request, context): subsys_using_serial = None subsys_already_exists = self.subsystem_already_exists(context, request.subsystem_nqn) if subsys_already_exists: - errmsg = f"Subsystem already exists" + errmsg = "Subsystem already exists" else: subsys_using_serial = self.serial_number_already_used(context, request.serial_number) if subsys_using_serial: errmsg = f"Serial number {request.serial_number} is already used by subsystem {subsys_using_serial}" if subsys_already_exists or subsys_using_serial: errmsg = f"{create_subsystem_error_prefix}: {errmsg}" - self.logger.error(f"{errmsg}") - return pb2.subsys_status(status=errno.EEXIST, error_message=errmsg, nqn = request.subsystem_nqn) + self.logger.error(errmsg) + return pb2.subsys_status(status=errno.EEXIST, error_message=errmsg, nqn=request.subsystem_nqn) ret = rpc_nvmf.nvmf_create_subsystem( self.spdk_rpc_client, nqn=request.subsystem_nqn, @@ -1011,7 +1025,7 @@ def create_subsystem_safe(self, request, context): max_namespaces=request.max_namespaces, min_cntlid=min_cntlid, max_cntlid=max_cntlid, - ana_reporting = True, + ana_reporting=True, ) self.subsys_max_ns[request.subsystem_nqn] = request.max_namespaces if request.dhchap_key: @@ -1025,12 +1039,12 @@ def create_subsystem_safe(self, request, context): if resp: status = resp["code"] errmsg = f"{create_subsystem_error_prefix}: {resp['message']}" - return pb2.subsys_status(status=status, error_message=errmsg, nqn = request.subsystem_nqn) + return pb2.subsys_status(status=status, error_message=errmsg, nqn=request.subsystem_nqn) # Just in case SPDK failed with no exception if not ret: self.logger.error(create_subsystem_error_prefix) - return pb2.subsys_status(status=errno.EINVAL, error_message=create_subsystem_error_prefix, nqn = request.subsystem_nqn) + return pb2.subsys_status(status=errno.EINVAL, error_message=create_subsystem_error_prefix, nqn=request.subsystem_nqn) if context: # Update gateway state @@ -1042,9 +1056,9 @@ def create_subsystem_safe(self, request, context): errmsg = f"Error persisting subsystem {request.subsystem_nqn}" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" - return pb2.subsys_status(status=errno.EINVAL, error_message=errmsg, nqn = request.subsystem_nqn) + return pb2.subsys_status(status=errno.EINVAL, error_message=errmsg, nqn=request.subsystem_nqn) - return pb2.subsys_status(status=0, error_message=os.strerror(0), nqn = 
request.subsystem_nqn) + return pb2.subsys_status(status=0, error_message=os.strerror(0), nqn=request.subsystem_nqn) def create_subsystem(self, request, context=None): return self.execute_grpc_function(self.create_subsystem_safe, request, context) @@ -1130,7 +1144,7 @@ def delete_subsystem_safe(self, request, context): # Just in case SPDK failed with no exception if not ret: self.logger.error(delete_subsystem_error_prefix) - self.remove_subsystem_from_state( request.subsystem_nqn, context) + self.remove_subsystem_from_state(request.subsystem_nqn, context) return pb2.req_status(status=errno.EINVAL, error_message=delete_subsystem_error_prefix) return self.remove_subsystem_from_state(request.subsystem_nqn, context) @@ -1143,21 +1157,21 @@ def delete_subsystem(self, request, context=None): self.logger.info(f"Received request to delete subsystem {request.subsystem_nqn}, context: {context}{peer_msg}") if not request.subsystem_nqn: - errmsg = f"Failure deleting subsystem, missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + errmsg = "Failure deleting subsystem, missing subsystem NQN" + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if self.verify_nqns: rc = GatewayUtils.is_valid_nqn(request.subsystem_nqn) if rc[0] != 0: errmsg = f"{delete_subsystem_error_prefix}: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc[0], error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc[0], error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.subsystem_nqn): errmsg = f"{delete_subsystem_error_prefix}: Can't delete a discovery subsystem" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) ns_list = [] if context: @@ -1215,9 +1229,9 @@ def create_namespace(self, subsystem_nqn, bdev_name, nsid, anagrpid, uuid, auto_ nsid_msg = f" using NSID {nsid}" if not subsystem_nqn: - errmsg = f"Failure adding namespace, missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.nsid_status(status=errno.EINVAL, error_message = errmsg) + errmsg = "Failure adding namespace, missing subsystem NQN" + self.logger.error(errmsg) + return pb2.nsid_status(status=errno.EINVAL, error_message=errmsg) add_namespace_error_prefix = f"Failure adding namespace{nsid_msg} to {subsystem_nqn}" @@ -1240,30 +1254,30 @@ def create_namespace(self, subsystem_nqn, bdev_name, nsid, anagrpid, uuid, auto_ return pb2.nsid_status(status=errno.EINVAL, error_message=errmsg) if not auto_visible and self.subsystem_nsid_bdev_and_uuid.get_namespace_count(subsystem_nqn, - False, 0) >= self.max_namespaces_with_netmask: + False, 0) >= self.max_namespaces_with_netmask: errmsg = f"{add_namespace_error_prefix}: Maximal number of namespaces which are only visible to selected hosts ({self.max_namespaces_with_netmask}) has already been reached" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.E2BIG, error_message=errmsg) if nsid and nsid > self.subsys_max_ns[subsystem_nqn]: errmsg = f"{add_namespace_error_prefix}: Requested NSID {nsid} is bigger than the maximal one ({self.subsys_max_ns[subsystem_nqn]})" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.E2BIG, error_message=errmsg) if not nsid and 
self.subsystem_nsid_bdev_and_uuid.get_namespace_count(subsystem_nqn, - None, 0) >= self.subsys_max_ns[subsystem_nqn]: + None, 0) >= self.subsys_max_ns[subsystem_nqn]: errmsg = f"{add_namespace_error_prefix}: Subsystem's maximal number of namespaces ({self.subsys_max_ns[subsystem_nqn]}) has already been reached" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.E2BIG, error_message=errmsg) if self.subsystem_nsid_bdev_and_uuid.get_namespace_count(None, None, 0) >= self.max_namespaces: errmsg = f"{add_namespace_error_prefix}: Maximal number of namespaces ({self.max_namespaces}) has already been reached" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.E2BIG, error_message=errmsg) if self.subsystem_nsid_bdev_and_uuid.get_namespace_count(subsystem_nqn, None, 0) >= self.subsys_max_ns[subsystem_nqn]: errmsg = f"{add_namespace_error_prefix}: Maximal number of namespaces per subsystem ({self.subsys_max_ns[subsystem_nqn]}) has already been reached" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.E2BIG, error_message=errmsg) try: @@ -1321,8 +1335,8 @@ def set_ana_state_safe(self, ana_info: pb2.ana_info, context=None): # fill the static gateway dictionary per nqn and grp_id nqn = nas.nqn for gs in nas.states: - self.ana_map[nqn][gs.grp_id] = gs.state - self.ana_grp_state[gs.grp_id] = gs.state + self.ana_map[nqn][gs.grp_id] = gs.state + self.ana_grp_state[gs.grp_id] = gs.state # If this is not set the subsystem was not created yet if nqn not in self.subsys_max_ns: @@ -1371,7 +1385,7 @@ def set_ana_state_safe(self, ana_info: pb2.ana_info, context=None): adrfam=adrfam, ana_state=ana_state, anagrpid=grp_id) - if ana_state == "inaccessible" : + if ana_state == "inaccessible": inaccessible_ana_groups[grp_id] = True self.logger.debug(f"set_ana_state nvmf_subsystem_listener_set_ana_state response {ret=}") if not ret: @@ -1384,10 +1398,10 @@ def set_ana_state_safe(self, ana_info: pb2.ana_info, context=None): return pb2.req_status() return pb2.req_status(status=True) - def choose_anagrpid_for_namespace(self, nsid) ->int: + def choose_anagrpid_for_namespace(self, nsid) -> int: grps_list = self.ceph_utils.get_number_created_gateways(self.gateway_pool, self.gateway_group) for ana_grp in grps_list: - if self.ana_grp_ns_load[ana_grp] == 0: # still no namespaces in this ana-group - probably the new GW added + if self.ana_grp_ns_load[ana_grp] == 0: # still no namespaces in this ana-group - probably the new GW added self.logger.info(f"New GW created: chosen ana group {ana_grp} for ns {nsid} ") return ana_grp min_load = 2000 @@ -1395,7 +1409,7 @@ def choose_anagrpid_for_namespace(self, nsid) ->int: for ana_grp in self.ana_grp_ns_load: if ana_grp in grps_list: self.logger.info(f" ana group {ana_grp} load = {self.ana_grp_ns_load[ana_grp]} ") - if self.ana_grp_ns_load[ana_grp] <= min_load: + if self.ana_grp_ns_load[ana_grp] <= min_load: min_load = self.ana_grp_ns_load[ana_grp] chosen_ana_group = ana_grp self.logger.info(f" ana group {ana_grp} load = {self.ana_grp_ns_load[ana_grp]} set as min {min_load} ") @@ -1406,9 +1420,9 @@ def namespace_add_safe(self, request, context): """Adds a namespace to a subsystem.""" if not request.subsystem_nqn: - errmsg = f"Failure adding namespace, missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.nsid_status(status=errno.EINVAL, error_message = errmsg) + errmsg = "Failure adding namespace, missing subsystem NQN" + 
self.logger.error(errmsg) + return pb2.nsid_status(status=errno.EINVAL, error_message=errmsg) grps_list = [] anagrp = 0 @@ -1432,14 +1446,14 @@ def namespace_add_safe(self, request, context): ns = self.subsystem_nsid_bdev_and_uuid.find_namespace(request.subsystem_nqn, request.nsid) if not ns.empty(): errmsg = f"Failure adding namespace, NSID {request.nsid} is already in use" - self.logger.error(f"{errmsg}") - return pb2.nsid_status(status=errno.EEXIST, error_message = errmsg) + self.logger.error(errmsg) + return pb2.nsid_status(status=errno.EEXIST, error_message=errmsg) ns = self.subsystem_nsid_bdev_and_uuid.find_namespace(request.subsystem_nqn, None, request.uuid) if not ns.empty(): errmsg = f"Failure adding namespace, UUID {request.uuid} is already in use" - self.logger.error(f"{errmsg}") - return pb2.nsid_status(status=errno.EEXIST, error_message = errmsg) + self.logger.error(errmsg) + return pb2.nsid_status(status=errno.EEXIST, error_message=errmsg) omap_lock = self.omap_lock.get_omap_lock_to_use(context) with omap_lock: @@ -1458,7 +1472,7 @@ def namespace_add_safe(self, request, context): create_image = request.create_image if not context: create_image = False - else: # new namespace + else: # new namespace # If an explicit load balancing group was passed, make sure it exists if request.anagrpid != 0: if request.anagrpid not in grps_list: @@ -1467,7 +1481,7 @@ def namespace_add_safe(self, request, context): self.logger.error(errmsg) return pb2.req_status(status=errno.ENODEV, error_message=errmsg) else: - request.anagrpid = anagrp + request.anagrpid = anagrp anagrp = request.anagrpid ret_bdev = self.create_bdev(anagrp, bdev_name, request.uuid, request.rbd_pool_name, @@ -1478,9 +1492,9 @@ def namespace_add_safe(self, request, context): # Delete the bdev unless there was one already there, just to be on the safe side if ret_bdev.status != errno.EEXIST: ns_bdev = self.get_bdev_info(bdev_name) - if ns_bdev != None: + if ns_bdev is not None: try: - ret_del = self.delete_bdev(bdev_name, peer_msg = peer_msg) + ret_del = self.delete_bdev(bdev_name, peer_msg=peer_msg) self.logger.debug(f"delete_bdev({bdev_name}): {ret_del.status}") except AssertionError: self.logger.exception(f"Got an assert while trying to delete bdev {bdev_name}") @@ -1500,7 +1514,7 @@ def namespace_add_safe(self, request, context): if ret_ns.status != 0: try: - ret_del = self.delete_bdev(bdev_name, peer_msg = peer_msg) + ret_del = self.delete_bdev(bdev_name, peer_msg=peer_msg) if ret_del.status != 0: self.logger.warning(f"Failure {ret_del.status} deleting bdev {bdev_name}: {ret_del.error_message}") except AssertionError: @@ -1541,8 +1555,8 @@ def namespace_change_load_balancing_group_safe(self, request, context): self.logger.info(f"Received {auto_lb_msg} request to change load balancing group for namespace with NSID {request.nsid} in {request.subsystem_nqn} to {request.anagrpid}, context: {context}{peer_msg}") if not request.subsystem_nqn: - errmsg = f"Failure changing load balancing group for namespace, missing subsystem NQN" - self.logger.error(f"{errmsg}") + errmsg = "Failure changing load balancing group for namespace, missing subsystem NQN" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not request.nsid: @@ -1550,7 +1564,7 @@ def namespace_change_load_balancing_group_safe(self, request, context): self.logger.error(errmsg) return pb2.req_status(status=errno.ENODEV, error_message=errmsg) - #below checks are legal only if command is initiated by local cli or is sent from the 
local rebalance logic. + # below checks are legal only if command is initiated by local cli or is sent from the local rebalance logic. if context: grps_list = self.ceph_utils.get_number_created_gateways(self.gateway_pool, self.gateway_group) if request.anagrpid not in grps_list: @@ -1592,7 +1606,6 @@ def namespace_change_load_balancing_group_safe(self, request, context): nqn=request.subsystem_nqn, nsid=request.nsid, anagrpid=request.anagrpid, - #transit_anagrpid=0, #temporary for spdk 24.05 ) self.logger.debug(f"nvmf_subsystem_set_ns_ana_group: {ret}") except Exception as ex: @@ -1618,7 +1631,7 @@ def namespace_change_load_balancing_group_safe(self, request, context): else: self.ana_grp_subs_load[request.anagrpid][request.subsystem_nqn] = 1 self.logger.debug(f"updated load in grp {request.anagrpid} = {self.ana_grp_ns_load[request.anagrpid]} ") - #here update find_ret.set_ana_group_id(request.anagrpid) + # here update find_ret.set_ana_group_id(request.anagrpid) if not find_ret.empty(): find_ret.set_ana_group_id(request.anagrpid) @@ -1671,8 +1684,8 @@ def namespace_change_visibility_safe(self, request, context): self.logger.info(f"Received request to change the visibility of namespace {request.nsid} in {request.subsystem_nqn} to {vis_txt}, force: {request.force}, context: {context}{peer_msg}") if not request.subsystem_nqn: - errmsg = f"Failure changing visibility for namespace, missing subsystem NQN" - self.logger.error(f"{errmsg}") + errmsg = "Failure changing visibility for namespace, missing subsystem NQN" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not request.nsid: @@ -1823,8 +1836,8 @@ def remove_namespace(self, subsystem_nqn, nsid, context): self.logger.info(f"Received request to remove namespace {nsid} from {subsystem_nqn}{peer_msg}") if GatewayUtils.is_discovery_nqn(subsystem_nqn): - errmsg=f"{namespace_failure_prefix}: Can't remove a namespace from a discovery subsystem" - self.logger.error(f"{errmsg}") + errmsg = f"{namespace_failure_prefix}: Can't remove a namespace from a discovery subsystem" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) try: @@ -1874,7 +1887,7 @@ def list_namespaces(self, request, context=None): """List namespaces.""" peer_msg = self.get_peer_message(context) - if request.nsid == None or request.nsid == 0: + if request.nsid is None or request.nsid == 0: if request.uuid: nsid_msg = f"namespace with UUID {request.uuid}" else: @@ -1887,8 +1900,8 @@ def list_namespaces(self, request, context=None): self.logger.info(f"Received request to list {nsid_msg} for {request.subsystem}, context: {context}{peer_msg}") if not request.subsystem: - errmsg = f"Failure listing namespaces, missing subsystem NQN" - self.logger.error(f"{errmsg}") + errmsg = "Failure listing namespaces, missing subsystem NQN" + self.logger.error(errmsg) return pb2.namespaces_info(status=errno.EINVAL, error_message=errmsg, subsystem_nqn=request.subsystem, namespaces=[]) with self.rpc_lock: @@ -1896,7 +1909,7 @@ def list_namespaces(self, request, context=None): ret = rpc_nvmf.nvmf_get_subsystems(self.spdk_rpc_client, nqn=request.subsystem) self.logger.debug(f"list_namespaces: {ret}") except Exception as ex: - errmsg = f"Failure listing namespaces" + errmsg = "Failure listing namespaces" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) @@ -1936,15 +1949,15 @@ def list_namespaces(self, request, context=None): find_ret = 
self.subsystem_nsid_bdev_and_uuid.find_namespace(request.subsystem, nsid) if find_ret.empty(): self.logger.warning(f"Can't find info of namesapce {nsid} in {request.subsystem}. Visibility status will be inaccurate") - one_ns = pb2.namespace_cli(nsid = nsid, - bdev_name = bdev_name, - uuid = n["uuid"], - load_balancing_group = lb_group, - auto_visible = find_ret.auto_visible, - hosts = find_ret.host_list) + one_ns = pb2.namespace_cli(nsid=nsid, + bdev_name=bdev_name, + uuid=n["uuid"], + load_balancing_group=lb_group, + auto_visible=find_ret.auto_visible, + hosts=find_ret.host_list) with self.rpc_lock: ns_bdev = self.get_bdev_info(bdev_name) - if ns_bdev == None: + if ns_bdev is None: self.logger.warning(f"Can't find namespace's bdev {bdev_name}, will not list bdev's information") else: try: @@ -1955,15 +1968,15 @@ def list_namespaces(self, request, context=None): one_ns.block_size = ns_bdev["block_size"] one_ns.rbd_image_size = ns_bdev["block_size"] * ns_bdev["num_blocks"] assigned_limits = ns_bdev["assigned_rate_limits"] - one_ns.rw_ios_per_second=assigned_limits["rw_ios_per_sec"] - one_ns.rw_mbytes_per_second=assigned_limits["rw_mbytes_per_sec"] - one_ns.r_mbytes_per_second=assigned_limits["r_mbytes_per_sec"] - one_ns.w_mbytes_per_second=assigned_limits["w_mbytes_per_sec"] + one_ns.rw_ios_per_second = assigned_limits["rw_ios_per_sec"] + one_ns.rw_mbytes_per_second = assigned_limits["rw_mbytes_per_sec"] + one_ns.r_mbytes_per_second = assigned_limits["r_mbytes_per_sec"] + one_ns.w_mbytes_per_second = assigned_limits["w_mbytes_per_sec"] except KeyError as err: - self.logger.warning(f"Key {err} is not found, will not list bdev's information") + self.logger.warning(f"Key {err} is not found, will not list bdev's information") pass except Exception: - self.logger.exception(f"{ns_bdev=} parse error") + self.logger.exception(f"{ns_bdev=} parse error") pass namespaces.append(one_ns) break @@ -1971,7 +1984,7 @@ def list_namespaces(self, request, context=None): self.logger.exception(f"{s=} parse error") pass - return pb2.namespaces_info(status = 0, error_message = os.strerror(0), subsystem_nqn=request.subsystem, namespaces=namespaces) + return pb2.namespaces_info(status=0, error_message=os.strerror(0), subsystem_nqn=request.subsystem, namespaces=namespaces) def namespace_get_io_stats(self, request, context=None): """Get namespace's IO stats.""" @@ -1979,13 +1992,13 @@ def namespace_get_io_stats(self, request, context=None): peer_msg = self.get_peer_message(context) self.logger.info(f"Received request to get IO stats for namespace {request.nsid} on {request.subsystem_nqn}, context: {context}{peer_msg}") if not request.nsid: - errmsg = f"Failure getting IO stats for namespace, missing NSID" - self.logger.error(f"{errmsg}") + errmsg = "Failure getting IO stats for namespace, missing NSID" + self.logger.error(errmsg) return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg) if not request.subsystem_nqn: errmsg = f"Failure getting IO stats for namespace {request.nsid}, missing subsystem NQN" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg) with self.rpc_lock: @@ -2031,51 +2044,51 @@ def namespace_get_io_stats(self, request, context=None): return pb2.namespace_io_stats_info(status=errno.ENODEV, error_message=f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}: No associated block device found") if len(bdevs) > 1: - self.logger.warning(f"More than one associated block 
device found for namespace, will use the first one") + self.logger.warning("More than one associated block device found for namespace, will use the first one") bdev = bdevs[0] io_errs = [] try: - io_error=bdev["io_error"] + io_error = bdev["io_error"] for err_name in io_error.keys(): one_error = pb2.namespace_io_error(name=err_name, value=io_error[err_name]) io_errs.append(one_error) except Exception: - self.logger.exception(f"failure getting io errors") + self.logger.exception("failure getting io errors") io_stats = pb2.namespace_io_stats_info(status=0, - error_message=os.strerror(0), - subsystem_nqn=request.subsystem_nqn, - nsid=request.nsid, - uuid=uuid, - bdev_name=bdev_name, - tick_rate=ret["tick_rate"], - ticks=ret["ticks"], - bytes_read=bdev["bytes_read"], - num_read_ops=bdev["num_read_ops"], - bytes_written=bdev["bytes_written"], - num_write_ops=bdev["num_write_ops"], - bytes_unmapped=bdev["bytes_unmapped"], - num_unmap_ops=bdev["num_unmap_ops"], - read_latency_ticks=bdev["read_latency_ticks"], - max_read_latency_ticks=bdev["max_read_latency_ticks"], - min_read_latency_ticks=bdev["min_read_latency_ticks"], - write_latency_ticks=bdev["write_latency_ticks"], - max_write_latency_ticks=bdev["max_write_latency_ticks"], - min_write_latency_ticks=bdev["min_write_latency_ticks"], - unmap_latency_ticks=bdev["unmap_latency_ticks"], - max_unmap_latency_ticks=bdev["max_unmap_latency_ticks"], - min_unmap_latency_ticks=bdev["min_unmap_latency_ticks"], - copy_latency_ticks=bdev["copy_latency_ticks"], - max_copy_latency_ticks=bdev["max_copy_latency_ticks"], - min_copy_latency_ticks=bdev["min_copy_latency_ticks"], - io_error=io_errs) + error_message=os.strerror(0), + subsystem_nqn=request.subsystem_nqn, + nsid=request.nsid, + uuid=uuid, + bdev_name=bdev_name, + tick_rate=ret["tick_rate"], + ticks=ret["ticks"], + bytes_read=bdev["bytes_read"], + num_read_ops=bdev["num_read_ops"], + bytes_written=bdev["bytes_written"], + num_write_ops=bdev["num_write_ops"], + bytes_unmapped=bdev["bytes_unmapped"], + num_unmap_ops=bdev["num_unmap_ops"], + read_latency_ticks=bdev["read_latency_ticks"], + max_read_latency_ticks=bdev["max_read_latency_ticks"], + min_read_latency_ticks=bdev["min_read_latency_ticks"], + write_latency_ticks=bdev["write_latency_ticks"], + max_write_latency_ticks=bdev["max_write_latency_ticks"], + min_write_latency_ticks=bdev["min_write_latency_ticks"], + unmap_latency_ticks=bdev["unmap_latency_ticks"], + max_unmap_latency_ticks=bdev["max_unmap_latency_ticks"], + min_unmap_latency_ticks=bdev["min_unmap_latency_ticks"], + copy_latency_ticks=bdev["copy_latency_ticks"], + max_copy_latency_ticks=bdev["max_copy_latency_ticks"], + min_copy_latency_ticks=bdev["min_copy_latency_ticks"], + io_error=io_errs) return io_stats except Exception as ex: - self.logger.exception(f"parse error") + self.logger.exception("parse error") exmsg = str(ex) pass return pb2.namespace_io_stats_info(status=errno.EINVAL, - error_message=f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}: Error parsing returned stats:\n{exmsg}") + error_message=f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}: Error parsing returned stats:\n{exmsg}") def get_qos_limits_string(self, request): limits_to_set = "" @@ -2098,13 +2111,13 @@ def namespace_set_qos_limits_safe(self, request, context): self.logger.info(f"Received request to set QOS limits for namespace {request.nsid} on {request.subsystem_nqn},{limits_to_set}, context: {context}{peer_msg}") if not request.nsid: - errmsg 
= f"Failure setting QOS limits for namespace, missing NSID" - self.logger.error(f"{errmsg}") + errmsg = "Failure setting QOS limits for namespace, missing NSID" + self.logger.error(errmsg) return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg) if not request.subsystem_nqn: errmsg = f"Failure setting QOS limits for namespace {request.nsid}, missing subsystem NQN" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg) find_ret = self.subsystem_nsid_bdev_and_uuid.find_namespace(request.subsystem_nqn, request.nsid) @@ -2141,13 +2154,13 @@ def namespace_set_qos_limits_safe(self, request, context): # Merge current limits with previous ones, if exist if ns_qos_entry: - if not request.HasField("rw_ios_per_second") and ns_qos_entry.get("rw_ios_per_second") != None: + if not request.HasField("rw_ios_per_second") and ns_qos_entry.get("rw_ios_per_second") is not None: request.rw_ios_per_second = int(ns_qos_entry["rw_ios_per_second"]) - if not request.HasField("rw_mbytes_per_second") and ns_qos_entry.get("rw_mbytes_per_second") != None: + if not request.HasField("rw_mbytes_per_second") and ns_qos_entry.get("rw_mbytes_per_second") is not None: request.rw_mbytes_per_second = int(ns_qos_entry["rw_mbytes_per_second"]) - if not request.HasField("r_mbytes_per_second") and ns_qos_entry.get("r_mbytes_per_second") != None: + if not request.HasField("r_mbytes_per_second") and ns_qos_entry.get("r_mbytes_per_second") is not None: request.r_mbytes_per_second = int(ns_qos_entry["r_mbytes_per_second"]) - if not request.HasField("w_mbytes_per_second") and ns_qos_entry.get("w_mbytes_per_second") != None: + if not request.HasField("w_mbytes_per_second") and ns_qos_entry.get("w_mbytes_per_second") is not None: request.w_mbytes_per_second = int(ns_qos_entry["w_mbytes_per_second"]) limits_to_set = self.get_qos_limits_string(request) @@ -2202,18 +2215,18 @@ def namespace_resize_safe(self, request, context=None): self.logger.info(f"Received request to resize namespace {request.nsid} on {request.subsystem_nqn} to {request.new_size} MiB, context: {context}{peer_msg}") if not request.nsid: - errmsg = f"Failure resizing namespace, missing NSID" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + errmsg = "Failure resizing namespace, missing NSID" + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not request.subsystem_nqn: errmsg = f"Failure resizing namespace {request.nsid}, missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if request.new_size <= 0: errmsg = f"Failure resizing namespace {request.nsid}: New size must be positive" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) find_ret = self.subsystem_nsid_bdev_and_uuid.find_namespace(request.subsystem_nqn, request.nsid) @@ -2245,14 +2258,14 @@ def namespace_delete_safe(self, request, context): """Delete a namespace.""" if not request.nsid: - errmsg = f"Failure deleting namespace, missing NSID" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + errmsg = "Failure deleting namespace, missing NSID" + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, 
error_message=errmsg) if not request.subsystem_nqn: errmsg = f"Failure deleting namespace {request.nsid}, missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) peer_msg = self.get_peer_message(context) self.logger.info(f"Received request to delete namespace {request.nsid} from {request.subsystem_nqn}, context: {context}{peer_msg}") @@ -2264,7 +2277,7 @@ def namespace_delete_safe(self, request, context): return pb2.req_status(status=errno.ENODEV, error_message=errmsg) bdev_name = find_ret.bdev if not bdev_name: - self.logger.warning(f"Can't find namespace's bdev name, will try to delete namespace anyway") + self.logger.warning("Can't find namespace's bdev name, will try to delete namespace anyway") omap_lock = self.omap_lock.get_omap_lock_to_use(context) with omap_lock: @@ -2275,7 +2288,7 @@ def namespace_delete_safe(self, request, context): self.remove_namespace_from_state(request.subsystem_nqn, request.nsid, context) self.subsystem_nsid_bdev_and_uuid.remove_namespace(request.subsystem_nqn, request.nsid) if bdev_name: - ret_del = self.delete_bdev(bdev_name, peer_msg = peer_msg) + ret_del = self.delete_bdev(bdev_name, peer_msg=peer_msg) if ret_del.status != 0: errmsg = f"Failure deleting namespace {request.nsid} from {request.subsystem_nqn}: {ret_del.error_message}" self.logger.error(errmsg) @@ -2296,18 +2309,18 @@ def namespace_add_host_safe(self, request, context): if not request.nsid: errmsg = f"Failure adding host {request.host_nqn} to namespace on {request.subsystem_nqn}: Missing NSID" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not request.subsystem_nqn: errmsg = f"Failure adding host to namespace {request.nsid}: Missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not request.host_nqn: errmsg = f"Failure adding host to namespace {request.nsid} on {request.subsystem_nqn}: Missing host NQN" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) # If this is not set the subsystem was not created yet if request.subsystem_nqn not in self.subsys_max_ns: @@ -2317,29 +2330,29 @@ def namespace_add_host_safe(self, request, context): if request.host_nqn == "*": errmsg = f"{failure_prefix}: Host NQN can't be \"*\"" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if self.verify_nqns: rc = GatewayUtils.is_valid_nqn(request.subsystem_nqn) if rc[0] != 0: errmsg = f"{failure_prefix}: Invalid subsystem NQN: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc[0], error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc[0], error_message=errmsg) rc = GatewayUtils.is_valid_nqn(request.host_nqn) if rc[0] != 0: errmsg = f"{failure_prefix}: Invalid host NQN: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc[0], error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc[0], 
error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.subsystem_nqn): errmsg = f"{failure_prefix}: Subsystem NQN can't be a discovery NQN" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.host_nqn): errmsg = f"{failure_prefix}: Host NQN can't be a discovery NQN" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) find_ret = self.subsystem_nsid_bdev_and_uuid.find_namespace(request.subsystem_nqn, request.nsid) @@ -2350,12 +2363,12 @@ def namespace_add_host_safe(self, request, context): if find_ret.auto_visible: errmsg = f"{failure_prefix}: Namespace is visible to all hosts" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if find_ret.host_count() >= self.max_hosts_per_namespace: errmsg = f"{failure_prefix}: Maximal host count for namespace ({self.max_hosts_per_namespace}) was already reached" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.E2BIG, error_message=errmsg) omap_lock = self.omap_lock.get_omap_lock_to_use(context) @@ -2403,18 +2416,18 @@ def namespace_delete_host_safe(self, request, context): if not request.nsid: errmsg = f"Failure deleting host {request.host_nqn} from namespace: Missing NSID" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not request.subsystem_nqn: errmsg = f"Failure deleting host {request.host_nqn} from namespace {request.nsid}: Missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not request.host_nqn: errmsg = f"Failure deleting host from namespace {request.nsid} on {request.subsystem_nqn}: Missing host NQN" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) # If this is not set the subsystem was not created yet if request.subsystem_nqn not in self.subsys_max_ns: @@ -2424,29 +2437,29 @@ def namespace_delete_host_safe(self, request, context): if request.host_nqn == "*": errmsg = f"{failure_prefix}: Host NQN can't be \"*\"" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if self.verify_nqns: rc = GatewayUtils.is_valid_nqn(request.subsystem_nqn) if rc[0] != 0: errmsg = f"{failure_prefix}: Invalid subsystem NQN: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc[0], error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc[0], error_message=errmsg) rc = GatewayUtils.is_valid_nqn(request.host_nqn) if rc[0] != 0: errmsg = f"{failure_prefix}: Invalid host NQN: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc[0], error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc[0], error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.subsystem_nqn): errmsg = f"{failure_prefix}: Subsystem NQN can't be a discovery NQN" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return 
pb2.req_status(status=errno.EINVAL, error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.host_nqn): errmsg = f"{failure_prefix}: Host NQN can't be a discovery NQN" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) find_ret = self.subsystem_nsid_bdev_and_uuid.find_namespace(request.subsystem_nqn, request.nsid) @@ -2457,12 +2470,12 @@ def namespace_delete_host_safe(self, request, context): if find_ret.auto_visible: errmsg = f"{failure_prefix}: Namespace is visible to all hosts" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not find_ret.is_host_in_namespace(request.host_nqn): errmsg = f"{failure_prefix}: Host is not found in namespace's host list" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.ENODEV, error_message=errmsg) omap_lock = self.omap_lock.get_omap_lock_to_use(context) @@ -2533,23 +2546,24 @@ def _create_dhchap_key_files(self, subsystem_nqn, host_nqn, dhchap_key, dhchap_c if dhchap_key: dhchap_file = self.create_host_dhchap_file(subsystem_nqn, host_nqn, dhchap_key) if not dhchap_file: - errmsg=f"{err_prefix}: Can't write DH-HMAC-CHAP file" - self.logger.error(f"{errmsg}") + errmsg = f"{err_prefix}: Can't write DH-HMAC-CHAP file" + self.logger.error(errmsg) return (errno.ENOENT, errmsg, None, None, None, None) - dhchap_key_name = GatewayService.construct_key_name_for_keyring( - subsystem_nqn, host_nqn, GatewayService.DHCHAP_PREFIX) + dhchap_key_name = GatewayService.construct_key_name_for_keyring(subsystem_nqn, + host_nqn, GatewayService.DHCHAP_PREFIX) dhchap_ctrlr_file = None dhchap_ctrlr_key_name = None if dhchap_ctrlr_key: dhchap_ctrlr_file = self.create_host_dhchap_file(subsystem_nqn, host_nqn, dhchap_ctrlr_key) if not dhchap_ctrlr_file: - errmsg=f"{err_prefix}: Can't write DH-HMAC-CHAP controller file" - self.logger.error(f"{errmsg}") + errmsg = f"{err_prefix}: Can't write DH-HMAC-CHAP controller file" + self.logger.error(errmsg) if dhchap_file: self.remove_host_dhchap_file(subsystem_nqn, host_nqn) return (errno.ENOENT, errmsg, None, None, None, None) - dhchap_ctrlr_key_name = GatewayService.construct_key_name_for_keyring( - subsystem_nqn, host_nqn, GatewayService.DHCHAP_CONTROLLER_PREFIX) + dhchap_ctrlr_key_name = GatewayService.construct_key_name_for_keyring(subsystem_nqn, + host_nqn, + GatewayService.DHCHAP_CONTROLLER_PREFIX) return (0, "", dhchap_file, dhchap_key_name, dhchap_ctrlr_file, dhchap_ctrlr_key_name) @@ -2594,23 +2608,23 @@ def add_host_safe(self, request, context): self.logger.info( f"Received request to add host {request.host_nqn} to {request.subsystem_nqn}, psk: {request.psk}, dhchap: {request.dhchap_key}, context: {context}{peer_msg}") - all_host_failure_prefix=f"Failure allowing open host access to {request.subsystem_nqn}" - host_failure_prefix=f"Failure adding host {request.host_nqn} to {request.subsystem_nqn}" + all_host_failure_prefix = f"Failure allowing open host access to {request.subsystem_nqn}" + host_failure_prefix = f"Failure adding host {request.host_nqn} to {request.subsystem_nqn}" if not GatewayState.is_key_element_valid(request.host_nqn): errmsg = f"{host_failure_prefix}: Invalid host NQN \"{request.host_nqn}\", contains invalid characters" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, 
error_message=errmsg) if not GatewayState.is_key_element_valid(request.subsystem_nqn): errmsg = f"{host_failure_prefix}: Invalid subsystem NQN \"{request.subsystem_nqn}\", contains invalid characters" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if request.host_nqn == "*" and self.host_info.does_subsystem_have_dhchap_key(request.subsystem_nqn): - errmsg=f"{all_host_failure_prefix}: Can't allow any host access on a subsystem having a DH-HMAC-CHAP key" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + errmsg = f"{all_host_failure_prefix}: Can't allow any host access on a subsystem having a DH-HMAC-CHAP key" + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if request.host_nqn != "*" and self.host_info.is_any_host_allowed(request.subsystem_nqn): self.logger.warning(f"A specific host {request.host_nqn} was added to subsystem {request.subsystem_nqn} in which all hosts are allowed") @@ -2619,30 +2633,30 @@ def add_host_safe(self, request, context): rc = GatewayService.is_valid_host_nqn(request.host_nqn) if rc.status != 0: errmsg = f"{host_failure_prefix}: {rc.error_message}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc.status, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc.status, error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.subsystem_nqn): if request.host_nqn == "*": - errmsg=f"{all_host_failure_prefix}: Can't allow host access to a discovery subsystem" + errmsg = f"{all_host_failure_prefix}: Can't allow host access to a discovery subsystem" else: - errmsg=f"{host_failure_prefix}: Can't add host to a discovery subsystem" - self.logger.error(f"{errmsg}") + errmsg = f"{host_failure_prefix}: Can't add host to a discovery subsystem" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.host_nqn): - errmsg=f"{host_failure_prefix}: Can't use a discovery NQN as host's" - self.logger.error(f"{errmsg}") + errmsg = f"{host_failure_prefix}: Can't use a discovery NQN as host's" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if request.psk and request.host_nqn == "*": - errmsg=f"{all_host_failure_prefix}: PSK is only allowed for specific hosts" - self.logger.error(f"{errmsg}") + errmsg = f"{all_host_failure_prefix}: PSK is only allowed for specific hosts" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if request.dhchap_key and request.host_nqn == "*": - errmsg=f"{all_host_failure_prefix}: DH-HMAC-CHAP key is only allowed for specific hosts" - self.logger.error(f"{errmsg}") + errmsg = f"{all_host_failure_prefix}: DH-HMAC-CHAP key is only allowed for specific hosts" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if request.dhchap_key and not self.host_info.does_subsystem_have_dhchap_key(request.subsystem_nqn): @@ -2654,8 +2668,8 @@ def add_host_safe(self, request, context): for listener in self.subsystem_listeners[request.subsystem_nqn]: (_, _, _, secure) = listener if secure: - errmsg=f"{all_host_failure_prefix}: Can't allow any host on a subsystem with secure listeners" - self.logger.error(f"{errmsg}") + errmsg = f"{all_host_failure_prefix}: Can't allow any host 
on a subsystem with secure listeners" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) except Exception: pass @@ -2664,25 +2678,25 @@ def add_host_safe(self, request, context): if host_already_exist: if request.host_nqn == "*": errmsg = f"{all_host_failure_prefix}: Open host access is already allowed" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EEXIST, error_message=errmsg) else: errmsg = f"{host_failure_prefix}: Host is already added" - self.logger.error(f"{errmsg}") + self.logger.error(errmsg) return pb2.req_status(status=errno.EEXIST, error_message=errmsg) if request.host_nqn != "*" and self.host_info.get_host_count(request.subsystem_nqn) >= self.max_hosts_per_subsystem: errmsg = f"{host_failure_prefix}: Maximal number of hosts for subsystem ({self.max_hosts_per_subsystem}) has already been reached" - self.logger.error(f"{errmsg}") - return pb2.subsys_status(status = errno.E2BIG, error_message = errmsg, nqn = request.subsystem_nqn) + self.logger.error(errmsg) + return pb2.subsys_status(status=errno.E2BIG, error_message=errmsg, nqn=request.subsystem_nqn) dhchap_ctrlr_key = self.host_info.get_subsystem_dhchap_key(request.subsystem_nqn) if dhchap_ctrlr_key: self.logger.info(f"Got DHCHAP key {dhchap_ctrlr_key} for subsystem {request.subsystem_nqn}") if dhchap_ctrlr_key and not request.dhchap_key: - errmsg=f"{host_failure_prefix}: Host must have a DH-HMAC-CHAP key if the subsystem has one" - self.logger.error(f"{errmsg}") + errmsg = f"{host_failure_prefix}: Host must have a DH-HMAC-CHAP key if the subsystem has one" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) psk_file = None @@ -2690,14 +2704,14 @@ def add_host_safe(self, request, context): if request.psk: psk_file = self.create_host_psk_file(request.subsystem_nqn, request.host_nqn, request.psk) if not psk_file: - errmsg=f"{host_failure_prefix}: Can't write PSK file" - self.logger.error(f"{errmsg}") + errmsg = f"{host_failure_prefix}: Can't write PSK file" + self.logger.error(errmsg) return pb2.req_status(status=errno.ENOENT, error_message=errmsg) - psk_key_name = GatewayService.construct_key_name_for_keyring( - request.subsystem_nqn, request.host_nqn, GatewayService.PSK_PREFIX) + psk_key_name = GatewayService.construct_key_name_for_keyring(request.subsystem_nqn, + request.host_nqn, GatewayService.PSK_PREFIX) if len(psk_key_name) >= SubsystemHostAuth.MAX_PSK_KEY_NAME_LENGTH: - errmsg=f"{host_failure_prefix}: PSK key name {psk_key_name} is too long, max length is {SubsystemHostAuth.MAX_PSK_KEY_NAME_LENGTH}" - self.logger.error(f"{errmsg}") + errmsg = f"{host_failure_prefix}: PSK key name {psk_key_name} is too long, max length is {SubsystemHostAuth.MAX_PSK_KEY_NAME_LENGTH}" + self.logger.error(errmsg) return pb2.req_status(status=errno.E2BIG, error_message=errmsg) dhchap_file = None @@ -2710,9 +2724,8 @@ def add_host_safe(self, request, context): dhchap_file, dhchap_key_name, dhchap_ctrlr_file, - dhchap_ctrlr_key_name) = self._create_dhchap_key_files( - request.subsystem_nqn, request.host_nqn, - request.dhchap_key, dhchap_ctrlr_key, host_failure_prefix) + dhchap_ctrlr_key_name) = self._create_dhchap_key_files(request.subsystem_nqn, request.host_nqn, + request.dhchap_key, dhchap_ctrlr_key, host_failure_prefix) if key_files_status != 0: if psk_file: self.remove_host_psk_file(request.subsystem_nqn, request.host_nqn) @@ -2821,30 +2834,30 @@ def remove_host_safe(self, request, context): """Removes a host 
from a subsystem.""" peer_msg = self.get_peer_message(context) - all_host_failure_prefix=f"Failure disabling open host access to {request.subsystem_nqn}" - host_failure_prefix=f"Failure removing host {request.host_nqn} access from {request.subsystem_nqn}" + all_host_failure_prefix = f"Failure disabling open host access to {request.subsystem_nqn}" + host_failure_prefix = f"Failure removing host {request.host_nqn} access from {request.subsystem_nqn}" if self.verify_nqns: rc = GatewayService.is_valid_host_nqn(request.host_nqn) if rc.status != 0: errmsg = f"{host_failure_prefix}: {rc.error_message}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc.status, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc.status, error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.subsystem_nqn): if request.host_nqn == "*": - errmsg=f"{all_host_failure_prefix}: Can't disable open host access to a discovery subsystem" + errmsg = f"{all_host_failure_prefix}: Can't disable open host access to a discovery subsystem" else: - errmsg=f"{host_failure_prefix}: Can't remove host access from a discovery subsystem" - self.logger.error(f"{errmsg}") + errmsg = f"{host_failure_prefix}: Can't remove host access from a discovery subsystem" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.host_nqn): if request.host_nqn == "*": - errmsg=f"{all_host_failure_prefix}: Can't use a discovery NQN as host's" + errmsg = f"{all_host_failure_prefix}: Can't use a discovery NQN as host's" else: - errmsg=f"{host_failure_prefix}: Can't use a discovery NQN as host's" - self.logger.error(f"{errmsg}") + errmsg = f"{host_failure_prefix}: Can't use a discovery NQN as host's" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) omap_lock = self.omap_lock.get_omap_lock_to_use(context) @@ -2914,52 +2927,52 @@ def change_host_key_safe(self, request, context): """Changes host's inband authentication key.""" peer_msg = self.get_peer_message(context) - failure_prefix=f"Failure changing DH-HMAC-CHAP key for host {request.host_nqn} on subsystem {request.subsystem_nqn}" + failure_prefix = f"Failure changing DH-HMAC-CHAP key for host {request.host_nqn} on subsystem {request.subsystem_nqn}" self.logger.info( f"Received request to change inband authentication key for host {request.host_nqn} on subsystem {request.subsystem_nqn}, dhchap: {request.dhchap_key}, context: {context}{peer_msg}") if request.host_nqn == "*": - errmsg=f"{failure_prefix}: Host NQN can't be '*'" - self.logger.error(f"{errmsg}") + errmsg = f"{failure_prefix}: Host NQN can't be '*'" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not GatewayState.is_key_element_valid(request.host_nqn): errmsg = f"{failure_prefix}: Invalid host NQN \"{request.host_nqn}\", contains invalid characters" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not GatewayState.is_key_element_valid(request.subsystem_nqn): errmsg = f"{failure_prefix}: Invalid subsystem NQN \"{request.subsystem_nqn}\", contains invalid characters" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if 
self.verify_nqns: rc = GatewayUtils.is_valid_nqn(request.subsystem_nqn) if rc[0] != 0: errmsg = f"{failure_prefix}: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc[0], error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc[0], error_message=errmsg) rc = GatewayUtils.is_valid_nqn(request.host_nqn) if rc[0] != 0: errmsg = f"{failure_prefix}: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc[0], error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc[0], error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.subsystem_nqn): - errmsg=f"{failure_prefix}: Can't use a discovery NQN as subsystem's" - self.logger.error(f"{errmsg}") + errmsg = f"{failure_prefix}: Can't use a discovery NQN as subsystem's" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.host_nqn): - errmsg=f"{failure_prefix}: Can't use a discovery NQN as host's" - self.logger.error(f"{errmsg}") + errmsg = f"{failure_prefix}: Can't use a discovery NQN as host's" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) dhchap_ctrlr_key = self.host_info.get_subsystem_dhchap_key(request.subsystem_nqn) if dhchap_ctrlr_key and not request.dhchap_key: - errmsg=f"{failure_prefix}: Host must have a DH-HMAC-CHAP key if the subsystem has one" - self.logger.error(f"{errmsg}") + errmsg = f"{failure_prefix}: Host must have a DH-HMAC-CHAP key if the subsystem has one" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if request.dhchap_key and not dhchap_ctrlr_key: @@ -2967,8 +2980,8 @@ def change_host_key_safe(self, request, context): host_already_exist = self.matching_host_exists(context, request.subsystem_nqn, request.host_nqn) if not host_already_exist and context: - errmsg=f"{failure_prefix}: Can't find host on subsystem" - self.logger.error(f"{errmsg}") + errmsg = f"{failure_prefix}: Can't find host on subsystem" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) dhchap_file = None @@ -2981,9 +2994,8 @@ def change_host_key_safe(self, request, context): dhchap_file, dhchap_key_name, dhchap_ctrlr_file, - dhchap_ctrlr_key_name) = self._create_dhchap_key_files( - request.subsystem_nqn, request.host_nqn, - request.dhchap_key, dhchap_ctrlr_key, failure_prefix) + dhchap_ctrlr_key_name) = self._create_dhchap_key_files(request.subsystem_nqn, request.host_nqn, + request.dhchap_key, dhchap_ctrlr_key, failure_prefix) if key_files_status != 0: return pb2.req_status(status=key_files_status, error_message=key_file_errmsg) @@ -3059,7 +3071,7 @@ def list_hosts_safe(self, request, context): ret = rpc_nvmf.nvmf_get_subsystems(self.spdk_rpc_client, nqn=request.subsystem) self.logger.debug(f"list_hosts: {ret}") except Exception as ex: - errmsg = f"Failure listing hosts, can't get subsystems" + errmsg = "Failure listing hosts, can't get subsystems" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) @@ -3086,14 +3098,14 @@ def list_hosts_safe(self, request, context): host_nqn = h["nqn"] psk = self.host_info.is_psk_host(request.subsystem, host_nqn) dhchap = self.host_info.is_dhchap_host(request.subsystem, host_nqn) - one_host = pb2.host(nqn = host_nqn, use_psk = psk, use_dhchap = dhchap) + one_host = pb2.host(nqn=host_nqn, use_psk=psk, use_dhchap=dhchap) hosts.append(one_host) break except 
Exception: self.logger.exception(f"{s=} parse error") pass - return pb2.hosts_info(status = 0, error_message = os.strerror(0), allow_any_host=allow_any_host, + return pb2.hosts_info(status=0, error_message=os.strerror(0), allow_any_host=allow_any_host, subsystem_nqn=request.subsystem, hosts=hosts) def list_hosts(self, request, context=None): @@ -3107,15 +3119,15 @@ def list_connections_safe(self, request, context): self.logger.log(log_level, f"Received request to list connections for {request.subsystem}, context: {context}{peer_msg}") if not request.subsystem: - errmsg = f"Failure listing connections, missing subsystem NQN" - self.logger.error(f"{errmsg}") - return pb2.connections_info(status=errno.EINVAL, error_message = errmsg, connections=[]) + errmsg = "Failure listing connections, missing subsystem NQN" + self.logger.error(errmsg) + return pb2.connections_info(status=errno.EINVAL, error_message=errmsg, connections=[]) try: qpair_ret = rpc_nvmf.nvmf_subsystem_get_qpairs(self.spdk_rpc_client, nqn=request.subsystem) self.logger.debug(f"list_connections get_qpairs: {qpair_ret}") except Exception as ex: - errmsg = f"Failure listing connections, can't get qpairs" + errmsg = "Failure listing connections, can't get qpairs" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) @@ -3129,7 +3141,7 @@ def list_connections_safe(self, request, context): ctrl_ret = rpc_nvmf.nvmf_subsystem_get_controllers(self.spdk_rpc_client, nqn=request.subsystem) self.logger.debug(f"list_connections get_controllers: {ctrl_ret}") except Exception as ex: - errmsg = f"Failure listing connections, can't get controllers" + errmsg = "Failure listing connections, can't get controllers" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) @@ -3143,7 +3155,7 @@ def list_connections_safe(self, request, context): subsys_ret = rpc_nvmf.nvmf_get_subsystems(self.spdk_rpc_client, nqn=request.subsystem) self.logger.debug(f"list_connections subsystems: {subsys_ret}") except Exception as ex: - errmsg = f"Failure listing connections, can't get subsystems" + errmsg = "Failure listing connections, can't get subsystems" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) @@ -3250,8 +3262,8 @@ def list_connections_safe(self, request, context): qpairs_count=-1, controller_id=-1, use_psk=psk, use_dhchap=dhchap) connections.append(one_conn) - return pb2.connections_info(status = 0, error_message = os.strerror(0), - subsystem_nqn=request.subsystem, connections=connections) + return pb2.connections_info(status=0, error_message=os.strerror(0), + subsystem_nqn=request.subsystem, connections=connections) def list_connections(self, request, context=None): return self.execute_grpc_function(self.list_connections_safe, request, context) @@ -3263,9 +3275,9 @@ def create_listener_safe(self, request, context): create_listener_error_prefix = f"Failure adding {request.nqn} listener at {request.traddr}:{request.trsvcid}" adrfam = GatewayEnumUtils.get_key_from_value(pb2.AddressFamily, request.adrfam) - if adrfam == None: - errmsg=f"{create_listener_error_prefix}: Unknown address family {request.adrfam}" - self.logger.error(f"{errmsg}") + if adrfam is None: + errmsg = f"{create_listener_error_prefix}: Unknown address family {request.adrfam}" + self.logger.error(errmsg) return pb2.req_status(status=errno.ENOKEY, error_message=errmsg) peer_msg = self.get_peer_message(context) @@ -3276,18 +3288,18 @@ def create_listener_safe(self, 
request, context): traddr = GatewayUtils.unescape_address_if_ipv6(request.traddr, adrfam) if GatewayUtils.is_discovery_nqn(request.nqn): - errmsg=f"{create_listener_error_prefix}: Can't create a listener for a discovery subsystem" - self.logger.error(f"{errmsg}") + errmsg = f"{create_listener_error_prefix}: Can't create a listener for a discovery subsystem" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if not GatewayState.is_key_element_valid(request.host_name): - errmsg=f"{create_listener_error_prefix}: Host name \"{request.host_name}\" contains invalid characters" - self.logger.error(f"{errmsg}") + errmsg = f"{create_listener_error_prefix}: Host name \"{request.host_name}\" contains invalid characters" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if request.secure and self.host_info.is_any_host_allowed(request.nqn): - errmsg=f"{create_listener_error_prefix}: Secure channel is only allowed for subsystems in which \"allow any host\" is off" - self.logger.error(f"{errmsg}") + errmsg = f"{create_listener_error_prefix}: Secure channel is only allowed for subsystems in which \"allow any host\" is off" + self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) add_listener_args = {} @@ -3306,18 +3318,18 @@ def create_listener_safe(self, request, context): if (adrfam, traddr, request.trsvcid, False) in self.subsystem_listeners[request.nqn] or (adrfam, traddr, request.trsvcid, True) in self.subsystem_listeners[request.nqn]: self.logger.error(f"{request.nqn} already listens on address {request.traddr}:{request.trsvcid}") return pb2.req_status(status=errno.EEXIST, - error_message=f"{create_listener_error_prefix}: Subsystem already listens on this address") + error_message=f"{create_listener_error_prefix}: Subsystem already listens on this address") ret = rpc_nvmf.nvmf_subsystem_add_listener(self.spdk_rpc_client, **add_listener_args) self.logger.debug(f"create_listener: {ret}") self.subsystem_listeners[request.nqn].add((adrfam, traddr, request.trsvcid, request.secure)) else: if context: - errmsg=f"{create_listener_error_prefix}: Gateway's host name must match current host ({self.host_name})" - self.logger.error(f"{errmsg}") + errmsg = f"{create_listener_error_prefix}: Gateway's host name must match current host ({self.host_name})" + self.logger.error(errmsg) return pb2.req_status(status=errno.ENODEV, error_message=errmsg) else: - errmsg=f"Listener not created as gateway's host name {self.host_name} differs from requested host {request.host_name}" - self.logger.debug(f"{errmsg}") + errmsg = f"Listener not created as gateway's host name {self.host_name} differs from requested host {request.host_name}" + self.logger.debug(errmsg) return pb2.req_status(status=0, error_message=errmsg) except Exception as ex: self.logger.exception(create_listener_error_prefix) @@ -3338,38 +3350,38 @@ def create_listener_safe(self, request, context): self.logger.debug(f"create_listener nvmf_subsystem_listener_set_ana_state {request=} set inaccessible for all ana groups") _ana_state = "inaccessible" ret = rpc_nvmf.nvmf_subsystem_listener_set_ana_state( - self.spdk_rpc_client, - nqn=request.nqn, - ana_state=_ana_state, - trtype="TCP", - traddr=traddr, - trsvcid=str(request.trsvcid), - adrfam=adrfam) + self.spdk_rpc_client, + nqn=request.nqn, + ana_state=_ana_state, + trtype="TCP", + traddr=traddr, + trsvcid=str(request.trsvcid), + adrfam=adrfam) self.logger.debug(f"create_listener 
nvmf_subsystem_listener_set_ana_state response {ret=}") # have been provided with ana state for this nqn prior to creation # update optimized ana groups if self.ana_map[request.nqn]: - for x in range (self.subsys_max_ns[request.nqn]): - ana_grp = x+1 + for x in range(self.subsys_max_ns[request.nqn]): + ana_grp = x + 1 if ana_grp in self.ana_map[request.nqn] and self.ana_map[request.nqn][ana_grp] == pb2.ana_state.OPTIMIZED: _ana_state = "optimized" self.logger.debug(f"using ana_map: set listener on nqn : {request.nqn} ana state : {_ana_state} for group : {ana_grp}") ret = rpc_nvmf.nvmf_subsystem_listener_set_ana_state( - self.spdk_rpc_client, - nqn=request.nqn, - ana_state=_ana_state, - trtype="TCP", - traddr=traddr, - trsvcid=str(request.trsvcid), - adrfam=adrfam, - anagrpid=ana_grp ) + self.spdk_rpc_client, + nqn=request.nqn, + ana_state=_ana_state, + trtype="TCP", + traddr=traddr, + trsvcid=str(request.trsvcid), + adrfam=adrfam, + anagrpid=ana_grp) self.logger.debug(f"create_listener nvmf_subsystem_listener_set_ana_state response {ret=}") except Exception as ex: - errmsg=f"{create_listener_error_prefix}: Error setting ANA state" + errmsg = f"{create_listener_error_prefix}: Error setting ANA state" self.logger.exception(errmsg) - errmsg=f"{errmsg}:\n{ex}" + errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) status = errno.EINVAL if resp: @@ -3453,8 +3465,8 @@ def delete_listener_safe(self, request, context): delete_listener_error_prefix = f"Listener {esc_traddr}:{request.trsvcid} failed to delete from {request.nqn}" adrfam = GatewayEnumUtils.get_key_from_value(pb2.AddressFamily, request.adrfam) - if adrfam == None: - errmsg=f"{delete_listener_error_prefix}. Unknown address family {request.adrfam}" + if adrfam is None: + errmsg = f"{delete_listener_error_prefix}. Unknown address family {request.adrfam}" self.logger.error(errmsg) return pb2.req_status(status=errno.ENOKEY, error_message=errmsg) @@ -3469,12 +3481,12 @@ def delete_listener_safe(self, request, context): f" {esc_traddr}:{request.trsvcid}{force_msg}, context: {context}{peer_msg}") if request.host_name == "*" and not request.force: - errmsg=f"{delete_listener_error_prefix}. Must use the \"--force\" parameter when setting the host name to \"*\"." + errmsg = f"{delete_listener_error_prefix}. Must use the \"--force\" parameter when setting the host name to \"*\"." self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.nqn): - errmsg=f"{delete_listener_error_prefix}. Can't delete a listener from a discovery subsystem" + errmsg = f"{delete_listener_error_prefix}. Can't delete a listener from a discovery subsystem" self.logger.error(errmsg) return pb2.req_status(status=errno.EINVAL, error_message=errmsg) @@ -3482,7 +3494,7 @@ def delete_listener_safe(self, request, context): list_conn_req = pb2.list_connections_req(subsystem=request.nqn) list_conn_ret = self.list_connections_safe(list_conn_req, context) if list_conn_ret.status != 0: - errmsg=f"{delete_listener_error_prefix}. Can't verify there are no active connections for this address" + errmsg = f"{delete_listener_error_prefix}. 
Can't verify there are no active connections for this address" self.logger.error(errmsg) return pb2.req_status(status=errno.ENOTEMPTY, error_message=errmsg) for conn in list_conn_ret.connections: @@ -3492,7 +3504,7 @@ def delete_listener_safe(self, request, context): continue if conn.trsvcid != request.trsvcid: continue - errmsg=f"{delete_listener_error_prefix} due to active connections for {esc_traddr}:{request.trsvcid}. Deleting the listener terminates active connections. You can continue to delete the listener by adding the `--force` parameter." + errmsg = f"{delete_listener_error_prefix} due to active connections for {esc_traddr}:{request.trsvcid}. Deleting the listener terminates active connections. You can continue to delete the listener by adding the `--force` parameter." self.logger.error(errmsg) return pb2.req_status(status=errno.ENOTEMPTY, error_message=errmsg) @@ -3515,8 +3527,8 @@ def delete_listener_safe(self, request, context): if (adrfam, traddr, request.trsvcid, True) in self.subsystem_listeners[request.nqn]: self.subsystem_listeners[request.nqn].remove((adrfam, traddr, request.trsvcid, True)) else: - errmsg=f"{delete_listener_error_prefix}. Gateway's host name must match current host ({self.host_name}). You can continue to delete the listener by adding the `--force` parameter." - self.logger.error(f"{errmsg}") + errmsg = f"{delete_listener_error_prefix}. Gateway's host name must match current host ({self.host_name}). You can continue to delete the listener by adding the `--force` parameter." + self.logger.error(errmsg) return pb2.req_status(status=errno.ENOENT, error_message=errmsg) except Exception as ex: self.logger.exception(delete_listener_error_prefix) @@ -3569,18 +3581,18 @@ def list_listeners_safe(self, request, context): secure = False if "secure" in listener: secure = listener["secure"] - one_listener = pb2.listener_info(host_name = listener["host_name"], - trtype = "TCP", - adrfam = listener["adrfam"], - traddr = listener["traddr"], - trsvcid = listener["trsvcid"], - secure = secure) + one_listener = pb2.listener_info(host_name=listener["host_name"], + trtype="TCP", + adrfam=listener["adrfam"], + traddr=listener["traddr"], + trsvcid=listener["trsvcid"], + secure=secure) listeners.append(one_listener) except Exception: self.logger.exception(f"Got exception while parsing {val}") continue - return pb2.listeners_info(status = 0, error_message = os.strerror(0), listeners=listeners) + return pb2.listeners_info(status=0, error_message=os.strerror(0), listeners=listeners) def list_listeners(self, request, context=None): return self.execute_grpc_function(self.list_listeners_safe, request, context) @@ -3606,7 +3618,7 @@ def list_subsystems_safe(self, request, context): ret = rpc_nvmf.nvmf_get_subsystems(self.spdk_rpc_client) self.logger.debug(f"list_subsystems: {ret}") except Exception as ex: - errmsg = f"Failure listing subsystems" + errmsg = "Failure listing subsystems" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) @@ -3640,7 +3652,7 @@ def list_subsystems_safe(self, request, context): self.logger.exception(f"{s=} parse error") pass - return pb2.subsystems_info_cli(status = 0, error_message = os.strerror(0), subsystems=subsystems) + return pb2.subsystems_info_cli(status=0, error_message=os.strerror(0), subsystems=subsystems) def get_subsystems_safe(self, request, context): """Gets subsystems.""" @@ -3651,7 +3663,7 @@ def get_subsystems_safe(self, request, context): try: ret = 
rpc_nvmf.nvmf_get_subsystems(self.spdk_rpc_subsystems_client) except Exception as ex: - self.logger.exception(f"get_subsystems failed") + self.logger.exception("get_subsystems failed") context.set_code(grpc.StatusCode.INTERNAL) context.set_details(f"{ex}") return pb2.subsystems_info() @@ -3689,26 +3701,26 @@ def list_subsystems(self, request, context=None): def change_subsystem_key_safe(self, request, context): """Change subsystem key.""" peer_msg = self.get_peer_message(context) - failure_prefix=f"Failure changing DH-HMAC-CHAP key for subsystem {request.subsystem_nqn}" + failure_prefix = f"Failure changing DH-HMAC-CHAP key for subsystem {request.subsystem_nqn}" self.logger.info( f"Received request to change inband authentication key for subsystem {request.subsystem_nqn}, dhchap: {request.dhchap_key}, context: {context}{peer_msg}") if not GatewayState.is_key_element_valid(request.subsystem_nqn): errmsg = f"{failure_prefix}: Invalid subsystem NQN \"{request.subsystem_nqn}\", contains invalid characters" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) if self.verify_nqns: rc = GatewayUtils.is_valid_nqn(request.subsystem_nqn) if rc[0] != 0: errmsg = f"{failure_prefix}: {rc[1]}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = rc[0], error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=rc[0], error_message=errmsg) if GatewayUtils.is_discovery_nqn(request.subsystem_nqn): errmsg = f"{failure_prefix}: Can't change DH-HMAC-CHAP key for a discovery subsystem" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) omap_lock = self.omap_lock.get_omap_lock_to_use(context) with omap_lock: @@ -3724,8 +3736,8 @@ def change_subsystem_key_safe(self, request, context): assert hostnqn, "Shouldn't get an empty host NQN" if not self.host_info.is_dhchap_host(request.subsystem_nqn, hostnqn): errmsg = f"{failure_prefix}: Can't set a subsystem's DH-HMAC-CHAP key when it has hosts with no key, like host {hostnqn}" - self.logger.error(f"{errmsg}") - return pb2.req_status(status = errno.EINVAL, error_message = errmsg) + self.logger.error(errmsg) + return pb2.req_status(status=errno.EINVAL, error_message=errmsg) subsys_key = GatewayState.build_subsystem_key(request.subsystem_nqn) try: @@ -3739,11 +3751,11 @@ def change_subsystem_key_safe(self, request, context): assert subsys_entry, f"Can't find entry for subsystem {request.subsystem_nqn}" try: create_req = pb2.create_subsystem_req(subsystem_nqn=request.subsystem_nqn, - serial_number=subsys_entry["serial_number"], - max_namespaces=subsys_entry["max_namespaces"], - enable_ha=subsys_entry["enable_ha"], - no_group_append=subsys_entry["no_group_append"], - dhchap_key=request.dhchap_key) + serial_number=subsys_entry["serial_number"], + max_namespaces=subsys_entry["max_namespaces"], + enable_ha=subsys_entry["enable_ha"], + no_group_append=subsys_entry["no_group_append"], + dhchap_key=request.dhchap_key) json_req = json_format.MessageToJson( create_req, preserving_proto_field_name=True, including_default_value_fields=True) self.gateway_state.add_subsystem(request.subsystem_nqn, json_req) @@ -3769,7 +3781,6 @@ def change_subsystem_key_safe(self, request, context): except Exception: pass - return pb2.req_status(status=0, 
error_message=os.strerror(0)) def change_subsystem_key(self, request, context=None): @@ -3785,15 +3796,15 @@ def get_spdk_nvmf_log_flags_and_level_safe(self, request, context): nvmf_log_flags = {key: value for key, value in rpc_log.log_get_flags( self.spdk_rpc_client).items() if key.startswith('nvmf')} for flag, flagvalue in nvmf_log_flags.items(): - pb2_log_flag = pb2.spdk_log_flag_info(name = flag, enabled = flagvalue) + pb2_log_flag = pb2.spdk_log_flag_info(name=flag, enabled=flagvalue) log_flags.append(pb2_log_flag) spdk_log_level = rpc_log.log_get_level(self.spdk_rpc_client) spdk_log_print_level = rpc_log.log_get_print_level(self.spdk_rpc_client) - self.logger.debug(f"spdk log flags: {nvmf_log_flags}, " - f"spdk log level: {spdk_log_level}, " - f"spdk log print level: {spdk_log_print_level}") + self.logger.debug(f"spdk log flags: {nvmf_log_flags}, " + f"spdk log level: {spdk_log_level}, " + f"spdk log print level: {spdk_log_print_level}") except Exception as ex: - errmsg = f"Failure getting SPDK log levels and nvmf log flags" + errmsg = "Failure getting SPDK log levels and nvmf log flags" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) @@ -3801,14 +3812,14 @@ def get_spdk_nvmf_log_flags_and_level_safe(self, request, context): if resp: status = resp["code"] errmsg = f"Failure getting SPDK log levels and nvmf log flags: {resp['message']}" - return pb2.spdk_nvmf_log_flags_and_level_info(status = status, error_message = errmsg) + return pb2.spdk_nvmf_log_flags_and_level_info(status=status, error_message=errmsg) return pb2.spdk_nvmf_log_flags_and_level_info( nvmf_log_flags=log_flags, - log_level = spdk_log_level, - log_print_level = spdk_log_print_level, - status = 0, - error_message = os.strerror(0)) + log_level=spdk_log_level, + log_print_level=spdk_log_print_level, + status=0, + error_message=os.strerror(0)) def get_spdk_nvmf_log_flags_and_level(self, request, context=None): return self.execute_grpc_function(self.get_spdk_nvmf_log_flags_and_level_safe, request, context) @@ -3823,16 +3834,16 @@ def set_spdk_nvmf_logs_safe(self, request, context): peer_msg = self.get_peer_message(context) if request.HasField("log_level"): log_level = GatewayEnumUtils.get_key_from_value(pb2.LogLevel, request.log_level) - if log_level == None: - errmsg=f"Unknown log level {request.log_level}" - self.logger.error(f"{errmsg}") + if log_level is None: + errmsg = f"Unknown log level {request.log_level}" + self.logger.error(errmsg) return pb2.req_status(status=errno.ENOKEY, error_message=errmsg) if request.HasField("print_level"): print_level = GatewayEnumUtils.get_key_from_value(pb2.LogLevel, request.print_level) - if print_level == None: - errmsg=f"Unknown print level {request.print_level}" - self.logger.error(f"{errmsg}") + if print_level is None: + errmsg = f"Unknown print level {request.print_level}" + self.logger.error(errmsg) return pb2.req_status(status=errno.ENOKEY, error_message=errmsg) self.logger.info(f"Received request to set SPDK nvmf logs: log_level: {log_level}, print_level: {print_level}{peer_msg}") @@ -3842,17 +3853,17 @@ def set_spdk_nvmf_logs_safe(self, request, context): ret = [rpc_log.log_set_flag( self.spdk_rpc_client, flag=flag) for flag in nvmf_log_flags] self.logger.debug(f"Set SPDK nvmf log flags {nvmf_log_flags} to TRUE: {ret}") - if log_level != None: + if log_level is not None: ret_log = rpc_log.log_set_level(self.spdk_rpc_client, level=log_level) self.logger.debug(f"Set log level to {log_level}: {ret_log}") - if print_level != None: + 
if print_level is not None: ret_print = rpc_log.log_set_print_level( self.spdk_rpc_client, level=print_level) self.logger.debug(f"Set log print level to {print_level}: {ret_print}") except Exception as ex: - errmsg="Failure setting SPDK log levels" + errmsg = "Failure setting SPDK log levels" self.logger.exception(errmsg) - errmsg="{errmsg}:\n{ex}" + errmsg = f"{errmsg}:\n{ex}" for flag in nvmf_log_flags: rpc_log.log_clear_flag(self.spdk_rpc_client, flag=flag) resp = self.parse_json_exeption(ex) @@ -3864,10 +3875,10 @@ def set_spdk_nvmf_logs_safe(self, request, context): status = 0 errmsg = os.strerror(0) - if log_level != None and not ret_log: + if log_level is not None and not ret_log: status = errno.EINVAL errmsg = "Failure setting SPDK log level" - elif print_level != None and not ret_print: + elif print_level is not None and not ret_print: status = errno.EINVAL errmsg = "Failure setting SPDK print log level" elif not all(ret): @@ -3890,7 +3901,7 @@ def disable_spdk_nvmf_logs_safe(self, request, context): rpc_log.log_set_print_level(self.spdk_rpc_client, level='INFO')] ret.extend(logs_level) except Exception as ex: - errmsg = f"Failure in disable SPDK nvmf log flags" + errmsg = "Failure in disable SPDK nvmf log flags" self.logger.exception(errmsg) errmsg = f"{errmsg}:\n{ex}" resp = self.parse_json_exeption(ex) @@ -3935,25 +3946,25 @@ def get_gateway_info_safe(self, request, context): cli_version_string = request.cli_version addr = self.config.get_with_default("gateway", "addr", "") port = self.config.get_with_default("gateway", "port", "") - ret = pb2.gateway_info(cli_version = request.cli_version, - version = gw_version_string, - spdk_version = spdk_version_string, - name = self.gateway_name, - group = self.gateway_group, - addr = addr, - port = port, - load_balancing_group = self.group_id + 1, - bool_status = True, - hostname = self.host_name, - max_subsystems = self.max_subsystems, - max_namespaces = self.max_namespaces, - max_namespaces_per_subsystem = self.max_namespaces_per_subsystem, - max_hosts_per_subsystem = self.max_hosts_per_subsystem, - status = 0, - error_message = os.strerror(0)) + ret = pb2.gateway_info(cli_version=request.cli_version, + version=gw_version_string, + spdk_version=spdk_version_string, + name=self.gateway_name, + group=self.gateway_group, + addr=addr, + port=port, + load_balancing_group=self.group_id + 1, + bool_status=True, + hostname=self.host_name, + max_subsystems=self.max_subsystems, + max_namespaces=self.max_namespaces, + max_namespaces_per_subsystem=self.max_namespaces_per_subsystem, + max_hosts_per_subsystem=self.max_hosts_per_subsystem, + status=0, + error_message=os.strerror(0)) cli_ver = self.parse_version(cli_version_string) gw_ver = self.parse_version(gw_version_string) - if cli_ver != None and gw_ver != None and cli_ver < gw_ver: + if cli_ver is not None and gw_ver is not None and cli_ver < gw_ver: ret.bool_status = False ret.status = errno.EINVAL ret.error_message = f"CLI version {cli_version_string} is older than gateway's version {gw_version_string}" @@ -3966,7 +3977,7 @@ def get_gateway_info_safe(self, request, context): ret.status = errno.EINVAL ret.error_message = f"Invalid gateway's version {gw_version_string}" if not cli_version_string: - self.logger.warning(f"No CLI version specified, can't check version compatibility") + self.logger.warning("No CLI version specified, can't check version compatibility") elif not cli_ver: self.logger.warning(f"Invalid CLI version {cli_version_string}, can't check version compatibility") if ret.status
== 0: @@ -3987,19 +3998,19 @@ def get_gateway_log_level(self, request, context=None): log_level = GatewayEnumUtils.get_key_from_value(pb2.GwLogLevel, self.logger.level) except Exception: self.logger.exception(f"Can't get string value for log level {self.logger.level}") - return pb2.gateway_log_level_info(status = errno.ENOKEY, - error_message=f"Invalid gateway log level") + return pb2.gateway_log_level_info(status=errno.ENOKEY, + error_message="Invalid gateway log level") self.logger.info(f"Received request to get gateway's log level. Level is {log_level}{peer_msg}") - return pb2.gateway_log_level_info(status = 0, error_message=os.strerror(0), log_level=log_level) + return pb2.gateway_log_level_info(status=0, error_message=os.strerror(0), log_level=log_level) def set_gateway_log_level(self, request, context=None): """Set gateway's log level""" peer_msg = self.get_peer_message(context) log_level = GatewayEnumUtils.get_key_from_value(pb2.GwLogLevel, request.log_level) - if log_level == None: - errmsg=f"Unknown log level {request.log_level}" - self.logger.error(f"{errmsg}") + if log_level is None: + errmsg = f"Unknown log level {request.log_level}" + self.logger.error(errmsg) return pb2.req_status(status=errno.ENOKEY, error_message=errmsg) log_level = log_level.upper() diff --git a/control/prometheus.py b/control/prometheus.py index 411b6057..56971c8a 100644 --- a/control/prometheus.py +++ b/control/prometheus.py @@ -25,6 +25,7 @@ logger = None + class RBD(NamedTuple): pool: str namespace: str @@ -421,7 +422,7 @@ def collect(self): yield subsystem_metadata yield subsystem_listeners yield subsystem_host_count - yield subsystem_namespace_count + yield subsystem_namespace_count yield subsystem_namespace_limit yield subsystem_namespace_metadata yield host_connection_state diff --git a/control/rebalance.py b/control/rebalance.py index 608fb850..3fa126ba 100755 --- a/control/rebalance.py +++ b/control/rebalance.py @@ -7,16 +7,11 @@ # Authors: leonidc@il.ibm.com # -#import uuid -import errno import threading import time -import json -import re -from .utils import GatewayLogger -from .config import GatewayConfig from .proto import gateway_pb2 as pb2 + class Rebalance: """Miscellaneous functions which do rebalance of ANA groups """ @@ -25,11 +20,11 @@ def __init__(self, gateway_service): self.logger = gateway_service.logger self.gw_srv = gateway_service self.ceph_utils = gateway_service.ceph_utils - self.rebalance_period_sec = gateway_service.config.getint_with_default("gateway", "rebalance_period_sec", 7) + self.rebalance_period_sec = gateway_service.config.getint_with_default("gateway", "rebalance_period_sec", 7) self.rebalance_max_ns_to_change_lb_grp = gateway_service.config.getint_with_default("gateway", "max_ns_to_change_lb_grp", 8) self.rebalance_event = threading.Event() self.auto_rebalance = threading.Thread(target=self.auto_rebalance_task, daemon=True, args=(self.rebalance_event,)) - self.auto_rebalance.start() #start the thread + self.auto_rebalance.start() # start the thread def auto_rebalance_task(self, death_event): """Periodically calls for auto rebalance.""" @@ -41,42 +36,42 @@ def auto_rebalance_task(self, death_event): self.logger.debug(f"Nothing found for rebalance, break at {i} iteration") break except Exception: - self.logger.exception(f"Exception in auto rebalance") - if death_event: - death_event.set() - raise - time.sleep(0.01) #release lock for 10ms after rebalancing each 1 NS + self.logger.exception("Exception in auto rebalance") + if death_event: + death_event.set() + raise + 
time.sleep(0.01) # release lock for 10ms after rebalancing each 1 NS time.sleep(self.rebalance_period_sec) - def find_min_loaded_group(self, grp_list)->int: + def find_min_loaded_group(self, grp_list) -> int: min_load = 2000 chosen_ana_group = 0 - for ana_grp in self.gw_srv.ana_grp_ns_load : - if ana_grp in grp_list : - if self.gw_srv.ana_grp_ns_load[ana_grp] <= min_load: - min_load = self.gw_srv.ana_grp_ns_load[ana_grp] - chosen_ana_group = ana_grp + for ana_grp in self.gw_srv.ana_grp_ns_load: + if ana_grp in grp_list: + if self.gw_srv.ana_grp_ns_load[ana_grp] <= min_load: + min_load = self.gw_srv.ana_grp_ns_load[ana_grp] + chosen_ana_group = ana_grp min_load = 2000 - for nqn in self.gw_srv.ana_grp_subs_load[chosen_ana_group] : - if self.gw_srv.ana_grp_subs_load[chosen_ana_group][nqn] < min_load: + for nqn in self.gw_srv.ana_grp_subs_load[chosen_ana_group]: + if self.gw_srv.ana_grp_subs_load[chosen_ana_group][nqn] < min_load: min_load = self.gw_srv.ana_grp_subs_load[chosen_ana_group][nqn] chosen_nqn = nqn return chosen_ana_group, chosen_nqn - def find_min_loaded_group_in_subsys(self, nqn, grp_list)->int: + def find_min_loaded_group_in_subsys(self, nqn, grp_list) -> int: min_load = 2000 chosen_ana_group = 0 - for ana_grp in grp_list : - if self.gw_srv.ana_grp_ns_load[ana_grp] == 0: + for ana_grp in grp_list: + if self.gw_srv.ana_grp_ns_load[ana_grp] == 0: self.gw_srv.ana_grp_subs_load[ana_grp][nqn] = 0 return 0, ana_grp - for ana_grp in self.gw_srv.ana_grp_subs_load : - if ana_grp in grp_list : + for ana_grp in self.gw_srv.ana_grp_subs_load: + if ana_grp in grp_list: if nqn in self.gw_srv.ana_grp_subs_load[ana_grp]: - if self.gw_srv.ana_grp_subs_load[ana_grp][nqn] <= min_load: + if self.gw_srv.ana_grp_subs_load[ana_grp][nqn] <= min_load: min_load = self.gw_srv.ana_grp_subs_load[ana_grp][nqn] chosen_ana_group = ana_grp - else: #still no load on this ana and subs + else: # still no load on this ana and subs chosen_ana_group = ana_grp self.gw_srv.ana_grp_subs_load[chosen_ana_group][nqn] = 0 min_load = 0 @@ -84,64 +79,63 @@ def find_min_loaded_group_in_subsys(self, nqn, grp_list)->int: return min_load, chosen_ana_group # 1. Not allowed to perform regular rebalance when scale_down rebalance is ongoing - # 2. Monitor each time defines what GW is responsible for regular rebalance(fairness logic), so there will not be collisions between the GWs + # 2. Monitor each time defines what GW is responsible for regular rebalance(fairness logic), so there will not be collisions between the GWs # and reballance results will be accurate. 
Monitor in nvme-gw show response publishes the index of ANA group that is currently responsible for rebalance - def rebalance_logic(self, request, context)->int: + def rebalance_logic(self, request, context) -> int: worker_ana_group = self.ceph_utils.get_rebalance_ana_group() self.logger.debug(f"Called rebalance logic: current rebalancing ana group {worker_ana_group}") ongoing_scale_down_rebalance = False grps_list = self.ceph_utils.get_number_created_gateways(self.gw_srv.gateway_pool, self.gw_srv.gateway_group) if not self.ceph_utils.is_rebalance_supported(): - self.logger.info(f"Auto rebalance is not supported with the curent ceph version") + self.logger.info("Auto rebalance is not supported with the current ceph version") return 1 for ana_grp in self.gw_srv.ana_grp_state: - if self.gw_srv.ana_grp_ns_load[ana_grp] != 0 : #internally valid group - if ana_grp not in grps_list: #monitor considers it invalid since GW owner was deleted + if self.gw_srv.ana_grp_ns_load[ana_grp] != 0: # internally valid group + if ana_grp not in grps_list: # monitor considers it invalid since GW owner was deleted ongoing_scale_down_rebalance = True - self.logger.info(f"Scale-down rebalance is ongoing for ANA group {ana_grp} current load {self.gw_srv.ana_grp_ns_load[ana_grp]}") + self.logger.info(f"Scale-down rebalance is ongoing for ANA group {ana_grp} current load {self.gw_srv.ana_grp_ns_load[ana_grp]}") break num_active_ana_groups = len(grps_list) for ana_grp in self.gw_srv.ana_grp_state: - if self.gw_srv.ana_grp_state[ana_grp] == pb2.ana_state.OPTIMIZED : + if self.gw_srv.ana_grp_state[ana_grp] == pb2.ana_state.OPTIMIZED: if ana_grp not in grps_list: self.logger.info(f"Found optimized ana group {ana_grp} that handles the group of deleted GW." f"Number NS in group {self.gw_srv.ana_grp_ns_load[ana_grp]} - Start NS rebalance") if self.gw_srv.ana_grp_ns_load[ana_grp] >= self.rebalance_max_ns_to_change_lb_grp: - num = self.rebalance_max_ns_to_change_lb_grp + num = self.rebalance_max_ns_to_change_lb_grp else: - num = self.gw_srv.ana_grp_ns_load[ana_grp] - if num > 0 : + num = self.gw_srv.ana_grp_ns_load[ana_grp] + if num > 0: min_ana_grp, chosen_nqn = self.find_min_loaded_group(grps_list) self.logger.info(f"Start rebalance (scale down) destination ana group {min_ana_grp}, subsystem {chosen_nqn}") - self.ns_rebalance(context, ana_grp, min_ana_grp, 1, "0")#scale down rebalance + self.ns_rebalance(context, ana_grp, min_ana_grp, 1, "0") # scale down rebalance return 0 - else : + else: self.logger.info(f"warning: empty group {ana_grp} of Deleting GW still appears Optimized") return 1 - else : - if not ongoing_scale_down_rebalance and (self.gw_srv.ana_grp_state[worker_ana_group] == pb2.ana_state.OPTIMIZED) : - # if my optimized ana group == worker-ana-group or worker-ana-group is also in optimized state on this GW machine - for nqn in self.gw_srv.ana_grp_subs_load[ana_grp] : #need to search all nqns not only inside the current load + else: + if not ongoing_scale_down_rebalance and (self.gw_srv.ana_grp_state[worker_ana_group] == pb2.ana_state.OPTIMIZED): + # if my optimized ana group == worker-ana-group or worker-ana-group is also in optimized state on this GW machine + for nqn in self.gw_srv.ana_grp_subs_load[ana_grp]: # need to search all nqns not only inside the current load num_ns_in_nqn = self.gw_srv.subsystem_nsid_bdev_and_uuid.get_namespace_count(nqn, None, 0) - target_subs_per_ana = num_ns_in_nqn/num_active_ana_groups + target_subs_per_ana = num_ns_in_nqn / num_active_ana_groups self.logger.debug(f"loop: nqn {nqn} 
ana group {ana_grp} load {self.gw_srv.ana_grp_subs_load[ana_grp][nqn]}, " f"num-ns in nqn {num_ns_in_nqn}, target_subs_per_ana {target_subs_per_ana} ") if self.gw_srv.ana_grp_subs_load[ana_grp][nqn] > target_subs_per_ana: self.logger.debug(f"max-nqn load {self.gw_srv.ana_grp_subs_load[ana_grp][nqn]} nqn {nqn} ") min_load, min_ana_grp = self.find_min_loaded_group_in_subsys(nqn, grps_list) - if ( - (self.gw_srv.ana_grp_subs_load[min_ana_grp][nqn] + 1) <= target_subs_per_ana - or (self.gw_srv.ana_grp_subs_load[min_ana_grp][nqn] + 1) == (self.gw_srv.ana_grp_subs_load[ana_grp][nqn] - 1) - ): - self.logger.info(f"Start rebalance (regular) in subsystem {nqn}, dest ana {min_ana_grp}, dest ana load per subs {min_load}") - self.ns_rebalance(context, ana_grp, min_ana_grp, 1, nqn) #regular rebalance - return 0 + le_target = (self.gw_srv.ana_grp_subs_load[min_ana_grp][nqn] + 1) <= target_subs_per_ana + load_eq = (self.gw_srv.ana_grp_subs_load[min_ana_grp][nqn] + 1) == (self.gw_srv.ana_grp_subs_load[ana_grp][nqn] - 1) + if le_target or load_eq: + self.logger.info(f"Start rebalance (regular) in subsystem {nqn}, dest ana {min_ana_grp}, dest ana load per subs {min_load}") + self.ns_rebalance(context, ana_grp, min_ana_grp, 1, nqn) # regular rebalance + return 0 else: self.logger.debug(f"Found min loaded subsystem {nqn}, ana {min_ana_grp}, load {min_load} does not fit rebalance criteria!") continue return 1 - def ns_rebalance(self, context, ana_id, dest_ana_id, num, subs_nqn) ->int: + def ns_rebalance(self, context, ana_id, dest_ana_id, num, subs_nqn) -> int: now = time.time() num_rebalanced = 0 self.logger.info(f"== rebalance started == for subsystem {subs_nqn}, anagrp {ana_id}, destination anagrp {dest_ana_id}, num ns {num} time {now} ") @@ -150,12 +144,12 @@ def ns_rebalance(self, context, ana_id, dest_ana_id, num, subs_nqn) ->int: for nsid, subsys in ns_list: self.logger.debug(f"nsid {nsid} for nqn {subsys} to rebalance:") if subsys == subs_nqn or subs_nqn == "0": - self.logger.info(f"nsid for change_load_balancing : {nsid}, {subsys}, anagrpid: {ana_id}") - change_lb_group_req = pb2.namespace_change_load_balancing_group_req(subsystem_nqn=subsys, nsid= nsid, anagrpid=dest_ana_id, auto_lb_logic=True) - ret = self.gw_srv.namespace_change_load_balancing_group_safe(change_lb_group_req, context) - self.logger.debug(f"ret namespace_change_load_balancing_group {ret}") - num_rebalanced += 1 - if num_rebalanced >= num : - self.logger.info(f"== Completed rebalance in {time.time() - now } sec for {num} namespaces from anagrp {ana_id} to {dest_ana_id} ") - return 0 + self.logger.info(f"nsid for change_load_balancing : {nsid}, {subsys}, anagrpid: {ana_id}") + change_lb_group_req = pb2.namespace_change_load_balancing_group_req(subsystem_nqn=subsys, nsid=nsid, anagrpid=dest_ana_id, auto_lb_logic=True) + ret = self.gw_srv.namespace_change_load_balancing_group_safe(change_lb_group_req, context) + self.logger.debug(f"ret namespace_change_load_balancing_group {ret}") + num_rebalanced += 1 + if num_rebalanced >= num: + self.logger.info(f"== Completed rebalance in {time.time() - now } sec for {num} namespaces from anagrp {ana_id} to {dest_ana_id} ") + return 0 return 0 diff --git a/control/server.py b/control/server.py index fdbea7ee..4494a582 100644 --- a/control/server.py +++ b/control/server.py @@ -35,12 +35,14 @@ from .cephutils import CephUtils from .prometheus import start_exporter + def sigterm_handler(signum, frame): """Handle SIGTERM, runs when a gateway is terminated gracefully.""" logger = GatewayLogger().logger 
logger.info(f"GatewayServer: SIGTERM received {signum=}") raise SystemExit(0) + def sigchld_handler(signum, frame): """Handle SIGCHLD, runs when a child process, like the spdk, terminates.""" logger = GatewayLogger().logger @@ -50,7 +52,7 @@ def sigchld_handler(signum, frame): pid, wait_status = os.waitpid(-1, os.WNOHANG) logger.error(f"PID of terminated child process is {pid}") except OSError: - logger.exception(f"waitpid error") + logger.exception("waitpid error") # eat the exception, in signal handler context pass @@ -59,10 +61,12 @@ def sigchld_handler(signum, frame): # GW process should exit now raise SystemExit(f"Gateway subprocess terminated {pid=} {exit_code=}") + def int_to_bitmask(n): """Converts an integer n to a bitmask string""" return f"0x{hex((1 << n) - 1)[2:].upper()}" + def cpumask_set(args): """Check if reactor cpu mask is set in command line args""" @@ -77,6 +81,7 @@ def cpumask_set(args): return False + class GatewayServer: """Runs SPDK and receives client requests for the gateway service. @@ -195,19 +200,19 @@ def _wait_for_group_id(self): self.monitor_event.wait() self.monitor_event = None self.logger.info("Stopping the MonitorGroup server...") - grace = self.config.getfloat_with_default("gateway", "monitor_stop_grace", 1/1000) + grace = self.config.getfloat_with_default("gateway", "monitor_stop_grace", 1 / 1000) self.monitor_server.stop(grace).wait() self.logger.info("The MonitorGroup gRPC server stopped...") self.monitor_server = None def start_prometheus(self): - ###Starts the prometheus endpoint if enabled by the config.### + """Starts the prometheus endpoint if enabled by the config.""" if self.config.getboolean_with_default("gateway", "enable_prometheus_exporter", True): self.logger.info("Prometheus endpoint is enabled") start_exporter(self.spdk_rpc_client, self.config, self.gateway_rpc, self.logger) else: - self.logger.info(f"Prometheus endpoint is disabled. To enable, set the config option 'enable_prometheus_exporter = True'") + self.logger.info("Prometheus endpoint is disabled. 
To enable, set the config option 'enable_prometheus_exporter = True'") def serve(self): """Starts gateway server.""" @@ -266,10 +271,10 @@ def _register_service_map(self): metadata = { "id": self.name.removeprefix("client.nvmeof."), "pool_name": self.config.get("ceph", "pool"), - "daemon_type": "gateway", # "nvmeof: 3 active (3 hosts)" + "daemon_type": "gateway", # "nvmeof: 3 active (3 hosts)" "group": self.config.get_with_default("gateway", "group", ""), - } - self.ceph_utils.service_daemon_register(conn, metadata) + } + self.ceph_utils.service_daemon_register(conn, metadata) def _monitor_client_version(self) -> str: """Return monitor client version string.""" @@ -298,20 +303,20 @@ def _start_monitor_client(self): rados_id = self.config.get_with_default("ceph", "id", "client.admin") if not rados_id.startswith(client_prefix): rados_id = client_prefix + rados_id - cmd = [ self.monitor_client, - "--gateway-name", self.name, - "--gateway-address", self._gateway_address(), - "--gateway-pool", self.config.get("ceph", "pool"), - "--gateway-group", self.config.get_with_default("gateway", "group", ""), - "--monitor-group-address", self._monitor_address(), - '-c', '/etc/ceph/ceph.conf', - '-n', rados_id, - '-k', '/etc/ceph/keyring'] + cmd = [self.monitor_client, + "--gateway-name", self.name, + "--gateway-address", self._gateway_address(), + "--gateway-pool", self.config.get("ceph", "pool"), + "--gateway-group", self.config.get_with_default("gateway", "group", ""), + "--monitor-group-address", self._monitor_address(), + '-c', '/etc/ceph/ceph.conf', + '-n', rados_id, + '-k', '/etc/ceph/keyring'] if self.config.getboolean("gateway", "enable_auth"): cmd += [ "--server-cert", self.config.get("mtls", "server_cert"), "--client-key", self.config.get("mtls", "client_key"), - "--client-cert", self.config.get("mtls", "client_cert") ] + "--client-cert", self.config.get("mtls", "client_cert")] self.monitor_client_log_file = None self.monitor_client_log_file_path = None @@ -335,7 +340,7 @@ def _start_monitor_client(self): # wait for monitor notification of the group id self._wait_for_group_id() except Exception: - self.logger.exception(f"Unable to start CEPH monitor client:") + self.logger.exception("Unable to start CEPH monitor client") raise def _start_discovery_service(self): @@ -348,7 +353,7 @@ def _start_discovery_service(self): try: rpc_nvmf.nvmf_delete_subsystem(self.spdk_rpc_client, GatewayUtils.DISCOVERY_NQN) except Exception: - self.logger.exception(f"Delete Discovery subsystem returned with error") + self.logger.exception("Delete Discovery subsystem returned with error") raise # run ceph nvmeof discovery service in sub-process @@ -389,7 +394,7 @@ def _grpc_server(self, address): enable_auth = self.config.getboolean("gateway", "enable_auth") if enable_auth: - self.logger.info(f"mTLS authenciation has been enabled") + self.logger.info("mTLS authentication has been enabled") # Read in key and certificates for authentication server_key = self.config.get("mtls", "server_key") server_cert = self.config.get("mtls", "server_cert") @@ -470,7 +475,7 @@ def _start_spdk(self, omap_state): sockname = self.config.get_with_default("spdk", "rpc_socket_name", "spdk.sock") if sockname.find("/") >= 0: self.logger.error(f"Invalid SPDK socket name \"{sockname}\". 
Name should not contain a \"/\".") - raise RuntimeError(f"Invalid SPDK socket name.") + raise RuntimeError("Invalid SPDK socket name.") self.spdk_rpc_socket_path = sockdir + sockname self.logger.info(f"SPDK Socket: {self.spdk_rpc_socket_path}") spdk_tgt_cmd_extra_args = self.config.get_with_default( @@ -487,10 +492,9 @@ def _start_spdk(self, omap_state): self.logger.info(f"SPDK will not use huge pages, mem size: {spdk_memsize}") cmd += ["--no-huge", "-s", str(spdk_memsize)] else: - self.logger.info(f"SPDK will use huge pages, probing...") + self.logger.info("SPDK will use huge pages, probing...") self.probe_huge_pages() - # If not provided in configuration, # calculate cpu mask available for spdk reactors if not cpumask_set(cmd): @@ -515,7 +519,7 @@ def _start_spdk(self, omap_state): time.sleep(2) # this is a temporary hack, we have a timing issue here. Once we solve it the sleep will ve removed self.spdk_process = subprocess.Popen(cmd, stdout=self.spdk_log_file, stderr=log_stderr) except Exception: - self.logger.exception(f"Unable to start SPDK") + self.logger.exception("Unable to start SPDK") raise # Initialization @@ -559,7 +563,7 @@ def _start_spdk(self, omap_state): conn_retries=conn_retries, ) except Exception: - self.logger.exception(f"Unable to initialize SPDK") + self.logger.exception("Unable to initialize SPDK") raise # Implicitly create transports @@ -576,12 +580,12 @@ def _start_spdk(self, omap_state): except KeyError: self.logger.error(f"Can't find SPDK version string in {return_version}") except Exception: - self.logger.exception(f"Can't read SPDK version") + self.logger.exception("Can't read SPDK version") pass def _stop_subprocess(self, proc, timeout): """Stops SPDK process.""" - assert proc is not None # should be verified by the caller + assert proc is not None # should be verified by the caller return_code = proc.returncode @@ -598,7 +602,7 @@ def _stop_subprocess(self, proc, timeout): except subprocess.TimeoutExpired: self.logger.exception(f"({self.name}) pid {proc.pid} " f"timeout occurred while terminating sub process:") - proc.kill() # kill -9, send KILL signal + proc.kill() # kill -9, send KILL signal def _stop_monitor_client(self): """Stops Monitor client.""" @@ -640,7 +644,7 @@ def _stop_spdk(self): def _stop_discovery(self): """Stops Discovery service process.""" - assert self.discovery_pid is not None # should be verified by the caller + assert self.discovery_pid is not None # should be verified by the caller self.logger.info("Terminating discovery service...") # discovery service selector loop should exit due to KeyboardInterrupt exception @@ -648,7 +652,7 @@ def _stop_discovery(self): os.kill(self.discovery_pid, signal.SIGINT) os.waitpid(self.discovery_pid, 0) except (ChildProcessError, ProcessLookupError): - pass # ignore + pass # ignore self.logger.info("Discovery service terminated") self.discovery_pid = None @@ -669,7 +673,7 @@ def _create_transport(self, trtype): raise try: - rpc_nvmf.nvmf_create_transport( self.spdk_rpc_client, **args) + rpc_nvmf.nvmf_create_transport(self.spdk_rpc_client, **args) except Exception: self.logger.exception(f"Create Transport {trtype} returned with error") raise @@ -677,7 +681,7 @@ def _create_transport(self, trtype): def keep_alive(self): """Continuously confirms communication with SPDK process.""" allowed_consecutive_spdk_ping_failures = self.config.getint_with_default("gateway", - "allowed_consecutive_spdk_ping_failures", 1) + "allowed_consecutive_spdk_ping_failures", 1) spdk_ping_interval_in_seconds = 
self.config.getfloat_with_default("gateway", "spdk_ping_interval_in_seconds", 2.0) if spdk_ping_interval_in_seconds < 0.0: self.logger.warning(f"Invalid SPDK ping interval {spdk_ping_interval_in_seconds}, will reset to 0") @@ -693,8 +697,8 @@ def keep_alive(self): while True: if self.gateway_rpc: if self.gateway_rpc.rebalance.rebalance_event.is_set(): - self.logger.critical(f"Failure in rebalance, aborting") - raise SystemExit(f"Failure in rebalance, quitting gateway") + self.logger.critical("Failure in rebalance, aborting") + raise SystemExit("Failure in rebalance, quitting gateway") timedout = self.server.wait_for_termination(timeout=1) if not timedout: break @@ -705,7 +709,7 @@ def keep_alive(self): consecutive_ping_failures += 1 if consecutive_ping_failures >= allowed_consecutive_spdk_ping_failures: self.logger.critical(f"SPDK ping failed {consecutive_ping_failures} times, aborting") - raise SystemExit(f"SPDK ping failed, quitting gateway") + raise SystemExit("SPDK ping failed, quitting gateway") else: self.logger.warning(f"SPDK ping failed {consecutive_ping_failures} times, will keep trying") else: @@ -717,7 +721,7 @@ def _ping(self): spdk.rpc.spdk_get_version(self.spdk_rpc_ping_client) return True except Exception: - self.logger.exception(f"spdk_get_version failed") + self.logger.exception("spdk_get_version failed") return False def probe_huge_pages(self): diff --git a/control/state.py b/control/state.py index 6d2faeb2..736313b7 100644 --- a/control/state.py +++ b/control/state.py @@ -20,6 +20,7 @@ from google.protobuf import json_format from .proto import gateway_pb2 as pb2 + class GatewayState(ABC): """Persists gateway NVMeoF target state. @@ -70,11 +71,12 @@ def build_namespace_qos_key(subsystem_nqn: str, nsid) -> str: key += GatewayState.OMAP_KEY_DELIMITER + str(nsid) return key - def build_namespace_host_key(subsystem_nqn: str, nsid, host : str) -> str: + def build_namespace_host_key(subsystem_nqn: str, nsid, host: str) -> str: key = GatewayState.NAMESPACE_HOST_PREFIX + subsystem_nqn if nsid is not None: key += GatewayState.OMAP_KEY_DELIMITER + str(nsid) - key += GatewayState.OMAP_KEY_DELIMITER + host + if host: + key += GatewayState.OMAP_KEY_DELIMITER + host return key def build_subsystem_key(subsystem_nqn: str) -> str: @@ -139,8 +141,14 @@ def remove_namespace(self, subsystem_nqn: str, nsid: str): # Delete all keys related to the namespace state = self.get_state() for key in state.keys(): - if (key.startswith(GatewayState.build_namespace_qos_key(subsystem_nqn, nsid)) or - key.startswith(GatewayState.build_namespace_host_key(subsystem_nqn, nsid, ""))): + # Separate if to several statements to keep flake8 happy + if key.startswith(GatewayState.build_namespace_qos_key(subsystem_nqn, nsid)): + self._remove_key(key) + elif key.startswith(GatewayState.build_namespace_host_key(subsystem_nqn, nsid, "")): + self._remove_key(key) + elif key.startswith(GatewayState.build_namespace_visibility_key(subsystem_nqn, nsid)): + self._remove_key(key) + elif key.startswith(GatewayState.build_namespace_lbgroup_key(subsystem_nqn, nsid)): self._remove_key(key) def add_namespace_qos(self, subsystem_nqn: str, nsid: str, val: str): @@ -153,12 +161,12 @@ def remove_namespace_qos(self, subsystem_nqn: str, nsid: str): key = GatewayState.build_namespace_qos_key(subsystem_nqn, nsid) self._remove_key(key) - def add_namespace_host(self, subsystem_nqn: str, nsid: str, host : str, val: str): + def add_namespace_host(self, subsystem_nqn: str, nsid: str, host: str, val: str): """Adds namespace's host to the state 
data store.""" key = GatewayState.build_namespace_host_key(subsystem_nqn, nsid, host) self._add_key(key, val) - def remove_namespace_host(self, subsystem_nqn: str, nsid: str, host : str): + def remove_namespace_host(self, subsystem_nqn: str, nsid: str, host: str): """Removes namespace's host from the state data store.""" key = GatewayState.build_namespace_host_key(subsystem_nqn, nsid, host) self._remove_key(key) @@ -176,11 +184,24 @@ def remove_subsystem(self, subsystem_nqn: str): # Delete all keys related to subsystem state = self.get_state() for key in state.keys(): - if (key.startswith(GatewayState.build_namespace_key(subsystem_nqn, None)) or - key.startswith(GatewayState.build_namespace_qos_key(subsystem_nqn, None)) or - key.startswith(GatewayState.build_namespace_host_key(subsystem_nqn, None, "")) or - key.startswith(GatewayState.build_host_key(subsystem_nqn, None)) or - key.startswith(GatewayState.build_partial_listener_key(subsystem_nqn, None))): + # Separate if to several statements to keep flake8 happy + if key.startswith(GatewayState.build_namespace_key(subsystem_nqn, None)): + self._remove_key(key) + elif key.startswith(GatewayState.build_namespace_qos_key(subsystem_nqn, None)): + self._remove_key(key) + elif key.startswith(GatewayState.build_namespace_host_key(subsystem_nqn, None, "")): + self._remove_key(key) + elif key.startswith(GatewayState.build_namespace_visibility_key(subsystem_nqn, None)): + self._remove_key(key) + elif key.startswith(GatewayState.build_namespace_lbgroup_key(subsystem_nqn, None)): + self._remove_key(key) + elif key.startswith(GatewayState.build_host_key(subsystem_nqn, None)): + self._remove_key(key) + elif key.startswith(GatewayState.build_host_key_key(subsystem_nqn, None)): + self._remove_key(key) + elif key.startswith(GatewayState.build_subsystem_key_key(subsystem_nqn)): + self._remove_key(key) + elif key.startswith(GatewayState.build_partial_listener_key(subsystem_nqn, None)): self._remove_key(key) def add_host(self, subsystem_nqn: str, host_nqn: str, val: str): @@ -195,6 +216,12 @@ def remove_host(self, subsystem_nqn: str, host_nqn: str): if key in state.keys(): self._remove_key(key) + # Delete all keys related to the host + state = self.get_state() + for key in state.keys(): + if key.startswith(GatewayState.build_host_key_key(subsystem_nqn, host_nqn)): + self._remove_key(key) + def add_listener(self, subsystem_nqn: str, gateway: str, trtype: str, traddr: str, trsvcid: int, val: str): """Adds a listener to the state data store.""" key = GatewayState.build_listener_key(subsystem_nqn, gateway, trtype, traddr, trsvcid) @@ -243,6 +270,7 @@ def reset(self, omap_state): """Resets dictionary with OMAP state.""" self.state = omap_state + class ReleasedLock: def __init__(self, lock: threading.Lock): self.lock = lock @@ -255,6 +283,7 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): self.lock.acquire() + class OmapLock: OMAP_FILE_LOCK_NAME = "omap_file_lock" OMAP_FILE_LOCK_COOKIE = "omap_file_cookie" @@ -269,12 +298,12 @@ def __init__(self, omap_state, gateway_state, rpc_lock: threading.Lock) -> None: self.omap_file_update_reloads = self.omap_state.config.getint_with_default("gateway", "omap_file_update_reloads", 10) self.omap_file_lock_retries = self.omap_state.config.getint_with_default("gateway", "omap_file_lock_retries", 30) self.omap_file_lock_retry_sleep_interval = self.omap_state.config.getfloat_with_default("gateway", - "omap_file_lock_retry_sleep_interval", 1.0) + "omap_file_lock_retry_sleep_interval", 1.0) 
self.lock_start_time = 0.0 # This is used for testing purposes only. To allow us testing locking from two gateways at the same time self.omap_file_disable_unlock = self.omap_state.config.getboolean_with_default("gateway", "omap_file_disable_unlock", False) if self.omap_file_disable_unlock: - self.logger.warning(f"Will not unlock OMAP file for testing purposes") + self.logger.warning("Will not unlock OMAP file for testing purposes") # # We pass the context from the different functions here. It should point to a real object in case we come from a real @@ -335,28 +364,29 @@ def lock_omap(self): assert self.rpc_lock.locked(), "The RPC lock is not locked." if not self.omap_state.ioctx: - self.logger.warning(f"Not locking OMAP as Rados connection is closed") + self.logger.warning("Not locking OMAP as Rados connection is closed") raise Exception("An attempt to lock OMAP file after Rados connection was closed") for i in range(0, self.omap_file_lock_retries + 1): try: self.omap_state.ioctx.lock_exclusive(self.omap_state.omap_name, self.OMAP_FILE_LOCK_NAME, - self.OMAP_FILE_LOCK_COOKIE, "OMAP file changes lock", self.omap_file_lock_duration, 0) + self.OMAP_FILE_LOCK_COOKIE, "OMAP file changes lock", + self.omap_file_lock_duration, 0) got_lock = True if i > 0: self.logger.info(f"Succeeded to lock OMAP file after {i} retries") break except rados.ObjectExists: - self.logger.info(f"We already locked the OMAP file") + self.logger.info("We already locked the OMAP file") got_lock = True break except rados.ObjectBusy: self.logger.warning( - f"The OMAP file is locked, will try again in {self.omap_file_lock_retry_sleep_interval} seconds") + f"The OMAP file is locked, will try again in {self.omap_file_lock_retry_sleep_interval} seconds") with ReleasedLock(self.rpc_lock): time.sleep(self.omap_file_lock_retry_sleep_interval) except Exception: - self.logger.exception(f"Unable to lock OMAP file, exiting") + self.logger.exception("Unable to lock OMAP file, exiting") raise if not got_lock: @@ -368,15 +398,14 @@ def lock_omap(self): local_version = self.omap_state.get_local_version() if omap_version > local_version: - self.logger.warning( - f"Local version {local_version} differs from OMAP file version {omap_version}." - f" The file is not current, will reload it and try again") + self.logger.warning(f"Local version {local_version} differs from OMAP file version {omap_version}." + " The file is not current, will reload it and try again") self.unlock_omap() raise OSError(errno.EAGAIN, "Unable to lock OMAP file, file not current", self.omap_state.omap_name) def unlock_omap(self): if self.omap_file_disable_unlock: - self.logger.warning(f"OMAP file unlock was disabled, will not unlock file") + self.logger.warning("OMAP file unlock was disabled, will not unlock file") return if not self.omap_state.ioctx: @@ -387,15 +416,16 @@ def unlock_omap(self): self.omap_state.ioctx.unlock(self.omap_state.omap_name, self.OMAP_FILE_LOCK_NAME, self.OMAP_FILE_LOCK_COOKIE) except rados.ObjectNotFound: if self.is_locked: - self.logger.warning(f"No such lock, the lock duration might have passed") + self.logger.warning("No such lock, the lock duration might have passed") except Exception: - self.logger.exception(f"Unable to unlock OMAP file") + self.logger.exception("Unable to unlock OMAP file") pass self.is_locked = False def locked(self): return self.is_locked + class OmapGatewayState(GatewayState): """Persists gateway NVMeoF target state to an OMAP object. 
@@ -442,7 +472,7 @@ def __init__(self, config, id_text=""): except rados.ObjectExists: self.logger.info(f"{self.omap_name} OMAP object already exists.") except Exception: - self.logger.exception(f"Unable to create OMAP, exiting!") + self.logger.exception("Unable to create OMAP, exiting!") raise def __exit__(self, exc_type, exc_value, traceback): @@ -451,8 +481,8 @@ def __exit__(self, exc_type, exc_value, traceback): def check_for_old_format_omap_files(self): omap_dict = self.get_state() for omap_item_key in omap_dict.keys(): - if omap_item_key.startswith("bdev"): - raise Exception("Old OMAP file format, still contains bdevs, please remove file and try again") + if omap_item_key.startswith("bdev"): + raise Exception("Old OMAP file format, still contains bdevs, please remove file and try again") def open_rados_connection(self, config): ceph_pool = config.get("ceph", "pool") @@ -475,7 +505,7 @@ def set_local_version(self, version_update: int): def get_omap_version(self) -> int: """Returns OMAP version.""" if not self.ioctx: - self.logger.warning(f"Trying to get OMAP version when Rados connection is closed") + self.logger.warning("Trying to get OMAP version when Rados connection is closed") return -1 with rados.ReadOpCtx() as read_op: @@ -497,7 +527,7 @@ def get_state(self) -> Dict[str, str]: omap_list = [("", 0)] # Dummy, non empty, list value. Just so we would enter the while omap_dict = {} if not self.ioctx: - self.logger.warning(f"Trying to get OMAP state when Rados connection is closed") + self.logger.warning("Trying to get OMAP state when Rados connection is closed") return omap_dict # The number of items returned is limited by Ceph, so we need to read in a loop until no more items are returned while len(omap_list) > 0: @@ -527,14 +557,14 @@ def _add_key(self, key: str, val: str): self.version = version_update self.logger.debug(f"omap_key generated: {key}") except Exception: - self.logger.exception(f"Unable to add key to OMAP, exiting!") + self.logger.exception("Unable to add key to OMAP, exiting!") raise # Notify other gateways within the group of change try: - self.ioctx.notify(self.omap_name, timeout_ms = self.notify_timeout) + self.ioctx.notify(self.omap_name, timeout_ms=self.notify_timeout) except Exception: - self.logger.warning(f"Failed to notify.") + self.logger.warning("Failed to notify.") def _remove_key(self, key: str): """Removes key from the OMAP.""" @@ -554,14 +584,14 @@ def _remove_key(self, key: str): self.version = version_update self.logger.debug(f"omap_key removed: {key}") except Exception: - self.logger.exception(f"Unable to remove key from OMAP, exiting!") + self.logger.exception("Unable to remove key from OMAP, exiting!") raise # Notify other gateways within the group of change try: - self.ioctx.notify(self.omap_name, timeout_ms = self.notify_timeout) + self.ioctx.notify(self.omap_name, timeout_ms=self.notify_timeout) except Exception: - self.logger.warning(f"Failed to notify.") + self.logger.warning("Failed to notify.") def delete_state(self): """Deletes OMAP object contents.""" @@ -575,9 +605,9 @@ def delete_state(self): self.ioctx.set_omap(write_op, (self.OMAP_VERSION_KEY,), (str(1),)) self.ioctx.operate_write_op(write_op, self.omap_name) - self.logger.info(f"Deleted OMAP contents.") + self.logger.info("Deleted OMAP contents.") except Exception: - self.logger.exception(f"Error deleting OMAP contents, exiting!") + self.logger.exception("Error deleting OMAP contents, exiting!") raise def register_watch(self, notify_event): @@ -593,11 +623,11 @@ def 
_watcher_callback(notify_id, notifier_id, watch_id, data): try: self.watch = self.ioctx.watch(self.omap_name, _watcher_callback) except Exception: - self.logger.exception(f"Unable to initiate watch") + self.logger.exception("Unable to initiate watch") else: - self.logger.info(f"Watch already exists.") + self.logger.info("Watch already exists.") - def cleanup_omap(self, omap_lock = None): + def cleanup_omap(self, omap_lock=None): self.logger.info(f"Cleanup OMAP on exit ({self.id_text})") if self.watch: try: @@ -622,6 +652,7 @@ def cleanup_omap(self, omap_lock = None): self.conn.shutdown() self.conn = None + class GatewayStateHandler: """Maintains consistency in NVMeoF target state store instances. @@ -673,12 +704,12 @@ def remove_namespace_qos(self, subsystem_nqn: str, nsid: str): self.omap.remove_namespace_qos(subsystem_nqn, nsid) self.local.remove_namespace_qos(subsystem_nqn, nsid) - def add_namespace_host(self, subsystem_nqn: str, nsid: str, host : str, val: str): + def add_namespace_host(self, subsystem_nqn: str, nsid: str, host: str, val: str): """Adds namespace's host to the state data store.""" self.omap.add_namespace_host(subsystem_nqn, nsid, host, val) self.local.add_namespace_host(subsystem_nqn, nsid, host, val) - def remove_namespace_host(self, subsystem_nqn: str, nsid: str, host : str): + def remove_namespace_host(self, subsystem_nqn: str, nsid: str, host: str): """Removes namespace's host from the state data store.""" self.omap.remove_namespace_host(subsystem_nqn, nsid, host) self.local.remove_namespace_host(subsystem_nqn, nsid, host) @@ -798,7 +829,7 @@ def host_only_key_changed(self, old_val, new_val): old_req = None new_req = None try: - old_req = json_format.Parse(old_val, pb2.add_host_req(), ignore_unknown_fields=True ) + old_req = json_format.Parse(old_val, pb2.add_host_req(), ignore_unknown_fields=True) except json_format.ParseError: self.logger.exception(f"Got exception parsing {old_val}") return (False, None) @@ -827,7 +858,7 @@ def subsystem_only_key_changed(self, old_val, new_val): old_req = None new_req = None try: - old_req = json_format.Parse(old_val, pb2.create_subsystem_req(), ignore_unknown_fields=True ) + old_req = json_format.Parse(old_val, pb2.create_subsystem_req(), ignore_unknown_fields=True) except json_format.ParseError: self.logger.exception(f"Got exception parsing {old_val}") return (False, None) @@ -855,7 +886,7 @@ def break_namespace_key(self, ns_key: str): if not ns_key.startswith(GatewayState.NAMESPACE_PREFIX): self.logger.warning(f"Invalid namespace key \"{ns_key}\", can't find key parts") return (None, None) - key_end = ns_key[len(GatewayState.NAMESPACE_PREFIX) : ] + key_end = ns_key[len(GatewayState.NAMESPACE_PREFIX):] key_parts = key_end.split(GatewayState.OMAP_KEY_DELIMITER) if len(key_parts) != 2: self.logger.warning(f"Invalid namespace key \"{ns_key}\", can't find key parts") @@ -876,7 +907,7 @@ def break_host_key(self, host_key: str): if not host_key.startswith(GatewayState.HOST_PREFIX): self.logger.warning(f"Invalid host key \"{host_key}\", can't find key parts") return (None, None) - key_end = host_key[len(GatewayState.HOST_PREFIX) : ] + key_end = host_key[len(GatewayState.HOST_PREFIX):] key_parts = key_end.split(GatewayState.OMAP_KEY_DELIMITER) if len(key_parts) != 2: self.logger.warning(f"Invalid host key \"{host_key}\", can't find key parts") @@ -921,11 +952,11 @@ def update(self) -> bool: """Checks for updated OMAP state and initiates local update.""" if self.update_is_active_lock.locked(): - self.logger.warning(f"An update is already 
running, ignore") + self.logger.warning("An update is already running, ignore") return False if not self.omap.ioctx: - self.logger.warning(f"Can't update when Rados connection is closed") + self.logger.warning("Can't update when Rados connection is closed") return False with self.update_is_active_lock: @@ -1016,7 +1047,7 @@ def update(self) -> bool: including_default_value_fields=True) added[lbgroup_key] = json_req except Exception: - self.logger.exception(f"Exception formatting change namespace load balancing group request") + self.logger.exception("Exception formatting change namespace load balancing group request") for ns_key, new_visibility in only_visibility_changed: ns_nqn = None @@ -1035,7 +1066,7 @@ def update(self) -> bool: including_default_value_fields=True) added[visibility_key] = json_req except Exception: - self.logger.exception(f"Exception formatting change namespace visibility request") + self.logger.exception("Exception formatting change namespace visibility request") for host_key, new_dhchap_key in only_host_key_changed: subsys_nqn = None @@ -1046,13 +1077,12 @@ def update(self) -> bool: except Exception: self.logger.exception(f"Exception removing {host_key} from {changed}") if host_nqn == "*": - self.logger.warning(f"Something went wrong, host \"*\" can't have DH-HMAC-CHAP keys, ignore") + self.logger.warning("Something went wrong, host \"*\" can't have DH-HMAC-CHAP keys, ignore") continue if subsys_nqn and host_nqn: try: host_key_key = GatewayState.build_host_key_key(subsys_nqn, host_nqn) - req = pb2.change_host_key_req(subsystem_nqn=subsys_nqn, host_nqn=host_nqn, - dhchap_key=new_dhchap_key) + req = pb2.change_host_key_req(subsystem_nqn=subsys_nqn, host_nqn=host_nqn, dhchap_key=new_dhchap_key) json_req = json_format.MessageToJson(req, preserving_proto_field_name=True, including_default_value_fields=True) added[host_key_key] = json_req diff --git a/control/utils.py b/control/utils.py index 570f5415..65d16ba5 100644 --- a/control/utils.py +++ b/control/utils.py @@ -21,7 +21,7 @@ class GatewayEnumUtils: - def get_value_from_key(e_type, keyval, ignore_case = False): + def get_value_from_key(e_type, keyval, ignore_case=False): val = None try: key_index = e_type.keys().index(keyval) @@ -31,10 +31,13 @@ def get_value_from_key(e_type, keyval, ignore_case = False): except IndexError: pass - if ignore_case and val == None and type(keyval) == str: - val = get_value_from_key(e_type, keyval.lower(), False) - if ignore_case and val == None and type(keyval) == str: - val = get_value_from_key(e_type, keyval.upper(), False) + if val is not None or not ignore_case: + return val + + if isinstance(keyval, str): + val = GatewayEnumUtils.get_value_from_key(e_type, keyval.lower(), False) + if val is None and isinstance(keyval, str): + val = GatewayEnumUtils.get_value_from_key(e_type, keyval.upper(), False) return val @@ -49,17 +52,18 @@ def get_key_from_value(e_type, val): pass return keyval + class GatewayUtils: DISCOVERY_NQN = "nqn.2014-08.org.nvmexpress.discovery" # We need to enclose IPv6 addresses in brackets before concatenating a colon and port number to it - def escape_address_if_ipv6(addr : str) -> str: + def escape_address_if_ipv6(addr: str) -> str: ret_addr = addr if ":" in addr and not addr.strip().startswith("["): ret_addr = f"[{addr}]" return ret_addr - def unescape_address_if_ipv6(addr : str, adrfam : str) -> str: + def unescape_address_if_ipv6(addr: str, adrfam: str) -> str: ret_addr = addr.strip() if adrfam.lower() == "ipv6": ret_addr = 
ret_addr.removeprefix("[").removesuffix("]") @@ -74,7 +78,7 @@ def is_valid_rev_domain(rev_domain): domain_parts = rev_domain.split(".") for lbl in domain_parts: if not lbl: - return (errno.EINVAL, f"empty domain label doesn't start with a letter") + return (errno.EINVAL, "empty domain label doesn't start with a letter") if len(lbl) > DOMAIN_LABEL_MAX_LEN: return (errno.EINVAL, f"domain label {lbl} is too long") @@ -111,7 +115,7 @@ def is_valid_uuid(uuid_val) -> bool: for u in uuid_parts: try: - n = int(u, 16) + int(u, 16) except ValueError: return False @@ -125,11 +129,11 @@ def is_valid_nqn(nqn): NQN_UUID_PREFIX = "nqn.2014-08.org.nvmexpress:uuid:" NQN_UUID_PREFIX_LENGTH = len(NQN_UUID_PREFIX) - if type(nqn) != str: + if not isinstance(nqn, str): return (errno.EINVAL, f"Invalid type {type(nqn)} for NQN, must be a string") try: - b = nqn.encode(encoding="utf-8") + nqn.encode(encoding="utf-8") except UnicodeEncodeError: return (errno.EINVAL, f"Invalid NQN \"{nqn}\", must have an UTF-8 encoding") @@ -145,7 +149,7 @@ def is_valid_nqn(nqn): if nqn.startswith(NQN_UUID_PREFIX): if len(nqn) != NQN_UUID_PREFIX_LENGTH + UUID_STRING_LENGTH: return (errno.EINVAL, f"Invalid NQN \"{nqn}\": UUID is not the correct length") - uuid_part = nqn[NQN_UUID_PREFIX_LENGTH : ] + uuid_part = nqn[NQN_UUID_PREFIX_LENGTH:] if not GatewayUtils.is_valid_uuid(uuid_part): return (errno.EINVAL, f"Invalid NQN \"{nqn}\": UUID is not formatted correctly") return (0, os.strerror(0)) @@ -153,18 +157,18 @@ def is_valid_nqn(nqn): if not nqn.startswith(NQN_PREFIX): return (errno.EINVAL, f"Invalid NQN \"{nqn}\", doesn't start with \"{NQN_PREFIX}\"") - nqn_no_prefix = nqn[len(NQN_PREFIX) : ] - date_part = nqn_no_prefix[ : 8] - rev_domain_part = nqn_no_prefix[8 : ] + nqn_no_prefix = nqn[len(NQN_PREFIX):] + date_part = nqn_no_prefix[:8] + rev_domain_part = nqn_no_prefix[8:] if not date_part.endswith("."): return (errno.EINVAL, f"Invalid NQN \"{nqn}\": invalid date code") - date_part = date_part[ : -1] + date_part = date_part[:-1] try: year_part, month_part = date_part.split("-") if len(year_part) != 4 or len(month_part) != 2: return (errno.EINVAL, f"Invalid NQN \"{nqn}\": invalid date code") - n = int(year_part) - n = int(month_part) + int(year_part) + int(month_part) except ValueError: return (errno.EINVAL, f"Invalid NQN \"{nqn}\": invalid date code") @@ -182,6 +186,7 @@ def is_valid_nqn(nqn): return (0, os.strerror(0)) + class GatewayLogger: CEPH_LOG_DIRECTORY = "/var/log/ceph/" MAX_LOG_FILE_SIZE_DEFAULT = 10 @@ -247,8 +252,8 @@ def __init__(self, config=None): os.makedirs(self.log_directory, 0o755, True) logdir_ok = True self.handler = logging.handlers.RotatingFileHandler(self.log_directory + "/" + GatewayLogger.NVME_LOG_FILE_NAME, - maxBytes = max_log_file_size * 1024 * 1024, - backupCount = max_log_files_count) + maxBytes=max_log_file_size * 1024 * 1024, + backupCount=max_log_files_count) self.handler.setFormatter(frmtr) if log_files_rotation_enabled: self.handler.rotator = GatewayLogger.log_file_rotate @@ -270,19 +275,19 @@ def __init__(self, config=None): if not logdir_ok: self.logger.error(f"Failed to create directory {self.log_directory}, the log wouldn't be saved to a file") elif not self.handler: - self.logger.error(f"Failed to set up log file handler, the log wouldn't be saved to a file") + self.logger.error("Failed to set up log file handler, the log wouldn't be saved to a file") else: rot_msg = "" if log_files_rotation_enabled: rot_msg = ", using rotation" self.logger.info(f"Log files will be saved in 
{self.log_directory}{rot_msg}") else: - self.logger.warning(f"Log files are disabled, the log wouldn't be saved to a file") + self.logger.warning("Log files are disabled, the log wouldn't be saved to a file") GatewayLogger.init_executed = True def rotate_backup_directories(dirname, count): try: - shutil.rmtree(dirname + f".bak{count}", ignore_errors = True) + shutil.rmtree(dirname + f".bak{count}", ignore_errors=True) except Exception: pass for i in range(count, 2, -1): @@ -291,22 +296,22 @@ def rotate_backup_directories(dirname, count): except Exception: pass try: - os.rename(dirname + f".bak", dirname + f".bak2") + os.rename(dirname + ".bak", dirname + ".bak2") except Exception: pass try: - os.rename(dirname, dirname + f".bak") + os.rename(dirname, dirname + ".bak") except Exception: pass # Just to be on the safe side, in case the rename failed try: - shutil.rmtree(dirname, ignore_errors = True) + shutil.rmtree(dirname, ignore_errors=True) except Exception: pass def set_log_level(self, log_level): - if type(log_level) == str: + if isinstance(log_level, str): log_level = log_level.upper() self.logger.setLevel(log_level) logger_parent = self.logger.parent @@ -326,7 +331,6 @@ def log_file_rotate(src, dest): GatewayLogger.logger.info(m) for e in errs: GatewayLogger.logger.error(e) - else: os.rename(src, dest) @@ -377,7 +381,7 @@ def compress_final_log_file(self, gw_name): return if not gw_name: - self.logger.error(f"No gateway name, can't compress the log file") + self.logger.error("No gateway name, can't compress the log file") return if not self.log_directory.endswith(gw_name): @@ -390,8 +394,7 @@ def compress_final_log_file(self, gw_name): dest_name = self.log_directory + "/" + GatewayLogger.NVME_LOG_FILE_NAME + ".gz" if os.access(self.log_directory + "/" + GatewayLogger.NVME_LOG_FILE_NAME + ".1", - os.F_OK) and not os.access(self.log_directory + "/" + GatewayLogger.NVME_LOG_FILE_NAME + ".0", - os.F_OK): + os.F_OK) and not os.access(self.log_directory + "/" + GatewayLogger.NVME_LOG_FILE_NAME + ".0", os.F_OK): dest_name = self.log_directory + "/" + GatewayLogger.NVME_LOG_FILE_NAME + ".0" msgs, errs = GatewayLogger.compress_file(self.log_directory + "/" + GatewayLogger.NVME_LOG_FILE_NAME, dest_name) diff --git a/tests/conftest.py b/tests/conftest.py index 73f4baa5..6bf54826 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,7 +1,5 @@ import pytest -import rados from control.config import GatewayConfig -from control.state import OmapGatewayState def pytest_addoption(parser): diff --git a/tests/ha/start_up.sh b/tests/ha/start_up.sh index 30966891..49453862 100755 --- a/tests/ha/start_up.sh +++ b/tests/ha/start_up.sh @@ -13,7 +13,7 @@ if [ $# -ge 1 ]; then fi fi echo ℹ️ Starting $SCALE nvmeof gateways -docker compose up -d --remove-orphans --scale nvmeof=$SCALE nvmeof +docker compose up -d --remove-orphans ceph # Waiting for the ceph container to become healthy while true; do @@ -28,6 +28,7 @@ while true; do fi done echo ✅ ceph is healthy +docker compose up -d --remove-orphans --scale nvmeof=$SCALE nvmeof echo ℹ️ Increase debug logs level docker compose exec -T ceph ceph config get mon.a diff --git a/tests/test_cli.py b/tests/test_cli.py index 4c4513ad..8f0b0537 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -69,6 +69,7 @@ config = "ceph-nvmeof.conf" group_name = "GROUPNAME" + @pytest.fixture(scope="module") def gateway(config): """Sets up and tears down Gateway""" @@ -101,6 +102,7 @@ def gateway(config): gateway.server.stop(grace=1) 
gateway.gateway_rpc.gateway_state.delete_state() + class TestGet: def test_get_subsystems(self, caplog, gateway): caplog.clear() @@ -158,7 +160,7 @@ def test_get_gateway_info(self, caplog, gateway): caplog.clear() spdk_ver = os.getenv("NVMEOF_SPDK_VERSION") gw_info = cli_test(["gw", "info"]) - assert gw_info != None + assert gw_info is not None assert gw_info.cli_version == cli_ver assert gw_info.version == cli_ver assert gw_info.spdk_version == spdk_ver @@ -171,46 +173,50 @@ def test_get_gateway_info(self, caplog, gateway): assert gw_info.status == 0 assert gw_info.bool_status + class TestCreate: def test_create_subsystem(self, caplog, gateway): caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.2016", "--no-group-append"]) - assert f'NQN "nqn.2016" is too short, minimal length is 11' in caplog.text + assert 'NQN "nqn.2016" is too short, minimal length is 11' in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", -"nqn.2016-06XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "--no-group-append"]) - assert f"is too long, maximal length is 223" in caplog.text + "nqn.2016-06XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" + "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", + "--no-group-append"]) + assert "is too long, maximal length is 223" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.2014-08.org.nvmexpress:uuid:0", "--no-group-append"]) - assert f"UUID is not the correct length" in caplog.text + assert "UUID is not the correct length" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.2014-08.org.nvmexpress:uuid:9e9134-3cb431-4f3e-91eb-a13cefaabebf", "--no-group-append"]) - assert f"UUID is not formatted correctly" in caplog.text + assert "UUID is not formatted correctly" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", "qqn.2016-06.io.spdk:cnode1", "--no-group-append"]) - assert f"doesn't start with" in caplog.text + assert "doesn't start with" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.016-206.io.spdk:cnode1", "--no-group-append"]) - assert f"invalid date code" in caplog.text + assert "invalid date code" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.2X16-06.io.spdk:cnode1", "--no-group-append"]) - assert f"invalid date code" in caplog.text + assert "invalid date code" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.2016-06.io.spdk:", "--no-group-append"]) - assert f"must contain a user specified name starting with" in caplog.text + assert "must contain a user specified name starting with" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.2016-06.io..spdk:cnode1", "--no-group-append"]) - assert f"reverse domain is not formatted correctly" in caplog.text + assert "reverse domain is not formatted correctly" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", "nqn.2016-06.io.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.spdk:cnode1", "--no-group-append"]) - assert f"reverse domain is not formatted correctly" in caplog.text + assert "reverse domain is not formatted correctly" in caplog.text caplog.clear() cli(["subsystem", 
"add", "--subsystem", "nqn.2016-06.io.-spdk:cnode1", "--no-group-append"]) - assert f"reverse domain is not formatted correctly" in caplog.text + assert "reverse domain is not formatted correctly" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", f"{subsystem}_X", "--no-group-append"]) - assert f"Invalid NQN" in caplog.text - assert f"contains invalid characters" in caplog.text + assert "Invalid NQN" in caplog.text + assert "contains invalid characters" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", subsystem, "--max-namespaces", "2049", "--no-group-append"]) assert f"The requested max number of namespaces for subsystem {subsystem} (2049) is greater than the global limit on the number of namespaces (12), will continue" in caplog.text @@ -218,7 +224,7 @@ def test_create_subsystem(self, caplog, gateway): cli(["--format", "json", "subsystem", "list"]) assert f'"serial_number": "{serial}"' not in caplog.text assert f'"nqn": "{subsystem}"' in caplog.text - assert f'"max_namespaces": 2049' in caplog.text + assert '"max_namespaces": 2049' in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", subsystem, "--max-namespaces", "2049", "--no-group-append"]) assert f"Failure creating subsystem {subsystem}: Subsystem already exists" in caplog.text @@ -250,14 +256,14 @@ def test_create_subsystem(self, caplog, gateway): assert f'{subsystem2}' in caplog.text caplog.clear() cli(["subsystem", "list", "--serial-number", "JUNK"]) - assert f"No subsystem with serial number JUNK" in caplog.text + assert "No subsystem with serial number JUNK" in caplog.text caplog.clear() cli(["subsystem", "list", "--subsystem", "JUNK"]) - assert f"Failure listing subsystems: No such device" in caplog.text - assert f'"nqn": "JUNK"' in caplog.text + assert "Failure listing subsystems: No such device" in caplog.text + assert '"nqn": "JUNK"' in caplog.text caplog.clear() subs_list = cli_test(["--format", "text", "subsystem", "list"]) - assert subs_list != None + assert subs_list is not None assert subs_list.status == 0 assert subs_list.subsystems[0].nqn == subsystem assert subs_list.subsystems[1].nqn == subsystem2 @@ -266,7 +272,7 @@ def test_create_subsystem(self, caplog, gateway): assert f"Failure creating subsystem {subsystem8}: Serial number {serial} is already used by subsystem {subsystem2}" in caplog.text caplog.clear() subs_list = cli_test(["subsystem", "list"]) - assert subs_list != None + assert subs_list is not None assert subs_list.status == 0 assert subs_list.subsystems[0].nqn == subsystem assert subs_list.subsystems[1].nqn == subsystem2 @@ -295,7 +301,7 @@ def test_add_namespace_wrong_balancing_group(self, caplog, gateway): caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image4, "--size", "16MB", "--rbd-create-image", "--load-balancing-group", "100", "--force"]) assert f"Failure adding namespace to {subsystem}:" in caplog.text - assert f"Load balancing group 100 doesn't exist" in caplog.text + assert "Load balancing group 100 doesn't exist" in caplog.text def test_add_namespace_wrong_size(self, caplog, gateway): caplog.clear() @@ -321,21 +327,21 @@ def test_add_namespace_wrong_size_grpc(self, caplog, gateway): gw, stub = gateway caplog.clear() add_namespace_req = pb2.namespace_add_req(subsystem_nqn=subsystem, rbd_pool_name=pool, rbd_image_name="junkimage", - block_size=512, create_image=True, size=16*1024*1024+20) + block_size=512, create_image=True, size=16 * 1024 * 1024 + 20) ret = 
stub.namespace_add(add_namespace_req) assert ret.status != 0 - assert f"Failure adding namespace" in caplog.text - assert f"image size must be aligned to MiBs" in caplog.text + assert "Failure adding namespace" in caplog.text + assert "image size must be aligned to MiBs" in caplog.text def test_add_namespace_wrong_block_size(self, caplog, gateway): gw, stub = gateway caplog.clear() add_namespace_req = pb2.namespace_add_req(subsystem_nqn=subsystem, rbd_pool_name=pool, rbd_image_name="junkimage", - create_image=True, size=16*1024*1024, force=True) + create_image=True, size=16 * 1024 * 1024, force=True) ret = stub.namespace_add(add_namespace_req) assert ret.status != 0 - assert f"Failure adding namespace" in caplog.text - assert f"block size can't be zero" in caplog.text + assert "Failure adding namespace" in caplog.text + assert "block size can't be zero" in caplog.text def test_add_namespace_double_uuid(self, caplog, gateway): caplog.clear() @@ -354,7 +360,7 @@ def test_add_namespace_double_nsid(self, caplog, gateway): assert f"Adding namespace 1 to {subsystem}: Successful" in caplog.text caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image3, "--nsid", "1", "--size", "16MB", "--rbd-create-image", "--force"]) - assert f"Failure adding namespace, NSID 1 is already in use" in caplog.text + assert "Failure adding namespace, NSID 1 is already in use" in caplog.text caplog.clear() cli(["namespace", "del", "--subsystem", subsystem, "--nsid", "1"]) assert f"Deleting namespace 1 from {subsystem}: Successful" in caplog.text @@ -362,13 +368,13 @@ def test_add_namespace_double_nsid(self, caplog, gateway): def test_add_namespace(self, caplog, gateway): caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", "junk", "--rbd-image", image2, "--uuid", uuid, "--size", "16MB", "--rbd-create-image", "--load-balancing-group", anagrpid]) - assert f"RBD pool junk doesn't exist" in caplog.text + assert "RBD pool junk doesn't exist" in caplog.text caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--uuid", uuid, "--size", "16MB", "--rbd-create-image", "--load-balancing-group", anagrpid, "--force"]) assert f"Adding namespace 1 to {subsystem}: Successful" in caplog.text assert f"Allocated cluster name='cluster_context_{anagrpid}_0'" in caplog.text assert f"get_cluster cluster_name='cluster_context_{anagrpid}_0'" in caplog.text - assert f"no_auto_visible: False" in caplog.text + assert "no_auto_visible: False" in caplog.text caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image2, "--size", "36", "--rbd-create-image", "--load-balancing-group", anagrpid, "--force"]) assert f"Image {pool}/{image2} already exists with a size of 16777216 bytes which differs from the requested size of 37748736 bytes" in caplog.text @@ -441,7 +447,7 @@ def test_add_namespace(self, caplog, gateway): caplog.clear() cli(["namespace", "change_load_balancing_group", "--subsystem", subsystem, "--nsid", nsid, "--load-balancing-group", "10"]) assert f"Failure changing load balancing group for namespace with NSID {nsid} in {subsystem}" in caplog.text - assert f"Load balancing group 10 doesn't exist" in caplog.text + assert "Load balancing group 10 doesn't exist" in caplog.text caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image3, "--size", "4GB", "--rbd-create-image"]) assert f"Adding namespace 3 to 
{subsystem}: Successful" in caplog.text @@ -452,14 +458,14 @@ def test_add_namespace(self, caplog, gateway): def test_add_namespace_ipv6(self, caplog, gateway): caplog.clear() - cli(["--server-address", server_addr_ipv6, "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", anagrpid, "--nsid", "4", "--force"]) + cli(["--server-address", server_addr_ipv6, "namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", anagrpid, "--nsid", "4", "--force"]) assert f"Adding namespace 4 to {subsystem}: Successful" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "4"]) assert f'"load_balancing_group": {anagrpid}' in caplog.text - cli(["--server-address", server_addr_ipv6, "namespace", "add", "--subsystem", subsystem, "--nsid", "5", "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", anagrpid, "--force"]) + cli(["--server-address", server_addr_ipv6, "namespace", "add", "--subsystem", subsystem, "--nsid", "5", "--rbd-pool", pool, "--rbd-image", image, "--load-balancing-group", anagrpid, "--force"]) assert f"Adding namespace 5 to {subsystem}: Successful" in caplog.text - assert f'will continue as the "force" argument was used' in caplog.text + assert 'will continue as the "force" argument was used' in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "5"]) assert f'"load_balancing_group": {anagrpid}' in caplog.text @@ -472,26 +478,26 @@ def test_add_namespace_same_image(self, caplog, gateway): caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--size", "16MB", "--load-balancing-group", anagrpid, "--rbd-create-image", "--nsid", "7"]) assert f"RBD image {pool}/{img_name} is already used by a namespace" in caplog.text - assert f"you can find the offending namespace by using" in caplog.text + assert "you can find the offending namespace by using" in caplog.text caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", img_name, "--load-balancing-group", anagrpid, "--force", "--nsid", "7"]) assert f"Adding namespace 7 to {subsystem}: Successful" in caplog.text assert f"RBD image {pool}/{img_name} is already used by a namespace" in caplog.text - assert f'will continue as the "force" argument was used' in caplog.text + assert 'will continue as the "force" argument was used' in caplog.text def test_add_namespace_no_auto_visible(self, caplog, gateway): caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image5, "--size", "16MB", "--rbd-create-image", "--no-auto-visible"]) assert f"Adding namespace 8 to {subsystem}: Successful" in caplog.text - assert f"no_auto_visible: True" in caplog.text + assert "no_auto_visible: True" in caplog.text caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image6, "--size", "16MB", "--rbd-create-image", "--no-auto-visible"]) assert f"Adding namespace 9 to {subsystem}: Successful" in caplog.text - assert f"no_auto_visible: True" in caplog.text + assert "no_auto_visible: True" in caplog.text caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image7, "--size", "16MB", "--rbd-create-image", "--no-auto-visible"]) assert f"Adding namespace 10 to {subsystem}: Successful" in caplog.text - 
assert f"no_auto_visible: True" in caplog.text + assert "no_auto_visible: True" in caplog.text def test_add_host_to_namespace(self, caplog, gateway): caplog.clear() @@ -531,9 +537,9 @@ def test_change_namespace_visibility(self, caplog, gateway): assert f"No change to namespace 8 in {subsystem} visibility, nothing to do" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "8"]) - assert f'"nsid": 8' in caplog.text - assert f'"auto_visible": true' in caplog.text - assert f'"hosts": []' in caplog.text + assert '"nsid": 8' in caplog.text + assert '"auto_visible": true' in caplog.text + assert '"hosts": []' in caplog.text caplog.clear() cli(["namespace", "add_host", "--subsystem", subsystem, "--nsid", "8", "--host-nqn", host8]) assert f"Failure adding host {host8} to namespace 8 on {subsystem}: Namespace is visible to all hosts" in caplog.text @@ -542,17 +548,17 @@ def test_change_namespace_visibility(self, caplog, gateway): assert f'Changing visibility of namespace 8 in {subsystem} to "visible to selected hosts": Successful' in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "8"]) - assert f'"nsid": 8' in caplog.text - assert '"auto_visible":' not in caplog.text or f'"auto_visible": false' in caplog.text - assert f'"hosts": []' in caplog.text + assert '"nsid": 8' in caplog.text + assert '"auto_visible":' not in caplog.text or '"auto_visible": false' in caplog.text + assert '"hosts": []' in caplog.text caplog.clear() cli(["namespace", "add_host", "--subsystem", subsystem, "--nsid", "8", "--host-nqn", host8]) assert f"Adding host {host8} to namespace 8 on {subsystem}: Successful" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "8"]) - assert f'"nsid": 8' in caplog.text - assert '"auto_visible":' not in caplog.text or f'"auto_visible": false' in caplog.text - assert f'"hosts": []' not in caplog.text + assert '"nsid": 8' in caplog.text + assert '"auto_visible":' not in caplog.text or '"auto_visible": false' in caplog.text + assert '"hosts": []' not in caplog.text assert f"{host8}" in caplog.text def test_change_namespace_visibility_wrong_params(self, caplog, gateway): @@ -608,7 +614,7 @@ def test_add_too_many_namespaces_to_a_subsystem(self, caplog, gateway): caplog.clear() cli(["namespace", "add", "--subsystem", subsystem, "--rbd-pool", pool, "--rbd-image", image9, "--nsid", "3000", "--size", "16MB", "--rbd-create-image"]) assert f"Failure adding namespace using NSID 3000 to {subsystem}: Requested NSID 3000 is bigger than the maximal one (2049)" in caplog.text - assert f"Received request to delete bdev" in caplog.text + assert "Received request to delete bdev" in caplog.text caplog.clear() cli(["subsystem", "add", "--subsystem", subsystem5, "--no-group-append", "--max-namespaces", "1"]) assert f"Adding subsystem {subsystem5}: Successful" in caplog.text @@ -618,7 +624,7 @@ def test_add_too_many_namespaces_to_a_subsystem(self, caplog, gateway): caplog.clear() cli(["namespace", "add", "--subsystem", subsystem5, "--rbd-pool", pool, "--rbd-image", image10, "--size", "16MB", "--rbd-create-image"]) assert f"Failure adding namespace to {subsystem5}: Subsystem's maximal number of namespaces (1) has already been reached" in caplog.text - assert f"Received request to delete bdev" in caplog.text + assert "Received request to delete bdev" in caplog.text caplog.clear() cli(["subsystem", "del", "--subsystem", subsystem5, 
"--force"]) assert f"Deleting subsystem {subsystem5}: Successful" in caplog.text @@ -659,10 +665,10 @@ def test_add_too_many_namespaces_with_hosts(self, caplog, gateway): def test_list_namespace_with_hosts(self, caplog, gateway): caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "9"]) - assert f'"nsid": 9' in caplog.text - assert '"auto_visible":' not in caplog.text or f'"auto_visible": false' in caplog.text + assert '"nsid": 9' in caplog.text + assert '"auto_visible":' not in caplog.text or '"auto_visible": false' in caplog.text assert f'"{host8}"' in caplog.text - assert f'"hosts": []' not in caplog.text + assert '"hosts": []' not in caplog.text def test_del_namespace_host(self, caplog, gateway): caplog.clear() @@ -688,19 +694,19 @@ def test_del_namespace_host(self, caplog, gateway): assert f"Failure deleting host {host8} from namespace 9 on {subsystemX}: Can't find subsystem" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "9"]) - assert f'"nsid": 9' in caplog.text - assert '"auto_visible":' not in caplog.text or f'"auto_visible": false' in caplog.text + assert '"nsid": 9' in caplog.text + assert '"auto_visible":' not in caplog.text or '"auto_visible": false' in caplog.text assert f'"{host8}"' in caplog.text - assert f'"hosts": []' not in caplog.text + assert '"hosts": []' not in caplog.text caplog.clear() cli(["namespace", "del_host", "--subsystem", subsystem, "--nsid", "9", "--host-nqn", host8]) assert f"Deleting host {host8} from namespace 9 on {subsystem}: Successful" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "9"]) - assert f'"nsid": 9' in caplog.text - assert '"auto_visible":' not in caplog.text or f'"auto_visible": false' in caplog.text + assert '"nsid": 9' in caplog.text + assert '"auto_visible":' not in caplog.text or '"auto_visible": false' in caplog.text assert f'"{host8}"' not in caplog.text - assert f'"hosts": []' in caplog.text + assert '"hosts": []' in caplog.text caplog.clear() cli(["namespace", "del_host", "--subsystem", subsystem, "--nsid", "9", "--host-nqn", hostxx]) assert f"Failure deleting host {hostxx} from namespace 9 on {subsystem}: Host is not found in namespace's host list" in caplog.text @@ -713,12 +719,12 @@ def test_add_namespace_multiple_hosts(self, caplog, gateway): assert f"Adding host {host10} to namespace 9 on {subsystem}: Successful" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "9"]) - assert f'"nsid": 9' in caplog.text - assert '"auto_visible":' not in caplog.text or f'"auto_visible": false' in caplog.text + assert '"nsid": 9' in caplog.text + assert '"auto_visible":' not in caplog.text or '"auto_visible": false' in caplog.text assert f'"{host8}"' in caplog.text assert f'"{host9}"' in caplog.text assert f'"{host10}"' in caplog.text - assert f'"hosts": []' not in caplog.text + assert '"hosts": []' not in caplog.text def test_del_namespace_multiple_hosts(self, caplog, gateway): caplog.clear() @@ -728,19 +734,19 @@ def test_del_namespace_multiple_hosts(self, caplog, gateway): assert f"Deleting host {host10} from namespace 9 on {subsystem}: Successful" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "9"]) - assert f'"nsid": 9' in caplog.text - assert '"auto_visible":' not in caplog.text or f'"auto_visible": false' in caplog.text + assert '"nsid": 9' 
in caplog.text + assert '"auto_visible":' not in caplog.text or '"auto_visible": false' in caplog.text assert f'"{host8}"' not in caplog.text assert f'"{host9}"' not in caplog.text assert f'"{host10}"' not in caplog.text - assert f'"hosts": []' in caplog.text + assert '"hosts": []' in caplog.text def test_list_namespace_with_no_hosts(self, caplog, gateway): caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "10"]) - assert f'"nsid": 10' in caplog.text - assert '"auto_visible":' not in caplog.text or f'"auto_visible": false' in caplog.text - assert f'"hosts": []' in caplog.text + assert '"nsid": 10' in caplog.text + assert '"auto_visible":' not in caplog.text or '"auto_visible": false' in caplog.text + assert '"hosts": []' in caplog.text def test_add_too_many_namespaces(self, caplog, gateway): caplog.clear() @@ -754,19 +760,19 @@ def test_resize_namespace(self, caplog, gateway): gw, stub = gateway caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) - assert f'"nsid": 6' in caplog.text + assert '"nsid": 6' in caplog.text assert '"block_size": 512' in caplog.text assert '"rbd_image_size": "16777216"' in caplog.text assert f'"uuid": "{uuid2}"' in caplog.text caplog.clear() cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "2MB"]) - assert f"new size 2097152 bytes is smaller than current size 16777216 bytes" in caplog.text + assert "new size 2097152 bytes is smaller than current size 16777216 bytes" in caplog.text caplog.clear() cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "2"]) - assert f"new size 2097152 bytes is smaller than current size 16777216 bytes" in caplog.text + assert "new size 2097152 bytes is smaller than current size 16777216 bytes" in caplog.text caplog.clear() cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "3145728B"]) - assert f"new size 3145728 bytes is smaller than current size 16777216 bytes" in caplog.text + assert "new size 3145728 bytes is smaller than current size 16777216 bytes" in caplog.text caplog.clear() cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "32MB"]) assert f"Resizing namespace 6 in {subsystem} to 32 MiB: Successful" in caplog.text @@ -799,7 +805,7 @@ def test_resize_namespace(self, caplog, gateway): assert rc == 2 caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) - assert f'"nsid": 6' in caplog.text + assert '"nsid": 6' in caplog.text assert '"block_size": 512' in caplog.text assert '"rbd_image_size": "33554432"' in caplog.text assert f'"uuid": "{uuid2}"' in caplog.text @@ -831,7 +837,7 @@ def test_resize_namespace(self, caplog, gateway): assert f"Resizing namespace 6 in {subsystem} to 64 MiB: Successful" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid2]) - assert f'"nsid": 6' in caplog.text + assert '"nsid": 6' in caplog.text assert '"block_size": 512' in caplog.text assert '"rbd_image_size": "67108864"' in caplog.text assert f'"uuid": "{uuid2}"' in caplog.text @@ -847,7 +853,7 @@ def test_resize_namespace(self, caplog, gateway): cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "32MB"]) assert f"Failure resizing namespace 6 on {subsystem}: new size 33554432 bytes is smaller than current size 67108864 bytes" in caplog.text ns = cli_test(["namespace", "list", "--subsystem", subsystem, 
"--nsid", "6"]) - assert ns != None + assert ns is not None assert ns.status == 0 assert len(ns.namespaces) == 1 assert ns.namespaces[0].rbd_image_size == 67108864 @@ -861,7 +867,7 @@ def test_resize_namespace(self, caplog, gateway): def test_set_namespace_qos_limits(self, caplog, gateway): caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) - assert f'"nsid": 6' in caplog.text + assert '"nsid": 6' in caplog.text assert '"rw_ios_per_second": "0"' in caplog.text assert '"rw_mbytes_per_second": "0"' in caplog.text assert '"r_mbytes_per_second": "0"' in caplog.text @@ -872,7 +878,7 @@ def test_set_namespace_qos_limits(self, caplog, gateway): assert f"No previous QOS limits found, this is the first time the limits are set for namespace 6 on {subsystem}" in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) - assert f'"nsid": 6' in caplog.text + assert '"nsid": 6' in caplog.text assert f'"uuid": "{uuid2}"' in caplog.text assert '"rw_ios_per_second": "2000"' in caplog.text assert '"rw_mbytes_per_second": "0"' in caplog.text @@ -885,7 +891,7 @@ def test_set_namespace_qos_limits(self, caplog, gateway): caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid2]) assert f'"uuid": "{uuid2}"' in caplog.text - assert f'"nsid": 6' in caplog.text + assert '"nsid": 6' in caplog.text assert '"rw_ios_per_second": "2000"' in caplog.text assert '"rw_mbytes_per_second": "30"' in caplog.text assert '"r_mbytes_per_second": "0"' in caplog.text @@ -897,7 +903,7 @@ def test_set_namespace_qos_limits(self, caplog, gateway): assert f"No previous QOS limits found, this is the first time the limits are set for namespace 6 on {subsystem}" not in caplog.text caplog.clear() cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) - assert f'"nsid": 6' in caplog.text + assert '"nsid": 6' in caplog.text assert '"rw_ios_per_second": "2000"' in caplog.text assert '"rw_mbytes_per_second": "30"' in caplog.text assert '"r_mbytes_per_second": "15"' in caplog.text @@ -927,15 +933,15 @@ def test_namespace_io_stats(self, caplog, gateway): assert f'IO statistics for namespace 6 in {subsystem}' in caplog.text caplog.clear() cli(["--format", "json", "namespace", "get_io_stats", "--subsystem", subsystem, "--nsid", "6"]) - assert f'"status": 0' in caplog.text + assert '"status": 0' in caplog.text assert f'"subsystem_nqn": "{subsystem}"' in caplog.text - assert f'"nsid": 6' in caplog.text + assert '"nsid": 6' in caplog.text assert f'"uuid": "{uuid2}"' in caplog.text - assert f'"ticks":' in caplog.text - assert f'"bytes_written":' in caplog.text - assert f'"bytes_read":' in caplog.text - assert f'"max_write_latency_ticks":' in caplog.text - assert f'"io_error":' in caplog.text + assert '"ticks":' in caplog.text + assert '"bytes_written":' in caplog.text + assert '"bytes_read":' in caplog.text + assert '"max_write_latency_ticks":' in caplog.text + assert '"io_error":' in caplog.text caplog.clear() rc = 0 try: @@ -978,18 +984,18 @@ def test_add_host(self, caplog, host): def test_add_host_invalid_nqn(self, caplog): caplog.clear() cli(["host", "add", "--subsystem", subsystem, "--host-nqn", "nqn.2016"]) - assert f'NQN "nqn.2016" is too short, minimal length is 11' in caplog.text + assert 'NQN "nqn.2016" is too short, minimal length is 11' in caplog.text caplog.clear() cli(["host", "add", "--subsystem", subsystem, "--host-nqn", "nqn.2X16-06.io.spdk:host1"]) - 
assert f"invalid date code" in caplog.text + assert "invalid date code" in caplog.text caplog.clear() cli(["host", "add", "--subsystem", subsystem, "--host-nqn", "nqn.2016-06.io.spdk:host1_X"]) - assert f"Invalid host NQN" in caplog.text - assert f"contains invalid characters" in caplog.text + assert "Invalid host NQN" in caplog.text + assert "contains invalid characters" in caplog.text caplog.clear() cli(["host", "add", "--subsystem", f"{subsystem}_X", "--host-nqn", "nqn.2016-06.io.spdk:host2"]) - assert f"Invalid subsystem NQN" in caplog.text - assert f"contains invalid characters" in caplog.text + assert "Invalid subsystem NQN" in caplog.text + assert "contains invalid characters" in caplog.text def test_host_list(self, caplog): caplog.clear() @@ -1008,7 +1014,6 @@ def test_create_listener(self, caplog, listener, gateway): assert "ipv4" in caplog.text.lower() assert f"Adding {subsystem} listener at {listener[1]}:{listener[3]}: Successful" in caplog.text - @pytest.mark.parametrize("listener_ipv6", listener_list_ipv6) def test_create_listener_ipv6(self, caplog, listener_ipv6, gateway): caplog.clear() @@ -1031,10 +1036,10 @@ def test_list_listeners(self, caplog, listener, listener_ipv6, gateway): assert f'"host_name": "{host_name}"' in caplog.text assert f'"traddr": "{listener[1]}"' in caplog.text assert f'"trsvcid": {listener[3]}' in caplog.text - assert f'"adrfam": "ipv4"' in caplog.text + assert '"adrfam": "ipv4"' in caplog.text assert f'"traddr": "[{listener_ipv6[1]}]"' in caplog.text assert f'"trsvcid": {listener_ipv6[3]}' in caplog.text - assert f'"adrfam": "ipv6"' in caplog.text + assert '"adrfam": "ipv6"' in caplog.text @pytest.mark.parametrize("listener", listener_list_negative_port) def test_create_listener_negative_port(self, caplog, listener, gateway): @@ -1084,6 +1089,7 @@ def test_create_listener_on_discovery(self, caplog, listener, gateway): cli(["listener", "add", "--host-name", host_name] + listener) assert "Can't create a listener for a discovery subsystem" in caplog.text + class TestDelete: @pytest.mark.parametrize("host", host_list) def test_remove_host(self, caplog, host, gateway): @@ -1170,7 +1176,7 @@ def test_remove_namespace(self, caplog, gateway): gw, stub = gateway caplog.clear() ns_list = cli_test(["namespace", "list", "--subsystem", subsystem, "--nsid", "6"]) - assert ns_list != None + assert ns_list is not None assert ns_list.status == 0 assert len(ns_list.namespaces) == 1 bdev_name = ns_list.namespaces[0].bdev_name @@ -1238,6 +1244,7 @@ def test_delete_subsystem_with_discovery_nqn(self, caplog, gateway): assert "Can't delete a discovery subsystem" in caplog.text assert rc == 2 + class TestCreateWithAna: def test_create_subsystem_ana(self, caplog, gateway): caplog.clear() @@ -1267,8 +1274,8 @@ def test_create_listener_ana(self, caplog, listener, gateway): assert "ipv4" in caplog.text.lower() assert f"Adding {subsystem} listener at {listener[1]}:{listener[3]}: Successful" in caplog.text -class TestDeleteAna: +class TestDeleteAna: @pytest.mark.parametrize("listener", listener_list) def test_delete_listener_ana(self, caplog, listener, gateway): caplog.clear() @@ -1288,6 +1295,7 @@ def test_delete_subsystem_ana(self, caplog, gateway): cli(["subsystem", "list"]) assert "No subsystems" in caplog.text + class TestSubsysWithGroupName: def test_create_subsys_group_name(self, caplog, gateway): caplog.clear() @@ -1301,12 +1309,13 @@ def test_create_subsys_group_name(self, caplog, gateway): caplog.clear() cli(["subsystem", "add", "--subsystem", subsystem4, 
"--no-group-append"]) assert f"Adding subsystem {subsystem4}: Successful" in caplog.text - assert f"Subsystem NQN will not be changed" in caplog.text + assert "Subsystem NQN will not be changed" in caplog.text assert f"Adding subsystem {subsystem4}.{group_name}: Successful" not in caplog.text cli(["--format", "json", "subsystem", "list"]) assert f'"nqn": "{subsystem4}.{group_name}"' not in caplog.text assert f'"nqn": "{subsystem4}"' in caplog.text + class TestTooManySubsystemsAndHosts: def test_add_too_many_subsystem(self, caplog, gateway): caplog.clear() @@ -1334,6 +1343,7 @@ def test_too_many_hosts(self, caplog, gateway): cli(["host", "add", "--subsystem", subsystem6, "--host-nqn", host5]) assert f"Failure adding host {host5} to {subsystem6}: Maximal number of hosts for subsystem (4) has already been reached" in caplog.text + class TestGwLogLevel: def test_gw_log_level(self, caplog, gateway): caplog.clear() @@ -1341,13 +1351,13 @@ def test_gw_log_level(self, caplog, gateway): assert 'Gateway log level is "debug"' in caplog.text caplog.clear() cli(["gw", "set_log_level", "--level", "error"]) - assert f'Set gateway log level to "error": Successful' in caplog.text + assert 'Set gateway log level to "error": Successful' in caplog.text caplog.clear() cli(["gw", "get_log_level"]) assert 'Gateway log level is "error"' in caplog.text caplog.clear() cli(["gw", "set_log_level", "-l", "CRITICAL"]) - assert f'Set gateway log level to "critical": Successful' in caplog.text + assert 'Set gateway log level to "critical": Successful' in caplog.text caplog.clear() cli(["gw", "get_log_level"]) assert 'Gateway log level is "critical"' in caplog.text @@ -1362,14 +1372,15 @@ def test_gw_log_level(self, caplog, gateway): assert rc == 2 caplog.clear() cli(["--format", "json", "gw", "get_log_level"]) - assert f'"log_level": "critical"' in caplog.text + assert '"log_level": "critical"' in caplog.text caplog.clear() cli(["--log-level", "critical", "gw", "set_log_level", "--level", "DEBUG"]) - assert f'Set gateway log level to "debug": Successful' not in caplog.text + assert 'Set gateway log level to "debug": Successful' not in caplog.text caplog.clear() cli(["gw", "get_log_level"]) assert 'Gateway log level is "debug"' in caplog.text + class TestSPDKLOg: def test_log_flags(self, caplog, gateway): caplog.clear() diff --git a/tests/test_cli_change_keys.py b/tests/test_cli_change_keys.py index f3392117..0de55b40 100644 --- a/tests/test_cli_change_keys.py +++ b/tests/test_cli_change_keys.py @@ -23,6 +23,7 @@ hostpsk1 = "NVMeTLSkey-1:01:YzrPElk4OYy1uUERriPwiiyEJE/+J5ckYpLB+5NHMsR2iBuT:" config = "ceph-nvmeof.conf" + @pytest.fixture(scope="module") def two_gateways(config): """Sets up and tears down two Gateways""" @@ -67,6 +68,7 @@ def two_gateways(config): gatewayA.server.stop(grace=1) gatewayB.server.stop(grace=1) + def test_change_host_key(caplog, two_gateways): gatewayA, stubA, gatewayB, stubB = two_gateways caplog.clear() diff --git a/tests/test_cli_change_lb.py b/tests/test_cli_change_lb.py index fdfd0207..5d3eb55d 100755 --- a/tests/test_cli_change_lb.py +++ b/tests/test_cli_change_lb.py @@ -19,6 +19,7 @@ config = "ceph-nvmeof.conf" namespace_count = 20 + @pytest.fixture(scope="module") def two_gateways(config): """Sets up and tears down two Gateways""" @@ -64,16 +65,19 @@ def two_gateways(config): gatewayA.server.stop(grace=1) gatewayB.server.stop(grace=1) + def verify_one_namespace_lb_group(caplog, gw_port, subsys, nsid_to_verify, grp): caplog.clear() cli(["--server-port", gw_port, "--format", "json", 
"namespace", "list", "--subsystem", subsys, "--nsid", nsid_to_verify]) assert f'"nsid": {nsid_to_verify},' in caplog.text assert f'"load_balancing_group": {grp},' in caplog.text + def verify_namespaces(caplog, gw_port, subsys, first_nsid, last_nsid, grp): for ns in range(first_nsid, last_nsid + 1): verify_one_namespace_lb_group(caplog, gw_port, subsys, str(ns), grp) + def verify_namespaces_using_get_subsystems(caplog, gw_port, subsys, first_nsid, last_nsid, grp): caplog.clear() subsys_info = cli_test(["--server-port", gw_port, "get_subsystems"]) @@ -84,6 +88,7 @@ def verify_namespaces_using_get_subsystems(caplog, gw_port, subsys, first_nsid, assert subsys_info.subsystems[0].namespaces[ns - 1].nsid == ns assert subsys_info.subsystems[0].namespaces[ns - 1].anagrpid == grp + def verify_namespaces_using_spdk_get_subsystems(caplog, gw, subsys, first_nsid, last_nsid, grp): caplog.clear() subsys_info = rpc_nvmf.nvmf_get_subsystems(gw.gateway_rpc.spdk_rpc_client) @@ -94,6 +99,7 @@ def verify_namespaces_using_spdk_get_subsystems(caplog, gw, subsys, first_nsid, assert subsys_info[0]["namespaces"][ns - 1]["nsid"] == ns assert subsys_info[0]["namespaces"][ns - 1]["anagrpid"] == grp + def create_namespaces(caplog, ns_count, subsys): for i in range(1, 1 + (ns_count // 2)): caplog.clear() @@ -104,12 +110,14 @@ def create_namespaces(caplog, ns_count, subsys): cli(["namespace", "add", "--subsystem", subsys, "--rbd-pool", pool, "--rbd-image", f"{image}{i}", "--size", "16MB", "--rbd-create-image", "--load-balancing-group", anagrpid2]) assert f"Adding namespace {i} to {subsys}: Successful" in caplog.text + def try_change_one_namespace_lb_group_no_listeners(caplog, subsys, nsid_to_change, new_group): caplog.clear() cli(["--server-port", "5502", "namespace", "change_load_balancing_group", "--subsystem", subsys, "--nsid", nsid_to_change, "--load-balancing-group", new_group]) time.sleep(8) assert "is owned by gateway None so try this command from it" in caplog.text + def change_one_namespace_lb_group(caplog, subsys, nsid_to_change, new_group): caplog.clear() cli(["--server-port", "5502", "namespace", "change_load_balancing_group", "--subsystem", subsys, "--nsid", nsid_to_change, "--load-balancing-group", new_group]) @@ -121,16 +129,18 @@ def change_one_namespace_lb_group(caplog, subsys, nsid_to_change, new_group): assert f"Changing load balancing group of namespace {nsid_to_change} in {subsys} to {new_group}: Successful" in caplog.text assert f"Received manual request to change load balancing group for namespace with NSID {nsid_to_change} in {subsys} to {new_group}, context: 0) - assert(code) + assert pid > 0 + assert code def remove_core_files(self, directory_path): # List all files starting with "core." 
         # List all files starting with "core." in the core directory
@@ -38,9 +38,9 @@ def remove_core_files(self, directory_path):
         print(f"Removed: {file_path}")
     def assert_no_core_files(self, directory_path):
-        assert(os.path.exists(directory_path) and os.path.isdir(directory_path))
+        assert os.path.exists(directory_path) and os.path.isdir(directory_path)
         files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f)) and f.startswith("core.")]
-        assert(len(files) == 0)
+        assert len(files) == 0
     def test_spdk_exception(self):
         """Tests spdk sub process exiting with error."""
@@ -80,7 +80,7 @@ def test_monc_exit(self):
         time.sleep(2)
         # Send SIGABRT (abort signal) to the monitor client process
-        assert(gateway.monitor_client_process)
+        assert gateway.monitor_client_process
         gateway.monitor_client_process.send_signal(signal.SIGABRT)
         # Block on running keep alive ping
@@ -116,5 +116,6 @@ def test_spdk_multi_gateway_exception(self):
             gatewayB.serve()
         self.validate_exception(cm.exception)
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/tests/test_state.py b/tests/test_state.py
index fda1366d..59c67093 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -181,8 +181,8 @@ def _state_notify_update(update, is_add_req):
     # to test notify capability
     elapsed = time.time() - start
     wait_interval = update_interval_sec - elapsed - 0.5
-    assert(wait_interval > 0)
-    assert(wait_interval < update_interval_sec)
+    assert wait_interval > 0
+    assert wait_interval < update_interval_sec
     time.sleep(wait_interval)
     # expect 4 updates: addition, two-step change and removal
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..779aeb84
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,4 @@
+[flake8]
+max-line-length = 100
+ignore =
+    E501,
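Reviewer note (not part of the patch): flake8 automatically picks up the new [flake8] section when invoked from the repository root, since it reads configuration from tox.ini, setup.cfg, or .flake8. The sketch below is illustrative only and uses invented names; it shows why the recurring test rewrites above are behaviour-preserving: identity comparison against None instead of != (pycodestyle E711), dropping the f prefix from strings with no placeholders (pyflakes F541), and writing assert as a bare statement rather than a parenthesised expression.

# illustrative_flake8_cleanups.py -- example only, not part of this diff
class _Result:
    """Object with an unconventional __ne__, to show why E711 matters."""
    def __ne__(self, other):
        return False  # makes "result != None" evaluate to False


def demo():
    result = _Result()

    # E711: identity check is the reliable way to test for None.
    assert result is not None          # always correct
    assert not (result != None)        # noqa: E711 -- demonstrates the pitfall

    # F541: an f-string with no placeholders is just a plain string.
    assert f"no placeholders here" == "no placeholders here"  # noqa: F541

    # "assert(x)" is not a call; the parentheses merely wrap the expression,
    # so the bare statement form used throughout the patch is equivalent.
    pid, code = 1234, 1
    assert (pid > 0)
    assert pid > 0
    assert code


if __name__ == "__main__":
    demo()
    print("all illustrative assertions passed")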