diff --git a/control/cli.py b/control/cli.py
index 73bb7d538..5532e52df 100644
--- a/control/cli.py
+++ b/control/cli.py
@@ -1327,25 +1327,17 @@ def ns_del(self, args):
         """Deletes a namespace from a subsystem."""
         out_func, err_func = self.get_output_functions(args)
-        if args.nsid == None and args.uuid == None:
-            self.cli.parser.error("At least one of --nsid or --uuid arguments is mandatory for del command")
-        if args.nsid != None and args.nsid <= 0:
+        if args.nsid <= 0:
             self.cli.parser.error("nsid value must be positive")
         try:
-            ret = self.stub.namespace_delete(pb2.namespace_delete_req(subsystem_nqn=args.subsystem, nsid=args.nsid, uuid=args.uuid))
+            ret = self.stub.namespace_delete(pb2.namespace_delete_req(subsystem_nqn=args.subsystem, nsid=args.nsid))
         except Exception as ex:
             ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure deleting namespace:\n{ex}")
 
         if args.format == "text" or args.format == "plain":
             if ret.status == 0:
-                if args.nsid:
-                    ns_id_str = f"{args.nsid}"
-                elif args.uuid:
-                    ns_id_str = f"with UUID {args.uuid}"
-                else:
-                    assert False
-                out_func(f"Deleting namespace {ns_id_str} from {args.subsystem}: Successful")
+                out_func(f"Deleting namespace {args.nsid} from {args.subsystem}: Successful")
             else:
                 err_func(f"{ret.error_message}")
         elif args.format == "json" or args.format == "yaml":
@@ -1371,9 +1363,7 @@ def ns_resize(self, args):
         ns_size = 0
         out_func, err_func = self.get_output_functions(args)
-        if args.nsid == None and args.uuid == None:
-            self.cli.parser.error("At least one of --nsid or --uuid arguments is mandatory for resize command")
-        if args.nsid != None and args.nsid <= 0:
+        if args.nsid <= 0:
             self.cli.parser.error("nsid value must be positive")
         ns_size = self.get_size_in_bytes(args.size)
         if ns_size <= 0:
@@ -1384,21 +1374,14 @@
         ns_size //= mib
         try:
-            ret = self.stub.namespace_resize(pb2.namespace_resize_req(subsystem_nqn=args.subsystem, nsid=args.nsid,
-                                                                      uuid=args.uuid, new_size=ns_size))
+            ret = self.stub.namespace_resize(pb2.namespace_resize_req(subsystem_nqn=args.subsystem, nsid=args.nsid, new_size=ns_size))
         except Exception as ex:
             ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure resizing namespace:\n{ex}")
 
         if args.format == "text" or args.format == "plain":
             if ret.status == 0:
-                if args.nsid:
-                    ns_id_str = f"{args.nsid}"
-                elif args.uuid:
-                    ns_id_str = f"with UUID {args.uuid}"
-                else:
-                    assert False
                 sz_str = self.format_size(ns_size * mib)
-                out_func(f"Resizing namespace {ns_id_str} in {args.subsystem} to {sz_str}: Successful")
+                out_func(f"Resizing namespace {args.nsid} in {args.subsystem} to {sz_str}: Successful")
             else:
                 err_func(f"{ret.error_message}")
         elif args.format == "json" or args.format == "yaml":
@@ -1556,13 +1539,11 @@ def ns_get_io_stats(self, args):
         """Get namespace IO statistics."""
         out_func, err_func = self.get_output_functions(args)
-        if args.nsid == None and args.uuid == None:
-            self.cli.parser.error("At least one of --nsid or --uuid arguments is mandatory for get_io_stats command")
-        if args.nsid != None and args.nsid <= 0:
+        if args.nsid <= 0:
             self.cli.parser.error("nsid value must be positive")
         try:
-            get_stats_req = pb2.namespace_get_io_stats_req(subsystem_nqn=args.subsystem, nsid=args.nsid, uuid=args.uuid)
+            get_stats_req = pb2.namespace_get_io_stats_req(subsystem_nqn=args.subsystem, nsid=args.nsid)
             ns_io_stats = self.stub.namespace_get_io_stats(get_stats_req)
         except Exception as ex:
             ns_io_stats = pb2.namespace_io_stats_info(status = errno.EINVAL, error_message = f"Failure getting namespace's IO stats:\n{ex}")
@@ -1574,9 +1555,6 @@ def ns_get_io_stats(self, args):
         elif args.nsid and args.nsid != ns_io_stats.nsid:
             ns_io_stats.status = errno.ENODEV
             ns_io_stats.error_message = f"Failure getting namespace's IO stats: Returned namespace NSID {ns_io_stats.nsid} differs from requested one {args.nsid}"
-        elif args.uuid and args.uuid != ns_io_stats.uuid:
-            ns_io_stats.status = errno.ENODEV
-            ns_io_stats.error_message = f"Failure getting namespace's IO stats: Returned namespace UUID {ns_io_stats.uuid} differs from requested one {args.uuid}"
 
         # only show IO errors in verbose mode
         if not args.verbose:
@@ -1640,13 +1618,7 @@ def ns_get_io_stats(self, args):
                 else:
                     table_format = "plain"
                 stats_out = tabulate(stats_list, headers = ["Stat", "Value"], tablefmt=table_format)
-                if args.nsid:
-                    ns_id_str = f"{args.nsid}"
-                elif args.uuid:
-                    ns_id_str = f"with UUID {args.uuid}"
-                else:
-                    assert False
-                out_func(f"IO statistics for namespace {ns_id_str} in {args.subsystem}, bdev {ns_io_stats.bdev_name}:\n{stats_out}")
+                out_func(f"IO statistics for namespace {args.nsid} in {args.subsystem}, bdev {ns_io_stats.bdev_name}:\n{stats_out}")
             else:
                 err_func(f"{ns_io_stats.error_message}")
         elif args.format == "json" or args.format == "yaml":
@@ -1671,8 +1643,6 @@ def ns_change_load_balancing_group(self, args):
         """Change namespace load balancing group."""
         out_func, err_func = self.get_output_functions(args)
-        if args.nsid == None:
-            self.cli.parser.error("--nsid argument is mandatory for change_load_balancing_group command")
         if args.nsid <= 0:
             self.cli.parser.error("nsid value must be positive")
         if args.load_balancing_group <= 0:
@@ -1680,21 +1650,14 @@
         try:
             change_lb_group_req = pb2.namespace_change_load_balancing_group_req(subsystem_nqn=args.subsystem,
-                                                                                nsid=args.nsid, uuid=args.uuid,
-                                                                                anagrpid=args.load_balancing_group)
+                                                                                nsid=args.nsid, anagrpid=args.load_balancing_group)
             ret = self.stub.namespace_change_load_balancing_group(change_lb_group_req)
         except Exception as ex:
             ret = pb2.req_status(status = errno.EINVAL, error_message = f"Failure changing namespace load balancing group:\n{ex}")
 
         if args.format == "text" or args.format == "plain":
             if ret.status == 0:
-                if args.nsid:
-                    ns_id_str = f"{args.nsid}"
-                elif args.uuid:
-                    ns_id_str = f"with UUID {args.uuid}"
-                else:
-                    assert False
-                out_func(f"Changing load balancing group of namespace {ns_id_str} in {args.subsystem} to {args.load_balancing_group}: Successful")
+                out_func(f"Changing load balancing group of namespace {args.nsid} in {args.subsystem} to {args.load_balancing_group}: Successful")
             else:
                 err_func(f"{ret.error_message}")
         elif args.format == "json" or args.format == "yaml":
@@ -1725,9 +1688,7 @@ def ns_set_qos(self, args):
         """Set namespace QOS limits."""
         out_func, err_func = self.get_output_functions(args)
-        if args.nsid == None and args.uuid == None:
-            self.cli.parser.error("At least one of --nsid or --uuid arguments is mandatory for set_qos command")
-        if args.nsid != None and args.nsid <= 0:
+        if args.nsid <= 0:
             self.cli.parser.error("nsid value must be positive")
         if args.rw_ios_per_second == None and args.rw_megabytes_per_second == None and args.r_megabytes_per_second == None and args.w_megabytes_per_second == None:
             self.cli.parser.error("At least one QOS limit should be set")
@@ -1741,8 +1702,6 @@ def ns_set_qos(self, args):
         qos_args["subsystem_nqn"] = args.subsystem
         if args.nsid:
             qos_args["nsid"] = args.nsid
-        if args.uuid:
-            qos_args["uuid"] = args.uuid
         if args.rw_ios_per_second != None:
             qos_args["rw_ios_per_second"] = args.rw_ios_per_second
         if args.rw_megabytes_per_second != None:
@@ -1759,13 +1718,7 @@ def ns_set_qos(self, args):
         if args.format == "text" or args.format == "plain":
             if ret.status == 0:
-                if args.nsid:
-                    ns_id_str = f"{args.nsid}"
-                elif args.uuid:
-                    ns_id_str = f"with UUID {args.uuid}"
-                else:
-                    assert False
-                out_func(f"Setting QOS limits of namespace {ns_id_str} in {args.subsystem}: Successful")
+                out_func(f"Setting QOS limits of namespace {args.nsid} in {args.subsystem}: Successful")
             else:
                 err_func(f"{ret.error_message}")
         elif args.format == "json" or args.format == "yaml":
@@ -1788,10 +1741,10 @@ def ns_set_qos(self, args):
     ns_common_args = [
         argument("--subsystem", "-n", help="Subsystem NQN", required=True),
-        argument("--uuid", "-u", help="UUID"),
-        argument("--nsid", help="Namespace ID", type=int),
     ]
 
     ns_add_args_list = ns_common_args + [
+        argument("--nsid", help="Namespace ID", type=int),
+        argument("--uuid", "-u", help="UUID"),
         argument("--rbd-pool", "-p", help="RBD pool name", required=True),
         argument("--rbd-image", "-i", help="RBD image name", required=True),
         argument("--rbd-create-image", "-c", help="Create RBD image if needed", action='store_true', required=False),
@@ -1801,18 +1754,25 @@ def ns_set_qos(self, args):
         argument("--force", help="Create a namespace even its image is already used by another namespace", action='store_true', required=False),
     ]
     ns_del_args_list = ns_common_args + [
+        argument("--nsid", help="Namespace ID", type=int, required=True),
    ]
     ns_resize_args_list = ns_common_args + [
+        argument("--nsid", help="Namespace ID", type=int, required=True),
         argument("--size", help="Size in bytes or specified unit (K, KB, M, MB, G, GB, T, TB, P, PB)", required=True),
     ]
     ns_list_args_list = ns_common_args + [
+        argument("--nsid", help="Namespace ID", type=int),
+        argument("--uuid", "-u", help="UUID"),
     ]
     ns_get_io_stats_args_list = ns_common_args + [
+        argument("--nsid", help="Namespace ID", type=int, required=True),
     ]
     ns_change_load_balancing_group_args_list = ns_common_args + [
+        argument("--nsid", help="Namespace ID", type=int, required=True),
         argument("--load-balancing-group", "-l", help="Load balancing group", type=int, required=True),
     ]
     ns_set_qos_args_list = ns_common_args + [
+        argument("--nsid", help="Namespace ID", type=int, required=True),
         argument("--rw-ios-per-second", help="R/W IOs per second limit, 0 means unlimited", type=int),
         argument("--rw-megabytes-per-second", help="R/W megabytes per second limit, 0 means unlimited", type=int),
         argument("--r-megabytes-per-second", help="Read megabytes per second limit, 0 means unlimited", type=int),
diff --git a/control/grpc.py b/control/grpc.py
index d71c7fa01..70076b76b 100644
--- a/control/grpc.py
+++ b/control/grpc.py
@@ -346,8 +346,7 @@ def create_bdev(self, anagrp: int, name, uuid, rbd_pool_name, rbd_image_name, bl
             self.logger.error(errmsg)
             return BdevStatus(status=errno.ENODEV, error_message=errmsg)
 
-        if name != bdev_name:
-            self.logger.warning(f"Created bdev name {bdev_name} differs from requested name {name}")
+        assert name == bdev_name, f"Created bdev name {bdev_name} differs from requested name {name}"
 
         return BdevStatus(status=0, error_message=os.strerror(0), bdev_name=name)
@@ -512,6 +511,11 @@ def create_subsystem_safe(self, request, context):
             self.logger.error(f"{errmsg}")
             return pb2.req_status(status = errno.EINVAL, error_message = errmsg)
 
+        if not request.subsystem_nqn:
+            errmsg = f"Failure creating subsystem, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.req_status(status = errno.EINVAL, error_message = errmsg)
+
         errmsg = ""
         if not GatewayState.is_key_element_valid(request.subsystem_nqn):
             errmsg = f"{create_subsystem_error_prefix}: Invalid NQN \"{request.subsystem_nqn}\", contains invalid characters"
@@ -690,6 +694,11 @@ def delete_subsystem(self, request, context=None):
         delete_subsystem_error_prefix = f"Failure deleting subsystem {request.subsystem_nqn}"
         self.logger.info(f"Received request to delete subsystem {request.subsystem_nqn}, context: {context}{peer_msg}")
 
+        if not request.subsystem_nqn:
+            errmsg = f"Failure deleting subsystem, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.req_status(status = errno.EINVAL, error_message = errmsg)
+
         if GatewayUtils.is_discovery_nqn(request.subsystem_nqn):
             errmsg = f"{delete_subsystem_error_prefix}: Can't delete a discovery subsystem"
             self.logger.error(f"{errmsg}")
@@ -745,13 +754,15 @@ def create_namespace(self, subsystem_nqn, bdev_name, nsid, anagrpid, uuid, conte
         if context:
             assert self.omap_lock.locked(), "OMAP is unlocked when calling create_namespace()"
+
         nsid_msg = ""
-        if nsid and uuid:
-            nsid_msg = f" using NSID {nsid} and UUID {uuid}"
-        elif nsid:
+        if nsid:
             nsid_msg = f" using NSID {nsid} "
-        elif uuid:
-            nsid_msg = f" using UUID {uuid} "
+
+        if not subsystem_nqn:
+            errmsg = f"Failure adding namespace, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.nsid_status(status=errno.EINVAL, error_message = errmsg)
 
         add_namespace_error_prefix = f"Failure adding namespace{nsid_msg}to {subsystem_nqn}"
@@ -797,20 +808,9 @@ def create_namespace(self, subsystem_nqn, bdev_name, nsid, anagrpid, uuid, conte
         return pb2.nsid_status(nsid=nsid, status=0, error_message=os.strerror(0))
 
     def find_unique_bdev_name(self, uuid) -> str:
-        if not uuid:
-            uuid = str(uuid.uuid4())
+        assert uuid, "Got an empty UUID"
         return f"bdev_{uuid}"
 
-    def get_ns_id_message(self, nsid, uuid):
-        ns_id_msg = ""
-        if nsid and uuid:
-            ns_id_msg = f"using NSID {nsid} and UUID {uuid} "
-        elif nsid:
-            ns_id_msg = f"using NSID {nsid} "
-        elif uuid:
-            ns_id_msg = f"using UUID {uuid} "
-        return ns_id_msg
-
     def set_ana_state(self, request, context=None):
         return self.execute_grpc_function(self.set_ana_state_safe, request, context)
@@ -908,11 +908,18 @@ def choose_anagrpid_for_namespace(self, nsid) ->int:
     def namespace_add_safe(self, request, context):
         """Adds a namespace to a subsystem."""
+        if not request.subsystem_nqn:
+            errmsg = f"Failure adding namespace, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.nsid_status(status=errno.EINVAL, error_message = errmsg)
+
         grps_list = []
         anagrp = 0
         peer_msg = self.get_peer_message(context)
-        nsid_msg = self.get_ns_id_message(request.nsid, request.uuid)
-        self.logger.info(f"Received request to add a namespace {nsid_msg}to {request.subsystem_nqn}, ana group {request.anagrpid}, context: {context}{peer_msg}")
+        nsid_msg = ""
+        if request.nsid:
+            nsid_msg = f"{request.nsid} "
+        self.logger.info(f"Received request to add namespace {nsid_msg}to {request.subsystem_nqn}, ana group {request.anagrpid}, context: {context}{peer_msg}")
         if not request.uuid:
             request.uuid = str(uuid.uuid4())
@@ -971,11 +978,9 @@ def namespace_add_safe(self, request, context):
                     self.logger.exception(f"Got exception while trying to delete bdev {bdev_name}")
                 return pb2.nsid_status(status=ret_bdev.status, error_message=errmsg)
 
-            if ret_bdev.bdev_name != bdev_name:
-                self.logger.warning(f"Returned bdev name {ret_bdev.bdev_name} differs from requested one {bdev_name}")
+            # If we got here we asserted that ret_bdev.bdev_name == bdev_name
 
             ret_ns = self.create_namespace(request.subsystem_nqn, bdev_name, request.nsid, anagrp, request.uuid, context)
-
             if ret_ns.status == 0 and request.nsid and ret_ns.nsid != request.nsid:
                 errmsg = f"Returned NSID {ret_ns.nsid} differs from requested one {request.nsid}"
                 self.logger.error(errmsg)
@@ -992,7 +997,7 @@ def namespace_add_safe(self, request, context):
                         raise
                 except Exception:
                     self.logger.exception(f"Got exception while trying to delete bdev {bdev_name}")
-                errmsg = f"Failure adding namespace {nsid_msg}to {request.subsystem_nqn}:{ret_ns.error_message}"
+                errmsg = f"Failure adding namespace {nsid_msg}to {request.subsystem_nqn}: {ret_ns.error_message}"
                 self.logger.error(errmsg)
                 return pb2.nsid_status(status=ret_ns.status, error_message=errmsg)
@@ -1023,6 +1028,11 @@ def namespace_change_load_balancing_group_safe(self, request, context):
         change_lb_group_failure_prefix = f"Failure changing load balancing group for namespace with NSID {request.nsid} in {request.subsystem_nqn}"
         self.logger.info(f"Received request to change load balancing group for namespace with NSID {request.nsid} in {request.subsystem_nqn} to {request.anagrpid}, context: {context}{peer_msg}")
 
+        if not request.subsystem_nqn:
+            errmsg = f"Failure changing load balancing group for namespace, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.req_status(status=errno.EINVAL, error_message=errmsg)
+
         if not request.nsid:
             errmsg = f"Failure changing load balancing group for namespace in {request.subsystem_nqn}: No NSID was given"
             self.logger.error(errmsg)
@@ -1201,6 +1211,11 @@ def list_namespaces(self, request, context=None):
             nsid_msg = f"namespace with NSID {request.nsid}"
         self.logger.info(f"Received request to list {nsid_msg} for {request.subsystem}, context: {context}{peer_msg}")
 
+        if not request.subsystem:
+            errmsg = f"Failure listing namespaces, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.namespaces_info(status=errno.EINVAL, error_message=errmsg, subsystem_nqn=request.subsystem, namespaces=[])
+
         with self.rpc_lock:
             try:
                 ret = rpc_nvmf.nvmf_get_subsystems(self.spdk_rpc_client, nqn=request.subsystem)
@@ -1278,20 +1293,27 @@ def namespace_get_io_stats(self, request, context=None):
         """Get namespace's IO stats."""
         peer_msg = self.get_peer_message(context)
-        nsid_msg = self.get_ns_id_message(request.nsid, request.uuid)
-        self.logger.info(f"Received request to get IO stats for namespace {nsid_msg}on {request.subsystem_nqn}, context: {context}{peer_msg}")
+        self.logger.info(f"Received request to get IO stats for namespace {request.nsid} on {request.subsystem_nqn}, context: {context}{peer_msg}")
+        if not request.nsid:
+            errmsg = f"Failure getting IO stats for namespace, missing NSID"
+            self.logger.error(f"{errmsg}")
+            return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg)
+
+        if not request.subsystem_nqn:
+            errmsg = f"Failure getting IO stats for namespace {request.nsid}, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg)
 
         with self.rpc_lock:
-            find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, request.uuid, False,
-                                                         "Failure getting namespace's IO stats")
+            find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, False, "Failure getting namespace's IO stats")
             ns = find_ret[0]
             if not ns:
-                errmsg = f"Failure getting IO stats for namespace {nsid_msg}on {request.subsystem_nqn}: Can't find namespace"
+                errmsg = f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}: Can't find namespace"
                 self.logger.error(errmsg)
                 return pb2.namespace_io_stats_info(status=errno.ENODEV, error_message=errmsg)
             bdev_name = find_ret[1]
             if not bdev_name:
-                errmsg = f"Failure getting IO stats for namespace {nsid_msg}on {request.subsystem_nqn}: Can't find associated block device"
+                errmsg = f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}: Can't find associated block device"
                 self.logger.error(errmsg)
                 return pb2.namespace_io_stats_info(status=errno.ENODEV, error_message=errmsg)
@@ -1302,19 +1324,19 @@ def namespace_get_io_stats(self, request, context=None):
             )
             self.logger.debug(f"get_bdev_iostat {bdev_name}: {ret}")
         except Exception as ex:
-            errmsg = f"Failure getting IO stats for namespace {nsid_msg}on {request.subsystem_nqn}"
+            errmsg = f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}"
             self.logger.exception(errmsg)
             errmsg = f"{errmsg}:\n{ex}"
             resp = self.parse_json_exeption(ex)
             status = errno.EINVAL
             if resp:
                 status = resp["code"]
-                errmsg = f"Failure getting IO stats for namespace {nsid_msg}on {request.subsystem_nqn}: {resp['message']}"
+                errmsg = f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}: {resp['message']}"
             return pb2.namespace_io_stats_info(status=status, error_message=errmsg)
 
         # Just in case SPDK failed with no exception
         if not ret:
-            errmsg = f"Failure getting IO stats for namespace {nsid_msg}on {request.subsystem_nqn}"
+            errmsg = f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}"
             self.logger.error(errmsg)
             return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg)
@@ -1323,7 +1345,7 @@ def namespace_get_io_stats(self, request, context=None):
             bdevs = ret["bdevs"]
             if not bdevs:
                 return pb2.namespace_io_stats_info(status=errno.ENODEV,
-                                                   error_message=f"Failure getting IO stats for namespace {nsid_msg}on {request.subsystem_nqn}: No associated block device found")
+                                                   error_message=f"Failure getting IO stats for namespace {request.nsid} on {request.subsystem_nqn}: No associated block device found")
             if len(bdevs) > 1:
                 self.logger.warning(f"More than one associated block device found for namespace, will use the first one")
             bdev = bdevs[0]
@@ -1388,19 +1410,27 @@ def namespace_set_qos_limits_safe(self, request, context):
         """Set namespace's qos limits."""
         peer_msg = self.get_peer_message(context)
-        nsid_msg = self.get_ns_id_message(request.nsid, request.uuid)
         limits_to_set = self.get_qos_limits_string(request)
-        self.logger.info(f"Received request to set QOS limits for namespace {nsid_msg}on {request.subsystem_nqn},{limits_to_set}, context: {context}{peer_msg}")
+        self.logger.info(f"Received request to set QOS limits for namespace {request.nsid} on {request.subsystem_nqn},{limits_to_set}, context: {context}{peer_msg}")
+
+        if not request.nsid:
+            errmsg = f"Failure setting QOS limits for namespace, missing NSID"
+            self.logger.error(f"{errmsg}")
+            return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg)
+
+        if not request.subsystem_nqn:
+            errmsg = f"Failure setting QOS limits for namespace {request.nsid}, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.namespace_io_stats_info(status=errno.EINVAL, error_message=errmsg)
 
-        find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, request.uuid, False,
-                                                     "Failure setting namespace's QOS limits")
+        find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, False, "Failure setting namespace's QOS limits")
         if not find_ret[0]:
-            errmsg = f"Failure setting QOS limits for namespace {nsid_msg}on {request.subsystem_nqn}: Can't find namespace"
+            errmsg = f"Failure setting QOS limits for namespace {request.nsid} on {request.subsystem_nqn}: Can't find namespace"
             self.logger.error(errmsg)
             return pb2.req_status(status=errno.ENODEV, error_message=errmsg)
         bdev_name = find_ret[1]
         if not bdev_name:
-            errmsg = f"Failure setting QOS limits for namespace {nsid_msg}on {request.subsystem_nqn}: Can't find associated block device"
+            errmsg = f"Failure setting QOS limits for namespace {request.nsid} on {request.subsystem_nqn}: Can't find associated block device"
             self.logger.error(errmsg)
             return pb2.req_status(status=errno.ENODEV, error_message=errmsg)
         nsid = find_ret[0]["nsid"]
@@ -1424,7 +1454,7 @@ def namespace_set_qos_limits_safe(self, request, context):
                 state_ns_qos = state[ns_qos_key]
                 ns_qos_entry = json.loads(state_ns_qos)
             except Exception as ex:
-                self.logger.info(f"No previous QOS limits found, this is the first time the limits are set for namespace {nsid_msg}on {request.subsystem_nqn}")
+                self.logger.info(f"No previous QOS limits found, this is the first time the limits are set for namespace {request.nsid} on {request.subsystem_nqn}")
 
         # Merge current limits with previous ones, if exist
         if ns_qos_entry:
@@ -1438,7 +1468,7 @@ def namespace_set_qos_limits_safe(self, request, context):
                 request.w_mbytes_per_second = int(ns_qos_entry["w_mbytes_per_second"])
 
             limits_to_set = self.get_qos_limits_string(request)
-            self.logger.debug(f"After merging current QOS limits with previous ones for namespace {nsid_msg}on {request.subsystem_nqn},{limits_to_set}")
+            self.logger.debug(f"After merging current QOS limits with previous ones for namespace {request.nsid} on {request.subsystem_nqn},{limits_to_set}")
 
         omap_lock = self.omap_lock.get_omap_lock_to_use(context)
         with omap_lock:
@@ -1448,7 +1478,7 @@ def namespace_set_qos_limits_safe(self, request, context):
                                               **set_qos_limits_args)
                 self.logger.debug(f"bdev_set_qos_limit {bdev_name}: {ret}")
             except Exception as ex:
-                errmsg = f"Failure setting QOS limits for namespace {nsid_msg}on {request.subsystem_nqn}"
+                errmsg = f"Failure setting QOS limits for namespace {request.nsid} on {request.subsystem_nqn}"
                 self.logger.exception(errmsg)
                 errmsg = f"{errmsg}:\n{ex}"
                 resp = self.parse_json_exeption(ex)
@@ -1460,7 +1490,7 @@ def namespace_set_qos_limits_safe(self, request, context):
 
             # Just in case SPDK failed with no exception
             if not ret:
-                errmsg = f"Failure setting QOS limits for namespace {nsid_msg}on {request.subsystem_nqn}"
+                errmsg = f"Failure setting QOS limits for namespace {request.nsid} on {request.subsystem_nqn}"
                 self.logger.error(errmsg)
                 return pb2.req_status(status=errno.EINVAL, error_message=errmsg)
@@ -1471,7 +1501,7 @@ def namespace_set_qos_limits_safe(self, request, context):
                     request, preserving_proto_field_name=True, including_default_value_fields=True)
                 self.gateway_state.add_namespace_qos(request.subsystem_nqn, nsid, json_req)
             except Exception as ex:
-                errmsg = f"Error persisting namespace QOS settings {nsid_msg}on {request.subsystem_nqn}"
+                errmsg = f"Error persisting namespace QOS settings {request.nsid} on {request.subsystem_nqn}"
                 self.logger.exception(errmsg)
                 errmsg = f"{errmsg}:\n{ex}"
                 return pb2.req_status(status=errno.EINVAL, error_message=errmsg)
@@ -1482,9 +1512,13 @@ def namespace_set_qos_limits(self, request, context=None):
         """Set namespace's qos limits."""
         return self.execute_grpc_function(self.namespace_set_qos_limits_safe, request, context)
 
-    def find_namespace_and_bdev_name(self, nqn, nsid, uuid, needs_lock, err_prefix):
-        if nsid <= 0 and not uuid:
-            self.logger.error(f"{err_prefix}: At least one of NSID or UUID should be specified for finding a namesapce")
+    def find_namespace_and_bdev_name(self, nqn, nsid, needs_lock, err_prefix):
+        if not nsid:
+            self.logger.error(f"{err_prefix}: NSID should be specified for finding a namesapce")
+            return (None, None)
+
+        if nsid <= 0:
+            self.logger.error(f"{err_prefix}: NSID should be positive")
             return (None, None)
 
         if needs_lock:
@@ -1520,38 +1554,58 @@ def find_namespace_and_bdev_name(self, nqn, nsid, uuid, needs_lock, err_prefix):
                     ns_list = []
                     pass
                 for n in ns_list:
-                    if nsid > 0 and nsid != n["nsid"]:
-                        continue
-                    if uuid and uuid != n["uuid"]:
+                    if nsid != n["nsid"]:
                         continue
                     found_ns = n
-                    bdev_name = n["bdev_name"]
+                    break
                 break
             except Exception:
                 self.logger.exception(f"{s=} parse error")
                 pass
 
+        uuid = None
+        if found_ns:
+            try:
+                uuid = found_ns["uuid"]
+            except Exception:
+                self.logger.exception(f"{found_ns=} parse error")
+                uuid = None
+                pass
+
+        if uuid:
+            bdev_name = self.find_unique_bdev_name(uuid)
+
         return (found_ns, bdev_name)
 
     def namespace_resize(self, request, context=None):
         """Resize a namespace."""
         peer_msg = self.get_peer_message(context)
-        nsid_msg = self.get_ns_id_message(request.nsid, request.uuid)
-        self.logger.info(f"Received request to resize namespace {nsid_msg}on {request.subsystem_nqn} to {request.new_size} MiB, context: {context}{peer_msg}")
+        self.logger.info(f"Received request to resize namespace {request.nsid} on {request.subsystem_nqn} to {request.new_size} MiB, context: {context}{peer_msg}")
+
+        if not request.nsid:
+            errmsg = f"Failure resizing namespace, missing NSID"
+            self.logger.error(f"{errmsg}")
+            return pb2.req_status(status = errno.EINVAL, error_message = errmsg)
+
+        if not request.subsystem_nqn:
+            errmsg = f"Failure resizing namespace {request.nsid}, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.req_status(status = errno.EINVAL, error_message = errmsg)
 
         if request.new_size <= 0:
-            return pb2.req_status(status=errno.EINVAL,
-                                  error_message=f"Failure resizing namespace {nsid_msg}on {request.subsystem_nqn}: New size must be positive")
+            errmsg = f"Failure resizing namespace {request.nsid}: New size must be positive"
+            self.logger.error(f"{errmsg}")
+            return pb2.req_status(status=errno.EINVAL, error_message=errmsg)
 
-        find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, request.uuid, True, "Failure resizing namespace")
+        find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, True, "Failure resizing namespace")
         if not find_ret[0]:
-            errmsg = f"Failure resizing namespace {nsid_msg}on {request.subsystem_nqn}: Can't find namespace"
+            errmsg = f"Failure resizing namespace {request.nsid} on {request.subsystem_nqn}: Can't find namespace"
             self.logger.error(errmsg)
             return pb2.req_status(status=errno.ENODEV, error_message=errmsg)
         bdev_name = find_ret[1]
         if not bdev_name:
-            errmsg = f"Failure resizing namespace {nsid_msg}on {request.subsystem_nqn}: Can't find associated block device"
+            errmsg = f"Failure resizing namespace {request.nsid} on {request.subsystem_nqn}: Can't find associated block device"
             self.logger.error(errmsg)
             return pb2.req_status(status=errno.ENODEV, error_message=errmsg)
@@ -1560,7 +1614,7 @@ def namespace_resize(self, request, context=None):
         if ret.status == 0:
             errmsg = os.strerror(0)
         else:
-            errmsg = f"Failure resizing namespace {nsid_msg}on {request.subsystem_nqn}: {ret.error_message}"
+            errmsg = f"Failure resizing namespace {request.nsid} on {request.subsystem_nqn}: {ret.error_message}"
             self.logger.error(errmsg)
 
         return pb2.req_status(status=ret.status, error_message=errmsg)
@@ -1568,14 +1622,22 @@ def namespace_resize(self, request, context=None):
     def namespace_delete_safe(self, request, context):
         """Delete a namespace."""
+        if not request.nsid:
+            errmsg = f"Failure deleting namespace, missing NSID"
+            self.logger.error(f"{errmsg}")
+            return pb2.req_status(status = errno.EINVAL, error_message = errmsg)
+
+        if not request.subsystem_nqn:
+            errmsg = f"Failure deleting namespace {request.nsid}, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.req_status(status = errno.EINVAL, error_message = errmsg)
+
         peer_msg = self.get_peer_message(context)
-        nsid_msg = self.get_ns_id_message(request.nsid, request.uuid)
-        self.logger.info(f"Received request to delete namespace {nsid_msg}from {request.subsystem_nqn}, context: {context}{peer_msg}")
+        self.logger.info(f"Received request to delete namespace {request.nsid} from {request.subsystem_nqn}, context: {context}{peer_msg}")
 
         omap_lock = self.omap_lock.get_omap_lock_to_use(context)
         with omap_lock:
-            find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, request.uuid, False,
-                                                         "Failure deleting namespace")
+            find_ret = self.find_namespace_and_bdev_name(request.subsystem_nqn, request.nsid, False, "Failure deleting namespace")
             if not find_ret[0]:
                 errmsg = f"Failure deleting namespace: Can't find namespace"
                 self.logger.error(errmsg)
@@ -1594,7 +1656,7 @@ def namespace_delete_safe(self, request, context):
             if bdev_name:
                 ret_del = self.delete_bdev(bdev_name, peer_msg = peer_msg)
                 if ret_del.status != 0:
-                    errmsg = f"Failure deleting namespace {nsid_msg}from {request.subsystem_nqn}: {ret_del.error_message}"
+                    errmsg = f"Failure deleting namespace {request.nsid} from {request.subsystem_nqn}: {ret_del.error_message}"
                     self.logger.error(errmsg)
                     return pb2.nsid_status(status=ret_del.status, error_message=errmsg)
@@ -1874,6 +1936,12 @@ def list_connections_safe(self, request, context):
         peer_msg = self.get_peer_message(context)
         log_level = logging.INFO if context else logging.DEBUG
         self.logger.log(log_level, f"Received request to list connections for {request.subsystem}, context: {context}{peer_msg}")
+
+        if not request.subsystem:
+            errmsg = f"Failure listing connections, missing subsystem NQN"
+            self.logger.error(f"{errmsg}")
+            return pb2.connections_info(status=errno.EINVAL, error_message = errmsg, connections=[])
+
         try:
             qpair_ret = rpc_nvmf.nvmf_subsystem_get_qpairs(self.spdk_rpc_client, nqn=request.subsystem)
             self.logger.debug(f"list_connections get_qpairs: {qpair_ret}")
diff --git a/control/proto/gateway.proto b/control/proto/gateway.proto
index f6047298e..334dbf5eb 100644
--- a/control/proto/gateway.proto
+++ b/control/proto/gateway.proto
@@ -127,20 +127,20 @@ message namespace_add_req {
 message namespace_resize_req {
   string subsystem_nqn = 1;
   optional uint32 nsid = 2;
-  optional string uuid = 3;
+  //optional string uuid = 3;
   uint64 new_size = 4;
 }
 
 message namespace_get_io_stats_req {
   string subsystem_nqn = 1;
   optional uint32 nsid = 2;
-  optional string uuid = 3;
+  //optional string uuid = 3;
 }
 
 message namespace_set_qos_req {
   string subsystem_nqn = 1;
   optional uint32 nsid = 2;
-  optional string uuid = 3;
+  //optional string uuid = 3;
   optional uint64 rw_ios_per_second = 4;
   optional uint64 rw_mbytes_per_second = 5;
   optional uint64 r_mbytes_per_second = 6;
@@ -150,14 +150,14 @@ message namespace_set_qos_req {
 message namespace_change_load_balancing_group_req {
   string subsystem_nqn = 1;
   optional uint32 nsid = 2;
-  optional string uuid = 3;
+  //optional string uuid = 3;
   int32 anagrpid = 4;
 }
 
 message namespace_delete_req {
   string subsystem_nqn = 1;
   optional uint32 nsid = 2;
-  optional string uuid = 3;
+  //optional string uuid = 3;
 }
@@ -448,7 +448,7 @@ message namespace_io_stats_info {
   string error_message = 2;
   string subsystem_nqn = 3;
   uint32 nsid = 4;
-  string uuid = 5;
+  optional string uuid = 5;
   string bdev_name = 6;
   uint64 tick_rate = 7;
   uint64 ticks = 8;
diff --git a/tests/test_cli.py b/tests/test_cli.py
index ca73edf2d..71cffd1e1 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -459,8 +459,26 @@ def test_resize_namespace(self, caplog, gateway):
         assert '"nsid": 4' not in caplog.text
         assert '"nsid": 5' not in caplog.text
         caplog.clear()
-        cli(["namespace", "resize", "--subsystem", subsystem, "--uuid", uuid2, "--size", "64MB"])
-        assert f"Resizing namespace with UUID {uuid2} in {subsystem} to 64 MiB: Successful" in caplog.text
+        rc = 0
+        try:
+            cli(["namespace", "resize", "--subsystem", subsystem, "--uuid", uuid2, "--size", "64MB"])
+        except SystemExit as sysex:
+            rc = int(str(sysex))
+            pass
+        assert "error: the following arguments are required: --nsid" in caplog.text
+        assert rc == 2
+        caplog.clear()
+        rc = 0
+        try:
+            cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--uuid", uuid2, "--size", "64MB"])
+        except SystemExit as sysex:
+            rc = int(str(sysex))
+            pass
+        assert "error: unrecognized arguments: --uuid" in caplog.text
+        assert rc == 2
+        caplog.clear()
+        cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "64MB"])
+        assert f"Resizing namespace 6 in {subsystem} to 64 MiB: Successful" in caplog.text
         caplog.clear()
         cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid2])
         assert f'"nsid": 6' in caplog.text
@@ -473,11 +491,11 @@ def test_resize_namespace(self, caplog, gateway):
         assert '"nsid": 4' not in caplog.text
         assert '"nsid": 5' not in caplog.text
         caplog.clear()
-        cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "12", "--uuid", uuid, "--size", "128MB"])
-        assert f"Failure resizing namespace using NSID 12 and UUID {uuid} on {subsystem}: Can't find namespace" in caplog.text
+        cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "12", "--size", "128MB"])
+        assert f"Failure resizing namespace 12 on {subsystem}: Can't find namespace" in caplog.text
         caplog.clear()
         cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "32MB"])
-        assert f"Failure resizing namespace using NSID 6 on {subsystem}: new size 33554432 bytes is smaller than current size 67108864 bytes" in caplog.text
+        assert f"Failure resizing namespace 6 on {subsystem}: new size 33554432 bytes is smaller than current size 67108864 bytes" in caplog.text
         ns = cli_test(["namespace", "list", "--subsystem", subsystem, "--nsid", "6"])
         assert ns != None
         assert ns.status == 0
@@ -487,7 +505,7 @@ def test_resize_namespace(self, caplog, gateway):
         assert rc
         caplog.clear()
         cli(["namespace", "resize", "--subsystem", subsystem, "--nsid", "6", "--size", "128MB"])
-        assert f"Failure resizing namespace using NSID 6 on {subsystem}: Can't find namespace" in caplog.text
+        assert f"Failure resizing namespace 6 on {subsystem}: Can't find namespace" in caplog.text
         caplog.clear()
         cli(["namespace", "add", "--subsystem", subsystem, "--nsid", "6", "--rbd-pool", pool, "--rbd-image", image, "--uuid", uuid2, "--force", "--load-balancing-group", anagrpid, "--force"])
         assert f"Adding namespace 6 to {subsystem}: Successful" in caplog.text
@@ -509,7 +527,7 @@ def test_set_namespace_qos_limits(self, caplog, gateway):
         caplog.clear()
         cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", "6", "--rw-ios-per-second", "2000"])
         assert f"Setting QOS limits of namespace 6 in {subsystem}: Successful" in caplog.text
-        assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 6 on {subsystem}" in caplog.text
+        assert f"No previous QOS limits found, this is the first time the limits are set for namespace 6 on {subsystem}" in caplog.text
         caplog.clear()
         cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"])
         assert f'"nsid": 6' in caplog.text
@@ -519,9 +537,9 @@ def test_set_namespace_qos_limits(self, caplog, gateway):
         assert '"r_mbytes_per_second": "0"' in caplog.text
         assert '"w_mbytes_per_second": "0"' in caplog.text
         caplog.clear()
-        cli(["namespace", "set_qos", "--subsystem", subsystem, "--uuid", uuid2, "--rw-megabytes-per-second", "30"])
-        assert f"Setting QOS limits of namespace with UUID {uuid2} in {subsystem}: Successful" in caplog.text
-        assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 6 on {subsystem}" not in caplog.text
+        cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", "6", "--rw-megabytes-per-second", "30"])
+        assert f"Setting QOS limits of namespace 6 in {subsystem}: Successful" in caplog.text
+        assert f"No previous QOS limits found, this is the first time the limits are set for namespace 6 on {subsystem}" not in caplog.text
         caplog.clear()
         cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--uuid", uuid2])
         assert f'"uuid": "{uuid2}"' in caplog.text
@@ -534,7 +552,7 @@ def test_set_namespace_qos_limits(self, caplog, gateway):
         cli(["namespace", "set_qos", "--subsystem", subsystem, "--nsid", "6", "--r-megabytes-per-second", "15", "--w-megabytes-per-second", "25"])
         assert f"Setting QOS limits of namespace 6 in {subsystem}: Successful" in caplog.text
-        assert f"No previous QOS limits found, this is the first time the limits are set for namespace using NSID 6 on {subsystem}" not in caplog.text
+        assert f"No previous QOS limits found, this is the first time the limits are set for namespace 6 on {subsystem}" not in caplog.text
         caplog.clear()
         cli(["--format", "json", "namespace", "list", "--subsystem", subsystem, "--nsid", "6"])
         assert f'"nsid": 6' in caplog.text
@@ -577,19 +595,14 @@ def test_namespace_io_stats(self, caplog, gateway):
         assert f'"max_write_latency_ticks":' in caplog.text
         assert f'"io_error":' in caplog.text
         caplog.clear()
-        cli(["namespace", "get_io_stats", "--subsystem", subsystem, "--uuid", uuid2])
-        assert f'IO statistics for namespace with UUID {uuid2} in {subsystem}' in caplog.text
-        caplog.clear()
-        cli(["--format", "json", "namespace", "get_io_stats", "--subsystem", subsystem, "--uuid", uuid2])
-        assert f'"status": 0' in caplog.text
-        assert f'"subsystem_nqn": "{subsystem}"' in caplog.text
-        assert f'"nsid": 6' in caplog.text
-        assert f'"uuid": "{uuid2}"' in caplog.text
-        assert f'"ticks":' in caplog.text
-        assert f'"bytes_written":' in caplog.text
-        assert f'"bytes_read":' in caplog.text
-        assert f'"max_write_latency_ticks":' in caplog.text
-        assert f'"io_error":' in caplog.text
+        rc = 0
+        try:
+            cli(["namespace", "get_io_stats", "--subsystem", subsystem, "--uuid", uuid2, "--nsid", "1"])
+        except SystemExit as sysex:
+            rc = int(str(sysex))
+            pass
+        assert "error: unrecognized arguments: --uuid" in caplog.text
+        assert rc == 2
         caplog.clear()
         rc = 0
         try:
@@ -597,7 +610,7 @@ def test_namespace_io_stats(self, caplog, gateway):
         except SystemExit as sysex:
             rc = int(str(sysex))
             pass
-        assert "error: At least one of --nsid or --uuid arguments is mandatory for get_io_stats command" in caplog.text
+        assert "error: the following arguments are required: --nsid" in caplog.text
         assert rc == 2
 
     @pytest.mark.parametrize("host", host_list)
@@ -814,7 +827,11 @@ def test_remove_namespace(self, caplog, gateway):
         caplog.clear()
         del_ns_req = pb2.namespace_delete_req(subsystem_nqn=subsystem)
         ret = stub.namespace_delete(del_ns_req)
-        assert "At least one of NSID or UUID should be specified for finding a namesapce" in caplog.text
+        assert "Failure deleting namespace, missing NSID" in caplog.text
+        caplog.clear()
+        del_ns_req = pb2.namespace_delete_req(nsid=1)
+        ret = stub.namespace_delete(del_ns_req)
+        assert "Failure deleting namespace 1, missing subsystem NQN" in caplog.text
         caplog.clear()
         cli(["namespace", "del", "--subsystem", subsystem, "--nsid", "6"])
         assert f"Deleting namespace 6 from {subsystem}: Successful" in caplog.text
diff --git a/tests/test_cli_change_lb.py b/tests/test_cli_change_lb.py
index 3aca3b5f2..db234ac0b 100644
--- a/tests/test_cli_change_lb.py
+++ b/tests/test_cli_change_lb.py
@@ -133,8 +133,8 @@ def test_change_namespace_lb_group(caplog, two_gateways):
     time.sleep(10)
     assert f"Adding namespace 1 to {subsystem}: Successful" in caplog.text
     assert f"get_cluster cluster_name='cluster_context_{anagrpid}_0'" in caplog.text
-    assert f"Received request to add a namespace using UUID {uuid} to {subsystem}, ana group {anagrpid}, context: