From c2fd5118a1f76d48cdc38fa0fa5048e9e1685fdc Mon Sep 17 00:00:00 2001
From: Gil Bregman
Date: Thu, 26 Dec 2024 19:24:52 +0200
Subject: [PATCH] Verify python source files with flake8. Fixes #994

Signed-off-by: Gil Bregman
---
 .github/workflows/build-container.yml |   6 +
 Makefile                              |   6 +
 control/cephutils.py                  |  13 +-
 control/discovery.py                  | 215 ++++++++++++--------------
 control/rebalance.py                  |  24 ++-
 control/utils.py                      |  24 +--
 tests/conftest.py                     |   2 -
 tests/ha/start_up.sh                  |   3 +-
 tests/test_dhchap.py                  |  19 +--
 tests/test_grpc.py                    |   4 +-
 tests/test_log_files.py               |   6 +-
 tests/test_multi_gateway.py           |   1 -
 tests/test_nsid.py                    |   2 -
 tests/test_omap_lock.py               |   7 +-
 tests/test_psk.py                     |  19 +--
 tests/test_server.py                  |  13 +-
 tests/test_state.py                   |   4 +-
 17 files changed, 173 insertions(+), 195 deletions(-)

diff --git a/.github/workflows/build-container.yml b/.github/workflows/build-container.yml
index 3d7bead8f..c37b709e2 100644
--- a/.github/workflows/build-container.yml
+++ b/.github/workflows/build-container.yml
@@ -31,6 +31,12 @@ jobs:
       with:
         submodules: recursive
 
+    - name: Flake8 Linting
+      uses: lukacat10/Flake8@v5
+
+    - name: Verify Python source files
+      run: make verify
+
     - name: Build container images - spdk
       run: make build SVC="spdk" SPDK_TARGET_ARCH=x86-64-v2
 
diff --git a/Makefile b/Makefile
index 0b44bdda9..49159553c 100644
--- a/Makefile
+++ b/Makefile
@@ -30,6 +30,12 @@ include mk/autohelp.mk
 .DEFAULT_GOAL := all
 all: setup $(ALL)
 
+FLAKE_IGNORE = --extend-ignore E501,E251,E225,E711,E128,F541,W291,E211,E202,E265,E117,E111,E203,E127,E261,E222,E201,E262,E303,E302,E221,E231
+
+verify: ## Run Python source files through flake8
+	@echo Verifying Python source files
+	flake8 $(FLAKE_IGNORE) control/*.py
+
 setup: ## Configure huge-pages (requires sudo/root password)
 	@echo Setup core dump pattern as /tmp/coredump/core.*
 
diff --git a/control/cephutils.py b/control/cephutils.py
index 7452a008b..614382a04 100644
--- a/control/cephutils.py
+++ b/control/cephutils.py
@@ -32,6 +32,7 @@ def execute_ceph_monitor_command(self, cmd):
             rply = cluster.mon_command(cmd, b'')
             self.logger.debug(f"Monitor reply: {rply}")
             return rply
+
     def get_gw_id_owner_ana_group(self, pool, group, anagrp):
         str = '{' + f'"prefix":"nvme-gw show", "pool":"{pool}", "group":"{group}"' + '}'
         self.logger.debug(f"nvme-show string: {str}")
@@ -51,10 +52,10 @@ def get_gw_id_owner_ana_group(self, pool, group, anagrp):
         return gw_id
 
     def is_rebalance_supported(self):
-        return self.rebalance_supported
+        return self.rebalance_supported
 
     def get_rebalance_ana_group(self):
-        return self.rebalance_ana_group
+        return self.rebalance_ana_group
 
     def get_number_created_gateways(self, pool, group):
         now = time.time()
@@ -168,13 +169,13 @@ def create_image(self, pool_name, image_name, size) -> bool:
         rbd_inst = rbd.RBD()
         try:
             rbd_inst.create(ioctx, image_name, size)
-        except rbd.ImageExists as ex:
+        except rbd.ImageExists:
             self.logger.exception(f"Image {pool_name}/{image_name} was created just now")
             raise rbd.ImageExists(f"Image {pool_name}/{image_name} was just created by someone else, please retry",
                                   errno = errno.EAGAIN)
-        except Exception as ex:
+        except Exception:
             self.logger.exception(f"Can't create image {pool_name}/{image_name}")
-            raise ex
+            raise
 
         return True
 
@@ -185,7 +186,7 @@ def get_image_size(self, pool_name, image_name) -> int:
         with rados.Rados(conffile=self.ceph_conf, rados_id=self.rados_id) as cluster:
             with cluster.open_ioctx(pool_name) as ioctx:
-                rbd_inst = rbd.RBD()
+                rbd.RBD()
                 try:
                     with rbd.Image(ioctx, image_name) as img:
                         image_size = img.size()
diff
--git a/control/discovery.py b/control/discovery.py index eae1a5048..7a93789db 100644 --- a/control/discovery.py +++ b/control/discovery.py @@ -8,15 +8,12 @@ # import argparse -import grpc import json from .config import GatewayConfig from .state import GatewayState, LocalGatewayState, OmapGatewayState, GatewayStateHandler from .utils import GatewayLogger -from .proto import gateway_pb2 as pb2 -import rados -from typing import Dict, Optional +from typing import Dict import socket import threading @@ -27,8 +24,7 @@ import selectors import os from dataclasses import dataclass, field -from ctypes import Structure, LittleEndianStructure, c_bool, c_ubyte, c_uint8, c_uint16, c_uint32, c_uint64, c_float -from google.protobuf import json_format +from ctypes import LittleEndianStructure, c_ubyte, c_uint8, c_uint16, c_uint32, c_uint64 # NVMe tcp pdu type class NVME_TCP_PDU(enum.IntFlag): @@ -107,6 +103,7 @@ class NVMF_TREQ_SECURE_CHANNEL(enum.IntFlag): REQUIRED = 0x1 NOT_REQUIRED = 0x2 + # maximum number of connections MAX_CONNECTION = 10240 @@ -252,7 +249,7 @@ class NVMeIdentify(AutoSerializableStructure): ] # for set feature, keep alive and async -class CqeNVMe(AutoSerializableStructure): +class CqeNVMe(AutoSerializableStructure): _fields_ = [ ("dword0", c_uint32), ("dword1", c_uint32), @@ -345,28 +342,28 @@ def __exit__(self, exc_type, exc_value, traceback): for key in self.conn_vals: try: self.selector.unregister(self.conn_vals[key].connection) - except Except as ex: + except Exception: pass try: self.conn_vals[key].connection.close() - except Except as ex: + except Exception: pass self.conn_vals = {} if self.sock: try: self.selector.unregister(self.sock) - except Exception as ex: + except Exception: pass try: self.sock.close() - except Exception as ex: + except Exception: pass self.sock = None try: self.selector.close() - except Exception as ex: + except Exception: pass self.selector = None @@ -407,18 +404,18 @@ def reply_fc_cmd_connect(self, conn, data, cmd_id): self.logger.debug("handle connect request.") self_conn = self.conn_vals[conn.fileno()] - hf_nvmeof_cmd_connect_rsvd1 = struct.unpack_from('<19B', data, 13) + hf_nvmeof_cmd_connect_rsvd1 = struct.unpack_from('<19B', data, 13) # noqa: F841 SIGL1 = struct.unpack_from('> 8) & 0x1F - get_logpage_lsi = nvme_get_logpage_dword11 >> 16 - get_logpage_uid_idx = nvme_get_logpage_dword14 & 0x3F + get_logpage_lsp = (nvme_get_logpage_dword10 >> 8) & 0x1F # noqa: F841 + get_logpage_lsi = nvme_get_logpage_dword11 >> 16 # noqa: F841 + get_logpage_uid_idx = nvme_get_logpage_dword14 & 0x3F # noqa: F841 if get_logpage_lid != 0x70: self.logger.error("request type error, not discovery request.") @@ -747,7 +738,6 @@ def reply_get_log_page(self, conn, data, cmd_id): allow_listeners = self_conn.allow_listeners if len(allow_listeners) == 0: for host in hosts: - a = host["host_nqn"] if host["host_nqn"] == '*' or host["host_nqn"] == hostnqn: for listener in listeners: # TODO: It is better to change nqn in the "listener" @@ -784,22 +774,22 @@ def reply_get_log_page(self, conn, data, cmd_id): log_entry.asqsz = 128 # transport service indentifier str_trsvcid = str(allow_listeners[log_entry_counter]["trsvcid"]) - log_entry.trsvcid = (c_ubyte * 32)(*[c_ubyte(x) for x \ + log_entry.trsvcid = (c_ubyte * 32)(*[c_ubyte(x) for x in str_trsvcid.encode()]) log_entry.trsvcid[len(str_trsvcid):] = \ [c_ubyte(0x20)] * (32 - len(str_trsvcid)) # NVM subsystem qualified name - log_entry.subnqn = (c_ubyte * 256)(*[c_ubyte(x) for x \ + log_entry.subnqn = (c_ubyte * 256)(*[c_ubyte(x) 
for x in allow_listeners[log_entry_counter]["nqn"].encode()]) log_entry.subnqn[len(allow_listeners[log_entry_counter]["nqn"]):] = \ [c_ubyte(0x00)] * (256 - len(allow_listeners[log_entry_counter]["nqn"])) # Transport address - log_entry.traddr = (c_ubyte * 256)(*[c_ubyte(x) for x \ + log_entry.traddr = (c_ubyte * 256)(*[c_ubyte(x) for x in allow_listeners[log_entry_counter]["traddr"].encode()]) log_entry.traddr[len(allow_listeners[log_entry_counter]["traddr"]):] = \ [c_ubyte(0x20)] * (256 - len(allow_listeners[log_entry_counter]["traddr"])) - self_conn.log_page[1024*(log_entry_counter+1): \ + self_conn.log_page[1024*(log_entry_counter+1): 1024*(log_entry_counter+2)] = log_entry log_entry_counter += 1 else: @@ -850,21 +840,21 @@ def reply_keep_alive(self, conn, data, cmd_id): self.logger.debug("handle keep alive request.") self_conn = self.conn_vals[conn.fileno()] nvme_sgl = struct.unpack_from('<16B', data, 32) - nvme_sgl_desc_type = nvme_sgl[15] & 0xF0 - nvme_sgl_desc_sub_type = nvme_sgl[15] & 0x0F - nvme_keep_alive_dword10 = struct.unpack_from('int: min_load = 2000 chosen_ana_group = 0 - for ana_grp in self.gw_srv.ana_grp_ns_load : - if ana_grp in grp_list : + for ana_grp in self.gw_srv.ana_grp_ns_load: + if ana_grp in grp_list: if self.gw_srv.ana_grp_ns_load[ana_grp] <= min_load: min_load = self.gw_srv.ana_grp_ns_load[ana_grp] chosen_ana_group = ana_grp @@ -66,12 +60,12 @@ def find_min_loaded_group(self, grp_list)->int: def find_min_loaded_group_in_subsys(self, nqn, grp_list)->int: min_load = 2000 chosen_ana_group = 0 - for ana_grp in grp_list : - if self.gw_srv.ana_grp_ns_load[ana_grp] == 0: - self.gw_srv.ana_grp_subs_load[ana_grp][nqn] = 0 - return 0, ana_grp + for ana_grp in grp_list: + if self.gw_srv.ana_grp_ns_load[ana_grp] == 0: + self.gw_srv.ana_grp_subs_load[ana_grp][nqn] = 0 + return 0, ana_grp for ana_grp in self.gw_srv.ana_grp_subs_load : - if ana_grp in grp_list : + if ana_grp in grp_list: if nqn in self.gw_srv.ana_grp_subs_load[ana_grp]: if self.gw_srv.ana_grp_subs_load[ana_grp][nqn] <= min_load: min_load = self.gw_srv.ana_grp_subs_load[ana_grp][nqn] @@ -119,8 +113,8 @@ def rebalance_logic(self, request, context)->int: self.logger.info(f"warning: empty group {ana_grp} of Deleting GW still appears Optimized") return 1 else : - if not ongoing_scale_down_rebalance and (self.gw_srv.ana_grp_state[worker_ana_group] == pb2.ana_state.OPTIMIZED) : - # if my optimized ana group == worker-ana-group or worker-ana-group is also in optimized state on this GW machine + if not ongoing_scale_down_rebalance and (self.gw_srv.ana_grp_state[worker_ana_group] == pb2.ana_state.OPTIMIZED) : + # if my optimized ana group == worker-ana-group or worker-ana-group is also in optimized state on this GW machine for nqn in self.gw_srv.ana_grp_subs_load[ana_grp] : #need to search all nqns not only inside the current load num_ns_in_nqn = self.gw_srv.subsystem_nsid_bdev_and_uuid.get_namespace_count(nqn, None, 0) target_subs_per_ana = num_ns_in_nqn/num_active_ana_groups diff --git a/control/utils.py b/control/utils.py index 570f54159..6efc08259 100644 --- a/control/utils.py +++ b/control/utils.py @@ -31,10 +31,13 @@ def get_value_from_key(e_type, keyval, ignore_case = False): except IndexError: pass - if ignore_case and val == None and type(keyval) == str: - val = get_value_from_key(e_type, keyval.lower(), False) - if ignore_case and val == None and type(keyval) == str: - val = get_value_from_key(e_type, keyval.upper(), False) + if val is not None or not ignore_case: + return val + + if isinstance(keyval, 
str): + val = GatewayEnumUtils.get_value_from_key(e_type, keyval.lower(), False) + if val is None and isinstance(keyval, str): + val = GatewayEnumUtils.get_value_from_key(e_type, keyval.upper(), False) return val @@ -111,7 +114,7 @@ def is_valid_uuid(uuid_val) -> bool: for u in uuid_parts: try: - n = int(u, 16) + int(u, 16) except ValueError: return False @@ -125,11 +128,11 @@ def is_valid_nqn(nqn): NQN_UUID_PREFIX = "nqn.2014-08.org.nvmexpress:uuid:" NQN_UUID_PREFIX_LENGTH = len(NQN_UUID_PREFIX) - if type(nqn) != str: + if not isinstance(nqn, str): return (errno.EINVAL, f"Invalid type {type(nqn)} for NQN, must be a string") try: - b = nqn.encode(encoding="utf-8") + nqn.encode(encoding="utf-8") except UnicodeEncodeError: return (errno.EINVAL, f"Invalid NQN \"{nqn}\", must have an UTF-8 encoding") @@ -163,8 +166,8 @@ def is_valid_nqn(nqn): year_part, month_part = date_part.split("-") if len(year_part) != 4 or len(month_part) != 2: return (errno.EINVAL, f"Invalid NQN \"{nqn}\": invalid date code") - n = int(year_part) - n = int(month_part) + int(year_part) + int(month_part) except ValueError: return (errno.EINVAL, f"Invalid NQN \"{nqn}\": invalid date code") @@ -306,7 +309,7 @@ def rotate_backup_directories(dirname, count): pass def set_log_level(self, log_level): - if type(log_level) == str: + if isinstance(log_level, str): log_level = log_level.upper() self.logger.setLevel(log_level) logger_parent = self.logger.parent @@ -326,7 +329,6 @@ def log_file_rotate(src, dest): GatewayLogger.logger.info(m) for e in errs: GatewayLogger.logger.error(e) - else: os.rename(src, dest) diff --git a/tests/conftest.py b/tests/conftest.py index 73f4baa58..6bf548268 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,7 +1,5 @@ import pytest -import rados from control.config import GatewayConfig -from control.state import OmapGatewayState def pytest_addoption(parser): diff --git a/tests/ha/start_up.sh b/tests/ha/start_up.sh index 309668916..494538629 100755 --- a/tests/ha/start_up.sh +++ b/tests/ha/start_up.sh @@ -13,7 +13,7 @@ if [ $# -ge 1 ]; then fi fi echo ℹ️ Starting $SCALE nvmeof gateways -docker compose up -d --remove-orphans --scale nvmeof=$SCALE nvmeof +docker compose up -d --remove-orphans ceph # Waiting for the ceph container to become healthy while true; do @@ -28,6 +28,7 @@ while true; do fi done echo ✅ ceph is healthy +docker compose up -d --remove-orphans --scale nvmeof=$SCALE nvmeof echo ℹ️ Increase debug logs level docker compose exec -T ceph ceph config get mon.a diff --git a/tests/test_dhchap.py b/tests/test_dhchap.py index 6de95f56b..33fe8ac00 100644 --- a/tests/test_dhchap.py +++ b/tests/test_dhchap.py @@ -4,13 +4,7 @@ from control.cli import main as cli from control.cli import main_test as cli_test from control.cephutils import CephUtils -from control.utils import GatewayUtils -from control.config import GatewayConfig import grpc -from control.proto import gateway_pb2 as pb2 -from control.proto import gateway_pb2_grpc as pb2_grpc -import os -import os.path image = "mytestdevimage" pool = "rbd" @@ -64,7 +58,7 @@ def gateway(config): gateway.serve() # Bind the client and Gateway - channel = grpc.insecure_channel(f"{addr}:{port}") + grpc.insecure_channel(f"{addr}:{port}") yield gateway.gateway_rpc # Stop gateway @@ -72,7 +66,6 @@ def gateway(config): gateway.gateway_rpc.gateway_state.delete_state() def test_setup(caplog, gateway): - gw = gateway caplog.clear() cli(["subsystem", "add", "--subsystem", subsystem, "--no-group-append"]) assert f"create_subsystem {subsystem}: True" in 
caplog.text @@ -196,13 +189,13 @@ def test_list_listeners(caplog, gateway): listeners = cli_test(["listener", "list", "--subsystem", subsystem]) assert len(listeners.listeners) == 2 found = 0 - for l in listeners.listeners: - if l.trsvcid == 5001: + for lstnr in listeners.listeners: + if lstnr.trsvcid == 5001: found += 1 - assert l.secure - elif l.trsvcid == 5002: + assert lstnr.secure + elif lstnr.trsvcid == 5002: found += 1 - assert not l.secure + assert not lstnr.secure else: assert False assert found == 2 diff --git a/tests/test_grpc.py b/tests/test_grpc.py index 1a6936725..5034f538c 100644 --- a/tests/test_grpc.py +++ b/tests/test_grpc.py @@ -3,8 +3,6 @@ from control.server import GatewayServer from control.cli import main as cli from control.cephutils import CephUtils -import logging -import warnings image = "mytestdevimage" pool = "rbd" @@ -69,7 +67,7 @@ def test_create_get_subsys(caplog, config): "--r-megabytes-per-second", "5"]) assert f"Setting QOS limits of namespace 1 in {subsystem_prefix}0: Successful" in caplog.text assert f"No previous QOS limits found, this is the first time the limits are set for namespace 1 on {subsystem_prefix}0" not in caplog.text - + # add host to the first namespace caplog.clear() cli(["namespace", "add_host", "--subsystem", f"{subsystem_prefix}0", "--nsid", "1", "--host-nqn", f"{host_prefix}0"]) diff --git a/tests/test_log_files.py b/tests/test_log_files.py index 44eafccd9..6939808a6 100644 --- a/tests/test_log_files.py +++ b/tests/test_log_files.py @@ -1,7 +1,5 @@ import pytest from control.server import GatewayServer -from control.utils import GatewayLogger -import socket from control.cli import main as cli from control.cli import main_test as cli_test import grpc @@ -55,8 +53,7 @@ def gateway(config, request): gateway.serve() # Bind the client and Gateway - channel = grpc.insecure_channel(f"{addr}:{port}") - stub = pb2_grpc.GatewayStub(channel) + grpc.insecure_channel(f"{addr}:{port}") yield gateway # Stop gateway @@ -82,7 +79,6 @@ def test_log_files(gateway): assert f"spdk-{gw.name}" in spdk_files[0] def test_log_files_disabled(gateway): - gw = gateway cli(["subsystem", "add", "--subsystem", subsystem_prefix + "1"]) subs_list = cli_test(["--format", "text", "subsystem", "list"]) assert subs_list != None diff --git a/tests/test_multi_gateway.py b/tests/test_multi_gateway.py index 85fe57798..08a845653 100644 --- a/tests/test_multi_gateway.py +++ b/tests/test_multi_gateway.py @@ -134,4 +134,3 @@ def test_multi_gateway_coordination(config, image, conn): assert nsListB[0]["uuid"] == uuid assert nsListB[0]["rbd_image_name"] == image assert nsListB[0]["rbd_pool_name"] == pool - diff --git a/tests/test_nsid.py b/tests/test_nsid.py index f31867251..4119b38cd 100644 --- a/tests/test_nsid.py +++ b/tests/test_nsid.py @@ -1,4 +1,3 @@ -import pytest import copy import grpc import json @@ -8,7 +7,6 @@ from control.cephutils import CephUtils from control.proto import gateway_pb2 as pb2 from control.proto import gateway_pb2_grpc as pb2_grpc -import spdk.rpc.bdev as rpc_bdev image = "mytestdevimage" pool = "rbd" diff --git a/tests/test_omap_lock.py b/tests/test_omap_lock.py index e9473bb99..eecea3993 100644 --- a/tests/test_omap_lock.py +++ b/tests/test_omap_lock.py @@ -16,7 +16,7 @@ host_nqn_prefix = "nqn.2014-08.org.nvmexpress:uuid:22207d09-d8af-4ed2-84ec-a6d80b" created_resource_count = 10 -def setup_config(config, gw1_name, gw2_name, gw_group, update_notify ,update_interval_sec, disable_unlock, lock_duration, +def setup_config(config, gw1_name, gw2_name, 
gw_group, update_notify, update_interval_sec, disable_unlock, lock_duration, sock1_name, sock2_name, port_inc): """Sets up the config objects for gateways A and B """ @@ -118,7 +118,8 @@ def conn_concurrent(config, request): ceph_utils = CephUtils(config) # Setup GatewayA and GatewayB configs - configA, configB = setup_config(config, "GatewayAAA", "GatewayBBB", "Group3", True, 5, False, 60, + configA, configB = setup_config(config, "GatewayAAA", "GatewayBBB", "Group3", update_notify, update_interval_sec, + disable_unlock, lock_duration, "spdk_GatewayAAA.sock", "spdk_GatewayBBB.sock", 4) addr = configA.get("gateway", "addr") @@ -254,7 +255,7 @@ def test_trying_to_lock_twice(config, image, conn_lock_twice, caplog): caplog.clear() stubA, stubB = conn_lock_twice - with pytest.raises(Exception) as ex: + with pytest.raises(Exception): create_resource_by_index(stubA, 100000, None) create_resource_by_index(stubB, 100001, None) assert "OMAP file unlock was disabled, will not unlock file" in caplog.text diff --git a/tests/test_psk.py b/tests/test_psk.py index 536035c8d..2338737cb 100644 --- a/tests/test_psk.py +++ b/tests/test_psk.py @@ -4,13 +4,7 @@ from control.cli import main as cli from control.cli import main_test as cli_test from control.cephutils import CephUtils -from control.utils import GatewayUtils -from control.config import GatewayConfig import grpc -from control.proto import gateway_pb2 as pb2 -from control.proto import gateway_pb2_grpc as pb2_grpc -import os -import os.path image = "mytestdevimage" pool = "rbd" @@ -57,7 +51,7 @@ def gateway(config): gateway.serve() # Bind the client and Gateway - channel = grpc.insecure_channel(f"{addr}:{port}") + grpc.insecure_channel(f"{addr}:{port}") yield gateway.gateway_rpc # Stop gateway @@ -65,7 +59,6 @@ def gateway(config): gateway.gateway_rpc.gateway_state.delete_state() def test_setup(caplog, gateway): - gw = gateway caplog.clear() cli(["subsystem", "add", "--subsystem", subsystem]) assert f"create_subsystem {subsystem}: True" in caplog.text @@ -183,13 +176,13 @@ def test_list_listeners(caplog, gateway): listeners = cli_test(["listener", "list", "--subsystem", subsystem]) assert len(listeners.listeners) == 2 found = 0 - for l in listeners.listeners: - if l.trsvcid == 5001: + for lstnr in listeners.listeners: + if lstnr.trsvcid == 5001: found += 1 - assert l.secure - elif l.trsvcid == 5002: + assert lstnr.secure + elif lstnr.trsvcid == 5002: found += 1 - assert not l.secure + assert not lstnr.secure else: assert False assert found == 2 diff --git a/tests/test_server.py b/tests/test_server.py index fdfd209c4..fb2adb483 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -18,11 +18,11 @@ def _config(self, config): def validate_exception(self, e): pattern = r'Gateway subprocess terminated pid=(\d+) exit_code=(-?\d+)' m = re.match(pattern, e.code) - assert(m) + assert m pid = int(m.group(1)) code = int(m.group(2)) - assert(pid > 0) - assert(code) + assert pid > 0 + assert code def remove_core_files(self, directory_path): # List all files starting with "core." 
in the core directory @@ -38,9 +38,9 @@ def remove_core_files(self, directory_path): print(f"Removed: {file_path}") def assert_no_core_files(self, directory_path): - assert(os.path.exists(directory_path) and os.path.isdir(directory_path)) + assert os.path.exists(directory_path) and os.path.isdir(directory_path) files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f)) and f.startswith("core.")] - assert(len(files) == 0) + assert len(files) == 0 def test_spdk_exception(self): """Tests spdk sub process exiting with error.""" @@ -80,7 +80,7 @@ def test_monc_exit(self): time.sleep(2) # Send SIGABRT (abort signal) to the monitor client process - assert(gateway.monitor_client_process) + assert gateway.monitor_client_process gateway.monitor_client_process.send_signal(signal.SIGABRT) # Block on running keep alive ping @@ -116,5 +116,6 @@ def test_spdk_multi_gateway_exception(self): gatewayB.serve() self.validate_exception(cm.exception) + if __name__ == '__main__': unittest.main() diff --git a/tests/test_state.py b/tests/test_state.py index fda1366d0..59c670935 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -181,8 +181,8 @@ def _state_notify_update(update, is_add_req): # to test notify capability elapsed = time.time() - start wait_interval = update_interval_sec - elapsed - 0.5 - assert(wait_interval > 0) - assert(wait_interval < update_interval_sec) + assert wait_interval > 0 + assert wait_interval < update_interval_sec time.sleep(wait_interval) # expect 4 updates: addition, two-step change and removal
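
Note: the Python hunks above are mostly mechanical flake8 cleanups: dropping unused "as ex" bindings in favor of a bare "raise", replacing type(x) == str comparisons with isinstance(x, str), writing assert as a statement without parentheses, removing unused imports and locals, and silencing intentionally unused values with "# noqa: F841". Below is a minimal, hypothetical sketch of these idioms; the names (ImageExistsError, create_image, set_log_level, parse_connect) are illustrative stand-ins and do not come from the gateway code.

# Hypothetical sketch only: names here are illustrative and are not taken
# verbatim from control/*.py.
import errno
import struct


class ImageExistsError(Exception):
    """Stand-in for rbd.ImageExists, so the sketch has no rbd dependency."""


def create_image(rbd_inst, ioctx, pool_name, image_name, size) -> bool:
    try:
        rbd_inst.create(ioctx, image_name, size)
    except ImageExistsError:
        # No unused "as ex" binding on the except clause (F841).
        raise ImageExistsError(
            f"Image {pool_name}/{image_name} was just created by someone "
            "else, please retry", errno.EAGAIN)
    except Exception:
        # A bare "raise" re-raises the active exception and keeps its
        # original traceback, unlike "raise ex".
        raise
    return True


def set_log_level(logger, log_level):
    # isinstance() instead of comparing types with "type(x) == str".
    if isinstance(log_level, str):
        log_level = log_level.upper()
    logger.setLevel(log_level)


def parse_connect(data):
    # A value kept only for documentation purposes is silenced explicitly,
    # mirroring the discovery.py changes.
    rsvd1 = struct.unpack_from('<19B', data, 13)  # noqa: F841
    record_format, = struct.unpack_from('<H', data, 0)
    # assert is a statement, not a function, so no parentheses are needed.
    assert record_format >= 0
    return record_format

With the new Makefile target, "make verify" runs flake8 with the extended ignore list on control/*.py, so code written in this style passes while unused imports (F401) and unused local variables (F841) are still reported unless explicitly silenced.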