Commit a031542
[pytest]: Re-use DVS container when possible (#1816)
- If the fake_platform does not change between test modules, re-use the same DVS container (but still restart it between modules and do some cleanup/re-init to ensure a consistent start state for each test module)
- Add a CLI option --force-recreate-dvs to revert to the previous behavior of creating a new DVS per test module

Signed-off-by: Lawrence Lee <[email protected]>
theasianpianist authored Sep 28, 2021
1 parent a875d60 commit a031542
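
For illustration, here is how a test module interacts with the new behavior (a minimal sketch: the module and test names are hypothetical, while DVS_FAKE_PLATFORM and the dvs fixture come from tests/conftest.py below):

    # test_example.py (hypothetical module)
    # Consecutive modules with the same DVS_FAKE_PLATFORM (or none at all) now
    # share one DVS container, restarted between modules; a different value,
    # or running pytest with --force-recreate-dvs, builds a fresh container.
    DVS_FAKE_PLATFORM = "mellanox"

    def test_smoke(dvs):
        # dvs is still the familiar module-scoped fixture; after this change
        # it is served by the session-scoped manage_dvs fixture.
        dvs.runcmd("uname -a")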
Showing 2 changed files with 101 additions and 26 deletions.
tests/conftest.py (125 changes: 100 additions & 25 deletions)
@@ -47,13 +47,18 @@ def pytest_addoption(parser):
     parser.addoption("--dvsname",
                      action="store",
                      default=None,
-                     help="Name of a persistent DVS container to run the tests with")
+                     help="Name of a persistent DVS container to run the tests with. Mutually exclusive with --force-recreate-dvs")
 
     parser.addoption("--forcedvs",
                      action="store_true",
                      default=False,
                      help="Force tests to run in persistent DVS containers with <32 ports")
 
+    parser.addoption("--force-recreate-dvs",
+                     action="store_true",
+                     default=False,
+                     help="Force the DVS container to be recreated between each test module. Mutually exclusive with --dvsname")
+
     parser.addoption("--keeptb",
                      action="store_true",
                      default=False,
@@ -192,6 +197,9 @@ def __init__(self, ctn_name: str, pid: int, i: int):
         ensure_system(f"nsenter -t {pid} -n ip link set arp off dev {self.pifname}")
         ensure_system(f"nsenter -t {pid} -n sysctl -w net.ipv6.conf.{self.pifname}.disable_ipv6=1")
 
+    def __repr__(self):
+        return f'<VirtualServer> {self.nsname}'
+
     def kill_all_processes(self) -> None:
         pids = subprocess.check_output(f"ip netns pids {self.nsname}", shell=True).decode("utf-8")
         if pids:
@@ -339,9 +347,7 @@ def __init__(
 
         # create virtual server
         self.servers = []
-        for i in range(NUM_PORTS):
-            server = VirtualServer(self.ctn_sw.name, self.ctn_sw_pid, i)
-            self.servers.append(server)
+        self.create_servers()
 
         if self.vct:
             self.vct_connect(newctnname)
@@ -377,13 +383,7 @@ def __init__(
         self.redis_sock = os.path.join(self.mount, "redis.sock")
         self.redis_chassis_sock = os.path.join(self.mount, "redis_chassis.sock")
 
-        # DB wrappers are declared here, lazy-loaded in the tests
-        self.app_db = None
-        self.asic_db = None
-        self.counters_db = None
-        self.config_db = None
-        self.flex_db = None
-        self.state_db = None
+        self.reset_dbs()
 
         # Make sure everything is up and running before turning over control to the caller
         self.check_ready_status_and_init_db()
@@ -392,23 +392,40 @@
         if buffer_model == 'dynamic':
             enable_dynamic_buffer(self.get_config_db(), self.runcmd)
 
+    def create_servers(self):
+        for i in range(NUM_PORTS):
+            server = VirtualServer(self.ctn_sw.name, self.ctn_sw_pid, i)
+            self.servers.append(server)
+
+    def reset_dbs(self):
+        # DB wrappers are declared here, lazy-loaded in the tests
+        self.app_db = None
+        self.asic_db = None
+        self.counters_db = None
+        self.config_db = None
+        self.flex_db = None
+        self.state_db = None
+
     def destroy(self) -> None:
-        if self.appldb:
+        if getattr(self, 'appldb', False):
             del self.appldb
 
         # In case a persistent DVS was used, remove all the extra
         # server links that were created
         if self.persistent:
-            for s in self.servers:
-                s.destroy()
+            self.destroy_servers()
 
         # the persistent and clean-up flags are mutually exclusive
         elif self.cleanup:
             self.ctn.remove(force=True)
             self.ctn_sw.remove(force=True)
             os.system(f"rm -rf {self.mount}")
-            for s in self.servers:
-                s.destroy()
+            self.destroy_servers()
+
+    def destroy_servers(self):
+        for s in self.servers:
+            s.destroy()
+        self.servers = []
 
     def check_ready_status_and_init_db(self) -> None:
         try:
@@ -423,6 +440,7 @@ def check_ready_status_and_init_db(self) -> None:
             # Initialize the databases.
             self.init_asic_db_validator()
             self.init_appl_db_validator()
+            self.reset_dbs()
 
             # Verify that SWSS has finished initializing.
             self.check_swss_ready()
@@ -451,9 +469,9 @@ def _polling_function():
 
             for pname in self.alld:
                 if process_status.get(pname, None) != "RUNNING":
-                    return (False, None)
+                    return (False, process_status)
 
-            return (process_status.get("start.sh", None) == "EXITED", None)
+            return (process_status.get("start.sh", None) == "EXITED", process_status)
 
         wait_for_result(_polling_function, service_polling_config)
 
@@ -1556,35 +1574,92 @@ def verify_vct(self):
         print("vct verifications passed ? %s" % (ret1 and ret2))
         return ret1 and ret2
 
-@pytest.yield_fixture(scope="module")
-def dvs(request) -> DockerVirtualSwitch:
+@pytest.fixture(scope="session")
+def manage_dvs(request) -> str:
+    """
+    Main fixture to manage the lifecycle of the DVS (Docker Virtual Switch) for testing
+
+    Returns:
+        (func) update_dvs function, which can be called on a per-module basis
+               to handle re-creating the DVS if necessary
+    """
     if sys.version_info[0] < 3:
         raise NameError("Python 2 is not supported, please install python 3")
 
     if subprocess.check_call(["/sbin/modprobe", "team"]):
         raise NameError("Cannot install kernel team module, please install a generic kernel")
 
     name = request.config.getoption("--dvsname")
+    using_persistent_dvs = name is not None
     forcedvs = request.config.getoption("--forcedvs")
     keeptb = request.config.getoption("--keeptb")
     imgname = request.config.getoption("--imgname")
     max_cpu = request.config.getoption("--max_cpu")
     buffer_model = request.config.getoption("--buffer_model")
-    fakeplatform = getattr(request.module, "DVS_FAKE_PLATFORM", None)
-    log_path = name if name else request.module.__name__
+    force_recreate = request.config.getoption("--force-recreate-dvs")
+    dvs = None
+    curr_fake_platform = None  # lgtm[py/unused-local-variable]
 
-    dvs = DockerVirtualSwitch(name, imgname, keeptb, fakeplatform, log_path, max_cpu, forcedvs, buffer_model = buffer_model)
+    if using_persistent_dvs and force_recreate:
+        pytest.fail("Options --dvsname and --force-recreate-dvs are mutually exclusive")
 
-    yield dvs
+    def update_dvs(log_path, new_fake_platform=None):
+        """
+        Decides whether or not to create a new DVS
+
+        Create a new DVS in the following cases:
+        1. CLI option `--force-recreate-dvs` was specified (recreate for every module)
+        2. The fake_platform has changed (this can only be set at container creation,
+           so it is necessary to spin up a new DVS)
+        3. No DVS currently exists (i.e. first-time startup)
+
+        Otherwise, restart the existing DVS (to get to a clean state)
+
+        Returns:
+            (DockerVirtualSwitch) a DVS object
+        """
+        nonlocal curr_fake_platform, dvs
+        if force_recreate or \
+           new_fake_platform != curr_fake_platform or \
+           dvs is None:
+
+            if dvs is not None:
+                dvs.get_logs()
+                dvs.destroy()
+
+            dvs = DockerVirtualSwitch(name, imgname, keeptb, new_fake_platform, log_path, max_cpu, forcedvs, buffer_model = buffer_model)
+
+            curr_fake_platform = new_fake_platform
+
+        else:
+            # First generate GCDA files for GCov
+            dvs.runcmd('killall5 -15')
+            # If not re-creating the DVS, restart the container
+            # between modules to ensure a consistent start state
+            dvs.net_cleanup()
+            dvs.destroy_servers()
+            dvs.create_servers()
+            dvs.restart()
+
+        return dvs
+
+    yield update_dvs
 
     dvs.get_logs()
     dvs.destroy()
 
     # restore original config db
     if dvs.persistent:
         dvs.runcmd("mv /etc/sonic/config_db.json.orig /etc/sonic/config_db.json")
         dvs.ctn_restart()
 
+@pytest.yield_fixture(scope="module")
+def dvs(request, manage_dvs) -> DockerVirtualSwitch:
+    fakeplatform = getattr(request.module, "DVS_FAKE_PLATFORM", None)
+    name = request.config.getoption("--dvsname")
+    log_path = name if name else request.module.__name__
+
+    return manage_dvs(log_path, fakeplatform)
+
 @pytest.yield_fixture(scope="module")
 def vct(request):
     vctns = request.config.getoption("--vctns")
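
The session/module fixture split above reduces to a small reusable pattern: a session-scoped fixture yields an updater closure, and a module-scoped fixture calls it with the module's key. A stripped-down sketch under illustrative names (none of these identifiers are from the commit):

    import pytest

    @pytest.fixture(scope="session")
    def _manager():
        resource = None      # survives across test modules
        current_key = None

        def update(key):
            nonlocal resource, current_key
            if resource is None or key != current_key:
                # Rebuild only when the key (the fake platform, in conftest.py)
                # changes; otherwise the cached resource is reused.
                resource = f"container-for-{key}"  # stand-in for DockerVirtualSwitch(...)
                current_key = key
            return resource

        yield update

    @pytest.fixture(scope="module")
    def resource(request, _manager):
        return _manager(getattr(request.module, "KEY", None))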
tests/dvslib/dvs_common.py (2 changes: 1 addition & 1 deletion)
@@ -56,7 +56,7 @@ def wait_for_result(
         time.sleep(polling_config.polling_interval)
 
     if polling_config.strict:
-        message = failure_message or f"Operation timed out after {polling_config.timeout} seconds"
+        message = failure_message or f"Operation timed out after {polling_config.timeout} seconds with result {result}"
         assert False, message
 
     return (False, result)
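
To see what the extra {result} buys, consider a strict polling call: on timeout, the assertion message now carries the last value the polling function returned instead of only the duration. A hedged usage sketch (the PollingConfig fields match dvs_common.py; the polling function and its state are made up):

    from dvslib.dvs_common import PollingConfig, wait_for_result

    def _check_port_up():
        status = {"Ethernet0": "down"}  # hypothetical device snapshot
        return (status["Ethernet0"] == "up", status)

    # With strict=True, a timeout now fails with the final status dict in the
    # message, which makes the failure immediately debuggable.
    wait_for_result(_check_port_up, PollingConfig(polling_interval=1, timeout=5, strict=True))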
