conftest.py
# global
import pytest

# local
import ivy
import jax

jax.config.update("jax_enable_x64", True)

FW_STRS = ["numpy", "jax", "tensorflow", "torch"]


@pytest.fixture(autouse=True)
def run_around_tests(device, compile_graph, fw):
    if "gpu" in device and fw == "numpy":
        # Numpy does not support GPU
        pytest.skip()
    with ivy.utils.backend.ContextManager(fw):
        with ivy.DefaultDevice(device):
            yield
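
# Note (an illustrative sketch, not part of the original file): because the fixture
# above is autouse, every collected test runs with the selected ivy backend and
# default device already active, once per (device, compile_graph, fw) combination
# produced by pytest_generate_tests below. A hypothetical test therefore needs no
# backend setup of its own, e.g.:
#
#     def test_ones_shape():
#         x = ivy.ones((2, 3))
#         assert ivy.to_numpy(x).shape == (2, 3)
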
def pytest_generate_tests(metafunc):
    # dev_str
    raw_value = metafunc.config.getoption("--device")
    if raw_value == "all":
        devices = ["cpu", "gpu:0", "tpu:0"]
    else:
        devices = raw_value.split(",")

    # framework
    raw_value = metafunc.config.getoption("--backend")
    if raw_value == "all":
        backend_strs = FW_STRS
    else:
        backend_strs = raw_value.split(",")

    # compile_graph
    raw_value = metafunc.config.getoption("--compile_graph")
    if raw_value == "both":
        compile_modes = [True, False]
    elif raw_value == "true":
        compile_modes = [True]
    else:
        compile_modes = [False]

    # create test configs
    configs = list()
    for backend_str in backend_strs:
        for device in devices:
            for compile_graph in compile_modes:
                configs.append((device, compile_graph, backend_str))
    metafunc.parametrize("device,compile_graph,fw", configs)
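
# For reference (an illustrative note, not in the original file): with the default
# option values defined below, the configs built above are
#     [("cpu", True, "numpy"), ("cpu", True, "jax"),
#      ("cpu", True, "tensorflow"), ("cpu", True, "torch")]
# i.e. every test is collected once per backend, on CPU, with compile_graph=True.
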
def pytest_addoption(parser):
    parser.addoption("--device", action="store", default="cpu")
    parser.addoption("--backend", action="store", default="numpy,jax,tensorflow,torch")
    parser.addoption("--compile_graph", action="store", default="true")
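
# Example invocations (a sketch; which devices and backends actually work depends on
# the frameworks and hardware available in your environment):
#     pytest .                                      # defaults: cpu, all four backends, compile_graph=true
#     pytest . --device=gpu:0 --backend=torch,jax   # comma-separated values are split as above
#     pytest . --device=all --compile_graph=both    # "all" expands devices; "both" runs True and False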