Fix timm filter and run timm models in parallel (#23116)
### Details:
 - *Update list of timm models to comply with new version of timm*
 - *Run timm and torchvision models, in both trace and export modes, in parallel (see the sketch below)*

### Tickets:
 - *ticket-id*
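
For reference, here is a minimal sketch of the split-and-parallelize invocation this change introduces in the CI workflow below. The class names, marker, and worker count mirror the diff; the use of `pytest.main` and the local path are illustrative only, and `pytest-xdist` must be installed for the `-n` option to work.

```python
# Minimal sketch: reproduce the CI split locally (illustrative, not the CI script itself).
# Requires: pip install pytest pytest-xdist[psutil]
import pytest

# Step 1: timm and torchvision suites, distributed across 4 workers.
pytest.main([
    "tests/model_hub_tests/pytorch",
    "-m", "precommit",
    "-n", "4",
    "-k", "TestTimmConvertModel or TestTorchHubConvertModel",
])

# Step 2: the remaining model tests, run serially.
pytest.main([
    "tests/model_hub_tests/pytorch",
    "-m", "precommit",
    "-k", "not (TestTimmConvertModel or TestTorchHubConvertModel)",
])
```
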
mvafin authored Mar 1, 2024
1 parent fddda65 commit 4edb040
Showing 13 changed files with 48 additions and 48 deletions.
16 changes: 14 additions & 2 deletions .github/workflows/job_pytorch_models_tests.yml
@@ -106,6 +106,8 @@ jobs:

- name: Install OpenVINO Python wheels
run: |
# To enable pytest parallel features
python3 -m pip install pytest-xdist[psutil]
python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
python3 -m pip install ${INSTALL_DIR}/openvino_tokenizers-*
@@ -118,10 +120,20 @@
env:
CPLUS_INCLUDE_PATH: ${{ env.Python_ROOT_DIR }}/include/python${{ env.PYTHON_VERSION }}

- name: PyTorch Models Tests
- name: PyTorch Models Tests Timm and Torchvision
run: |
export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/pytorch -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v
python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/pytorch/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_timm_tv_tests.html --self-contained-html -v -n 4 -k "TestTimmConvertModel or TestTorchHubConvertModel"
env:
TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}}
TEST_DEVICE: CPU
OP_REPORT_FILE: ${{ env.INSTALL_TEST_DIR }}/TEST-torch_unsupported_ops.log

- name: PyTorch Models Tests Not Timm or Torchvision
if: always()
run: |
export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/pytorch -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v -k "not (TestTimmConvertModel or TestTorchHubConvertModel)"
env:
TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}}
TEST_DEVICE: CPU
2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/hf_transformers_models
@@ -128,7 +128,7 @@ hf-internal-testing/tiny-random-DonutSwinModel,donut-swin
hf-internal-testing/tiny-random-EfficientFormerForImageClassification,efficientformer
hf-internal-testing/tiny-random-flaubert,flaubert
hf-internal-testing/tiny-random-FocalNetModel,focalnet
hf-internal-testing/tiny-random-GPTBigCodeForCausalLM,gpt_bigcode,xfail,Conversion is failed for: aten::mul
hf-internal-testing/tiny-random-GPTBigCodeForCausalLM,gpt_bigcode
hf-internal-testing/tiny-random-GPTJModel,gptj
hf-internal-testing/tiny-random-groupvit,groupvit
hf-internal-testing/tiny-random-IBertModel,ibert
2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/test_aliked.py
@@ -84,7 +84,7 @@ def setup_class(self):
subprocess.check_call(["sh", "build.sh"], cwd=os.path.join(
self.repo_dir.name, "custom_ops"))

def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
sys.path.append(self.repo_dir.name)
from nets.aliked import ALIKED

2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/test_detectron2.py
@@ -23,7 +23,7 @@ def setup_class(self):
subprocess.run([sys.executable, "-m", "pip", "install",
"git+https://github.com/facebookresearch/detectron2.git@017abbfa5f2c2a2afa045200c2af9ccf2fc6227f"])

def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
from detectron2 import model_zoo, export
from detectron2.modeling import build_model, PanopticFPN
from detectron2.checkpoint import DetectionCheckpointer
2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/test_edsr.py
@@ -38,7 +38,7 @@


class TestEdsrConvertModel(TestTorchConvertModel):
def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
# image link from https://github.com/eugenesiow/super-image
url = 'https://paperswithcode.com/media/datasets/Set5-0000002728-07a9793f_zA3bDjj.jpg'
image = Image.open(requests.get(url, stream=True).raw)
2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/test_gfpgan.py
@@ -28,7 +28,7 @@ def setup_class(self):
subprocess.check_call(
["wget", "-nv", checkpoint_url], cwd=self.repo_dir.name)

def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
sys.path.append(self.repo_dir.name)
from gfpgan import GFPGANer

2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/test_hf_transformers.py
@@ -98,7 +98,7 @@ def setup_class(self):
self.image = Image.open(requests.get(url, stream=True).raw)
self.cuda_available, self.gptq_postinit = None, None

def load_model_impl(self, name, type):
def load_model(self, name, type):
import torch
name_suffix = ''
from transformers import AutoConfig
2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/test_speech-transformer.py
@@ -24,7 +24,7 @@ def setup_class(self):
checkpoint_url = "https://github.com/foamliu/Speech-Transformer/releases/download/v1.0/speech-transformer-cn.pt"
subprocess.check_call(["wget", "-nv", checkpoint_url], cwd=self.repo_dir.name)

def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
sys.path.append(self.repo_dir.name)
from transformer.transformer import Transformer

24 changes: 10 additions & 14 deletions tests/model_hub_tests/pytorch/test_timm.py
@@ -6,22 +6,23 @@
import pytest
import timm
import torch
from models_hub_common.constants import hf_hub_cache_dir
from models_hub_common.utils import cleanup_dir, get_models_list
from models_hub_common.utils import get_models_list

from torch_utils import TestTorchConvertModel, process_pytest_marks
from openvino import convert_model
from torch.export import export
from packaging import version


def filter_timm(timm_list: list) -> list:
unique_models = dict()
filtered_list = []
ignore_list = ["base", "xxtiny", "xxs", "pico", "xtiny", "xs", "nano", "tiny", "s", "mini", "small", "lite",
"medium", "m", "big", "large", "l", "xlarge", "xl", "huge", "xxlarge", "gigantic", "giant", "enormous"]
ignore_list = ["base", "atto", "femto", "xxtiny", "xxsmall", "xxs", "pico",
"xtiny", "xmall", "xs", "nano", "tiny", "s", "mini", "small",
"lite", "medium", "m", "big", "large", "l", "xlarge", "xl",
"huge", "xxlarge", "gigantic", "giant", "enormous"]
ignore_set = set(ignore_list)
for name in sorted(timm_list):
if "x_" in name:
# x_small or xx_small should be merged to xsmall and xxsmall
name.replace("x_", "x")
# first: remove datasets
name_parts = name.split(".")
_name = "_".join(name.split(".")[:-1]) if len(name_parts) > 1 else name
@@ -50,7 +51,7 @@ def get_all_models() -> list:


class TestTimmConvertModel(TestTorchConvertModel):
def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
m = timm.create_model(model_name, pretrained=True)
cfg = timm.get_pretrained_cfg(model_name)
shape = [1] + list(cfg.input_size)
@@ -69,11 +70,6 @@ def infer_fw_model(self, model_obj, inputs):
fw_outputs = [fw_outputs.numpy(force=True)]
return fw_outputs

def teardown_method(self):
# remove all downloaded files from cache
cleanup_dir(hf_hub_cache_dir)
super().teardown_method()

@pytest.mark.parametrize("name", ["mobilevitv2_050.cvnets_in1k",
"poolformerv2_s12.sail_in1k",
"vit_base_patch8_224.augreg_in21k",
@@ -86,8 +82,8 @@ def test_convert_model_precommit(self, name, ie_device):
self.run(name, None, ie_device)

@pytest.mark.nightly
@pytest.mark.parametrize("mode", ["trace"]) # disable "export" for now
@pytest.mark.parametrize("name", get_all_models())
@pytest.mark.parametrize("mode", ["trace", "export"])
def test_convert_model_all_models(self, mode, name, ie_device):
self.mode = mode
self.run(name, None, ie_device)
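
To make the intent of the updated `filter_timm` above easier to follow, here is a self-contained sketch of the same idea: strip the pretrained-dataset suffix, normalize `x_small`-style names, and keep only one checkpoint per size-variant family. The function and variable names below are illustrative, not the repository's API.

```python
# Illustrative sketch: keep one timm checkpoint per model family,
# ignoring size suffixes and pretrained-dataset tags.
SIZE_TOKENS = {"base", "atto", "femto", "xxtiny", "xxsmall", "xxs", "pico",
               "xtiny", "xsmall", "xs", "nano", "tiny", "s", "mini", "small",
               "lite", "medium", "m", "big", "large", "l", "xlarge", "xl",
               "huge", "xxlarge", "gigantic", "giant", "enormous"}

def dedupe_by_size(models):
    seen, kept = set(), []
    for name in sorted(models):
        stem = name.split(".")[0]          # drop dataset tag: "convnext_tiny.fb_in1k" -> "convnext_tiny"
        stem = stem.replace("x_", "x")     # normalize "x_small"/"xx_small" to "xsmall"/"xxsmall"
        key = "_".join(p for p in stem.split("_") if p not in SIZE_TOKENS)
        if key not in seen:                # first (alphabetical) variant of each family wins
            seen.add(key)
            kept.append(name)
    return kept

# Example: only one convnext variant survives the filter.
print(dedupe_by_size(["convnext_tiny.fb_in1k", "convnext_small.fb_in1k"]))
```
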
2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/test_torchbench.py
@@ -32,7 +32,7 @@ def setup_class(self):
subprocess.check_call(
["git", "checkout", "dbc109791dbb0dfb58775a5dc284fc2c3996cb30"], cwd=self.repo_dir.name)

def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
subprocess.check_call([sys.executable, "install.py"] + [model_name], cwd=self.repo_dir.name)
sys.path.append(self.repo_dir.name)
from torchbenchmark import load_model_by_name
15 changes: 2 additions & 13 deletions tests/model_hub_tests/pytorch/test_torchvision_models.py
@@ -53,13 +53,7 @@ def prepare_frames_for_raft(name, frames1, frames2):


class TestTorchHubConvertModel(TestTorchConvertModel):
def setup_class(self):
self.cache_dir = tempfile.TemporaryDirectory()
# set temp dir for torch cache
if os.environ.get('USE_SYSTEM_CACHE', 'True') == 'False':
torch.hub.set_dir(str(self.cache_dir.name))

def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
m = torch.hub.load("pytorch/vision", model_name,
weights='DEFAULT', skip_validation=True)
m.eval()
@@ -97,11 +91,6 @@ def infer_fw_model(self, model_obj, inputs):
fw_outputs = [fw_outputs.numpy(force=True)]
return fw_outputs

def teardown_method(self):
# cleanup tmpdir
self.cache_dir.cleanup()
super().teardown_method()

@pytest.mark.parametrize("model_name", ["efficientnet_b7", "raft_small", "swin_v2_s"])
@pytest.mark.precommit
def test_convert_model_precommit(self, model_name, ie_device):
@@ -114,9 +103,9 @@ def test_convert_model_precommit_export(self, model_name, ie_device):
self.mode = "export"
self.run(model_name, None, ie_device)

@pytest.mark.parametrize("mode", ["trace"]) # disable "export" for now
@pytest.mark.parametrize("name",
process_pytest_marks(os.path.join(os.path.dirname(__file__), "torchvision_models")))
@pytest.mark.parametrize("mode", ["trace", "export"])
@pytest.mark.nightly
def test_convert_model_all_models(self, mode, name, ie_device):
self.mode = mode
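
The `export` mode re-enabled above corresponds to converting models through `torch.export` rather than TorchScript tracing. Below is a minimal sketch of that path, assuming an OpenVINO version whose `convert_model` accepts a `torch.export.ExportedProgram`; the torchvision model and input shape are chosen here only for illustration.

```python
# Sketch of the "export" conversion path (vs. the "trace" / TorchScript path).
# Assumes torch >= 2.1, torchvision, and an OpenVINO build that accepts an ExportedProgram.
import torch
from torch.export import export
from torchvision.models import resnet18
from openvino import convert_model, compile_model

model = resnet18(weights="DEFAULT").eval()
example_input = torch.randn(1, 3, 224, 224)

exported_program = export(model, args=(example_input,))  # torch.export path
ov_model = convert_model(exported_program)               # ExportedProgram frontend
compiled = compile_model(ov_model, "CPU")

output = compiled(example_input.numpy())[compiled.output(0)]
print(output.shape)  # expected (1, 1000) for an ImageNet classifier
```
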
2 changes: 1 addition & 1 deletion tests/model_hub_tests/pytorch/test_tpsmm.py
@@ -25,7 +25,7 @@ def setup_class(self):
["git", "checkout", "c616878812c9870ed81ac72561be2676fd7180e2"], cwd=self.repo_dir.name)
# verify model on random weights

def load_model_impl(self, model_name, model_link):
def load_model(self, model_name, model_link):
sys.path.append(self.repo_dir.name)
from modules.inpainting_network import InpaintingNetwork
from modules.keypoint_detector import KPDetector
23 changes: 13 additions & 10 deletions tests/model_hub_tests/pytorch/timm_models
@@ -26,17 +26,11 @@ convformer_s36.sail_in1k,None
convit_base.fb_in1k,None,xfail,Trace failed
convmixer_1024_20_ks9_p14.in1k,None
convmixer_1536_20.in1k,None
convnext_atto.d2_in1k,None
convnext_atto_ols.a2_in1k,None
convnext_base.clip_laion2b,None
convnext_femto.d1_in1k,None
convnext_femto_ols.d1_in1k,None
convnext_large_mlp.clip_laion2b_augreg,None
convnext_pico_ols.d1_in1k,None
convnext_tiny_hnf.a2h_in1k,None
convnextv2_atto.fcmae,None
convnextv2_base.fcmae,None
convnextv2_femto.fcmae,None
crossvit_15_dagger_240.in1k,None
crossvit_base_240.in1k,None
cs3darknet_focus_m.c2ns_in1k,None
@@ -52,10 +46,10 @@ cspresnext50.ra_in1k,None
darknet53.c2ns_in1k,None
darknetaa53.c2ns_in1k,None
davit_base.msft_in1k,None
deit3_base_patch16_224.fb_in1k,None
deit3_huge_patch14_224.fb_in1k,None
deit_base_distilled_patch16_224.fb_in1k,None
deit_base_patch16_224.fb_in1k,None
deit3_base_patch16_224.fb_in1k,None
deit3_huge_patch14_224.fb_in1k,None
densenet121.ra_in1k,None
densenet161.tv_in1k,None
densenet169.tv_in1k,None
@@ -144,11 +138,11 @@ efficientvit_m4.r224_in1k,None
efficientvit_m5.r224_in1k,None
ese_vovnet19b_dw.ra_in1k,None
ese_vovnet39b.ra_in1k,None
eva_giant_patch14_clip_224.laion400m,None
eva_large_patch14_196.in22k_ft_in1k,None
eva02_base_patch14_224.mim_in22k,None
eva02_base_patch16_clip_224.merged2b,None
eva02_large_patch14_clip_224.merged2b,None
eva_giant_patch14_clip_224.laion400m,None
eva_large_patch14_196.in22k_ft_in1k,None
fastvit_ma36.apple_dist_in1k,None
fastvit_s12.apple_dist_in1k,None
fastvit_sa12.apple_dist_in1k,None
@@ -186,6 +180,14 @@ hardcorenas_c.miil_green_in1k,None
hardcorenas_d.miil_green_in1k,None
hardcorenas_e.miil_green_in1k,None
hardcorenas_f.miil_green_in1k,None
hgnet_base.ssld_in1k,None
hgnetv2_b0.ssld_stage1_in22k_in1k,None
hgnetv2_b1.ssld_stage1_in22k_in1k,None
hgnetv2_b2.ssld_stage1_in22k_in1k,None
hgnetv2_b3.ssld_stage1_in22k_in1k,None
hgnetv2_b4.ssld_stage1_in22k_in1k,None
hgnetv2_b5.ssld_stage1_in22k_in1k,None
hgnetv2_b6.ssld_stage1_in22k_in1k,None
hrnet_w18_small.gluon_in1k,None
hrnet_w18_small_v2.gluon_in1k,None
hrnet_w18_ssld.paddle_in1k,None
@@ -245,6 +247,7 @@ mvitv2_base.fb_in1k,None
mvitv2_base_cls.fb_inw21k,None
nasnetalarge.tf_in1k,None
nest_base_jx.goog_in1k,None
nextvit_base.bd_in1k,None
nf_regnet_b1.ra2_in1k,None
nf_resnet50.ra2_in1k,None
nfnet_l0.ra2_in1k,None
