diff --git a/.github/workflows/ci_test-full.yml b/.github/workflows/ci_test-full.yml
index 19c085ddcc..d4c68478f2 100644
--- a/.github/workflows/ci_test-full.yml
+++ b/.github/workflows/ci_test-full.yml
@@ -74,12 +74,11 @@ jobs:
         pip list
       shell: bash

-    #- name: Cache datasets
-    #  uses: actions/cache@v2
-    #  with:
-    #    path: Datasets  # This path is specific to Ubuntu
-    #    # Look to see if there is a cache hit for the corresponding requirements file
-    #    key: pl-datasets
+    - name: Cache datasets
+      uses: actions/cache@v2
+      with:
+        path: ./datasets
+        key: pl-datasets-${{ hashFiles('tests/conftest.py') }}

     - name: Tests
       run: |
diff --git a/pl_bolts/models/mnist_module.py b/pl_bolts/models/mnist_module.py
index d9521e1db8..dbe4fbc91d 100644
--- a/pl_bolts/models/mnist_module.py
+++ b/pl_bolts/models/mnist_module.py
@@ -1,4 +1,3 @@
-import os
 from argparse import ArgumentParser
 from warnings import warn

@@ -70,7 +69,7 @@ def val_dataloader(self):
         return loader

     def test_dataloader(self):
-        test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
+        test_dataset = MNIST(self.hparams.data_dir, train=False, download=True, transform=transforms.ToTensor())
         loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
         return loader

diff --git a/tests/__init__.py b/tests/__init__.py
index 36a5cee9f1..da9450441b 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -2,8 +2,9 @@

 from pytorch_lightning import seed_everything

-TEST_ROOT = os.path.dirname(__file__)
+TEST_ROOT = os.path.realpath(os.path.dirname(__file__))
 PACKAGE_ROOT = os.path.dirname(TEST_ROOT)
+DATASETS_PATH = os.path.join(PACKAGE_ROOT, 'datasets')

 # generate a list of random seeds for each test
 ROOT_SEED = 1234
diff --git a/tests/callbacks/test_info_callbacks.py b/tests/callbacks/test_info_callbacks.py
index fd58184da1..9a5eb451b3 100644
--- a/tests/callbacks/test_info_callbacks.py
+++ b/tests/callbacks/test_info_callbacks.py
@@ -1,7 +1,7 @@
 from pl_bolts.callbacks import PrintTableMetricsCallback


-def test_printtable_metrics_callback(tmpdir):
+def test_printtable_metrics_callback():
     callback = PrintTableMetricsCallback()

     metrics_a = {'loss': 1.0, 'epoch': 0}
diff --git a/tests/callbacks/test_param_update_callbacks.py b/tests/callbacks/test_param_update_callbacks.py
index 41eb4f84b5..ebc0dbd7f3 100644
--- a/tests/callbacks/test_param_update_callbacks.py
+++ b/tests/callbacks/test_param_update_callbacks.py
@@ -6,7 +6,7 @@
 from pl_bolts.callbacks.byol_updates import BYOLMAWeightUpdate


-def test_byol_ma_weight_update_callback(tmpdir):
+def test_byol_ma_weight_update_callback():
     a = nn.Linear(100, 10)
     b = deepcopy(a)
     a_original = deepcopy(a)
diff --git a/tests/callbacks/test_variational_callbacks.py b/tests/callbacks/test_variational_callbacks.py
index ba49540554..bbe3833ab3 100644
--- a/tests/callbacks/test_variational_callbacks.py
+++ b/tests/callbacks/test_variational_callbacks.py
@@ -4,7 +4,7 @@
 from pl_bolts.models.gans import GAN


-def test_latent_dim_interpolator(tmpdir):
+def test_latent_dim_interpolator():

     class FakeTrainer(object):
         def __init__(self):
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000000..5fbf96133c
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,15 @@
+from pathlib import Path
+
+import pytest
+
+
+# GitHub Actions uses this path to cache datasets.
+# Use the `datadir` fixture where possible, and use `DATASETS_PATH` in
+# `pytest.mark.parametrize()` where you cannot use `datadir`.
+# https://github.com/pytest-dev/pytest/issues/349
+from tests import DATASETS_PATH
+
+
+@pytest.fixture(scope="session")
+def datadir():
+    return Path(DATASETS_PATH)
diff --git a/tests/datamodules/test_dataloader.py b/tests/datamodules/test_dataloader.py
index 9627a25da2..ae8041db09 100644
--- a/tests/datamodules/test_dataloader.py
+++ b/tests/datamodules/test_dataloader.py
@@ -5,8 +5,8 @@
 from pl_bolts.datasets.cifar10_dataset import CIFAR10


-def test_async_dataloader(tmpdir):
-    ds = CIFAR10(tmpdir)
+def test_async_dataloader(datadir):
+    ds = CIFAR10(data_dir=datadir)

     if torch.cuda.device_count() > 0:  # Can only run this test with a GPU
         device = torch.device('cuda', 0)
diff --git a/tests/datamodules/test_datamodules.py b/tests/datamodules/test_datamodules.py
index 49554788e9..0764cc0adf 100644
--- a/tests/datamodules/test_datamodules.py
+++ b/tests/datamodules/test_datamodules.py
@@ -6,8 +6,9 @@
 from pl_bolts.datamodules import CityscapesDataModule


-def test_dev_datasets(tmpdir):
-    ds = CIFAR10(tmpdir)
+def test_dev_datasets(datadir):
+
+    ds = CIFAR10(data_dir=datadir)

     for b in ds:
         pass
@@ -35,14 +36,14 @@ def _create_synth_Cityscapes_dataset(path_dir):
                     fine_labels_dir / split / city / semantic_target_name)


-def test_cityscapes_datamodule(tmpdir):
+def test_cityscapes_datamodule(datadir):

-    _create_synth_Cityscapes_dataset(tmpdir)
+    _create_synth_Cityscapes_dataset(datadir)

     batch_size = 1
     target_types = ['semantic', 'instance']
     for target_type in target_types:
-        dm = CityscapesDataModule(tmpdir,
+        dm = CityscapesDataModule(datadir,
                                   num_workers=0,
                                   batch_size=batch_size,
                                   target_type=target_type)
diff --git a/tests/datamodules/test_sklearn_dataloaders.py b/tests/datamodules/test_sklearn_dataloaders.py
index 5fa8939909..7e35e20c66 100644
--- a/tests/datamodules/test_sklearn_dataloaders.py
+++ b/tests/datamodules/test_sklearn_dataloaders.py
@@ -12,7 +12,7 @@
                   ' install it with `pip install sklearn`.')


-def test_dataloader(tmpdir):
+def test_dataloader():
     seed_everything()

     X = np.random.rand(5, 2)
diff --git a/tests/datasets/test_datasets.py b/tests/datasets/test_datasets.py
index 1e9ae6d79f..373a52dbce 100644
--- a/tests/datasets/test_datasets.py
+++ b/tests/datasets/test_datasets.py
@@ -3,7 +3,7 @@
 from pl_bolts.datasets import DummyDataset, RandomDataset, RandomDictDataset, RandomDictStringDataset


-def test_dummy_ds(tmpdir):
+def test_dummy_ds():
     ds = DummyDataset((1, 2), num_samples=100)
     dl = DataLoader(ds)

@@ -11,7 +11,7 @@ def test_dummy_ds(tmpdir):
         pass


-def test_rand_ds(tmpdir):
+def test_rand_ds():
     ds = RandomDataset(32, num_samples=100)
     dl = DataLoader(ds)

@@ -19,7 +19,7 @@ def test_rand_ds(tmpdir):
         pass


-def test_rand_dict_ds(tmpdir):
+def test_rand_dict_ds():
     ds = RandomDictDataset(32, num_samples=100)
     dl = DataLoader(ds)

@@ -27,7 +27,7 @@ def test_rand_dict_ds(tmpdir):
         pass


-def test_rand_str_dict_ds(tmpdir):
+def test_rand_str_dict_ds():
     ds = RandomDictStringDataset(32, num_samples=100)
     dl = DataLoader(ds)
diff --git a/tests/models/self_supervised/test_models.py b/tests/models/self_supervised/test_models.py
index 0521594ef2..54e51c1c25 100644
--- a/tests/models/self_supervised/test_models.py
+++ b/tests/models/self_supervised/test_models.py
@@ -15,14 +15,14 @@

 # TODO: this test is hanging (runs for more then 10min) so we need to use GPU or optimize it...
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
-def test_cpcv2(tmpdir):
+def test_cpcv2(tmpdir, datadir):
     seed_everything()

-    datamodule = CIFAR10DataModule(data_dir=tmpdir, num_workers=0, batch_size=2)
+    datamodule = CIFAR10DataModule(data_dir=datadir, num_workers=0, batch_size=2)
     datamodule.train_transforms = CPCTrainTransformsCIFAR10()
     datamodule.val_transforms = CPCEvalTransformsCIFAR10()

-    model = CPCV2(encoder='resnet18', data_dir=tmpdir, batch_size=2, online_ft=True, datamodule=datamodule)
+    model = CPCV2(encoder='resnet18', data_dir=datadir, batch_size=2, online_ft=True, datamodule=datamodule)
     trainer = pl.Trainer(fast_dev_run=True, max_epochs=1, default_root_dir=tmpdir)
     trainer.fit(model)
     loss = trainer.progress_bar_dict['val_nce']
@@ -32,14 +32,14 @@ def test_cpcv2(tmpdir):

 # TODO: this test is hanging (runs for more then 10min) so we need to use GPU or optimize it...
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
-def test_byol(tmpdir):
+def test_byol(tmpdir, datadir):
     seed_everything()

-    datamodule = CIFAR10DataModule(data_dir=tmpdir, num_workers=0, batch_size=2)
+    datamodule = CIFAR10DataModule(data_dir=datadir, num_workers=0, batch_size=2)
     datamodule.train_transforms = CPCTrainTransformsCIFAR10()
     datamodule.val_transforms = CPCEvalTransformsCIFAR10()

-    model = BYOL(data_dir=tmpdir, num_classes=datamodule)
+    model = BYOL(data_dir=datadir, num_classes=datamodule)
     trainer = pl.Trainer(fast_dev_run=True, max_epochs=1, default_root_dir=tmpdir, max_steps=2)
     trainer.fit(model, datamodule)
     loss = trainer.progress_bar_dict['loss']
@@ -47,10 +47,10 @@ def test_byol(tmpdir):
     assert float(loss) < 1.0


-def test_amdim(tmpdir):
+def test_amdim(tmpdir, datadir):
     seed_everything()

-    model = AMDIM(data_dir=tmpdir, batch_size=2, online_ft=True, encoder='resnet18')
+    model = AMDIM(data_dir=datadir, batch_size=2, online_ft=True, encoder='resnet18')
     trainer = pl.Trainer(fast_dev_run=True, max_epochs=1, default_root_dir=tmpdir)
     trainer.fit(model)
     loss = trainer.progress_bar_dict['loss']
@@ -58,14 +58,14 @@ def test_amdim(tmpdir):
     assert float(loss) > 0


-def test_moco(tmpdir):
+def test_moco(tmpdir, datadir):
     seed_everything()

-    datamodule = CIFAR10DataModule(tmpdir, num_workers=0, batch_size=2)
+    datamodule = CIFAR10DataModule(data_dir=datadir, num_workers=0, batch_size=2)
     datamodule.train_transforms = Moco2TrainCIFAR10Transforms()
     datamodule.val_transforms = Moco2EvalCIFAR10Transforms()

-    model = MocoV2(data_dir=tmpdir, batch_size=2, online_ft=True)
+    model = MocoV2(data_dir=datadir, batch_size=2, online_ft=True)
     trainer = pl.Trainer(fast_dev_run=True, max_epochs=1, default_root_dir=tmpdir, callbacks=[MocoLRScheduler()])
     trainer.fit(model, datamodule=datamodule)
     loss = trainer.progress_bar_dict['loss']
@@ -73,10 +73,10 @@ def test_moco(tmpdir):
     assert float(loss) > 0


-def test_simclr(tmpdir):
+def test_simclr(tmpdir, datadir):
     seed_everything()

-    datamodule = CIFAR10DataModule(tmpdir, num_workers=0, batch_size=2)
+    datamodule = CIFAR10DataModule(data_dir=datadir, num_workers=0, batch_size=2)
     datamodule.train_transforms = SimCLRTrainDataTransform(32)
     datamodule.val_transforms = SimCLREvalDataTransform(32)

@@ -88,14 +88,14 @@ def test_simclr(tmpdir):
     assert float(loss) > 0


-def test_swav(tmpdir):
+def test_swav(tmpdir, datadir):
     seed_everything()

     batch_size = 2

     # inputs, y = batch (doesn't receive y for some reason)
     datamodule = CIFAR10DataModule(
-        data_dir=tmpdir,
+        data_dir=datadir,
         batch_size=batch_size,
         num_workers=0
     )
diff --git a/tests/models/self_supervised/test_resnets.py b/tests/models/self_supervised/test_resnets.py
index fb77905ed9..4d14b69bb7 100644
--- a/tests/models/self_supervised/test_resnets.py
+++ b/tests/models/self_supervised/test_resnets.py
@@ -16,7 +16,7 @@
 )


-def test_cpc_resnet(tmpdir):
+def test_cpc_resnet():
     x = torch.rand(3, 3, 64, 64)
     model = cpc_resnet50(x)
     model(x)
@@ -33,7 +33,7 @@ def test_cpc_resnet(tmpdir):
     wide_resnet50_2,
     wide_resnet101_2
 ])
-def test_torchvision_resnets(tmpdir, model_class):
+def test_torchvision_resnets(model_class):
     x = torch.rand(3, 3, 64, 64)
     model = model_class()
     model(x)
@@ -44,7 +44,7 @@ def test_torchvision_resnets(tmpdir, model_class):
     64,
     128
 ])
-def test_amdim_encoder(tmpdir, size):
+def test_amdim_encoder(size):
     dummy_batch = torch.zeros((2, 3, size, size))
     model = AMDIMEncoder(dummy_batch, encoder_size=size)
     model.init_weights()
diff --git a/tests/models/self_supervised/test_scripts.py b/tests/models/self_supervised/test_scripts.py
index d53e10e923..f77207d5fd 100644
--- a/tests/models/self_supervised/test_scripts.py
+++ b/tests/models/self_supervised/test_scripts.py
@@ -3,8 +3,12 @@
 import pytest
 import torch

+from tests import DATASETS_PATH

-@pytest.mark.parametrize('cli_args', ["--max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2"])
+
+@pytest.mark.parametrize('cli_args', [
+    f"--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2"
+])
 def test_cli_run_self_supervised_amdim(cli_args):
     """Test running CLI for an example with default params."""
     from pl_bolts.models.self_supervised.amdim.amdim_module import cli_main
@@ -16,7 +20,9 @@

 # TODO: this test is hanging (runs for more then 10min) so we need to use GPU or optimize it...
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
-@pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --encoder resnet18'])
+@pytest.mark.parametrize('cli_args', [
+    f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --encoder resnet18'
+])
 def test_cli_run_self_supervised_cpc(cli_args):
     """Test running CLI for an example with default params."""
     from pl_bolts.models.self_supervised.cpc.cpc_module import cli_main
@@ -26,7 +32,9 @@ def test_cli_run_self_supervised_cpc(cli_args):
     cli_main()


-@pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2'])
+@pytest.mark.parametrize('cli_args', [
+    f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2'
+])
 def test_cli_run_self_supervised_moco(cli_args):
     """Test running CLI for an example with default params."""
     from pl_bolts.models.self_supervised.moco.moco2_module import cli_main
@@ -36,7 +44,9 @@ def test_cli_run_self_supervised_moco(cli_args):
     cli_main()


-@pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --online_ft'])
+@pytest.mark.parametrize('cli_args', [
+    f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --online_ft'
+])
 def test_cli_run_self_supervised_simclr(cli_args):
     """Test running CLI for an example with default params."""
     from pl_bolts.models.self_supervised.simclr.simclr_module import cli_main
@@ -46,7 +56,9 @@ def test_cli_run_self_supervised_simclr(cli_args):
     cli_main()


-@pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --online_ft'])
+@pytest.mark.parametrize('cli_args', [
+    f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --online_ft'
+])
 def test_cli_run_self_supervised_byol(cli_args):
     """Test running CLI for an example with default params."""
     from pl_bolts.models.self_supervised.byol.byol_module import cli_main
@@ -58,8 +70,8 @@ def test_cli_run_self_supervised_byol(cli_args):

 @pytest.mark.parametrize(
     'cli_args', [
-        '--max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --gpus 0 --arch resnet18'
-        ' --hidden_mlp 512 --fp32 --sinkhorn_iterations 1 --nmb_prototypes 2 --dataset cifar10'
+        f'--dataset cifar10 --data_path {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2'
+        ' --gpus 0 --arch resnet18 --hidden_mlp 512 --fp32 --sinkhorn_iterations 1 --nmb_prototypes 2'
     ]
 )
 def test_cli_run_self_supervised_swav(cli_args):
diff --git a/tests/models/test_autoencoders.py b/tests/models/test_autoencoders.py
index b27ab518f3..29b74d52b4 100644
--- a/tests/models/test_autoencoders.py
+++ b/tests/models/test_autoencoders.py
@@ -10,10 +10,10 @@


 @pytest.mark.parametrize("dm_cls", [pytest.param(CIFAR10DataModule, id="cifar10")])
-def test_vae(tmpdir, dm_cls):
+def test_vae(tmpdir, datadir, dm_cls):
     seed_everything()

-    dm = dm_cls(batch_size=4)
+    dm = dm_cls(data_dir=datadir, batch_size=4)
     model = VAE(input_height=dm.size()[-1])
     trainer = pl.Trainer(
         fast_dev_run=True,
@@ -27,10 +27,10 @@ def test_vae(tmpdir, dm_cls):


 @pytest.mark.parametrize("dm_cls", [pytest.param(CIFAR10DataModule, id="cifar10")])
-def test_ae(tmpdir, dm_cls):
+def test_ae(tmpdir, datadir, dm_cls):
     seed_everything()

-    dm = dm_cls(batch_size=4)
+    dm = dm_cls(data_dir=datadir, batch_size=4)
     model = AE(input_height=dm.size()[-1])
     trainer = pl.Trainer(
         fast_dev_run=True,
@@ -43,7 +43,7 @@ def test_ae(tmpdir, dm_cls):
     assert result == 1


-def test_encoder(tmpdir):
+def test_encoder():
     img = torch.rand(16, 3, 224, 224)

     encoder1 = resnet18_encoder(first_conv=False, maxpool1=True)
@@ -56,7 +56,7 @@ def test_encoder(tmpdir):
     assert out2.shape == (16, 2048)


-def test_decoder(tmpdir):
+def test_decoder():
     latent_dim = 128
     input_height = 288  # random but has to be a multiple of 32 for first_conv=True, maxpool1=True

@@ -78,8 +78,8 @@ def test_decoder(tmpdir):
     assert out4.shape == (2, 3, 288, 288)


-def test_from_pretrained(tmpdir):
-    dm = CIFAR10DataModule('.', batch_size=4)
+def test_from_pretrained(datadir):
+    dm = CIFAR10DataModule(data_dir=datadir, batch_size=4)
     dm.setup()
     dm.prepare_data()

diff --git a/tests/models/test_classic_ml.py b/tests/models/test_classic_ml.py
index 4651063b0d..565773a95e 100644
--- a/tests/models/test_classic_ml.py
+++ b/tests/models/test_classic_ml.py
@@ -33,11 +33,11 @@ def test_linear_regression_model(tmpdir):
     trainer.test(model, loader)


-def test_logistic_regression_model(tmpdir):
+def test_logistic_regression_model(tmpdir, datadir):
     pl.seed_everything(0)

     # create dataset
-    dm = MNISTDataModule(num_workers=0, data_dir=tmpdir)
+    dm = MNISTDataModule(num_workers=0, data_dir=datadir)

     model = LogisticRegression(input_dim=28 * 28, num_classes=10, learning_rate=0.001)
     model.prepare_data = dm.prepare_data
diff --git a/tests/models/test_detection.py b/tests/models/test_detection.py
index 730f9b5f91..c45c08fc60 100644
--- a/tests/models/test_detection.py
+++ b/tests/models/test_detection.py
@@ -10,7 +10,7 @@ def _collate_fn(batch):
     return tuple(zip(*batch))


-def test_fasterrcnn(tmpdir):
+def test_fasterrcnn():
     model = FasterRCNN()

     image = torch.rand(1, 3, 400, 400)
diff --git a/tests/models/test_executable_scripts.py b/tests/models/test_executable_scripts.py
index 2f0953f9df..56f396cbea 100644
--- a/tests/models/test_executable_scripts.py
+++ b/tests/models/test_executable_scripts.py
@@ -3,10 +3,14 @@
 import pytest
 import torch

+from tests import DATASETS_PATH
+

 @pytest.mark.parametrize('cli_args', [
-    '--dataset mnist --max_epochs 1 --batch_size 2 --limit_train_batches 2 --limit_val_batches 2',
-    '--dataset cifar10 --max_epochs 1 --batch_size 2 --limit_train_batches 2 --limit_val_batches 2',
+    f'--dataset mnist --data_dir {DATASETS_PATH} --max_epochs 1'
+    ' --batch_size 2 --limit_train_batches 2 --limit_val_batches 2',
+    f'--dataset cifar10 --data_dir {DATASETS_PATH} --max_epochs 1'
+    ' --batch_size 2 --limit_train_batches 2 --limit_val_batches 2',
 ])
 def test_cli_run_basic_gan(cli_args):
     from pl_bolts.models.gans.basic.basic_gan_module import cli_main
@@ -18,7 +22,8 @@ def test_cli_run_basic_gan(cli_args):

 # TODO: this test is hanging (runs for more then 10min) so we need to use GPU or optimize it...
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
 @pytest.mark.parametrize('cli_args', [
-    '--max_epochs 1 --limit_train_batches 2 --limit_val_batches 2 --batch_size 2 --encoder resnet18',
+    f'--data_dir {DATASETS_PATH} --max_epochs 1 --limit_train_batches 2'
+    ' --limit_val_batches 2 --batch_size 2 --encoder resnet18',
 ])
 def test_cli_run_cpc(cli_args):
     from pl_bolts.models.self_supervised.cpc.cpc_module import cli_main
@@ -28,7 +33,7 @@ def test_cli_run_cpc(cli_args):
     cli_main()


-@pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 2'])
+@pytest.mark.parametrize('cli_args', [f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 2'])
 def test_cli_run_mnist(cli_args):
     """Test running CLI for an example with default params."""
     from pl_bolts.models.mnist_module import cli_main
@@ -39,7 +44,7 @@ def test_cli_run_mnist(cli_args):


 @pytest.mark.parametrize('cli_args', [
-    '--dataset cifar10 --max_epochs 1 --batch_size 2 --fast_dev_run',
+    f'--dataset cifar10 --data_dir {DATASETS_PATH} --max_epochs 1 --batch_size 2 --fast_dev_run',
 ])
 def test_cli_run_basic_ae(cli_args):
     """Test running CLI for an example with default params."""
@@ -51,7 +56,7 @@ def test_cli_run_basic_ae(cli_args):


 @pytest.mark.parametrize('cli_args', [
-    '--dataset cifar10 --max_epochs 1 --batch_size 2 --fast_dev_run',
+    f'--dataset cifar10 --data_dir {DATASETS_PATH} --max_epochs 1 --batch_size 2 --fast_dev_run',
 ])
 def test_cli_run_basic_vae(cli_args):
     """Test running CLI for an example with default params."""
@@ -82,7 +87,7 @@ def test_cli_run_log_regression(cli_args):
     cli_main()


-@pytest.mark.parametrize('cli_args', ['--max_epochs 1 --max_steps 2'])
+@pytest.mark.parametrize('cli_args', [f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 2'])
 def test_cli_run_vision_image_gpt(cli_args):
     """Test running CLI for an example with default params."""
     from pl_bolts.models.vision.image_gpt.igpt_module import cli_main
diff --git a/tests/models/test_gans.py b/tests/models/test_gans.py
index eaf149a54c..5f148f6bc4 100644
--- a/tests/models/test_gans.py
+++ b/tests/models/test_gans.py
@@ -9,10 +9,10 @@
 @pytest.mark.parametrize(
     "dm_cls", [pytest.param(MNISTDataModule, id="mnist"), pytest.param(CIFAR10DataModule, id="cifar10")]
 )
-def test_gan(tmpdir, dm_cls):
+def test_gan(tmpdir, datadir, dm_cls):
     seed_everything()

-    dm = dm_cls()
+    dm = dm_cls(data_dir=datadir)
     model = GAN(*dm.size())
     trainer = pl.Trainer(fast_dev_run=True, default_root_dir=tmpdir)
     trainer.fit(model, dm)
diff --git a/tests/models/test_mnist_templates.py b/tests/models/test_mnist_templates.py
index 0c8867eb03..4ff3ea8fbb 100644
--- a/tests/models/test_mnist_templates.py
+++ b/tests/models/test_mnist_templates.py
@@ -4,10 +4,10 @@
 from pl_bolts.models import LitMNIST


-def test_mnist(tmpdir):
+def test_mnist(tmpdir, datadir):
     seed_everything()

-    model = LitMNIST(data_dir=tmpdir, num_workers=0)
+    model = LitMNIST(data_dir=datadir, num_workers=0)
     trainer = pl.Trainer(limit_train_batches=0.01, limit_val_batches=0.01, max_epochs=1,
                          limit_test_batches=0.01, default_root_dir=tmpdir)
     trainer.fit(model)
diff --git a/tests/models/test_vision.py b/tests/models/test_vision.py
index d32c09b9cc..055dafb5c2 100644
--- a/tests/models/test_vision.py
+++ b/tests/models/test_vision.py
@@ -7,9 +7,9 @@
 from torch.utils.data import DataLoader


-def test_igpt(tmpdir):
+def test_igpt(datadir):
     pl.seed_everything(0)
-    dm = MNISTDataModule(tmpdir, normalize=False)
+    dm = MNISTDataModule(data_dir=datadir, normalize=False)
     model = ImageGPT()

     trainer = pl.Trainer(
@@ -22,7 +22,7 @@
     trainer.test(datamodule=dm)
     assert trainer.callback_metrics["test_loss"] < 1.7

-    dm = FashionMNISTDataModule(tmpdir, num_workers=1)
+    dm = FashionMNISTDataModule(data_dir=datadir, num_workers=1)
     model = ImageGPT(classify=True)
     trainer = pl.Trainer(
         limit_train_batches=2,
@@ -33,8 +33,7 @@ def test_igpt(tmpdir):
     trainer.fit(model, datamodule=dm)


-def test_gpt2(tmpdir):
-
+def test_gpt2():
     seq_len = 17
     batch_size = 32
     vocab_size = 16
@@ -51,14 +50,14 @@ def test_gpt2(tmpdir):
     model(x)


-def test_unet(tmpdir):
+def test_unet():
     x = torch.rand(10, 3, 28, 28)
     model = UNet(num_classes=2)
     y = model(x)
     assert y.shape == torch.Size([10, 2, 28, 28])


-def test_semantic_segmentation(tmpdir):
+def test_semantic_segmentation():

     class DummyDataModule(pl.LightningDataModule):
         def train_dataloader(self):
diff --git a/tests/optimizers/test_lr_scheduler.py b/tests/optimizers/test_lr_scheduler.py
index 3540500a30..59646a8238 100644
--- a/tests/optimizers/test_lr_scheduler.py
+++ b/tests/optimizers/test_lr_scheduler.py
@@ -85,7 +85,7 @@ def _test_against_closed_form(self, scheduler, closed_form_scheduler, epochs=10)
             scheduler.step()


-def test_lwca_lr(tmpdir):
+def test_lwca_lr():
     seed_everything()

     warmup_start_lr = 0.0
@@ -136,7 +136,7 @@ def test_lwca_lr(tmpdir):
     test_lr_scheduler._test_lr(scheduler, targets, epochs=max_epochs)


-def test_lwca_lr_with_nz_start_lr(tmpdir):
+def test_lwca_lr_with_nz_start_lr():
     seed_everything()

     warmup_start_lr = 0.2
@@ -187,7 +187,7 @@ def test_lwca_lr_with_nz_start_lr(tmpdir):
     test_lr_scheduler._test_lr(scheduler, targets, epochs=max_epochs)


-def test_lwca_lr_with_nz_eta_min(tmpdir):
+def test_lwca_lr_with_nz_eta_min():
     seed_everything()

     warmup_start_lr = 0.0
@@ -238,7 +238,7 @@ def test_lwca_lr_with_nz_eta_min(tmpdir):
     test_lr_scheduler._test_lr(scheduler, targets, epochs=max_epochs)


-def test_lwca_lr_with_nz_start_lr_nz_eta_min(tmpdir):
+def test_lwca_lr_with_nz_start_lr_nz_eta_min():
     seed_everything()

     warmup_start_lr = 0.009
@@ -289,7 +289,7 @@ def test_lwca_lr_with_nz_start_lr_nz_eta_min(tmpdir):
     test_lr_scheduler._test_lr(scheduler, targets, epochs=max_epochs)


-def test_closed_form_lwca_lr(tmpdir):
+def test_closed_form_lwca_lr():
     seed_everything()

     warmup_start_lr = 0.0
@@ -321,7 +321,7 @@ def test_closed_form_lwca_lr(tmpdir):
     )


-def test_closed_form_lwca_lr_with_nz_start_lr(tmpdir):
+def test_closed_form_lwca_lr_with_nz_start_lr():
     seed_everything()

     warmup_start_lr = 0.2
@@ -353,7 +353,7 @@ def test_closed_form_lwca_lr_with_nz_start_lr(tmpdir):
     )


-def test_closed_form_lwca_lr_with_nz_eta_min(tmpdir):
+def test_closed_form_lwca_lr_with_nz_eta_min():
     seed_everything()

     warmup_start_lr = 0.0
@@ -385,7 +385,7 @@ def test_closed_form_lwca_lr_with_nz_eta_min(tmpdir):
     )


-def test_closed_form_lwca_lr_with_nz_start_lr_nz_eta_min(tmpdir):
+def test_closed_form_lwca_lr_with_nz_start_lr_nz_eta_min():
     seed_everything()

     warmup_start_lr = 0.009
diff --git a/tests/transforms/test_transforms.py b/tests/transforms/test_transforms.py
index 88190cfe34..5aad6b8fc9 100644
--- a/tests/transforms/test_transforms.py
+++ b/tests/transforms/test_transforms.py
@@ -45,7 +45,7 @@
     (3, 96, 96),
     (3, 160, 160),
 ])
-def test_simclr_transforms(tmpdir, img_size):
+def test_simclr_transforms(img_size):
     pl.seed_everything(0)

     (c, h, w) = img_size
@@ -67,7 +67,7 @@ def test_simclr_transforms(tmpdir, img_size):
     Moco2TrainCIFAR10Transforms,
     Moco2EvalCIFAR10Transforms,
 ])
-def test_cifar10_transforms(tmpdir, transform):
+def test_cifar10_transforms(transform):
     x = torch.rand(3, 32, 32)
     x = transforms.ToPILImage(mode='RGB')(x)

@@ -83,7 +83,7 @@ def test_cifar10_transforms(tmpdir, transform):
     Moco2TrainSTL10Transforms,
     Moco2EvalSTL10Transforms,
 ])
-def test_stl10_transforms(tmpdir, transform):
+def test_stl10_transforms(transform):
     x = torch.rand(3, 64, 64)
     x = transforms.ToPILImage(mode='RGB')(x)

@@ -99,7 +99,7 @@ def test_stl10_transforms(tmpdir, transform):
     Moco2TrainImagenetTransforms,
     Moco2EvalImagenetTransforms
 ])
-def test_imagenet_transforms(tmpdir, transform):
+def test_imagenet_transforms(transform):
     x = torch.rand(3, 128, 128)
     x = transforms.ToPILImage(mode='RGB')(x)
diff --git a/tests/utils/test_semi_supervised.py b/tests/utils/test_semi_supervised.py
index 8337fd6409..2226f93c9e 100644
--- a/tests/utils/test_semi_supervised.py
+++ b/tests/utils/test_semi_supervised.py
@@ -6,7 +6,7 @@
 from pl_bolts.utils.semi_supervised import balance_classes, generate_half_labeled_batches


-def test_balance_classes(tmpdir):
+def test_balance_classes():
     X = torch.rand(100, 3, 32, 32)
     c1 = torch.zeros(20, 1)
     c2 = torch.zeros(20, 1) + 1
@@ -16,7 +16,7 @@ def test_balance_classes(tmpdir):
     (balanced_X, balanced_Y) = balance_classes(X, y, batch_size=10)


-def test_generate_half_labeled_batches(tmpdir):
+def test_generate_half_labeled_batches():
     smaller_set_X = np.random.rand(100, 3, 32, 32)
     smaller_set_Y = np.random.randint(0, 3, (100, 1))
     larger_set_X = np.random.rand(100, 3, 32, 32)
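
Usage note (illustration only, not part of the patch): the convention this change establishes is that tests take the session-scoped `datadir` fixture for dataset locations, keep `tmpdir` for logs and checkpoints, and fall back to the module-level `DATASETS_PATH` constant inside `pytest.mark.parametrize()`, where fixtures cannot be resolved (see the pytest issue linked in tests/conftest.py). A minimal sketch with hypothetical test names, reusing only APIs that appear in the hunks above:

    import pytest
    import pytorch_lightning as pl

    from pl_bolts.datamodules import CIFAR10DataModule
    from pl_bolts.models.gans import GAN  # any model from the hunks above would do
    from tests import DATASETS_PATH


    def test_my_model(tmpdir, datadir):
        # datasets are read from (and downloaded once into) the cached ./datasets dir
        dm = CIFAR10DataModule(data_dir=datadir, num_workers=0, batch_size=2)
        model = GAN(*dm.size())
        # logs and checkpoints still go to the per-test tmpdir
        trainer = pl.Trainer(fast_dev_run=True, default_root_dir=tmpdir)
        trainer.fit(model, dm)


    # parametrize() arguments are evaluated at collection time, before fixtures
    # exist, so CLI strings interpolate the DATASETS_PATH constant instead
    @pytest.mark.parametrize('cli_args', [f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 2'])
    def test_my_cli(cli_args):
        ...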