diff --git a/docs/source/testing.rst b/docs/source/testing.rst
index ea2cf51b09bf4c..8e25e84dc12db3 100644
--- a/docs/source/testing.rst
+++ b/docs/source/testing.rst
@@ -400,29 +400,46 @@ or if you have multiple gpus, you can specify which one is to be used by ``pytes
 
     CUDA_VISIBLE_DEVICES="1" pytest tests/test_logging.py
 
 This is handy when you want to run different tasks on different GPUs.
 
-
-And we have these decorators that require the condition described by the marker.
-
-``
-@require_torch
-@require_tf
-@require_multigpu
-@require_non_multigpu
-@require_torch_tpu
-@require_torch_and_cuda
-``
+Some tests must be run on CPU only, others on CPU or GPU or TPU, and yet others on multiple GPUs. The following
+skip decorators are used to set a test's CPU/GPU/TPU requirements:
+
+* ``require_torch`` - this test will run only under torch
+* ``require_torch_gpu`` - as ``require_torch`` plus requires at least 1 GPU
+* ``require_torch_multigpu`` - as ``require_torch`` plus requires at least 2 GPUs
+* ``require_torch_non_multigpu`` - as ``require_torch`` plus requires 0 or 1 GPUs
+* ``require_torch_tpu`` - as ``require_torch`` plus requires at least 1 TPU
+
+For example, here is a test that must be run only when there are 2 or more GPUs available and pytorch is installed:
+
+.. code-block:: python
+
+    @require_torch_multigpu
+    def test_example_with_multigpu():
+
+If a test requires ``tensorflow``, use the ``require_tf`` decorator. For example:
+
+.. code-block:: python
+
+    @require_tf
+    def test_tf_thing_with_tensorflow():
+
+These decorators can be stacked. For example, if a test is slow and requires at least one GPU under pytorch, here is
+how to set it up:
+
+.. code-block:: python
+
+    @require_torch_gpu
+    @slow
+    def test_example_slow_on_gpu():
 
 Some decorators like ``@parameterized`` rewrite test names, therefore ``@require_*`` skip decorators have to be
 listed last for them to work correctly. Here is an example of the correct usage:
 
 .. code-block:: python
 
     @parameterized.expand(...)
-    @require_multigpu
+    @require_torch_multigpu
     def test_integration_foo():
-
-There is no problem whatsoever with ``@pytest.mark.parametrize`` (but it only works with non-unittests) - can use it
-in any order.
-
-This section will be expanded soon once our work in progress on those decorators is finished.
+
+This order problem doesn't exist with ``@pytest.mark.parametrize``: you can put it first or last and it will still
+work. Note, however, that it works only with non-unittest tests.
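+
+As a quick sketch (the test name and parameter values below are made up for illustration), either decorator order
+works here:
+
+.. code-block:: python
+
+    import pytest
+
+    from transformers.testing_utils import require_torch_multigpu
+
+    @require_torch_multigpu
+    @pytest.mark.parametrize("batch_size", [1, 2])
+    def test_batch_sizes(batch_size):
+        # Runs once per batch_size value, and only on a torch setup with 2+ GPUs.
+        assert batch_size > 0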
 
 Inside tests:
diff --git a/examples/seq2seq/test_seq2seq_examples.py b/examples/seq2seq/test_seq2seq_examples.py
index 51a6e6633c374e..09b34e552a925b 100644
--- a/examples/seq2seq/test_seq2seq_examples.py
+++ b/examples/seq2seq/test_seq2seq_examples.py
@@ -19,7 +19,7 @@
 from run_eval_search import run_search
 from transformers import AutoConfig, AutoModelForSeq2SeqLM
 from transformers.hf_api import HfApi
-from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_and_cuda, slow
+from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow
 from utils import ROUGE_KEYS, label_smoothed_nll_loss, lmap, load_json
 
 
@@ -125,9 +125,9 @@ def setUpClass(cls):
         return cls
 
     @slow
-    @require_torch_and_cuda
+    @require_torch_gpu
    def test_hub_configs(self):
-        """I put require_torch_and_cuda cause I only want this to run with self-scheduled."""
+        """I put require_torch_gpu cause I only want this to run with self-scheduled."""
         model_list = HfApi().model_list()
         org = "sshleifer"
 
diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py
index 1d83b73b4f55e9..d108112e8f4b4a 100644
--- a/src/transformers/testing_utils.py
+++ b/src/transformers/testing_utils.py
@@ -154,7 +154,7 @@ def require_tokenizers(test_case):
     return test_case
 
 
-def require_multigpu(test_case):
+def require_torch_multigpu(test_case):
     """
     Decorator marking a test that requires a multi-GPU setup (in PyTorch).
 
@@ -174,7 +174,7 @@
 
 
-def require_non_multigpu(test_case):
+def require_torch_non_multigpu(test_case):
     """
     Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
     """
 
@@ -208,7 +208,7 @@ def require_torch_tpu(test_case):
     torch_device = None
 
 
-def require_torch_and_cuda(test_case):
+def require_torch_gpu(test_case):
     """Decorator marking a test that requires CUDA and PyTorch."""
""" if torch_device != "cuda": return unittest.skip("test requires CUDA")(test_case) diff --git a/templates/adding_a_new_model/tests/test_modeling_xxx.py b/templates/adding_a_new_model/tests/test_modeling_xxx.py index dc23438b076de7..b2474ce9a0ac53 100644 --- a/templates/adding_a_new_model/tests/test_modeling_xxx.py +++ b/templates/adding_a_new_model/tests/test_modeling_xxx.py @@ -17,7 +17,7 @@ import unittest from transformers import is_torch_available -from transformers.testing_utils import require_torch, require_torch_and_cuda, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from .test_configuration_common import ConfigTester from .test_modeling_common import ModelTesterMixin, ids_tensor @@ -302,6 +302,6 @@ def test_XXX_backward_pass_reduces_loss(self): """Test loss/gradients same as reference implementation, for example.""" pass - @require_torch_and_cuda + @require_torch_gpu def test_large_inputs_in_fp16_dont_cause_overflow(self): pass diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 49bf7d5d47d324..935ee816cee1c8 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -22,7 +22,7 @@ from typing import List, Tuple from transformers import is_torch_available -from transformers.testing_utils import require_multigpu, require_torch, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_multigpu, slow, torch_device if is_torch_available(): @@ -980,7 +980,7 @@ def _check_match_tokens(self, generated_ids, bad_words_ids): return True return False - @require_multigpu + @require_torch_multigpu def test_multigpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/test_modeling_layoutlm.py b/tests/test_modeling_layoutlm.py index 36d6993d2daa7c..2b616e4df60d67 100644 --- a/tests/test_modeling_layoutlm.py +++ b/tests/test_modeling_layoutlm.py @@ -18,7 +18,7 @@ from transformers import is_torch_available from transformers.file_utils import cached_property -from transformers.testing_utils import require_torch, require_torch_and_cuda, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from .test_configuration_common import ConfigTester from .test_modeling_common import ModelTesterMixin, ids_tensor @@ -234,6 +234,6 @@ def test_LayoutLM_backward_pass_reduces_loss(self): """Test loss/gradients same as reference implementation, for example.""" pass - @require_torch_and_cuda + @require_torch_gpu def test_large_inputs_in_fp16_dont_cause_overflow(self): pass diff --git a/tests/test_modeling_reformer.py b/tests/test_modeling_reformer.py index e454d06685f713..888d22f30e9f75 100644 --- a/tests/test_modeling_reformer.py +++ b/tests/test_modeling_reformer.py @@ -17,10 +17,10 @@ from transformers import is_torch_available from transformers.testing_utils import ( - require_multigpu, require_sentencepiece, require_tokenizers, require_torch, + require_torch_multigpu, slow, torch_device, ) @@ -558,7 +558,7 @@ def test_reformer_model_fp16_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_reformer_model_fp16_generate(*config_and_inputs) - @require_multigpu + @require_torch_multigpu def test_multigpu_data_parallel_forward(self): # Opt-out of this test. 
         pass
diff --git a/tests/test_modeling_transfo_xl.py b/tests/test_modeling_transfo_xl.py
index 2c93243f955bbd..5f16a1a4e224c6 100644
--- a/tests/test_modeling_transfo_xl.py
+++ b/tests/test_modeling_transfo_xl.py
@@ -17,7 +17,7 @@
 import unittest
 
 from transformers import is_torch_available
-from transformers.testing_utils import require_multigpu, require_torch, slow, torch_device
+from transformers.testing_utils import require_torch, require_torch_multigpu, slow, torch_device
 
 from .test_configuration_common import ConfigTester
 from .test_modeling_common import ModelTesterMixin, ids_tensor
@@ -204,7 +204,7 @@ def test_transfo_xl_lm_head(self):
         output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs)
         self.model_tester.check_transfo_xl_lm_head_output(output_result)
 
-    @require_multigpu
+    @require_torch_multigpu
     def test_multigpu_data_parallel_forward(self):
         # Opt-out of this test.
         pass
diff --git a/tests/test_skip_decorators.py b/tests/test_skip_decorators.py
index 3aac3e9b3d1169..89ff0e3bafdc2b 100644
--- a/tests/test_skip_decorators.py
+++ b/tests/test_skip_decorators.py
@@ -34,7 +34,7 @@
 import pytest
 
 from parameterized import parameterized
-from transformers.testing_utils import require_torch, require_torch_and_cuda, slow, torch_device
+from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
 
 
 # skipping in unittest tests
@@ -63,11 +63,11 @@ def check_slow_torch_cuda():
 @require_torch
 class SkipTester(unittest.TestCase):
     @slow
-    @require_torch_and_cuda
+    @require_torch_gpu
     def test_2_skips_slow_first(self):
         check_slow_torch_cuda()
 
-    @require_torch_and_cuda
+    @require_torch_gpu
     @slow
     def test_2_skips_slow_last(self):
         check_slow_torch_cuda()
@@ -97,12 +97,12 @@ def test_param_slow_last(self, param=None):
 
 
 @slow
-@require_torch_and_cuda
+@require_torch_gpu
 def test_pytest_2_skips_slow_first():
     check_slow_torch_cuda()
 
 
-@require_torch_and_cuda
+@require_torch_gpu
 @slow
 def test_pytest_2_skips_slow_last():
     check_slow_torch_cuda()