Use pytest.raises where suitable
akx committed Feb 14, 2024
1 parent 82b883b commit 699e7b1
Showing 6 changed files with 30 additions and 39 deletions.
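The change applies one mechanical pattern across all six files: unittest's `self.assertRaisesRegex(Exc, expected_regex=...)` context manager becomes `pytest.raises(Exc, match=...)`, which also works outside `unittest.TestCase` methods. A minimal before/after sketch (the `divide` helper is made up for illustration, not code from this repository):

import pytest

def divide(a: float, b: float) -> float:
    if b == 0:
        raise ValueError("division by zero is not allowed")
    return a / b

# before, inside a unittest.TestCase method:
#     with self.assertRaisesRegex(ValueError, expected_regex="division by zero"):
#         divide(1, 0)

# after, as a plain pytest test function; `match` is a regex searched in str(exc)
def test_divide_by_zero_raises():
    with pytest.raises(ValueError, match="division by zero"):
        divide(1, 0)
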
13 changes: 3 additions & 10 deletions tests/test_config.py
@@ -224,15 +224,10 @@ def test_regex_with_layer_indexing_lora(self):

        valid_config = {"target_modules": ["foo"], "layers_pattern": ["bar"], "layers_to_transform": [0]}

-        with self.assertRaisesRegex(
-            ValueError,
-            expected_regex="`layers_to_transform` cannot be used when `target_modules` is a str.",
-        ):
+        with pytest.raises(ValueError, match="`layers_to_transform` cannot be used when `target_modules` is a str."):
            LoraConfig(**invalid_config1)

-        with self.assertRaisesRegex(
-            ValueError, expected_regex="`layers_pattern` cannot be used when `target_modules` is a str."
-        ):
+        with pytest.raises(ValueError, match="`layers_pattern` cannot be used when `target_modules` is a str."):
            LoraConfig(**invalid_config2)

        # should run without errors
@@ -245,9 +240,7 @@ def test_ia3_is_feedforward_subset_invalid_config(self):
        # an example invalid config
        invalid_config = {"target_modules": ["k", "v"], "feedforward_modules": ["q"]}

-        with self.assertRaisesRegex(
-            ValueError, expected_regex="^`feedforward_modules` should be a subset of `target_modules`$"
-        ):
+        with pytest.raises(ValueError, match="^`feedforward_modules` should be a subset of `target_modules`$"):
            IA3Config(**invalid_config)

    def test_ia3_is_feedforward_subset_valid_config(self):
4 changes: 2 additions & 2 deletions tests/test_decoder_models.py
@@ -14,6 +14,7 @@
import unittest
from unittest.mock import Mock, call, patch

+import pytest
import torch
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer
@@ -120,8 +121,7 @@ def test_prompt_tuning_config_invalid_args(self):
        # Raise an error when tokenizer_kwargs is used with prompt_tuning_init!='TEXT', because this argument has no
        # function in that case
        model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
-        msg = "tokenizer_kwargs only valid when using prompt_tuning_init='TEXT'."
-        with self.assertRaisesRegex(ValueError, expected_regex=msg):
+        with pytest.raises(ValueError, match="tokenizer_kwargs only valid when using prompt_tuning_init='TEXT'."):
            PromptTuningConfig(
                base_model_name_or_path=model_id,
                tokenizer_name_or_path=model_id,
3 changes: 1 addition & 2 deletions tests/test_gpu_examples.py
@@ -1350,8 +1350,7 @@ def test_model_loaded_in_float16_raises(self):
            ),
            data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
        )
-        msg = "Attempting to unscale FP16 gradients."
-        with self.assertRaisesRegex(ValueError, msg):
+        with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."):
            trainer.train()

    @pytest.mark.single_gpu_tests
10 changes: 5 additions & 5 deletions tests/test_mixed.py
@@ -19,6 +19,7 @@
import tempfile
import unittest

+import pytest
import torch
from parameterized import parameterized
from torch import nn
@@ -633,7 +634,7 @@ def test_delete_adapter(self):
        assert torch.allclose(output_0, output_deleted_1, atol=atol, rtol=rtol)

        msg = re.escape("Adapter(s) ['adapter1'] not found, available adapters: ['adapter0']")
-        with self.assertRaisesRegex(ValueError, expected_regex=msg):
+        with pytest.raises(ValueError, match=msg):
            peft_model.set_adapter(["adapter0", "adapter1"])

        # re-add adapter1
@@ -657,7 +658,7 @@ def test_delete_adapter(self):
        assert not torch.allclose(output_deleted_0, output_01, atol=atol, rtol=rtol)

        msg = re.escape("Adapter(s) ['adapter0'] not found, available adapters: ['adapter1']")
-        with self.assertRaisesRegex(ValueError, expected_regex=msg):
+        with pytest.raises(ValueError, match=msg):
            peft_model.set_adapter(["adapter0", "adapter1"])

        peft_model.delete_adapter("adapter1")
@@ -674,8 +675,7 @@ def test_modules_to_save(self):
        # TODO: theoretically, we could allow this if it's the same target layer
        config1 = LoHaConfig(target_modules=["lin0"], modules_to_save=["lin1"])
        peft_model.add_adapter("adapter1", config1)
-        msg = "Only one adapter can be set at a time for modules_to_save"
-        with self.assertRaisesRegex(ValueError, expected_regex=msg):
+        with pytest.raises(ValueError, match="Only one adapter can be set at a time for modules_to_save"):
            peft_model.set_adapter(["adapter0", "adapter1"])

    def test_get_nb_trainable_parameters(self):
@@ -714,7 +714,7 @@ def test_incompatible_config_raises(self):

        config1 = PrefixTuningConfig()
        msg = "The provided `peft_type` 'PREFIX_TUNING' is not compatible with the `PeftMixedModel`."
-        with self.assertRaisesRegex(ValueError, expected_regex=msg):
+        with pytest.raises(ValueError, match=msg):
            peft_model.add_adapter("adapter1", config1)

    def test_decoder_model(self):
5 changes: 3 additions & 2 deletions tests/test_tuners_utils.py
@@ -17,6 +17,7 @@
import unittest
from copy import deepcopy

+import pytest
from diffusers import StableDiffusionPipeline
from parameterized import parameterized
from torch import nn
@@ -312,9 +313,9 @@ def test_maybe_include_all_linear_layers_diffusion(self):
        model_id = "hf-internal-testing/tiny-stable-diffusion-torch"
        model = StableDiffusionPipeline.from_pretrained(model_id)
        config = LoraConfig(base_model_name_or_path=model_id, target_modules="all-linear")
-        with self.assertRaisesRegex(
+        with pytest.raises(
            ValueError,
-            "Only instances of PreTrainedModel support `target_modules='all-linear'`",
+            match="Only instances of PreTrainedModel support `target_modules='all-linear'`",
        ):
            model.unet = get_peft_model(model.unet, config)

34 changes: 16 additions & 18 deletions tests/testing_common.py
@@ -20,6 +20,7 @@
from collections import OrderedDict
from dataclasses import replace

+import pytest
import torch
import yaml
from diffusers import StableDiffusionPipeline
@@ -489,25 +490,19 @@ def _test_merge_layers_nan(self, model_id, config_cls, config_kwargs):
            if "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name:
                module.data[0] = torch.nan

-        with self.assertRaises(ValueError) as error_context:
+        with pytest.raises(ValueError) as ei:
            model = model.merge_and_unload(safe_merge=True)

-        assert (
-            str(error_context.exception)
-            == "NaNs detected in the merged weights. The adapter default seems to be broken"
-        )
+        assert str(ei.value) == "NaNs detected in the merged weights. The adapter default seems to be broken"

        for name, module in model.named_parameters():
            if "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name:
                module.data[0] = torch.inf

-        with self.assertRaises(ValueError) as error_context:
+        with pytest.raises(ValueError) as ei:
            model = model.merge_and_unload(safe_merge=True)

-        assert (
-            str(error_context.exception)
-            == "NaNs detected in the merged weights. The adapter default seems to be broken"
-        )
+        assert str(ei.value) == "NaNs detected in the merged weights. The adapter default seems to be broken"

    def _test_merge_layers(self, model_id, config_cls, config_kwargs):
        if issubclass(config_cls, PromptLearningConfig):
@@ -646,7 +641,10 @@ def _test_merge_layers_is_idempotent(self, model_id, config_cls, config_kwargs):

        # merging again should not change anything
        # also check warning:
-        with self.assertWarnsRegex(UserWarning, "All adapters are already merged, nothing to do"):
+        with pytest.raises(
+            UserWarning,
+            match="All adapters are already merged, nothing to do",
+        ):
            model.merge_adapter()
        logits_1 = model(**self.prepare_inputs_for_testing())[0]

@@ -677,7 +675,7 @@ def _test_generate_pos_args(self, model_id, config_cls, config_kwargs, raises_err):

        inputs = self.prepare_inputs_for_testing()
        if raises_err:
-            with self.assertRaises(TypeError):
+            with pytest.raises(TypeError):
                # check if `generate` raises an error if positional arguments are passed
                _ = model.generate(inputs["input_ids"])
        else:
@@ -1002,7 +1000,7 @@ def _test_unload_adapter(self, model_id, config_cls, config_kwargs):
        model = model.to(self.torch_device)

        if config.peft_type not in ("LORA", "ADALORA", "IA3"):
-            with self.assertRaises(AttributeError):
+            with pytest.raises(AttributeError):
                model = model.unload()
        else:
            dummy_input = self.prepare_inputs_for_testing()
@@ -1112,15 +1110,15 @@ def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs):
            combination_type="linear",
        )

-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
            model.add_weighted_adapter(
                adapter_list[1:],
                weight_list[1:],
                "multi_adapter_linear_reweighting_uneven_r",
                combination_type="linear",
            )

-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
            model.add_weighted_adapter(
                adapter_list[1:],
                weight_list[1:],
@@ -1129,7 +1127,7 @@ def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs):
                density=0.5,
            )

-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
            model.add_weighted_adapter(
                adapter_list[1:],
                weight_list[1:],
@@ -1138,7 +1136,7 @@ def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs):
                density=0.5,
            )

-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
            model.add_weighted_adapter(
                adapter_list[1:],
                weight_list[1:],
@@ -1272,7 +1270,7 @@ def _test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs):

        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config, "adapter0")
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
            model.add_adapter("adapter1", replace(config, r=20))

        # (superficial) test that the model is not left in a half-initialized state when adding an adapter fails
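Two pytest behaviors the converted tests rely on, shown as a small self-contained sketch (the `fail` and `warn_already_merged` helpers below are hypothetical, not code from this repository): `match` is applied with `re.search`, which is why test_mixed.py wraps messages containing brackets in `re.escape`; the captured exception is exposed as `excinfo.value` (the `str(ei.value)` checks in testing_common.py); and warnings are asserted with `pytest.warns`, the warning counterpart of `pytest.raises`.

import re
import warnings

import pytest

def fail(msg: str) -> None:
    raise ValueError(msg)

def warn_already_merged() -> None:
    warnings.warn("All adapters are already merged, nothing to do", UserWarning)

def test_match_is_a_regex_search():
    # special characters such as parentheses and brackets must be escaped for `match`
    expected = re.escape("Adapter(s) ['adapter1'] not found")
    with pytest.raises(ValueError, match=expected) as excinfo:
        fail("Adapter(s) ['adapter1'] not found, available adapters: ['adapter0']")
    # the raised exception object is available on the ExceptionInfo as `.value`
    assert "available adapters" in str(excinfo.value)

def test_warning_is_checked_with_pytest_warns():
    # warnings are captured with pytest.warns rather than pytest.raises
    with pytest.warns(UserWarning, match="already merged"):
        warn_already_merged()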
