Skip to content

Commit

Permalink
CI errors be gone (#601)
Browse files Browse the repository at this point in the history
  • Loading branch information
AkshitaB authored Oct 2, 2023
1 parent 01077eb commit b955ef7
Show file tree
Hide file tree
Showing 8 changed files with 22 additions and 81 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,7 @@ jobs:
if: steps.virtualenv-cache.outputs.cache-hit != 'true' && (contains(matrix.task.extras, 'flax') || contains(matrix.task.extras, 'all'))
run: |
. .venv/bin/activate
pip install flax==0.5.0 jax==0.3.13 jaxlib==0.3.10 tensorflow-cpu==2.9.1 optax==0.1.3
pip install flax==0.6.1 jax==0.4.1 jaxlib==0.4.1 tensorflow-cpu==2.9.1 optax==0.1.3
- name: Install editable (no cache hit)
if: steps.virtualenv-cache.outputs.cache-hit != 'true'
Expand Down
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- Removed unnecessary code coverage dev requirements.
- Fixed issue where new version of torch caused no LR schedulers to be registered.
- Updated pinned versions of jax, jaxlib, and flax.

## [v1.2.1](https://github.com/allenai/tango/releases/tag/v1.2.1) - 2023-04-06

Expand Down
5 changes: 3 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -89,8 +89,9 @@ fairscale = [
]
flax = [
"datasets>=1.12,<3.0",
"jax>=0.3.13",
"flax>=0.5.0",
"jax>=0.4.1,<=0.4.13",
"jaxlib>=0.4.1,<=0.4.13",
"flax>=0.6.1,<=0.7.0",
"optax>=0.1.2",
"tensorflow-cpu>=2.9.1"
]
Expand Down
6 changes: 3 additions & 3 deletions tango/common/file_lock.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,9 @@ def acquire( # type: ignore[override]
if err.errno not in (1, 13, 30):
raise

if os.path.isfile(self._lock_file) and self._read_only_ok:
if os.path.isfile(self._lock_file) and self._read_only_ok: # type: ignore
warnings.warn(
f"Lacking permissions required to obtain lock '{self._lock_file}'. "
f"Lacking permissions required to obtain lock '{self._lock_file}'. " # type: ignore
"Race conditions are possible if other processes are writing to the same resource.",
UserWarning,
)
Expand All @@ -62,7 +62,7 @@ def acquire_with_updates(self, desc: Optional[str] = None) -> AcquireReturnProxy
from .tqdm import Tqdm

if desc is None:
desc = f"acquiring lock at {self._lock_file}"
desc = f"acquiring lock at {self._lock_file}" # type: ignore

progress = Tqdm.tqdm(desc=desc, bar_format="{desc} [{elapsed}]")
while True:
Expand Down
2 changes: 1 addition & 1 deletion tango/common/from_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -514,7 +514,7 @@ def construct_arg(
)
elif annotation == str:
# Strings are special because we allow casting from Path to str.
if type(popped_params) == str or isinstance(popped_params, Path):
if isinstance(popped_params, str) or isinstance(popped_params, Path):
return str(popped_params) # type: ignore
else:
raise TypeError(
Expand Down
15 changes: 10 additions & 5 deletions tango/integrations/torch/optim.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
from typing import Type

import torch

from tango.common.registrable import Registrable
Expand Down Expand Up @@ -73,11 +75,14 @@ class LRScheduler(torch.optim.lr_scheduler._LRScheduler, Registrable):
):
Optimizer.register("torch::" + name)(cls)

# Note: This is a hack. Remove after we upgrade the torch version.
base_class: Type
try:
base_class = torch.optim.lr_scheduler.LRScheduler
except AttributeError:
base_class = torch.optim.lr_scheduler._LRScheduler

# Register all learning rate schedulers.
for name, cls in torch.optim.lr_scheduler.__dict__.items():
if (
isinstance(cls, type)
and issubclass(cls, torch.optim.lr_scheduler.LRScheduler)
and not cls == torch.optim.lr_scheduler.LRScheduler
):
if isinstance(cls, type) and issubclass(cls, base_class) and not cls == base_class:
LRScheduler.register("torch::" + name)(cls)
70 changes: 2 additions & 68 deletions tango/integrations/transformers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,76 +44,10 @@
from tango.integrations.torch import Model
from tango.integrations.transformers import *
available_models = []
for name in sorted(Model.list_available()):
if name.startswith("transformers::AutoModel"):
print(name)
.. testoutput::
transformers::AutoModel::from_config
transformers::AutoModel::from_pretrained
transformers::AutoModelForAudioClassification::from_config
transformers::AutoModelForAudioClassification::from_pretrained
transformers::AutoModelForAudioFrameClassification::from_config
transformers::AutoModelForAudioFrameClassification::from_pretrained
transformers::AutoModelForAudioXVector::from_config
transformers::AutoModelForAudioXVector::from_pretrained
transformers::AutoModelForCTC::from_config
transformers::AutoModelForCTC::from_pretrained
transformers::AutoModelForCausalLM::from_config
transformers::AutoModelForCausalLM::from_pretrained
transformers::AutoModelForDepthEstimation::from_config
transformers::AutoModelForDepthEstimation::from_pretrained
transformers::AutoModelForDocumentQuestionAnswering::from_config
transformers::AutoModelForDocumentQuestionAnswering::from_pretrained
transformers::AutoModelForImageClassification::from_config
transformers::AutoModelForImageClassification::from_pretrained
transformers::AutoModelForImageSegmentation::from_config
transformers::AutoModelForImageSegmentation::from_pretrained
transformers::AutoModelForInstanceSegmentation::from_config
transformers::AutoModelForInstanceSegmentation::from_pretrained
transformers::AutoModelForMaskGeneration::from_config
transformers::AutoModelForMaskGeneration::from_pretrained
transformers::AutoModelForMaskedImageModeling::from_config
transformers::AutoModelForMaskedImageModeling::from_pretrained
transformers::AutoModelForMaskedLM::from_config
transformers::AutoModelForMaskedLM::from_pretrained
transformers::AutoModelForMultipleChoice::from_config
transformers::AutoModelForMultipleChoice::from_pretrained
transformers::AutoModelForNextSentencePrediction::from_config
transformers::AutoModelForNextSentencePrediction::from_pretrained
transformers::AutoModelForObjectDetection::from_config
transformers::AutoModelForObjectDetection::from_pretrained
transformers::AutoModelForPreTraining::from_config
transformers::AutoModelForPreTraining::from_pretrained
transformers::AutoModelForQuestionAnswering::from_config
transformers::AutoModelForQuestionAnswering::from_pretrained
transformers::AutoModelForSemanticSegmentation::from_config
transformers::AutoModelForSemanticSegmentation::from_pretrained
transformers::AutoModelForSeq2SeqLM::from_config
transformers::AutoModelForSeq2SeqLM::from_pretrained
transformers::AutoModelForSequenceClassification::from_config
transformers::AutoModelForSequenceClassification::from_pretrained
transformers::AutoModelForSpeechSeq2Seq::from_config
transformers::AutoModelForSpeechSeq2Seq::from_pretrained
transformers::AutoModelForTableQuestionAnswering::from_config
transformers::AutoModelForTableQuestionAnswering::from_pretrained
transformers::AutoModelForTokenClassification::from_config
transformers::AutoModelForTokenClassification::from_pretrained
transformers::AutoModelForUniversalSegmentation::from_config
transformers::AutoModelForUniversalSegmentation::from_pretrained
transformers::AutoModelForVideoClassification::from_config
transformers::AutoModelForVideoClassification::from_pretrained
transformers::AutoModelForVision2Seq::from_config
transformers::AutoModelForVision2Seq::from_pretrained
transformers::AutoModelForVisualQuestionAnswering::from_config
transformers::AutoModelForVisualQuestionAnswering::from_pretrained
transformers::AutoModelForZeroShotImageClassification::from_config
transformers::AutoModelForZeroShotImageClassification::from_pretrained
transformers::AutoModelForZeroShotObjectDetection::from_config
transformers::AutoModelForZeroShotObjectDetection::from_pretrained
transformers::AutoModelWithLMHead::from_config
transformers::AutoModelWithLMHead::from_pretrained
available_models.append(name)
- :class:`~tango.integrations.torch.Optimizer`: All optimizers from transformers are registered according
to their class names (e.g. "transformers::AdaFactor").
Expand Down
2 changes: 1 addition & 1 deletion tests/common/from_params_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -470,7 +470,7 @@ def __init__(self, a: str, x: int = 42, **kwargs):
assert instance.x == 42
assert instance.a == -1
assert len(instance.rest) == 1 # type: ignore
assert type(instance.rest["raw_a"]) == str # type: ignore
assert isinstance(instance.rest["raw_a"], str) # type: ignore
assert instance.rest["raw_a"] == "123" # type: ignore

def test_kwargs_are_passed_to_deeper_superclasses(self):
Expand Down

0 comments on commit b955ef7

Please sign in to comment.