This repository has been archived by the owner on Nov 21, 2022. It is now read-only.

Add ability to set kwargs for pipeline in load_from_checkpoint #204

Merged · 2 commits · Oct 20, 2021
19 changes: 18 additions & 1 deletion lightning_transformers/core/nlp/model.py
@@ -1,5 +1,6 @@
-from typing import TYPE_CHECKING, Any, Dict, Optional, Type
+from typing import IO, TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Union
 
+import torch
 from hydra.utils import get_class
 from transformers import PreTrainedTokenizerBase
 from transformers import pipeline as hf_transformers_pipeline
@@ -96,3 +97,19 @@ def hf_predict(self, *args, **kwargs) -> Any:
     def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
         if "tokenizer" in checkpoint:
             self.tokenizer = checkpoint["tokenizer"]
+
+    @classmethod
+    def load_from_checkpoint(
+        cls,
+        checkpoint_path: Union[str, IO],
+        map_location: Optional[Union[Dict[str, str], str, torch.device, int, Callable]] = None,
+        hparams_file: Optional[str] = None,
+        strict: bool = True,
+        hf_pipeline_kwargs: Optional[Dict] = None,
+        **kwargs,
+    ):
+        # forward remaining kwargs so base-class hparams overrides still work
+        model: HFTransformer = super().load_from_checkpoint(checkpoint_path, map_location, hparams_file, strict, **kwargs)
+        # update model with hf_pipeline_kwargs override
+        if hf_pipeline_kwargs is not None:
+            model._hf_pipeline_kwargs.update(hf_pipeline_kwargs)
+        return model
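
For context, a usage sketch of the new argument (illustrative only, not part of the diff; the checkpoint path is a placeholder):

    from lightning_transformers.task.nlp.language_modeling import LanguageModelingTransformer

    # Restore a trained task and override pipeline construction kwargs,
    # e.g. placing the underlying transformers.pipeline on GPU 0.
    model = LanguageModelingTransformer.load_from_checkpoint(
        "path/to/checkpoint.ckpt",  # placeholder path
        hf_pipeline_kwargs={"device": 0},
    )
    prediction = model.hf_predict("The quick brown fox")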
36 changes: 36 additions & 0 deletions tests/task/nlp/test_pipeline.py
@@ -0,0 +1,36 @@
+import pytorch_lightning as pl
+import torch
+from transformers import AutoTokenizer
+
+from lightning_transformers.core.nlp import HFBackboneConfig
+from lightning_transformers.task.nlp.language_modeling import LanguageModelingDataModule, LanguageModelingTransformer
+from lightning_transformers.task.nlp.language_modeling.config import LanguageModelingDataConfig
+
+
+def test_kwargs_load_from_checkpoint(hf_cache_path, tmpdir):
+    """Test to ensure we can pass arguments to hf_pipeline when loading from checkpoint."""
+
+    class TestModel(LanguageModelingTransformer):
+        def configure_optimizers(self):
+            return torch.optim.AdamW(self.parameters(), lr=1e-5)
+
+    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="sshleifer/tiny-gpt2")
+    model = TestModel(backbone=HFBackboneConfig(pretrained_model_name_or_path="sshleifer/tiny-gpt2"))
+    dm = LanguageModelingDataModule(
+        cfg=LanguageModelingDataConfig(
+            batch_size=1,
+            dataset_name="wikitext",
+            dataset_config_name="wikitext-2-raw-v1",
+            cache_dir=hf_cache_path,
+        ),
+        tokenizer=tokenizer,
+    )
+    trainer = pl.Trainer(default_root_dir=tmpdir, fast_dev_run=True)
+
+    trainer.fit(model, dm)
+
+    trainer.save_checkpoint("test.pt")
+    kwargs = {"device": 0}
+    model = TestModel.load_from_checkpoint("test.pt", hf_pipeline_kwargs=kwargs)
+    # todo: refactor this to actually mock the hf_pipeline and assert the input
+    assert model._hf_pipeline_kwargs == kwargs
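
For reference, a minimal sketch of the mechanism this test exercises, assuming (the repo defines the real version) that the task lazily builds its pipeline via transformers.pipeline and splats _hf_pipeline_kwargs into that call; the constructor shape, attribute names, and task string below are assumptions for illustration:

    from transformers import pipeline as hf_transformers_pipeline

    class PipelineSketch:
        """Assumed shape of the hf_pipeline wiring, not copied from the repo."""

        def __init__(self, model, tokenizer, **hf_pipeline_kwargs):
            self.model = model
            self.tokenizer = tokenizer
            self._hf_pipeline = None
            self._hf_pipeline_kwargs = hf_pipeline_kwargs

        @property
        def hf_pipeline(self):
            # Build lazily, so kwargs merged in by load_from_checkpoint
            # (e.g. {"device": 0}) take effect on first use.
            if self._hf_pipeline is None:
                self._hf_pipeline = hf_transformers_pipeline(
                    task="text-generation",  # assumed task name
                    model=self.model,
                    tokenizer=self.tokenizer,
                    **self._hf_pipeline_kwargs,
                )
            return self._hf_pipeline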