Skip to content

Commit

Permalink
Adding QuestionAnsweringTask class to the question answering task (Lightning-Universe#567)
Browse files Browse the repository at this point in the history

* Adding QuestionAnsweringTask class to the question answering task

* Small changes based on pep8 guidelines

Co-authored-by: Ethan Harris <[email protected]>
  • Loading branch information
karthikrangasai and ethanwharris authored Jul 12, 2021
1 parent bf1526f commit f7a86ea
Show file tree
Hide file tree
Showing 5 changed files with 179 additions and 1 deletion.
1 change: 1 addition & 0 deletions flash/text/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from flash.text.classification import TextClassificationData, TextClassifier # noqa: F401
from flash.text.seq2seq import ( # noqa: F401
QuestionAnsweringData,
QuestionAnsweringTask,
Seq2SeqData,
Seq2SeqTask,
SummarizationData,
Expand Down
2 changes: 1 addition & 1 deletion flash/text/seq2seq/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from flash.text.seq2seq.core import Seq2SeqData, Seq2SeqFreezeEmbeddings, Seq2SeqTask # noqa: F401
from flash.text.seq2seq.question_answering import QuestionAnsweringData # noqa: F401
from flash.text.seq2seq.question_answering import QuestionAnsweringData, QuestionAnsweringTask # noqa: F401
from flash.text.seq2seq.summarization import SummarizationData, SummarizationTask # noqa: F401
from flash.text.seq2seq.translation import TranslationData, TranslationTask # noqa: F401
1 change: 1 addition & 0 deletions flash/text/seq2seq/question_answering/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
from flash.text.seq2seq.question_answering.data import QuestionAnsweringData # noqa: F401
from flash.text.seq2seq.question_answering.model import QuestionAnsweringTask # noqa: F401
84 changes: 84 additions & 0 deletions flash/text/seq2seq/question_answering/model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,84 @@
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Type, Union

import torch
from torchmetrics import Metric

from flash.text.seq2seq.core.metrics import RougeMetric
from flash.text.seq2seq.core.model import Seq2SeqTask


class QuestionAnsweringTask(Seq2SeqTask):
    """The ``QuestionAnsweringTask`` is a :class:`~flash.Task` for Seq2Seq text question answering. For more details,
    see :ref:`question_answering`.

    You can change the backbone to any question answering model from `HuggingFace/transformers
    <https://huggingface.co/models?filter=pytorch&pipeline_tag=question-answering>`_ using the ``backbone`` argument.

    .. note:: When changing the backbone, make sure you pass in the same backbone to the :class:`~flash.Task` and the
        :class:`~flash.core.data.data_module.DataModule` object! Since this is a Seq2Seq task, make sure you use a
        Seq2Seq model.

    Args:
        backbone: backbone model to use for the task.
        loss_fn: Loss function for training.
        optimizer: Optimizer to use for training, defaults to `torch.optim.Adam`.
        metrics: Metrics to compute for training and evaluation. Defaults to calculating the ROUGE metric.
            Changing this argument currently has no effect.
        learning_rate: Learning rate to use for training, defaults to `1e-5`.
        val_target_max_length: Maximum length of targets in validation. Defaults to `None`.
        num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`.
        use_stemmer: Whether Porter stemmer should be used to strip word suffixes to improve matching.
        rouge_newline_sep: Add a new line at the beginning of each sentence in Rouge Metric calculation.
    """

    def __init__(
        self,
        backbone: str = "t5-small",
        loss_fn: Optional[Union[Callable, Mapping, Sequence]] = None,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.Adam,
        metrics: Union[Metric, Callable, Mapping, Sequence, None] = None,
        learning_rate: float = 1e-5,
        val_target_max_length: Optional[int] = None,
        num_beams: Optional[int] = 4,
        use_stemmer: bool = True,
        rouge_newline_sep: bool = True
    ):
        # Record constructor arguments before delegating to the base class so
        # they are captured in the checkpoint hyperparameters.
        self.save_hyperparameters()
        super().__init__(
            backbone=backbone,
            loss_fn=loss_fn,
            optimizer=optimizer,
            metrics=metrics,
            learning_rate=learning_rate,
            val_target_max_length=val_target_max_length,
            num_beams=num_beams
        )
        # ROUGE is computed explicitly in ``compute_metrics`` below rather than
        # via the ``metrics`` argument (which currently has no effect, see docstring).
        self.rouge = RougeMetric(
            rouge_newline_sep=rouge_newline_sep,
            use_stemmer=use_stemmer,
        )

    def compute_metrics(self, generated_tokens: torch.Tensor, batch: Dict, prefix: str) -> None:
        """Compute ROUGE between the generated answers and the batch labels and log it.

        Args:
            generated_tokens: Token ids produced by the backbone's generation step.
            batch: Batch mapping; only the ``"labels"`` entry is read here.
            prefix: Unused in this implementation (present to match the caller's signature).
        """
        tgt_lns = self.tokenize_labels(batch["labels"])
        # NOTE(review): decoding relies on ``self._postprocess`` having been
        # attached (see ``test_serve`` in the tests) — confirm it is set before
        # validation runs.
        result = self.rouge(self._postprocess.uncollate(generated_tokens), tgt_lns)
        # Aggregate over the epoch; per-step values are not logged.
        self.log_dict(result, on_step=False, on_epoch=True, prog_bar=True)

    @staticmethod
    def _ci_benchmark_fn(history: List[Dict[str, Any]]):
        """
        This function is used only for debugging usage with CI
        """
        # Sanity threshold on the last recorded epoch's ROUGE-1 recall.
        assert history[-1]["rouge1_recall"] > 0.2
92 changes: 92 additions & 0 deletions tests/text/seq2seq/question_answering/test_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from unittest import mock

import pytest
import torch

from flash import Trainer
from flash.core.utilities.imports import _TEXT_AVAILABLE
from flash.text import QuestionAnsweringTask
from flash.text.seq2seq.core.data import Seq2SeqPostprocess
from flash.text.seq2seq.question_answering.data import QuestionAnsweringPreprocess
from tests.helpers.utils import _SERVE_TESTING, _TEXT_TESTING

# ======== Mock functions ========


class DummyDataset(torch.utils.data.Dataset):
    """Synthetic dataset that yields random token-id tensors for smoke tests."""

    def __getitem__(self, index):
        """Return one random sample; ``index`` is ignored."""
        sample = {}
        for key in ("input_ids", "labels"):
            sample[key] = torch.randint(1000, size=(128, ))
        return sample

    def __len__(self) -> int:
        """Fixed nominal size so DataLoader iteration terminates."""
        return 100


# ==============================

TEST_BACKBONE = "sshleifer/tiny-mbart" # super small model for testing


@pytest.mark.skipif(os.name == "nt", reason="Huggingface timing out on Windows")
@pytest.mark.skipif(not _TEXT_TESTING, reason="text libraries aren't installed.")
def test_init_train(tmpdir):
    """Smoke-test that the task can be constructed and fit for one fast-dev-run step."""
    task = QuestionAnsweringTask(TEST_BACKBONE)
    loader = torch.utils.data.DataLoader(DummyDataset())
    Trainer(default_root_dir=tmpdir, fast_dev_run=True).fit(task, loader)


@pytest.mark.skipif(not _TEXT_TESTING, reason="text libraries aren't installed.")
def test_jit(tmpdir):
    """Trace the model with TorchScript, round-trip it through disk, and call it."""
    model = QuestionAnsweringTask(TEST_BACKBONE)
    model.eval()

    sample_input = {
        "input_ids": torch.randint(1000, size=(1, 32)),
        "attention_mask": torch.randint(1, size=(1, 32)),
    }

    # Huggingface only supports `torch.jit.trace`
    model = torch.jit.trace(model, [sample_input])

    path = os.path.join(tmpdir, "test.pt")
    torch.jit.save(model, path)
    model = torch.jit.load(path)

    out = model(sample_input)
    assert isinstance(out, torch.Tensor)


@pytest.mark.skipif(not _SERVE_TESTING, reason="serve libraries aren't installed.")
@mock.patch("flash._IS_TESTING", True)
def test_serve():
    """Verify the task becomes servable once preprocess/postprocess are attached."""
    served_model = QuestionAnsweringTask(TEST_BACKBONE)
    # TODO: Currently only servable once a preprocess and postprocess have been attached
    served_model._preprocess = QuestionAnsweringPreprocess(backbone=TEST_BACKBONE)
    served_model._postprocess = Seq2SeqPostprocess()
    served_model.eval()
    served_model.serve()


@pytest.mark.skipif(_TEXT_AVAILABLE, reason="text libraries are installed.")
def test_load_from_checkpoint_dependency_error():
    """Loading a checkpoint without the text extras must raise a helpful error."""
    expected = re.escape("'lightning-flash[text]'")
    with pytest.raises(ModuleNotFoundError, match=expected):
        QuestionAnsweringTask.load_from_checkpoint("not_a_real_checkpoint.pt")

0 comments on commit f7a86ea

Please sign in to comment.