
Commit

Merge pull request #17 from emcastillo/fix-eval
Fix ignite aggregate metrics not being reported
asi1024 authored May 25, 2020
2 parents b2d409b + c8d4873 commit 803b5c8
Showing 3 changed files with 68 additions and 5 deletions.
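
The change wraps the per-epoch reporting of ignite metrics in a reporting.report_scope, collects them into a local dict, and feeds that dict into the evaluator's summary so the values survive aggregation. As a minimal, standalone sketch of how report_scope captures reported values (the observation dict and the 'val/mse' key here are made up for illustration, not taken from this commit):

    import pytorch_pfn_extras as ppe

    observation = {}  # hypothetical dict that collects reported values
    reporter = ppe.reporting.Reporter()
    with reporter:
        with ppe.reporting.report_scope(observation):
            ppe.reporting.report({'val/mse': 0.25})
    print(observation)  # -> {'val/mse': 0.25}
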
11 changes: 7 additions & 4 deletions pytorch_pfn_extras/training/extensions/evaluator.py
@@ -298,10 +298,13 @@ def report_iteration_metrics(engine):

        @self.evaluator.on(Events.EPOCH_COMPLETED)
        def set_evaluation_completed(engine):
-            metrics = self.evaluator.state.metrics
-            for metric in metrics:
-                reporting.report(
-                    {'val/{}'.format(metric): metrics[metric]})
+            ignite_metrics = {}
+            with reporting.report_scope(ignite_metrics):
+                metrics = self.evaluator.state.metrics
+                for metric in metrics:
+                    reporting.report(
+                        {'val/{}'.format(metric): metrics[metric]})
+            self.summary.add(ignite_metrics)

    def evaluate(self):
        iterator = self._iterators['main']
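
The collected dict is then handed to self.summary.add(...), which is what makes the ignite metrics appear in the aggregated result that previously dropped them. A rough sketch of that aggregation step, assuming self.summary behaves like ppe.reporting.DictSummary (an assumption; the commit does not show how the summary is constructed):

    from pytorch_pfn_extras import reporting

    summary = reporting.DictSummary()
    summary.add({'val/mse': 0.25})  # one dict of epoch-level metrics per call
    summary.add({'val/mse': 0.75})
    print(summary.compute_mean())   # -> {'val/mse': 0.5}
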
@@ -188,3 +188,60 @@ def test_evaluator_progress_bar():
    reporter.add_observer('target', target)
    with reporter:
        evaluator.evaluate()
+
+
+# Code excerpts to test IgniteEvaluator
+class IgniteDummyModel(torch.nn.Module):
+    def __init__(self):
+        super(IgniteDummyModel, self).__init__()
+        self.count = 0.
+
+    def forward(self, *args):
+        ppe.reporting.report({'x': self.count}, self)
+        self.count += 1.
+        return 0.
+
+
+def create_dummy_evaluator(model):
+    from ignite.engine import Engine
+
+    def update_fn(engine, batch):
+        y_pred = torch.tensor(batch[1])
+        model()
+        # We return fake results so that the reporters
+        # and metrics keep working
+        return (y_pred, y_pred)
+
+    evaluator = Engine(update_fn)
+    return evaluator
+
+
+def test_ignite_evaluator_reporting_metrics():
+    try:
+        from ignite.metrics import MeanSquaredError
+    except ImportError:
+        pytest.skip('pytorch-ignite is not installed')
+
+    # This test verifies that both user manually reported metrics
+    # and ignite calculated ones are correctly reflected in the
+    # reporter observation
+    model = IgniteDummyModel()
+    n_data = 10
+    x = torch.randn((n_data, 2), requires_grad=True)
+    y = torch.randn((n_data, 2))
+    dataset = torch.utils.data.TensorDataset(x, y)
+    loader = torch.utils.data.DataLoader(dataset, batch_size=3)
+    evaluator = create_dummy_evaluator(model)
+    # Attach metrics to the evaluator
+    metric = MeanSquaredError()
+    metric.attach(evaluator, 'mse')
+    evaluator_ignite_ext = ppe.training.extensions.IgniteEvaluator(
+        evaluator, loader, model, progress_bar=False
+    )
+    reporter = ppe.reporting.Reporter()
+    with reporter:
+        result = evaluator_ignite_ext()
+    # Internally reported metrics
+    assert result['main/x'] == 1.5
+    # Ignite calculated metric
+    assert result['val/mse'] == 0.0
@@ -190,7 +190,10 @@ def test_extensions_manager_state_dict():

def test_ignite_extensions_manager_state_dict():

-    from ignite.engine import create_supervised_trainer
+    try:
+        from ignite.engine import create_supervised_trainer
+    except ImportError:
+        pytest.skip('pytorch-ignite not found')

    model_state_dict = object()
    optimizer_state_dict = object()
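
Both test files now use the same guard: attempt the ignite import and skip the test when it fails. For reference only, pytest's built-in importorskip helper expresses the same guard more compactly; the commit itself keeps the explicit try/except form:

    import pytest

    def test_something_with_ignite():
        pytest.importorskip('ignite')  # skips this test when pytorch-ignite is absent
        from ignite.engine import create_supervised_trainer
        ...
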
