Skip to content

Commit

Permalink
Chronos: modify examples of .evaluate (intel-analytics#5622)
Browse files Browse the repository at this point in the history
  • Loading branch information
plusbang authored and ForJadeForest committed Sep 20, 2022
1 parent 6e45868 commit 236e595
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 8 deletions.
14 changes: 7 additions & 7 deletions python/chronos/src/bigdl/chronos/forecaster/base_forecaster.py
Original file line number Diff line number Diff line change
Expand Up @@ -595,14 +595,14 @@ def evaluate(self, data, batch_size=32, multioutput="raw_values", quantize=False
"""
Evaluate using a trained forecaster.
If you want to evaluate on a single node (which is common practice), please call
.to_local().evaluate(data, ...)
Please note that evaluate result is calculated by scaled y and yhat. If you scaled
your data (e.g. use .scale() on the TSDataset) please follow the following code
your data (e.g. use .scale() on the TSDataset), please follow the following code
snippet to evaluate your result if you need to evaluate on unscaled data.
if you want to evaluate on a single node (which is common practice), please call
.to_local().evaluate(data, ...)
>>> from bigdl.orca.automl.metrics import Evaluator
>>> from bigdl.chronos.metric.forecast_metrics import Evaluator
>>> y_hat = forecaster.predict(x)
>>> y_hat_unscaled = tsdata.unscale_numpy(y_hat) # or other customized unscale methods
>>> y_unscaled = tsdata.unscale_numpy(y) # or other customized unscale methods
Expand Down Expand Up @@ -699,8 +699,8 @@ def evaluate_with_onnx(self, data,
your data (e.g. use .scale() on the TSDataset), please follow the following code
snippet to evaluate your result if you need to evaluate on unscaled data.
>>> from bigdl.orca.automl.metrics import Evaluator
>>> y_hat = forecaster.predict(x)
>>> from bigdl.chronos.metric.forecast_metrics import Evaluator
>>> y_hat = forecaster.predict_with_onnx(x)
>>> y_hat_unscaled = tsdata.unscale_numpy(y_hat) # or other customized unscale methods
>>> y_unscaled = tsdata.unscale_numpy(y) # or other customized unscale methods
>>> Evaluator.evaluate(metric=..., y_unscaled, y_hat_unscaled, multioutput=...)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ def get_latency(func, *args, num_running=100, **kwargs):
>>> # run forecaster.predict(x.numpy()) for len(tsdata_test.df) times
>>> # to evaluate the time cost
>>> latency = Evaluator.get_latency(forecaster.predict, x.numpy(),\
num_running = len(tsdata_test.df))
num_running = len(tsdata_test.df))
>>> # an example output:
>>> # {"p50": 3.853, "p90": 3.881, "p95": 3.933, "p99": 4.107}
"""
Expand Down

0 comments on commit 236e595

Please sign in to comment.