From 5543f7b63da96559e69636c289027e5db0e31ae8 Mon Sep 17 00:00:00 2001
From: Yu Shan
Date: Mon, 12 Oct 2020 16:56:52 +0800
Subject: [PATCH] add predict and evaluate result check in zouwu tests (#2943)

* add predict and evaluate result check in zouwu tests

* fix random failure
---
 pyzoo/test/zoo/zouwu/autots/test_auto_ts.py  | 27 ++++++++++++++-----
 .../model/forecast/test_lstm_forecaster.py   |  8 +++---
 .../model/forecast/test_mtnet_forecaster.py  |  8 +++---
 3 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/pyzoo/test/zoo/zouwu/autots/test_auto_ts.py b/pyzoo/test/zoo/zouwu/autots/test_auto_ts.py
index 6754e9ec35f..f71ce668eb4 100644
--- a/pyzoo/test/zoo/zouwu/autots/test_auto_ts.py
+++ b/pyzoo/test/zoo/zouwu/autots/test_auto_ts.py
@@ -53,8 +53,13 @@ def test_AutoTSTrainer_smoke(self):
         pipeline = tsp.fit(self.train_df)
         assert isinstance(pipeline, TSPipeline)
         assert pipeline.internal.config is not None
-        pipeline.evaluate(self.validation_df)
-        pipeline.predict(self.validation_df)
+        evaluate_result = pipeline.evaluate(self.validation_df)
+        if horizon > 1:
+            assert evaluate_result[0].shape[0] == horizon
+        else:
+            assert evaluate_result[0]
+        predict_df = pipeline.predict(self.validation_df)
+        assert not predict_df.empty
 
     def test_AutoTrainer_LstmRecipe(self):
         horizon = np.random.randint(1, 6)
@@ -74,8 +79,13 @@ def test_AutoTrainer_LstmRecipe(self):
                            ))
         assert isinstance(pipeline, TSPipeline)
         assert pipeline.internal.config is not None
-        pipeline.evaluate(self.validation_df)
-        pipeline.predict(self.validation_df)
+        evaluate_result = pipeline.evaluate(self.validation_df)
+        if horizon > 1:
+            assert evaluate_result[0].shape[0] == horizon
+        else:
+            assert evaluate_result[0]
+        predict_df = pipeline.predict(self.validation_df)
+        assert not predict_df.empty
 
     def test_AutoTrainer_MTNetRecipe(self):
         horizon = np.random.randint(1, 6)
@@ -97,8 +107,13 @@ def test_AutoTrainer_MTNetRecipe(self):
                            ))
         assert isinstance(pipeline, TSPipeline)
         assert pipeline.internal.config is not None
-        pipeline.evaluate(self.validation_df)
-        pipeline.predict(self.validation_df)
+        evaluate_result = pipeline.evaluate(self.validation_df)
+        if horizon > 1:
+            assert evaluate_result[0].shape[0] == horizon
+        else:
+            assert evaluate_result[0]
+        predict_df = pipeline.predict(self.validation_df)
+        assert not predict_df.empty
 
 
 if __name__ == "__main__":
diff --git a/pyzoo/test/zoo/zouwu/model/forecast/test_lstm_forecaster.py b/pyzoo/test/zoo/zouwu/model/forecast/test_lstm_forecaster.py
index 5e6aec8fc71..576550d5ee0 100644
--- a/pyzoo/test/zoo/zouwu/model/forecast/test_lstm_forecaster.py
+++ b/pyzoo/test/zoo/zouwu/model/forecast/test_lstm_forecaster.py
@@ -62,14 +62,16 @@ def gen_test_sample(data, past_seq_len):
 
     def test_forecast_lstm(self):
         # TODO hacking to fix a bug
-        model = LSTMForecaster(target_dim=1, feature_dim=self.x_train.shape[-1])
+        target_dim = 1
+        model = LSTMForecaster(target_dim=target_dim, feature_dim=self.x_train.shape[-1])
         model.fit(self.x_train,
                   self.y_train,
                   validation_data=(self.x_val, self.y_val),
                   batch_size=8,
                   distributed=False)
-        model.evaluate(self.x_val, self.y_val)
-        model.predict(self.x_test)
+        assert model.evaluate(self.x_val, self.y_val)
+        predict_result = model.predict(self.x_test)
+        assert predict_result.shape == (self.x_test.shape[0], target_dim)
 
 
 if __name__ == "__main__":
diff --git a/pyzoo/test/zoo/zouwu/model/forecast/test_mtnet_forecaster.py b/pyzoo/test/zoo/zouwu/model/forecast/test_mtnet_forecaster.py
index 005708c7121..6c8dfffc1c7 100644
--- a/pyzoo/test/zoo/zouwu/model/forecast/test_mtnet_forecaster.py
+++ b/pyzoo/test/zoo/zouwu/model/forecast/test_mtnet_forecaster.py
@@ -62,7 +62,8 @@ def gen_test_sample(data, past_seq_len):
 
     def test_forecast_mtnet(self):
         # TODO hacking to fix a bug
-        model = MTNetForecaster(target_dim=1,
+        target_dim = 1
+        model = MTNetForecaster(target_dim=target_dim,
                                 feature_dim=self.x_train.shape[-1],
                                 long_series_num=self.long_num,
                                 series_length=self.time_step
@@ -76,8 +77,9 @@ def gen_test_sample(data, past_seq_len):
                   validation_data=([x_val_long, x_val_short], self.y_val),
                   batch_size=32,
                   distributed=False)
-        model.evaluate([x_val_long, x_val_short], self.y_val)
-        model.predict([x_test_long, x_test_short])
+        assert model.evaluate([x_val_long, x_val_short], self.y_val)
+        predict_result = model.predict([x_test_long, x_test_short])
+        assert predict_result.shape == (self.x_test.shape[0], target_dim)
 
 
 if __name__ == "__main__":