From afdcd09cdb1377ed4b118f6ddcd789d4009a75f2 Mon Sep 17 00:00:00 2001
From: liangs6212
Date: Tue, 30 Aug 2022 11:58:49 +0800
Subject: [PATCH] remove tf1 UTs

---
 python/chronos/dev/test/run-pytests-tf1.sh    |  39 ---
 .../test/bigdl/chronos/model/tf1/__init__.py  |  15 --
 .../chronos/model/tf1/test_Seq2Seq_keras.py   | 236 ------------------
 .../model/tf1/test_VanillaLSTM_keras.py       | 105 --------
 .../chronos/model/tf1/test_mtnet_keras.py     | 116 ---------
 5 files changed, 511 deletions(-)
 delete mode 100755 python/chronos/dev/test/run-pytests-tf1.sh
 delete mode 100644 python/chronos/test/bigdl/chronos/model/tf1/__init__.py
 delete mode 100644 python/chronos/test/bigdl/chronos/model/tf1/test_Seq2Seq_keras.py
 delete mode 100644 python/chronos/test/bigdl/chronos/model/tf1/test_VanillaLSTM_keras.py
 delete mode 100644 python/chronos/test/bigdl/chronos/model/tf1/test_mtnet_keras.py

diff --git a/python/chronos/dev/test/run-pytests-tf1.sh b/python/chronos/dev/test/run-pytests-tf1.sh
deleted file mode 100755
index 545318ff1f38..000000000000
--- a/python/chronos/dev/test/run-pytests-tf1.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-
-#
-# Copyright 2016 The BigDL Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-cd "`dirname $0`"
-cd ../..
-
-export PYSPARK_PYTHON=python
-export PYSPARK_DRIVER_PYTHON=python
-if [ -z "${OMP_NUM_THREADS}" ]; then
-    export OMP_NUM_THREADS=1
-fi
-
-ray stop -f
-
-echo "Running chronos tests TF1 and Deprecated API"
-python -m pytest -v test/bigdl/chronos/model/tf1
-
-exit_status_0=$?
-if [ $exit_status_0 -ne 0 ];
-then
-    exit $exit_status_0
-fi
-
-ray stop -f
diff --git a/python/chronos/test/bigdl/chronos/model/tf1/__init__.py b/python/chronos/test/bigdl/chronos/model/tf1/__init__.py
deleted file mode 100644
index 2151a805423a..000000000000
--- a/python/chronos/test/bigdl/chronos/model/tf1/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Copyright 2016 The BigDL Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
diff --git a/python/chronos/test/bigdl/chronos/model/tf1/test_Seq2Seq_keras.py b/python/chronos/test/bigdl/chronos/model/tf1/test_Seq2Seq_keras.py
deleted file mode 100644
index b7f80212858c..000000000000
--- a/python/chronos/test/bigdl/chronos/model/tf1/test_Seq2Seq_keras.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#
-# Copyright 2016 The BigDL Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import tensorflow as tf
-import pytest
-
-from bigdl.orca.test_zoo_utils import ZooTestCase
-from bigdl.chronos.model.tf1.Seq2Seq_keras import LSTMSeq2Seq
-from bigdl.chronos.autots.deprecated.feature.time_sequence import TimeSequenceFeatureTransformer
-from numpy.testing import assert_array_almost_equal
-import pandas as pd
-import numpy as np
-
-
-@pytest.mark.skipif(tf.__version__ > '2.0.0', reason="Run only when tf==1.15.0.")
-class TestSeq2Seq(ZooTestCase):
-
-    def setup_method(self, method):
-        # super().setup_method(method)
-        self.train_data = pd.DataFrame(data=np.random.randn(64, 4))
-        self.val_data = pd.DataFrame(data=np.random.randn(16, 4))
-        self.test_data = pd.DataFrame(data=np.random.randn(16, 4))
-
-        self.past_seq_len = 6
-        self.future_seq_len_1 = 1
-        self.future_seq_len_2 = 2
-
-        # use roll method in time_sequence
-        self.feat = TimeSequenceFeatureTransformer()
-
-        self.config = {
-            'batch_size': 32,
-            'epochs': 1,
-            'latent_dim': 8
-        }
-
-        self.model_1 = LSTMSeq2Seq(check_optional_config=False,
-                                   future_seq_len=self.future_seq_len_1)
-        self.model_2 = LSTMSeq2Seq(check_optional_config=False,
-                                   future_seq_len=self.future_seq_len_2)
-
-        self.fitted = False
-        self.predict_1 = None
-        self.predict_2 = None
-
-    def teardown_method(self, method):
-        pass
-
-    def test_fit_eval_1(self):
-        x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
-                                                     past_seq_len=self.past_seq_len,
-                                                     future_seq_len=self.future_seq_len_1)
-        print("fit_eval_future_seq_len_1:",
-              self.model_1.fit_eval((x_train_1, y_train_1), **self.config))
-        assert self.model_1.past_seq_len == 6
-        assert self.model_1.feature_num == 4
-        assert self.model_1.future_seq_len == 1
-        assert self.model_1.target_col_num == 1
-
-    def test_fit_eval(self):
-        past_seq_len = 6
-        future_seq_len = 2
-        input_dim = 5
-        output_dim = 4
-        x_train = np.random.rand(100, past_seq_len, input_dim)
-        y_train = np.random.rand(100, future_seq_len, output_dim)
-        x_test = np.random.rand(100, past_seq_len, input_dim)
-        y_test = np.random.rand(100, future_seq_len, output_dim)
-        model = LSTMSeq2Seq(check_optional_config=False,
-                            future_seq_len=future_seq_len)
-        model_config = {
-            'batch_size': 32,
-            'epochs': 1,
-            'latent_dim': 8,
-            'dropout': 0.2
-        }
-        model.fit_eval((x_train, y_train), **model_config)
-        y_pred = model.predict(x_test)
-        rmse, smape = model.evaluate(x=x_test, y=y_test, metric=["rmse", "smape"])
-        assert rmse.shape == smape.shape
-        assert rmse.shape == (future_seq_len, output_dim)
-
-        assert model.past_seq_len == past_seq_len
-        assert model.future_seq_len == future_seq_len
-        assert model.feature_num == input_dim
-        assert model.target_col_num == output_dim
-        assert y_pred.shape == y_test.shape
-
-    def test_fit_eval_2(self):
-        x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
-                                                     past_seq_len=self.past_seq_len,
-                                                     future_seq_len=self.future_seq_len_2)
-        print("fit_eval_future_seq_len_2:",
-              self.model_2.fit_eval((x_train_2, y_train_2), **self.config))
-        assert self.model_2.future_seq_len == 2
-
-        self.fitted = True
-
-    def test_evaluate_1(self):
-        x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
-                                                     past_seq_len=self.past_seq_len,
-                                                     future_seq_len=self.future_seq_len_1)
-        x_val_1, y_val_1 = self.feat._roll_train(self.val_data,
-                                                 past_seq_len=self.past_seq_len,
-                                                 future_seq_len=self.future_seq_len_1)
-
-        self.model_1.fit_eval((x_train_1, y_train_1), **self.config)
-
-        print("evaluate_future_seq_len_1:", self.model_1.evaluate(x_val_1,
-                                                                  y_val_1,
-                                                                  metric=['mse',
-                                                                          'r2']))
-
-    def test_evaluate_2(self):
-        x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
-                                                     past_seq_len=self.past_seq_len,
-                                                     future_seq_len=self.future_seq_len_2)
-        x_val_2, y_val_2 = self.feat._roll_train(self.val_data,
-                                                 past_seq_len=self.past_seq_len,
-                                                 future_seq_len=self.future_seq_len_2)
-
-        self.model_2.fit_eval((x_train_2, y_train_2), **self.config)
-
-        print("evaluate_future_seq_len_2:", self.model_2.evaluate(x_val_2,
-                                                                  y_val_2,
-                                                                  metric=['mse',
-                                                                          'r2']))
-
-    def test_predict_1(self):
-        x_train_1, y_train_1 = self.feat._roll_train(self.train_data,
-                                                     past_seq_len=self.past_seq_len,
-                                                     future_seq_len=self.future_seq_len_1)
-        x_test_1 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
-        self.model_1.fit_eval((x_train_1, y_train_1), **self.config)
-
-        predict_1 = self.model_1.predict(x_test_1)
-        assert predict_1.shape == (x_test_1.shape[0], self.future_seq_len_1)
-
-    def test_predict_2(self):
-        x_train_2, y_train_2 = self.feat._roll_train(self.train_data,
-                                                     past_seq_len=self.past_seq_len,
-                                                     future_seq_len=self.future_seq_len_2)
-        x_test_2 = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
-        self.model_2.fit_eval((x_train_2, y_train_2), **self.config)
-
-        predict_2 = self.model_2.predict(x_test_2)
-        assert predict_2.shape == (x_test_2.shape[0], self.future_seq_len_2)
-
-    def test_save_restore_single_step(self):
-        future_seq_len = 1
-        x_train, y_train = self.feat._roll_train(self.train_data,
-                                                 past_seq_len=self.past_seq_len,
-                                                 future_seq_len=future_seq_len)
-        x_test = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
-        model = LSTMSeq2Seq(future_seq_len=future_seq_len)
-        model.fit_eval((x_train, y_train), **self.config)
-
-        predict_before = model.predict(x_test)
-        new_model = LSTMSeq2Seq()
-
-        ckpt = os.path.join("/tmp", "seq2seq.ckpt")
-        model.save(ckpt)
-        new_model.restore(ckpt)
-        predict_after = new_model.predict(x_test)
-        assert_array_almost_equal(predict_before, predict_after, decimal=2), \
-            "Prediction values are not the same after restore: " \
-            "predict before is {}, and predict after is {}".format(predict_before, predict_after)
-        new_config = {'epochs': 1, 'latent_dim': 8}
-        new_model.fit_eval((x_train, y_train), **new_config)
-        os.remove(ckpt)
-
-    def test_save_restore_multistep(self):
-        future_seq_len = np.random.randint(2, 6)
-        x_train, y_train = self.feat._roll_train(self.train_data,
-                                                 past_seq_len=self.past_seq_len,
-                                                 future_seq_len=future_seq_len)
-        x_test = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
-        model = LSTMSeq2Seq(future_seq_len=future_seq_len)
-        model.fit_eval((x_train, y_train), **self.config)
-
-        predict_before = model.predict(x_test)
-        new_model = LSTMSeq2Seq()
-
-        ckpt = os.path.join("/tmp", "seq2seq.ckpt")
-        model.save(ckpt)
-        new_model.restore(ckpt)
-        predict_after = new_model.predict(x_test)
-        assert_array_almost_equal(predict_before, predict_after, decimal=2), \
-            "Prediction values are not the same after restore: " \
-            "predict before is {}, and predict after is {}".format(predict_before, predict_after)
-        new_config = {'epochs': 1, 'latent_dim': 8}
-        new_model.fit_eval((x_train, y_train), **new_config)
-        os.remove(ckpt)
-
-    def test_predict_with_uncertainty(self,):
-        future_seq_len = np.random.randint(2, 6)
-        x_train, y_train = self.feat._roll_train(self.train_data,
-                                                 past_seq_len=self.past_seq_len,
-                                                 future_seq_len=future_seq_len)
-        x_test = self.feat._roll_test(self.test_data, past_seq_len=self.past_seq_len)
-        model = LSTMSeq2Seq(future_seq_len=future_seq_len)
-        model.fit_eval((x_train, y_train), mc=True, **self.config)
-
-        prediction, uncertainty = model.predict_with_uncertainty(x_test, n_iter=2)
-        assert prediction.shape == (x_test.shape[0], future_seq_len)
-        assert uncertainty.shape == (x_test.shape[0], future_seq_len)
-        assert np.any(uncertainty)
-
-        new_model = LSTMSeq2Seq()
-
-        ckpt = os.path.join("/tmp", "seq2seq.ckpt")
-        model.save(ckpt)
-        new_model.restore(ckpt)
-        prediction_after, uncertainty_after = new_model.predict_with_uncertainty(x_test, n_iter=2)
-        assert prediction_after.shape == (x_test.shape[0], future_seq_len)
-        assert uncertainty_after.shape == (x_test.shape[0], future_seq_len)
-        assert np.any(uncertainty_after)
-
-        os.remove(ckpt)
-
-
-if __name__ == '__main__':
-    pytest.main([__file__])
diff --git a/python/chronos/test/bigdl/chronos/model/tf1/test_VanillaLSTM_keras.py b/python/chronos/test/bigdl/chronos/model/tf1/test_VanillaLSTM_keras.py
deleted file mode 100644
index 236185ef0a48..000000000000
--- a/python/chronos/test/bigdl/chronos/model/tf1/test_VanillaLSTM_keras.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#
-# Copyright 2016 The BigDL Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import pytest
-from unittest import TestCase
-from bigdl.chronos.model.tf1.VanillaLSTM_keras import VanillaLSTM
-import numpy as np
-import tempfile
-import os
-import tensorflow as tf
-
-
-def create_data():
-    num_train_samples = 1000
-    num_val_samples = 400
-    num_test_samples = 400
-    input_time_steps = 7
-    input_feature_dim = 4
-    output_dim = np.random.randint(1, 5)
-
-    def get_x_y(num_samples):
-        x = np.random.rand(num_samples, input_time_steps, input_feature_dim)
-        y = np.random.randn(num_samples, output_dim)
-        return x, y
-
-    train_data = get_x_y(num_train_samples)
-    val_data = get_x_y(num_val_samples)
-    test_data = get_x_y(num_test_samples)
-    return train_data, val_data, test_data
-
-@pytest.mark.skipif(tf.__version__ > '2.0.0', reason="Run only when tf==1.15.0.")
-class TestVanillaLSTM(TestCase):
-    train_data, val_data, test_data = create_data()
-    model = VanillaLSTM()
-
-    def test_fit_evaluate(self):
-        config = {"batch_size": 128}
-        self.model.fit_eval((self.train_data[0], self.train_data[1]), self.val_data, **config)
-        mse, smape = self.model.evaluate(self.val_data[0], self.val_data[1],
-                                         metrics=["mse", "smape"])
-
-    def test_config(self):
-        config = {"lstm_units": [128] * 2,
-                  "dropouts": [0.2] * 2}
-        self.model.fit_eval((self.train_data[0], self.train_data[1]), self.val_data, **config)
-
-        config = {"lstm_units": 128,
-                  "dropouts": 0.2}
-        self.model.fit_eval((self.train_data[0], self.train_data[1]), self.val_data, **config)
-
-        with pytest.raises(RuntimeError):
-            config = {"lstm_units": 0.1,
-                      "dropouts": 0.2}
-            self.model.fit_eval((self.train_data[0], self.train_data[1]), self.val_data, **config)
-
-        with pytest.raises(RuntimeError):
-            config = {"lstm_units": [128] * 2,
-                      "dropouts": [0.2] * 3}
-            self.model.fit_eval((self.train_data[0], self.train_data[1]), self.val_data, **config)
-
-        with pytest.raises(RuntimeError):
-            config = {"lstm_units": 128,
-                      "dropouts": [0.2] * 2}
-            self.model.fit_eval((self.train_data[0], self.train_data[1]), self.val_data, **config)
-
-    def test_predict_save_restore(self):
-        model = VanillaLSTM()
-        config = {"lstm_units": [128] * 2,
-                  "dropouts": [0.2] * 2,
-                  "batch_size": 128}
-        model.fit_eval((self.train_data[0], self.train_data[1]), self.val_data, **config)
-        pred = model.predict(self.test_data[0])
-        assert pred.shape == self.test_data[1].shape
-        with tempfile.TemporaryDirectory() as tmp_dir_name:
-            ckpt_name = os.path.join(tmp_dir_name, "ckpt")
-            model.save(ckpt_name)
-            model_1 = VanillaLSTM()
-            model_1.restore(ckpt_name)
-            pred_1 = model_1.predict(self.test_data[0])
-            assert np.allclose(pred, pred_1)
-
-    def test_predict_with_uncertainty(self):
-        config = {"batch_size": 128}
-        self.model.fit_eval((self.train_data[0], self.train_data[1]), self.val_data, **config)
-        prediction, uncertainty = self.model.predict_with_uncertainty(self.test_data[0], n_iter=100)
-        assert prediction.shape == self.test_data[1].shape
-        assert uncertainty.shape == self.test_data[1].shape
-        assert np.any(uncertainty)
-
-
-if __name__ == '__main__':
-    pytest.main([__file__])
diff --git a/python/chronos/test/bigdl/chronos/model/tf1/test_mtnet_keras.py b/python/chronos/test/bigdl/chronos/model/tf1/test_mtnet_keras.py
deleted file mode 100644
index 5b1f1241edab..000000000000
--- a/python/chronos/test/bigdl/chronos/model/tf1/test_mtnet_keras.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#
-# Copyright 2016 The BigDL Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import shutil
-
-import pytest
-
-from unittest import TestCase
-from bigdl.chronos.model.tf1.MTNet_keras import MTNetKeras
-from bigdl.chronos.data import TSDataset
-import pandas as pd
-import numpy as np
-import tensorflow as tf
-from numpy.testing import assert_array_almost_equal
-
-
-def create_data():
-    lookback = 3
-    horizon = 1
-    def get_data(num_samples):
-        values = np.random.randn(num_samples)
-        df = pd.DataFrame({'timestep': pd.date_range(start='2010-01-01',
-                                                     freq='m',
-                                                     periods=num_samples),
-                           'value 1': values,
-                           'value 2': values,
-                           'value 3': values,
-                           'value 4': values})
-        return df
-    tsdata_train = TSDataset.from_pandas(get_data(32),
-                                         target_col=['value 1', 'value 2', 'value 3', 'value 4'],
-                                         dt_col='timestep',
-                                         with_split=False)
-    tsdata_test = TSDataset.from_pandas(get_data(16),
-                                        target_col=['value 1', 'value 2', 'value 3', 'value 4'],
-                                        dt_col='timestep',
-                                        with_split=False)
-    for tsdata in [tsdata_train, tsdata_test]:
-        tsdata.roll(lookback=lookback, horizon=horizon)
-    return tsdata_train, tsdata_test
-
-
-@pytest.mark.skipif(tf.__version__ > '2.0.0', reason="Run only when tf==1.15.0.")
-class TestMTNetKeras(TestCase):
-
-    def setup_method(self, method):
-        tf.keras.backend.clear_session()
-        train_data, test_data = create_data()
-        self.x_train, y_train = train_data.to_numpy()
-        self.y_train = y_train[:, :, 0]
-        self.x_val, y_val = test_data.to_numpy()
-        self.y_val = y_val[:, :, 0]
-        self.x_test, _ = test_data.to_numpy()
-        self.model = MTNetKeras()
-        self.config = {"long_num": 2,
-                       "time_step": 1,
-                       "ar_window": 1,  # np.random.randint(1, 3),
-                       "cnn_height": 1,  # np.random.randint(1, 3),
-                       "cnn_hid_size": 2,
-                       "rnn_hid_sizes": [2, 2],
-                       "epochs": 1}
-
-    def teardown_method(self, method):
-        pass
-
-    def test_fit_evaluate(self):
-        self.model.fit_eval(data=(self.x_train, self.y_train),
-                            validation_data=(self.x_val, self.y_val),
-                            **self.config)
-        self.model.evaluate(self.x_val, self.y_val)
-
-    def test_save_restore(self):
-        import os
-        self.model.fit_eval(data=(self.x_train, self.y_train),
-                            validation_data=(self.x_val, self.y_val),
-                            **self.config)
-        y_pred = self.model.predict(self.x_test)
-        assert y_pred.shape == (self.x_test.shape[0], self.y_train.shape[1])
-        dirname = "/tmp"
-        restored_model = MTNetKeras()
-        ckpt = os.path.join(dirname, "mtnet.ckpt")
-        self.model.save(checkpoint_file=ckpt)
-        restored_model.restore(checkpoint_file=ckpt)
-        predict_after = restored_model.predict(self.x_test)
-        assert_array_almost_equal(y_pred, predict_after, decimal=2), \
-            "Prediction values are not the same after restore: " \
-            "predict before is {}, and predict after is {}".format(y_pred, predict_after)
-        restored_model.fit_eval((self.x_train, self.y_train), epochs=1)
-        restored_model.evaluate(self.x_val, self.y_val)
-        os.remove(ckpt)
-
-    def test_predict_with_uncertainty(self):
-        self.model.fit_eval(data=(self.x_train, self.y_train),
-                            validation_data=(self.x_val, self.y_val),
-                            mc=True,
-                            **self.config)
-        pred, uncertainty = self.model.predict_with_uncertainty(self.x_test, n_iter=2)
-        assert pred.shape == (self.x_test.shape[0], self.y_train.shape[1])
-        assert uncertainty.shape == pred.shape
-        # assert np.any(uncertainty) It may happen that all results are dropped out.
-
-
-if __name__ == '__main__':
-    pytest.main([__file__])