From d5c85097c9790abb5af754ce8201064e4cdd550a Mon Sep 17 00:00:00 2001
From: Shengsheng Huang
Date: Wed, 18 May 2022 16:55:15 +0000
Subject: [PATCH] add learning rate and batch size tuning in tests for showcase

---
 .../nano/test/automl/pytorch/test_trainer.py | 35 ++++++++++++++++++++++++++++++-----
 1 file changed, 30 insertions(+), 5 deletions(-)

diff --git a/python/nano/test/automl/pytorch/test_trainer.py b/python/nano/test/automl/pytorch/test_trainer.py
index d5d870172a5..95eca55236b 100644
--- a/python/nano/test/automl/pytorch/test_trainer.py
+++ b/python/nano/test/automl/pytorch/test_trainer.py
@@ -22,7 +22,8 @@
 import bigdl.nano.automl.hpo.space as space
 
 import torch
-from _helper import BoringModel
+from torch.utils.data import DataLoader, Dataset
+from _helper import BoringModel, RandomDataset
 import bigdl.nano.automl.hpo as hpo
 
 class TestTrainer(TestCase):
@@ -31,12 +32,14 @@ def test_simple_model(self):
 
         @hpo.plmodel()
         class CustomModel(BoringModel):
+            """Customized Model."""
             def __init__(self,
                          out_dim1,
                          out_dim2,
                          dropout_1,
-                         dropout_2):
-
+                         dropout_2,
+                         learning_rate=0.1,
+                         batch_size=16):
                 super().__init__()
                 layers = []
                 input_dim = 32
@@ -46,16 +49,35 @@ def __init__(self,
                     layers.append(torch.nn.Tanh())
                     layers.append(torch.nn.Dropout(dropout))
                     input_dim = out_dim
 
                 layers.append(torch.nn.Linear(input_dim, 2))
                 self.layers: torch.nn.Module = torch.nn.Sequential(*layers)
+                self.save_hyperparameters()
+
+            def configure_optimizers(self):
+                # set learning rate in the optimizer
+                print("setting initial learning rate to", self.hparams.learning_rate)
+                self.optimizer = torch.optim.Adam(self.layers.parameters(),
+                                                  lr=self.hparams.learning_rate)
+                return [self.optimizer], []
+
+            def train_dataloader(self):
+                print("setting batch size to", self.hparams.batch_size)
+                return DataLoader(RandomDataset(32, 64),
+                                  batch_size=self.hparams.batch_size)
+
+            def val_dataloader(self):
+                return DataLoader(RandomDataset(32, 64),
+                                  batch_size=self.hparams.batch_size)
 
 
         model = CustomModel(
             out_dim1=space.Categorical(16,32),
             out_dim2=space.Categorical(16,32),
             dropout_1=space.Categorical(0.1, 0.2, 0.3, 0.4, 0.5),
-            dropout_2 = space.Categorical(0.1,0.2)
+            dropout_2=space.Categorical(0.1, 0.2),
+            learning_rate=space.Real(0.001, 0.01, log=True),
+            batch_size=space.Categorical(32, 64)
         )
 
         trainer = Trainer(
@@ -77,10 +99,13 @@ def __init__(self,
         assert(study.best_trial)
         assert(best_model.summarize())
         trainer.fit(best_model)
+        lr = best_model.optimizer.param_groups[0]['lr']
+        assert(0.001 <= lr <= 0.01)
+        batch_size = best_model.hparams.batch_size
+        assert(batch_size in (32, 64))
 
         # score = trainer.callback_metrics["val_loss"].item()
         # print("final val_loss is:", score)
 
-
 if __name__ == '__main__':
     pytest.main([__file__])
\ No newline at end of file
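
Note on the mechanism this patch exercises: LightningModule.save_hyperparameters()
records the constructor arguments under self.hparams, which is what lets the HPO
layer re-instantiate the model with sampled values and have configure_optimizers()
and the dataloaders pick them up. Below is a minimal stand-alone sketch of that
pattern in plain PyTorch Lightning, independent of BigDL Nano; SketchModel and
everything in it are illustrative names, not code from the test above.

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    import pytorch_lightning as pl

    class SketchModel(pl.LightningModule):
        def __init__(self, learning_rate=0.1, batch_size=16):
            super().__init__()
            self.layer = torch.nn.Linear(32, 2)
            # records learning_rate and batch_size on self.hparams, so the
            # methods below see whatever values the model was built with
            self.save_hyperparameters()

        def training_step(self, batch, batch_idx):
            (x,) = batch
            return self.layer(x).sum()  # dummy scalar loss

        def configure_optimizers(self):
            return torch.optim.Adam(self.parameters(),
                                    lr=self.hparams.learning_rate)

        def train_dataloader(self):
            return DataLoader(TensorDataset(torch.randn(64, 32)),
                              batch_size=self.hparams.batch_size)

    model = SketchModel(learning_rate=0.005, batch_size=32)
    assert model.hparams.learning_rate == 0.005
    pl.Trainer(max_epochs=1, logger=False,
               enable_checkpointing=False).fit(model)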
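
Note on the search space: BigDL Nano's HPO is powered by Optuna (the study object
asserted on in the test is an Optuna study), so space.Real(0.001, 0.01, log=True)
samples the learning rate log-uniformly in [1e-3, 1e-2] and space.Categorical(32, 64)
picks one of the two batch sizes, which is exactly the range the new assertions
check after the search. A rough raw-Optuna equivalent, assuming the space objects
map onto suggest_float/suggest_categorical; the objective below is a made-up
stand-in for the test's val_loss, not the real metric:

    import optuna

    def objective(trial):
        # mirror space.Real(0.001, 0.01, log=True) and space.Categorical(32, 64)
        lr = trial.suggest_float("learning_rate", 0.001, 0.01, log=True)
        batch_size = trial.suggest_categorical("batch_size", [32, 64])
        # made-up objective standing in for the real validation loss
        return (lr - 0.005) ** 2 + 0.001 * (batch_size == 64)

    study = optuna.create_study(direction="minimize")
    study.optimize(objective, n_trials=5)
    assert 0.001 <= study.best_params["learning_rate"] <= 0.01
    assert study.best_params["batch_size"] in (32, 64)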