[Nano HPO] add learning rate and batch size tuning in tests for showcase #4631

Merged: 1 commit, May 18, 2022
37 changes: 30 additions & 7 deletions python/nano/test/automl/pytorch/test_trainer.py
@@ -22,7 +22,8 @@
import bigdl.nano.automl.hpo.space as space

import torch
from _helper import BoringModel
from torch.utils.data import DataLoader, Dataset
from _helper import BoringModel, RandomDataset
import bigdl.nano.automl.hpo as hpo

class TestTrainer(TestCase):
@@ -31,12 +31,14 @@ def test_simple_model(self):

        @hpo.plmodel()
        class CustomModel(BoringModel):
            """Customized Model."""
            def __init__(self,
                         out_dim1,
                         out_dim2,
                         dropout_1,
                         dropout_2):
                         dropout_2,
                         learning_rate=0.1,
                         batch_size=16):
                super().__init__()
                layers = []
                input_dim = 32
@@ -46,16 +46,33 @@ def __init__(self,
                    layers.append(torch.nn.Tanh())
                    layers.append(torch.nn.Dropout(dropout))
                    input_dim = out_dim

                layers.append(torch.nn.Linear(input_dim, 2))

                self.layers: torch.nn.Module = torch.nn.Sequential(*layers)
                self.save_hyperparameters()

            def configure_optimizers(self):
                # set the initial learning rate in the optimizer
                print("setting initial learning rate to", str(self.hparams.learning_rate))
                self.optimizer = torch.optim.Adam(self.layers.parameters(),
                                                  lr=self.hparams.learning_rate)
                return [self.optimizer], []

            def train_dataloader(self):
                print("setting batch size to", str(self.hparams.batch_size))
                return DataLoader(RandomDataset(32, 64),
                                  batch_size=self.hparams.batch_size)

            def val_dataloader(self):
                return DataLoader(RandomDataset(32, 64),
                                  batch_size=self.hparams.batch_size)

        model = CustomModel(
            out_dim1=space.Categorical(16, 32),
            out_dim2=space.Categorical(16, 32),
            dropout_1=space.Categorical(0.1, 0.2, 0.3, 0.4, 0.5),
            dropout_2=space.Categorical(0.1, 0.2)
            dropout_2=space.Categorical(0.1, 0.2),
            learning_rate=space.Real(0.001, 0.01, log=True),
            batch_size=space.Categorical(32, 64)
        )

        trainer = Trainer(
@@ -77,10 +77,13 @@ def __init__(self,
        assert(study.best_trial)
        assert(best_model.summarize())
        trainer.fit(best_model)
        lr = best_model.optimizer.param_groups[0]['lr']
        assert(lr <= 0.01 and lr >= 0.001)
        batch_size = best_model.hparams.batch_size
        assert(batch_size == 32 or batch_size == 64)
        # score = trainer.callback_metrics["val_loss"].item()
        # print("final val_loss is:", score)



if __name__ == '__main__':
pytest.main([__file__])
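
Note on the collapsed hunk above (between `trainer = Trainer(` and the assertions): it builds the HPO-enabled Trainer and runs the search that samples the new learning_rate and batch_size spaces. A minimal sketch of that flow, assuming the search()/search_summary() interface described in the BigDL Nano HPO user guide; the argument names, target_metric, direction, and n_trials values below are illustrative assumptions, not the hidden test code:

# Sketch only: assumes the HPO-enabled Trainer exposes search()/search_summary();
# arguments and values are illustrative, not copied from the collapsed hunk.
best_model = trainer.search(model,                   # Optuna-backed trial loop over the spaces
                            target_metric="val_loss",
                            direction="minimize",
                            n_trials=2)
study = trainer.search_summary()                     # study object holding every trial

trainer.fit(best_model)                              # refit with the sampled hyperparameters
lr = best_model.optimizer.param_groups[0]["lr"]      # set in configure_optimizers()
assert 0.001 <= lr <= 0.01                           # inside space.Real(0.001, 0.01, log=True)
assert best_model.hparams.batch_size in (32, 64)     # one of space.Categorical(32, 64)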