Merge pull request Lightning-Universe#1 from ganprad/warnings_fix
Fixed warnings:

1) Fixed this warning in `SklearnDataModule`:

```
DeprecationWarning: Converting `np.integer` or `np.signedinteger` to a dtype is deprecated. The current result is `np.dtype(np.int_)` which is not strictly correct. Note that the result depends on the system. To ensure stable results you may want to use `np.int64` or `np.int32`.
```

2) Fixed this warning in `LinearRegression` in linear_regression.py:

```
UserWarning: The {log:dict keyword} was deprecated in 0.9.1 and will be removed in 1.0.0
Please use self.log(...) inside the lightningModule instead.
# log on a step or aggregate epoch metric to the logger and/or progress bar
# (inside LightningModule)
self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True)
  warnings.warn(*args, **kwargs)
```
ganprad authored Dec 13, 2020
2 parents 13863cc + c8aeaa1 commit 7c5aaf0
Showing 2 changed files with 7 additions and 23 deletions.
2 changes: 1 addition & 1 deletion pl_bolts/datamodules/sklearn_datamodule.py
```diff
@@ -52,7 +52,7 @@ def __getitem__(self, idx):
         y = self.Y[idx]
 
         # Do not convert integer to float for classification data
-        if not y.dtype == np.integer:
+        if not ((y.dtype == np.int32) or (y.dtype == np.int64)):
             y = y.astype(np.float32)
 
         if self.X_transform:
```
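For context on fix 1: comparing an array's concrete dtype against the abstract `np.integer` forces NumPy to convert the abstract type to a platform-dependent concrete dtype, which is what recent NumPy releases warn about. A minimal sketch of the old and new checks; the `np.issubdtype` variant is an alternative not used in this commit:

```python
import numpy as np

y = np.array([0, 1, 2])

# Old check: `y.dtype == np.integer` converts the abstract np.integer
# into np.dtype(np.int_), a platform-dependent result, which is what
# emits the DeprecationWarning.
# was_int = (y.dtype == np.integer)

# Check from this commit: compare against the concrete integer dtypes.
is_int = (y.dtype == np.int32) or (y.dtype == np.int64)

# Alternative (not used here): matches any integer width, no warning.
also_int = np.issubdtype(y.dtype, np.integer)

print(is_int, also_int)  # True True for a default integer array
```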
28 changes: 6 additions & 22 deletions pl_bolts/models/regression/linear_regression.py
```diff
@@ -64,14 +64,8 @@ def training_step(self, batch, batch_idx):
         loss += self.hparams.l2_strength * l2_reg
 
         loss /= x.size(0)
-
-        tensorboard_logs = {'train_mse_loss': loss}
-        progress_bar_metrics = tensorboard_logs
-        return {
-            'loss': loss,
-            'log': tensorboard_logs,
-            'progress_bar': progress_bar_metrics
-        }
+        self.log('train_mse_loss', loss, on_epoch=True, prog_bar=True)
+        return loss
 
     def validation_step(self, batch, batch_idx):
         x, y = batch
@@ -81,13 +75,8 @@ def validation_step(self, batch, batch_idx):
 
     def validation_epoch_end(self, outputs):
         val_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-        tensorboard_logs = {'val_mse_loss': val_loss}
-        progress_bar_metrics = tensorboard_logs
-        return {
-            'val_loss': val_loss,
-            'log': tensorboard_logs,
-            'progress_bar': progress_bar_metrics
-        }
+        self.log('val_mse_loss', val_loss, on_epoch=True, prog_bar=True)
+        return val_loss
 
     def test_step(self, batch, batch_idx):
         x, y = batch
@@ -96,13 +85,8 @@ def test_step(self, batch, batch_idx):
 
     def test_epoch_end(self, outputs):
         test_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
-        tensorboard_logs = {'test_mse_loss': test_loss}
-        progress_bar_metrics = tensorboard_logs
-        return {
-            'test_loss': test_loss,
-            'log': tensorboard_logs,
-            'progress_bar': progress_bar_metrics
-        }
+        self.log('test_mse_loss', test_loss, on_epoch=True, prog_bar=True)
+        return test_loss
 
     def configure_optimizers(self):
         return self.optimizer(self.parameters(), lr=self.hparams.learning_rate)
```
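With the dict-style `log`/`progress_bar` returns replaced by `self.log(...)` (where `on_epoch=True` aggregates the step values into an epoch-level metric and `prog_bar=True` mirrors the old `progress_bar` entry), the model should train without the UserWarning. A minimal usage sketch, assuming the pl_bolts and pytorch_lightning APIs of this era; `load_diabetes`, the batch size, and `input_dim=10` are illustrative choices, not part of the commit:

```python
import pytorch_lightning as pl
from sklearn.datasets import load_diabetes

from pl_bolts.datamodules import SklearnDataModule
from pl_bolts.models.regression import LinearRegression

# Tabular regression data; the targets are float64, so SklearnDataset
# casts them to float32 via the dtype check patched above.
X, y = load_diabetes(return_X_y=True)
dm = SklearnDataModule(X, y, batch_size=32)

model = LinearRegression(input_dim=10)  # load_diabetes has 10 features

trainer = pl.Trainer(max_epochs=5)
trainer.fit(
    model,
    train_dataloader=dm.train_dataloader(),
    val_dataloaders=dm.val_dataloader(),
)
```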
