diff --git a/federatedscope/contrib/trainer/local_entropy.py b/federatedscope/contrib/trainer/local_entropy.py
index eec78d438..8eaa911c0 100644
--- a/federatedscope/contrib/trainer/local_entropy.py
+++ b/federatedscope/contrib/trainer/local_entropy.py
@@ -83,7 +83,7 @@ def run_epoch(self, optimizer, criterion, current_global_model, mu):
         outputs = self.model(inputs)
         ce_loss = criterion(outputs, targets)
         loss = ce_loss + self._thermal * prox_term(self.model.state_dict(),
-            current_global_model)
+                                                   current_global_model)
         loss.backward()
         optimizer.step()
diff --git a/federatedscope/core/aggregators/fedopt_aggregator.py b/federatedscope/core/aggregators/fedopt_aggregator.py
index ae09d87be..29a9b3e99 100644
--- a/federatedscope/core/aggregators/fedopt_aggregator.py
+++ b/federatedscope/core/aggregators/fedopt_aggregator.py
@@ -16,7 +16,9 @@ def __init__(self, config, model, device='cpu'):
         if config.fedopt.annealing:
             self._annealing = True
             # TODO: generic scheduler construction
-            self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=4000, gamma=0.2)
+            self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
+                                                             step_size=4000,
+                                                             gamma=0.2)
         else:
             self._annealing = False