Skip to content

Commit

Permalink
n_hidden is now a required argument that must be passed
Browse files Browse the repository at this point in the history
  • Loading branch information
beckynevin committed May 20, 2024
1 parent b774803 commit 32434b5
Showing 1 changed file with 6 additions and 51 deletions.
57 changes: 6 additions & 51 deletions src/models/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,9 @@ def forward(self, x):
return torch.stack((gamma, nu, alpha, beta), dim=1)


def model_setup_DER(loss_type, DEVICE):
print('loss type', loss_type, type(loss_type))
def model_setup_DER(loss_type,
DEVICE,
n_hidden):
# initialize the model from scratch
if loss_type == "SDER":
Layer = SDERLayer
Expand All @@ -81,7 +82,8 @@ def model_setup_DER(loss_type, DEVICE):

# from https://github.com/pasteurlabs/unreasonable_effective_der
# /blob/main/x3_indepth.ipynb
model = torch.nn.Sequential(Model(4), Layer())
model = torch.nn.Sequential(Model(4, n_hidden),
Layer())
model = model.to(DEVICE)
return model, lossFn

Expand Down Expand Up @@ -118,60 +120,13 @@ def model_setup_DE(loss_type, DEVICE):
return model, lossFn


class de_no_var(nn.Module):
    """Deep-ensemble member MLP that predicts a point estimate only.

    Maps a 3-feature input through three hidden ReLU + dropout layers to a
    single output value. Use ``de_var`` instead when training with
    GaussianNLLLoss, which requires a 2-dim (mean, variance) output.
    """

    def __init__(self, n_hidden=100, drop_percent=0.1):
        """
        Parameters
        ----------
        n_hidden : int, optional
            Width of each hidden layer. Defaults to 100 (the previous
            hard-coded value), so existing callers are unaffected; made
            a parameter for consistency with ``Model``, which now takes
            ``n_hidden`` explicitly.
        drop_percent : float, optional
            Dropout probability applied after each hidden activation.
        """
        super().__init__()
        self.ln_1 = nn.Linear(3, n_hidden)
        self.act1 = nn.ReLU()
        self.drop1 = nn.Dropout(drop_percent)
        self.ln_2 = nn.Linear(n_hidden, n_hidden)
        self.act2 = nn.ReLU()
        self.drop2 = nn.Dropout(drop_percent)
        self.ln_3 = nn.Linear(n_hidden, n_hidden)
        self.act3 = nn.ReLU()
        self.drop3 = nn.Dropout(drop_percent)
        # Output dim is 1 (point estimate); it would need to be 2 if
        # this model were used with GaussianNLLLoss.
        self.ln_4 = nn.Linear(n_hidden, 1)

    def forward(self, x):
        """Forward pass; returns a tensor of shape (batch, 1)."""
        x = self.drop1(self.act1(self.ln_1(x)))
        x = self.drop2(self.act2(self.ln_2(x)))
        x = self.drop3(self.act3(self.ln_3(x)))
        return self.ln_4(x)


class de_var(nn.Module):
    """Deep-ensemble member MLP that predicts a mean and a variance.

    Maps a 3-feature input through three hidden ReLU + dropout layers to
    a 2-dim output — the last dim must be 2 because this model is paired
    with GaussianNLLLoss (mean, variance).
    """

    def __init__(self, n_hidden=100, drop_percent=0.1):
        """
        Parameters
        ----------
        n_hidden : int, optional
            Width of each hidden layer. Defaults to 100 (the previous
            hard-coded value), so existing callers are unaffected; made
            a parameter for consistency with ``Model``, which now takes
            ``n_hidden`` explicitly.
        drop_percent : float, optional
            Dropout probability applied after each hidden activation.
        """
        super().__init__()
        self.ln_1 = nn.Linear(3, n_hidden)
        self.act1 = nn.ReLU()
        self.drop1 = nn.Dropout(drop_percent)
        self.ln_2 = nn.Linear(n_hidden, n_hidden)
        self.act2 = nn.ReLU()
        self.drop2 = nn.Dropout(drop_percent)
        self.ln_3 = nn.Linear(n_hidden, n_hidden)
        self.act3 = nn.ReLU()
        self.drop3 = nn.Dropout(drop_percent)
        # Output dim must be 2 when using GaussianNLLLoss.
        self.ln_4 = nn.Linear(n_hidden, 2)

    def forward(self, x):
        """Forward pass; returns a tensor of shape (batch, 2)."""
        x = self.drop1(self.act1(self.ln_1(x)))
        x = self.drop2(self.act2(self.ln_2(x)))
        x = self.drop3(self.act3(self.ln_3(x)))
        return self.ln_4(x)


# This following is from PasteurLabs -
# https://github.com/pasteurlabs/unreasonable_effective_der/blob/main/models.py


class Model(nn.Module):
def __init__(self, n_output, n_hidden=64):
def __init__(self, n_output, n_hidden):
super().__init__()
self.model = nn.Sequential(
nn.Linear(3, n_hidden),
Expand Down

0 comments on commit 32434b5

Please sign in to comment.