
Commit

running for DE but not DER
beckynevin committed Jul 17, 2024
1 parent 126803f commit 1aa58d9
Showing 4 changed files with 93 additions and 69 deletions.
25 changes: 1 addition & 24 deletions src/scripts/DeepEnsemble.py
@@ -407,13 +407,7 @@ def beta_type(value):
config.get_item("model", "save_final_checkpoint", "DE"),
)
print("model name is ", model_name)
"""
EPOCHS=100,
path_to_model="models/",
data_prescription="linear_homoskedastic",
inject_type="predictive",
noise_level="low",
save_all_checkpoints=False,"""
print("dim is ", dim)
model_ensemble = train.train_DE(
trainDataLoader,
x_val,
@@ -446,20 +440,3 @@ def beta_type(value):
n_hidden=config.get_item("model", "n_hidden", "DE"),
verbose=config.get_item("model", "verbose", "DE"),
)
"""
if config.get_item("analysis", "run_analysis", "DE"):
# now run the analysis on the resulting checkpoints
chk_module = AggregateCheckpoints()
print('n_models', config.get_item("model", "n_models", "DE"))
print('n_epochs', config.get_item("model", "n_epochs", "DE"))
for nmodel in range(config.get_item("model", "n_models", "DE")):
for epoch in range(config.get_item("model", "n_epochs", "DE")):
chk = chk_module.load_DE_checkpoint(
model_name,
nmodel,
epoch,
config.get_item("model", "BETA", "DE"),
DEVICE)
# things to grab: 'valid_mse' and 'valid_bnll'
print(chk)
"""
128 changes: 87 additions & 41 deletions src/scripts/ExperimentalExpectedSigmaTest.py
@@ -232,36 +232,50 @@ def beta_type(value):
#for i, noise in enumerate(noise_list):
for typei in inject_type_list:
# now create a test set
size_df = 1000
data = DataPreparation()
data.sample_params_from_prior(1000)
data.simulate_data(
data.params,
sigma_inject,
"linear_homoskedastic",
inject_type=typei,
seed=41,
)
df_array = data.get_dict()
# Convert non-tensor entries to tensors
df = {}
for key, value in df_array.items():
if dim == "0D":
data.sample_params_from_prior(size_df)
data.simulate_data(
data.params,
sigma_inject,
"linear_homoskedastic",
inject_type=typei,
seed=41,
)
df_array = data.get_dict()
# Convert non-tensor entries to tensors
df = {}
for key, value in df_array.items():

if isinstance(value, TensorDataset):
# Keep tensors as they are
df[key] = value
else:
# Convert lists to tensors
df[key] = torch.tensor(value)
len_df = len(df["params"][:, 0].numpy())
len_x = np.shape(df["output"])[1]
ms_array = np.repeat(df["params"][:, 0].numpy(), len_x)
bs_array = np.repeat(df["params"][:, 1].numpy(), len_x)
xs_array = np.reshape(df["inputs"].numpy(), (len_df * len_x))
ys_array = np.reshape(df["output"].numpy(), (len_df * len_x))
if isinstance(value, TensorDataset):
# Keep tensors as they are
df[key] = value
else:
# Convert lists to tensors
df[key] = torch.tensor(value)
len_df = len(df["params"][:, 0].numpy())
len_x = np.shape(df["output"])[1]
ms_array = np.repeat(df["params"][:, 0].numpy(), len_x)
bs_array = np.repeat(df["params"][:, 1].numpy(), len_x)
xs_array = np.reshape(df["inputs"].numpy(), (len_df * len_x))
ys_array = np.reshape(df["output"].numpy(), (len_df * len_x))

inputs = np.array([xs_array, ms_array, bs_array]).T
inputs = np.array([xs_array, ms_array, bs_array]).T
elif dim == "2D":
data.sample_params_from_prior(
size_df,
low=[1, 1, -1.5],
high=[10, 10, 1.5],
n_params=3,
seed=41)
model_inputs, model_outputs = data.simulate_data_2d(
size_df,
data.params,
image_size=32,
inject_type=typei)
model_inputs, model_outputs = DataPreparation.normalize(
inputs, ys_array, False
model_inputs, model_outputs, False
)
_, x_test, _, y_test = DataPreparation.train_val_split(
model_inputs, model_outputs, val_proportion=0.1,
@@ -301,30 +315,62 @@ def beta_type(value):
# checkpoint['model_state_dict'])
model.eval()
# now run on the x_test
y_pred = model(torch.Tensor(x_test))
print(y_pred)
y_pred = model(torch.Tensor(x_test)).detach().numpy()
if model_type == "DER":
assert model_type == "DER", "stop haven't written this yet"
elif model_type == "DE":
sigma = np.sqrt(y_pred[:, 1])


print(x_test)
plt.clf()
plt.scatter(y_test, y_pred[:, 0])
plt.errorbar(y_test, y_pred[:, 0], yerr=sigma, fmt='o', linestyle='None')
plt.xlabel('true')
plt.ylabel('predicted')
plt.show()

plt.clf()
plt.hist(sigma, alpha=0.5)
plt.axvline(x=np.mean(sigma))
plt.title(str(round(np.mean(sigma), 2)))
plt.xlabel('output sigma')
plt.show()

STOP

print(np.shape(x_test))
for i in range(10):
plt.clf()
plt.imshow(x_test[i,:,:])
plt.title(f'y_true = {y_test[i]}, y_pred = {y_pred[i, 0]} +/- {sigma[i]}')
plt.show()



print(x_test[:, 1])
print('mean of predicted sigmas', np.mean(sigma))
if typei == "predictive":
y_noisy = y_test
y_noiseless = x_test[:, 1] * x_test[:, 0] + x_test[:, 2]
sub = y_noisy - y_noiseless
label = r"$y_{noisy} - y_{noiseless}$"
elif typei == "feature":
y_noisy = x_test[:, 1] * x_test[:, 0] + x_test[:, 2]
y_noiseless = y_test
sub = y_noisy - y_noiseless # / x_test[:, 1]
label = r"$(y_{noisy} - y_{noiseless})$" # / m$'
# finally, analytically propagate
if dim == "0D":
print('mean of ms', np.mean(x_test[:, 1]))
true_analytic = sigma_inject * np.mean(x_test[:, 1])
if dim == "0D":
if typei == "predictive":
y_noisy = y_test
y_noiseless = x_test[:, 1] * x_test[:, 0] + x_test[:, 2]
sub = y_noisy - y_noiseless
label = r"$y_{noisy} - y_{noiseless}$"
elif typei == "feature":
y_noisy = x_test[:, 1] * x_test[:, 0] + x_test[:, 2]
y_noiseless = y_test
sub = y_noisy - y_noiseless # / x_test[:, 1]
label = r"$(y_{noisy} - y_{noiseless})$" # / m$'
# finally, analytically propagate
if dim == "0D":
print('mean of ms', np.mean(x_test[:, 1]))
true_analytic = sigma_inject * np.mean(x_test[:, 1])
elif dim == "2D":
if typei == "predictive":
assert "haven't done this yet"
elif typei == "feature":
assert "haven't done this yet"

plt.clf()
if noise == "high":
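Note: the analytic comparison in the 0D branch above is standard linear error propagation. For y = m * x + b with Gaussian noise of width sigma injected into the feature x, the induced output scatter is |m| * sigma, and the script averages m over the test set to get the single number true_analytic = sigma_inject * mean(m). A small self-contained check of that relation, using illustrative stand-in values rather than the script's actual draws:

    import numpy as np

    rng = np.random.default_rng(41)
    sigma_inject = 1.0        # stand-in for the script's injected noise width
    m, b = 5.0, 2.0           # one fixed slope/intercept for the check
    x = rng.uniform(0, 10, 100_000)

    y_noiseless = m * x + b
    y_noisy = m * (x + rng.normal(0.0, sigma_inject, x.size)) + b

    print(np.std(y_noisy - y_noiseless))  # ~ m * sigma_inject = 5.0
    print(m * sigma_inject)               # analytic value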
1 change: 1 addition & 0 deletions src/train/train.py
@@ -825,6 +825,7 @@ def train_DE(
},
filename,
)
print('saved final checkpoint', filename)
model_ensemble.append(model)
final_mse.append(mse)

8 changes: 4 additions & 4 deletions src/utils/defaults.py
@@ -105,16 +105,16 @@
"model_engine": "DE",
"model_type": "DE",
"data_prescription": "linear_homoskedastic",
"data_dimension": "0D",
"data_dimension": "2D",
"n_models": 1,
"n_epochs": 100,
"BETA": 0.5,
"COEFF": 0.01,
"loss_type": "bnll_loss"
"loss_type": "DER"
},
"analysis": {
"noise_level": "low",
"model_names_list": ["DE"],
"noise_level": "high",
"model_names_list": ["DER"],
"inject_type_list": ["feature"],
# ["DER_wst", "DE_desiderata_2"],
# for architecture: ["DER"], #, "DE_desiderata_2"],
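Note: the scripts read these defaults through calls like config.get_item("model", "n_hidden", "DE") (see the DeepEnsemble.py hunk above). The stand-in below only illustrates the lookup shape implied by those calls and by this diff; it is not the real Config/defaults machinery in src/utils/, and the nesting of the actual defaults dict is an assumption.

    # Simplified stand-in, not the repo's Config class: the block below copies
    # the post-commit values shown in the diff, and get_item just indexes it.
    DE_DEFAULTS = {
        "model": {
            "model_engine": "DE",
            "model_type": "DE",
            "data_prescription": "linear_homoskedastic",
            "data_dimension": "2D",   # "0D" before this commit
            "n_models": 1,
            "n_epochs": 100,
            "BETA": 0.5,
            "COEFF": 0.01,
            "loss_type": "DER",       # "bnll_loss" before this commit
        },
        "analysis": {
            "noise_level": "high",          # "low" before this commit
            "model_names_list": ["DER"],    # ["DE"] before this commit
            "inject_type_list": ["feature"],
        },
    }

    def get_item(section, key, name, defaults=DE_DEFAULTS):
        # `name` ("DE" in the scripts) selects the model block; this stand-in
        # only carries the one block shown in the diff, so it is ignored here
        return defaults[section][key]

    print(get_item("model", "data_dimension", "DE"))  # -> "2D"
    print(get_item("analysis", "noise_level", "DE"))  # -> "high"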
