diff --git a/src/scripts/LossFunctions.py b/src/scripts/LossFunctions.py
index dd29a9f..83c493f 100644
--- a/src/scripts/LossFunctions.py
+++ b/src/scripts/LossFunctions.py
@@ -26,6 +26,16 @@ def parse_args():
     # path to save the model results
     parser.add_argument("--dir", default=DefaultsAnalysis["common"]["dir"])
     # now args for model
+    parser.add_argument(
+        "--data_prescription",
+        "-dp",
+        default=DefaultsAnalysis["model"]["data_prescription"],
+    )
+    parser.add_argument(
+        "--data_dimension",
+        "-dd",
+        default=DefaultsAnalysis["model"]["data_dimension"],
+    )
     parser.add_argument(
         "--n_models",
         type=int,
@@ -69,6 +79,13 @@ def parse_args():
         default=DefaultsAnalysis["analysis"]["model_names_list"],
         help="Beginning of name for saved checkpoints and figures",
     )
+    parser.add_argument(
+        "--inject_type_list",
+        type=list,
+        required=False,
+        default=DefaultsAnalysis["analysis"]["inject_type_list"],
+        help="Feature and predictive",
+    )
     parser.add_argument(
         "--n_epochs",
         type=int,
@@ -121,6 +138,8 @@ def parse_args():
             "model": {
                 "n_models": args.n_models,
                 "n_epochs": args.n_epochs,
+                "data_prescription": args.data_prescription,
+                "data_dimension": args.data_dimension,
                 "BETA": args.BETA,
                 "COEFF": args.COEFF,
                 "loss_type": args.loss_type,
@@ -128,6 +147,7 @@ def parse_args():
             "analysis": {
                 "noise_level_list": args.noise_level_list,
                 "model_names_list": args.model_names_list,
+                "inject_type_list": args.inject_type_list,
                 "plot": args.plot,
                 "savefig": args.savefig,
                 "verbose": args.verbose,
@@ -166,10 +186,13 @@ def beta_type(value):
     color_list = config.get_item("plots", "color_list", "Analysis")
     BETA = config.get_item("model", "BETA", "Analysis")
     COEFF = config.get_item("model", "COEFF", "Analysis")
+    n_models = config.get_item("model", "n_models", "Analysis")
     loss_type = config.get_item("model", "loss_type", "Analysis")
-    sigma_list = []
-    for noise in noise_list:
-        sigma_list.append(DataPreparation.get_sigma(noise))
+    prescription = config.get_item("model", "data_prescription", "Analysis")
+    inject_type_list = config.get_item(
+        "analysis", "inject_type_list", "Analysis"
+    )
+    dim = config.get_item("model", "data_dimension", "Analysis")
     root_dir = config.get_item("common", "dir", "Analysis")
     path_to_chk = root_dir + "checkpoints/"
     path_to_out = root_dir + "analysis/"
@@ -186,30 +209,34 @@ def beta_type(value):
     print("noise list", noise_list)
     chk_module = AggregateCheckpoints()
     mse_loss = {
-        model_name: {noise: [] for noise in noise_list}
-        for model_name in model_name_list
+        typei: {noise: [] for noise in noise_list}
+        for typei in inject_type_list
     }
     loss = {
-        model_name: {noise: [] for noise in noise_list}
-        for model_name in model_name_list
+        typei: {noise: [] for noise in noise_list}
+        for typei in inject_type_list
     }
     mse_loss_train = {
-        model_name: {noise: [] for noise in noise_list}
-        for model_name in model_name_list
+        typei: {noise: [] for noise in noise_list}
+        for typei in inject_type_list
    }
     loss_train = {
-        model_name: {noise: [] for noise in noise_list}
-        for model_name in model_name_list
+        typei: {noise: [] for noise in noise_list}
+        for typei in inject_type_list
     }
     n_epochs = config.get_item("model", "n_epochs", "Analysis")
     n_models = config.get_item("model", "n_models", "Analysis")
-    for model in model_name_list:
+    model = model_name_list[0]
+    for typei in inject_type_list:
         for noise in noise_list:
             # now run the analysis on the resulting checkpoints
             if model[0:3] == "DER":
                 for epoch in range(n_epochs):
                     chk = chk_module.load_checkpoint(
                         model,
+                        prescription,
+                        typei,
+                        dim,
                         noise,
                         epoch,
                         DEVICE,
@@ -219,10 +246,10 @@ def beta_type(value):
                     )
                     # path=path_to_chk)
                     # things to grab: 'valid_mse' and 'valid_bnll'
-                    mse_loss[model][noise].append(chk["valid_mse"])
-                    loss[model][noise].append(chk["valid_loss"])
-                    mse_loss_train[model][noise].append(chk["train_mse"])
-                    loss_train[model][noise].append(chk["train_loss"])
+                    mse_loss[typei][noise].append(chk["valid_mse"])
+                    loss[typei][noise].append(chk["valid_loss"])
+                    mse_loss_train[typei][noise].append(chk["train_mse"])
+                    loss_train[typei][noise].append(chk["train_loss"])
             elif model[0:2] == "DE":
                 for nmodel in range(n_models):
                     mse_loss_one_model = []
@@ -232,6 +259,9 @@ def beta_type(value):
                     for epoch in range(n_epochs):
                         chk = chk_module.load_checkpoint(
                             model,
+                            prescription,
+                            typei,
+                            dim,
                             noise,
                             epoch,
                             DEVICE,
@@ -244,86 +274,89 @@ def beta_type(value):
                         train_mse_loss_one_model.append(chk["train_mse"])
                         train_loss_one_model.append(chk["train_loss"])
-                    mse_loss[model][noise].append(mse_loss_one_model)
-                    loss[model][noise].append(loss_one_model)
-                    mse_loss_train[model][noise].append(
+                    mse_loss[typei][noise].append(mse_loss_one_model)
+                    loss[typei][noise].append(loss_one_model)
+                    mse_loss_train[typei][noise].append(
                         train_mse_loss_one_model
                     )
-                    loss_train[model][noise].append(train_loss_one_model)
+                    loss_train[typei][noise].append(train_loss_one_model)
 
     # make a two-paneled plot for the different noise levels
     # make one panel per model
     # for the noise levels:
     plt.clf()
     fig = plt.figure(figsize=(12, 10))
     # try this instead with a fill_between method
-    for i, model in enumerate(model_name_list):
-        ax = fig.add_subplot(2, len(model_name_list), i + 1)
+    for i, typei in enumerate(inject_type_list):
+        ax = fig.add_subplot(2, len(inject_type_list), i + 1)
         # Your plotting code for each model here
-        ax.set_title(model)  # Set title for each subplot
+        ax.set_title(typei)  # Set title for each subplot
         for i, noise in enumerate(noise_list):
+            sigma = DataPreparation.get_sigma(
+                noise, inject_type=typei, data_dimension=dim
+            )
             if model[0:3] == "DER":
                 ax.plot(
                     range(n_epochs),
-                    mse_loss_train[model][noise],
+                    mse_loss_train[typei][noise],
                     color=color_list[i],
-                    label=r"Train; $\sigma = $" + str(sigma_list[i]),
+                    label=r"Train; $\sigma = $" + str(sigma),
                     ls="--",
                 )
                 ax.plot(
                     range(n_epochs),
-                    mse_loss[model][noise],
+                    mse_loss[typei][noise],
                     color=color_list[i],
-                    label=r"Validation; $\sigma = $" + str(sigma_list[i]),
+                    label=r"Validation; $\sigma = $" + str(sigma),
                 )
             else:
                 ax.plot(
                     range(n_epochs),
-                    mse_loss_train[model][noise][0],
+                    mse_loss_train[typei][noise][0],
                     color=color_list[i],
                     ls="--",
                 )
                 ax.plot(
                     range(n_epochs),
-                    mse_loss[model][noise][0],
+                    mse_loss[typei][noise][0],
                     color=color_list[i],
                 )
         ax.set_ylabel("MSE Loss")
         ax.set_xlabel("Epoch")
-        if model[0:3] == "DER":
-            ax.set_title("Deep Evidential Regression")
-            plt.legend()
-        elif model[0:2] == "DE":
-            ax.set_title("Deep Ensemble")
-            ax.set_ylim([0, 250])
+        ax.set_title(typei)
+        plt.legend()
+        # ax.set_ylim([0, 250])
 
     # now make the other loss plots
-    for i, model in enumerate(model_name_list):
-        ax = fig.add_subplot(2, len(model_name_list), i + 3)
+    for i, typei in enumerate(inject_type_list):
+        ax = fig.add_subplot(2, len(inject_type_list), i + 3)
         # Your plotting code for each model here
         for i, noise in enumerate(noise_list):
+            sigma = DataPreparation.get_sigma(
+                noise, inject_type=typei, data_dimension=dim
+            )
             if model[0:3] == "DER":
                 ax.plot(
                     range(n_epochs),
-                    loss_train[model][noise],
+                    loss_train[typei][noise],
                     color=color_list[i],
-                    label=r"Train; $\sigma = $" + str(sigma_list[i]),
+                    label=r"Train; $\sigma = $" + str(sigma),
                     ls="--",
                 )
                 ax.plot(
                     range(n_epochs),
-                    loss[model][noise],
+                    loss[typei][noise],
                     color=color_list[i],
-                    label=r"Validation; $\sigma = $" + str(sigma_list[i]),
+                    label=r"Validation; $\sigma = $" + str(sigma),
                 )
             else:
                 ax.plot(
                     range(n_epochs),
-                    loss_train[model][noise][0],
+                    loss_train[typei][noise][0],
                     color=color_list[i],
                     ls="--",
                 )
                 ax.plot(
                     range(n_epochs),
-                    loss[model][noise][0],
+                    loss[typei][noise][0],
                     color=color_list[i],
                 )
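
Note (not part of the patch): a minimal, self-contained sketch of the bookkeeping this diff moves to. Loss curves are now stored in nested dictionaries keyed first by injection type and then by noise level, and sigma is computed per injection type inside the plotting loops via DataPreparation.get_sigma(noise, inject_type=..., data_dimension=...) rather than once up front. The injection types, noise levels, and n_epochs below are illustrative placeholders, and fake_valid_mse stands in for the "valid_mse" entry that the real script reads back from each checkpoint.

# Illustrative sketch of the new per-inject-type dictionary layout;
# values are placeholders, not outputs of the real script.
inject_type_list = ["feature", "predictive"]  # assumed example injection types
noise_list = ["low", "medium", "high"]        # assumed example noise levels
n_epochs = 3

mse_loss = {
    typei: {noise: [] for noise in noise_list}
    for typei in inject_type_list
}

for typei in inject_type_list:
    for noise in noise_list:
        for epoch in range(n_epochs):
            # the real script appends chk["valid_mse"] loaded via
            # chk_module.load_checkpoint(model, prescription, typei, dim, ...)
            fake_valid_mse = float(epoch)
            mse_loss[typei][noise].append(fake_valid_mse)

# one curve of length n_epochs per (injection type, noise level) pair
assert len(mse_loss["feature"]["low"]) == n_epochs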