
Commit

format

odunbar committed Nov 11, 2022
1 parent b54c6e7 commit 1ddc959
Showing 6 changed files with 99 additions and 91 deletions.
@@ -121,7 +121,7 @@ if plot_flag
end

# setup random features
-n_features = 2*n
+n_features = 2 * n


vrfi = VectorRandomFeatureInterface(n_features, p, d)
16 changes: 8 additions & 8 deletions src/Emulator.jl
@@ -119,13 +119,13 @@ function Emulator(
end

# [3.] Decorrelating the outputs, not performed for vector RF
-if !isa(machine_learning_tool,VectorRandomFeatureInterface)
+if !isa(machine_learning_tool, VectorRandomFeatureInterface)

#Transform data if obs_noise_cov available
# (if obs_noise_cov==nothing, transformed_data is equal to data)
decorrelated_training_outputs, decomposition =
svd_transform(training_outputs, obs_noise_cov, retained_svd_frac = retained_svd_frac)

# write new pairs structure
if retained_svd_frac < 1.0
#note this changes the dimension of the outputs
@@ -145,7 +145,7 @@ function Emulator(
# regularization = 1 / size(obs_noise_cov,1) * tr(obs_noise_cov)
training_pairs = PairedDataContainer(training_inputs, training_outputs)
# [4.] build an emulator
-build_models!(machine_learning_tool, training_pairs, obs_noise_cov)
+build_models!(machine_learning_tool, training_pairs, obs_noise_cov)

end

@@ -199,7 +199,7 @@ function predict(
ds_outputs, ds_output_var = predict(emulator.machine_learning_tool, normalized_new_inputs)

# [3.] transform back to real coordinates or remain in decorrelated coordinates
-if !isa(get_machine_learning_tool(emulator),VectorRandomFeatureInterface)
+if !isa(get_machine_learning_tool(emulator), VectorRandomFeatureInterface)
if transform_to_real && emulator.decomposition === nothing
throw(ArgumentError("""Need SVD decomposition to transform back to original space,
but GaussianProcess.decomposition == nothing.
@@ -223,19 +223,19 @@
return ds_outputs, ds_output_diagvar
end
else
-ds_output_covvec = vec([ds_output_var[:,:, j] for j in 1:N_samples])
+ds_output_covvec = vec([ds_output_var[:, :, j] for j in 1:N_samples])
if output_dim == 1
-ds_output_covvec = [ds_output_covvec[i][1,1] for i in 1:N_samples]
+ds_output_covvec = [ds_output_covvec[i][1, 1] for i in 1:N_samples]
end

if vector_rf_unstandardize
# [4.] unstandardize
-return reverse_standardize(emulator,ds_outputs, ds_output_covvec)
+return reverse_standardize(emulator, ds_outputs, ds_output_covvec)
else
return ds_outputs, ds_output_covvec
end
end

end

# Normalization, Standardization, and Decorrelation
7 changes: 4 additions & 3 deletions src/MarkovChainMonteCarlo.jl
@@ -165,9 +165,10 @@ function EmulatorPosteriorModel(
# Vector of N_samples covariance matrices. For MH, N_samples is always 1, so we
# have to reshape()/re-cast input/output; simpler to do here than add a
# predict() method.
-g, g_cov = Emulators.predict(em, reshape(θ, :, 1), transform_to_real = false, vector_rf_unstandardize = false)
-#TODO vector_rf will always unstandardize, but other methods will not, so we require this additional flag.
-
+g, g_cov =
+    Emulators.predict(em, reshape(θ, :, 1), transform_to_real = false, vector_rf_unstandardize = false)
+#TODO vector_rf will always unstandardize, but other methods will not, so we require this additional flag.
+
if isa(g_cov[1], Real)
return logpdf(MvNormal(obs_sample, g_cov[1] * I), vec(g)) + get_logpdf(prior, θ)
else
1 change: 0 additions & 1 deletion src/RandomFeature.jl
@@ -9,4 +9,3 @@ using Random

include("ScalarRandomFeature.jl")
include("VectorRandomFeature.jl")
-
31 changes: 15 additions & 16 deletions src/ScalarRandomFeature.jl
@@ -109,7 +109,7 @@ function calculate_mean_cov_and_coeffs(
batch_inputs = RF.Utilities.batch_generator(itest, test_batch_size, dims = 2) # input_dim x batch_size

#we want to calc 1/var(y-mean)^2 + lambda/m * coeffs^2 in the end
-pred_mean, pred_cov = RF.Methods.predict(rfm, fitted_features, DataContainer(itest))
+pred_mean, pred_cov = RF.Methods.predict(rfm, fitted_features, DataContainer(itest))
scaled_coeffs = sqrt(1 / n_features) * RF.Methods.get_coeffs(fitted_features)
return pred_mean, pred_cov, scaled_coeffs

@@ -135,8 +135,8 @@ function estimate_mean_cov_and_coeffnorm_covariance(
for i in 1:n_samples
for j in 1:repeats
m, v, c = calculate_mean_cov_and_coeffs(srfi, rng, l, noise_sd, n_features, batch_sizes, io_pairs)
-means[:, i] += m[1,:] / repeats
-covs[:, i] += v[1,1,:] / repeats
+means[:, i] += m[1, :] / repeats
+covs[:, i] += v[1, 1, :] / repeats
coeffl2norm[1, i] += sqrt(sum(c .^ 2)) / repeats
end
end
@@ -174,8 +174,8 @@ function calculate_ensemble_mean_cov_and_coeffnorm(
for j in collect(1:repeats)
l = lmat[:, i]
m, v, c = calculate_mean_cov_and_coeffs(srfi, rng, l, noise_sd, n_features, batch_sizes, io_pairs)
-means[:, i] += m[1,:] / repeats
-covs[:, i] += v[1,1,:] / repeats
+means[:, i] += m[1, :] / repeats
+covs[:, i] += v[1, 1, :] / repeats
coeffl2norm[1, i] += sqrt(sum(c .^ 2)) / repeats
end
end
@@ -251,8 +251,8 @@ function build_models!(

#get parameters:
lvec = transform_unconstrained_to_constrained(hyper_prior, get_u_final(ekiobj))
-g_ens, _ =
-    calculate_ensemble_mean_cov_and_coeffnorm(srfi, rng, lvec, 1.0, n_features_opt, batch_sizes, io_pairs_opt)
+g_ens, _ = calculate_ensemble_mean_cov_and_coeffnorm(
+    srfi,
+    rng,
+    lvec,
+    1.0,
+    n_features_opt,
+    batch_sizes,
+    io_pairs_opt,
+)
EKP.update_ensemble!(ekiobj, g_ens)
err[i] = get_error(ekiobj)[end] #mean((params_true - mean(params_i,dims=2)).^2)

@@ -265,15 +272,7 @@ function build_models!(
io_pairs_i = PairedDataContainer(input_values, reshape(output_values[i, :], 1, size(output_values, 2)))
n_features = get_n_features(srfi)
# Now, fit new RF model with the optimized hyperparameters
-rfm_i = RFM_from_hyperparameters(
-    srfi,
-    rng,
-    hp_optimal,
-    1.0,
-    n_features,
-    batch_sizes,
-    input_dim,
-)
+rfm_i = RFM_from_hyperparameters(srfi, rng, hp_optimal, 1.0, n_features, batch_sizes, input_dim)
fitted_features_i = RF.Methods.fit(rfm_i, io_pairs_i, decomposition_type = "svd") #fit features

push!(rfms, rfm_i)
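The spacing-after-comma and line-wrapping changes above are consistent with an automated formatter pass over the package. As a minimal sketch only, assuming JuliaFormatter.jl is the tool behind this "format" commit (the diff does not name the tool or its settings), such a pass could be reproduced with:

using JuliaFormatter  # assumption: not confirmed by this commit

# Reformat the package sources and examples in place; the paths and reliance on any
# project-level .JuliaFormatter.toml configuration are illustrative, not taken from the repository.
format("src")
format("examples")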