
Commit

Try #208:
bors[bot] authored Apr 5, 2023
2 parents 1d46a06 + 2deb7ba commit 880e870
Showing 3 changed files with 21 additions and 18 deletions.
2 changes: 1 addition & 1 deletion .buildkite/pipeline.yml
@@ -9,7 +9,7 @@ steps:
       command:
         - "echo $$JULIA_DEPOT_PATH"
         - "julia --project -e 'using Pkg; Pkg.instantiate(;verbose=true)'"
-        - "julia --project -e 'using Conda; Conda.add(\"scipy=1.8.1\")'"
+        - "julia --project -e 'using Conda; Conda.add(\"scipy=1.8.1\", channel=\"conda-forge\")'"
        - "julia --project -e 'using Conda; Conda.add(\"scikit-learn=1.1.1\")'"
         - "julia --project -e 'using Conda; Conda.add(\"matplotlib\")'"
       env:
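
The change above pins scipy's conda channel in addition to its version. Conda.jl's add accepts a channel keyword for exactly this, so the CI step can be reproduced from a REPL; a minimal sketch, assuming Conda.jl is available in the active project:

    using Conda
    # Install a pinned scipy from conda-forge instead of the default channel:
    Conda.add("scipy=1.8.1"; channel = "conda-forge")
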
32 changes: 17 additions & 15 deletions test/GaussianProcess/runtests.jl
@@ -4,10 +4,12 @@ using Test
 using GaussianProcesses
 using Statistics
 using Distributions
-using ScikitLearn: @sk_import
 using LinearAlgebra
-@sk_import gaussian_process:GaussianProcessRegressor
-@sk_import (gaussian_process.kernels):(RBF, WhiteKernel, ConstantKernel)
+using PyCall
+using ScikitLearn
+const pykernels = PyNULL()
+copy!(pykernels, pyimport_conda("sklearn.gaussian_process.kernels", "scikit-learn=1.1.1"))
+

 using CalibrateEmulateSample.Emulators
 using CalibrateEmulateSample.DataContainers
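
A note on the import change above: the PyNULL()/copy! pattern creates an empty placeholder that is filled in at runtime, and pyimport_conda both imports the Python module and installs the pinned conda package if the import fails, which @sk_import does not do. A minimal standalone sketch of the pattern (the names pymod and rbf are illustrative, not part of the commit):

    using PyCall
    # Empty placeholder, filled in at runtime rather than at precompilation:
    const pymod = PyNULL()
    # Import the Python module, installing the conda package if it is missing:
    copy!(pymod, pyimport_conda("sklearn.gaussian_process.kernels", "scikit-learn=1.1.1"))
    rbf = pymod.RBF(1.0)  # module members are accessed with property syntax
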
@@ -102,8 +104,8 @@ using CalibrateEmulateSample.DataContainers

     gppackage = SKLJL()
     pred_type = YType()
-    var = ConstantKernel(constant_value = 1.0)
-    se = RBF(1.0)
+    var = pykernels.ConstantKernel(constant_value = 1.0)
+    se = pykernels.RBF(1.0)
     GPkernel = var * se
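
Since pykernels.ConstantKernel(...) and pykernels.RBF(...) are PyCall PyObjects, the * above is forwarded to Python, so GPkernel is the same scikit-learn Product kernel that ConstantKernel(1.0) * RBF(1.0) would build in Python. A small illustrative sketch (the name pyk is hypothetical):

    using PyCall
    pyk = pyimport_conda("sklearn.gaussian_process.kernels", "scikit-learn=1.1.1")
    k = pyk.ConstantKernel(constant_value = 1.0) * pyk.RBF(1.0)  # sklearn Product kernel
    println(k)  # prints something like "1**2 * RBF(length_scale=1)"
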


@@ -196,24 +198,24 @@ using CalibrateEmulateSample.DataContainers
     new_inputs[:, 4] = [3 * π / 2, 2 * π]

     μ4_noise_learnt, σ4²_noise_learnt = Emulators.predict(em4_noise_learnt, new_inputs, transform_to_real = true)
+    tol_mu = 0.25

-    @test μ4_noise_learnt[:, 1] ≈ [1.0, -1.0] atol = 0.2
-    @test μ4_noise_learnt[:, 2] ≈ [0.0, 2.0] atol = 0.2
-    @test μ4_noise_learnt[:, 3] ≈ [0.0, 0.0] atol = 0.2
-    @test μ4_noise_learnt[:, 4] ≈ [0.0, -2.0] atol = 0.2
+    @test μ4_noise_learnt[:, 1] ≈ [1.0, -1.0] atol = tol_mu
+    @test μ4_noise_learnt[:, 2] ≈ [0.0, 2.0] atol = tol_mu
+    @test μ4_noise_learnt[:, 3] ≈ [0.0, 0.0] atol = tol_mu
+    @test μ4_noise_learnt[:, 4] ≈ [0.0, -2.0] atol = tol_mu
     @test length(σ4²_noise_learnt) == size(new_inputs, 2)
     @test size(σ4²_noise_learnt[1]) == (d, d)

     μ4_noise_from_Σ, σ4²_noise_from_Σ = Emulators.predict(em4_noise_from_Σ, new_inputs, transform_to_real = true)

-    @test μ4_noise_from_Σ[:, 1] ≈ [1.0, -1.0] atol = 0.2
-    @test μ4_noise_from_Σ[:, 2] ≈ [0.0, 2.0] atol = 0.2
-    @test μ4_noise_from_Σ[:, 3] ≈ [0.0, 0.0] atol = 0.2
-    @test μ4_noise_from_Σ[:, 4] ≈ [0.0, -2.0] atol = 0.2
+    @test μ4_noise_from_Σ[:, 1] ≈ [1.0, -1.0] atol = tol_mu
+    @test μ4_noise_from_Σ[:, 2] ≈ [0.0, 2.0] atol = tol_mu
+    @test μ4_noise_from_Σ[:, 3] ≈ [0.0, 0.0] atol = tol_mu
+    @test μ4_noise_from_Σ[:, 4] ≈ [0.0, -2.0] atol = tol_mu

     # check match between the means and variances (they should at least be similar)
     @test all(isapprox.(μ4_noise_from_Σ, μ4_noise_learnt, rtol = 0.05))
-    @test all(isapprox.(σ4²_noise_from_Σ, σ4²_noise_learnt, rtol = 0.05))
+    @test all(isapprox.(σ4²_noise_from_Σ, σ4²_noise_learnt, rtol = 2 * tol_mu))


 end
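
The tolerance change above consolidates the scattered 0.2 literals into a single named tol_mu = 0.25 (a slight loosening) and ties the variance comparison to it, giving rtol = 2 * tol_mu = 0.5. For reference, Base's isapprox accepts x and y when norm(x - y) <= max(atol, rtol * max(norm(x), norm(y))), so the variance check now tolerates up to 50% relative disagreement. A quick illustration:

    # rtol = 0.5 accepts values differing by up to half the larger magnitude:
    isapprox(1.0, 1.4; rtol = 0.5)  # true:  |1.0 - 1.4| = 0.4 <= 0.5 * 1.4
    isapprox(1.0, 2.1; rtol = 0.5)  # false: |1.0 - 2.1| = 1.1 >  0.5 * 2.1
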
5 changes: 3 additions & 2 deletions test/MarkovChainMonteCarlo/runtests.jl
@@ -5,6 +5,7 @@ using GaussianProcesses
 using Test

 using CalibrateEmulateSample.MarkovChainMonteCarlo
+const MCMC = MarkovChainMonteCarlo
 using CalibrateEmulateSample.ParameterDistributions
 using CalibrateEmulateSample.Emulators
 using CalibrateEmulateSample.DataContainers
@@ -92,8 +93,8 @@ function mcmc_test_template(
     # First let's run a short chain to determine a good step size
     new_step = optimize_stepsize(mcmc; init_stepsize = step, N = 5000)

-    # Now begin the actual MCMC
-    chain = sample(rng, mcmc, 100_000; stepsize = new_step, discard_initial = 1000)
+    # Now begin the actual MCMC; `sample` is exported by multiple packages, so we qualify it
+    chain = MCMC.sample(rng, mcmc, 100_000; stepsize = new_step, discard_initial = 1000)
     posterior_distribution = get_posterior(mcmc, chain)
     #post_mean = mean(posterior, dims=1)[1]
     posterior_mean = mean(posterior_distribution)
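
The qualification above is needed because sample is exported by more than one package loaded in this file, and Julia refuses to resolve a name that two used modules both export; the new MCMC alias keeps the qualified call short. A toy sketch of the behavior (modules A/B and function f are illustrative):

    module A; export f; f(x) = x + 1; end
    module B; export f; f(x) = x - 1; end
    using .A, .B
    # f(1)  # ERROR: `f` is exported by both A and B, so it must be qualified
    A.f(1)  # == 2; module qualification resolves the conflict
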
