diff --git a/.appveyor.yml b/.appveyor.yml index 5092e056f..bf86b8aff 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -30,6 +30,8 @@ notifications: install: - ps: iex ((new-object net.webclient).DownloadString("https://raw.githubusercontent.com/JuliaCI/Appveyor.jl/version-1/bin/install.ps1")) + - pip install --user -U numpy scipy scikit-learn + - C:\julia\bin\julia -e 'using Pkg; Pkg.build("PyCall"); Pkg.update()' build_script: - echo "%JL_BUILD_SCRIPT%" diff --git a/Manifest.toml b/Manifest.toml index fbe65c190..6ad805ef0 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -48,6 +48,12 @@ git-tree-sha1 = "bc779df8d73be70e4e05a63727d3a4dfb4c52b1f" uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" version = "0.1.5" +[[AxisAlgorithms]] +deps = ["LinearAlgebra", "Random", "SparseArrays", "WoodburyMatrices"] +git-tree-sha1 = "a4d07a1c313392a77042855df46c5f534076fab9" +uuid = "13072b0f-2c55-5437-9ae7-d433b7a33950" +version = "1.0.0" + [[BandedMatrices]] deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra", "Random", "SparseArrays"] git-tree-sha1 = "03b21fb83f9e0b13f99c407810cd3b6cac293a1d" @@ -164,6 +170,12 @@ git-tree-sha1 = "c7559d68defb3708c0e46df1158e21be86e04312" uuid = "88353bc9-fd38-507d-a820-d3b43837d6b9" version = "0.1.1" +[[Contour]] +deps = ["LinearAlgebra", "StaticArrays", "Test"] +git-tree-sha1 = "b974e164358fea753ef853ce7bad97afec15bb80" +uuid = "d38c429a-6771-53c6-b99e-75d170b6e991" +version = "0.5.1" + [[DataAPI]] git-tree-sha1 = "674b67f344687a88310213ddfa8a2b3c76cc4252" uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a" @@ -186,6 +198,12 @@ git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6" uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464" version = "1.0.0" +[[DataValues]] +deps = ["DataValueInterfaces", "Dates"] +git-tree-sha1 = "d88a19299eba280a6d062e135a43f00323ae70bf" +uuid = "e7dc6d0d-1eca-5fa6-8ad6-5aecde8b7ea5" +version = "0.4.13" + [[Dates]] deps = ["Printf"] uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" @@ -296,9 +314,9 @@ version = "0.8.1" [[Documenter]] deps = ["Base64", "Dates", "DocStringExtensions", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] -git-tree-sha1 = "51f0c7df46abb9c07d80e529718951e634670afb" +git-tree-sha1 = "d497bcc45bb98a1fbe19445a774cfafeabc6c6df" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "0.24.4" +version = "0.24.5" [[ElasticArrays]] git-tree-sha1 = "5b5b7cb8cba44bcf337b8af0a1f3e57c89468660" @@ -333,6 +351,12 @@ git-tree-sha1 = "0fa3b52a04a4e210aeb1626def9c90df3ae65268" uuid = "8f5d6c58-4d21-5cfd-889c-e3ad7ee6a615" version = "1.1.0" +[[FFMPEG]] +deps = ["BinaryProvider", "Libdl"] +git-tree-sha1 = "9143266ba77d3313a4cf61d8333a1970e8c5d8b6" +uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" +version = "0.2.4" + [[FFTW]] deps = ["AbstractFFTs", "FFTW_jll", "IntelOpenMP_jll", "Libdl", "LinearAlgebra", "MKL_jll", "Reexport"] git-tree-sha1 = "109d82fa4b00429f9afcce873e9f746f11f018d3" @@ -396,6 +420,12 @@ version = "1.0.0" deps = ["Random"] uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" +[[GR]] +deps = ["Base64", "DelimitedFiles", "LinearAlgebra", "Printf", "Random", "Serialization", "Sockets", "Test"] +git-tree-sha1 = "10633436bc2fc836347bda5073b7b6f06dcdc5e6" +uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71" +version = "0.46.0" + [[GaussianMixtures]] deps = ["Clustering", "Compat", "DelimitedFiles", "Distributed", "Distributions", "FileIO", "JLD", "LinearAlgebra", "Logging", "PDMats", "Printf", "Random", "ScikitLearnBase", "SpecialFunctions", "Statistics", "StatsBase"] git-tree-sha1 = 
"9b892cfc95da04ca9eff2387bc66f6baddeed606" @@ -414,6 +444,12 @@ git-tree-sha1 = "c59a30ef95fa4b5e0567c1911652e0c70a5d055c" uuid = "01680d73-4ee2-5a08-a1aa-533608c188bb" version = "0.2.2" +[[GeometryTypes]] +deps = ["ColorTypes", "FixedPointNumbers", "LinearAlgebra", "StaticArrays"] +git-tree-sha1 = "a96baa00f5ac755689c82c29bc3395e161337c69" +uuid = "4d00f742-c7ba-57c2-abde-4428a4b178cb" +version = "0.7.7" + [[HDF5]] deps = ["BinaryProvider", "Blosc", "CMakeWrapper", "Libdl", "Mmap"] git-tree-sha1 = "d3ea5532668bf9bdd5e8d5f16571e0b520cbbb9f" @@ -442,6 +478,12 @@ version = "2018.0.3+0" deps = ["Markdown"] uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" +[[Interpolations]] +deps = ["AxisAlgorithms", "LinearAlgebra", "OffsetArrays", "Random", "Ratios", "SharedArrays", "SparseArrays", "StaticArrays", "WoodburyMatrices"] +git-tree-sha1 = "c7579b2617b513d8e6eb9f4b91837eefe7080af8" +uuid = "a98d9a8b-a2ab-59e6-89dd-64a1c18fca59" +version = "0.12.8" + [[InvertedIndices]] deps = ["Test"] git-tree-sha1 = "15732c475062348b0165684ffe28e85ea8396afc" @@ -465,10 +507,16 @@ uuid = "82899510-4779-5014-852e-03e436cf321d" version = "1.0.0" [[JLD]] -deps = ["Compat", "FileIO", "HDF5", "LegacyStrings", "Profile", "Random"] -git-tree-sha1 = "95fd5d7f129918a75d0535aaaf5b8e235e6e0b0b" +deps = ["Compat", "FileIO", "HDF5", "LegacyStrings"] +git-tree-sha1 = "c8c7a9ca4c17a519961a8ec27e508b07ffc9c1a9" uuid = "4138dd39-2aa7-5051-a626-17a0bb65d9c8" -version = "0.9.1" +version = "0.9.2" + +[[JLD2]] +deps = ["CodecZlib", "DataStructures", "FileIO", "Mmap", "Pkg", "Printf", "UUIDs"] +git-tree-sha1 = "5deae9f0745ef505ed155a0029629cf08502ccab" +uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" +version = "0.1.11" [[JSON]] deps = ["Dates", "Mmap", "Parsers", "Unicode"] @@ -476,6 +524,12 @@ git-tree-sha1 = "b34d7cef7b337321e97d22242c3c2b91f476748e" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" version = "0.21.0" +[[KernelDensity]] +deps = ["Distributions", "FFTW", "Interpolations", "Optim", "StatsBase", "Test"] +git-tree-sha1 = "c1048817fe5711f699abc8fabd47b1ac6ba4db04" +uuid = "5ab0869b-81aa-558d-bb23-cbf5423bbe9b" +version = "0.5.1" + [[LaTeXStrings]] deps = ["Compat"] git-tree-sha1 = "7ab9b8788cfab2bdde22adf9004bda7ad9954b6c" @@ -535,6 +589,11 @@ version = "0.5.3" deps = ["Base64"] uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" +[[Measures]] +git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f" +uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e" +version = "0.3.1" + [[Missings]] deps = ["DataAPI"] git-tree-sha1 = "de0a5ce9e5289f27df672ffabef4d1e5861247d5" @@ -569,9 +628,9 @@ version = "2.1.0" [[NLSolversBase]] deps = ["DiffResults", "Distributed", "FiniteDiff", "ForwardDiff"] -git-tree-sha1 = "b13b1f97ce88a37dd55ce7ae7e75b81002bf2b9d" +git-tree-sha1 = "7c4e66c47848562003250f28b579c584e55becc0" uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" -version = "7.6.0" +version = "7.6.1" [[NLsolve]] deps = ["Distances", "LineSearches", "LinearAlgebra", "NLSolversBase", "Printf", "Reexport"] @@ -602,6 +661,17 @@ git-tree-sha1 = "8bc6180f328f3c0ea2663935db880d34c57d6eae" uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce" version = "0.4.4" +[[Observables]] +deps = ["Test"] +git-tree-sha1 = "dc02cec22747d1d10d9f70d8a1c03432b5bfbcd0" +uuid = "510215fc-4207-5dde-b226-833fc4488ee2" +version = "0.2.3" + +[[OffsetArrays]] +git-tree-sha1 = "707e34562700b81e8aa13548eb6b23b18112e49b" +uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" +version = "1.0.2" + [[OpenBLAS_jll]] deps = ["Libdl", "Pkg"] git-tree-sha1 = "e2551d7c25d52f35b76d86a50917a3ba8988f519" @@ -654,6 
+724,24 @@ version = "0.3.10" deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Test", "UUIDs"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" +[[PlotThemes]] +deps = ["PlotUtils", "Requires", "Statistics"] +git-tree-sha1 = "df772cc7c78862da96af1ee85cd0111c6640e44e" +uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a" +version = "1.0.1" + +[[PlotUtils]] +deps = ["Colors", "Dates", "Printf", "Random", "Reexport"] +git-tree-sha1 = "22bd7d0a1f4665d66317d6c89a57f6bba9f2560d" +uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043" +version = "0.6.2" + +[[Plots]] +deps = ["Base64", "Contour", "Dates", "FFMPEG", "FixedPointNumbers", "GR", "GeometryTypes", "JSON", "LinearAlgebra", "Measures", "NaNMath", "Pkg", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "Reexport", "Requires", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs"] +git-tree-sha1 = "11c75a31269c1c64790e7cb910346f64cd4440c1" +uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" +version = "0.27.1" + [[PoissonRandom]] deps = ["Random", "Statistics", "Test"] git-tree-sha1 = "44d018211a56626288b5d3f8c6497d28c26dc850" @@ -676,10 +764,6 @@ version = "0.2.3" deps = ["Unicode"] uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" -[[Profile]] -deps = ["Printf"] -uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79" - [[ProgressMeter]] deps = ["Distributed", "Printf"] git-tree-sha1 = "ea1f4fa0ff5e8b771bf130d87af5b7ef400760bd" @@ -730,6 +814,12 @@ git-tree-sha1 = "441e6fc35597524ada7f85e13df1f4e10137d16f" uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143" version = "1.4.0" +[[Ratios]] +deps = ["Compat"] +git-tree-sha1 = "cdbbe0f350581296f3a2e3e7a91b214121934407" +uuid = "c84ed2f1-dad5-54f0-aa8e-dbefe2724439" +version = "0.3.1" + [[RecipesBase]] git-tree-sha1 = "7bdce29bc9b2f5660a6e5e64d64d91ec941f6aa2" uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" @@ -799,6 +889,12 @@ uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" deps = ["Distributed", "Mmap", "Random", "Serialization"] uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" +[[Showoff]] +deps = ["Dates"] +git-tree-sha1 = "e032c9df551fb23c9f98ae1064de074111b7bc39" +uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f" +version = "0.3.1" + [[SimpleTraits]] deps = ["InteractiveUtils", "MacroTools"] git-tree-sha1 = "2bdf3b6300a9d66fe29ee8bb51ba100c4df9ecbc" @@ -858,6 +954,12 @@ git-tree-sha1 = "79982835d2ff3970685cb704500909c94189bde9" uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" version = "0.9.3" +[[StatsPlots]] +deps = ["Clustering", "DataStructures", "DataValues", "Distributions", "Interpolations", "KernelDensity", "Observables", "Plots", "RecipesBase", "Reexport", "StatsBase", "Tables", "Widgets"] +git-tree-sha1 = "9f3f096a310f25debaca9dbe6b4e0df7bb428fd0" +uuid = "f3b207a7-027a-5e70-b257-86293d7955fd" +version = "0.12.0" + [[SteadyStateDiffEq]] deps = ["DiffEqBase", "DiffEqCallbacks", "LinearAlgebra", "NLsolve", "Reexport"] git-tree-sha1 = "9a2de84a1618e1702dbb95ebdbb84dbf7f24d1ab" @@ -866,9 +968,9 @@ version = "1.5.0" [[StochasticDiffEq]] deps = ["ArrayInterface", "DataStructures", "DiffEqBase", "DiffEqNoiseProcess", "FillArrays", "FiniteDiff", "ForwardDiff", "LinearAlgebra", "Logging", "MuladdMacro", "NLsolve", "Parameters", "Random", "RandomNumbers", "RecursiveArrayTools", "Reexport", "SparseArrays", "SparseDiffTools", "StaticArrays"] -git-tree-sha1 = "acdb92d3d056cbbfd9c598e9f792eb5c0d21542d" +git-tree-sha1 = "0709f16470826c61334dba83600fa9f7d31bd1df" uuid = "789caeaf-c7a9-5a7d-9973-96adeb23e2a0" -version = "6.18.0" +version = "6.19.0" [[SuiteSparse]] deps = 
["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] @@ -950,6 +1052,18 @@ git-tree-sha1 = "28807f85197eaad3cbd2330386fac1dcb9e7e11d" uuid = "ea10d353-3f73-51f8-a26c-33c1cb351aa5" version = "0.6.2" +[[Widgets]] +deps = ["Colors", "Dates", "Observables", "OrderedCollections"] +git-tree-sha1 = "fc0feda91b3fef7fe6948ee09bb628f882b49ca4" +uuid = "cc8bc4a8-27d6-5769-a93b-9d913e69aa62" +version = "0.6.2" + +[[WoodburyMatrices]] +deps = ["LinearAlgebra", "SparseArrays"] +git-tree-sha1 = "bbb9f7fd6fbdd9582e77c0b698312c543de5eb71" +uuid = "efce3f68-66dc-5838-9240-27a6d6f5f9b6" +version = "0.5.0" + [[XML2_jll]] deps = ["Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] git-tree-sha1 = "ed5603a695aefe3e9e404fc7b052e02cc72cfab6" @@ -970,9 +1084,9 @@ version = "1.2.11+8" [[Zygote]] deps = ["DiffRules", "FFTW", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NNlib", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"] -git-tree-sha1 = "ca4dfa4de0a0e2c1da6c8c67d3b9af99645b57fc" +git-tree-sha1 = "54872ae5411c8795ed52852759796a04fb771f68" uuid = "e88e6eb3-aa80-5325-afca-941959d7151f" -version = "0.4.6" +version = "0.4.7" [[ZygoteRules]] deps = ["MacroTools"] diff --git a/Project.toml b/Project.toml index d2caf7f24..8b1520724 100644 --- a/Project.toml +++ b/Project.toml @@ -10,12 +10,19 @@ DifferentialEquations = "0c46a032-eb83-5123-abaf-570d42b7fbaa" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" EllipsisNotation = "da5c29d0-fa7d-589e-88eb-ea29b0a81949" +GaussianProcesses = "891a1506-143c-57d2-908e-e1f8e92e6de9" +JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" NPZ = "15e1cf62-19b3-5cfa-8e77-841668bca605" +Optim = "429524aa-4258-5aef-a3af-852621145aeb" Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a" +Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" PyCall = "438e738f-606a-5dbb-bf0a-cddfbfd45ab0" PyPlot = "d330b81b-6aea-500a-939a-2ce795aea3ee" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" ScikitLearn = "3646fa90-6ef7-5e7e-9f22-8aca16db6324" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" +StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd" +Sundials = "c3572dad-4567-51f8-b174-8c6c989267f4" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/examples/L96m/L96m.jl b/examples/L96m/L96m.jl index cc47d10a1..cf51f5708 100644 --- a/examples/L96m/L96m.jl +++ b/examples/L96m/L96m.jl @@ -14,13 +14,13 @@ Parameters: Other: - `G` : functional closure for slow variables (usually a GPR-closure) """ -@with_kw mutable struct L96m - K::Int = 9 - J::Int = 8 - hx::Float64 = -0.8 - hy::Float64 = 1.0 - F::Float64 = 10.0 - eps::Float64 = 2^(-7) +@with_kw mutable struct L96m{FT<:AbstractFloat,I<:Int} + K::I = 9 + J::I = 8 + hx::FT = -0.8 + hy::FT = 1.0 + F::FT = 10.0 + eps::FT = 2^(-7) G = nothing end @@ -38,7 +38,7 @@ Output: - `rhs` : RHS computed at `z` """ -function full(rhs::Array{<:Real,1}, z::Array{<:Real,1}, p::L96m, t) +function full(rhs::Array{FT,1}, z::Array{FT,1}, p::L96m{FT,I}, t) where {FT,I} K = p.K J = p.J x = @view(z[1:K]) @@ -96,7 +96,7 @@ Output: - `rhs` : balanced RHS computed at `x` """ -function balanced(rhs::Array{<:Real,1}, x::Array{<:Real,1}, p::L96m, t) +function balanced(rhs::Array{FT,1}, x::Array{FT,1}, p::L96m{FT,I}, t) where {FT,I} K = p.K # three boundary cases @@ -128,7 +128,7 @@ Output: - `rhs` : regressed RHS computed at `x` """ -function 
regressed(rhs::Array{<:Real,1}, x::Array{<:Real,1}, p::L96m, t) +function regressed(rhs::Array{FT,1}, x::Array{FT,1}, p::L96m{FT,I}, t) where {FT,I} K = p.K # three boundary cases @@ -152,7 +152,7 @@ end Reshape a vector of y_{j,k} into a matrix, then sum along one dim and divide by J to get averages """ -function compute_Yk(p::L96m, z::Array{<:Real,1}) +function compute_Yk(p::L96m{FT,I}, z::Array{FT,1}) where {FT,I} return dropdims( sum( reshape(z[p.K+1:end], p.J, p.K), dims = 1 ), dims = 1 @@ -163,8 +163,8 @@ end Set the closure `p.G` to a linear one with slope `slope`. If unspecified, slope is equal to `p.hy`. """ -function set_G0(p::L96m; slope = nothing) - if (slope == nothing) || (!isa(slope, Real)) +function set_G0(p::L96m{FT,I}; slope = nothing) where {FT,I} + if (slope == nothing) || (!isa(slope, FT)) slope = p.hy end p.G = x -> slope * x @@ -173,7 +173,7 @@ end """ Wrapper for set_G0(p::L96m; slope = nothing). """ -function set_G0(p::L96m, slope::Real) +function set_G0(p::L96m{FT,I}, slope::FT) where {FT,I} set_G0(p, slope = slope) end @@ -190,9 +190,9 @@ Output: the 2nd dimension in `sol` (number of time steps) """ -function gather_pairs(p::L96m, sol) +function gather_pairs(p::L96m{FT,I}, sol) where {FT,I} N = size(sol, 2) - pairs = Array{Float64, 2}(undef, p.K * N, 2) + pairs = Array{FT, 2}(undef, p.K * N, 2) for n in 1:N pairs[p.K * (n-1) + 1 : p.K * n, 1] = sol[1:p.K, n] pairs[p.K * (n-1) + 1 : p.K * n, 2] = compute_Yk(p, sol[:,n]) @@ -217,8 +217,8 @@ Input: Output: - `z00` : array of size `p.K + p.K * p.J` with random values """ -function random_init(p::L96m) - z00 = Array{Float64}(undef, p.K + p.K * p.J) +function random_init(p::L96m{FT,I}) where {FT,I} + z00 = Array{FT}(undef, p.K + p.K * p.J) z00[1:p.K] .= rand(p.K) * 15 .- 5 for k_ in 1:p.K diff --git a/src/CalibrateEmulateSample.jl b/src/CalibrateEmulateSample.jl index d8c2d85a3..9d9ef599f 100644 --- a/src/CalibrateEmulateSample.jl +++ b/src/CalibrateEmulateSample.jl @@ -2,9 +2,15 @@ module CalibrateEmulateSample using Distributions, Statistics, LinearAlgebra, DocStringExtensions +include("Utilities.jl") include("spaces.jl") include("problems.jl") -include("eks.jl") +include("Histograms.jl") +include("EKS.jl") +include("EKI.jl") +include("Truth.jl") +include("GPEmulator.jl") +include("MCMC.jl") include("GPR.jl") end # module diff --git a/src/ConvenienceFunctions.jl b/src/ConvenienceFunctions.jl deleted file mode 100644 index 6a6bea524..000000000 --- a/src/ConvenienceFunctions.jl +++ /dev/null @@ -1,12 +0,0 @@ - -const RPAD = 25 - -function name(name::AbstractString) - return rpad(name * ":", RPAD) -end - -function warn(name::AbstractString) - return rpad("WARNING (" * name * "):", RPAD) -end - - diff --git a/src/EKI_mb.jl b/src/EKI.jl similarity index 56% rename from src/EKI_mb.jl rename to src/EKI.jl index 17caf3248..002a5233e 100644 --- a/src/EKI_mb.jl +++ b/src/EKI.jl @@ -1,36 +1,14 @@ -module EKI - -""" -Module: EKI -------------------------------------- -Packages required: LinearAlgebra, - Statistics, - Distributions - LinearAlgebra - DifferentialeEquations - Random - Sundials -------------------------------------- -Idea: To construct an object to perform Ensemble Kalman updates - It also measures errors to the truth to assess convergence -------------------------------------- -Exports: EKIObject - update_ensemble! 
- compute_error - construct_initial_ensemble - run_cloudy - run_cloudy_ensemble -------------------------------------- """ + EKI -# Import Cloudy modules -using Cloudy.PDistributions -using Cloudy.Sources -using Cloudy.KernelTensors +To construct an object to perform Ensemble Kalman updates. +It also measures errors to the truth to assess convergence. +""" +module EKI # packages using Random -using Statistics +using Statistics using Sundials # CVODE_BDF() solver for ODE using Distributions using LinearAlgebra @@ -40,8 +18,8 @@ using DifferentialEquations export EKIObj export construct_initial_ensemble export compute_error -export run_cloudy -export run_cloudy_ensemble +export run_model +export run_model_ensemble export update_ensemble! @@ -50,14 +28,14 @@ export update_ensemble! ##### # structure to organize data -struct EKIObj - u::Vector{Array{Float64, 2}} +struct EKIObj{FT<:AbstractFloat,I<:Int} + u::Vector{Array{FT, 2}} unames::Vector{String} - g_t::Vector{Float64} - cov::Array{Float64, 2} - N_ens::Int64 - g::Vector{Array{Float64, 2}} - error::Vector{Float64} + g_t::Vector{FT} + cov::Array{FT, 2} + N_ens::I + g::Vector{Array{FT, 2}} + error::Vector{FT} end @@ -66,30 +44,31 @@ end ##### # outer constructors -function EKIObj(parameters::Array{Float64, 2}, parameter_names::Vector{String}, - t_mean, t_cov::Array{Float64, 2}) - +function EKIObj(parameters::Array{FT, 2}, parameter_names::Vector{String}, + t_mean, t_cov::Array{FT, 2}) where {FT<:AbstractFloat} + # ensemble size N_ens = size(parameters)[1] + I = typeof(N_ens) # parameters - u = Array{Float64, 2}[] # array of Matrix{Float64}'s + u = Array{FT, 2}[] # array of Matrix{FT}'s push!(u, parameters) # insert parameters at end of array (in this case just 1st entry) # observations - g = Vector{Float64}[] + g = Vector{FT}[] # error store error = [] - - EKIObj(u, parameter_names, t_mean, t_cov, N_ens, g, error) + + EKIObj{FT,I}(u, parameter_names, t_mean, t_cov, N_ens, g, error) end """ construct_initial_ensemble(priors, N_ens) - -Constructs the initial parameters, by sampling N_ens samples from specified + +Constructs the initial parameters by sampling N_ens samples from specified prior distributions. 
""" -function construct_initial_ensemble(N_ens::Int64, priors; rng_seed=42) +function construct_initial_ensemble(N_ens::I, priors; rng_seed=42) where {I<:Int} N_params = length(priors) params = zeros(N_ens, N_params) # Ensuring reproducibility of the sampled parameter values @@ -98,24 +77,28 @@ function construct_initial_ensemble(N_ens::Int64, priors; rng_seed=42) prior_i = priors[i] params[:, i] = rand(prior_i, N_ens) end - + return params -end # function construct_initial ensemble +end function compute_error(eki) meang = dropdims(mean(eki.g[end], dims=1), dims=1) - diff = eki.g_t - meang + diff = eki.g_t - meang X = eki.cov \ diff # diff: column vector newerr = dot(diff, X) push!(eki.error, newerr) -end # function compute_error +end -function run_cloudy_ensemble(kernel::KernelTensor{Float64}, - dist::PDistribution{Float64}, - params::Array{Float64, 2}, - moments::Array{Float64, 1}, - tspan::Tuple{Float64, Float64}; rng_seed=42) +function run_model_ensemble(kernel::KT, + dist::D, + params::Array{FT, 2}, + moments::Array{FT, 1}, + tspan::Tuple{FT, FT}, + moment, + update_params, + get_src; + rng_seed=42) where {D,KT, FT<:AbstractFloat} N_ens = size(params, 1) # params is N_ens x N_params n_moments = length(moments) @@ -123,52 +106,56 @@ function run_cloudy_ensemble(kernel::KernelTensor{Float64}, Random.seed!(rng_seed) for i in 1:N_ens - # run cloudy with the current parameters, i.e., map θ to G(θ) - g_ens[i, :] = run_cloudy(params[i, :], kernel, dist, moments, tspan) + # run model with the current parameters, i.e., map θ to G(θ) + g_ens[i, :] = run_model(params[i, :], kernel, dist, moments, moment, update_params, get_src, tspan) end return g_ens -end # function run_cloudy_ensemble +end """ -run_cloudy(kernel, dist, moments, tspan) +run_model(params, kernel, dist, moments, moment, update_params, get_src, tspan) -- `kernel` - is the collision-coalescence kernel that determines the evolution +- `kernel` - is the collision-coalescence kernel that determines the evolution of the droplet size distribution - `dist` - is a mass distribution function -- `moments` - is an array defining the moments of dist Cloudy will compute +- `moments` - is an array defining the moments of dist the model will compute over time (e.g, [0.0, 1.0, 2.0]) -- `tspan` - is a tuple definint the time interval over which cloudy is run +- `tspan` - is a tuple defining the time interval over which the model is run """ -function run_cloudy(params::Array{Float64, 1}, kernel::KernelTensor{Float64}, - dist::PDistributions.PDistribution{Float64}, - moments::Array{Float64, 1}, - tspan=Tuple{Float64, Float64}) - +function run_model(params::Array{FT, 1}, + kernel::KT, + dist::D, + moments::Array{FT, 1}, + moment, + update_params, + get_src, + tspan::Tuple{FT, FT}) where {D,KT,FT<:AbstractFloat} + # generate the initial distribution - dist = PDistributions.update_params(dist, params) + dist = update_params(dist, params) # Numerical parameters tol = 1e-7 - # Make sure moments are up to date. mom0 is the initial condition for the + # Make sure moments are up to date. 
mom0 is the initial condition for the # ODE problem moments_init = fill(NaN, length(moments)) for (i, mom) in enumerate(moments) - moments_init[i] = PDistributions.moment(dist, convert(Float64, mom)) + moments_init[i] = moment(dist, convert(FT, mom)) end # Set up ODE problem: dM/dt = f(M,p,t) - rhs(M, p, t) = get_src_coalescence(M, dist, kernel) + rhs(M, p, t) = get_src(M, dist, kernel) prob = ODEProblem(rhs, moments_init, tspan) # Solve the ODE sol = solve(prob, CVODE_BDF(), alg_hints=[:stiff], reltol=tol, abstol=tol) # Return moments at last time step moments_final = vcat(sol.u'...)[end, :] time = tspan[2] - + return moments_final -end # function run_cloudy +end function update_ensemble!(eki, g) @@ -178,7 +165,7 @@ function update_ensemble!(eki, g) u_bar = fill(0.0, size(u)[2]) # g: N_ens x N_data g_bar = fill(0.0, size(g)[2]) - + cov_ug = fill(0.0, size(u)[2], size(g)[2]) cov_gg = fill(0.0, size(g)[2], size(g)[2]) @@ -191,12 +178,12 @@ function update_ensemble!(eki, g) # add to mean u_bar += u_ens g_bar += g_ens - + #add to cov cov_ug += u_ens * g_ens' # cov_ug is N_params x N_data cov_gg += g_ens * g_ens' end - + u_bar = u_bar / eki.N_ens g_bar = g_bar / eki.N_ens cov_ug = cov_ug / eki.N_ens - u_bar * g_bar' @@ -207,13 +194,13 @@ function update_ensemble!(eki, g) y = (eki.g_t .+ noise)' # add g_t (N_data) to each column of noise (N_data x N_ens), then transp. into N_ens x N_data tmp = (cov_gg + eki.cov) \ (y - g)' # N_data x N_data \ [N_ens x N_data - N_ens x N_data]' --> tmp is N_data x N_ens u += (cov_ug * tmp)' # N_ens x N_params - + # store new parameters (and observations) push!(eki.u, u) # N_ens x N_params push!(eki.g, g) # N_ens x N_data compute_error(eki) -end # function update_ensemble! +end end # module EKI diff --git a/src/eks.jl b/src/EKS.jl similarity index 86% rename from src/eks.jl rename to src/EKS.jl index fd6c3a4b7..575ff952e 100644 --- a/src/eks.jl +++ b/src/EKS.jl @@ -5,7 +5,7 @@ Perform an iteration of the ensemble Kalman sampler (EKS), returning a new set o See eqs. (5.4a-b) from https://arxiv.org/abs/1903.08866. """ -function eks_iter(prob::CESProblem, θs, fθs) +function eks_iter(prob::CESProblem, θs::Vector{Vector{FT}}, fθs::Vector{Vector{FT}}) where {FT} covθ = cov(θs) meanθ = mean(θs) @@ -14,7 +14,7 @@ function eks_iter(prob::CESProblem, θs, fθs) J = length(θs) CG = [dot(fθk - m, fθj - prob.obs, prob.space)/J for fθj in fθs, fθk in fθs] - Δt = 1.0 / (norm(CG) + sqrt(eps(Float64))) + Δt = 1.0 / (norm(CG) + sqrt(eps(FT))) implicit = lu( I + Δt .* (covθ * inv(cov(prob.prior))) ) # todo: incorporate means Z = covθ * (cov(prob.prior) \ mean(prob.prior)) diff --git a/src/GPEmulator_mb.jl b/src/GPEmulator.jl similarity index 67% rename from src/GPEmulator_mb.jl rename to src/GPEmulator.jl index 398c76850..a77f26706 100644 --- a/src/GPEmulator_mb.jl +++ b/src/GPEmulator.jl @@ -1,35 +1,20 @@ -module GPEmulator - """ -Module: GP -------------------------------------- -Packages required: Statistics, - Distributions, - LinearAlgebra, - GaussianProcesses, - (Optim) - ScikitLearn, -------------------------------------- -Idea: To create an emulator (GP). - Include functions to optimize the emulator - and to make predictions of mean and variance - uses ScikitLearn (or GaussianProcesses.jl -------------------------------------- -Exports: GPObj - optimize_hyperparameters - predict - emulate - extract -------------------------------------- + GPEmulator + +To create an emulator (GP). 
+Include functions to optimize the emulator +and to make predictions of mean and variance. +Uses ScikitLearn (or GaussianProcesses.jl). """ -# import CES modules -include("EKI.jl") -const S = Main.EKI.EKIObj -include("Truth.jl") -const T = Main.Truth.TruthObj +module GPEmulator + +using ..EKI +using ..Truth +const S = EKI.EKIObj +const T = Truth.TruthObj # packages -using Statistics +using Statistics using Distributions using LinearAlgebra using GaussianProcesses @@ -54,25 +39,26 @@ export extract ##### #structure to hold inputs/ouputs kernel type and whether we do sparse GP -struct GPObj - inputs::Matrix{Float64} - data::Matrix{Float64} +struct GPObj{FT<:AbstractFloat} + inputs::Matrix{FT} + data::Matrix{FT} models::Vector package::String end function GPObj(inputs, data, package) + FT = eltype(data) if package == "gp_jl" models = Any[] - outputs = convert(Matrix{Float64}, data') - inputs = convert(Matrix{Float64}, inputs') + outputs = convert(Matrix{FT}, data') + inputs = convert(Matrix{FT}, inputs') for i in 1:size(outputs, 1) # Zero mean function - kmean = MeanZero() + kmean = MeanZero() # Construct kernel: - # Sum kernel consisting of Matern 5/2 ARD kernel and Squared - # Exponential kernel + # Sum kernel consisting of Matern 5/2 ARD kernel and Squared + # Exponential kernel len2 = 1.0 var2 = 1.0 kern1 = SE(len2, var2) @@ -89,28 +75,26 @@ function GPObj(inputs, data, package) optimize!(m) push!(models, m) end - - GPObj(inputs, outputs, models, package) - + elseif package == "sk_jl" - + len2 = ones(size(inputs, 2)) var2 = 1.0 varkern = ConstantKernel(constant_value=var2, constant_value_bounds=(1e-05, 1000.0)) rbf = RBF(length_scale=len2, length_scale_bounds=(1.0, 1000.0)) - + white = WhiteKernel(noise_level=1.0, noise_level_bounds=(1e-05, 10.0)) kern = varkern * rbf + white models = Any[] - outputs = convert(Matrix{Float64}, data') - inputs = convert(Matrix{Float64}, inputs) - + outputs = convert(Matrix{FT}, data') + inputs = convert(Matrix{FT}, inputs) + for i in 1:size(outputs,1) out = reshape(outputs[i,:], (size(outputs, 2), 1)) - + m = GaussianProcessRegressor(kernel=kern, n_restarts_optimizer=10, alpha=0.0, normalize_y=true) @@ -123,48 +107,47 @@ function GPObj(inputs, data, package) push!(models, m) end - GPObj(inputs, outputs, models, package) - - else - println("use package sk_jl or gp_jl") + else + error("use package sk_jl or gp_jl") end -end # function GPObj + return GPObj{FT}(inputs, outputs, models, package) +end + +function predict(gp::GPObj{FT}, new_inputs::Array{FT}; + prediction_type="y") where {FT} -function predict(gp::GPObj, new_inputs::Array{Float64}; - prediction_type="y") - # predict data (type "y") or latent function (type "f") # column of new_inputs gives new parameter set to evaluate gp at M = length(gp.models) - mean = Array{Float64}[] - var = Array{Float64}[] + mean = Array{FT}[] + var = Array{FT}[] # predicts columns of inputs so must be transposed if gp.package == "gp_jl" - new_inputs = convert(Matrix{Float64}, new_inputs') - end + new_inputs = convert(Matrix{FT}, new_inputs') + end for i=1:M if gp.package == "gp_jl" if prediction_type == "y" mu, sig2 = predict_y(gp.models[i], new_inputs) push!(mean, mu) - push!(var, sig2) + push!(var, sig2) elseif prediction_type == "f" mu, sig2 = predict_f(gp.models[i], new_inputs') push!(mean, mu) - push!(var, sig2) - else + push!(var, sig2) + else println("prediction_type must be string: y or f") exit() end elseif gp.package == "sk_jl" - + mu, sig = gp.models[i].predict(new_inputs, return_std=true) sig2 = sig .* sig 
push!(mean, mu) - push!(var, sig2) + push!(var, sig2) end end @@ -174,8 +157,8 @@ function predict(gp::GPObj, new_inputs::Array{Float64}; end # function predict -function emulate(u_tp::Array{Float64, 2}, g_tp::Array{Float64, 2}, - gppackage::String) +function emulate(u_tp::Array{FT, 2}, g_tp::Array{FT, 2}, + gppackage::String) where {FT<:AbstractFloat} gpobj = GPObj(u_tp, g_tp, gppackage) # construct the GP based on data @@ -183,27 +166,27 @@ function emulate(u_tp::Array{Float64, 2}, g_tp::Array{Float64, 2}, end -function extract(truthobj::T, ekiobj::S, N_eki_it::Int64) - +function extract(truthobj::T, ekiobj::EKIObj{FT}, N_eki_it::I) where {T,FT, I<:Int} + yt = truthobj.y_t yt_cov = truthobj.cov yt_covinv = inv(yt_cov) - + # Note u[end] does not have an equivalent g u_tp = ekiobj.u[end-N_eki_it:end-1] # N_eki_it x [N_ens x N_param] g_tp = ekiobj.g[end-N_eki_it+1:end] # N_eki_it x [N_ens x N_data] # u does not require reduction, g does: - # g_tp[j] is jth iteration of ensembles + # g_tp[j] is jth iteration of ensembles u_tp = cat(u_tp..., dims=1) # [(N_eki_it x N_ens) x N_param] g_tp = cat(g_tp..., dims=1) # [(N_eki_it x N_ens) x N_data] return yt, yt_cov, yt_covinv, u_tp, g_tp end -function orig2zscore(X::AbstractVector, mean::AbstractVector, +function orig2zscore(X::AbstractVector, mean::AbstractVector, std::AbstractVector) - # Compute the z scores of a vector X using the given mean + # Compute the z scores of a vector X using the given mean # and std Z = zeros(size(X)) for i in 1:length(X) @@ -212,7 +195,7 @@ function orig2zscore(X::AbstractVector, mean::AbstractVector, return Z end -function orig2zscore(X::AbstractMatrix, mean::AbstractVector, +function orig2zscore(X::AbstractMatrix, mean::AbstractVector, std::AbstractVector) # Compute the z scores of matrix X using the given mean and # std. Transformation is applied column-wise. @@ -224,9 +207,9 @@ function orig2zscore(X::AbstractMatrix, mean::AbstractVector, return Z end -function zscore2orig(Z::AbstractVector, mean::AbstractVector, +function zscore2orig(Z::AbstractVector, mean::AbstractVector, std::AbstractVector) - # Transform X (a vector of z scores) back to the original + # Transform X (a vector of z scores) back to the original # values X = zeros(size(Z)) for i in 1:length(X) @@ -235,7 +218,7 @@ function zscore2orig(Z::AbstractVector, mean::AbstractVector, return X end -function zscore2orig(Z::AbstractMatrix, mean::AbstractVector, +function zscore2orig(Z::AbstractMatrix, mean::AbstractVector, std::AbstractVector) X = zeros(size(Z)) # Transform X (a matrix of z scores) back to the original diff --git a/src/GPR.jl b/src/GPR.jl index 2502020ae..3a4a4fc42 100644 --- a/src/GPR.jl +++ b/src/GPR.jl @@ -9,9 +9,9 @@ module GPR using Parameters # lets you have defaults for fields using EllipsisNotation # adds '..' to refer to the rest of array -import ScikitLearn -import StatsBase -include("ConvenienceFunctions.jl") +using ScikitLearn +using StatsBase +using ..Utilities const sklearn = ScikitLearn @@ -41,9 +41,6 @@ Do *not* set Wrap's variables except for `thrsh`; use setter functions! __subsample_set::Bool = false end -################################################################################ -# GRPWrap-related functions #################################################### -################################################################################ """ Set `gprw.data` and reset `gprw.subsample` and `gprw.GPR` -- very important! 
@@ -61,7 +58,7 @@ function set_data!(gprw::Wrap, data::Array{<:Real}) idx = fill(1, ndims(data) - 2) data = data[:,:,idx...] elseif ndims(data) < 2 - throw(error("set_data!: ndims(data) < 2; cannot proceed")) + error("set_data!: ndims(data) < 2; cannot proceed") end gprw.data = data gprw.subsample = nothing @@ -104,10 +101,10 @@ This function ignores `gprw.thrsh` """ function subsample!(gprw::Wrap, thrsh::Int) if !gprw.__data_set - throw(error("subsample!: 'data' is not set, cannot sample")) + error("subsample!: 'data' is not set, cannot sample") end if thrsh == 0 - throw(error("subsample!: 'thrsh' == 0, cannot sample")) + error("subsample!: 'thrsh' == 0, cannot sample") end N = size(gprw.data,1) diff --git a/src/Histograms.jl b/src/Histograms.jl index 07b3bcb5b..74b90cfc9 100644 --- a/src/Histograms.jl +++ b/src/Histograms.jl @@ -11,15 +11,12 @@ Functions in this module: """ module Histograms -import PyCall -import NPZ -include("ConvenienceFunctions.jl") +using PyCall +using NPZ +using ..Utilities scsta = PyCall.pyimport("scipy.stats") -################################################################################ -# HistData struct ############################################################## -################################################################################ """ A simple struct to store samples for empirical PDFs (histograms, distances etc.) @@ -28,12 +25,14 @@ Functions that operate on HistData struct: - `load!` (3 methods) - `plot` (2 methods) - `plot_raw` (1 method) + +TODO: fix type restriction for Array, make sensible constructors """ -mutable struct HistData - samples::Dict{Symbol, AbstractVecOrMat} +mutable struct HistData{FT<:AbstractFloat} + samples::Dict{Symbol, Array} end -HistData() = HistData(Dict()) +HistData(FT) = HistData{FT}(Dict()) """ Load samples into a HistData object under a specific key from a vector @@ -277,8 +276,8 @@ Examples: println(k2a_combined[:bal]) ``` """ -function W1(hd::HistData, key::Symbol, k::Union{Int,UnitRange}) - key2all_combined = Dict{Symbol, Float64}() +function W1(hd::HistData{FT}, key::Symbol, k::Union{I,UnitRange}) where {FT,I<:Int} + key2all_combined = Dict{Symbol, FT}() for ki in keys(hd.samples) if ki == key continue @@ -352,8 +351,8 @@ Examples: println(k2a_vectorized[:onl] .> 0.01) ``` """ -function W1(hd::HistData, key::Symbol) - key2all_vectorized = Dict{Symbol, Union{Vector{Float64}, Float64}}() +function W1(hd::HistData{FT}, key::Symbol) where {FT} + key2all_vectorized = Dict{Symbol, Union{Vector{FT}, FT}}() for ki in keys(hd.samples) if ki == key continue diff --git a/src/MCMC_mb.jl b/src/MCMC.jl similarity index 68% rename from src/MCMC_mb.jl rename to src/MCMC.jl index db9b1071c..ca03f0058 100644 --- a/src/MCMC_mb.jl +++ b/src/MCMC.jl @@ -1,41 +1,28 @@ -module MCMC """ -Module: MCMC -------------------------------------- -Packages required: LinearAlgebra, - Statistics, - Distributions -------------------------------------- -Idea: To construct a simple MCMC object which, given a prior distribution, - will update in a Metropolis-Hastings fashion with respect to a quadratic - likelihood. It also computes acceptance ratios -------------------------------------- -Exports: MCMCObj - mcmc_sample - accept_ratio - mcmc_sample - reset_with_step! - get_posterior - find_mcmc_step! - sample_posterior! -------------------------------------- + MCMC + +To construct a simple MCMC object which, given a prior distribution, +will update in a Metropolis-Hastings fashion with respect to a quadratic +likelihood. 
It also computes acceptance ratios """ -# import Cloudy modules -include("EKI.jl") -include("Truth.jl") -include("GPEmulator.jl") -const T = Main.GPEmulator.GPObj -const pred = Main.GPEmulator.predict +module MCMC + +using ..EKI +using ..Truth +using ..GPEmulator + +const T = GPEmulator.GPObj +const pred = GPEmulator.predict # packages -using Statistics +using Statistics using Distributions using LinearAlgebra # exports export MCMCObj -export mcmc_sample! -export accept_ratio +export mcmc_sample! +export accept_ratio export reset_with_step! export get_posterior export find_mcmc_step! @@ -47,18 +34,18 @@ export sample_posterior! ##### # structure to organize MCMC parameters and data -struct MCMCObj - truth_sample::Vector{Float64} - truth_cov::Array{Float64, 2} - truth_covinv::Array{Float64, 2} +struct MCMCObj{FT<:AbstractFloat,I<:Int} + truth_sample::Vector{FT} + truth_cov::Array{FT, 2} + truth_covinv::Array{FT, 2} prior::Array - step::Array{Float64} - burnin::Int64 - param::Vector{Float64} - posterior::Array{Float64, 2} - log_posterior::Array{Union{Float64,Nothing}} - iter::Array{Int64} - accept::Array{Int64} + step::Array{FT} + burnin::I + param::Vector{FT} + posterior::Array{FT, 2} + log_posterior::Array{Union{FT,Nothing}} + iter::Array{I} + accept::Array{I} algtype::String standardized::Bool end @@ -69,12 +56,12 @@ end ##### #outer constructors -function MCMCObj(truth_sample::Vector{Float64}, truth_cov::Array{Float64, 2}, - priors::Array, step::Float64, param_init::Vector{Float64}, - max_iter::Int64, algtype::String, burnin::Int64, - standardized::Bool) - - # first row is param_init +function MCMCObj(truth_sample::Vector{FT}, truth_cov::Array{FT, 2}, + priors::Array, step::FT, param_init::Vector{FT}, + max_iter::I, algtype::String, burnin::I, + standardized::Bool) where {FT<:AbstractFloat,I<:Int} + + # first row is param_init posterior = zeros(max_iter + 1, length(param_init)) posterior[1, :] = param_init param = param_init @@ -86,13 +73,13 @@ function MCMCObj(truth_sample::Vector{Float64}, truth_cov::Array{Float64, 2}, println("only random walk metropolis 'rwm' is implemented so far") sys.exit() end - MCMCObj(truth_sample, truth_cov, truth_covinv, priors, [step], burnin, + MCMCObj{FT,I}(truth_sample, truth_cov, truth_covinv, priors, [step], burnin, param, posterior, log_posterior, iter, accept, algtype, standardized) end -function reset_with_step!(mcmc::MCMCObj, step::Float64) +function reset_with_step!(mcmc::MCMCObj{FT}, step::FT) where {FT} # reset to beginning with new stepsize mcmc.step[1] = step mcmc.log_posterior[1] = nothing @@ -103,43 +90,43 @@ function reset_with_step!(mcmc::MCMCObj, step::Float64) end -# exported functions function get_posterior(mcmc::MCMCObj) return mcmc.posterior[mcmc.burnin+1:end, :] end -function mcmc_sample!(mcmc::MCMCObj, g::Vector{Float64}, gvar::Vector{Float64}) +function mcmc_sample!(mcmc::MCMCObj{FT}, g::Vector{FT}, gvar::Vector{FT}) where {FT} if mcmc.algtype == "rwm" log_posterior = log_likelihood(mcmc, g, gvar) + log_prior(mcmc) end if mcmc.log_posterior[1] isa Nothing #do an accept step. 
mcmc.log_posterior[1] = log_posterior - log(0.5) #this makes p_accept = 0.5 - end + end p_accept = exp(log_posterior - mcmc.log_posterior[1]) if p_accept > rand(Distributions.Uniform(0, 1)) mcmc.posterior[1 + mcmc.iter[1], :] = mcmc.param mcmc.log_posterior[1] = log_posterior mcmc.accept[1] = mcmc.accept[1] + 1 - else + else mcmc.posterior[1 + mcmc.iter[1], :] = mcmc.posterior[mcmc.iter[1], :] end # get new parameters by comparing likelihood_current * prior_current to # likelihood_proposal * prior_proposal - either we accept the proposed # parameter or we stay where we are. - mcmc.param[:] = proposal(mcmc)[:] + mcmc.param[:] = proposal(mcmc)[:] mcmc.iter[1] = mcmc.iter[1] + 1 - -end # function mcmc_sample! -function accept_ratio(mcmc::MCMCObj) - return convert(Float64, mcmc.accept[1]) / mcmc.iter[1] +end + +function accept_ratio(mcmc::MCMCObj{FT}) where {FT} + return convert(FT, mcmc.accept[1]) / mcmc.iter[1] end -function log_likelihood(mcmc::MCMCObj, g::Vector{Float64}, - gvar::Vector{Float64}) +function log_likelihood(mcmc::MCMCObj{FT}, + g::Vector{FT}, + gvar::Vector{FT}) where {FT} log_rho = [0.0] if gvar == nothing diff = g - mcmc.truth_sample @@ -160,7 +147,7 @@ function log_prior(mcmc::MCMCObj) priors = mcmc.prior for (param, prior_dist) in zip(mcmc.param, priors) if mcmc.standardized - aram_std = std(prior_dist) + param_std = std(prior_dist) param_mean = mean(prior_dist) log_rho[1] += logpdf(prior_dist, param*param_std + param_mean) # get density at current parameter value else @@ -191,40 +178,41 @@ function proposal(mcmc::MCMCObj) # sample[:] = mcmc.posterior[1 + mcmc.iter[1], :] .+ rand(prop_dist) # end # end - + return sample end -function find_mcmc_step!(mcmc_test::MCMCObj, gpobj::T) +function find_mcmc_step!(mcmc_test::MCMCObj{FT}, gpobj::T) where {FT} step = mcmc_test.step[1] mcmc_accept = false doubled = false halved = false countmcmc = 0 - - println("Begin step size search") + + println("Begin step size search") println("iteration 0; current parameters ", mcmc_test.param') flush(stdout) it = 0 - while mcmc_accept == false - - param = convert(Array{Float64, 2}, mcmc_test.param') + local acc_ratio + while mcmc_accept == false + + param = convert(Array{FT, 2}, mcmc_test.param') # test predictions param' is 1xN_params gp_pred, gp_predvar = pred(gpobj, param) gp_pred = cat(gp_pred..., dims=2) gp_predvar = cat(gp_predvar..., dims=2) - + mcmc_sample!(mcmc_test, vec(gp_pred), vec(gp_predvar)) it += 1 if it % 2000 == 0 countmcmc += 1 acc_ratio = accept_ratio(mcmc_test) - println("iteration ", it, "; acceptance rate = ", acc_ratio, + println("iteration ", it, "; acceptance rate = ", acc_ratio, ", current parameters ", param) flush(stdout) - if countmcmc == 20 - println("failed to choose suitable stepsize in ", countmcmc, + if countmcmc == 20 + println("failed to choose suitable stepsize in ", countmcmc, "iterations") exit() end @@ -250,35 +238,36 @@ function find_mcmc_step!(mcmc_test::MCMCObj, gpobj::T) flush(stdout) end end - + end - return mcmc_test.step[1] -end # function find_mcmc_step! 
+ return mcmc_test.step[1], acc_ratio +end -function sample_posterior!(mcmc::MCMCObj, gpobj::T, max_iter::Int64) +function sample_posterior!(mcmc::MCMCObj{FT}, gpobj::T, max_iter::I) where {FT,I,T} println("iteration 0; current parameters ", mcmc.param') flush(stdout) - + for mcmcit in 1:max_iter - param = convert(Array{Float64, 2}, mcmc.param') + param = convert(Array{FT, 2}, mcmc.param') # test predictions param' is 1xN_params gp_pred, gp_predvar = pred(gpobj, param) gp_pred = cat(gp_pred..., dims=2) gp_predvar = cat(gp_predvar..., dims=2) - + mcmc_sample!(mcmc, vec(gp_pred), vec(gp_predvar)) - + if mcmcit % 1000 == 0 acc_ratio = accept_ratio(mcmc) - println("iteration ", mcmcit ," of ", max_iter, - "; acceptance rate = ", acc_ratio, + # TODO: Add callbacks, remove print statements + println("iteration ", mcmcit ," of ", max_iter, + "; acceptance rate = ", acc_ratio, ", current parameters ", param) flush(stdout) end end -end # function sample_posterior! +end end # module MCMC diff --git a/src/Truth_mb.jl b/src/Truth.jl similarity index 50% rename from src/Truth_mb.jl rename to src/Truth.jl index e3dbb15e2..2bf5b6eac 100644 --- a/src/Truth_mb.jl +++ b/src/Truth.jl @@ -1,10 +1,5 @@ module Truth -# Import Cloudy modules -using Cloudy.KernelTensors -using Cloudy.PDistributions -using Cloudy.Sources - # packages using LinearAlgebra using DifferentialEquations @@ -13,7 +8,7 @@ using Random using Distributions export TruthObj -export run_cloudy_truth +export run_model_truth ##### @@ -21,22 +16,23 @@ export run_cloudy_truth ##### # Structure to organize the "truth" -struct TruthObj - distr_init::PDistribution{Float64} - solution::ODESolution # full solution of Cloudy run - y_t::Array{Float64, 1} # true data (="observations") - cov::Array{Float64, 2} # covariance of the obs noise - data_names::Vector{String} +struct TruthObj{FT,D} + distr_init::D + solution::ODESolution # full solution of the model run + y_t::Array{FT, 1} # true data (="observations") + cov::Array{FT, 2} # covariance of the obs noise + data_names::Vector{String} end # Constructors -function TruthObj(distr_init::PDistribution{Float64}, - solution::ODESolution, cov::Array{Float64, 2}, - data_names::Vector{String}) +function TruthObj(distr_init::D, + solution::ODESolution, + cov::Array{FT, 2}, + data_names::Vector{String}) where {FT,D} y_t = vcat(solution.u'...)[end, :] - TruthObj(distr_init, solution, y_t, cov, data_names) + TruthObj{FT,D}(distr_init, solution, y_t, cov, data_names) end @@ -45,29 +41,39 @@ end ##### """ -function run_cloudy_truth(kernel, dist, moments, tspan, data_names, obs_noise) - -- `kernel` - Cloudy KernelTensor defining particle interactions -- `dist` - Cloudy.PDistribution defining the intial particle mass distirbution -- `moments` - Moments of dist to collect in Cloudy as they evolve over time, + run_model_truth(kernel::KT, + dist::D, + moments::Array{Float64}, + tspan::Tuple{Float64, Float64}, + data_names::Vector{String}, + obs_noise::Union{Array{Float64, 2}, Nothing}, + moment, + get_src; + rng_seed=1234) where {D,KT} + +- `kernel` - Model KernelTensor defining particle interactions +- `dist` - Distribution defining the initial particle mass distribution +- `moments` - Moments of dist to collect in the model as they evolve over time, e.g. [0.0, 1.0, 2.0] for the 0th, 1st and 2nd moment -- `tspan` - Time period over which Cloudy is run, e.g. (0.0, 1.0) +- `tspan` - Time period over which the model is run, e.g. (0.0, 1.0) - `data_names` - Names of the data/moments to collect, e.g. 
["M0", "M1", "M2"] - `obs_noise` - Covariance of the observational noise (assumed to be normally - distributed with mean zero). Will be used as an attribute of - the returned TruthObj. If obs_noise is Nothing, it will be - defined within run_cloudy_truth based on draws from a normal + distributed with mean zero). Will be used as an attribute of + the returned TruthObj. If obs_noise is Nothing, it will be + defined within run_model_truth based on draws from a normal distribution with mean 0 and standard deviation 2. Returns a TruthObj. """ -function run_cloudy_truth(kernel::KernelTensor{Float64}, - dist::PDistribution{Float64}, - moments::Array{Float64}, - tspan::Tuple{Float64, Float64}, +function run_model_truth(kernel::KT, + dist::D, + moments::Array{Float64}, + tspan::Tuple{Float64, Float64}, data_names::Vector{String}, - obs_noise::Union{Array{Float64, 2}, Nothing}; - rng_seed=1234) + obs_noise::Union{Array{Float64, 2}, Nothing}, + moment, + get_src; + rng_seed=1234) where {D,KT} # Numerical parameters tol = 1e-7 @@ -75,11 +81,11 @@ function run_cloudy_truth(kernel::KernelTensor{Float64}, n_moments = length(moments) moments_init = fill(NaN, length(moments)) for (i, mom) in enumerate(moments) - moments_init[i] = PDistributions.moment(dist, convert(Float64, mom)) + moments_init[i] = moment(dist, convert(Float64, mom)) end # Set up ODE problem: dM/dt = f(M,p,t) - rhs(M, p, t) = get_src_coalescence(M, dist, kernel) + rhs(M, p, t) = get_src(M, dist, kernel) prob = ODEProblem(rhs, moments_init, tspan) # Solve the ODE diff --git a/src/Utilities.jl b/src/Utilities.jl new file mode 100644 index 000000000..6d1f01d1c --- /dev/null +++ b/src/Utilities.jl @@ -0,0 +1,11 @@ +module Utilities + +export RPAD, name, warn + +const RPAD = 25 + +name(_name::AbstractString) = rpad(_name * ":", RPAD) + +warn(_name::AbstractString) = rpad("WARNING (" * _name * "):", RPAD) + +end \ No newline at end of file diff --git a/test/Cloudy/runtests.jl b/test/Cloudy/runtests.jl new file mode 100644 index 000000000..701aeab4d --- /dev/null +++ b/test/Cloudy/runtests.jl @@ -0,0 +1,229 @@ + +# Import Cloudy modules +using Pkg; Pkg.add(PackageSpec(url="https://github.com/climate-machine/Cloudy.jl")) +using Cloudy +using Cloudy.KernelTensors +PDistributions = Cloudy.Distributions +Pkg.add("Plots") + +# Import modules +using JLD2 # saving and loading Julia arrays +using Distributions # probability distributions and associated functions +using StatsBase +using LinearAlgebra +using StatsPlots + +# Import Calibrate-Emulate-Sample modules +using CalibrateEmulateSample.EKI +using CalibrateEmulateSample.Truth +using CalibrateEmulateSample.GPEmulator +using CalibrateEmulateSample.MCMC + + +# Define the data from which we want to learn +data_names = ["M0", "M1", "M2"] +moments = [0.0, 1.0, 2.0] +n_moments = length(moments) +param_names = ["N0", "θ", "k"] + +FT = Float64 + +# Define the true parameters and distribution as well as the +# priors of the parameters +N0_true = 3650. 
+θ_true = 4.1 +k_true = 2.0 +priors = [Distributions.Normal(3000., 500.), # prior on N0 + Distributions.Normal(6.0, 2.0), # prior on θ + Distributions.Normal(3.0, 1.5)] # prior on k + +get_src = Cloudy.Sources.get_int_coalescence +# We assume that the true particle mass distribution is a +# Gamma distribution with parameters N0_true, θ_true, k_true +# Note that dist_true has to be a Cloudy distribution, not a +# "regular" Julia distribution +dist_true = PDistributions.Gamma(N0_true, θ_true, k_true) + +# Collision-coalescence kernel to be used in Cloudy +coalescence_coeff = 1/3.14/4 +# kernel = ConstantCoalescenceTensor(coalescence_coeff) +kernel_func = x -> coalescence_coeff +kernel = CoalescenceTensor(kernel_func, 0, 100.0) + +# Time period over which to run Cloudy +tspan = (0., 0.5) + +# Generate the truth (a Truth.TruthObj) +truth = Truth.run_model_truth(kernel, + dist_true, + moments, + tspan, + data_names, + nothing, + PDistributions.moment, + get_src) + +log_transform(a::AbstractArray) = log.(a) +exp_transform(a::AbstractArray) = exp.(a) + +N_ens = 50 # number of ensemble members +N_iter = 5 # number of EKI iterations +# initial parameters: N_ens x N_params +initial_params = EKI.construct_initial_ensemble(N_ens, priors; rng_seed=5) +# Note: For the model G (=Cloudy) to run, N0 needs to be +# nonnegative, and θ and k need to be positive. +# The EKI update can result in violations of these constraints - +# therefore, we log-transform the initial ensemble, perform all +# EKI operations in log space and run G with the exponentiated +# parameters, which ensures positivity. +ekiobj = EKI.EKIObj(log_transform(initial_params), + param_names, truth.y_t, truth.cov) + +# Initialize a CDistribution with dummy parameters +# The parameters will then be set in run_model_ensemble +dummy = 1.0 +dist_type = PDistributions.Gamma(dummy, dummy, dummy) + +# EKI iterations +for i in 1:N_iter + # Note the exp-transform to ensure positivity of the + # parameters + g_ens = EKI.run_model_ensemble(kernel, + dist_type, + exp_transform(ekiobj.u[end]), + moments, + tspan, + PDistributions.moment, + PDistributions.update_params, + get_src) + EKI.update_ensemble!(ekiobj, g_ens) +end + +# EKI results: Has the ensemble collapsed toward the truth? 
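+# The ensemble members are stored in log space throughout the EKI iterations, +# so the final ensemble is exp-transformed below before its mean is compared +# against the true values N0_true, θ_true and k_true.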
+true_params = PDistributions.get_params(truth.distr_init) +println("True parameters: ") +println(true_params) + +println("\nEKI results:") +println(mean(exp_transform(ekiobj.u[end]), dims=1)) + +using Plots +gr() + +# p = plot(exp_transform(ekiobj.u[1][:, 2]), +# exp_transform(ekiobj.u[1][:, 3]), +# seriestype=:scatter) +# for i in 2:N_iter +# plot!(exp_transform(ekiobj.u[i][:, 2]), +# exp_transform(ekiobj.u[i][:, 3]), +# seriestype=:scatter) +# end + +# plot!([θ_true], xaxis="theta", yaxis="k", seriestype="vline", +# linestyle=:dash, linecolor=:red) +# plot!([k_true], seriestype="hline", linestyle=:dash, linecolor=:red) + + +# savefig(p, "EKI_test.png") + +gppackage = "gp_jl" +yt, yt_cov, yt_covinv, log_u_tp, g_tp = GPEmulator.extract(truth, ekiobj, N_iter) +u_tp = exp_transform(log_u_tp) + +# Standardize both the data and the parameters +data_mean = vec(mean(g_tp, dims=1)) +data_std = vec(std(g_tp, dims=1)) +g_tp_zscore = GPEmulator.orig2zscore(g_tp, data_mean, data_std) + +param_mean = [mean(prior) for prior in priors] +param_std = [std(prior) for prior in priors] +u_tp_zscore = GPEmulator.orig2zscore(u_tp, param_mean, param_std) + +# Fit a Gaussian Process regression to the training points +gpobj = GPEmulator.emulate(u_tp_zscore, g_tp_zscore, gppackage) + +# Check how well the Gaussian Process regression predicts on the +# (standardized) true parameters +u_true_zscore = GPEmulator.orig2zscore([N0_true θ_true k_true], + param_mean, param_std) + +y_mean, y_var = GPEmulator.predict(gpobj, reshape(u_true_zscore, 1, :)) +y_mean = cat(y_mean..., dims=2) +y_var = cat(y_var..., dims=2) + +println("GP prediction on true parameters: ") +println(vec(y_mean) .* data_std + data_mean) +println("true data: ") +println(truth.y_t) + +# initial values +u0 = vec(mean(u_tp_zscore, dims=1)) +println("initial parameters: ", u0) + +# MCMC parameters +mcmc_alg = "rwm" # random walk Metropolis + +# First let's run a short chain to determine a good step size +burnin = 0 +step = 0.1 # first guess +yt_zscore = GPEmulator.orig2zscore(yt, data_mean, data_std) +#yt_cov_zscore = GPEmulator.orig2zscore(yt_cov, data_mean, data_std) + +max_iter = 5000 +standardized = true # we are using z scores +mcmc_test = MCMC.MCMCObj(yt_zscore, yt_cov./maximum(yt_cov), + priors, step, u0, + max_iter, mcmc_alg, burnin, standardized) +new_step, acc_ratio = MCMC.find_mcmc_step!(mcmc_test, gpobj) + +# Now begin the actual MCMC +println("Begin MCMC - with step size ", new_step) + +# reset parameters +burnin = 1000 +max_iter = 100000 +mcmc = MCMC.MCMCObj(yt_zscore, yt_cov./maximum(yt_cov), priors, + new_step, u0, max_iter, mcmc_alg, burnin, + standardized) +MCMC.sample_posterior!(mcmc, gpobj, max_iter) + +posterior = MCMC.get_posterior(mcmc) +post_mean = mean(posterior, dims=1) +post_cov = cov(posterior, dims=1) +println("post_mean") +println(post_mean) +println("post_cov") +println(post_cov) +println("D util") +println(det(inv(post_cov))) +println(" ") + +# Plot the posteriors together with the priors and the true +# parameter values + +true_values = [N0_true θ_true k_true] +n_params = length(true_values) + +for idx in 1:n_params + if idx == 1 + param = "N0" + xs = collect(0:1:4000) + elseif idx == 2 + param = "Theta" + xs = collect(0:0.01:12.0) + elseif idx == 3 + param = "k" + xs = collect(0:0.001:6.0) + else + error("not implemented") + end + + label = "true " * param + histogram(posterior[:, idx] .* param_std[idx] .+ param_mean[idx], + bins=100, normed=true, fill=:slategray, lab="posterior") + plot!(xs, mcmc.prior[idx], w=2.6, 
color=:blue, lab="prior") + plot!([true_values[idx]], seriestype="vline", w=2.6, lab=label) + + title!(param) + StatsPlots.savefig("posterior_"*param*".png") +end diff --git a/test/ConvenienceFunctions/runtests.jl b/test/ConvenienceFunctions/runtests.jl deleted file mode 100644 index 7eb7c89d6..000000000 --- a/test/ConvenienceFunctions/runtests.jl +++ /dev/null @@ -1,19 +0,0 @@ -using Test - -include(joinpath("..","..","src","ConvenienceFunctions.jl")) - -################################################################################ -# unit testing ################################################################# -################################################################################ -@testset "unit testing" begin - @test isdefined(Main, :RPAD) - @test length(name("a")) == RPAD - @test length(name("a" ^ RPAD)) == (RPAD + 1) - @test length(warn("a")) == RPAD - @test length(warn("a" ^ RPAD)) == (RPAD + 11) - @test isa(name("a"), String) - @test isa(warn("a"), String) -end -println("") - - diff --git a/test/EKS/runtests.jl b/test/EKS/runtests.jl new file mode 100644 index 000000000..30ff2ac90 --- /dev/null +++ b/test/EKS/runtests.jl @@ -0,0 +1,37 @@ +using Test +using CalibrateEmulateSample +using LinearAlgebra, Distributions + +@testset "EKS" begin + + for FT in [Float32, Float64] + Σ = 100.0^2*Matrix{FT}(I,2,2) + μ = [3.0,5.0] + prior = MvNormal(μ, Σ) + + A = hcat(ones(10), [i == 1 ? 1.0 : 0.0 for i = 1:10]) + f(x) = A*x + + u_star = [-1.0,2.0] + y_obs = A*u_star + + Γ = 0.1*Matrix{FT}(I,10,10) + + prob = CalibrateEmulateSample.CESProblem(prior, f, y_obs, CalibrateEmulateSample.CovarianceSpace(Γ)) + + J = 50 + θs = [rand(2) for i in 1:J] + + for i = 1:20 + fθs = map(f, θs) + θs = CalibrateEmulateSample.eks_iter(prob, θs, fθs) + end + + postΣ = inv(inv(Σ) + A'*inv(Γ)*A) + postμ = postΣ*(inv(Σ)*μ + A'*inv(Γ)*y_obs) + + + @test mean(θs) ≈ postμ atol=2e-1 + @test cov(θs) ≈ postΣ atol=5e-1 + end +end diff --git a/test/GPR/runtests.jl b/test/GPR/runtests.jl index 78a68710e..162c794e3 100644 --- a/test/GPR/runtests.jl +++ b/test/GPR/runtests.jl @@ -1,6 +1,6 @@ using Test -import NPZ -import LinearAlgebra +using NPZ +using LinearAlgebra include(joinpath("..","..","src","GPR.jl")) @@ -17,25 +17,21 @@ const matern_05_std = NPZ.npzread(joinpath(data_dir, "matern_05_std.npy")) const inf_norm = x -> LinearAlgebra.norm(x, Inf) -################################################################################ -# unit testing ################################################################# -################################################################################ X = 0:0.1:1 y = X.^2 xmesh = 0:0.01:1 gprw = GPR.Wrap() -@testset "struct testing" begin +@testset "GPR: structs" begin @test !gprw.__data_set @test !gprw.__subsample_set @test gprw.data == nothing @test gprw.subsample == nothing @test gprw.GPR == nothing end -println("") thrsh = gprw.thrsh -@testset "methods testing" begin +@testset "GPR: methods" begin ex_thrown1 = false try GPR.subsample!(gprw) @@ -105,14 +101,10 @@ thrsh = gprw.thrsh @test gprw.thrsh == thrsh end -println("") -################################################################################ -# tests w/ non-synthetic data ################################################## -################################################################################ GPR.set_data!(gprw, gpr_data) gprw.thrsh = -1 -@testset "non-synthetic testing" begin +@testset "GPR: non-synthetic data" begin GPR.learn!(gprw) mesh, mean, std = GPR.mmstd(gprw) @test size(gprw.data,1) == 800 
diff --git a/test/GPR/runtests.jl b/test/GPR/runtests.jl
index 78a68710e..162c794e3 100644
--- a/test/GPR/runtests.jl
+++ b/test/GPR/runtests.jl
@@ -1,6 +1,6 @@
 using Test
-import NPZ
-import LinearAlgebra
+using NPZ
+using LinearAlgebra
 
 include(joinpath("..","..","src","GPR.jl"))
 
@@ -17,25 +17,21 @@ const matern_05_std = NPZ.npzread(joinpath(data_dir, "matern_05_std.npy"))
 
 const inf_norm = x -> LinearAlgebra.norm(x, Inf)
 
-################################################################################
-# unit testing #################################################################
-################################################################################
 X = 0:0.1:1
 y = X.^2
 xmesh = 0:0.01:1
 
 gprw = GPR.Wrap()
-@testset "struct testing" begin
+@testset "GPR: structs" begin
     @test !gprw.__data_set
     @test !gprw.__subsample_set
     @test gprw.data == nothing
     @test gprw.subsample == nothing
     @test gprw.GPR == nothing
 end
-println("")
 
 thrsh = gprw.thrsh
-@testset "methods testing" begin
+@testset "GPR: methods" begin
     ex_thrown1 = false
     try
         GPR.subsample!(gprw)
@@ -105,14 +101,10 @@ thrsh = gprw.thrsh
     @test gprw.thrsh == thrsh
 end
-println("")
 
-################################################################################
-# tests w/ non-synthetic data ##################################################
-################################################################################
 GPR.set_data!(gprw, gpr_data)
 gprw.thrsh = -1
-@testset "non-synthetic testing" begin
+@testset "GPR: non-synthetic data" begin
     GPR.learn!(gprw)
     mesh, mean, std = GPR.mmstd(gprw)
     @test size(gprw.data,1) == 800
@@ -131,6 +123,3 @@ gprw.thrsh = -1
     @test isapprox(mean, matern_05_mean, atol=1e-3, norm=inf_norm)
     @test isapprox(std, matern_05_std, atol=1e-3, norm=inf_norm)
 end
-println("")
-
-
diff --git a/test/Histograms/runtests.jl b/test/Histograms/runtests.jl
index 9c02047c5..486ce43f8 100644
--- a/test/Histograms/runtests.jl
+++ b/test/Histograms/runtests.jl
@@ -1,5 +1,5 @@
 using Test
-import NPZ
+using NPZ
 
 include(joinpath("..","..","src","Histograms.jl"))
 const Hgm = Histograms
@@ -20,10 +20,7 @@ const w1_dns_bal_comb = 0.818516185308166
 const w1_bal_onl_x2 = 0.8781673689588835
 const w1_bal_onl_comb = 0.8529846357976019
 
-################################################################################
-# unit testing #################################################################
-################################################################################
-@testset "W1 testing" begin
+@testset "Histograms: W1" begin
     arr1 = [1, 1, 1, 2, 3, 4, 4, 4]
     arr2 = [1, 1, 2, 2, 3, 3, 4, 4, 4]
     @test Hgm.W1(arr1, arr2; normalize = false) == 0.25
@@ -40,8 +37,9 @@ const w1_bal_onl_comb = 0.8529846357976019
     @test size(Hgm.W1(rand(9,100), rand(3,100))) == (3,)
 end
 
-@testset "HistData testing" begin
-    hd = Hgm.HistData()
+@testset "Histograms: HistData" begin
+    FT = typeof(1.0)
+    hd = Hgm.HistData(FT)
     Hgm.load!(hd, :unif, rand(3,100000))
     Hgm.load!(hd, :norm, randn(3,100000))
     w1_rand = Hgm.W1(hd, :unif, :norm)
@@ -74,6 +72,4 @@ end
     @test isapprox(Hgm.W1(hd, :dns, :bal, 1:2), w1_dns_bal_comb)
 end
-println("")
-
diff --git a/test/L96m/runtests.jl b/test/L96m/runtests.jl
index 373856725..05b609ba9 100644
--- a/test/L96m/runtests.jl
+++ b/test/L96m/runtests.jl
@@ -1,12 +1,12 @@
 using Test
-import DifferentialEquations
-import NPZ
+using DifferentialEquations
+using NPZ
+using CalibrateEmulateSample.Utilities
 
 const DE = DifferentialEquations
 
 include(joinpath("..","..","examples","L96m","L96m.jl"))
 
-const RPAD = 25
 const LPAD_INTEGER = 7
 const LPAD_FLOAT = 13
 
 l96 = L96m()
@@ -22,10 +22,7 @@ const dp5_test = NPZ.npzread(joinpath(data_dir, "dp5_full.npy"))
 const tp8_test = NPZ.npzread(joinpath(data_dir, "tsitpap8_full.npy"))
 const bal_test = NPZ.npzread(joinpath(data_dir, "dp5_bal.npy"))
 
-################################################################################
-# unit testing #################################################################
-################################################################################
-@testset "unit testing" begin
+@testset "L96: unit tests" begin
     Yk_comp = compute_Yk(l96, z0)
     @test isapprox(Yk_test, Yk_comp, atol=1e-15)
@@ -45,7 +42,7 @@ const bal_test = NPZ.npzread(joinpath(data_dir, "dp5_bal.npy"))
     regressed(regressed_comp, z0[1:l96.K], l96, 0.0)
     @test isapprox(regressed_test, regressed_comp, atol=1e-15)
 
-    @testset "G0 testing" begin
+    @testset "L96: G0 tests" begin
         v1 = Vector(1.0:20.0)
         v2 = Vector(1:3:21)
         set_G0(l96)
@@ -82,7 +79,6 @@ const bal_test = NPZ.npzread(joinpath(data_dir, "dp5_bal.npy"))
     @test isapprox(gather_pairs(l96, dp5_test), dp5_pairs)
 end
-println("")
 
 ################################################################################
 # integration testing ##########################################################
 ################################################################################
@@ -127,11 +123,10 @@ end
 println("steps:", lpad(length(sol_reg.t), LPAD_INTEGER),
         "\telapsed:", lpad(elapsed_reg, LPAD_FLOAT))
 
-@testset "integration testing" begin
+@testset "L96: integration tests" begin
     @test isapprox(sol_dp5[:,1:end], dp5_test, atol=1e-3)
     @test isapprox(sol_tp8[:,1:end], tp8_test, atol=1e-3)
     @test isapprox(sol_bal[:,1:end], bal_test, atol=1e-5)
     @test isapprox(sol_reg[:,1:end], bal_test, atol=1e-5) # reg + G0 == bal
 end
-println("")
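
For reference, the `W1` quantities exercised in the Histograms testset above are one-dimensional Wasserstein-1 distances between empirical samples. A minimal sketch of the unnormalized case, written as a hypothetical stand-in for `Histograms.W1` (the package version also supports normalization and multi-row inputs):

    # Wasserstein-1 distance between two 1-d empirical samples, computed as
    # the integral of the absolute difference of their empirical CDFs.
    function w1(a::AbstractVector, b::AbstractVector)
        xs = sort(unique(vcat(a, b)))
        F(s, x) = count(<=(x), s) / length(s)
        sum(abs(F(a, xs[i]) - F(b, xs[i])) * (xs[i+1] - xs[i]) for i in 1:length(xs)-1)
    end

    # Reproduces the first assertion in the testset:
    arr1 = [1, 1, 1, 2, 3, 4, 4, 4]
    arr2 = [1, 1, 2, 2, 3, 3, 4, 4, 4]
    @assert w1(arr1, arr2) ≈ 0.25
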
diff --git a/test/Utilities/runtests.jl b/test/Utilities/runtests.jl
new file mode 100644
index 000000000..d5602383c
--- /dev/null
+++ b/test/Utilities/runtests.jl
@@ -0,0 +1,12 @@
+using Test
+
+using CalibrateEmulateSample.Utilities
+
+@testset "Utilities" begin
+    @test length(name("a")) == RPAD
+    @test length(name("a" ^ RPAD)) == (RPAD + 1)
+    @test length(warn("a")) == RPAD
+    @test length(warn("a" ^ RPAD)) == (RPAD + 11)
+    @test isa(name("a"), String)
+    @test isa(warn("a"), String)
+end
diff --git a/test/eks.jl b/test/eks.jl
deleted file mode 100644
index 537e657d1..000000000
--- a/test/eks.jl
+++ /dev/null
@@ -1,35 +0,0 @@
-using Test
-using CalibrateEmulateSample
-using LinearAlgebra, Distributions
-
-@testset "EKS" begin
-
-    Σ = 100.0^2*Matrix{Float64}(I,2,2)
-    μ = [3.0,5.0]
-    prior = MvNormal(μ, Σ)
-
-    A = hcat(ones(10), [i == 1 ? 1.0 : 0.0 for i = 1:10])
-    f(x) = A*x
-
-    u_star = [-1.0,2.0]
-    y_obs = A*u_star
-
-    Γ = 0.1*Matrix{Float64}(I,10,10)
-
-    prob = CalibrateEmulateSample.CESProblem(prior, f, y_obs, CalibrateEmulateSample.CovarianceSpace(Γ))
-
-    J = 50
-    θs = [rand(2) for i in 1:J]
-
-    for i = 1:20
-        fθs = map(f, θs)
-        θs = CalibrateEmulateSample.eks_iter(prob, θs, fθs)
-    end
-
-    postΣ = inv(inv(Σ) + A'*inv(Γ)*A)
-    postμ = postΣ*(inv(Σ)*μ + A'*inv(Γ)*y_obs)
-
-
-    @test mean(θs) ≈ postμ atol=2e-1
-    @test cov(θs) ≈ postΣ atol=5e-1
-end
diff --git a/test/runtests.jl b/test/runtests.jl
index 67ebbefa4..5775a955e 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -2,12 +2,13 @@ using Test
 using CalibrateEmulateSample
 
 ENV["JULIA_LOG_LEVEL"] = "WARN"
-include("eks.jl")
-
-for submodule in ["L96m",
+for submodule in [
+                  "Utilities",
+                  "L96m",
                   "GPR",
+                  "EKS",
                   "Histograms",
-                  "ConvenienceFunctions",
+                  "Cloudy",
                  ]
 
     println("Starting tests for $submodule")
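
With `test/eks.jl` deleted and its contents moved under `test/EKS/`, the driver presumably includes each submodule's own `runtests.jl` inside the loop. The hunk above ends at the `println`, so the loop body sketched here is an assumption about the surrounding context, not part of the patch:

    using Test
    using CalibrateEmulateSample

    for submodule in ["Utilities", "L96m", "GPR", "EKS", "Histograms", "Cloudy"]
        println("Starting tests for $submodule")
        # assumed layout: each submodule keeps its tests in test/<submodule>/runtests.jl
        include(joinpath(submodule, "runtests.jl"))
    end
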