diff --git a/src/essential/ad.jl b/src/essential/ad.jl
index 4f07c1eab..5dda65d0f 100644
--- a/src/essential/ad.jl
+++ b/src/essential/ad.jl
@@ -118,7 +118,7 @@ end
 for cache in (:true, :false)
     @eval begin
         function LogDensityProblemsAD.ADgradient(::ReverseDiffAD{$cache}, ℓ::Turing.LogDensityFunction)
-            return LogDensityProblemsAD.ADgradient(Val(:ReverseDiff), ℓ; compile=Val($cache))
+            return LogDensityProblemsAD.ADgradient(Val(:ReverseDiff), ℓ; compile=Val($cache), x=DynamicPPL.getparams(ℓ))
         end
     end
 end
diff --git a/test/essential/ad.jl b/test/essential/ad.jl
index c00f76f12..359101257 100644
--- a/test/essential/ad.jl
+++ b/test/essential/ad.jl
@@ -198,4 +198,18 @@
             end
         end
     end
+
+    @testset "ReverseDiff compiled without linking" begin
+        f = DynamicPPL.LogDensityFunction(gdemo_default)
+        θ = DynamicPPL.getparams(f)
+
+        f_rd = LogDensityProblemsAD.ADgradient(Turing.Essential.ReverseDiffAD{false}(), f)
+        f_rd_compiled = LogDensityProblemsAD.ADgradient(Turing.Essential.ReverseDiffAD{true}(), f)
+
+        ℓ, ℓ_grad = LogDensityProblems.logdensity_and_gradient(f_rd, θ)
+        ℓ_compiled, ℓ_grad_compiled = LogDensityProblems.logdensity_and_gradient(f_rd_compiled, θ)
+
+        @test ℓ == ℓ_compiled
+        @test ℓ_grad == ℓ_grad_compiled
+    end
 end
diff --git a/test/optimisation/OptimInterface.jl b/test/optimisation/OptimInterface.jl
index 2418037a4..5ac338876 100644
--- a/test/optimisation/OptimInterface.jl
+++ b/test/optimisation/OptimInterface.jl
@@ -151,6 +151,9 @@ end
         DynamicPPL.TestUtils.demo_dot_assume_observe_submodel,
         DynamicPPL.TestUtils.demo_dot_assume_dot_observe_matrix,
         DynamicPPL.TestUtils.demo_dot_assume_matrix_dot_observe_matrix,
+        DynamicPPL.TestUtils.demo_assume_submodel_observe_index_literal,
+        DynamicPPL.TestUtils.demo_dot_assume_observe_index_literal,
+        DynamicPPL.TestUtils.demo_assume_matrix_dot_observe_matrix
     ]
     @testset "MLE for $(model.f)" for model in DynamicPPL.TestUtils.DEMO_MODELS
        result_true = DynamicPPL.TestUtils.likelihood_optima(model)
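
A minimal usage sketch (not part of the patch) of what the src/essential/ad.jl change does: forwarding x=DynamicPPL.getparams(ℓ) lets LogDensityProblemsAD record the compiled ReverseDiff tape at the model's current (unlinked) parameter values, which is what the new test compares against the uncompiled gradient. The model body below is a hypothetical stand-in for gdemo_default from Turing's test suite; every API call mirrors one appearing in the diff above.

    using Turing, DynamicPPL, LogDensityProblems, LogDensityProblemsAD, ReverseDiff

    # Hypothetical stand-in for `gdemo_default` from Turing's test suite.
    @model function gdemo(x, y)
        s² ~ InverseGamma(2, 3)
        m ~ Normal(0, sqrt(s²))
        x ~ Normal(m, sqrt(s²))
        y ~ Normal(m, sqrt(s²))
    end

    f = DynamicPPL.LogDensityFunction(gdemo(1.5, 2.0))
    θ = DynamicPPL.getparams(f)  # current parameter values backing `f`

    # With the patch, the compiled tape is recorded at θ via the new `x` keyword.
    ∇f_compiled = LogDensityProblemsAD.ADgradient(Turing.Essential.ReverseDiffAD{true}(), f)
    ∇f_plain = LogDensityProblemsAD.ADgradient(Turing.Essential.ReverseDiffAD{false}(), f)

    # Both backends should return the same log density and gradient at θ.
    LogDensityProblems.logdensity_and_gradient(∇f_compiled, θ)
    LogDensityProblems.logdensity_and_gradient(∇f_plain, θ)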