Skip to content

Commit

Permalink
Merge pull request #803 from kishore-nori/AD-for-SkeletonIntegration
Browse files Browse the repository at this point in the history
Extended the `jacobian` to functionals involving Skeleton terms
  • Loading branch information
amartinhuertas authored Jun 22, 2022
2 parents 78b27c4 + 8b7d3ba commit 02d96ad
Show file tree
Hide file tree
Showing 3 changed files with 130 additions and 13 deletions.
3 changes: 2 additions & 1 deletion NEWS.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## Unreleased

### Added
- Added functionality to take gradient of functional involving integration (`DomainContribution`) over Skeleton faces, with respect to the degrees-of-freedom `FEFunction`. The interface remains the same - `gradient(f,uh)`. Since PR [#797](https://github.com/gridap/Gridap.jl/pull/797)
- Added functionality to take gradient of functional involving integration (`DomainContribution`) over Skeleton faces, with respect to the degrees-of-freedom of `FEFunction`. The interface remains the same - `gradient(f,uh)`. Since PR [#797](https://github.com/gridap/Gridap.jl/pull/797)
- Extended the `MultiField` functional gradient (with respect to degrees-of-freedom of `MultiFieldFEFunction`) to functionals involving Skeleton integration. The interface remains the same `gradient(f,xh)`. Since PR [#799](https://github.com/gridap/Gridap.jl/pull/799)
- Added functionality to take jacobian of functional involving integration (`DomainContribution`) over Skeleton faces (obtained from testing bilinear form with the whole set of test `fe_basis`), with respect to the degrees-of-freedom of `FEFunction`. The interface remains the same - `jacobian(f,uh)`. Since PR [#803](https://github.com/gridap/Gridap.jl/pull/803)

### Fixed
- Fixed the behavior of `gradient` for functionals involving operations of `CellFields` inside `mean` and `jump` terms of Skeleton Integration terms. Since PR [#800](https://github.com/gridap/Gridap.jl/pull/800)
Expand Down
58 changes: 50 additions & 8 deletions src/FESpaces/FEAutodiff.jl
Original file line number Diff line number Diff line change
Expand Up @@ -165,9 +165,7 @@ end
Gridap, where we can use or import required functionalities from other
modules without any circular dependency
=#
function autodiff_array_gradient(
a, i_to_x, j_to_i::SkeletonPair)

function autodiff_array_gradient(a, i_to_x, j_to_i::SkeletonPair)
i_to_xdual = lazy_map(DualizeMap(ForwardDiff.gradient),i_to_x)

# dual output of both sides at once
Expand All @@ -191,12 +189,56 @@ function autodiff_array_gradient(
lazy_map(BlockMap(2,[1,2]),j_to_result_plus,j_to_result_minus)
end

function autodiff_array_jacobian(
a,i_to_x,j_to_i::SkeletonPair)
@notimplemented
# Cell-wise Jacobian (via ForwardDiff) of a residual `a` whose DomainContribution
# includes Skeleton-face terms.
#
# `i_to_x` holds the cell-wise free dof values, and `j_to_i::SkeletonPair` maps
# each skeleton face `j` to its neighboring cell `i` on the plus and minus sides.
# Returns, for each face, a 2x2 `MatrixBlock` holding the four Jacobian blocks
# that couple plus/minus test dofs with plus/minus trial dofs.
function autodiff_array_jacobian(a,i_to_x,j_to_i::SkeletonPair)
# Seed the cell dof values with dual numbers configured for jacobians
i_to_xdual = lazy_map(DualizeMap(ForwardDiff.jacobian),i_to_x)
# Evaluating the residual at the dualized input yields one contribution per
# side of the skeleton faces
j_to_ydual_plus, j_to_ydual_minus = a(i_to_xdual)

# Bilinear form when tested with test basis functions dv results in
# DomainContribution containing vector of 2-block `VectorBlock{Vector}`
# each block coming from plus side and minus side of dv.
# So we densify each of the `VectorBlock` into plain vectors and construct
# back the jacobian contributions into a MatrixBlock

densify = DensifyInnerMostBlockLevelMap()
j_to_ydual_plus_dense = lazy_map(densify, j_to_ydual_plus)
j_to_ydual_minus_dense = lazy_map(densify, j_to_ydual_minus)

# Work for plus side: restrict dof values to the plus-side cells and extract
# the dual partials into a dense per-face jacobian
j_to_x_plus = lazy_map(Reindex(i_to_x),j_to_i.plus)
j_to_cfg_plus = lazy_map(ConfigMap(ForwardDiff.jacobian),j_to_x_plus)
j_to_result_plus_dense = lazy_map(AutoDiffMap(ForwardDiff.jacobian),
j_to_ydual_plus_dense,j_to_x_plus,j_to_cfg_plus)

# Work for minus side: same as above for the minus-side cells
j_to_x_minus = lazy_map(Reindex(i_to_x),j_to_i.minus)
j_to_cfg_minus = lazy_map(ConfigMap(ForwardDiff.jacobian),j_to_x_minus)
j_to_result_minus_dense = lazy_map(AutoDiffMap(ForwardDiff.jacobian),
j_to_ydual_minus_dense,j_to_x_minus,j_to_cfg_minus)

# j_to_result_plus_dense/j_to_result_minus_dense can be (and must be)
# laid out into 2x2 block matrices; rows of each dense jacobian are the test
# dofs stacked plus-first, columns are the trial dofs of the differentiated side
num_dofs_x_cell = lazy_map(length,i_to_x)
num_dofs_x_face_and_cell_plus = lazy_map(Reindex(num_dofs_x_cell),j_to_i.plus)
num_dofs_x_face_and_cell_minus = lazy_map(Reindex(num_dofs_x_cell),j_to_i.minus)

# NOTE(review): the row-split point for J_21/J_22 is taken from the minus-side
# dof count, while the first rows of the dense result correspond to the
# plus-side test dofs. This is consistent only when the plus and minus cells
# have equal numbers of dofs (true for the conforming single-reffe meshes
# tested here) — confirm before relying on it for mixed local spaces.
J_11 = lazy_map((x,b)->view(x, 1:b,:),
j_to_result_plus_dense,num_dofs_x_face_and_cell_plus)

J_21 = lazy_map((x,b)->view(x, b+1:size(x,1),:),
j_to_result_plus_dense,num_dofs_x_face_and_cell_minus)

J_12 = lazy_map((x,b)->view(x, 1:b,:),
j_to_result_minus_dense,num_dofs_x_face_and_cell_plus)

J_22 = lazy_map((x,b)->view(x, b+1:size(x,1),:),
j_to_result_minus_dense,num_dofs_x_face_and_cell_minus)

# Assembly on SkeletonTriangulation expects an array of facets where each
# entry is a 2x2-block MatrixBlock with the blocks of the Jacobian matrix
bm_jacobian = BlockMap((2,2),[(1,1),(2,1),(1,2),(2,2)])
lazy_map(bm_jacobian, J_11, J_21, J_12, J_22)
end

function autodiff_array_hessian(
a,i_to_x,i_to_j::SkeletonPair)
# Hessian counterpart of `autodiff_array_gradient`/`autodiff_array_jacobian`
# for functionals with Skeleton-face contributions. Second-order AD across the
# plus/minus block structure is not supported yet.
function autodiff_array_hessian(a,i_to_x,i_to_j::SkeletonPair)
@notimplemented
end
82 changes: 78 additions & 4 deletions test/FESpacesTests/FEAutodiffTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ using Gridap.TensorValues
using Gridap.CellData
using Gridap.ReferenceFEs
using ForwardDiff
using SparseArrays

domain = (0,1,0,1)
partition = (2,2)
Expand Down Expand Up @@ -94,9 +95,9 @@ cell_j_auto = get_array(jacobian(u->res(u,dv),uh))

test_array(cell_j_auto,cell_j,≈)

# comparing AD of integration over Skeleton faces with ForwardDiff results
## comparing AD of integration over Skeleton faces with ForwardDiff results ##

model = CartesianDiscreteModel((0.,1.,0.,1.),(3,3))
model = CartesianDiscreteModel((0.,1.,0.,1.),(2,2))
Ω = Triangulation(model)
Γ = BoundaryTriangulation(model)
Λ = SkeletonTriangulation(model)
Expand All @@ -108,7 +109,7 @@ dΛ = Measure(Λ,2)
n_Γ = get_normal_vector(Γ)
n_Λ = get_normal_vector(Λ)

reffe = ReferenceFE(lagrangian,Float64,2)
reffe = ReferenceFE(lagrangian,Float64,1)
V = TestFESpace(model,reffe,conformity=:L2)

u(x) = sin(norm(x))
Expand All @@ -118,7 +119,7 @@ uh = FEFunction(U,rand(num_free_dofs(U)))

f_Λ(uh) = ∫(mean(uh))*dΛ
a_Λ(u) = ∫( - jump(u*n_Λ)⊙mean(∇(u))
            - mean(∇(u))⊙jump(u*n_Λ)
            - mean(Δ(u))
            + jump(u*n_Λ)⊙jump(u*n_Λ) )dΛ

# functionals having mean and jump of products of FEFunctions/CellFields
Expand All @@ -138,6 +139,8 @@ f_Λ_(θ) = f_uh_free_dofs(f_Λ,uh,θ)
a_Λ_(θ) = f_uh_free_dofs(a_Λ,uh,θ)
θ = get_free_dof_values(uh)

# testing gradients of functionals involving Skeleton integration terms

gridapgradf = assemble_vector(gradient(f_Λ,uh),U)
fdgradf = ForwardDiff.gradient(f_Λ_,θ)
test_array(gridapgradf,fdgradf,≈)
Expand All @@ -162,4 +165,75 @@ gridapgradj = assemble_vector(gradient(j_Λ,uh),U)
fdgradj = ForwardDiff.gradient(j_Λ_,θ)
test_array(gridapgradj,fdgradj,≈)

# testing jacobians of functionals involving Skeleton integration terms
uh = FEFunction(U,rand(num_free_dofs(U)))
θ = get_free_dof_values(uh)
dv = get_fe_basis(V)
du = get_trial_fe_basis(U)

a(uh,vh) = ∫(mean(uh)*mean(vh))*dΛ
# b(uh,vh) = ∫(-jump(uh*n_Λ)⊙mean(∇(vh)) +
# jump(vh*n_Λ)⊙mean(∇(uh)) +
# mean(Δ(uh))*mean(Δ(vh)) +
# jump(uh*n_Λ)⊙jump(vh*n_Λ))*dΛ
# g(uh,vh) = ∫(mean(uh*vh))*dΛ
h(uh,vh) = ∫(jump(uh*vh*n_Λ)⊙jump(vh*vh*n_Λ))*dΛ
# j(uh,vh) = ∫(mean(∇(vh)*uh)⊙jump((∇(vh)⊙∇(uh))*n_Λ))*dΛ

# We use the strongly-typed low-level interface of `SparseMatrixCSC{T,Int}` here
# as ForwardDiff doesn't work directly through `assemble_vector`; this is due
# to the typing of `SparseMatrixCSC{T,Int}` and `Vector{T}` inside the high-level
# `assemble_vector` API (via the `SparseMatrixAssembler`) using the type T of
# the eltype of FESpace U, which remains Float even though dual numbers are
# passed into θ.
# To mitigate this problem, the low-level interface of `assemble_vector` is
# used with `SparseMatrixCSC{T,Int}` and `Vector{T}` constructed by hand from
# the type of θ, which can hold dual numbers when using ForwardDiff or even
# ReverseDiff types when using ReverseDiff! This is just for testing purposes.
# Re-evaluate the functional `f` at the free dof values `θ` (which may carry
# ForwardDiff/ReverseDiff number types) and assemble the resulting vector.
# The assembler's matrix/vector types are built from `eltype(θ)` so that dual
# numbers propagate through assembly instead of being truncated to Float.
function _change_input(f,θ,uh)
  T = eltype(θ)
  space = uh.fe_space
  # Dirichlet values re-allocated with the (possibly dual) scalar type
  dirichlet_vals = similar(uh.dirichlet_values,T)
  uh_at_θ = FEFunction(space,θ,dirichlet_vals)
  assem = SparseMatrixAssembler(SparseMatrixCSC{T,Int},Vector{T},space,space)
  return assemble_vector(f(uh_at_θ),assem,space)
end

# Assemble the AD jacobian of the functional `f` at `uh` into a sparse matrix,
# using the file-level trial space `U` and test space `V`.
_assemble_jacobian(f,uh) = assemble_matrix(jacobian(f,uh),U,V)

f(uh) = a(uh,dv)
jac_gridap_a = _assemble_jacobian(f,uh)
collect(get_array(jacobian(f,uh))) # just to check the working
f_(θ) = _change_input(f,θ,uh)
jac_forwdiff_a = ForwardDiff.jacobian(f_,θ)
test_array(jac_gridap_a,jac_forwdiff_a,≈)

# f(uh) = b(uh,dv)
# jac_gridap_b = _assemble_jacobian(f,uh)
# f_(θ) = _change_input(f,θ,uh)
# jac_forwdiff_b = ForwardDiff.jacobian(f_,θ)
# test_array(jac_gridap_b,jac_forwdiff_b,≈)

# f(uh) = g(uh,dv)
# jac_gridap_g = _assemble_jacobian(f,uh)
# f_(θ) = _change_input(f,θ,uh)
# jac_forwdiff_g = ForwardDiff.jacobian(f_,θ)
# test_array(jac_gridap_g,jac_forwdiff_g,≈)

f(uh) = h(uh,dv)
jac_gridap_h = _assemble_jacobian(f,uh)
f_(θ) = _change_input(f,θ,uh)
jac_forwdiff_h = ForwardDiff.jacobian(f_,θ)
test_array(jac_gridap_h,jac_forwdiff_h,≈)

# f(uh) = j(uh,dv)
# jac_gridap_j = _assemble_jacobian(f,uh)
# f_(θ) = _change_input(f,θ,uh)
# jac_forwdiff_j = ForwardDiff.jacobian(f_,θ)
# test_array(jac_gridap_j,jac_forwdiff_j,≈)

end # module

0 comments on commit 02d96ad

Please sign in to comment.