From e6aa4bdc1f3419e65789538ffb80d4fe403c759e Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Thu, 24 Jun 2021 06:38:47 +0200 Subject: [PATCH] Implement 3D support for `P4estMesh` (#637) * Implement structured 3D simulations with P4estMesh * Fix loading and saving of P4estMesh in 3D * Fix unstructured P4estMesh simulation in 3D * Revise 3D orientations * Implement P4estMesh simulation for non-conforming 3D meshes * Add non-conforming example * Clean up * Remove unused destroy functions * Clean up more * Add restart example with P4estMesh in 3D * Implement AMR with P4estMesh in 3D * Add AMR example with P4estMesh in 3D * First version of fast calc_contravariant_vectors! * Make calc_contravariant_vectors! more readable * Improve documentation of calc_contravariant_vectors! * Calculate both summands of contravariant vectors in one loop * Move @turbo to inner loop * Only use one loop over n * Optimize calc_jacobian_matrix! * Optimize calc_inverse_jacobian! * Use tmp arrays for multiply_dimensionwise! * Update Euler FSP errors after optimizing code * Remove duplicate code * Dispatch by function type * Compute contravariant vectors in one loop for each summand * Add more 3D `P4estMesh` examples * Make curved example actually curved * Make calc_node_coordinates! consistent with 2D * Remove sign Jacobian from normal vector p4est doesn't support left-handed coordinates anyway. * Add FSP example with P4estMesh * Add P4estMesh examples to tests * Clean up * Update examples/3d/elixir_advection_amr_p4est.jl Co-authored-by: Hendrik Ranocha * Update examples/3d/elixir_advection_basic_p4est.jl Co-authored-by: Hendrik Ranocha * Update src/solvers/dg_p4est/containers_2d.jl Co-authored-by: Hendrik Ranocha * Only use Base functions for sc_array load and wrap * Implement suggestions * Implement suggestions * Add convergence test for 3D `P4estMesh` * Implement suggestions Co-authored-by: Hendrik Ranocha --- ...r_advection_amr_p4est_unstructured_flag.jl | 6 +- ...xir_advection_p4est_non_conforming_flag.jl | 4 +- ..._p4est_non_conforming_flag_unstructured.jl | 10 +- examples/2d/elixir_euler_free_stream_p4est.jl | 2 +- ...ir_euler_nonperiodic_p4est_unstructured.jl | 2 +- examples/3d/elixir_advection_amr_p4est.jl | 72 +++ ...advection_amr_p4est_unstructured_curved.jl | 105 ++++ examples/3d/elixir_advection_basic_p4est.jl | 63 +++ .../elixir_advection_p4est_non_conforming.jl | 76 +++ ...xir_advection_p4est_unstructured_curved.jl | 98 ++++ examples/3d/elixir_advection_restart_p4est.jl | 33 ++ examples/3d/elixir_euler_free_stream_p4est.jl | 105 ++++ examples/3d/elixir_euler_nonperiodic_p4est.jl | 74 +++ src/Trixi.jl | 1 + src/auxiliary/auxiliary.jl | 19 - src/auxiliary/p4est.jl | 177 ++++++ src/callbacks_step/amr.jl | 39 +- src/callbacks_step/amr_dg2d.jl | 37 -- src/callbacks_step/amr_dg3d.jl | 66 +-- src/callbacks_step/analysis_dg3d.jl | 13 +- src/callbacks_step/stepsize_dg3d.jl | 4 +- src/mesh/mesh_io.jl | 11 +- src/mesh/p4est_mesh.jl | 512 ++++++++++++++---- src/solvers/dg_curved/containers_3d.jl | 182 +++++-- src/solvers/dg_curved/dg_3d.jl | 4 +- src/solvers/dg_p4est/containers.jl | 374 ++++++++++--- src/solvers/dg_p4est/containers_2d.jl | 245 +-------- src/solvers/dg_p4est/containers_3d.jl | 307 +++++++++++ src/solvers/dg_p4est/dg.jl | 13 +- src/solvers/dg_p4est/dg_2d.jl | 137 +++-- src/solvers/dg_p4est/dg_3d.jl | 413 ++++++++++++++ src/solvers/dg_tree/containers.jl | 45 ++ src/solvers/dg_tree/dg.jl | 6 +- src/solvers/dg_tree/dg_3d.jl | 2 +- 
test/test_examples_3d_curved.jl | 4 +- test/test_examples_3d_p4est.jl | 61 +++ test/test_examples_3d_part2.jl | 3 + test/test_special_elixirs.jl | 3 + 38 files changed, 2620 insertions(+), 708 deletions(-) create mode 100644 examples/3d/elixir_advection_amr_p4est.jl create mode 100644 examples/3d/elixir_advection_amr_p4est_unstructured_curved.jl create mode 100644 examples/3d/elixir_advection_basic_p4est.jl create mode 100644 examples/3d/elixir_advection_p4est_non_conforming.jl create mode 100644 examples/3d/elixir_advection_p4est_unstructured_curved.jl create mode 100644 examples/3d/elixir_advection_restart_p4est.jl create mode 100644 examples/3d/elixir_euler_free_stream_p4est.jl create mode 100644 examples/3d/elixir_euler_nonperiodic_p4est.jl create mode 100644 src/auxiliary/p4est.jl create mode 100644 src/solvers/dg_p4est/containers_3d.jl create mode 100644 src/solvers/dg_p4est/dg_3d.jl create mode 100644 src/solvers/dg_tree/containers.jl create mode 100644 test/test_examples_3d_p4est.jl diff --git a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl index f7947f89dc..982449006b 100644 --- a/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl +++ b/examples/2d/elixir_advection_amr_p4est_unstructured_flag.jl @@ -37,9 +37,9 @@ mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", mesh_file) -mesh = P4estMesh(mesh_file, polydeg=3, - mapping=mapping_flag, - initial_refinement_level=1) +mesh = P4estMesh{2}(mesh_file, polydeg=3, + mapping=mapping_flag, + initial_refinement_level=1) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, diff --git a/examples/2d/elixir_advection_p4est_non_conforming_flag.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl index 5075b1d814..8926262d79 100644 --- a/examples/2d/elixir_advection_p4est_non_conforming_flag.jl +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag.jl @@ -40,7 +40,7 @@ end # Refine recursively until each bottom left quadrant of a tree has level 4 # The mesh will be rebalanced before the simulation starts refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{Trixi.p4est_t}, Ptr{Trixi.p4est_topidx_t}, Ptr{Trixi.p4est_quadrant_t})) -Trixi.p4est_refine(mesh.p4est, true, refine_fn_c, C_NULL) +Trixi.refine_p4est!(mesh.p4est, true, refine_fn_c, C_NULL) # A semidiscretization collects data structures and functions for the spatial discretization semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, solver) @@ -49,7 +49,7 @@ semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergen ############################################################################### # ODE solvers, callbacks etc. 
-# Create ODE problem with time span from 0.0 to 1.0 +# Create ODE problem with time span from 0.0 to 0.2 ode = semidiscretize(semi, (0.0, 0.2)); # At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup diff --git a/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl b/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl index 1c58f9d4f6..261494754e 100644 --- a/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl +++ b/examples/2d/elixir_advection_p4est_non_conforming_flag_unstructured.jl @@ -36,9 +36,9 @@ mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", mesh_file) -mesh = P4estMesh(mesh_file, polydeg=3, - mapping=mapping_flag, - initial_refinement_level=0) +mesh = P4estMesh{2}(mesh_file, polydeg=3, + mapping=mapping_flag, + initial_refinement_level=0) # Refine bottom left quadrant of each tree to level 3 function refine_fn(p4est, which_tree, quadrant) @@ -54,7 +54,7 @@ end # Refine recursively until each bottom left quadrant of a tree has level 4 # The mesh will be rebalanced before the simulation starts refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{Trixi.p4est_t}, Ptr{Trixi.p4est_topidx_t}, Ptr{Trixi.p4est_quadrant_t})) -Trixi.p4est_refine(mesh.p4est, true, refine_fn_c, C_NULL) +Trixi.refine_p4est!(mesh.p4est, true, refine_fn_c, C_NULL) # A semidiscretization collects data structures and functions for the spatial discretization semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions=boundary_conditions) @@ -63,7 +63,7 @@ semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, ############################################################################### # ODE solvers, callbacks etc. 
-# Create ODE problem with time span from 0.0 to 1.0 +# Create ODE problem with time span from 0.0 to 0.2 ode = semidiscretize(semi, (0.0, 0.2)); # At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup diff --git a/examples/2d/elixir_euler_free_stream_p4est.jl b/examples/2d/elixir_euler_free_stream_p4est.jl index ec1a7d7a1f..c73cd7c84d 100644 --- a/examples/2d/elixir_euler_free_stream_p4est.jl +++ b/examples/2d/elixir_euler_free_stream_p4est.jl @@ -36,7 +36,7 @@ isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/a07 mesh_file) # Map the unstructured mesh with the mapping above -mesh = P4estMesh(mesh_file, polydeg=3, mapping=mapping, initial_refinement_level=1) +mesh = P4estMesh{2}(mesh_file, polydeg=3, mapping=mapping, initial_refinement_level=1) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions=Dict( diff --git a/examples/2d/elixir_euler_nonperiodic_p4est_unstructured.jl b/examples/2d/elixir_euler_nonperiodic_p4est_unstructured.jl index 68835b9177..451fdaf4ba 100644 --- a/examples/2d/elixir_euler_nonperiodic_p4est_unstructured.jl +++ b/examples/2d/elixir_euler_nonperiodic_p4est_unstructured.jl @@ -28,7 +28,7 @@ mesh_file = joinpath(@__DIR__, "square_unstructured_2.inp") isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/63ff2ea224409e55ee8423b3a33e316a/raw/7db58af7446d1479753ae718930741c47a3b79b7/square_unstructured_2.inp", mesh_file) -mesh = P4estMesh(mesh_file, initial_refinement_level=0) +mesh = P4estMesh{2}(mesh_file, initial_refinement_level=0) semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, source_terms=source_terms, diff --git a/examples/3d/elixir_advection_amr_p4est.jl b/examples/3d/elixir_advection_amr_p4est.jl new file mode 100644 index 0000000000..57fdd75c58 --- /dev/null +++ b/examples/3d/elixir_advection_amr_p4est.jl @@ -0,0 +1,72 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advectionvelocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advectionvelocity) + +initial_condition = initial_condition_gauss +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +coordinates_min = (-5, -5, -5) +coordinates_max = ( 5, 5, 5) +trees_per_dimension = (1, 1, 1) +mesh = P4estMesh(trees_per_dimension, polydeg=1, + coordinates_min=coordinates_min, coordinates_max=coordinates_max, + initial_refinement_level=4) + + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) + + +############################################################################### +# ODE solvers, callbacks etc. 
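+# Note on the AMR setup below: `base_level=4` matches the uniform
+# `initial_refinement_level=4` used for the mesh above, so the AMR callback only
+# refines further (up to `max_level=6`) and coarsens back to the initial resolution.
+# A quick sanity check of the initial mesh size (hypothetical snippet, using
+# `Trixi.ncells` as defined for `P4estMesh` in src/mesh/p4est_mesh.jl):
+#   Trixi.ncells(mesh)  # one tree at refinement level 4 => 2^(3*4) = 4096 cells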
+ +tspan = (0.0, 0.3) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval=analysis_interval, + extra_analysis_integrals=(entropy,)) + +alive_callback = AliveCallback(analysis_interval=analysis_interval) + +save_restart = SaveRestartCallback(interval=100, + save_final_restart=true) + +save_solution = SaveSolutionCallback(interval=100, + save_initial_solution=true, + save_final_solution=true, + solution_variables=cons2prim) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable=first), + base_level=4, + med_level=5, med_threshold=0.1, + max_level=6, max_threshold=0.6) +amr_callback = AMRCallback(semi, amr_controller, + interval=5, + adapt_initial_condition=true, + adapt_initial_condition_only_refine=true) + +stepsize_callback = StepsizeCallback(cfl=1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + save_restart, + save_solution, + amr_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); +summary_callback() # print the timer summary diff --git a/examples/3d/elixir_advection_amr_p4est_unstructured_curved.jl b/examples/3d/elixir_advection_amr_p4est_unstructured_curved.jl new file mode 100644 index 0000000000..78f6d542a1 --- /dev/null +++ b/examples/3d/elixir_advection_amr_p4est_unstructured_curved.jl @@ -0,0 +1,105 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advectionvelocity = (0.2, -0.7, 0.5) +equations = LinearScalarAdvectionEquation3D(advectionvelocity) + +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +initial_condition = initial_condition_gauss +boundary_condition = BoundaryConditionDirichlet(initial_condition) + +boundary_conditions = Dict( + :all => boundary_condition +) + +# Mapping as described in https://arxiv.org/abs/2012.12040, but with less warping. +# The original mapping applied to this unstructured mesh creates extreme angles, +# which require a high resolution for proper results. 
+function mapping(xi, eta, zeta) + # Don't transform input variables between -1 and 1 onto [0,3] to obtain curved boundaries + # xi = 1.5 * xi_ + 1.5 + # eta = 1.5 * eta_ + 1.5 + # zeta = 1.5 * zeta_ + 1.5 + + y = eta + 1/4 * (cos(1.5 * pi * (2 * xi - 3)/3) * + cos(0.5 * pi * (2 * eta - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + x = xi + 1/4 * (cos(0.5 * pi * (2 * xi - 3)/3) * + cos(2 * pi * (2 * y - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + z = zeta + 1/4 * (cos(0.5 * pi * (2 * x - 3)/3) * + cos(pi * (2 * y - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + # Transform the weird deformed cube to be approximately the size of [-5,5]^3 to match IC + return SVector(5 * x, 5 * y, 5 * z) +end + +# Unstructured mesh with 48 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_2.inp") +isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/b8df0033798e4926dec515fc045e8c2c/raw/b9254cde1d1fb64b6acc8416bc5ccdd77a240227/cube_unstructured_2.inp", + mesh_file) + +mesh = P4estMesh{3}(mesh_file, polydeg=3, + mapping=mapping, + initial_refinement_level=1) + + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions=boundary_conditions) + + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 8.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval=analysis_interval, + extra_analysis_integrals=(entropy,)) + +alive_callback = AliveCallback(analysis_interval=analysis_interval) + +save_restart = SaveRestartCallback(interval=100, + save_final_restart=true) + +save_solution = SaveSolutionCallback(interval=100, + save_initial_solution=true, + save_final_solution=true, + solution_variables=cons2prim) + +amr_controller = ControllerThreeLevel(semi, IndicatorMax(semi, variable=first), + base_level=1, + med_level=2, med_threshold=0.1, + max_level=3, max_threshold=0.6) +amr_callback = AMRCallback(semi, amr_controller, + interval=5, + adapt_initial_condition=true, + adapt_initial_condition_only_refine=true) + +stepsize_callback = StepsizeCallback(cfl=1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + save_restart, + save_solution, + amr_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); +summary_callback() # print the timer summary diff --git a/examples/3d/elixir_advection_basic_p4est.jl b/examples/3d/elixir_advection_basic_p4est.jl new file mode 100644 index 0000000000..1065322ed7 --- /dev/null +++ b/examples/3d/elixir_advection_basic_p4est.jl @@ -0,0 +1,63 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advectionvelocity = (1.0, 1.0, 1.0) +equations = LinearScalarAdvectionEquation3D(advectionvelocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +coordinates_min = (-1.5, -0.9, 0.0) # minimum coordinates (min(x), min(y), min(z)) +coordinates_max = ( 0.5, 1.1, 
4.0) # maximum coordinates (max(x), max(y), max(z)) + +# Create P4estMesh with 8 x 10 x 16 elements (note `initial_refinement_level=1`) +trees_per_dimension = (4, 5, 8) +mesh = P4estMesh(trees_per_dimension, polydeg=3, + coordinates_min=coordinates_min, coordinates_max=coordinates_max, + initial_refinement_level=1) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 1.0 +ode = semidiscretize(semi, (0.0, 1.0)); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval=100) + +# The SaveRestartCallback allows to save a file from which a Trixi simulation can be restarted +save_restart = SaveRestartCallback(interval=100, + save_final_restart=true) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval=100, + solution_variables=cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl=1.2) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, save_restart, save_solution, stepsize_callback) + + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/3d/elixir_advection_p4est_non_conforming.jl b/examples/3d/elixir_advection_p4est_non_conforming.jl new file mode 100644 index 0000000000..6b1f25430a --- /dev/null +++ b/examples/3d/elixir_advection_p4est_non_conforming.jl @@ -0,0 +1,76 @@ + +using OrdinaryDiffEq +using Trixi + + +############################################################################### +# semidiscretization of the linear advection equation + +advectionvelocity = (1.0, 1.0, 1.0) +equations = LinearScalarAdvectionEquation3D(advectionvelocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +coordinates_min = (-1.0, -1.0, -1.0) # minimum coordinates (min(x), min(y), min(z)) +coordinates_max = ( 1.0, 1.0, 1.0) # maximum coordinates (max(x), max(y), max(z)) + +trees_per_dimension = (1, 1, 1) +mesh = P4estMesh(trees_per_dimension, polydeg=3, + coordinates_min=coordinates_min, coordinates_max=coordinates_max, + initial_refinement_level=2) + +# Refine bottom left quadrant of each tree to level 3 +function refine_fn(p8est, which_tree, quadrant) + if quadrant.x == 0 && quadrant.y == 0 && quadrant.z == 0 && quadrant.level < 3 + # return true (refine) + return Cint(1) + else + # return false (don't refine) + return Cint(0) + end +end + +# 
Refine recursively until each bottom left quadrant of a tree has level 3 +# The mesh will be rebalanced before the simulation starts +refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{Trixi.p8est_t}, Ptr{Trixi.p4est_topidx_t}, Ptr{Trixi.p8est_quadrant_t})) +Trixi.refine_p4est!(mesh.p4est, true, refine_fn_c, C_NULL) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, solver) + + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 1.0 +ode = semidiscretize(semi, (0.0, 1.0)); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_callback = AnalysisCallback(semi, interval=100) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval=100, + solution_variables=cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl=1.6) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, save_solution, stepsize_callback) + + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/3d/elixir_advection_p4est_unstructured_curved.jl b/examples/3d/elixir_advection_p4est_unstructured_curved.jl new file mode 100644 index 0000000000..93a1d356db --- /dev/null +++ b/examples/3d/elixir_advection_p4est_unstructured_curved.jl @@ -0,0 +1,98 @@ + +using Downloads: download +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the linear advection equation + +advectionvelocity = (1.0, 1.0, 1.0) +equations = LinearScalarAdvectionEquation3D(advectionvelocity) + +# Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs) + +initial_condition = initial_condition_convergence_test +boundary_condition = BoundaryConditionDirichlet(initial_condition) + +boundary_conditions = Dict( + :all => boundary_condition +) + +# Mapping as described in https://arxiv.org/abs/2012.12040, but with less warping. +# The original mapping applied to this unstructured mesh creates extreme angles, +# which require a high resolution for proper results. 
+function mapping(xi, eta, zeta) + # Don't transform input variables between -1 and 1 onto [0,3] to obtain curved boundaries + # xi = 1.5 * xi_ + 1.5 + # eta = 1.5 * eta_ + 1.5 + # zeta = 1.5 * zeta_ + 1.5 + + y = eta + 1/6 * (cos(1.5 * pi * (2 * xi - 3)/3) * + cos(0.5 * pi * (2 * eta - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + x = xi + 1/6 * (cos(0.5 * pi * (2 * xi - 3)/3) * + cos(2 * pi * (2 * y - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + z = zeta + 1/6 * (cos(0.5 * pi * (2 * x - 3)/3) * + cos(pi * (2 * y - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + return SVector(x, y, z) +end + +# Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") +isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + mesh_file) + +mesh = P4estMesh{3}(mesh_file, polydeg=3, + mapping=mapping, + initial_refinement_level=2) + +# A semidiscretization collects data structures and functions for the spatial discretization +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions=boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +# Create ODE problem with time span from 0.0 to 0.1 +ode = semidiscretize(semi, (0.0, 0.1)); + +# At the beginning of the main loop, the SummaryCallback prints a summary of the simulation setup +# and resets the timers +summary_callback = SummaryCallback() + +# The AnalysisCallback allows to analyse the solution in regular intervals and prints the results +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval=analysis_interval) + +alive_callback = AliveCallback(analysis_interval=analysis_interval) + +# The SaveRestartCallback allows to save a file from which a Trixi simulation can be restarted +save_restart = SaveRestartCallback(interval=100, + save_final_restart=true) + +# The SaveSolutionCallback allows to save the solution to a file in regular intervals +save_solution = SaveSolutionCallback(interval=100, + solution_variables=cons2prim) + +# The StepsizeCallback handles the re-calculation of the maximum Δt after each time step +stepsize_callback = StepsizeCallback(cfl=1.2) + +# Create a CallbackSet to collect all callbacks such that they can be passed to the ODE solver +callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_restart, save_solution, stepsize_callback) + + +############################################################################### +# run the simulation + +# OrdinaryDiffEq's `solve` method evolves the solution in time and executes the passed callbacks +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); + +# Print the timer summary +summary_callback() diff --git a/examples/3d/elixir_advection_restart_p4est.jl b/examples/3d/elixir_advection_restart_p4est.jl new file mode 100644 index 0000000000..2495bc1ddf --- /dev/null +++ b/examples/3d/elixir_advection_restart_p4est.jl @@ -0,0 +1,33 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# create a restart file + +trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_basic_p4est.jl"), + trees_per_dimension=(2, 2, 2)) 
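+# `trixi_include` evaluates the referenced elixir in the given module and replaces
+# top-level assignments by the supplied keyword arguments; here the basic p4est
+# elixir is run with `trees_per_dimension = (2, 2, 2)` to produce the restart file.
+# Other top-level assignments of that elixir could be overridden the same way
+# (hypothetical example, values not taken from this setup):
+#   trixi_include(@__MODULE__, joinpath(@__DIR__, "elixir_advection_basic_p4est.jl"),
+#                 trees_per_dimension=(2, 2, 2), advectionvelocity=(0.5, 0.5, 0.5))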
+ + +############################################################################### +# adapt the parameters that have changed compared to "elixir_advection_basic_p4est.jl" + +# Note: If you get a restart file from somewhere else, you need to provide +# appropriate setups in the elixir loading a restart file + +restart_filename = joinpath("out", "restart_000017.h5") +mesh = load_mesh(restart_filename) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, solver) + +tspan = (load_time(restart_filename), 2.0) +ode = semidiscretize(semi, tspan, restart_filename); + + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); +summary_callback() # print the timer summary diff --git a/examples/3d/elixir_euler_free_stream_p4est.jl b/examples/3d/elixir_euler_free_stream_p4est.jl new file mode 100644 index 0000000000..5683b2c40b --- /dev/null +++ b/examples/3d/elixir_euler_free_stream_p4est.jl @@ -0,0 +1,105 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_constant + +boundary_conditions = Dict( + :all => BoundaryConditionDirichlet(initial_condition) +) + +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs, + volume_integral=VolumeIntegralWeakForm()) + +# Mapping as described in https://arxiv.org/abs/2012.12040 but with less warping. +function mapping(xi_, eta_, zeta_) + # Transform input variables between -1 and 1 onto [0,3] + xi = 1.5 * xi_ + 1.5 + eta = 1.5 * eta_ + 1.5 + zeta = 1.5 * zeta_ + 1.5 + + y = eta + 1/6 * (cos(1.5 * pi * (2 * xi - 3)/3) * + cos(0.5 * pi * (2 * eta - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + x = xi + 1/6 * (cos(0.5 * pi * (2 * xi - 3)/3) * + cos(2 * pi * (2 * y - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + z = zeta + 1/6 * (cos(0.5 * pi * (2 * x - 3)/3) * + cos(pi * (2 * y - 3)/3) * + cos(0.5 * pi * (2 * zeta - 3)/3)) + + return SVector(x, y, z) +end + +# Unstructured mesh with 68 cells of the cube domain [-1, 1]^3 +mesh_file = joinpath(@__DIR__, "cube_unstructured_1.inp") +isfile(mesh_file) || download("https://gist.githubusercontent.com/efaulhaber/d45c8ac1e248618885fa7cc31a50ab40/raw/37fba24890ab37cfa49c39eae98b44faf4502882/cube_unstructured_1.inp", + mesh_file) + +mesh = P4estMesh{3}(mesh_file, polydeg=3, + mapping=mapping, + initial_refinement_level=1) + +# TODO P4EST FSP +# # Refine bottom left quadrant of each tree to level 2 +# function refine_fn(p8est, which_tree, quadrant) +# if quadrant.x == 0 && quadrant.y == 0 && quadrant.z == 0 && quadrant.level < 2 && convert(Int, which_tree) == 0 +# # return true (refine) +# return Cint(1) +# else +# # return false (don't refine) +# return Cint(0) +# end +# end + +# Refine recursively until each bottom left quadrant of a tree has level 2 +# The mesh will be rebalanced before the simulation starts +# refine_fn_c = @cfunction(refine_fn, Cint, (Ptr{Trixi.p8est_t}, Ptr{Trixi.p4est_topidx_t}, Ptr{Trixi.p8est_quadrant_t})) +# Trixi.refine_p4est!(mesh.p4est, true, refine_fn_c, C_NULL) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, boundary_conditions=boundary_conditions) + 
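+# Note: this is a free-stream preservation test. With the constant initial condition,
+# the errors reported by the AnalysisCallback below should stay at the level of
+# machine precision on this warped, unstructured mesh, which checks that the
+# contravariant vectors/metric terms are computed consistently.
+# A sketch of how the final errors could be inspected after the run (assuming the
+# callback setup below):
+#   errors = analysis_callback(sol)  # recorded l2/linf errors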
+############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval=analysis_interval) + +alive_callback = AliveCallback(analysis_interval=analysis_interval) + +save_restart = SaveRestartCallback(interval=100, + save_final_restart=true) + +save_solution = SaveSolutionCallback(interval=100, + save_initial_solution=true, + save_final_solution=true, + solution_variables=cons2prim) + +stepsize_callback = StepsizeCallback(cfl=1.2) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + # save_restart, save_solution, + stepsize_callback) + + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), #maxiters=1, + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); +summary_callback() # print the timer summary diff --git a/examples/3d/elixir_euler_nonperiodic_p4est.jl b/examples/3d/elixir_euler_nonperiodic_p4est.jl new file mode 100644 index 0000000000..32f0f10083 --- /dev/null +++ b/examples/3d/elixir_euler_nonperiodic_p4est.jl @@ -0,0 +1,74 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations3D(1.4) + +initial_condition = initial_condition_convergence_test + +boundary_condition = BoundaryConditionDirichlet(initial_condition) +boundary_conditions = Dict( + :x_neg => boundary_condition, + :x_pos => boundary_condition, + :y_neg => boundary_condition, + :y_pos => boundary_condition, + :z_neg => boundary_condition, + :z_pos => boundary_condition +) + +solver = DGSEM(polydeg=3, surface_flux=flux_lax_friedrichs, + volume_integral=VolumeIntegralWeakForm()) + +coordinates_min = (0.0, 0.0, 0.0) +coordinates_max = (2.0, 2.0, 2.0) + +trees_per_dimension = (2, 2, 2) + +mesh = P4estMesh(trees_per_dimension, polydeg=1, + coordinates_min=coordinates_min, coordinates_max=coordinates_max, + periodicity=false, initial_refinement_level=1) + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms=source_terms_convergence_test, + boundary_conditions=boundary_conditions) + + +############################################################################### +# ODE solvers, callbacks etc. 
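+# On the boundary conditions above: the names :x_neg, :x_pos, ..., :z_pos are the
+# symbols the structured `P4estMesh` constructor assigns to non-periodic tree faces
+# (see `structured_boundary_names!` in src/mesh/p4est_mesh.jl further below in this
+# patch); meshes read from an Abaqus file instead label every boundary :all, as in
+# the free-stream example.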
+ +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval=analysis_interval) + +alive_callback = AliveCallback(analysis_interval=analysis_interval) + +save_restart = SaveRestartCallback(interval=100, + save_final_restart=true) + +save_solution = SaveSolutionCallback(interval=100, + save_initial_solution=true, + save_final_solution=true, + solution_variables=cons2prim) + +stepsize_callback = StepsizeCallback(cfl=0.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, alive_callback, + save_restart, save_solution, + stepsize_callback) + + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition=false), + dt=1.0, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep=false, callback=callbacks); +summary_callback() # print the timer summary diff --git a/src/Trixi.jl b/src/Trixi.jl index 6b3155affc..c548c2ba44 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -64,6 +64,7 @@ include("basic_types.jl") # Include all top-level source files include("auxiliary/auxiliary.jl") include("auxiliary/mpi.jl") +include("auxiliary/p4est.jl") include("equations/equations.jl") include("mesh/mesh.jl") include("solvers/solvers.jl") diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index c9521eaf66..c5ce09568e 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -212,22 +212,3 @@ macro trixi_timeit(timer_output, label, expr) val end end - - -""" - init_p4est() - -Initialize p4est by calling `p4est_init` and setting the log level to `SC_LP_ERROR`. -This function will check if p4est is already initialized -and if yes, do nothing, thus it is safe to call it multiple times. -""" -function init_p4est() - if p4est_package_id()[] >= 0 - return nothing - end - - # Initialize p4est with log level ERROR to prevent a lot of output in AMR simulations - p4est_init(C_NULL, SC_LP_ERROR) - - return nothing -end diff --git a/src/auxiliary/p4est.jl b/src/auxiliary/p4est.jl new file mode 100644 index 0000000000..3910be9b25 --- /dev/null +++ b/src/auxiliary/p4est.jl @@ -0,0 +1,177 @@ +""" + init_p4est() + +Initialize p4est by calling `p4est_init` and setting the log level to `SC_LP_ERROR`. +This function will check if p4est is already initialized +and if yes, do nothing, thus it is safe to call it multiple times. 
+""" +function init_p4est() + if p4est_package_id()[] >= 0 + return nothing + end + + # Initialize p4est with log level ERROR to prevent a lot of output in AMR simulations + p4est_init(C_NULL, SC_LP_ERROR) + + return nothing +end + + +# Convert sc_array of type T to Julia array +function unsafe_wrap_sc(::Type{T}, sc_array) where T + element_count = sc_array.elem_count + + return [unsafe_load_sc(T, sc_array, i) for i in 1:element_count] +end + + +# Load the ith element (1-indexed) of an sc array of type T +function unsafe_load_sc(::Type{T}, sc_array, i=1) where T + element_size = sc_array.elem_size + + @assert element_size == sizeof(T) + + return unsafe_load(Ptr{T}(sc_array.array), i) +end + + +# Create new p4est from a p4est_connectivity +# 2D +function new_p4est(conn::Ptr{p4est_connectivity_t}, initial_refinement_level) + p4est_new_ext(0, # No MPI communicator + conn, + 0, # No minimum initial quadrants per processor + initial_refinement_level, + true, # Refine uniformly + 2 * sizeof(Int), # Use Int-Vector of size 2 as quadrant user data + C_NULL, # No init function + C_NULL) # No user pointer +end + +# 3D +function new_p4est(conn::Ptr{p8est_connectivity_t}, initial_refinement_level) + p8est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) +end + + +# Save p4est data to file +# 2D +function save_p4est!(file, p4est::Ptr{p4est_t}) + # Don't save user data of the quads + p4est_save(file, p4est, false) +end + +# 3D +function save_p4est!(file, p8est::Ptr{p8est_t}) + # Don't save user data of the quads + p8est_save(file, p8est, false) +end + + +# Load p4est from file +# 2D +function load_p4est(file, ::Val{2}) + conn_vec = Vector{Ptr{p4est_connectivity_t}}(undef, 1) + p4est_load(file, C_NULL, 0, false, C_NULL, pointer(conn_vec)) +end + +# 3D +function load_p4est(file, ::Val{3}) + conn_vec = Vector{Ptr{p8est_connectivity_t}}(undef, 1) + p8est_load(file, C_NULL, 0, false, C_NULL, pointer(conn_vec)) +end + + +# Read p4est connectivity from Abaqus mesh file (.inp) +# 2D +read_inp_p4est(meshfile, ::Val{2}) = p4est_connectivity_read_inp(meshfile) +# 3D +read_inp_p4est(meshfile, ::Val{3}) = p8est_connectivity_read_inp(meshfile) + + +# Refine p4est if refine_fn_c returns 1 +# 2D +refine_p4est!(p4est::Ptr{p4est_t}, recursive, refine_fn_c, init_fn_c) = p4est_refine(p4est, recursive, refine_fn_c, init_fn_c) +# 3D +refine_p4est!(p8est::Ptr{p8est_t}, recursive, refine_fn_c, init_fn_c) = p8est_refine(p8est, recursive, refine_fn_c, init_fn_c) + + +# Coarsen p4est if coarsen_fn_c returns 1 +# 2D +coarsen_p4est!(p4est::Ptr{p4est_t}, recursive, coarsen_fn_c, init_fn_c) = p4est_coarsen(p4est, recursive, coarsen_fn_c, init_fn_c) +# 3D +coarsen_p4est!(p8est::Ptr{p8est_t}, recursive, coarsen_fn_c, init_fn_c) = p8est_coarsen(p8est, recursive, coarsen_fn_c, init_fn_c) + + +# Let p4est iterate over each cell volume and cell face. +# Call iter_volume_c for each cell and iter_face_c for each face. 
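+# Typical usage (sketch, mirroring the AMR callbacks in src/callbacks_step/amr.jl):
+#   iter_volume_c = @cfunction(my_volume_callback, Cvoid,
+#                              (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid}))
+#   iterate_p4est(mesh.p4est, user_array; iter_volume_c=iter_volume_c)
+# where `my_volume_callback` and `user_array` are placeholders. The user data is
+# handed to the callback via its user data pointer and protected with GC.@preserve
+# while p4est iterates.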
+# 2D +function iterate_p4est(p4est::Ptr{p4est_t}, user_data; + iter_volume_c=C_NULL, iter_face_c=C_NULL) + if user_data === C_NULL + user_data_ptr = user_data + elseif user_data isa AbstractArray + user_data_ptr = pointer(user_data) + else + user_data_ptr = pointer_from_objref(user_data) + end + + GC.@preserve user_data begin + p4est_iterate(p4est, + C_NULL, # ghost layer + user_data_ptr, + iter_volume_c, # iter_volume + iter_face_c, # iter_face + C_NULL) # iter_corner + end + + return nothing +end + +# 3D +function iterate_p4est(p8est::Ptr{p8est_t}, user_data; + iter_volume_c=C_NULL, iter_face_c=C_NULL) + if user_data === C_NULL + user_data_ptr = user_data + elseif user_data isa AbstractArray + user_data_ptr = pointer(user_data) + else + user_data_ptr = pointer_from_objref(user_data) + end + + GC.@preserve user_data begin + p8est_iterate(p8est, + C_NULL, # ghost layer + user_data_ptr, + iter_volume_c, # iter_volume + iter_face_c, # iter_face + C_NULL, # iter_edge + C_NULL) # iter_corner + end + + return nothing +end + + +# Load i-th element of the sc_array info.sides of the type p[48]est_iter_face_side_t +# 2D version +function unsafe_load_side(info::Ptr{p4est_iter_face_info_t}, i=1) + return unsafe_load_sc(p4est_iter_face_side_t, info.sides, i) +end + +# 3D version +function unsafe_load_side(info::Ptr{p8est_iter_face_info_t}, i=1) + return unsafe_load_sc(p8est_iter_face_side_t, info.sides, i) +end + + +# Load i-th element of the sc_array p4est.trees of the type p[48]est_tree_t +# 2D version +function unsafe_load_tree(p4est::Ptr{p4est_t}, i=1) + return unsafe_load_sc(p4est_tree_t, p4est.trees, i) +end + +# 3D version +function unsafe_load_tree(p8est::Ptr{p8est_t}, i=1) + return unsafe_load_sc(p8est_tree_t, p8est.trees, i) +end diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 2a4dc2f965..b182f9af73 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -322,7 +322,7 @@ end # Copy controller values to quad user data storage, will be called below function copy_to_quad_iter_volume(info, user_data) # Load tree from global trees array, one-based indexing - tree = unsafe_load_sc(p4est_tree_t, info.p4est.trees, info.treeid + 1) + tree = unsafe_load_tree(info.p4est, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset # Global quad ID @@ -341,6 +341,11 @@ function copy_to_quad_iter_volume(info, user_data) return nothing end +# 2D +cfunction(::typeof(copy_to_quad_iter_volume), ::Val{2}) = @cfunction(copy_to_quad_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) +# 3D +cfunction(::typeof(copy_to_quad_iter_volume), ::Val{3}) = @cfunction(copy_to_quad_iter_volume, Cvoid, (Ptr{p8est_iter_volume_info_t}, Ptr{Cvoid})) + function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, equations, dg::DG, cache, semi, t, iter; @@ -359,19 +364,11 @@ function (amr_callback::AMRCallback)(u_ode::AbstractVector, mesh::P4estMesh, end # Copy controller value of each quad to the quad's user data storage - iter_volume_c = @cfunction(copy_to_quad_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + iter_volume_c = cfunction(copy_to_quad_iter_volume, Val(ndims(mesh))) # The pointer to lambda will be interpreted as Ptr{Int} above @assert lambda isa Vector{Int} - - GC.@preserve lambda begin - p4est_iterate(mesh.p4est, - C_NULL, # ghost layer - pointer(lambda), - iter_volume_c, # iter_volume - C_NULL, # iter_face - C_NULL) # iter_corner - end + iterate_p4est(mesh.p4est, lambda; 
iter_volume_c=iter_volume_c) @trixi_timeit timer() "refine" if !only_coarsen # Refine mesh @@ -544,7 +541,7 @@ end function extract_levels_iter_volume(info, user_data) # Load tree from global trees array, one-based indexing - tree = unsafe_load_sc(p4est_tree_t, info.p4est.trees, info.treeid + 1) + tree = unsafe_load_tree(info.p4est, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset # Global quad ID @@ -561,18 +558,16 @@ function extract_levels_iter_volume(info, user_data) return nothing end +# 2D +cfunction(::typeof(extract_levels_iter_volume), ::Val{2}) = @cfunction(extract_levels_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) +# 3D +cfunction(::typeof(extract_levels_iter_volume), ::Val{3}) = @cfunction(extract_levels_iter_volume, Cvoid, (Ptr{p8est_iter_volume_info_t}, Ptr{Cvoid})) + function current_element_levels(mesh::P4estMesh, solver, cache) current_levels = Vector{Int}(undef, nelements(solver, cache)) - iter_volume_c = @cfunction(extract_levels_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) - - GC.@preserve current_levels begin - p4est_iterate(mesh.p4est, - C_NULL, # ghost layer - pointer(current_levels), # user_data - iter_volume_c, # iter_volume - C_NULL, # iter_face - C_NULL) # iter_corner - end + + iter_volume_c = cfunction(extract_levels_iter_volume, Val(ndims(mesh))) + iterate_p4est(mesh.p4est, current_levels; iter_volume_c=iter_volume_c) return current_levels end diff --git a/src/callbacks_step/amr_dg2d.jl b/src/callbacks_step/amr_dg2d.jl index c9a3db3b6d..a07576b535 100644 --- a/src/callbacks_step/amr_dg2d.jl +++ b/src/callbacks_step/amr_dg2d.jl @@ -62,43 +62,6 @@ function rebalance_solver!(u_ode::AbstractVector, mesh::TreeMesh{2}, equations, end -function reinitialize_containers!(mesh::TreeMesh{2}, equations, dg::DGSEM, cache) - # Get new list of leaf cells - leaf_cell_ids = local_leaf_cells(mesh.tree) - - # re-initialize elements container - @unpack elements = cache - resize!(elements, length(leaf_cell_ids)) - init_elements!(elements, leaf_cell_ids, mesh, dg.basis) - - # re-initialize interfaces container - @unpack interfaces = cache - resize!(interfaces, count_required_interfaces(mesh, leaf_cell_ids)) - init_interfaces!(interfaces, elements, mesh) - - # re-initialize boundaries container - @unpack boundaries = cache - resize!(boundaries, count_required_boundaries(mesh, leaf_cell_ids)) - init_boundaries!(boundaries, elements, mesh) - - # re-initialize mortars container - @unpack mortars = cache - resize!(mortars, count_required_mortars(mesh, leaf_cell_ids)) - init_mortars!(mortars, elements, mesh) - - if mpi_isparallel() - # re-initialize mpi_interfaces container - @unpack mpi_interfaces = cache - resize!(mpi_interfaces, count_required_mpi_interfaces(mesh, leaf_cell_ids)) - init_mpi_interfaces!(mpi_interfaces, elements, mesh) - - # re-initialize mpi cache - @unpack mpi_cache = cache - init_mpi_cache!(mpi_cache, mesh, elements, mpi_interfaces, nvariables(equations), nnodes(dg)) - end -end - - # Refine elements in the DG solver based on a list of cell_ids that should be refined function refine!(u_ode::AbstractVector, adaptor, mesh::Union{TreeMesh{2}, P4estMesh{2}}, equations, dg::DGSEM, cache, elements_to_refine) diff --git a/src/callbacks_step/amr_dg3d.jl b/src/callbacks_step/amr_dg3d.jl index 292893a18a..2fe918403c 100644 --- a/src/callbacks_step/amr_dg3d.jl +++ b/src/callbacks_step/amr_dg3d.jl @@ -1,6 +1,7 @@ # Refine elements in the DG solver based on a list of cell_ids that should be refined 
-function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{3}, +function refine!(u_ode::AbstractVector, adaptor, + mesh::Union{TreeMesh{3}, P4estMesh{3}}, equations, dg::DGSEM, cache, elements_to_refine) # Return early if there is nothing to do if isempty(elements_to_refine) @@ -17,14 +18,7 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{3}, GC.@preserve old_u_ode begin # OBS! If we don't GC.@preserve old_u_ode, it might be GC'ed old_u = wrap_array(old_u_ode, mesh, equations, dg, cache) - # Get new list of leaf cells - leaf_cell_ids = local_leaf_cells(mesh.tree) - - # re-initialize elements container - @unpack elements = cache - resize!(elements, length(leaf_cell_ids)) - init_elements!(elements, leaf_cell_ids, mesh, dg.basis) - @assert nelements(dg, cache) > old_n_elements + reinitialize_containers!(mesh, equations, dg, cache) resize!(u_ode, nvariables(equations) * nnodes(dg)^ndims(mesh) * nelements(dg, cache)) u = wrap_array(u_ode, mesh, equations, dg, cache) @@ -51,24 +45,9 @@ function refine!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{3}, @assert element_id == nelements(dg, cache) + 1 || element_id == nelements(dg, cache) + 2^ndims(mesh) "element_id = $element_id, nelements(dg, cache) = $(nelements(dg, cache))" end # GC.@preserve old_u_ode - # re-initialize interfaces container - @unpack interfaces = cache - resize!(interfaces, count_required_interfaces(mesh, leaf_cell_ids)) - init_interfaces!(interfaces, elements, mesh) - - # re-initialize boundaries container - @unpack boundaries = cache - resize!(boundaries, count_required_boundaries(mesh, leaf_cell_ids)) - init_boundaries!(boundaries, elements, mesh) - - # re-initialize mortars container - @unpack mortars = cache - resize!(mortars, count_required_mortars(mesh, leaf_cell_ids)) - init_mortars!(mortars, elements, mesh) - # Sanity check - if isperiodic(mesh.tree) && nmortars(mortars) == 0 - @assert ninterfaces(interfaces) == ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") + if mesh isa TreeMesh && isperiodic(mesh.tree) && nmortars(cache.mortars) == 0 + @assert ninterfaces(cache.interfaces) == ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") end return nothing @@ -154,7 +133,8 @@ end # Coarsen elements in the DG solver based on a list of cell_ids that should be removed -function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{3}, +function coarsen!(u_ode::AbstractVector, adaptor, + mesh::Union{TreeMesh{3}, P4estMesh{3}}, equations, dg::DGSEM, cache, elements_to_remove) # Return early if there is nothing to do if isempty(elements_to_remove) @@ -171,14 +151,7 @@ function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{3}, GC.@preserve old_u_ode begin # OBS! 
If we don't GC.@preserve old_u_ode, it might be GC'ed old_u = wrap_array(old_u_ode, mesh, equations, dg, cache) - # Get new list of leaf cells - leaf_cell_ids = local_leaf_cells(mesh.tree) - - # re-initialize elements container - @unpack elements = cache - resize!(elements, length(leaf_cell_ids)) - init_elements!(elements, leaf_cell_ids, mesh, dg.basis) - @assert nelements(dg, cache) < old_n_elements + reinitialize_containers!(mesh, equations, dg, cache) resize!(u_ode, nvariables(equations) * nnodes(dg)^ndims(mesh) * nelements(dg, cache)) u = wrap_array(u_ode, mesh, equations, dg, cache) @@ -216,24 +189,9 @@ function coarsen!(u_ode::AbstractVector, adaptor, mesh::TreeMesh{3}, @assert element_id == nelements(dg, cache) + 1 "element_id = $element_id, nelements(dg, cache) = $(nelements(dg, cache))" end # GC.@preserve old_u_ode - # re-initialize interfaces container - @unpack interfaces = cache - resize!(interfaces, count_required_interfaces(mesh, leaf_cell_ids)) - init_interfaces!(interfaces, elements, mesh) - - # re-initialize boundaries container - @unpack boundaries = cache - resize!(boundaries, count_required_boundaries(mesh, leaf_cell_ids)) - init_boundaries!(boundaries, elements, mesh) - - # re-initialize mortars container - @unpack mortars = cache - resize!(mortars, count_required_mortars(mesh, leaf_cell_ids)) - init_mortars!(mortars, elements, mesh) - # Sanity check - if isperiodic(mesh.tree) && nmortars(mortars) == 0 - @assert ninterfaces(interfaces) == ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") + if mesh isa TreeMesh && isperiodic(mesh.tree) && nmortars(cache.mortars) == 0 + @assert ninterfaces(cache.interfaces) == ndims(mesh) * nelements(dg, cache) ("For $(ndims(mesh))D and periodic domains and conforming elements, the number of interfaces must be $(ndims(mesh)) times the number of elements") end return nothing @@ -318,7 +276,9 @@ end # this method is called when an `ControllerThreeLevel` is constructed -function create_cache(::Type{ControllerThreeLevel}, mesh::TreeMesh{3}, equations, dg::DG, cache) +function create_cache(::Type{ControllerThreeLevel}, + mesh::Union{TreeMesh{3}, P4estMesh{3}}, + equations, dg::DG, cache) controller_value = Vector{Int}(undef, nelements(dg, cache)) return (; controller_value) diff --git a/src/callbacks_step/analysis_dg3d.jl b/src/callbacks_step/analysis_dg3d.jl index 859b54feb9..6d81d5ad26 100644 --- a/src/callbacks_step/analysis_dg3d.jl +++ b/src/callbacks_step/analysis_dg3d.jl @@ -24,7 +24,7 @@ function create_cache_analysis(analyzer, mesh::TreeMesh{3}, end -function create_cache_analysis(analyzer, mesh::CurvedMesh{3}, +function create_cache_analysis(analyzer, mesh::Union{CurvedMesh{3}, P4estMesh{3}}, equations, dg::DG, cache, RealT, uEltype) @@ -92,7 +92,8 @@ end function calc_error_norms(func, u, t, analyzer, - mesh::CurvedMesh{3}, equations, initial_condition, + mesh::Union{CurvedMesh{3}, P4estMesh{3}}, + equations, initial_condition, dg::DGSEM, cache, cache_analysis) @unpack vandermonde, weights = analyzer @unpack node_coordinates, inverse_jacobian = cache.elements @@ -155,7 +156,8 @@ end function integrate_via_indices(func::Func, u, - mesh::CurvedMesh{3}, equations, dg::DGSEM, cache, + mesh::Union{CurvedMesh{3}, P4estMesh{3}}, + equations, dg::DGSEM, cache, args...; normalize=true) where {Func} @unpack weights = dg.basis @@ -182,7 +184,7 @@ end function integrate(func::Func, u, - 
mesh::Union{TreeMesh{3},CurvedMesh{3}}, + mesh::Union{TreeMesh{3}, CurvedMesh{3}, P4estMesh{3}}, equations, dg::DGSEM, cache; normalize=true) where {Func} integrate_via_indices(u, mesh, equations, dg, cache; normalize=normalize) do u, i, j, k, element, equations, dg u_local = get_node_vars(u, equations, dg, i, j, k, element) @@ -192,7 +194,8 @@ end function analyze(::typeof(entropy_timederivative), du, u, t, - mesh::Union{TreeMesh{3},CurvedMesh{3}}, equations, dg::DG, cache) + mesh::Union{TreeMesh{3}, CurvedMesh{3}, P4estMesh{3}}, + equations, dg::DG, cache) # Calculate ∫(∂S/∂u ⋅ ∂u/∂t)dΩ integrate_via_indices(u, mesh, equations, dg, cache, du) do u, i, j, k, element, equations, dg, du u_node = get_node_vars(u, equations, dg, i, j, k, element) diff --git a/src/callbacks_step/stepsize_dg3d.jl b/src/callbacks_step/stepsize_dg3d.jl index 0b00e209ea..ed30f695d0 100644 --- a/src/callbacks_step/stepsize_dg3d.jl +++ b/src/callbacks_step/stepsize_dg3d.jl @@ -38,7 +38,7 @@ function max_dt(u, t, mesh::TreeMesh{3}, end -function max_dt(u, t, mesh::CurvedMesh{3}, +function max_dt(u, t, mesh::Union{CurvedMesh{3}, P4estMesh{3}}, constant_speed::Val{false}, equations, dg::DG, cache) # to avoid a division by zero if the speed vanishes everywhere, # e.g. for steady-state linear advection @@ -73,7 +73,7 @@ function max_dt(u, t, mesh::CurvedMesh{3}, end -function max_dt(u, t, mesh::CurvedMesh{3}, +function max_dt(u, t, mesh::Union{CurvedMesh{3}, P4estMesh{3}}, constant_speed::Val{true}, equations, dg::DG, cache) # to avoid a division by zero if the speed vanishes everywhere, # e.g. for steady-state linear advection diff --git a/src/mesh/mesh_io.jl b/src/mesh/mesh_io.jl index 052577a320..ffd3338b92 100644 --- a/src/mesh/mesh_io.jl +++ b/src/mesh/mesh_io.jl @@ -152,7 +152,7 @@ function save_mesh_file(mesh::P4estMesh, output_directory, timestep=0) p4est_file = joinpath(output_directory, p4est_filename) # Save the complete connectivity/p4est data to disk. 
- p4est_save(p4est_file, mesh.p4est, false) + save_p4est!(p4est_file, mesh.p4est) # Open file (clobber existing content) h5open(filename, "w") do file @@ -237,13 +237,15 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) end end - mesh = CurvedMesh(size, mapping; RealT=RealT, unsaved_changes=false, mapping_as_string=mapping_as_string) + mesh = CurvedMesh(size, mapping; RealT=RealT, unsaved_changes=false, + mapping_as_string=mapping_as_string) elseif mesh_type == "UnstructuredQuadMesh" mesh_filename, periodicity_ = h5open(mesh_file, "r") do file return read(attributes(file)["mesh_filename"]), read(attributes(file)["periodicity"]) end - mesh = UnstructuredQuadMesh(mesh_filename; RealT=RealT, periodicity=periodicity_, unsaved_changes=false) + mesh = UnstructuredQuadMesh(mesh_filename; RealT=RealT, periodicity=periodicity_, + unsaved_changes=false) elseif mesh_type == "P4estMesh" p4est_filename, tree_node_coordinates, nodes, boundary_names_ = h5open(mesh_file, "r") do file @@ -259,8 +261,7 @@ function load_mesh_serial(mesh_file::AbstractString; n_cells_max, RealT) # Prevent Julia crashes when p4est can't find the file @assert isfile(p4est_file) - conn_vec = Vector{Ptr{p4est_connectivity_t}}(undef, 1) - p4est = p4est_load(p4est_file, C_NULL, 0, false, C_NULL, pointer(conn_vec)) + p4est = load_p4est(p4est_file, Val(ndims)) mesh = P4estMesh{ndims}(p4est, tree_node_coordinates, nodes, boundary_names, "", false) diff --git a/src/mesh/p4est_mesh.jl b/src/mesh/p4est_mesh.jl index 7ddc3a10fb..c6dddbe61f 100644 --- a/src/mesh/p4est_mesh.jl +++ b/src/mesh/p4est_mesh.jl @@ -7,8 +7,8 @@ to manage trees and mesh refinement. !!! warning "Experimental code" This mesh type is experimental and can change any time. """ -mutable struct P4estMesh{NDIMS, RealT<:Real, NDIMSP2, NNODES} <: AbstractMesh{NDIMS} - p4est ::Ptr{p4est_t} +mutable struct P4estMesh{NDIMS, RealT<:Real, P, NDIMSP2, NNODES} <: AbstractMesh{NDIMS} + p4est ::P # Either Ptr{p4est_t} or Ptr{p8est_t} # Coordinates at the nodes specified by the tensor product of `nodes` (NDIMS times). # This specifies the geometry interpolation for each tree. 
tree_node_coordinates ::Array{RealT, NDIMSP2} # [dimension, i, j, k, tree] @@ -19,9 +19,14 @@ mutable struct P4estMesh{NDIMS, RealT<:Real, NDIMSP2, NNODES} <: AbstractMesh{ND function P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, boundary_names, current_filename, unsaved_changes) where NDIMS - mesh = new{NDIMS, eltype(tree_node_coordinates), NDIMS+2, length(nodes)}( - p4est, tree_node_coordinates, - nodes, boundary_names, current_filename, unsaved_changes) + if NDIMS == 2 + @assert p4est isa Ptr{p4est_t} + elseif NDIMS == 3 + @assert p4est isa Ptr{p8est_t} + end + + mesh = new{NDIMS, eltype(tree_node_coordinates), typeof(p4est), NDIMS+2, length(nodes)}( + p4est, tree_node_coordinates, nodes, boundary_names, current_filename, unsaved_changes) # Destroy p4est structs when the mesh is garbage collected finalizer(destroy_mesh, mesh) @@ -31,12 +36,43 @@ mutable struct P4estMesh{NDIMS, RealT<:Real, NDIMSP2, NNODES} <: AbstractMesh{ND end -function destroy_mesh(mesh) +function destroy_mesh(mesh::P4estMesh{2}) conn = mesh.p4est.connectivity p4est_destroy(mesh.p4est) p4est_connectivity_destroy(conn) end +function destroy_mesh(mesh::P4estMesh{3}) + conn = mesh.p4est.connectivity + p8est_destroy(mesh.p4est) + p8est_connectivity_destroy(conn) +end + + +@inline Base.ndims(::P4estMesh{NDIMS}) where NDIMS = NDIMS +@inline Base.real(::P4estMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT + +@inline ntrees(mesh::P4estMesh) = mesh.p4est.trees.elem_count +@inline ncells(mesh::P4estMesh) = mesh.p4est.global_num_quadrants + + +function Base.show(io::IO, mesh::P4estMesh) + print(io, "P4estMesh{", ndims(mesh), ", ", real(mesh), "}") +end + +function Base.show(io::IO, ::MIME"text/plain", mesh::P4estMesh) + if get(io, :compact, false) + show(io, mesh) + else + setup = [ + "#trees" => ntrees(mesh), + "current #cells" => ncells(mesh), + "polydeg" => length(mesh.nodes) - 1, + ] + summary_box(io, "P4estMesh{" * string(ndims(mesh)) * ", " * string(real(mesh)) * "}", setup) + end +end + """ P4estMesh(trees_per_dimension; polydeg, @@ -120,13 +156,22 @@ function P4estMesh(trees_per_dimension; polydeg, prod(trees_per_dimension)) calc_tree_node_coordinates!(tree_node_coordinates, nodes, mapping, trees_per_dimension) - # p4est_connectivity_new_brick has trees in Morton order, so use our own function for this + # p4est_connectivity_new_brick has trees in Z-order, so use our own function for this conn = connectivity_structured(trees_per_dimension..., periodicity) - # Use Int-Vector of size 2 as quadrant user data - p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) + + p4est = new_p4est(conn, initial_refinement_level) # Non-periodic boundaries - boundary_names = fill(Symbol("---"), 4, prod(trees_per_dimension)) + boundary_names = fill(Symbol("---"), 2 * NDIMS, prod(trees_per_dimension)) + + structured_boundary_names!(boundary_names, trees_per_dimension, periodicity) + + return P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, + boundary_names, "", unsaved_changes) +end + +# 2D version +function structured_boundary_names!(boundary_names, trees_per_dimension::NTuple{2}, periodicity) linear_indices = LinearIndices(trees_per_dimension) # Boundaries in x-direction @@ -150,16 +195,51 @@ function P4estMesh(trees_per_dimension; polydeg, boundary_names[4, tree] = :y_pos end end +end - return P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, - boundary_names, "", unsaved_changes) +# 3D version +function structured_boundary_names!(boundary_names, 
trees_per_dimension::NTuple{3}, periodicity) + linear_indices = LinearIndices(trees_per_dimension) + + # Boundaries in x-direction + if !periodicity[1] + for cell_z in 1:trees_per_dimension[3], cell_y in 1:trees_per_dimension[2] + tree = linear_indices[1, cell_y, cell_z] + boundary_names[1, tree] = :x_neg + + tree = linear_indices[end, cell_y, cell_z] + boundary_names[2, tree] = :x_pos + end + end + + # Boundaries in y-direction + if !periodicity[2] + for cell_z in 1:trees_per_dimension[3], cell_x in 1:trees_per_dimension[1] + tree = linear_indices[cell_x, 1, cell_z] + boundary_names[3, tree] = :y_neg + + tree = linear_indices[cell_x, end, cell_z] + boundary_names[4, tree] = :y_pos + end + end + + # Boundaries in z-direction + if !periodicity[3] + for cell_y in 1:trees_per_dimension[2], cell_x in 1:trees_per_dimension[1] + tree = linear_indices[cell_x, cell_y, 1] + boundary_names[5, tree] = :z_neg + + tree = linear_indices[cell_x, cell_y, end] + boundary_names[6, tree] = :z_pos + end + end end """ - P4estMesh(meshfile::String; - mapping=nothing, polydeg=1, RealT=Float64, - initial_refinement_level=0, unsaved_changes=true) + P4estMesh{NDIMS}(meshfile::String; + mapping=nothing, polydeg=1, RealT=Float64, + initial_refinement_level=0, unsaved_changes=true) Import an uncurved, unstructured, conforming mesh from an Abaqus mesh file (`.inp`), map the mesh with the specified mapping, and create a `P4estMesh` from the curved mesh. @@ -179,23 +259,20 @@ Cells in the mesh file will be imported as trees in the `P4estMesh`. - `initial_refinement_level::Integer`: refine the mesh uniformly to this level before the simulation starts. - `unsaved_changes::Bool`: if set to `true`, the mesh will be saved to a mesh file. """ -function P4estMesh(meshfile::String; +function P4estMesh{NDIMS}(meshfile::String; mapping=nothing, polydeg=1, RealT=Float64, - initial_refinement_level=0, unsaved_changes=true) - # TODO P4EST - NDIMS = 2 - + initial_refinement_level=0, unsaved_changes=true) where NDIMS # Prevent p4est from crashing Julia if the file doesn't exist @assert isfile(meshfile) - conn = p4est_connectivity_read_inp(meshfile) + conn = read_inp_p4est(meshfile, Val(NDIMS)) # These need to be of the type Int for unsafe_wrap below to work n_trees::Int = conn.num_trees n_vertices::Int = conn.num_vertices vertices = unsafe_wrap(Array, conn.vertices, (3, n_vertices)) - tree_to_vertex = unsafe_wrap(Array, conn.tree_to_vertex, (4, n_trees)) + tree_to_vertex = unsafe_wrap(Array, conn.tree_to_vertex, (2^NDIMS, n_trees)) basis = LobattoLegendreBasis(RealT, polydeg) nodes = basis.nodes @@ -205,11 +282,10 @@ function P4estMesh(meshfile::String; n_trees) calc_tree_node_coordinates!(tree_node_coordinates, nodes, mapping, vertices, tree_to_vertex) - # Use Int-Vector of size 2 as quadrant user data - p4est = p4est_new_ext(0, conn, 0, initial_refinement_level, true, 2 * sizeof(Int), C_NULL, C_NULL) + p4est = new_p4est(conn, initial_refinement_level) # There's no simple and generic way to distinguish boundaries. Name all of them :all. - boundary_names = fill(:all, 4, n_trees) + boundary_names = fill(:all, 2 * NDIMS, n_trees) return P4estMesh{NDIMS}(p4est, tree_node_coordinates, nodes, boundary_names, "", unsaved_changes) @@ -220,23 +296,24 @@ end # Similar to p4est_connectivity_new_brick, but doesn't use Morton order. # This order makes `calc_tree_node_coordinates!` below and the calculation # of `boundary_names` above easier but is irrelevant otherwise. 
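# Minimal illustrative sketch (not part of the patch): the structured constructor
# above numbers trees in row-major order via `LinearIndices` (cell_x fastest),
# not in p4est's Morton/Z-order. For trees_per_dimension = (3, 2) the one-based
# tree ids are
#   cell_y = 2:  4 5 6
#   cell_y = 1:  1 2 3
# so `linear_indices[cell_x, cell_y]` recovers the tree id used for `boundary_names`.
linear_indices = LinearIndices((3, 2))
@assert linear_indices[2, 1] == 2 && linear_indices[1, 2] == 4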
-function connectivity_structured(cells_x, cells_y, periodicity) - linear_indices = LinearIndices((cells_x, cells_y)) +# 2D version +function connectivity_structured(n_cells_x, n_cells_y, periodicity) + linear_indices = LinearIndices((n_cells_x, n_cells_y)) # Vertices represent the coordinates of the forest. This is used by p4est # to write VTK files. # Trixi doesn't use p4est's coordinates, so the vertices can be empty. n_vertices = 0 - n_trees = cells_x * cells_y + n_trees = n_cells_x * n_cells_y # No corner connectivity is needed n_corners = 0 vertices = C_NULL tree_to_vertex = C_NULL - tree_to_tree = Matrix{p4est_topidx_t}(undef, 4, n_trees) - tree_to_face = Matrix{Int8}(undef, 4, n_trees) + tree_to_tree = Array{p4est_topidx_t, 2}(undef, 4, n_trees) + tree_to_face = Array{Int8, 2}(undef, 4, n_trees) - for cell_y in 1:cells_y, cell_x in 1:cells_x + for cell_y in 1:n_cells_y, cell_x in 1:n_cells_x tree = linear_indices[cell_x, cell_y] # Subtract 1 because p4est uses zero-based indexing @@ -245,7 +322,7 @@ function connectivity_structured(cells_x, cells_y, periodicity) tree_to_tree[1, tree] = linear_indices[cell_x - 1, cell_y] - 1 tree_to_face[1, tree] = 1 elseif periodicity[1] - tree_to_tree[1, tree] = linear_indices[cells_x, cell_y] - 1 + tree_to_tree[1, tree] = linear_indices[n_cells_x, cell_y] - 1 tree_to_face[1, tree] = 1 else # Non-periodic boundary, tree and face point to themselves (zero-based indexing) tree_to_tree[1, tree] = tree - 1 @@ -253,7 +330,7 @@ function connectivity_structured(cells_x, cells_y, periodicity) end # Positive x-direction - if cell_x < cells_x + if cell_x < n_cells_x tree_to_tree[2, tree] = linear_indices[cell_x + 1, cell_y] - 1 tree_to_face[2, tree] = 0 elseif periodicity[1] @@ -264,20 +341,20 @@ function connectivity_structured(cells_x, cells_y, periodicity) tree_to_face[2, tree] = 1 end - # Negative x-direction + # Negative y-direction if cell_y > 1 tree_to_tree[3, tree] = linear_indices[cell_x, cell_y - 1] - 1 tree_to_face[3, tree] = 3 elseif periodicity[2] - tree_to_tree[3, tree] = linear_indices[cell_x, cells_y] - 1 + tree_to_tree[3, tree] = linear_indices[cell_x, n_cells_y] - 1 tree_to_face[3, tree] = 3 else # Non-periodic boundary, tree and face point to themselves (zero-based indexing) tree_to_tree[3, tree] = tree - 1 tree_to_face[3, tree] = 2 end - # Positive x-direction - if cell_y < cells_y + # Positive y-direction + if cell_y < n_cells_y tree_to_tree[4, tree] = linear_indices[cell_x, cell_y + 1] - 1 tree_to_face[4, tree] = 2 elseif periodicity[2] @@ -297,40 +374,143 @@ function connectivity_structured(cells_x, cells_y, periodicity) corner_to_tree = C_NULL corner_to_corner = C_NULL - p4est_connectivity_new_copy(n_vertices, n_trees, n_corners, - vertices, tree_to_vertex, - tree_to_tree, tree_to_face, - tree_to_corner, ctt_offset, - corner_to_tree, corner_to_corner) + conn = p4est_connectivity_new_copy(n_vertices, n_trees, n_corners, + vertices, tree_to_vertex, + tree_to_tree, tree_to_face, + tree_to_corner, ctt_offset, + corner_to_tree, corner_to_corner) + + @assert p4est_connectivity_is_valid(conn) == 1 + + return conn end -@inline Base.ndims(::P4estMesh{NDIMS}) where NDIMS = NDIMS -@inline Base.real(::P4estMesh{NDIMS, RealT}) where {NDIMS, RealT} = RealT +# 3D version +function connectivity_structured(n_cells_x, n_cells_y, n_cells_z, periodicity) + linear_indices = LinearIndices((n_cells_x, n_cells_y, n_cells_z)) -@inline ntrees(mesh::P4estMesh) = mesh.p4est.trees.elem_count -@inline ncells(mesh::P4estMesh) = mesh.p4est.global_num_quadrants + # 
Vertices represent the coordinates of the forest. This is used by p4est + # to write VTK files. + # Trixi doesn't use p4est's coordinates, so the vertices can be empty. + n_vertices = 0 + n_trees = n_cells_x * n_cells_y * n_cells_z + # No edge connectivity is needed + n_edges = 0 + # No corner connectivity is needed + n_corners = 0 + vertices = C_NULL + tree_to_vertex = C_NULL + tree_to_tree = Array{p4est_topidx_t, 2}(undef, 6, n_trees) + tree_to_face = Array{Int8, 2}(undef, 6, n_trees) -function Base.show(io::IO, mesh::P4estMesh) - print(io, "P4estMesh{", ndims(mesh), ", ", real(mesh), "}") -end + for cell_z in 1:n_cells_z, cell_y in 1:n_cells_y, cell_x in 1:n_cells_x + tree = linear_indices[cell_x, cell_y, cell_z] + # Subtract 1 because p4est uses zero-based indexing + # Negative x-direction + if cell_x > 1 + tree_to_tree[1, tree] = linear_indices[cell_x - 1, cell_y, cell_z] - 1 + tree_to_face[1, tree] = 1 + elseif periodicity[1] + tree_to_tree[1, tree] = linear_indices[n_cells_x, cell_y, cell_z] - 1 + tree_to_face[1, tree] = 1 + else # Non-periodic boundary, tree and face point to themselves (zero-based indexing) + tree_to_tree[1, tree] = tree - 1 + tree_to_face[1, tree] = 0 + end -function Base.show(io::IO, ::MIME"text/plain", mesh::P4estMesh) - if get(io, :compact, false) - show(io, mesh) - else - setup = [ - "#trees" => ntrees(mesh), - "current #cells" => ncells(mesh), - "polydeg" => length(mesh.nodes) - 1, - ] - summary_box(io, "P4estMesh{" * string(ndims(mesh)) * ", " * string(real(mesh)) * "}", setup) + # Positive x-direction + if cell_x < n_cells_x + tree_to_tree[2, tree] = linear_indices[cell_x + 1, cell_y, cell_z] - 1 + tree_to_face[2, tree] = 0 + elseif periodicity[1] + tree_to_tree[2, tree] = linear_indices[1, cell_y, cell_z] - 1 + tree_to_face[2, tree] = 0 + else # Non-periodic boundary, tree and face point to themselves (zero-based indexing) + tree_to_tree[2, tree] = tree - 1 + tree_to_face[2, tree] = 1 + end + + # Negative y-direction + if cell_y > 1 + tree_to_tree[3, tree] = linear_indices[cell_x, cell_y - 1, cell_z] - 1 + tree_to_face[3, tree] = 3 + elseif periodicity[2] + tree_to_tree[3, tree] = linear_indices[cell_x, n_cells_y, cell_z] - 1 + tree_to_face[3, tree] = 3 + else # Non-periodic boundary, tree and face point to themselves (zero-based indexing) + tree_to_tree[3, tree] = tree - 1 + tree_to_face[3, tree] = 2 + end + + # Positive y-direction + if cell_y < n_cells_y + tree_to_tree[4, tree] = linear_indices[cell_x, cell_y + 1, cell_z] - 1 + tree_to_face[4, tree] = 2 + elseif periodicity[2] + tree_to_tree[4, tree] = linear_indices[cell_x, 1, cell_z] - 1 + tree_to_face[4, tree] = 2 + else # Non-periodic boundary, tree and face point to themselves (zero-based indexing) + tree_to_tree[4, tree] = tree - 1 + tree_to_face[4, tree] = 3 + end + + # Negative z-direction + if cell_z > 1 + tree_to_tree[5, tree] = linear_indices[cell_x, cell_y, cell_z - 1] - 1 + tree_to_face[5, tree] = 5 + elseif periodicity[3] + tree_to_tree[5, tree] = linear_indices[cell_x, cell_y, n_cells_z] - 1 + tree_to_face[5, tree] = 5 + else # Non-periodic boundary, tree and face point to themselves (zero-based indexing) + tree_to_tree[5, tree] = tree - 1 + tree_to_face[5, tree] = 4 + end + + # Positive z-direction + if cell_z < n_cells_z + tree_to_tree[6, tree] = linear_indices[cell_x, cell_y, cell_z + 1] - 1 + tree_to_face[6, tree] = 4 + elseif periodicity[3] + tree_to_tree[6, tree] = linear_indices[cell_x, cell_y, 1] - 1 + tree_to_face[6, tree] = 4 + else # Non-periodic boundary, tree and face point 
to themselves (zero-based indexing)
+      tree_to_tree[6, tree] = tree - 1
+      tree_to_face[6, tree] = 5
+    end
+  end
+
+  tree_to_edge = C_NULL
+  # p4est docs: "in trivial cases it is just a pointer to a p4est_topix value of 0."
+  # We don't need edge connectivity, so this is a trivial case.
+  ett_offset = Array{p4est_topidx_t}([0])
+  edge_to_tree = C_NULL
+  edge_to_edge = C_NULL
+
+  tree_to_corner = C_NULL
+  # p4est docs: "in trivial cases it is just a pointer to a p4est_topix value of 0."
+  # We don't need corner connectivity, so this is a trivial case.
+  ctt_offset = Array{p4est_topidx_t}([0])
+
+  corner_to_tree = C_NULL
+  corner_to_corner = C_NULL
+
+  conn = p8est_connectivity_new_copy(n_vertices, n_trees, n_edges, n_corners,
+                                     vertices, tree_to_vertex,
+                                     tree_to_tree, tree_to_face,
+                                     tree_to_edge, ett_offset,
+                                     edge_to_tree, edge_to_edge,
+                                     tree_to_corner, ctt_offset,
+                                     corner_to_tree, corner_to_corner)
+
+  @assert p8est_connectivity_is_valid(conn) == 1
+
+  return conn
 end


-# Calculate physical coordinates of each node.
+# Calculate physical coordinates of each node of a structured mesh.
 # This function assumes a structured mesh with trees in row order.
 # 2D version
 function calc_tree_node_coordinates!(node_coordinates::AbstractArray{<:Any, 4},
@@ -356,9 +536,40 @@ function calc_tree_node_coordinates!(node_coordinates::AbstractArray{<:Any, 4},
   end
 end

+# 3D version
+function calc_tree_node_coordinates!(node_coordinates::AbstractArray{<:Any, 5},
+                                     nodes, mapping, trees_per_dimension)
+  linear_indices = LinearIndices(trees_per_dimension)
+
+  # Get cell length in reference mesh
+  dx = 2 / trees_per_dimension[1]
+  dy = 2 / trees_per_dimension[2]
+  dz = 2 / trees_per_dimension[3]
+
+  for cell_z in 1:trees_per_dimension[3],
+      cell_y in 1:trees_per_dimension[2],
+      cell_x in 1:trees_per_dimension[1]
+
+    tree_id = linear_indices[cell_x, cell_y, cell_z]
+
+    # Calculate node coordinates of reference mesh
+    cell_x_offset = -1 + (cell_x-1) * dx + dx/2
+    cell_y_offset = -1 + (cell_y-1) * dy + dy/2
+    cell_z_offset = -1 + (cell_z-1) * dz + dz/2
+
+    for k in eachindex(nodes), j in eachindex(nodes), i in eachindex(nodes)
+      # node_coordinates are the mapped reference node coordinates
+      node_coordinates[:, i, j, k, tree_id] .= mapping(cell_x_offset + dx/2 * nodes[i],
+                                                       cell_y_offset + dy/2 * nodes[j],
+                                                       cell_z_offset + dz/2 * nodes[k])
+    end
+  end
+end
+

-# Calculate physical coordinates of each node.
-# Extract corners of each tree, interpolate to requested interpolation nodes,
+# Calculate physical coordinates of each node of an unstructured mesh.
+# Extract corners of each tree from the connectivity,
+# interpolate to requested interpolation nodes,
 # map the resulting coordinates with the specified mapping.
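# Minimal illustrative sketch (not part of the patch), assuming a single fully
# periodic tree, i.e. trees_per_dimension = (1, 1, 1) and periodicity = (true, true, true):
# every face of the tree connects back to itself (tree id 0 in p4est's zero-based
# numbering), and `tree_to_face` stores the opposite face of each pair.
expected_tree_to_tree = zeros(Int, 6, 1)                  # all six faces point back to tree 0
expected_tree_to_face = reshape([1, 0, 3, 2, 5, 4], 6, 1) # opposite face, seen from faces 1..6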
# 2D version function calc_tree_node_coordinates!(node_coordinates::AbstractArray{RealT, 4}, @@ -371,11 +582,12 @@ function calc_tree_node_coordinates!(node_coordinates::AbstractArray{RealT, 4}, for tree in 1:size(tree_to_vertex, 2) # Tree vertices are stored in Z-order, ignore z-coordinate in 2D, zero-based indexing - data_in[:, 1, 1] .= vertices[1:2, tree_to_vertex[1, tree] + 1] - data_in[:, 2, 1] .= vertices[1:2, tree_to_vertex[2, tree] + 1] - data_in[:, 1, 2] .= vertices[1:2, tree_to_vertex[3, tree] + 1] - data_in[:, 2, 2] .= vertices[1:2, tree_to_vertex[4, tree] + 1] + @views data_in[:, 1, 1] .= vertices[1:2, tree_to_vertex[1, tree] + 1] + @views data_in[:, 2, 1] .= vertices[1:2, tree_to_vertex[2, tree] + 1] + @views data_in[:, 1, 2] .= vertices[1:2, tree_to_vertex[3, tree] + 1] + @views data_in[:, 2, 2] .= vertices[1:2, tree_to_vertex[4, tree] + 1] + # Interpolate corner coordinates to specified nodes multiply_dimensionwise!( view(node_coordinates, :, :, :, tree), matrix, matrix, @@ -403,6 +615,66 @@ function map_node_coordinates!(node_coordinates::AbstractArray{<:Any, 4}, mappin return node_coordinates end +# 3D version +function calc_tree_node_coordinates!(node_coordinates::AbstractArray{RealT, 5}, + nodes, mapping, + vertices, tree_to_vertex) where RealT + nodes_in = [-1.0, 1.0] + matrix = polynomial_interpolation_matrix(nodes_in, nodes) + data_in = Array{RealT, 4}(undef, 3, 2, 2, 2) + + for tree in 1:size(tree_to_vertex, 2) + # Tree vertices are stored in Z-order, zero-based indexing + @views data_in[:, 1, 1, 1] .= vertices[:, tree_to_vertex[1, tree] + 1] + @views data_in[:, 2, 1, 1] .= vertices[:, tree_to_vertex[2, tree] + 1] + @views data_in[:, 1, 2, 1] .= vertices[:, tree_to_vertex[3, tree] + 1] + @views data_in[:, 2, 2, 1] .= vertices[:, tree_to_vertex[4, tree] + 1] + @views data_in[:, 1, 1, 2] .= vertices[:, tree_to_vertex[5, tree] + 1] + @views data_in[:, 2, 1, 2] .= vertices[:, tree_to_vertex[6, tree] + 1] + @views data_in[:, 1, 2, 2] .= vertices[:, tree_to_vertex[7, tree] + 1] + @views data_in[:, 2, 2, 2] .= vertices[:, tree_to_vertex[8, tree] + 1] + + # Interpolate corner coordinates to specified nodes + multiply_dimensionwise!( + view(node_coordinates, :, :, :, :, tree), + matrix, matrix, matrix, + data_in + ) + end + + map_node_coordinates!(node_coordinates, mapping) +end + +function map_node_coordinates!(node_coordinates::AbstractArray{<:Any, 5}, mapping) + for tree in axes(node_coordinates, 5), + k in axes(node_coordinates, 4), + j in axes(node_coordinates, 3), + i in axes(node_coordinates, 2) + + node_coordinates[:, i, j, k, tree] .= mapping(node_coordinates[1, i, j, k, tree], + node_coordinates[2, i, j, k, tree], + node_coordinates[3, i, j, k, tree]) + end + + return node_coordinates +end + +function map_node_coordinates!(node_coordinates::AbstractArray{<:Any, 5}, mapping::Nothing) + return node_coordinates +end + + +function balance!(mesh::P4estMesh{2}, init_fn=C_NULL) + p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn) + # Due to a bug in p4est, the forest needs to be rebalanced twice sometimes + # See https://github.com/cburstedde/p4est/issues/112 + p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn) +end + +function balance!(mesh::P4estMesh{3}, init_fn=C_NULL) + p8est_balance(mesh.p4est, P8EST_CONNECT_FACE, init_fn) +end + function init_fn(p4est, which_tree, quadrant) # Unpack quadrant's user data ([global quad ID, controller_value]) @@ -415,6 +687,11 @@ function init_fn(p4est, which_tree, quadrant) return nothing end +# 2D +cfunction(::typeof(init_fn), 
::Val{2}) = @cfunction(init_fn, Cvoid, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) +# 3D +cfunction(::typeof(init_fn), ::Val{3}) = @cfunction(init_fn, Cvoid, (Ptr{p8est_t}, Ptr{p4est_topidx_t}, Ptr{p8est_quadrant_t})) + function refine_fn(p4est, which_tree, quadrant) # Controller value has been copied to the quadrant's user data storage before. # Unpack quadrant's user data ([global quad ID, controller_value]). @@ -430,6 +707,11 @@ function refine_fn(p4est, which_tree, quadrant) end end +# 2D +cfunction(::typeof(refine_fn), ::Val{2}) = @cfunction(refine_fn, Cint, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) +# 3D +cfunction(::typeof(refine_fn), ::Val{3}) = @cfunction(refine_fn, Cint, (Ptr{p8est_t}, Ptr{p4est_topidx_t}, Ptr{p8est_quadrant_t})) + # Refine marked cells and rebalance forest. # Return a list of all cells that have been refined during refinement or rebalancing. function refine!(mesh::P4estMesh) @@ -437,25 +719,20 @@ function refine!(mesh::P4estMesh) original_n_cells = ncells(mesh) save_original_ids(mesh) - init_fn_c = @cfunction(init_fn, Cvoid, - (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) + init_fn_c = cfunction(init_fn, Val(ndims(mesh))) + refine_fn_c = cfunction(refine_fn, Val(ndims(mesh))) # Refine marked cells - refine_fn_c = @cfunction(refine_fn, Cint, - (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) - @trixi_timeit timer() "refine" p4est_refine(mesh.p4est, false, refine_fn_c, init_fn_c) + @trixi_timeit timer() "refine" refine_p4est!(mesh.p4est, false, refine_fn_c, init_fn_c) - @trixi_timeit timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) - # Due to a bug in p4est, the forest needs to be rebalanced twice sometimes - # See https://github.com/cburstedde/p4est/issues/112 - @trixi_timeit timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) + @trixi_timeit timer() "rebalance" balance!(mesh, init_fn_c) return collect_changed_cells(mesh, original_n_cells) end function coarsen_fn(p4est, which_tree, quadrants_ptr) - quadrants = unsafe_wrap(Array, quadrants_ptr, 4) + quadrants = unsafe_wrap_quadrants(quadrants_ptr, p4est) # Controller value has been copied to the quadrant's user data storage before. # Load controller value from quadrant's user data ([global quad ID, controller_value]). @@ -472,6 +749,16 @@ function coarsen_fn(p4est, which_tree, quadrants_ptr) end end +# 2D +unsafe_wrap_quadrants(quadrants_ptr, ::Ptr{p4est_t}) = unsafe_wrap(Array, quadrants_ptr, 4) +# 3D +unsafe_wrap_quadrants(quadrants_ptr, ::Ptr{p8est_t}) = unsafe_wrap(Array, quadrants_ptr, 8) + +# 2D +cfunction(::typeof(coarsen_fn), ::Val{2}) = @cfunction(coarsen_fn, Cint, (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{Ptr{p4est_quadrant_t}})) +# 3D +cfunction(::typeof(coarsen_fn), ::Val{3}) = @cfunction(coarsen_fn, Cint, (Ptr{p8est_t}, Ptr{p4est_topidx_t}, Ptr{Ptr{p8est_quadrant_t}})) + # Coarsen marked cells if the forest will stay balanced. # Return a list of all cells that have been coarsened. function coarsen!(mesh::P4estMesh) @@ -480,12 +767,10 @@ function coarsen!(mesh::P4estMesh) save_original_ids(mesh) # Coarsen marked cells - coarsen_fn_c = @cfunction(coarsen_fn, Cint, - (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{Ptr{p4est_quadrant_t}})) - init_fn_c = @cfunction(init_fn, Cvoid, - (Ptr{p4est_t}, Ptr{p4est_topidx_t}, Ptr{p4est_quadrant_t})) + coarsen_fn_c = cfunction(coarsen_fn, Val(ndims(mesh))) + init_fn_c = cfunction(init_fn, Val(ndims(mesh))) - @trixi_timeit timer() "coarsen!" 
p4est_coarsen(mesh.p4est, false, coarsen_fn_c, init_fn_c) + @trixi_timeit timer() "coarsen!" coarsen_p4est!(mesh.p4est, false, coarsen_fn_c, init_fn_c) # IDs of newly created cells (one-based) new_cells = collect_new_cells(mesh) @@ -500,10 +785,7 @@ function coarsen!(mesh::P4estMesh) intermediate_n_cells = ncells(mesh) save_original_ids(mesh) - @trixi_timeit timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) - # Due to a bug in p4est, the forest needs to be rebalanced twice sometimes - # See https://github.com/cburstedde/p4est/issues/112 - @trixi_timeit timer() "rebalance" p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, init_fn_c) + @trixi_timeit timer() "rebalance" balance!(mesh, init_fn_c) refined_cells = collect_changed_cells(mesh, intermediate_n_cells) @@ -516,7 +798,7 @@ function coarsen!(mesh::P4estMesh) # i-th cell of the ones that have been created by coarsening has been refined again i = findfirst(==(refined_cell), new_cells) - # Remove IDs of the 2^NDIMS cells that have been coarsened to this cell + # Remove IDs of the 2^ndims cells that have been coarsened to this cell coarsened_cells[:, i] .= -1 end @@ -528,7 +810,7 @@ end # Copy global quad ID to quad's user data storage, will be called below function save_original_id_iter_volume(info, user_data) # Load tree from global trees array, one-based indexing - tree = unsafe_load_sc(p4est_tree_t, info.p4est.trees, info.treeid + 1) + tree = unsafe_load_tree(info.p4est, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset # Global quad ID @@ -542,16 +824,16 @@ function save_original_id_iter_volume(info, user_data) return nothing end +# 2D +cfunction(::typeof(save_original_id_iter_volume), ::Val{2}) = @cfunction(save_original_id_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) +# 3D +cfunction(::typeof(save_original_id_iter_volume), ::Val{3}) = @cfunction(save_original_id_iter_volume, Cvoid, (Ptr{p8est_iter_volume_info_t}, Ptr{Cvoid})) + # Copy old element IDs to each quad's user data storage function save_original_ids(mesh::P4estMesh) - iter_volume_c = @cfunction(save_original_id_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) + iter_volume_c = cfunction(save_original_id_iter_volume, Val(ndims(mesh))) - p4est_iterate(mesh.p4est, - C_NULL, # ghost layer - C_NULL, # user data - iter_volume_c, # iter_volume - C_NULL, # iter_face - C_NULL) # iter_corner + iterate_p4est(mesh.p4est, C_NULL; iter_volume_c=iter_volume_c) end @@ -576,20 +858,18 @@ function collect_changed_iter_volume(info, user_data) return nothing end -function collect_changed_cells(mesh, original_n_cells) +# 2D +cfunction(::typeof(collect_changed_iter_volume), ::Val{2}) = @cfunction(collect_changed_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) +# 3D +cfunction(::typeof(collect_changed_iter_volume), ::Val{3}) = @cfunction(collect_changed_iter_volume, Cvoid, (Ptr{p8est_iter_volume_info_t}, Ptr{Cvoid})) + +function collect_changed_cells(mesh::P4estMesh, original_n_cells) original_cells = collect(1:original_n_cells) # Iterate over all quads and set original cells that haven't been changed to zero - iter_volume_c = @cfunction(collect_changed_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) - - GC.@preserve original_cells begin - p4est_iterate(mesh.p4est, - C_NULL, # ghost layer - pointer(original_cells), - iter_volume_c, # iter_volume - C_NULL, # iter_face - C_NULL) # iter_corner - end + iter_volume_c = cfunction(collect_changed_iter_volume, 
Val(ndims(mesh))) + + iterate_p4est(mesh.p4est, original_cells; iter_volume_c=iter_volume_c) # Changed cells are all that haven't been set to zero above changed_original_cells = original_cells[original_cells .> 0] @@ -608,7 +888,7 @@ function collect_new_iter_volume(info, user_data) # original_id of cells that have been newly created is -1 if original_id < 0 # Load tree from global trees array, one-based indexing - tree = unsafe_load_sc(p4est_tree_t, info.p4est.trees, info.treeid + 1) + tree = unsafe_load_tree(info.p4est, info.treeid + 1) # Quadrant numbering offset of this quadrant offset = tree.quadrants_offset # Global quad ID @@ -624,20 +904,18 @@ function collect_new_iter_volume(info, user_data) return nothing end -function collect_new_cells(mesh) +# 2D +cfunction(::typeof(collect_new_iter_volume), ::Val{2}) = @cfunction(collect_new_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) +# 3D +cfunction(::typeof(collect_new_iter_volume), ::Val{3}) = @cfunction(collect_new_iter_volume, Cvoid, (Ptr{p8est_iter_volume_info_t}, Ptr{Cvoid})) + +function collect_new_cells(mesh::P4estMesh) cell_is_new = zeros(Int, ncells(mesh)) # Iterate over all quads and set original cells that have been changed to one - iter_volume_c = @cfunction(collect_new_iter_volume, Cvoid, (Ptr{p4est_iter_volume_info_t}, Ptr{Cvoid})) - - GC.@preserve cell_is_new begin - p4est_iterate(mesh.p4est, - C_NULL, # ghost layer - pointer(cell_is_new), - iter_volume_c, # iter_volume - C_NULL, # iter_face - C_NULL) # iter_corner - end + iter_volume_c = cfunction(collect_new_iter_volume, Val(ndims(mesh))) + + iterate_p4est(mesh.p4est, cell_is_new; iter_volume_c=iter_volume_c) # Changed cells are all that haven't been set to zero above new_cells = findall(==(1), cell_is_new) diff --git a/src/solvers/dg_curved/containers_3d.jl b/src/solvers/dg_curved/containers_3d.jl index 7e4feb6308..efebeac6de 100644 --- a/src/solvers/dg_curved/containers_3d.jl +++ b/src/solvers/dg_curved/containers_3d.jl @@ -53,13 +53,45 @@ end # Calculate Jacobian matrix of the mapping from the reference element to the element in the physical domain function calc_jacobian_matrix!(jacobian_matrix::AbstractArray{<:Any,6}, element, node_coordinates, basis) - for dim in 1:3, j in eachnode(basis), i in eachnode(basis) - # ∂/∂ξ - @views mul!(jacobian_matrix[dim, 1, :, i, j, element], basis.derivative_matrix, node_coordinates[dim, :, i, j, element]) - # ∂/∂η - @views mul!(jacobian_matrix[dim, 2, i, :, j, element], basis.derivative_matrix, node_coordinates[dim, i, :, j, element]) - # ∂/∂ζ - @views mul!(jacobian_matrix[dim, 3, i, j, :, element], basis.derivative_matrix, node_coordinates[dim, i, j, :, element]) + # The code below is equivalent to the following matrix multiplications but much faster. 
+ # + # for dim in 1:3, j in eachnode(basis), i in eachnode(basis) + # # ∂/∂ξ + # jacobian_matrix[dim, 1, :, i, j, element] = basis.derivative_matrix * node_coordinates[dim, :, i, j, element] + # # ∂/∂η + # jacobian_matrix[dim, 2, i, :, j, element] = basis.derivative_matrix * node_coordinates[dim, i, :, j, element] + # # ∂/∂ζ + # jacobian_matrix[dim, 3, i, j, :, element] = basis.derivative_matrix * node_coordinates[dim, i, j, :, element] + # end + + @turbo for dim in 1:3, k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(jacobian_matrix)) + + for ii in eachnode(basis) + result += basis.derivative_matrix[i, ii] * node_coordinates[dim, ii, j, k, element] + end + + jacobian_matrix[dim, 1, i, j, k, element] = result + end + + @turbo for dim in 1:3, k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(jacobian_matrix)) + + for ii in eachnode(basis) + result += basis.derivative_matrix[j, ii] * node_coordinates[dim, i, ii, k, element] + end + + jacobian_matrix[dim, 2, i, j, k, element] = result + end + + @turbo for dim in 1:3, k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(jacobian_matrix)) + + for ii in eachnode(basis) + result += basis.derivative_matrix[k, ii] * node_coordinates[dim, i, j, ii, element] + end + + jacobian_matrix[dim, 3, i, j, k, element] = result end return jacobian_matrix @@ -71,57 +103,107 @@ end # These are called Ja^i in Kopriva's blue book. function calc_contravariant_vectors!(contravariant_vectors::AbstractArray{<:Any,6}, element, jacobian_matrix, node_coordinates, basis::LobattoLegendreBasis) + @unpack derivative_matrix = basis + # The general form is # Jaⁱₙ = 0.5 * ( ∇ × (Xₘ ∇ Xₗ - Xₗ ∇ Xₘ) )ᵢ where (n, m, l) cyclic and ∇ = (∂/∂ξ, ∂/∂η, ∂/∂ζ)ᵀ - # Calculate the first summand of the cross product in each dimension - for n in 1:3, j in eachnode(basis), i in eachnode(basis) + for n in 1:3 # (n, m, l) cyclic m = (n % 3) + 1 l = ((n + 1) % 3) + 1 - # Calc only the first summand 0.5 * (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_η of - # Ja¹ₙ = 0.5 * [ (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_η - (Xₘ Xₗ_η - Xₗ Xₘ_η)_ζ ] - @views contravariant_vectors[n, 1, i, :, j, element] = 0.5 * basis.derivative_matrix * ( - node_coordinates[m, i, :, j, element] .* jacobian_matrix[l, 3, i, :, j, element] .- - node_coordinates[l, i, :, j, element] .* jacobian_matrix[m, 3, i, :, j, element]) - - # Calc only the first summand 0.5 * (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_ζ of - # Ja²ₙ = 0.5 * [ (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_ζ - (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_ξ ] - @views contravariant_vectors[n, 2, i, j, :, element] = 0.5 * basis.derivative_matrix * ( - node_coordinates[m, i, j, :, element] .* jacobian_matrix[l, 1, i, j, :, element] .- - node_coordinates[l, i, j, :, element] .* jacobian_matrix[m, 1, i, j, :, element]) - - # Calc only the first summand 0.5 * (Xₘ Xₗ_η - Xₗ Xₘ_η)_ξ of - # Ja³ₙ = 0.5 * [ (Xₘ Xₗ_η - Xₗ Xₘ_η)_ξ - (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_η ] - @views contravariant_vectors[n, 3, :, i, j, element] = 0.5 * basis.derivative_matrix * ( - node_coordinates[m, :, i, j, element] .* jacobian_matrix[l, 2, :, i, j, element] .- - node_coordinates[l, :, i, j, element] .* jacobian_matrix[m, 2, :, i, j, element]) - end + # Calculate Ja¹ₙ = 0.5 * [ (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_η - (Xₘ Xₗ_η - Xₗ Xₘ_η)_ζ ] + # For each of these, the first and second summand are computed in separate loops + # for performance reasons. 
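# Minimal illustrative sketch (the names `D` and `f` are made up, not from this file):
# each loop below, like those in `calc_jacobian_matrix!` above, applies the 1D
# derivative matrix along a single tensor index, which is just a matrix product
# along that index. For the ξ-direction,
#     (∂f/∂ξ)[i, j, k] = Σ_ii D[i, ii] * f[ii, j, k].
D = [0.5 -0.5; 0.5 0.5]
f = reshape(1.0:8.0, 2, 2, 2)
dfdxi = [sum(D[i, ii] * f[ii, j, k] for ii in 1:2) for i in 1:2, j in 1:2, k in 1:2]
@assert dfdxi ≈ reshape(D * reshape(f, 2, 4), 2, 2, 2)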
- # Calculate the second summand of the cross product in each dimension - for n in 1:3, j in eachnode(basis), i in eachnode(basis) - # (n, m, l) cyclic - m = (n % 3) + 1 - l = ((n + 1) % 3) + 1 + # First summand 0.5 * (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_η + @turbo for k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(contravariant_vectors)) + + for ii in eachnode(basis) + # Multiply derivative_matrix to j-dimension to differentiate wrt η + result += 0.5 * derivative_matrix[j, ii] * ( + node_coordinates[m, i, ii, k, element] * jacobian_matrix[l, 3, i, ii, k, element] - + node_coordinates[l, i, ii, k, element] * jacobian_matrix[m, 3, i, ii, k, element]) + end + + contravariant_vectors[n, 1, i, j, k, element] = result + end + + # Second summand -0.5 * (Xₘ Xₗ_η - Xₗ Xₘ_η)_ζ + @turbo for k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(contravariant_vectors)) - # Calc only the second summand -0.5 * (Xₘ Xₗ_η - Xₗ Xₘ_η)_ζ of - # Ja¹ₙ = 0.5 * [ (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_η - (Xₘ Xₗ_η - Xₗ Xₘ_η)_ζ ] - @views contravariant_vectors[n, 1, i, j, :, element] -= 0.5 * basis.derivative_matrix * ( - node_coordinates[m, i, j, :, element] .* jacobian_matrix[l, 2, i, j, :, element] .- - node_coordinates[l, i, j, :, element] .* jacobian_matrix[m, 2, i, j, :, element]) - - # Calc only the second summand -0.5 * (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_ξ of - # Ja²ₙ = 0.5 * [ (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_ζ - (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_ξ ] - @views contravariant_vectors[n, 2, :, i, j, element] -= 0.5 * basis.derivative_matrix * ( - node_coordinates[m, :, i, j, element] .* jacobian_matrix[l, 3, :, i, j, element] .- - node_coordinates[l, :, i, j, element] .* jacobian_matrix[m, 3, :, i, j, element]) - - # Calc only the second summand -0.5 * (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_η of - # Ja³ₙ = 0.5 * [ (Xₘ Xₗ_η - Xₗ Xₘ_η)_ξ - (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_η ] - @views contravariant_vectors[n, 3, i, :, j, element] -= 0.5 * basis.derivative_matrix * ( - node_coordinates[m, i, :, j, element] .* jacobian_matrix[l, 1, i, :, j, element] .- - node_coordinates[l, i, :, j, element] .* jacobian_matrix[m, 1, i, :, j, element]) + for ii in eachnode(basis) + # Multiply derivative_matrix to k-dimension to differentiate wrt ζ + result += 0.5 * derivative_matrix[k, ii] * ( + node_coordinates[m, i, j, ii, element] * jacobian_matrix[l, 2, i, j, ii, element] - + node_coordinates[l, i, j, ii, element] * jacobian_matrix[m, 2, i, j, ii, element]) + end + + contravariant_vectors[n, 1, i, j, k, element] -= result + end + + # Calculate Ja²ₙ = 0.5 * [ (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_ζ - (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_ξ ] + + # First summand 0.5 * (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_ζ + @turbo for k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(contravariant_vectors)) + + for ii in eachnode(basis) + # Multiply derivative_matrix to k-dimension to differentiate wrt ζ + result += 0.5 * derivative_matrix[k, ii] * ( + node_coordinates[m, i, j, ii, element] * jacobian_matrix[l, 1, i, j, ii, element] - + node_coordinates[l, i, j, ii, element] * jacobian_matrix[m, 1, i, j, ii, element]) + end + + contravariant_vectors[n, 2, i, j, k, element] = result + end + + # Second summand -0.5 * (Xₘ Xₗ_ζ - Xₗ Xₘ_ζ)_ξ + @turbo for k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(contravariant_vectors)) + + for ii in eachnode(basis) + # Multiply derivative_matrix to i-dimension to differentiate wrt ξ + result += 0.5 * derivative_matrix[i, ii] * ( + node_coordinates[m, ii, j, k, element] * jacobian_matrix[l, 3, ii, j, k, element] - + 
node_coordinates[l, ii, j, k, element] * jacobian_matrix[m, 3, ii, j, k, element]) + end + + contravariant_vectors[n, 2, i, j, k, element] -= result + end + + # Calculate Ja³ₙ = 0.5 * [ (Xₘ Xₗ_η - Xₗ Xₘ_η)_ξ - (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_η ] + + # First summand 0.5 * (Xₘ Xₗ_η - Xₗ Xₘ_η)_ξ + @turbo for k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(contravariant_vectors)) + + for ii in eachnode(basis) + # Multiply derivative_matrix to i-dimension to differentiate wrt ξ + result += 0.5 * derivative_matrix[i, ii] * ( + node_coordinates[m, ii, j, k, element] * jacobian_matrix[l, 2, ii, j, k, element] - + node_coordinates[l, ii, j, k, element] * jacobian_matrix[m, 2, ii, j, k, element]) + end + + contravariant_vectors[n, 3, i, j, k, element] = result + end + + # Second summand -0.5 * (Xₘ Xₗ_ξ - Xₗ Xₘ_ξ)_η + @turbo for k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + result = zero(eltype(contravariant_vectors)) + + for ii in eachnode(basis) + # Multiply derivative_matrix to j-dimension to differentiate wrt η + result += 0.5 * derivative_matrix[j, ii] * ( + node_coordinates[m, i, ii, k, element] * jacobian_matrix[l, 1, i, ii, k, element] - + node_coordinates[l, i, ii, k, element] * jacobian_matrix[m, 1, i, ii, k, element]) + end + + contravariant_vectors[n, 3, i, j, k, element] -= result + end end return contravariant_vectors @@ -130,7 +212,7 @@ end # Calculate inverse Jacobian (determinant of Jacobian matrix of the mapping) in each node function calc_inverse_jacobian!(inverse_jacobian::AbstractArray{<:Any, 4}, element, jacobian_matrix, basis) - for k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) + @turbo for k in eachnode(basis), j in eachnode(basis), i in eachnode(basis) # Calculate Determinant by using Sarrus formula (about 100 times faster than LinearAlgebra.det()) inverse_jacobian[i, j, k, element] = inv( jacobian_matrix[1, 1, i, j, k, element] * jacobian_matrix[2, 2, i, j, k, element] * jacobian_matrix[3, 3, i, j, k, element] + diff --git a/src/solvers/dg_curved/dg_3d.jl b/src/solvers/dg_curved/dg_3d.jl index 1c8af7e1da..283663a88b 100644 --- a/src/solvers/dg_curved/dg_3d.jl +++ b/src/solvers/dg_curved/dg_3d.jl @@ -38,7 +38,7 @@ end function calc_volume_integral!(du, u, - mesh::CurvedMesh{3}, + mesh::Union{CurvedMesh{3}, P4estMesh{3}}, nonconservative_terms::Val{false}, equations, volume_integral::VolumeIntegralWeakForm, dg::DGSEM, cache) @@ -563,7 +563,7 @@ end function apply_jacobian!(du, - mesh::CurvedMesh{3}, + mesh::Union{CurvedMesh{3}, P4estMesh{3}}, equations, dg::DG, cache) @threaded for element in eachelement(dg, cache) diff --git a/src/solvers/dg_p4est/containers.jl b/src/solvers/dg_p4est/containers.jl index 547add992e..098d0496d9 100644 --- a/src/solvers/dg_p4est/containers.jl +++ b/src/solvers/dg_p4est/containers.jl @@ -1,5 +1,5 @@ # Note: This is an experimental feature and may be changed in future releases without notice. 
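# Minimal illustrative sketch of the Sarrus rule used in `calc_inverse_jacobian!`
# above (the matrix `A` is made up): for a 3×3 matrix,
#   det(A) = a11*a22*a33 + a12*a23*a31 + a13*a21*a32
#          - a31*a22*a13 - a32*a23*a11 - a33*a21*a12,
# which the comment above reports as roughly 100 times faster than `LinearAlgebra.det`.
using LinearAlgebra: det
A = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 10.0]
det_sarrus = A[1,1]*A[2,2]*A[3,3] + A[1,2]*A[2,3]*A[3,1] + A[1,3]*A[2,1]*A[3,2] -
             A[3,1]*A[2,2]*A[1,3] - A[3,2]*A[2,3]*A[1,1] - A[3,3]*A[2,1]*A[1,2]
@assert det_sarrus ≈ det(A) ≈ -3.0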
-mutable struct ElementContainerP4est{NDIMS, RealT<:Real, uEltype<:Real, NDIMSP1, NDIMSP2, NDIMSP3} <: AbstractContainer +mutable struct P4estElementContainer{NDIMS, RealT<:Real, uEltype<:Real, NDIMSP1, NDIMSP2, NDIMSP3} <: AbstractContainer # Physical coordinates at each node node_coordinates ::Array{RealT, NDIMSP2} # [orientation, node_i, node_j, node_k, element] # Jacobian matrix of the transformation @@ -20,16 +20,16 @@ mutable struct ElementContainerP4est{NDIMS, RealT<:Real, uEltype<:Real, NDIMSP1, _surface_flux_values ::Vector{uEltype} end -@inline nelements(elements::ElementContainerP4est) = size(elements.node_coordinates, ndims(elements) + 2) -@inline Base.ndims(::ElementContainerP4est{NDIMS}) where NDIMS = NDIMS -@inline Base.eltype(::ElementContainerP4est{NDIMS, RealT, uEltype}) where {NDIMS, RealT, uEltype} = uEltype +@inline nelements(elements::P4estElementContainer) = size(elements.node_coordinates, ndims(elements) + 2) +@inline Base.ndims(::P4estElementContainer{NDIMS}) where NDIMS = NDIMS +@inline Base.eltype(::P4estElementContainer{NDIMS, RealT, uEltype}) where {NDIMS, RealT, uEltype} = uEltype # Only one-dimensional `Array`s are `resize!`able in Julia. # Hence, we use `Vector`s as internal storage and `resize!` # them whenever needed. Then, we reuse the same memory by # `unsafe_wrap`ping multi-dimensional `Array`s around the # internal storage. -function Base.resize!(elements::ElementContainerP4est, capacity) +function Base.resize!(elements::P4estElementContainer, capacity) @unpack _node_coordinates, _jacobian_matrix, _contravariant_vectors, _inverse_jacobian, _surface_flux_values = elements @@ -88,7 +88,7 @@ function init_elements(mesh::P4estMesh{NDIMS, RealT}, equations, surface_flux_values = unsafe_wrap(Array, pointer(_surface_flux_values), (nvariables(equations), ntuple(_ -> nnodes(basis), NDIMS-1)..., NDIMS*2, nelements)) - elements = ElementContainerP4est{NDIMS, RealT, uEltype, NDIMS+1, NDIMS+2, NDIMS+3}( + elements = P4estElementContainer{NDIMS, RealT, uEltype, NDIMS+1, NDIMS+2, NDIMS+3}( node_coordinates, jacobian_matrix, contravariant_vectors, inverse_jacobian, surface_flux_values, _node_coordinates, _jacobian_matrix, _contravariant_vectors, @@ -99,7 +99,7 @@ function init_elements(mesh::P4estMesh{NDIMS, RealT}, equations, end -mutable struct InterfaceContainerP4est{NDIMS, uEltype<:Real, NDIMSP2} <: AbstractContainer +mutable struct P4estInterfaceContainer{NDIMS, uEltype<:Real, NDIMSP2} <: AbstractContainer u ::Array{uEltype, NDIMSP2} # [primary/secondary, variable, i, j, interface] element_ids ::Matrix{Int} # [primary/secondary, interface] node_indices ::Matrix{NTuple{NDIMS, Symbol}} # [primary/secondary, interface] @@ -110,11 +110,11 @@ mutable struct InterfaceContainerP4est{NDIMS, uEltype<:Real, NDIMSP2} <: Abstrac _node_indices ::Vector{NTuple{NDIMS, Symbol}} end -@inline ninterfaces(interfaces::InterfaceContainerP4est) = size(interfaces.element_ids, 2) -@inline Base.ndims(::InterfaceContainerP4est{NDIMS}) where NDIMS = NDIMS +@inline ninterfaces(interfaces::P4estInterfaceContainer) = size(interfaces.element_ids, 2) +@inline Base.ndims(::P4estInterfaceContainer{NDIMS}) where NDIMS = NDIMS # See explanation of Base.resize! 
for the element container -function Base.resize!(interfaces::InterfaceContainerP4est, capacity) +function Base.resize!(interfaces::P4estInterfaceContainer, capacity) @unpack _u, _element_ids, _node_indices = interfaces n_dims = ndims(interfaces) @@ -153,7 +153,7 @@ function init_interfaces(mesh::P4estMesh, equations, basis, elements) _node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, 2 * n_interfaces) node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, n_interfaces)) - interfaces = InterfaceContainerP4est{NDIMS, uEltype, NDIMS+2}(u, element_ids, node_indices, + interfaces = P4estInterfaceContainer{NDIMS, uEltype, NDIMS+2}(u, element_ids, node_indices, _u, _element_ids, _node_indices) init_interfaces!(interfaces, mesh) @@ -162,7 +162,14 @@ function init_interfaces(mesh::P4estMesh, equations, basis, elements) end -mutable struct BoundaryContainerP4est{NDIMS, uEltype<:Real, NDIMSP1} <: AbstractContainer +function init_interfaces!(interfaces, mesh::P4estMesh) + init_surfaces!(interfaces, nothing, nothing, mesh) + + return interfaces +end + + +mutable struct P4estBoundaryContainer{NDIMS, uEltype<:Real, NDIMSP1} <: AbstractContainer u ::Array{uEltype, NDIMSP1} # [variables, i, j, boundary] element_ids ::Vector{Int} # [boundary] node_indices::Vector{NTuple{NDIMS, Symbol}} # [boundary] @@ -172,11 +179,11 @@ mutable struct BoundaryContainerP4est{NDIMS, uEltype<:Real, NDIMSP1} <: Abstract _u ::Vector{uEltype} end -@inline nboundaries(boundaries::BoundaryContainerP4est) = length(boundaries.element_ids) -@inline Base.ndims(::BoundaryContainerP4est{NDIMS}) where NDIMS = NDIMS +@inline nboundaries(boundaries::P4estBoundaryContainer) = length(boundaries.element_ids) +@inline Base.ndims(::P4estBoundaryContainer{NDIMS}) where NDIMS = NDIMS # See explanation of Base.resize! 
for the element container -function Base.resize!(boundaries::BoundaryContainerP4est, capacity) +function Base.resize!(boundaries::P4estBoundaryContainer, capacity) @unpack _u, element_ids, node_indices, name = boundaries n_dims = ndims(boundaries) @@ -213,15 +220,57 @@ function init_boundaries(mesh::P4estMesh, equations, basis, elements) node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, n_boundaries) names = Vector{Symbol}(undef, n_boundaries) - boundaries = BoundaryContainerP4est{NDIMS, uEltype, NDIMS+1}(u, element_ids, + boundaries = P4estBoundaryContainer{NDIMS, uEltype, NDIMS+1}(u, element_ids, node_indices, names, _u) - init_boundaries!(boundaries, mesh) + if n_boundaries > 0 + init_boundaries!(boundaries, mesh) + end + + return boundaries +end + + +function init_boundaries!(boundaries, mesh::P4estMesh) + init_surfaces!(nothing, nothing, boundaries, mesh) return boundaries end +# Function barrier for type stability +function init_boundaries_iter_face_inner(info, boundaries, boundary_id, mesh) + # Extract boundary data + side = unsafe_load_side(info) + # Get local tree, one-based indexing + tree = unsafe_load_tree(mesh.p4est, side.treeid + 1) + # Quadrant numbering offset of this quadrant + offset = tree.quadrants_offset + + # Verify before accessing is.full, but this should never happen + @assert side.is_hanging == false + + local_quad_id = side.is.full.quadid + # Global ID of this quad + quad_id = offset + local_quad_id + + # Write data to boundaries container + # p4est uses zero-based indexing; convert to one-based indexing + boundaries.element_ids[boundary_id] = quad_id + 1 + + # Face at which the boundary lies + face = side.face + + # Save boundaries.node_indices dimension specific in containers_[23]d.jl + init_boundary_node_indices!(boundaries, face, boundary_id) + + # One-based indexing + boundaries.name[boundary_id] = mesh.boundary_names[face + 1, side.treeid + 1] + + return nothing +end + + # Container data structure (structure-of-arrays style) for DG L2 mortars # # The positions used in `element_ids` are 1:3 (in 2D) or 1:5 (in 3D), where 1:2 (in 2D) @@ -234,20 +283,20 @@ end # of the mortar element, which are precisely the local coordinates that span # the surface of the smaller side. # Note that the orientation in the physical space is completely irrelevant here. 
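# Minimal illustrative sketch of the `element_ids` layout described above
# (the ids are made up): in 3D, each mortar column stores the four small
# elements in positions 1:4 and the large element in position 5, matching the
# figure below.
element_ids_3d = reshape([11, 12, 13, 14, 3], 5, 1)  # one mortar, hypothetical element ids
small_elements = element_ids_3d[1:end-1, 1]          # positions 1:4 (small side)
large_element  = element_ids_3d[end, 1]              # position 5 (large side)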
-# /---------------------------\ /----------------------------\ -# | | | | | -# | small | small | | | -# | 3 | 4 | | | -# | | | | large | -# |-------------|-------------| | 5 | -# η | | | | | -# | small | small | | | -# ^ | 1 | 2 | | | -# | | | | | | -# | \---------------------------/ \----------------------------/ -# | -# ⋅----> ξ -mutable struct MortarContainerP4est{NDIMS, uEltype<:Real, NDIMSP1, NDIMSP3} <: AbstractContainer +# ┌─────────────┬─────────────┐ ┌───────────────────────────┐ +# │ │ │ │ │ +# │ small │ small │ │ │ +# │ 3 │ 4 │ │ │ +# │ │ │ │ large │ +# ├─────────────┼─────────────┤ │ 5 │ +# η │ │ │ │ │ +# │ small │ small │ │ │ +# ↑ │ 1 │ 2 │ │ │ +# │ │ │ │ │ │ +# │ └─────────────┴─────────────┘ └───────────────────────────┘ +# │ +# ⋅────> ξ +mutable struct P4estMortarContainer{NDIMS, uEltype<:Real, NDIMSP1, NDIMSP3} <: AbstractContainer u ::Array{uEltype, NDIMSP3} # [small/large side, variable, position, i, j, mortar] element_ids ::Matrix{Int} # [position, mortar] node_indices ::Matrix{NTuple{NDIMS, Symbol}} # [small/large, mortar] @@ -258,11 +307,11 @@ mutable struct MortarContainerP4est{NDIMS, uEltype<:Real, NDIMSP1, NDIMSP3} <: A _node_indices ::Vector{NTuple{NDIMS, Symbol}} end -@inline nmortars(mortars::MortarContainerP4est) = size(mortars.element_ids, 2) -@inline Base.ndims(::MortarContainerP4est{NDIMS}) where NDIMS = NDIMS +@inline nmortars(mortars::P4estMortarContainer) = size(mortars.element_ids, 2) +@inline Base.ndims(::P4estMortarContainer{NDIMS}) where NDIMS = NDIMS # See explanation of Base.resize! for the element container -function Base.resize!(mortars::MortarContainerP4est, capacity) +function Base.resize!(mortars::P4estMortarContainer, capacity) @unpack _u, _element_ids, _node_indices = mortars n_dims = ndims(mortars) @@ -303,10 +352,19 @@ function init_mortars(mesh::P4estMesh, equations, basis, elements) _node_indices = Vector{NTuple{NDIMS, Symbol}}(undef, 2 * n_mortars) node_indices = unsafe_wrap(Array, pointer(_node_indices), (2, n_mortars)) - mortars = MortarContainerP4est{NDIMS, uEltype, NDIMS+1, NDIMS+3}(u, element_ids, node_indices, + mortars = P4estMortarContainer{NDIMS, uEltype, NDIMS+1, NDIMS+3}(u, element_ids, node_indices, _u, _element_ids, _node_indices) - init_mortars!(mortars, mesh) + if n_mortars > 0 + init_mortars!(mortars, mesh) + end + + return mortars +end + + +function init_mortars!(mortars, mesh::P4estMesh) + init_surfaces!(nothing, mortars, nothing, mesh) return mortars end @@ -338,6 +396,195 @@ function reinitialize_containers!(mesh::P4estMesh, equations, dg::DGSEM, cache) end +# A helper struct used in initialization methods below +mutable struct InitSurfacesIterFaceUserData{Interfaces, Mortars, Boundaries, Mesh} + interfaces ::Interfaces + interface_id::Int + mortars ::Mortars + mortar_id ::Int + boundaries ::Boundaries + boundary_id ::Int + mesh ::Mesh +end + +function InitSurfacesIterFaceUserData(interfaces, mortars, boundaries, mesh) + return InitSurfacesIterFaceUserData{ + typeof(interfaces), typeof(mortars), typeof(boundaries), typeof(mesh)}( + interfaces, 1, mortars, 1, boundaries, 1, mesh) +end + +function init_surfaces_iter_face(info, user_data) + # Unpack user_data + data = unsafe_pointer_to_objref(Ptr{InitSurfacesIterFaceUserData}(user_data)) + + # Function barrier because the unpacked user_data above is type-unstable + init_surfaces_iter_face_inner(info, data) +end + +# 2D +cfunction(::typeof(init_surfaces_iter_face), ::Val{2}) = @cfunction(init_surfaces_iter_face, Cvoid, (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid})) +# 3D 
+cfunction(::typeof(init_surfaces_iter_face), ::Val{3}) = @cfunction(init_surfaces_iter_face, Cvoid, (Ptr{p8est_iter_face_info_t}, Ptr{Cvoid})) + +# Function barrier for type stability +function init_surfaces_iter_face_inner(info, user_data) + @unpack interfaces, mortars, boundaries = user_data + + if info.sides.elem_count == 2 + # Two neighboring elements => Interface or mortar + + # Extract surface data + sides = (unsafe_load_side(info, 1), unsafe_load_side(info, 2)) + + if sides[1].is_hanging == 0 && sides[2].is_hanging == 0 + # No hanging nodes => normal interface + if interfaces !== nothing + init_interfaces_iter_face_inner(info, sides, user_data) + end + else + # Hanging nodes => mortar + if mortars !== nothing + init_mortars_iter_face_inner(info, sides, user_data) + end + end + elseif info.sides.elem_count == 1 + # One neighboring elements => boundary + if boundaries !== nothing + init_boundaries_iter_face_inner(info, user_data) + end + end + + return nothing +end + +function init_surfaces!(interfaces, mortars, boundaries, mesh::P4estMesh) + # Let p4est iterate over all interfaces and call init_surfaces_iter_face + iter_face_c = cfunction(init_surfaces_iter_face, Val(ndims(mesh))) + user_data = InitSurfacesIterFaceUserData( + interfaces, mortars, boundaries, mesh) + + iterate_p4est(mesh.p4est, user_data; iter_face_c=iter_face_c) + + return interfaces +end + + +# Initialization of interfaces after the function barrier +function init_interfaces_iter_face_inner(info, sides, user_data) + @unpack interfaces, interface_id, mesh = user_data + user_data.interface_id += 1 + + # Get Tuple of local trees, one-based indexing + trees = (unsafe_load_tree(mesh.p4est, sides[1].treeid + 1), + unsafe_load_tree(mesh.p4est, sides[2].treeid + 1)) + # Quadrant numbering offsets of the quadrants at this interface + offsets = SVector(trees[1].quadrants_offset, + trees[2].quadrants_offset) + + local_quad_ids = SVector(sides[1].is.full.quadid, sides[2].is.full.quadid) + # Global IDs of the neighboring quads + quad_ids = offsets + local_quad_ids + + # Write data to interfaces container + # p4est uses zero-based indexing; convert to one-based indexing + interfaces.element_ids[1, interface_id] = quad_ids[1] + 1 + interfaces.element_ids[2, interface_id] = quad_ids[2] + 1 + + # Face at which the interface lies + faces = (sides[1].face, sides[2].face) + + # Save interfaces.node_indices dimension specific in containers_[23]d.jl + init_interface_node_indices!(interfaces, faces, info.orientation, interface_id) + + return nothing +end + + +# Initialization of boundaries after the function barrier +function init_boundaries_iter_face_inner(info, user_data) + @unpack boundaries, boundary_id, mesh = user_data + user_data.boundary_id += 1 + + # Extract boundary data + side = unsafe_load_side(info) + # Get local tree, one-based indexing + tree = unsafe_load_tree(mesh.p4est, side.treeid + 1) + # Quadrant numbering offset of this quadrant + offset = tree.quadrants_offset + + # Verify before accessing is.full, but this should never happen + @assert side.is_hanging == false + + local_quad_id = side.is.full.quadid + # Global ID of this quad + quad_id = offset + local_quad_id + + # Write data to boundaries container + # p4est uses zero-based indexing; convert to one-based indexing + boundaries.element_ids[boundary_id] = quad_id + 1 + + # Face at which the boundary lies + face = side.face + + # Save boundaries.node_indices dimension specific in containers_[23]d.jl + init_boundary_node_indices!(boundaries, face, boundary_id) + + # 
One-based indexing + boundaries.name[boundary_id] = mesh.boundary_names[face + 1, side.treeid + 1] + + return nothing +end + + +# Initialization of mortars after the function barrier +function init_mortars_iter_face_inner(info, sides, user_data) + @unpack mortars, mortar_id, mesh = user_data + user_data.mortar_id += 1 + + # Get Tuple of local trees, one-based indexing + trees = (unsafe_load_tree(mesh.p4est, sides[1].treeid + 1), + unsafe_load_tree(mesh.p4est, sides[2].treeid + 1)) + # Quadrant numbering offsets of the quadrants at this interface + offsets = SVector(trees[1].quadrants_offset, + trees[2].quadrants_offset) + + if sides[1].is_hanging == true + # Left is small, right is large + faces = (sides[1].face, sides[2].face) + + local_small_quad_ids = sides[1].is.hanging.quadid + # Global IDs of the two small quads + small_quad_ids = offsets[1] .+ local_small_quad_ids + + # Just be sure before accessing is.full + @assert sides[2].is_hanging == false + large_quad_id = offsets[2] + sides[2].is.full.quadid + else # sides[2].is_hanging == true + # Right is small, left is large. + # init_mortar_node_indices! below expects side 1 to contain the small elements. + faces = (sides[2].face, sides[1].face) + + local_small_quad_ids = sides[2].is.hanging.quadid + # Global IDs of the two small quads + small_quad_ids = offsets[2] .+ local_small_quad_ids + + # Just be sure before accessing is.full + @assert sides[1].is_hanging == false + large_quad_id = offsets[1] + sides[1].is.full.quadid + end + + # Write data to mortar container, 1 and 2 are the small elements + # p4est uses zero-based indexing; convert to one-based indexing + mortars.element_ids[1:end-1, mortar_id] .= small_quad_ids[:] .+ 1 + # Last entry is the large element + mortars.element_ids[end, mortar_id] = large_quad_id + 1 + + init_mortar_node_indices!(mortars, faces, info.orientation, mortar_id) + + return nothing +end + + # Iterate over all interfaces and count # - (inner) interfaces # - mortars @@ -348,8 +595,7 @@ function count_surfaces_iter_face(info, user_data) # Two neighboring elements => Interface or mortar # Extract surface data - sides = (unsafe_load_sc(p4est_iter_face_side_t, info.sides, 1), - unsafe_load_sc(p4est_iter_face_side_t, info.sides, 2)) + sides = (unsafe_load_side(info, 1), unsafe_load_side(info, 2)) if sides[1].is_hanging == 0 && sides[2].is_hanging == 0 # No hanging nodes => normal interface @@ -376,14 +622,19 @@ function count_surfaces_iter_face(info, user_data) return nothing end +# 2D +cfunction(::typeof(count_surfaces_iter_face), ::Val{2}) = @cfunction(count_surfaces_iter_face, Cvoid, (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid})) +# 3D +cfunction(::typeof(count_surfaces_iter_face), ::Val{3}) = @cfunction(count_surfaces_iter_face, Cvoid, (Ptr{p8est_iter_face_info_t}, Ptr{Cvoid})) + function count_required_surfaces(mesh::P4estMesh) # Let p4est iterate over all interfaces and call count_surfaces_iter_face - iter_face_c = @cfunction(count_surfaces_iter_face, Cvoid, (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid})) + iter_face_c = cfunction(count_surfaces_iter_face, Val(ndims(mesh))) # interfaces, mortars, boundaries user_data = [0, 0, 0] - iterate_faces(mesh, iter_face_c, user_data) + iterate_p4est(mesh.p4est, user_data; iter_face_c=iter_face_c) # Return counters return (interfaces = user_data[1], @@ -391,46 +642,6 @@ function count_required_surfaces(mesh::P4estMesh) boundaries = user_data[3]) end -# Let p4est iterate over all interfaces and execute the C function iter_face_c -function iterate_faces(mesh::P4estMesh, 
iter_face_c, user_data) - if user_data isa AbstractArray - user_data_ptr = pointer(user_data) - else - user_data_ptr = pointer_from_objref(user_data) - end - GC.@preserve user_data begin - p4est_iterate(mesh.p4est, - C_NULL, # ghost layer - user_data_ptr, - C_NULL, # iter_volume - iter_face_c, # iter_face - C_NULL) # iter_corner - end - - return nothing -end - - -# Convert sc_array of type T to Julia array -function unsafe_wrap_sc(::Type{T}, sc_array) where T - element_count = sc_array.elem_count - element_size = sc_array.elem_size - - @assert element_size == sizeof(T) - - return [unsafe_wrap(T, sc_array.array + element_size * i) for i in 0:element_count-1] -end - - -# Load the ith element (1-indexed) of an sc array of type T -function unsafe_load_sc(::Type{T}, sc_array, i=1) where T - element_size = sc_array.elem_size - - @assert element_size == sizeof(T) - - return unsafe_wrap(T, sc_array.array + element_size * (i - 1)) -end - # Convert Tuple node_indices to actual indices. # E.g., (:one, :i, :j) will be (1, i, j) for some i and j, @@ -485,7 +696,7 @@ end indices_surface = indices[1:2] end - return evaluate_index(indices_surface, size, dim, i) + return evaluate_index(indices_surface, size, dim, i, j) end # Return direction of the face, which is indexed by node_indices @@ -508,3 +719,4 @@ end include("containers_2d.jl") +include("containers_3d.jl") diff --git a/src/solvers/dg_p4est/containers_2d.jl b/src/solvers/dg_p4est/containers_2d.jl index c8622f7d02..a48c9ddd74 100644 --- a/src/solvers/dg_p4est/containers_2d.jl +++ b/src/solvers/dg_p4est/containers_2d.jl @@ -17,7 +17,7 @@ function init_elements!(elements, mesh::P4estMesh{2}, basis::LobattoLegendreBasi end -# Interpolate tree_node_coordinates to each quadrant +# Interpolate tree_node_coordinates to each quadrant at the nodes of the specified basis function calc_node_coordinates!(node_coordinates, mesh::P4estMesh{2}, basis::LobattoLegendreBasis) @@ -28,6 +28,7 @@ function calc_node_coordinates!(node_coordinates, calc_node_coordinates!(node_coordinates, mesh, basis.nodes) end +# Interpolate tree_node_coordinates to each quadrant at the specified nodes function calc_node_coordinates!(node_coordinates, mesh::P4estMesh{2}, nodes::AbstractVector) @@ -75,104 +76,9 @@ function calc_node_coordinates!(node_coordinates, end -# A helper struct used in initialization methods below -mutable struct InitSurfacesIterFaceUserData{Interfaces, Mortars, Boundaries, Mesh} - interfaces ::Interfaces - interface_id::Int - mortars ::Mortars - mortar_id ::Int - boundaries ::Boundaries - boundary_id ::Int - mesh ::Mesh -end - -function InitSurfacesIterFaceUserData(interfaces, mortars, boundaries, mesh) - return InitSurfacesIterFaceUserData{ - typeof(interfaces), typeof(mortars), typeof(boundaries), typeof(mesh)}( - interfaces, 1, mortars, 1, boundaries, 1, mesh) -end - -function init_surfaces_iter_face(info, user_data) - # Unpack user_data - data = unsafe_pointer_to_objref(Ptr{InitSurfacesIterFaceUserData}(user_data)) - - # Function barrier because the unpacked user_data above is type-unstable - init_surfaces_iter_face_inner(info, data) -end - -# Function barrier for type stability -function init_surfaces_iter_face_inner(info, user_data) - @unpack interfaces, mortars, boundaries = user_data - - if info.sides.elem_count == 2 - # Two neighboring elements => Interface or mortar - - # Extract surface data - sides = (unsafe_load_sc(p4est_iter_face_side_t, info.sides, 1), - unsafe_load_sc(p4est_iter_face_side_t, info.sides, 2)) - - if sides[1].is_hanging == 0 && 
sides[2].is_hanging == 0 - # No hanging nodes => normal interface - if interfaces !== nothing - init_interfaces_iter_face_inner(info, sides, user_data) - end - else - # Hanging nodes => mortar - if mortars !== nothing - init_mortars_iter_face_inner(info, sides, user_data) - end - end - elseif info.sides.elem_count == 1 - # One neighboring elements => boundary - if boundaries !== nothing - init_boundaries_iter_face_inner(info, user_data) - end - end - - return nothing -end - -function init_surfaces!(interfaces, mortars, boundaries, mesh::P4estMesh) - # Let p4est iterate over all interfaces and call init_surfaces_iter_face - iter_face_c = @cfunction(init_surfaces_iter_face, - Cvoid, (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid})) - user_data = InitSurfacesIterFaceUserData( - interfaces, mortars, boundaries, mesh) - - iterate_faces(mesh, iter_face_c, user_data) - - return interfaces -end - - -# Initialization of interfaces after the function barrier -function init_interfaces_iter_face_inner(info, sides, user_data) - @unpack interfaces, interface_id, mesh = user_data - user_data.interface_id += 1 - - # Global trees array, one-based indexing - trees = (unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), - unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) - # Quadrant numbering offsets of the quadrants at this interface - offsets = SVector(trees[1].quadrants_offset, - trees[2].quadrants_offset) - - local_quad_ids = SVector(sides[1].is.full.quadid, sides[2].is.full.quadid) - # Global IDs of the neighboring quads - quad_ids = offsets + local_quad_ids - - # Write data to interfaces container - # p4est uses zero-based indexing; convert to one-based indexing - interfaces.element_ids[1, interface_id] = quad_ids[1] + 1 - interfaces.element_ids[2, interface_id] = quad_ids[2] + 1 - - # Face at which the interface lies - faces = (sides[1].face, sides[2].face) - - # Relative orientation of the two cell faces, - # 0 for aligned coordinates, 1 for reversed coordinates. - orientation = info.orientation - +# Initialize node_indices of interface container +@inline function init_interface_node_indices!(interfaces::P4estInterfaceContainer{2}, + faces, orientation, interface_id) # Iterate over primary and secondary element for side in 1:2 # Align interface in positive coordinate direction of primary element. 
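# Note on the 2D convention used here: p4est describes the relative orientation of the
# two faces at an interface by a single flag, 0 for aligned and 1 for reversed face
# coordinates, so the secondary side only needs to be indexed backwards when
# orientation == 1. In 3D (containers_3d.jl further below), the two face indices can
# additionally be transposed, which is handled by orientation_to_indices_p4est instead.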
@@ -201,51 +107,13 @@ function init_interfaces_iter_face_inner(info, sides, user_data) end end - return nothing -end - -function init_interfaces!(interfaces, mesh::P4estMesh) - # Let p4est iterate over all interfaces and call init_surfaces_iter_face - iter_face_c = @cfunction(init_surfaces_iter_face, - Cvoid, (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid})) - user_data = InitSurfacesIterFaceUserData( - interfaces, - nothing, # mortars - nothing, # boundaries - mesh) - - iterate_faces(mesh, iter_face_c, user_data) - return interfaces end -# Initialization of boundaries after the function barrier -function init_boundaries_iter_face_inner(info, user_data) - @unpack boundaries, boundary_id, mesh = user_data - user_data.boundary_id += 1 - - # Extract boundary data - side = unsafe_load_sc(p4est_iter_face_side_t, info.sides) - # Load tree from global trees array, one-based indexing - tree = unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, side.treeid + 1) - # Quadrant numbering offset of this quadrant - offset = tree.quadrants_offset - - # Verify before accessing is.full, but this should never happen - @assert side.is_hanging == false - - local_quad_id = side.is.full.quadid - # Global ID of this quad - quad_id = offset + local_quad_id - - # Write data to boundaries container - # p4est uses zero-based indexing; convert to one-based indexing - boundaries.element_ids[boundary_id] = quad_id + 1 - - # Face at which the boundary lies - face = side.face - +# Initialize node_indices of boundary container +@inline function init_boundary_node_indices!(boundaries::P4estBoundaryContainer{2}, + face, boundary_id) if face == 0 # Index face in negative x-direction boundaries.node_indices[boundary_id] = (:one, :i) @@ -260,83 +128,19 @@ function init_boundaries_iter_face_inner(info, user_data) boundaries.node_indices[boundary_id] = (:i, :end) end - # One-based indexing - boundaries.name[boundary_id] = mesh.boundary_names[face + 1, side.treeid + 1] - - return nothing -end - -function init_boundaries!(boundaries, mesh::P4estMesh{2}) - # Let p4est iterate over all interfaces and call init_surfaces_iter_face - iter_face_c = @cfunction(init_surfaces_iter_face, - Cvoid, (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid})) - user_data = InitSurfacesIterFaceUserData( - nothing, # interfaces - nothing, # mortars - boundaries, - mesh) - - iterate_faces(mesh, iter_face_c, user_data) - return boundaries end -# Initialization of mortars after the function barrier -function init_mortars_iter_face_inner(info, sides, user_data) - @unpack mortars, mortar_id, mesh = user_data - user_data.mortar_id += 1 - - # Global trees array, one-based indexing - trees = (unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, sides[1].treeid + 1), - unsafe_load_sc(p4est_tree_t, mesh.p4est.trees, sides[2].treeid + 1)) - # Quadrant numbering offsets of the quadrants at this interface - offsets = SVector(trees[1].quadrants_offset, - trees[2].quadrants_offset) - - if sides[1].is_hanging == true - # Left is small (1), right is large (2) - small_large = (1, 2) - - local_small_quad_ids = sides[1].is.hanging.quadid - # Global IDs of the two small quads - small_quad_ids = offsets[1] .+ local_small_quad_ids - - # Just be sure before accessing is.full - @assert sides[2].is_hanging == false - large_quad_id = offsets[2] + sides[2].is.full.quadid - else # sides[2].is_hanging == true - # Left is large (2), right is small (1) - small_large = (2, 1) - - local_small_quad_ids = sides[2].is.hanging.quadid - # Global IDs of the two small quads - small_quad_ids = offsets[2] .+ 
local_small_quad_ids - - # Just be sure before accessing is.full - @assert sides[1].is_hanging == false - large_quad_id = offsets[1] + sides[1].is.full.quadid - end - - # Write data to mortar container, 1 and 2 are the small elements - # p4est uses zero-based indexing; convert to one-based indexing - mortars.element_ids[1, mortar_id] = small_quad_ids[1] + 1 - mortars.element_ids[2, mortar_id] = small_quad_ids[2] + 1 - # 3 is the large element - mortars.element_ids[3, mortar_id] = large_quad_id + 1 - - # Face at which the interface lies - faces = [sides[1].face, sides[2].face] - - # Relative orientation of the two cell faces, - # 0 for aligned coordinates, 1 for reversed coordinates. - orientation = info.orientation - +# Initialize node_indices of mortar container +# faces[1] is expected to be the face of the small side. +@inline function init_mortar_node_indices!(mortars::P4estMortarContainer{2}, + faces, orientation, mortar_id) for side in 1:2 # Align mortar in positive coordinate direction of small side. # For orientation == 1, the large side needs to be indexed backwards # relative to the mortar. - if small_large[side] == 1 || orientation == 0 + if side == 1 || orientation == 0 # Forward indexing for small side or orientation == 0 i = :i else @@ -346,33 +150,18 @@ function init_mortars_iter_face_inner(info, sides, user_data) if faces[side] == 0 # Index face in negative x-direction - mortars.node_indices[small_large[side], mortar_id] = (:one, i) + mortars.node_indices[side, mortar_id] = (:one, i) elseif faces[side] == 1 # Index face in positive x-direction - mortars.node_indices[small_large[side], mortar_id] = (:end, i) + mortars.node_indices[side, mortar_id] = (:end, i) elseif faces[side] == 2 # Index face in negative y-direction - mortars.node_indices[small_large[side], mortar_id] = (i, :one) + mortars.node_indices[side, mortar_id] = (i, :one) else # faces[side] == 3 # Index face in positive y-direction - mortars.node_indices[small_large[side], mortar_id] = (i, :end) + mortars.node_indices[side, mortar_id] = (i, :end) end end - return nothing -end - -function init_mortars!(mortars, mesh::P4estMesh{2}) - # Let p4est iterate over all interfaces and call init_surfaces_iter_face - iter_face_c = @cfunction(init_surfaces_iter_face, - Cvoid, (Ptr{p4est_iter_face_info_t}, Ptr{Cvoid})) - user_data = InitSurfacesIterFaceUserData( - nothing, # interfaces - mortars, - nothing, # boundaries - mesh) - - iterate_faces(mesh, iter_face_c, user_data) - return mortars end diff --git a/src/solvers/dg_p4est/containers_3d.jl b/src/solvers/dg_p4est/containers_3d.jl new file mode 100644 index 0000000000..302821f4e7 --- /dev/null +++ b/src/solvers/dg_p4est/containers_3d.jl @@ -0,0 +1,307 @@ +# Initialize data structures in element container +function init_elements!(elements, mesh::P4estMesh{3}, basis::LobattoLegendreBasis) + @unpack node_coordinates, jacobian_matrix, + contravariant_vectors, inverse_jacobian = elements + + calc_node_coordinates!(node_coordinates, mesh, basis.nodes) + + for element in 1:ncells(mesh) + calc_jacobian_matrix!(jacobian_matrix, element, node_coordinates, basis) + + calc_contravariant_vectors!(contravariant_vectors, element, jacobian_matrix, + node_coordinates, basis) + + calc_inverse_jacobian!(inverse_jacobian, element, jacobian_matrix, basis) + end + + return nothing +end + + +# Interpolate tree_node_coordinates to each quadrant at the nodes of the specified basis +function calc_node_coordinates!(node_coordinates, + mesh::P4estMesh{3}, + basis::LobattoLegendreBasis) + # Hanging 
nodes will cause holes in the mesh if its polydeg is higher + # than the polydeg of the solver. + @assert length(basis.nodes) >= length(mesh.nodes) "The solver can't have a lower polydeg than the mesh" + + calc_node_coordinates!(node_coordinates, mesh, basis.nodes) +end + +# Interpolate tree_node_coordinates to each quadrant at the specified nodes +function calc_node_coordinates!(node_coordinates, + mesh::P4estMesh{3}, + nodes::AbstractVector) + # Macros from p4est + p4est_root_len = 1 << P4EST_MAXLEVEL + p4est_quadrant_len(l) = 1 << (P4EST_MAXLEVEL - l) + + trees = unsafe_wrap_sc(p8est_tree_t, mesh.p4est.trees) + + for tree in eachindex(trees) + offset = trees[tree].quadrants_offset + quadrants = unsafe_wrap_sc(p8est_quadrant_t, trees[tree].quadrants) + + for i in eachindex(quadrants) + element = offset + i + quad = quadrants[i] + + quad_length = p4est_quadrant_len(quad.level) / p4est_root_len + + nodes_out_x = 2 * (quad_length * 1/2 * (nodes .+ 1) .+ quad.x / p4est_root_len) .- 1 + nodes_out_y = 2 * (quad_length * 1/2 * (nodes .+ 1) .+ quad.y / p4est_root_len) .- 1 + nodes_out_z = 2 * (quad_length * 1/2 * (nodes .+ 1) .+ quad.z / p4est_root_len) .- 1 + + matrix1 = polynomial_interpolation_matrix(mesh.nodes, nodes_out_x) + matrix2 = polynomial_interpolation_matrix(mesh.nodes, nodes_out_y) + matrix3 = polynomial_interpolation_matrix(mesh.nodes, nodes_out_z) + + multiply_dimensionwise!( + view(node_coordinates, :, :, :, :, element), + matrix1, matrix2, matrix3, + view(mesh.tree_node_coordinates, :, :, :, :, tree) + ) + end + end + + return node_coordinates +end + + +# Initialize node_indices of interface container +@inline function init_interface_node_indices!(interfaces::P4estInterfaceContainer{3}, + faces, orientation, interface_id) + # Iterate over primary and secondary element + for side in 1:2 + # Align interface at the primary element (primary element has surface indices (:i, :j)). + # The secondary element needs to be indexed differently. 
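    # The two surface indices of the secondary side are computed by
    # orientation_to_indices_p4est (defined below) from p4est's orientation code (0-3)
    # and the two face numbers. Each returned index is one of :i, :j, :i_backwards,
    # :j_backwards, i.e., the secondary face may be indexed reversed and/or with its
    # two indices transposed relative to the primary face.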
+ if side == 1 + surface_index1 = :i + surface_index2 = :j + else + surface_index1, surface_index2 = orientation_to_indices_p4est(faces[2], faces[1], orientation) + end + + if faces[side] == 0 + # Index face in negative x-direction + interfaces.node_indices[side, interface_id] = (:one, surface_index1, surface_index2) + elseif faces[side] == 1 + # Index face in positive x-direction + interfaces.node_indices[side, interface_id] = (:end, surface_index1, surface_index2) + elseif faces[side] == 2 + # Index face in negative y-direction + interfaces.node_indices[side, interface_id] = (surface_index1, :one, surface_index2) + elseif faces[side] == 3 + # Index face in positive y-direction + interfaces.node_indices[side, interface_id] = (surface_index1, :end, surface_index2) + elseif faces[side] == 4 + # Index face in negative z-direction + interfaces.node_indices[side, interface_id] = (surface_index1, surface_index2, :one) + else # faces[side] == 5 + # Index face in positive z-direction + interfaces.node_indices[side, interface_id] = (surface_index1, surface_index2, :end) + end + end + + return interfaces +end + + +# Initialize node_indices of boundary container +@inline function init_boundary_node_indices!(boundaries::P4estBoundaryContainer{3}, + face, boundary_id) + if face == 0 + # Index face in negative x-direction + boundaries.node_indices[boundary_id] = (:one, :i, :j) + elseif face == 1 + # Index face in positive x-direction + boundaries.node_indices[boundary_id] = (:end, :i, :j) + elseif face == 2 + # Index face in negative y-direction + boundaries.node_indices[boundary_id] = (:i, :one, :j) + elseif face == 3 + # Index face in positive y-direction + boundaries.node_indices[boundary_id] = (:i, :end, :j) + elseif face == 4 + # Index face in negative z-direction + boundaries.node_indices[boundary_id] = (:i, :j, :one) + else # face == 5 + # Index face in positive z-direction + boundaries.node_indices[boundary_id] = (:i, :j, :end) + end + + return boundaries +end + + +# Initialize node_indices of mortar container +# faces[1] is expected to be the face of the small side. +@inline function init_mortar_node_indices!(mortars::P4estMortarContainer{3}, + faces, orientation, mortar_id) + for side in 1:2 + # Align mortar at small side. + # The large side needs to be indexed differently. + if side == 1 + surface_index1 = :i + surface_index2 = :j + else + surface_index1, surface_index2 = orientation_to_indices_p4est(faces[2], faces[1], orientation) + end + + if faces[side] == 0 + # Index face in negative x-direction + mortars.node_indices[side, mortar_id] = (:one, surface_index1, surface_index2) + elseif faces[side] == 1 + # Index face in positive x-direction + mortars.node_indices[side, mortar_id] = (:end, surface_index1, surface_index2) + elseif faces[side] == 2 + # Index face in negative y-direction + mortars.node_indices[side, mortar_id] = (surface_index1, :one, surface_index2) + elseif faces[side] == 3 + # Index face in positive y-direction + mortars.node_indices[side, mortar_id] = (surface_index1, :end, surface_index2) + elseif faces[side] == 4 + # Index face in negative z-direction + mortars.node_indices[side, mortar_id] = (surface_index1, surface_index2, :one) + else # faces[side] == 5 + # Index face in positive z-direction + mortars.node_indices[side, mortar_id] = (surface_index1, surface_index2, :end) + end + end + + return mortars +end + + +# Convert p4est orientation code to node indices. +# Return node indices that index "my side" wrt "other side", +# i.e., i and j are indices of other side. 
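# For example (reading off the cases below): my_face == 1 (+x) against other_face == 0
# (-x) with orientation_code == 0 is the non-flipped, aligned case and yields (:i, :j),
# so the other side's indices can be used as they are; the remaining cases return
# reversed and/or swapped index symbols.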
+function orientation_to_indices_p4est(my_face, other_face, orientation_code) + # my_face and other_face are the face directions (zero-based) + # of "my side" and "other side" respectively. + # Face corner 0 of the face with the lower face direction connects to a corner of the other face. + # The number of this corner is the orientation code in p4est. + lower = my_face <= other_face + + # x_pos, y_neg, and z_pos are the directions in which the face has right-handed coordinates + # when looked at from the outside. + my_right_handed = my_face in (1, 2, 5) + other_right_handed = other_face in (1, 2, 5) + + # If both or none are right-handed when looked at from the outside, they will have different + # orientations when looked at from the same side of the interface. + flipped = my_right_handed == other_right_handed + + # In the folowing illustrations, p4est's face corner numbering is shown. + # ξ and η are the local coordinates of the respective face. + # We're looking at both faces from the same side of the interface, so that "other side" + # (in the illustrations on the left) has right-handed coordinates. + if !flipped + if orientation_code == 0 + # Corner 0 of other side matches corner 0 of my side + # 2┌──────┐3 2┌──────┐3 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 0└──────┘1 + # η η + # ↑ ↑ + # │ │ + # └───> ξ └───> ξ + surface_index1 = :i + surface_index2 = :j + elseif ((lower && orientation_code == 2) # Corner 0 of my side matches corner 2 of other side + || (!lower && orientation_code == 1)) # Corner 0 of other side matches corner 1 of my side + # 2┌──────┐3 0┌──────┐2 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 1└──────┘3 + # η ┌───> η + # ↑ │ + # │ ↓ + # └───> ξ ξ + surface_index1 = :j_backwards + surface_index2 = :i + elseif ((lower && orientation_code == 1) # Corner 0 of my side matches corner 1 of other side + || (!lower && orientation_code == 2)) # Corner 0 of other side matches corner 2 of my side + # 2┌──────┐3 3┌──────┐1 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 2└──────┘0 + # η ξ + # ↑ ↑ + # │ │ + # └───> ξ η <───┘ + surface_index1 = :j + surface_index2 = :i_backwards + else # orientation_code == 3 + # Corner 0 of my side matches corner 3 of other side and + # corner 0 of other side matches corner 3 of my side. + # 2┌──────┐3 1┌──────┐0 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 3└──────┘2 + # η ξ <───┐ + # ↑ │ + # │ ↓ + # └───> ξ η + surface_index1 = :i_backwards + surface_index2 = :j_backwards + end + else # flipped + if orientation_code == 0 + # Corner 0 of other side matches corner 0 of my side + # 2┌──────┐3 1┌──────┐3 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 0└──────┘2 + # η ξ + # ↑ ↑ + # │ │ + # └───> ξ └───> η + surface_index1 = :j + surface_index2 = :i + elseif orientation_code == 2 + # Corner 0 of my side matches corner 2 of other side and + # corner 0 of other side matches corner 2 of my side. + # 2┌──────┐3 0┌──────┐1 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 2└──────┘3 + # η ┌───> ξ + # ↑ │ + # │ ↓ + # └───> ξ η + surface_index1 = :i + surface_index2 = :j_backwards + elseif orientation_code == 1 + # Corner 0 of my side matches corner 1 of other side and + # corner 0 of other side matches corner 1 of my side. + # 2┌──────┐3 3┌──────┐2 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 1└──────┘0 + # η η + # ↑ ↑ + # │ │ + # └───> ξ ξ <───┘ + surface_index1 = :i_backwards + surface_index2 = :j + else # orientation_code == 3 + # Corner 0 of my side matches corner 3 of other side and + # corner 0 of other side matches corner 3 of my side. 
+ # 2┌──────┐3 2┌──────┐0 + # │ │ │ │ + # │ │ │ │ + # 0└──────┘1 3└──────┘1 + # η η <───┐ + # ↑ │ + # │ ↓ + # └───> ξ ξ + surface_index1 = :j_backwards + surface_index2 = :i_backwards + end + end + + return surface_index1, surface_index2 +end diff --git a/src/solvers/dg_p4est/dg.jl b/src/solvers/dg_p4est/dg.jl index 2bb43caf24..063aafcd7b 100644 --- a/src/solvers/dg_p4est/dg.jl +++ b/src/solvers/dg_p4est/dg.jl @@ -4,10 +4,7 @@ function create_cache(mesh::P4estMesh, equations::AbstractEquations, dg::DG, ::Any, ::Type{uEltype}) where {uEltype<:Real} # Make sure to balance the p4est before creating any containers # in case someone has tampered with the p4est after creating the mesh - p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, C_NULL) - # Due to a bug in p4est, the forest needs to be rebalanced twice sometimes - # See https://github.com/cburstedde/p4est/issues/112 - p4est_balance(mesh.p4est, P4EST_CONNECT_FACE, C_NULL) + balance!(mesh) elements = init_elements(mesh, equations, dg.basis, uEltype) interfaces = init_interfaces(mesh, equations, dg.basis, elements) @@ -28,15 +25,10 @@ end @inline function get_normal_vector(direction, cache, indices...) @unpack contravariant_vectors, inverse_jacobian = cache.elements - # If the mapping is orientation-reversing, the contravariant vectors' orientation - # is reversed as well - sign_jacobian = sign(inverse_jacobian[indices...]) - orientation = div(direction + 1, 2) - normal = sign_jacobian * get_contravariant_vector(orientation, contravariant_vectors, indices...) + normal = get_contravariant_vector(orientation, contravariant_vectors, indices...) # Contravariant vectors at interfaces in negative coordinate direction are pointing inwards - # (after normalizing with sign(J) above) if direction in (1, 3, 5) normal *= -1 end @@ -50,3 +42,4 @@ end include("containers.jl") include("dg_2d.jl") +include("dg_3d.jl") diff --git a/src/solvers/dg_p4est/dg_2d.jl b/src/solvers/dg_p4est/dg_2d.jl index d4164f549e..4c0e3846ed 100644 --- a/src/solvers/dg_p4est/dg_2d.jl +++ b/src/solvers/dg_p4est/dg_2d.jl @@ -29,14 +29,16 @@ function prolong2interfaces!(cache, u, # Use Tuple `node_indices` and `evaluate_index` to copy values # from the correct face and in the correct orientation - for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, evaluate_index(primary_indices, size_, 1, i), - evaluate_index(primary_indices, size_, 2, i), - primary_element] - - interfaces.u[2, v, i, interface] = u[v, evaluate_index(secondary_indices, size_, 1, i), - evaluate_index(secondary_indices, size_, 2, i), - secondary_element] + for i in eachnode(dg) + for v in eachvariable(equations) + interfaces.u[1, v, i, interface] = u[v, evaluate_index(primary_indices, size_, 1, i), + evaluate_index(primary_indices, size_, 2, i), + primary_element] + + interfaces.u[2, v, i, interface] = u[v, evaluate_index(secondary_indices, size_, 1, i), + evaluate_index(secondary_indices, size_, 2, i), + secondary_element] + end end end @@ -47,8 +49,7 @@ end function calc_interface_flux!(surface_flux_values, mesh::P4estMesh{2}, nonconservative_terms::Val{false}, - equations, surface_integral, - dg::DG, cache) + equations, surface_integral, dg::DG, cache) @unpack surface_flux = surface_integral @unpack u, element_ids, node_indices = cache.interfaces @@ -80,11 +81,11 @@ function calc_interface_flux!(surface_flux_values, # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux # to left and right element storage in the correct orientation for v in 
eachvariable(equations) - surface_index = evaluate_index_surface(primary_indices, size_, 1, i) - surface_flux_values[v, surface_index, primary_direction, primary_element] = flux_[v] + surf_i = evaluate_index_surface(primary_indices, size_, 1, i) + surface_flux_values[v, surf_i, primary_direction, primary_element] = flux_[v] - surface_index = evaluate_index_surface(secondary_indices, size_, 1, i) - surface_flux_values[v, surface_index, secondary_direction, secondary_element] = -flux_[v] + surf_i = evaluate_index_surface(secondary_indices, size_, 1, i) + surface_flux_values[v, surf_i, secondary_direction, secondary_element] = -flux_[v] end end end @@ -94,8 +95,8 @@ end function prolong2boundaries!(cache, u, - mesh::P4estMesh{2}, equations, - surface_integral, dg::DG) + mesh::P4estMesh{2}, + equations, surface_integral, dg::DG) @unpack boundaries = cache size_ = (nnodes(dg), nnodes(dg)) @@ -106,10 +107,12 @@ function prolong2boundaries!(cache, u, # Use Tuple `node_indices` and `evaluate_index` to copy values # from the correct face and in the correct orientation - for i in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, i, boundary] = u[v, evaluate_index(node_indices, size_, 1, i), - evaluate_index(node_indices, size_, 2, i), - element] + for i in eachnode(dg) + for v in eachvariable(equations) + boundaries.u[v, i, boundary] = u[v, evaluate_index(node_indices, size_, 1, i), + evaluate_index(node_indices, size_, 2, i), + element] + end end end @@ -118,8 +121,8 @@ end function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing, - mesh::P4estMesh, equations, - surface_integral, dg::DG) + mesh::P4estMesh{2}, + equations, surface_integral, dg::DG) @unpack boundaries = cache @unpack surface_flux_values, node_coordinates = cache.elements @unpack surface_flux = surface_integral @@ -154,8 +157,8 @@ function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing, # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux # to left and right element storage in the correct orientation for v in eachvariable(equations) - surface_index = evaluate_index_surface(node_indices, size_, 1, i) - surface_flux_values[v, surface_index, direction, element] = flux_[v] + surf_i = evaluate_index_surface(node_indices, size_, 1, i) + surface_flux_values[v, surf_i, direction, element] = flux_[v] end end end @@ -175,12 +178,16 @@ function prolong2mortars!(cache, u, large_indices = node_indices[2, mortar] # Copy solution small to small - for pos in 1:2, i in eachnode(dg), v in eachvariable(equations) - # Use Tuple `node_indices` and `evaluate_index` to copy values - # from the correct face and in the correct orientation - cache.mortars.u[1, v, pos, i, mortar] = u[v, evaluate_index(small_indices, size_, 1, i), - evaluate_index(small_indices, size_, 2, i), - element_ids[pos, mortar]] + for pos in 1:2 + for i in eachnode(dg) + for v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index` to copy values + # from the correct face and in the correct orientation + cache.mortars.u[1, v, pos, i, mortar] = u[v, evaluate_index(small_indices, size_, 1, i), + evaluate_index(small_indices, size_, 2, i), + element_ids[pos, mortar]] + end + end end # Buffer to copy solution values of the large element in the correct orientation @@ -188,12 +195,14 @@ function prolong2mortars!(cache, u, u_buffer = cache.u_threaded[Threads.threadid()] # Copy solution of large element face to buffer in the correct orientation - for i in eachnode(dg), v in eachvariable(equations) - # Use 
Tuple `node_indices` and `evaluate_index` to copy values - # from the correct face and in the correct orientation - u_buffer[v, i] = u[v, evaluate_index(large_indices, size_, 1, i), - evaluate_index(large_indices, size_, 2, i), - element_ids[3, mortar]] + for i in eachnode(dg) + for v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index` to copy values + # from the correct face and in the correct orientation + u_buffer[v, i] = u[v, evaluate_index(large_indices, size_, 1, i), + evaluate_index(large_indices, size_, 2, i), + element_ids[3, mortar]] + end end # Interpolate large element face data from buffer to small face locations @@ -230,18 +239,20 @@ function calc_mortar_flux!(surface_flux_values, # Use Tuple `node_indices` and `evaluate_index` to access node indices # at the correct face and in the correct orientation to get normal vectors - for pos in 1:2, i in eachnode(dg) - u_ll, u_rr = get_surface_node_vars(u, equations, dg, pos, i, mortar) + for pos in 1:2 + for i in eachnode(dg) + u_ll, u_rr = get_surface_node_vars(u, equations, dg, pos, i, mortar) - normal_vector = get_normal_vector(small_direction, cache, - evaluate_index(small_indices, size_, 1, i), - evaluate_index(small_indices, size_, 2, i), - element_ids[pos, mortar]) + normal_vector = get_normal_vector(small_direction, cache, + evaluate_index(small_indices, size_, 1, i), + evaluate_index(small_indices, size_, 2, i), + element_ids[pos, mortar]) - flux_ = surface_flux(u_ll, u_rr, normal_vector, equations) + flux_ = surface_flux(u_ll, u_rr, normal_vector, equations) - # Copy flux to buffer - set_node_vars!(fstar[pos], flux_, equations, dg, i) + # Copy flux to buffer + set_node_vars!(fstar[pos], flux_, equations, dg, i) + end end # Buffer to interpolate flux values of the large element to before copying @@ -272,13 +283,17 @@ end size_ = (nnodes(dg), nnodes(dg)) # Copy solution small to small - for pos in 1:2, i in eachnode(dg), v in eachvariable(equations) - # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux - # to left and right element storage in the correct orientation - surface_index = evaluate_index_surface(small_indices, size_, 1, i) - surface_flux_values[v, surface_index, - small_direction, - element_ids[pos, mortar]] = fstar[pos][v, i] + for pos in 1:2 + for i in eachnode(dg) + for v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux + # to left and right element storage in the correct orientation + surface_index = evaluate_index_surface(small_indices, size_, 1, i) + surface_flux_values[v, surface_index, + small_direction, + element_ids[pos, mortar]] = fstar[pos][v, i] + end + end end large_element = element_ids[3, mortar] @@ -299,19 +314,23 @@ end u_buffer .*= -2 # Copy interpolated flux values from buffer to large element face in the correct orientation - for i in eachnode(dg), v in eachvariable(equations) - # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux - # to surface flux storage in the correct orientation - surface_index = evaluate_index_surface(large_indices, size_, 1, i) - surface_flux_values[v, surface_index, large_direction, large_element] = u_buffer[v, i] + for i in eachnode(dg) + for v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux + # to surface flux storage in the correct orientation + surface_index = evaluate_index_surface(large_indices, size_, 1, i) + surface_flux_values[v, surface_index, large_direction, large_element] = u_buffer[v, i] + end end 
return nothing end -function calc_surface_integral!(du, u, mesh::P4estMesh, - equations, surface_integral::SurfaceIntegralWeakForm, +function calc_surface_integral!(du, u, + mesh::P4estMesh{2}, + equations, + surface_integral::SurfaceIntegralWeakForm, dg::DGSEM, cache) @unpack boundary_interpolation = dg.basis @unpack surface_flux_values = cache.elements diff --git a/src/solvers/dg_p4est/dg_3d.jl b/src/solvers/dg_p4est/dg_3d.jl new file mode 100644 index 0000000000..395a2f0e51 --- /dev/null +++ b/src/solvers/dg_p4est/dg_3d.jl @@ -0,0 +1,413 @@ +# The methods below are specialized on the mortar type +# and called from the basic `create_cache` method at the top. +function create_cache(mesh::P4estMesh{3}, equations, mortar_l2::LobattoLegendreMortarL2, uEltype) + # TODO: Taal compare performance of different types + fstar_threaded = [Array{uEltype, 4}(undef, nvariables(equations), nnodes(mortar_l2), nnodes(mortar_l2), 4) + for _ in 1:Threads.nthreads()] + + fstar_tmp_threaded = [Array{uEltype, 3}(undef, nvariables(equations), nnodes(mortar_l2), nnodes(mortar_l2)) + for _ in 1:Threads.nthreads()] + u_threaded = [Array{uEltype, 3}(undef, nvariables(equations), nnodes(mortar_l2), nnodes(mortar_l2)) + for _ in 1:Threads.nthreads()] + + (; fstar_threaded, fstar_tmp_threaded, u_threaded) +end + + +function prolong2interfaces!(cache, u, + mesh::P4estMesh{3}, + equations, surface_integral, dg::DG) + @unpack interfaces = cache + + size_ = (nnodes(dg), nnodes(dg), nnodes(dg)) + + @threaded for interface in eachinterface(dg, cache) + primary_element = interfaces.element_ids[1, interface] + secondary_element = interfaces.element_ids[2, interface] + + primary_indices = interfaces.node_indices[1, interface] + secondary_indices = interfaces.node_indices[2, interface] + + # Use Tuple `node_indices` and `evaluate_index` to copy values + # from the correct face and in the correct orientation + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations) + interfaces.u[1, v, i, j, interface] = u[v, evaluate_index(primary_indices, size_, 1, i, j), + evaluate_index(primary_indices, size_, 2, i, j), + evaluate_index(primary_indices, size_, 3, i, j), + primary_element] + + interfaces.u[2, v, i, j, interface] = u[v, evaluate_index(secondary_indices, size_, 1, i, j), + evaluate_index(secondary_indices, size_, 2, i, j), + evaluate_index(secondary_indices, size_, 3, i, j), + secondary_element] + end + end + end + + return nothing +end + + +function calc_interface_flux!(surface_flux_values, + mesh::P4estMesh{3}, + nonconservative_terms::Val{false}, + equations, surface_integral, dg::DG, cache) + @unpack surface_flux = surface_integral + @unpack u, element_ids, node_indices = cache.interfaces + + size_ = (nnodes(dg), nnodes(dg), nnodes(dg)) + + @threaded for interface in eachinterface(dg, cache) + # Get neighboring elements + primary_element = element_ids[1, interface] + secondary_element = element_ids[2, interface] + + primary_indices = node_indices[1, interface] + secondary_indices = node_indices[2, interface] + + primary_direction = indices2direction(primary_indices) + secondary_direction = indices2direction(secondary_indices) + + # Use Tuple `node_indices` and `evaluate_index` to access node indices + # at the correct face and in the correct orientation to get normal vectors + for j in eachnode(dg), i in eachnode(dg) + u_ll, u_rr = get_surface_node_vars(u, equations, dg, i, j, interface) + + normal_vector = get_normal_vector(primary_direction, cache, + evaluate_index(primary_indices, size_, 1, i, j), + 
evaluate_index(primary_indices, size_, 2, i, j), + evaluate_index(primary_indices, size_, 3, i, j), + primary_element) + + flux_ = surface_flux(u_ll, u_rr, normal_vector, equations) + + # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux + # to left and right element storage in the correct orientation + for v in eachvariable(equations) + surf_i = evaluate_index_surface(primary_indices, size_, 1, i, j) + surf_j = evaluate_index_surface(primary_indices, size_, 2, i, j) + surface_flux_values[v, surf_i, surf_j, primary_direction, primary_element] = flux_[v] + + surf_i = evaluate_index_surface(secondary_indices, size_, 1, i, j) + surf_j = evaluate_index_surface(secondary_indices, size_, 2, i, j) + surface_flux_values[v, surf_i, surf_j, secondary_direction, secondary_element] = -flux_[v] + end + end + end + + return nothing +end + + +function prolong2boundaries!(cache, u, + mesh::P4estMesh{3}, + equations, surface_integral, dg::DG) + @unpack boundaries = cache + + size_ = (nnodes(dg), nnodes(dg), nnodes(dg)) + + @threaded for boundary in eachboundary(dg, cache) + element = boundaries.element_ids[boundary] + node_indices = boundaries.node_indices[boundary] + + # Use Tuple `node_indices` and `evaluate_index` to copy values + # from the correct face and in the correct orientation + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations) + boundaries.u[v, i, j, boundary] = u[v, evaluate_index(node_indices, size_, 1, i, j), + evaluate_index(node_indices, size_, 2, i, j), + evaluate_index(node_indices, size_, 3, i, j), + element] + end + end + end + + return nothing +end + + +function calc_boundary_flux!(cache, t, boundary_condition, boundary_indexing, + mesh::P4estMesh{3}, + equations, surface_integral, dg::DG) + @unpack boundaries = cache + @unpack surface_flux_values, node_coordinates = cache.elements + @unpack surface_flux = surface_integral + + size_ = (nnodes(dg), nnodes(dg), nnodes(dg)) + + @threaded for local_index in eachindex(boundary_indexing) + # Use the local index to get the global boundary index from the pre-sorted list + boundary = boundary_indexing[local_index] + + element = boundaries.element_ids[boundary] + node_indices = boundaries.node_indices[boundary] + direction = indices2direction(node_indices) + + # Use Tuple `node_indices` and `evaluate_index` to access node indices + # at the correct face and in the correct orientation to get normal vectors + for j in eachnode(dg), i in eachnode(dg) + node_i = evaluate_index(node_indices, size_, 1, i, j) + node_j = evaluate_index(node_indices, size_, 2, i, j) + node_k = evaluate_index(node_indices, size_, 3, i, j) + + # Extract solution data from boundary container + u_inner = get_node_vars(boundaries.u, equations, dg, i, j, boundary) + + # Outward-pointing normal vector + normal_vector = get_normal_vector(direction, cache, node_i, node_j, node_k, element) + + # Coordinates at boundary node + x = get_node_coords(node_coordinates, equations, dg, node_i, node_j, node_k, element) + + flux_ = boundary_condition(u_inner, normal_vector, x, t, surface_flux, equations) + + # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux + # to left and right element storage in the correct orientation + for v in eachvariable(equations) + surf_i = evaluate_index_surface(node_indices, size_, 1, i, j) + surf_j = evaluate_index_surface(node_indices, size_, 2, i, j) + surface_flux_values[v, surf_i, surf_j, direction, element] = flux_[v] + end + end + end +end + + +function prolong2mortars!(cache, u, + 
mesh::P4estMesh{3}, equations, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DGSEM) + # temporary buffer for projections + @unpack fstar_tmp_threaded = cache + @unpack element_ids, node_indices = cache.mortars + + size_ = (nnodes(dg), nnodes(dg), nnodes(dg)) + + @threaded for mortar in eachmortar(dg, cache) + fstar_tmp = fstar_tmp_threaded[Threads.threadid()] + + small_indices = node_indices[1, mortar] + large_indices = node_indices[2, mortar] + + # Copy solution small to small + for pos in 1:4 + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index` to copy values + # from the correct face and in the correct orientation + cache.mortars.u[1, v, pos, i, j, mortar] = u[v, evaluate_index(small_indices, size_, 1, i, j), + evaluate_index(small_indices, size_, 2, i, j), + evaluate_index(small_indices, size_, 3, i, j), + element_ids[pos, mortar]] + end + end + end + + # Buffer to copy solution values of the large element in the correct orientation + # before interpolating + u_buffer = cache.u_threaded[Threads.threadid()] + + # Copy solution of large element face to buffer in the correct orientation + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index` to copy values + # from the correct face and in the correct orientation + u_buffer[v, i, j] = u[v, evaluate_index(large_indices, size_, 1, i, j), + evaluate_index(large_indices, size_, 2, i, j), + evaluate_index(large_indices, size_, 3, i, j), + element_ids[5, mortar]] + end + end + + # Interpolate large element face data from buffer to small face locations + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 1, :, :, mortar), + mortar_l2.forward_lower, + mortar_l2.forward_lower, + u_buffer, + fstar_tmp) + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 2, :, :, mortar), + mortar_l2.forward_upper, + mortar_l2.forward_lower, + u_buffer, + fstar_tmp) + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 3, :, :, mortar), + mortar_l2.forward_lower, + mortar_l2.forward_upper, + u_buffer, + fstar_tmp) + multiply_dimensionwise!(view(cache.mortars.u, 2, :, 4, :, :, mortar), + mortar_l2.forward_upper, + mortar_l2.forward_upper, + u_buffer, + fstar_tmp) + end + + return nothing +end + + +function calc_mortar_flux!(surface_flux_values, + mesh::P4estMesh{3}, + nonconservative_terms::Val{false}, equations, + mortar_l2::LobattoLegendreMortarL2, + surface_integral, dg::DG, cache) + @unpack u, element_ids, node_indices = cache.mortars + @unpack fstar_threaded, fstar_tmp_threaded = cache + @unpack surface_flux = surface_integral + + size_ = (nnodes(dg), nnodes(dg), nnodes(dg)) + + @threaded for mortar in eachmortar(dg, cache) + # Choose thread-specific pre-allocated container + fstar = fstar_threaded[Threads.threadid()] + fstar_tmp = fstar_tmp_threaded[Threads.threadid()] + + small_indices = node_indices[1, mortar] + small_direction = indices2direction(small_indices) + + # Use Tuple `node_indices` and `evaluate_index` to access node indices + # at the correct face and in the correct orientation to get normal vectors + for pos in 1:4 + for j in eachnode(dg), i in eachnode(dg) + u_ll, u_rr = get_surface_node_vars(u, equations, dg, pos, i, j, mortar) + + normal_vector = get_normal_vector(small_direction, cache, + evaluate_index(small_indices, size_, 1, i, j), + evaluate_index(small_indices, size_, 2, i, j), + evaluate_index(small_indices, size_, 3, i, j), + element_ids[pos, mortar]) + + flux_ = 
surface_flux(u_ll, u_rr, normal_vector, equations) + + # Copy flux to buffer + set_node_vars!(fstar, flux_, equations, dg, i, j, pos) + end + end + + # Buffer to interpolate flux values of the large element to before copying + # in the correct orientation + u_buffer = cache.u_threaded[Threads.threadid()] + + mortar_fluxes_to_elements!(surface_flux_values, + mesh, equations, mortar_l2, dg, cache, + mortar, fstar, u_buffer, fstar_tmp) + end + + return nothing +end + + +@inline function mortar_fluxes_to_elements!(surface_flux_values, + mesh::P4estMesh{3}, equations, + mortar_l2::LobattoLegendreMortarL2, + dg::DGSEM, cache, mortar, fstar, u_buffer, fstar_tmp) + @unpack element_ids, node_indices = cache.mortars + + small_indices = node_indices[1, mortar] + large_indices = node_indices[2, mortar] + + small_direction = indices2direction(small_indices) + large_direction = indices2direction(large_indices) + + size_ = (nnodes(dg), nnodes(dg), nnodes(dg)) + + # Copy solution small to small + for pos in 1:4 + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux + # to left and right element storage in the correct orientation + surface_index1 = evaluate_index_surface(small_indices, size_, 1, i, j) + surface_index2 = evaluate_index_surface(small_indices, size_, 2, i, j) + surface_flux_values[v, surface_index1, surface_index2, small_direction, + element_ids[pos, mortar]] = fstar[v, i, j, pos] + end + end + end + + large_element = element_ids[5, mortar] + + # Project small fluxes to large element. + multiply_dimensionwise!( + u_buffer, + mortar_l2.reverse_lower, mortar_l2.reverse_lower, + view(fstar, .., 1), + fstar_tmp) + add_multiply_dimensionwise!( + u_buffer, + mortar_l2.reverse_upper, mortar_l2.reverse_lower, + view(fstar, .., 2), + fstar_tmp) + add_multiply_dimensionwise!( + u_buffer, + mortar_l2.reverse_lower, mortar_l2.reverse_upper, + view(fstar, .., 3), + fstar_tmp) + add_multiply_dimensionwise!( + u_buffer, + mortar_l2.reverse_upper, mortar_l2.reverse_upper, + view(fstar, .., 4), + fstar_tmp) + + # The flux is calculated in the outward direction of the small elements, + # so the sign must be switched to get the flux in outward direction + # of the large element. + # The contravariant vectors of the large element (and therefore the normal vectors + # of the large element as well) are four times as large as the contravariant vectors + # of the small elements. Therefore, the flux need to be scaled by a factor of 4 + # to obtain the flux of the large element. 
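  # The factor of 4 follows from the mapping: in tree coordinates the large element
  # covers twice the extent of each small element in both face-tangential directions,
  # so each tangential Jacobian column is twice as large and the face-normal
  # contravariant vector (their cross product) is 2 * 2 = 4 times as large.
  # In 2D the corresponding factor is 2, cf. `u_buffer .*= -2` in dg_2d.jl.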
+ u_buffer .*= -4 + + # Copy interpolated flux values from buffer to large element face in the correct orientation + for j in eachnode(dg), i in eachnode(dg) + for v in eachvariable(equations) + # Use Tuple `node_indices` and `evaluate_index_surface` to copy flux + # to surface flux storage in the correct orientation + surface_index1 = evaluate_index_surface(large_indices, size_, 1, i, j) + surface_index2 = evaluate_index_surface(large_indices, size_, 2, i, j) + surface_flux_values[v, surface_index1, surface_index2, + large_direction, large_element] = u_buffer[v, i, j] + end + end + + return nothing +end + + +function calc_surface_integral!(du, u, + mesh::P4estMesh{3}, + equations, + surface_integral::SurfaceIntegralWeakForm, + dg::DGSEM, cache) + @unpack boundary_interpolation = dg.basis + @unpack surface_flux_values = cache.elements + + # Note that all fluxes have been computed with outward-pointing normal vectors + @threaded for element in eachelement(dg, cache) + for m in eachnode(dg), l in eachnode(dg) + for v in eachvariable(equations) + # surface at -x + du[v, 1, l, m, element] += (surface_flux_values[v, l, m, 1, element] + * boundary_interpolation[1, 1]) + # surface at +x + du[v, nnodes(dg), l, m, element] += (surface_flux_values[v, l, m, 2, element] + * boundary_interpolation[nnodes(dg), 2]) + # surface at -y + du[v, l, 1, m, element] += (surface_flux_values[v, l, m, 3, element] + * boundary_interpolation[1, 1]) + # surface at +y + du[v, l, nnodes(dg), m, element] += (surface_flux_values[v, l, m, 4, element] + * boundary_interpolation[nnodes(dg), 2]) + # surface at -z + du[v, l, m, 1, element] += (surface_flux_values[v, l, m, 5, element] + * boundary_interpolation[1, 1]) + # surface at +z + du[v, l, m, nnodes(dg), element] += (surface_flux_values[v, l, m, 6, element] + * boundary_interpolation[nnodes(dg), 2]) + end + end + end + + return nothing +end diff --git a/src/solvers/dg_tree/containers.jl b/src/solvers/dg_tree/containers.jl new file mode 100644 index 0000000000..75a6184f94 --- /dev/null +++ b/src/solvers/dg_tree/containers.jl @@ -0,0 +1,45 @@ + +# Dimension independent code related to containers of the DG solver +# with the mesh type TreeMesh + +function reinitialize_containers!(mesh::TreeMesh, equations, dg::DGSEM, cache) + # Get new list of leaf cells + leaf_cell_ids = local_leaf_cells(mesh.tree) + + # re-initialize elements container + @unpack elements = cache + resize!(elements, length(leaf_cell_ids)) + init_elements!(elements, leaf_cell_ids, mesh, dg.basis) + + # re-initialize interfaces container + @unpack interfaces = cache + resize!(interfaces, count_required_interfaces(mesh, leaf_cell_ids)) + init_interfaces!(interfaces, elements, mesh) + + # re-initialize boundaries container + @unpack boundaries = cache + resize!(boundaries, count_required_boundaries(mesh, leaf_cell_ids)) + init_boundaries!(boundaries, elements, mesh) + + # re-initialize mortars container + @unpack mortars = cache + resize!(mortars, count_required_mortars(mesh, leaf_cell_ids)) + init_mortars!(mortars, elements, mesh) + + if mpi_isparallel() + # re-initialize mpi_interfaces container + @unpack mpi_interfaces = cache + resize!(mpi_interfaces, count_required_mpi_interfaces(mesh, leaf_cell_ids)) + init_mpi_interfaces!(mpi_interfaces, elements, mesh) + + # re-initialize mpi cache + @unpack mpi_cache = cache + init_mpi_cache!(mpi_cache, mesh, elements, mpi_interfaces, nvariables(equations), nnodes(dg)) + end +end + + +# Dimension-specific implementations +include("containers_1d.jl") 
+include("containers_2d.jl") +include("containers_3d.jl") diff --git a/src/solvers/dg_tree/dg.jl b/src/solvers/dg_tree/dg.jl index 6675acf99a..acba62a698 100644 --- a/src/solvers/dg_tree/dg.jl +++ b/src/solvers/dg_tree/dg.jl @@ -429,16 +429,16 @@ include("indicators_1d.jl") include("indicators_2d.jl") include("indicators_3d.jl") +# Container data structures +include("containers.jl") + # 1D DG implementation -include("containers_1d.jl") include("dg_1d.jl") # 2D DG implementation -include("containers_2d.jl") include("dg_2d.jl") include("dg_2d_parallel.jl") # 3D DG implementation -include("containers_3d.jl") include("dg_3d.jl") diff --git a/src/solvers/dg_tree/dg_3d.jl b/src/solvers/dg_tree/dg_3d.jl index aca192c07e..39a3d51485 100644 --- a/src/solvers/dg_tree/dg_3d.jl +++ b/src/solvers/dg_tree/dg_3d.jl @@ -143,7 +143,7 @@ end # TODO: Taal discuss/refactor timer, allowing users to pass a custom timer? function rhs!(du, u, t, - mesh::TreeMesh{3}, equations, + mesh::Union{TreeMesh{3}, P4estMesh{3}}, equations, initial_condition, boundary_conditions, source_terms, dg::DG, cache) # Reset du diff --git a/test/test_examples_3d_curved.jl b/test/test_examples_3d_curved.jl index 3bda61ce8e..c6304380f3 100644 --- a/test/test_examples_3d_curved.jl +++ b/test/test_examples_3d_curved.jl @@ -44,14 +44,14 @@ EXAMPLES_DIR = joinpath(pathof(Trixi) |> dirname |> dirname, "examples", "3d") @trixi_testset "elixir_euler_free_stream_curved.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_curved.jl"), l2 = [2.8815700334367128e-15, 9.361915278236651e-15, 9.95614203619935e-15, 1.6809941842374106e-14, 1.4815037041566735e-14], - linf = [4.1300296516055823e-14, 2.0444756998472258e-13, 1.0133560657266116e-13, 3.1896707497480747e-13, 6.092903959142859e-13]) + linf = [4.1300296516055823e-14, 2.0444756998472258e-13, 1.0133560657266116e-13, 2.0627943797535409e-13, 2.8954616482224083e-13]) end @trixi_testset "elixir_euler_free_stream_curved.jl with FluxRotated(flux_lax_friedrichs)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_curved.jl"), surface_flux=FluxRotated(flux_lax_friedrichs), l2 = [2.8815700334367128e-15, 9.361915278236651e-15, 9.95614203619935e-15, 1.6809941842374106e-14, 1.4815037041566735e-14], - linf = [4.1300296516055823e-14, 2.0444756998472258e-13, 1.0133560657266116e-13, 3.1896707497480747e-13, 6.092903959142859e-13]) + linf = [4.1300296516055823e-14, 2.0444756998472258e-13, 1.0133560657266116e-13, 2.0627943797535409e-13, 2.8954616482224083e-13]) end @trixi_testset "elixir_euler_nonperiodic_curved.jl" begin diff --git a/test/test_examples_3d_p4est.jl b/test/test_examples_3d_p4est.jl new file mode 100644 index 0000000000..4c7656d8eb --- /dev/null +++ b/test/test_examples_3d_p4est.jl @@ -0,0 +1,61 @@ +module TestExamples3dP4est + +using Test +using Trixi + +include("test_trixi.jl") + +# pathof(Trixi) returns /path/to/Trixi/src/Trixi.jl, dirname gives the parent directory +EXAMPLES_DIR = joinpath(pathof(Trixi) |> dirname |> dirname, "examples", "3d") + +@testset "P4estMesh" begin + @trixi_testset "elixir_advection_basic_p4est.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic_p4est.jl"), + l2 = [0.00013446460962856976], + linf = [0.0012577781391462928]) + end + + @trixi_testset "elixir_advection_p4est_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_p4est_unstructured_curved.jl"), + l2 = [0.0006244885699399409], + linf = [0.04076651402041587]) + end + + @trixi_testset 
"elixir_advection_p4est_non_conforming.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_p4est_non_conforming.jl"), + l2 = [0.0024774648310858928], + linf = [0.021727876954353964]) + end + + @trixi_testset "elixir_advection_amr_p4est.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_p4est.jl"), + l2 = [9.773852895157622e-6], + linf = [0.0005853874124926162]) + end + + @trixi_testset "elixir_advection_amr_p4est_unstructured_curved.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_amr_p4est_unstructured_curved.jl"), + l2 = [0.00014665036779554962], + linf = [0.00845936405684372]) + end + + @trixi_testset "elixir_advection_restart_p4est.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_restart_p4est.jl"), + l2 = [0.0281388160824776], + linf = [0.08740635193023694]) + end + + @trixi_testset "elixir_euler_nonperiodic_p4est.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_nonperiodic_p4est.jl"), + l2 = [0.0018268326813744103, 0.001745601029521995, 0.001745601029521962, 0.0017456010295218891, 0.003239834454817457], + linf = [0.014660503198892005, 0.01506958815284798, 0.01506958815283821, 0.015069588152864632, 0.02700205515651044]) + end + + @trixi_testset "elixir_euler_free_stream_p4est.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_p4est.jl"), + l2 = [8.116891021528916e-15, 3.5522708607626286e-14, 4.143281237333047e-14, 6.289087484242244e-14, 6.4284229587973e-14], + linf = [5.220268661787486e-13, 1.620439893379455e-12, 2.3271384819167906e-12, 2.466915560717098e-12, 5.472955422192172e-12]) + end +end + +end # module diff --git a/test/test_examples_3d_part2.jl b/test/test_examples_3d_part2.jl index 0fbb3b63ad..7513104c27 100644 --- a/test/test_examples_3d_part2.jl +++ b/test/test_examples_3d_part2.jl @@ -24,6 +24,9 @@ isdir(outdir) && rm(outdir, recursive=true) # Curved mesh include("test_examples_3d_curved.jl") + + # P4estMesh + include("test_examples_3d_p4est.jl") end diff --git a/test/test_special_elixirs.jl b/test/test_special_elixirs.jl index 407d7bde3c..6b032809f5 100644 --- a/test/test_special_elixirs.jl +++ b/test/test_special_elixirs.jl @@ -28,6 +28,9 @@ const EXAMPLES_DIR = joinpath(pathof(Trixi) |> dirname |> dirname, "examples") mean_eoc = convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "3d", "elixir_advection_basic_curved.jl"), 2) @test isapprox(mean_eoc[:l2], [4.0], rtol=0.01) + mean_eoc = convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "3d", "elixir_advection_p4est_unstructured_curved.jl"), 2, initial_refinement_level=1) + @test isapprox(mean_eoc[:l2], [3.31], rtol=0.01) + mean_eoc = convergence_test(@__MODULE__, joinpath(EXAMPLES_DIR, "paper-self-gravitating-gas-dynamics", "elixir_eulergravity_eoc_test.jl"), 2, tspan=(0.0, 0.1)) @test isapprox(mean_eoc[:l2], 4 * ones(4), atol=0.4) end