diff --git a/framework/src/base/MooseInit.C b/framework/src/base/MooseInit.C index f37362cc13d6..1bcf26f1ed69 100644 --- a/framework/src/base/MooseInit.C +++ b/framework/src/base/MooseInit.C @@ -42,8 +42,7 @@ RegisterSigHandler() MooseInit::MooseInit(int argc, char * argv[], MPI_Comm COMM_WORLD_IN) : LibMeshInit(argc, argv, COMM_WORLD_IN) { - auto ierr = PetscPopSignalHandler(); // get rid of PETSc error handler - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(COMM_WORLD_IN, PetscPopSignalHandler()); // get rid of PETSc error handler // Set the number of OpenMP threads to the same as the number of threads libMesh is going to use #ifdef LIBMESH_HAVE_OPENMP diff --git a/framework/src/preconditioners/VariableCondensationPreconditioner.C b/framework/src/preconditioners/VariableCondensationPreconditioner.C index 0ffd15283e25..0fbf42aff3bb 100644 --- a/framework/src/preconditioners/VariableCondensationPreconditioner.C +++ b/framework/src/preconditioners/VariableCondensationPreconditioner.C @@ -342,8 +342,6 @@ VariableCondensationPreconditioner::init() void VariableCondensationPreconditioner::condenseSystem() { - PetscErrorCode ierr = (PetscErrorCode)0; - // extract _M from the original matrix _matrix->create_submatrix(*_M, _rows, _lm_dofs); @@ -351,8 +349,8 @@ VariableCondensationPreconditioner::condenseSystem() _K->init(_global_primary_dofs.size(), _global_cols.size(), _primary_dofs.size(), _cols.size()); // Note: enabling nonzero allocation may be expensive. Improved memeory pre-allocation will be // investigated in the future - ierr = MatSetOption(_K->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatSetOption(_K->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE)); // here the _global_cols may not be sorted _matrix->create_submatrix_nosort(*_K, _global_primary_dofs, _global_cols); @@ -361,8 +359,7 @@ VariableCondensationPreconditioner::condenseSystem() // clean dinv if (_dinv) { - ierr = MatDestroy(&_dinv); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatDestroy(&_dinv)); _dinv = nullptr; } @@ -378,8 +375,9 @@ VariableCondensationPreconditioner::condenseSystem() Mat MdinvK; // calculate MdinvK - ierr = MatMatMatMult(_M->mat(), _dinv, _K->mat(), MAT_INITIAL_MATRIX, PETSC_DEFAULT, &MdinvK); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + this->MoosePreconditioner::comm().get(), + MatMatMatMult(_M->mat(), _dinv, _K->mat(), MAT_INITIAL_MATRIX, PETSC_DEFAULT, &MdinvK)); PetscMatrix MDinv_K(MdinvK, MoosePreconditioner::_communicator); // Preallocate memory for _J_condensed @@ -393,8 +391,7 @@ VariableCondensationPreconditioner::condenseSystem() computeCondensedJacobian(*_J_condensed, *pc_original_mat, _global_rows, MDinv_K); // Destroy MdinvK here otherwise we will have memory leak - ierr = MatDestroy(&MdinvK); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatDestroy(&MdinvK)); } void @@ -403,8 +400,6 @@ VariableCondensationPreconditioner::computeCondensedJacobian(PetscMatrix const std::vector & grows, PetscMatrix & block_mat) { - PetscErrorCode ierr = (PetscErrorCode)0; - // obtain entries from the original matrix PetscInt pc_ncols = 0, block_ncols = 0; const PetscInt *pc_cols, *block_cols; @@ -421,11 +416,11 @@ VariableCondensationPreconditioner::computeCondensedJacobian(PetscMatrix if (grows[i] >= original_mat.row_start() && grows[i] < original_mat.row_stop()) { // get one row of data from the original matrix - ierr = 
MatGetRow(original_mat.mat(), rid, &pc_ncols, &pc_cols, &pc_vals); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatGetRow(original_mat.mat(), rid, &pc_ncols, &pc_cols, &pc_vals)); // get corresponding row of data from the block matrix - ierr = MatGetRow(block_mat.mat(), i, &block_ncols, &block_cols, &block_vals); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatGetRow(block_mat.mat(), i, &block_ncols, &block_cols, &block_vals)); // extract data from certain cols, subtract the value from the block mat, and save the indices // and entries sub_cols and sub_vals // First, save the submatrix col index and value as a map @@ -459,18 +454,18 @@ VariableCondensationPreconditioner::computeCondensedJacobian(PetscMatrix } // Then, set values - ierr = MatSetValues(condensed_mat.mat(), - 1, - sub_rid, - sub_vals.size(), - sub_cols.data(), - sub_vals.data(), - INSERT_VALUES); - LIBMESH_CHKERR(ierr); - ierr = MatRestoreRow(original_mat.mat(), rid, &pc_ncols, &pc_cols, &pc_vals); - LIBMESH_CHKERR(ierr); - ierr = MatRestoreRow(block_mat.mat(), i, &block_ncols, &block_cols, &block_vals); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatSetValues(condensed_mat.mat(), + 1, + sub_rid, + sub_vals.size(), + sub_cols.data(), + sub_vals.data(), + INSERT_VALUES)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatRestoreRow(original_mat.mat(), rid, &pc_ncols, &pc_cols, &pc_vals)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatRestoreRow(block_mat.mat(), i, &block_ncols, &block_cols, &block_vals)); // clear data for this row sub_cols.clear(); sub_vals.clear(); @@ -505,14 +500,12 @@ VariableCondensationPreconditioner::preallocateCondensedJacobian( // condensed matrix std::vector n_nz, n_oz; - PetscErrorCode ierr = (PetscErrorCode)0; - // Get number of nonzeros from original_mat and block_mat for each row for (const auto & row_id : _rows) { // get number of non-zeros in the original matrix - ierr = MatGetRow(original_mat.mat(), row_id, &ncols, &col_vals, &vals); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatGetRow(original_mat.mat(), row_id, &ncols, &col_vals, &vals)); // get number of non-zeros in the block matrix dof_id_type block_row_id; // row id in the block matrix @@ -522,8 +515,9 @@ VariableCondensationPreconditioner::preallocateCondensedJacobian( else mooseError("DoF ", row_id, " does not exist in the rows of condensed_mat"); - ierr = MatGetRow(block_mat.mat(), block_row_id, &block_ncols, &block_col_vals, &block_vals); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + this->MoosePreconditioner::comm().get(), + MatGetRow(block_mat.mat(), block_row_id, &block_ncols, &block_col_vals, &block_vals)); // make sure the block index is transformed in terms of the original mat block_cols_to_org.clear(); @@ -537,11 +531,12 @@ VariableCondensationPreconditioner::preallocateCondensedJacobian( // merge `col_vals` and `block_cols_to_org` and save the common indices in `merged_cols`. 
mergeArrays(col_vals, block_cols_to_org.data(), ncols, block_ncols, merged_cols); - ierr = MatRestoreRow(block_mat.mat(), block_row_id, &block_ncols, &block_col_vals, &block_vals); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + this->MoosePreconditioner::comm().get(), + MatRestoreRow(block_mat.mat(), block_row_id, &block_ncols, &block_col_vals, &block_vals)); - ierr = MatRestoreRow(original_mat.mat(), row_id, &ncols, &col_vals, &vals); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatRestoreRow(original_mat.mat(), row_id, &ncols, &col_vals, &vals)); // Count the nnz for DIAGONAL and OFF-DIAGONAL parts PetscInt row_n_nz = 0, row_n_oz = 0; @@ -652,10 +647,9 @@ VariableCondensationPreconditioner::getCondensedXY(const NumericVector & NumericVector & x) { Mat mdinv; - PetscErrorCode ierr = (PetscErrorCode)0; // calculate mdinv - ierr = MatMatMult(_M->mat(), _dinv, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &mdinv); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatMatMult(_M->mat(), _dinv, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &mdinv)); PetscMatrix MDinv(mdinv, MoosePreconditioner::_communicator); _x_hat->init(_J_condensed->n(), _J_condensed->local_n(), false, PARALLEL); @@ -681,8 +675,7 @@ VariableCondensationPreconditioner::getCondensedXY(const NumericVector & _y_hat->close(); _x_hat->close(); - ierr = MatDestroy(&mdinv); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatDestroy(&mdinv)); } void @@ -737,121 +730,111 @@ VariableCondensationPreconditioner::findZeroDiagonals(SparseMatrix & mat { indices.clear(); IS zerodiags, zerodiags_all; - PetscErrorCode ierr; const PetscInt * petsc_idx; PetscInt nrows; // make sure we have a PETSc matrix PetscMatrix * petsc_mat = cast_ptr *>(&mat); - ierr = MatFindZeroDiagonals(petsc_mat->mat(), &zerodiags); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatFindZeroDiagonals(petsc_mat->mat(), &zerodiags)); // synchronize all indices - ierr = ISAllGather(zerodiags, &zerodiags_all); - LIBMESH_CHKERR(ierr); - ierr = ISGetIndices(zerodiags_all, &petsc_idx); - LIBMESH_CHKERR(ierr); - ierr = ISGetSize(zerodiags_all, &nrows); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + ISAllGather(zerodiags, &zerodiags_all)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + ISGetIndices(zerodiags_all, &petsc_idx)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), ISGetSize(zerodiags_all, &nrows)); for (PetscInt i = 0; i < nrows; ++i) indices.push_back(petsc_idx[i]); - ierr = ISRestoreIndices(zerodiags_all, &petsc_idx); - LIBMESH_CHKERR(ierr); - ierr = ISDestroy(&zerodiags); - LIBMESH_CHKERR(ierr); - ierr = ISDestroy(&zerodiags_all); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + ISRestoreIndices(zerodiags_all, &petsc_idx)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), ISDestroy(&zerodiags)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), ISDestroy(&zerodiags_all)); } void VariableCondensationPreconditioner::clear() { - PetscErrorCode ierr; if (_dinv != nullptr) - { - ierr = MatDestroy(&_dinv); - LIBMESH_CHKERR(ierr); - } + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatDestroy(&_dinv)); } void VariableCondensationPreconditioner::computeDInverse(Mat & dinv) { - PetscErrorCode ierr; Mat F, I, dinv_dense; IS perm, iperm; MatFactorInfo info; - ierr = MatCreateDense( - PETSC_COMM_WORLD, 
_D->local_n(), _D->local_m(), _D->n(), _D->m(), NULL, &dinv_dense); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + this->MoosePreconditioner::comm().get(), + MatCreateDense( + PETSC_COMM_WORLD, _D->local_n(), _D->local_m(), _D->n(), _D->m(), NULL, &dinv_dense)); // Create an identity matrix as the right-hand-side - ierr = MatCreateDense(PETSC_COMM_WORLD, _D->local_m(), _D->local_m(), _D->m(), _D->m(), NULL, &I); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + this->MoosePreconditioner::comm().get(), + MatCreateDense(PETSC_COMM_WORLD, _D->local_m(), _D->local_m(), _D->m(), _D->m(), NULL, &I)); for (unsigned int i = 0; i < _D->m(); ++i) - { - ierr = MatSetValue(I, i, i, 1.0, INSERT_VALUES); - LIBMESH_CHKERR(ierr); - } + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatSetValue(I, i, i, 1.0, INSERT_VALUES)); - ierr = MatAssemblyBegin(I, MAT_FINAL_ASSEMBLY); - LIBMESH_CHKERR(ierr); - ierr = MatAssemblyEnd(I, MAT_FINAL_ASSEMBLY); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatAssemblyBegin(I, MAT_FINAL_ASSEMBLY)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatAssemblyEnd(I, MAT_FINAL_ASSEMBLY)); // Factorize D - ierr = MatGetOrdering(_D->mat(), MATORDERINGND, &perm, &iperm); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatGetOrdering(_D->mat(), MATORDERINGND, &perm, &iperm)); - ierr = MatFactorInfoInitialize(&info); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatFactorInfoInitialize(&info)); - ierr = MatGetFactor(_D->mat(), MATSOLVERSUPERLU_DIST, MAT_FACTOR_LU, &F); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatGetFactor(_D->mat(), MATSOLVERSUPERLU_DIST, MAT_FACTOR_LU, &F)); - ierr = MatLUFactorSymbolic(F, _D->mat(), perm, iperm, &info); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatLUFactorSymbolic(F, _D->mat(), perm, iperm, &info)); - ierr = MatLUFactorNumeric(F, _D->mat(), &info); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatLUFactorNumeric(F, _D->mat(), &info)); // Solve for Dinv - ierr = MatMatSolve(F, I, dinv_dense); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatMatSolve(F, I, dinv_dense)); - ierr = MatAssemblyBegin(dinv_dense, MAT_FINAL_ASSEMBLY); - LIBMESH_CHKERR(ierr); - ierr = MatAssemblyEnd(dinv_dense, MAT_FINAL_ASSEMBLY); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatAssemblyBegin(dinv_dense, MAT_FINAL_ASSEMBLY)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatAssemblyEnd(dinv_dense, MAT_FINAL_ASSEMBLY)); // copy value to dinv - ierr = MatConvert(dinv_dense, MATAIJ, MAT_INITIAL_MATRIX, &dinv); - LIBMESH_CHKERR(ierr); - - ierr = MatDestroy(&dinv_dense); - LIBMESH_CHKERR(ierr); - - ierr = MatDestroy(&I); - LIBMESH_CHKERR(ierr); - ierr = MatDestroy(&F); - LIBMESH_CHKERR(ierr); - ierr = ISDestroy(&perm); - LIBMESH_CHKERR(ierr); - ierr = ISDestroy(&iperm); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatConvert(dinv_dense, MATAIJ, MAT_INITIAL_MATRIX, &dinv)); + + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatDestroy(&dinv_dense)); + + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatDestroy(&I)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), MatDestroy(&F)); + 
LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), ISDestroy(&perm)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), ISDestroy(&iperm)); } void VariableCondensationPreconditioner::computeDInverseDiag(Mat & dinv) { - PetscErrorCode ierr; auto diag_D = NumericVector::build(MoosePreconditioner::_communicator); // Initialize dinv - ierr = MatCreateAIJ( - PETSC_COMM_WORLD, _D->local_n(), _D->local_m(), _D->n(), _D->m(), 1, NULL, 0, NULL, &dinv); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatCreateAIJ(PETSC_COMM_WORLD, + _D->local_n(), + _D->local_m(), + _D->n(), + _D->m(), + 1, + NULL, + 0, + NULL, + &dinv)); // Allocate storage diag_D->init(_D->m(), _D->local_m(), false, PARALLEL); // Fill entries @@ -866,16 +849,16 @@ VariableCondensationPreconditioner::computeDInverseDiag(Mat & dinv) { if (MooseUtils::absoluteFuzzyEqual((*diag_D)(i), 0.0)) mooseError("Trying to compute reciprocal of 0."); - ierr = MatSetValue(dinv, - i, - _map_global_primary_order.at(_global_primary_dofs[i]), - 1.0 / (*diag_D)(i), - INSERT_VALUES); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatSetValue(dinv, + i, + _map_global_primary_order.at(_global_primary_dofs[i]), + 1.0 / (*diag_D)(i), + INSERT_VALUES)); } - ierr = MatAssemblyBegin(dinv, MAT_FINAL_ASSEMBLY); - LIBMESH_CHKERR(ierr); - ierr = MatAssemblyEnd(dinv, MAT_FINAL_ASSEMBLY); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatAssemblyBegin(dinv, MAT_FINAL_ASSEMBLY)); + LibmeshPetscCallA(this->MoosePreconditioner::comm().get(), + MatAssemblyEnd(dinv, MAT_FINAL_ASSEMBLY)); } diff --git a/framework/src/systems/NonlinearEigenSystem.C b/framework/src/systems/NonlinearEigenSystem.C index 6011773dc5ea..9dc42e44cbec 100644 --- a/framework/src/systems/NonlinearEigenSystem.C +++ b/framework/src/systems/NonlinearEigenSystem.C @@ -83,11 +83,9 @@ assemble_matrix(EquationSystems & es, const std::string & system_name) eigen_nl.eigenMatrixTag()); #if LIBMESH_HAVE_SLEPC if (p->negativeSignEigenKernel()) - { - auto ierr = - MatScale(static_cast &>(eigen_system.get_matrix_B()).mat(), -1.0); - LIBMESH_CHKERR(ierr); - } + LibmeshPetscCallA( + p->comm().get(), + MatScale(static_cast &>(eigen_system.get_matrix_B()).mat(), -1.0)); #endif return; } diff --git a/framework/src/utils/PetscSupport.C b/framework/src/utils/PetscSupport.C index 25028ffa8603..c869c3910637 100644 --- a/framework/src/utils/PetscSupport.C +++ b/framework/src/utils/PetscSupport.C @@ -62,16 +62,14 @@ void MooseVecView(NumericVector & vector) { PetscVector & petsc_vec = static_cast &>(vector); - auto ierr = VecView(petsc_vec.vec(), 0); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(vector.comm().get(), VecView(petsc_vec.vec(), 0)); } void MooseMatView(SparseMatrix & mat) { PetscMatrix & petsc_mat = static_cast &>(mat); - auto ierr = MatView(petsc_mat.mat(), 0); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mat.comm().get(), MatView(petsc_mat.mat(), 0)); } void @@ -79,8 +77,7 @@ MooseVecView(const NumericVector & vector) { PetscVector & petsc_vec = static_cast &>(const_cast &>(vector)); - auto ierr = VecView(petsc_vec.vec(), 0); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(vector.comm().get(), VecView(petsc_vec.vec(), 0)); } void @@ -88,8 +85,7 @@ MooseMatView(const SparseMatrix & mat) { PetscMatrix & petsc_mat = static_cast &>(const_cast &>(mat)); - auto ierr = MatView(petsc_mat.mat(), 0); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mat.comm().get(), MatView(petsc_mat.mat(), 
0)); } namespace Moose diff --git a/framework/src/utils/SlepcEigenSolverConfiguration.C b/framework/src/utils/SlepcEigenSolverConfiguration.C index 0ed33357baad..57d83f0b51d6 100644 --- a/framework/src/utils/SlepcEigenSolverConfiguration.C +++ b/framework/src/utils/SlepcEigenSolverConfiguration.C @@ -28,41 +28,41 @@ SlepcEigenSolverConfiguration::SlepcEigenSolverConfiguration( void SlepcEigenSolverConfiguration::configure_solver() { - auto ierr = (PetscErrorCode)0; - if (_eigen_problem.isNonlinearEigenvalueSolver()) { // Set custom monitors for SNES and KSP _eigen_problem.initPetscOutputAndSomeSolverSettings(); // Let us remove extra "eps_power" from SNES since users do not like it - ierr = Moose::SlepcSupport::mooseSlepcEPSSNESSetUpOptionPrefix(_slepc_solver.eps()); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(_eigen_problem.comm().get(), + Moose::SlepcSupport::mooseSlepcEPSSNESSetUpOptionPrefix(_slepc_solver.eps())); // Let us hook up a customize PC if users ask. Users still can use PETSc options to override // this setting if (_eigen_problem.solverParams()._customized_pc_for_eigen) - { - ierr = Moose::SlepcSupport::mooseSlepcEPSSNESSetCustomizePC(_slepc_solver.eps()); - LIBMESH_CHKERR(ierr); - } + LibmeshPetscCallA(_eigen_problem.comm().get(), + Moose::SlepcSupport::mooseSlepcEPSSNESSetCustomizePC(_slepc_solver.eps())); + // Let set a default PC side. I would like to have the setting be consistent with // what we do in regular nonlinear executioner. Petsc options are able to override // this setting - ierr = Moose::SlepcSupport::mooseSlepcEPSSNESKSPSetPCSide(_eigen_problem, _slepc_solver.eps()); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + _eigen_problem.comm().get(), + Moose::SlepcSupport::mooseSlepcEPSSNESKSPSetPCSide(_eigen_problem, _slepc_solver.eps())); // A customized stopping test for nonlinear free power iterations. // Nonlinear power iterations need to be marked as converged in EPS to // retrieve solution from SLEPc EPS. - ierr = EPSSetStoppingTestFunction( - _slepc_solver.eps(), Moose::SlepcSupport::mooseSlepcStoppingTest, &_eigen_problem, NULL); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(_eigen_problem.comm().get(), + EPSSetStoppingTestFunction(_slepc_solver.eps(), + Moose::SlepcSupport::mooseSlepcStoppingTest, + &_eigen_problem, + NULL)); // Remove all SLEPc monitors. - ierr = EPSMonitorCancel(_slepc_solver.eps()); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(_eigen_problem.comm().get(), EPSMonitorCancel(_slepc_solver.eps())); // A customized EPS monitor in moose. We need to print only eigenvalue - ierr = EPSMonitorSet( - _slepc_solver.eps(), Moose::SlepcSupport::mooseSlepcEPSMonitor, &_eigen_problem, NULL); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + _eigen_problem.comm().get(), + EPSMonitorSet( + _slepc_solver.eps(), Moose::SlepcSupport::mooseSlepcEPSMonitor, &_eigen_problem, NULL)); } } diff --git a/framework/src/utils/SlepcSupport.C b/framework/src/utils/SlepcSupport.C index fc854ed5ebbc..1623461514dc 100644 --- a/framework/src/utils/SlepcSupport.C +++ b/framework/src/utils/SlepcSupport.C @@ -1000,51 +1000,54 @@ attachCallbacksToMat(EigenProblem & eigen_problem, Mat mat, bool eigen) // Attach the Jacobian computation function. If \p mat is the "eigen" matrix corresponding to B, // then attach our JacobianB computation routine, else the matrix corresponds to A, and we attach // the JacobianA computation routine - auto ierr = PetscObjectComposeFunction((PetscObject)mat, - "formJacobian", - eigen ? 
Moose::SlepcSupport::mooseSlepcEigenFormJacobianB - : Moose::SlepcSupport::mooseSlepcEigenFormJacobianA); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + eigen_problem.comm().get(), + PetscObjectComposeFunction((PetscObject)mat, + "formJacobian", + eigen ? Moose::SlepcSupport::mooseSlepcEigenFormJacobianB + : Moose::SlepcSupport::mooseSlepcEigenFormJacobianA)); // Attach the residual computation function. If \p mat is the "eigen" matrix corresponding to B, // then attach our FunctionB computation routine, else the matrix corresponds to A, and we attach // the FunctionA computation routine - ierr = PetscObjectComposeFunction((PetscObject)mat, - "formFunction", - eigen ? Moose::SlepcSupport::mooseSlepcEigenFormFunctionB - : Moose::SlepcSupport::mooseSlepcEigenFormFunctionA); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA( + eigen_problem.comm().get(), + PetscObjectComposeFunction((PetscObject)mat, + "formFunction", + eigen ? Moose::SlepcSupport::mooseSlepcEigenFormFunctionB + : Moose::SlepcSupport::mooseSlepcEigenFormFunctionA)); // It's also beneficial to be able to evaluate both A and B residuals at once - ierr = PetscObjectComposeFunction( - (PetscObject)mat, "formFunctionAB", Moose::SlepcSupport::mooseSlepcEigenFormFunctionAB); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(eigen_problem.comm().get(), + PetscObjectComposeFunction((PetscObject)mat, + "formFunctionAB", + Moose::SlepcSupport::mooseSlepcEigenFormFunctionAB)); // Users may choose to provide a custom measure of the norm of B (Bx for a linear system) if (eigen_problem.bxNormProvided()) - { - ierr = PetscObjectComposeFunction( - (PetscObject)mat, "formNorm", Moose::SlepcSupport::mooseSlepcEigenFormNorm); - LIBMESH_CHKERR(ierr); - } + LibmeshPetscCallA(eigen_problem.comm().get(), + PetscObjectComposeFunction((PetscObject)mat, + "formNorm", + Moose::SlepcSupport::mooseSlepcEigenFormNorm)); // Finally we need to attach the "context" object, which is our EigenProblem, to the matrices so // that eventually when we get callbacks from SLEPc we can call methods on the EigenProblem PetscContainer container; - ierr = PetscContainerCreate(eigen_problem.comm().get(), &container); - LIBMESH_CHKERR(ierr); - ierr = PetscContainerSetPointer(container, &eigen_problem); - LIBMESH_CHKERR(ierr); - ierr = PetscObjectCompose((PetscObject)mat, "formJacobianCtx", (PetscObject)container); - LIBMESH_CHKERR(ierr); - ierr = PetscObjectCompose((PetscObject)mat, "formFunctionCtx", (PetscObject)container); + LibmeshPetscCallA(eigen_problem.comm().get(), + PetscContainerCreate(eigen_problem.comm().get(), &container)); + LibmeshPetscCallA(eigen_problem.comm().get(), + PetscContainerSetPointer(container, &eigen_problem)); + LibmeshPetscCallA( + eigen_problem.comm().get(), + PetscObjectCompose((PetscObject)mat, "formJacobianCtx", (PetscObject)container)); + LibmeshPetscCallA( + eigen_problem.comm().get(), + PetscObjectCompose((PetscObject)mat, "formFunctionCtx", (PetscObject)container)); if (eigen_problem.bxNormProvided()) - { - ierr = PetscObjectCompose((PetscObject)mat, "formNormCtx", (PetscObject)container); - LIBMESH_CHKERR(ierr); - } - ierr = PetscContainerDestroy(&container); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(eigen_problem.comm().get(), + PetscObjectCompose((PetscObject)mat, "formNormCtx", (PetscObject)container)); + + LibmeshPetscCallA(eigen_problem.comm().get(), PetscContainerDestroy(&container)); } PetscErrorCode @@ -1089,13 +1092,12 @@ mooseMatMult_NonEigen(Mat mat, Vec x, Vec r) void setOperationsForShellMat(EigenProblem & eigen_problem, Mat mat, 
bool eigen) { - auto ierr = MatShellSetContext(mat, &eigen_problem); - LIBMESH_CHKERR(ierr); - ierr = MatShellSetOperation(mat, - MATOP_MULT, - eigen ? (void (*)(void))mooseMatMult_Eigen - : (void (*)(void))mooseMatMult_NonEigen); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(eigen_problem.comm().get(), MatShellSetContext(mat, &eigen_problem)); + LibmeshPetscCallA(eigen_problem.comm().get(), + MatShellSetOperation(mat, + MATOP_MULT, + eigen ? (void (*)(void))mooseMatMult_Eigen + : (void (*)(void))mooseMatMult_NonEigen)); } PETSC_EXTERN PetscErrorCode diff --git a/modules/external_petsc_solver/src/mesh/PETScDMDAMesh.C b/modules/external_petsc_solver/src/mesh/PETScDMDAMesh.C index ab15be18dcd1..1ee377c2324b 100644 --- a/modules/external_petsc_solver/src/mesh/PETScDMDAMesh.C +++ b/modules/external_petsc_solver/src/mesh/PETScDMDAMesh.C @@ -91,38 +91,36 @@ add_element_Quad4(DM da, // xp: number of processors in x direction // yp: number of processors in y direction PetscInt Mx, My, xp, yp; - auto ierr = DMDAGetInfo(da, - PETSC_IGNORE, - &Mx, - &My, - PETSC_IGNORE, - &xp, - &yp, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mesh.comm().get(), + DMDAGetInfo(da, + PETSC_IGNORE, + &Mx, + &My, + PETSC_IGNORE, + &xp, + &yp, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE)); const PetscInt *lx, *ly; PetscInt *lxo, *lyo; // PETSc-3.8.x or older use PetscDataType #if PETSC_VERSION_LESS_THAN(3, 9, 0) - ierr = DMGetWorkArray(da, xp + yp + 2, PETSC_INT, &lxo); + LibmeshPetscCallA(mesh.comm().get(), DMGetWorkArray(da, xp + yp + 2, PETSC_INT, &lxo)); #else // PETSc-3.9.x or newer use MPI_DataType - ierr = DMGetWorkArray(da, xp + yp + 2, MPIU_INT, &lxo); + LibmeshPetscCallA(mesh.comm().get(), DMGetWorkArray(da, xp + yp + 2, MPIU_INT, &lxo)); #endif - LIBMESH_CHKERR(ierr); // Gets the ranges of indices in the x, y and z direction that are owned by each process // Ranges here are different from what we have in Mat and Vec. // It means how many points each processor holds - ierr = DMDAGetOwnershipRanges(da, &lx, &ly, NULL); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mesh.comm().get(), DMDAGetOwnershipRanges(da, &lx, &ly, NULL)); lxo[0] = 0; for (PetscInt i = 0; i < xp; i++) lxo[i + 1] = lxo[i] + lx[i]; @@ -137,31 +135,26 @@ add_element_Quad4(DM da, // Finds integer in a sorted array of integers // Loc: the location if found, otherwise -(slot+1) // where slot is the place the value would go - ierr = PetscFindInt(i, xp + 1, lxo, &xpid); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mesh.comm().get(), PetscFindInt(i, xp + 1, lxo, &xpid)); xpid = xpid < 0 ? -xpid - 1 - 1 : xpid; - ierr = PetscFindInt(i + 1, xp + 1, lxo, &xpidplus); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mesh.comm().get(), PetscFindInt(i + 1, xp + 1, lxo, &xpidplus)); xpidplus = xpidplus < 0 ? -xpidplus - 1 - 1 : xpidplus; - ierr = PetscFindInt(j, yp + 1, lyo, &ypid); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mesh.comm().get(), PetscFindInt(j, yp + 1, lyo, &ypid)); ypid = ypid < 0 ? -ypid - 1 - 1 : ypid; - ierr = PetscFindInt(j + 1, yp + 1, lyo, &ypidplus); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mesh.comm().get(), PetscFindInt(j + 1, yp + 1, lyo, &ypidplus)); ypidplus = ypidplus < 0 ? 
-ypidplus - 1 - 1 : ypidplus; #if PETSC_VERSION_LESS_THAN(3, 9, 0) - ierr = DMRestoreWorkArray(da, xp + yp + 2, PETSC_INT, &lxo); + LibmeshPetscCallA(mesh.comm().get(), DMRestoreWorkArray(da, xp + yp + 2, PETSC_INT, &lxo)); #else - ierr = DMRestoreWorkArray(da, xp + yp + 2, MPIU_INT, &lxo); + LibmeshPetscCallA(mesh.comm().get(), DMRestoreWorkArray(da, xp + yp + 2, MPIU_INT, &lxo)); #endif - LIBMESH_CHKERR(ierr); // Bottom Left auto node0_ptr = mesh.add_point(Point(static_cast(i) / nx, static_cast(j) / ny, 0), @@ -347,23 +340,23 @@ build_cube_Quad4(UnstructuredMesh & mesh, DM da) PetscInt xs, ys, xm, ym, Mx, My, xp, yp; /* Get local grid boundaries */ - auto ierr = DMDAGetCorners(da, &xs, &ys, PETSC_IGNORE, &xm, &ym, PETSC_IGNORE); - LIBMESH_CHKERR(ierr); - ierr = DMDAGetInfo(da, - PETSC_IGNORE, - &Mx, - &My, - PETSC_IGNORE, - &xp, - &yp, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE, - PETSC_IGNORE); - LIBMESH_CHKERR(ierr); + LibmeshPetscCallA(mesh.comm().get(), + DMDAGetCorners(da, &xs, &ys, PETSC_IGNORE, &xm, &ym, PETSC_IGNORE)); + LibmeshPetscCallA(mesh.comm().get(), + DMDAGetInfo(da, + PETSC_IGNORE, + &Mx, + &My, + PETSC_IGNORE, + &xp, + &yp, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE, + PETSC_IGNORE)); for (PetscInt j = ys; j < ys + ym; j++) for (PetscInt i = xs; i < xs + xm; i++) diff --git a/modules/stochastic_tools/src/utils/POD.C b/modules/stochastic_tools/src/utils/POD.C index 59a93a3ecb46..cb57617ae72f 100644 --- a/modules/stochastic_tools/src/utils/POD.C +++ b/modules/stochastic_tools/src/utils/POD.C @@ -69,14 +69,17 @@ POD::computePOD(const VariableName & vname, _communicator.max(snapshot_size); // Generally snapshot matrices are dense. 
- LIBMESH_CHKERR(MatCreateDense( - _communicator.get(), local_rows, PETSC_DECIDE, global_rows, snapshot_size, NULL, &mat)); + LibmeshPetscCallA( + _communicator.get(), + MatCreateDense( + _communicator.get(), local_rows, PETSC_DECIDE, global_rows, snapshot_size, NULL, &mat)); // Check where the local rows begin in the matrix, we use these to convert from local to // global indices dof_id_type local_beg = 0; dof_id_type local_end = 0; - LIBMESH_CHKERR( + LibmeshPetscCallA( + _communicator.get(), MatGetOwnershipRange(mat, numeric_petsc_cast(&local_beg), numeric_petsc_cast(&local_end))); unsigned int counter = 0; @@ -93,61 +96,67 @@ POD::computePOD(const VariableName & vname, std::iota(std::begin(columns), std::end(columns), 0); // Set the rows in the "sparse" matrix - LIBMESH_CHKERR(MatSetValues(mat, - 1, - rows.data(), - snapshot_size, - columns.data(), - snap.get_values().data(), - INSERT_VALUES)); + LibmeshPetscCallA(_communicator.get(), + MatSetValues(mat, + 1, + rows.data(), + snapshot_size, + columns.data(), + snap.get_values().data(), + INSERT_VALUES)); } } // Assemble the matrix - LIBMESH_CHKERR(MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY)); - LIBMESH_CHKERR(MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY)); + LibmeshPetscCallA(_communicator.get(), MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY)); + LibmeshPetscCallA(_communicator.get(), MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY)); SVD svd; - LIBMESH_CHKERR(SVDCreate(_communicator.get(), &svd)); + LibmeshPetscCallA(_communicator.get(), SVDCreate(_communicator.get(), &svd)); // Now we set the operators for our SVD objects - LIBMESH_CHKERR(SVDSetOperators(svd, mat, NULL)); + LibmeshPetscCallA(_communicator.get(), SVDSetOperators(svd, mat, NULL)); // Set the parallel operation mode to "DISTRIBUTED", default is "REDUNDANT" DS ds; - LIBMESH_CHKERR(SVDGetDS(svd, &ds)); - LIBMESH_CHKERR(DSSetParallel(ds, DS_PARALLEL_DISTRIBUTED)); + LibmeshPetscCallA(_communicator.get(), SVDGetDS(svd, &ds)); + LibmeshPetscCallA(_communicator.get(), DSSetParallel(ds, DS_PARALLEL_DISTRIBUTED)); // We want the Lanczos method, might give the choice to the user // at some point - LIBMESH_CHKERR(SVDSetType(svd, SVDTRLANCZOS)); + LibmeshPetscCallA(_communicator.get(), SVDSetType(svd, SVDTRLANCZOS)); // Default is the transpose is explicitly created. This method is less efficient // computationally but better for storage - LIBMESH_CHKERR(SVDSetImplicitTranspose(svd, PETSC_TRUE)); + LibmeshPetscCallA(_communicator.get(), SVDSetImplicitTranspose(svd, PETSC_TRUE)); - LIBMESH_CHKERR(PetscOptionsInsertString(NULL, _extra_slepc_options.c_str())); + LibmeshPetscCallA(_communicator.get(), + PetscOptionsInsertString(NULL, _extra_slepc_options.c_str())); // Set the subspace size for the Lanczos method, we take twice as many // basis vectors as the requested number of POD modes. This guarantees in most of the case the // convergence of the singular triplets. - LIBMESH_CHKERR(SVDSetDimensions( - svd, num_modes, std::min(2 * num_modes, global_rows), std::min(2 * num_modes, global_rows))); + LibmeshPetscCallA(_communicator.get(), + SVDSetDimensions(svd, + num_modes, + std::min(2 * num_modes, global_rows), + std::min(2 * num_modes, global_rows))); // Gives the user the ability to override any option set before the solve. 
- LIBMESH_CHKERR(SVDSetFromOptions(svd)); + LibmeshPetscCallA(_communicator.get(), SVDSetFromOptions(svd)); // Compute the singular value triplets - LIBMESH_CHKERR(SVDSolve(svd)); + LibmeshPetscCallA(_communicator.get(), SVDSolve(svd)); // Check how many singular triplets converged PetscInt nconv; - LIBMESH_CHKERR(SVDGetConverged(svd, &nconv)); + LibmeshPetscCallA(_communicator.get(), SVDGetConverged(svd, &nconv)); // We start extracting the basis functions and the singular values. // Find the local size needed for u dof_id_type local_snapsize = 0; - LIBMESH_CHKERR(MatGetLocalSize(mat, NULL, numeric_petsc_cast(&local_snapsize))); + LibmeshPetscCallA(_communicator.get(), + MatGetLocalSize(mat, NULL, numeric_petsc_cast(&local_snapsize))); PetscVector u(_communicator); u.init(snapshot_size, local_snapsize, false, PARALLEL); @@ -162,7 +171,8 @@ POD::computePOD(const VariableName & vname, singular_values.resize(nconv); // Fetch the singular value triplet and immediately save the singular value for (PetscInt j = 0; j < nconv; ++j) - LIBMESH_CHKERR(SVDGetSingularTriplet(svd, j, &singular_values[j], NULL, NULL)); + LibmeshPetscCallA(_communicator.get(), + SVDGetSingularTriplet(svd, j, &singular_values[j], NULL, NULL)); // Determine how many modes we need unsigned int num_requested_modes = determineNumberOfModes(singular_values, num_modes, energy); @@ -172,12 +182,12 @@ POD::computePOD(const VariableName & vname, right_basis_functions.resize(num_requested_modes); for (PetscInt j = 0; j < cast_int(num_requested_modes); ++j) { - LIBMESH_CHKERR(SVDGetSingularTriplet(svd, j, NULL, v.vec(), u.vec())); + LibmeshPetscCallA(_communicator.get(), SVDGetSingularTriplet(svd, j, NULL, v.vec(), u.vec())); u.localize(left_basis_functions[j].get_values()); v.localize(right_basis_functions[j].get_values()); } - LIBMESH_CHKERR(MatDestroy(&mat)); - LIBMESH_CHKERR(SVDDestroy(&svd)); + LibmeshPetscCallA(_communicator.get(), MatDestroy(&mat)); + LibmeshPetscCallA(_communicator.get(), SVDDestroy(&svd)); #else // These variables would otherwise be unused libmesh_ignore(vname);
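
Every hunk above applies the same mechanical transformation: the manual `ierr = <PETSc call>; LIBMESH_CHKERR(ierr);` idiom is replaced by the `LibmeshPetscCallA(<MPI communicator>, <PETSc call>)` macro, which checks the returned error code and aborts on the given communicator when the call fails. A minimal before/after sketch of that pattern follows; the function and variable names are illustrative only, and the header locations for the macros are assumptions rather than something this patch establishes.

// Minimal sketch of the error-handling migration applied throughout this patch.
// destroyWorkMatrixOld/destroyWorkMatrixNew are hypothetical helpers, not code
// from the patch; the macro usage mirrors the converted call sites above.
#include "libmesh/petsc_macro.h"            // assumed home of LIBMESH_CHKERR
#include "libmesh/petsc_solver_exception.h" // assumed home of LibmeshPetscCallA
#include <petscmat.h>

// Before: store the PETSc error code and check it explicitly.
void
destroyWorkMatrixOld(Mat & work_mat)
{
  PetscErrorCode ierr = MatDestroy(&work_mat);
  LIBMESH_CHKERR(ierr);
}

// After: hand the communicator and the call to the macro, which performs the
// check (and aborts on that communicator) itself.
void
destroyWorkMatrixNew(MPI_Comm comm, Mat & work_mat)
{
  LibmeshPetscCallA(comm, MatDestroy(&work_mat));
}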