diff --git a/Common/include/code_config.hpp b/Common/include/code_config.hpp index 377432ee945..1524b720aff 100644 --- a/Common/include/code_config.hpp +++ b/Common/include/code_config.hpp @@ -60,6 +60,13 @@ template using su2enable_if = typename std::enable_if::type; +/*--- Compile-time type selection. ---*/ +template struct su2conditional { using type = T; }; +template struct su2conditional { using type = F; }; + +template +using su2conditional_t = typename su2conditional::type; + /*--- Detect compilation with OpenMP. ---*/ #if defined(_OPENMP) #define HAVE_OMP @@ -103,7 +110,7 @@ using su2double = codi::RealForward; using su2double = double; #endif -/*--- This type can be used for (rare) compatiblity cases or for +/*--- This type can be used for (rare) compatibility cases or for * computations that are intended to be (always) passive. ---*/ using passivedouble = double; diff --git a/Common/include/fem/fem_geometry_structure.hpp b/Common/include/fem/fem_geometry_structure.hpp index fb4787d645c..eec7e3bac29 100644 --- a/Common/include/fem/fem_geometry_structure.hpp +++ b/Common/include/fem/fem_geometry_structure.hpp @@ -1227,11 +1227,14 @@ class CMeshFEM_DG: public CMeshFEM { void SetWallDistance(su2double val) override; /*! - * \brief Set the wall distance based on an previously constructed ADT - * \param[in] config - Definition of the particular problem. - * \param[in] WallADT - The ADT to compute the wall distance + * \brief Reduce the wall distance based on an previously constructed ADT. + * \details The ADT might belong to another zone, giving rise to lower wall distances + * than those already stored. + * \param[in] WallADT - The ADT to reduce the wall distance + * \param[in] config - Config of this geometry (not the ADT zone's geometry) + * \param[in] iZone - ignored */ - void SetWallDistance(const CConfig *config, CADTElemClass* WallADT) override; + void SetWallDistance(CADTElemClass* WallADT, const CConfig* config, unsigned short iZone) override; }; /*! diff --git a/Common/include/geometry/CGeometry.hpp b/Common/include/geometry/CGeometry.hpp index cc8aa2867b2..61dd15714a1 100644 --- a/Common/include/geometry/CGeometry.hpp +++ b/Common/include/geometry/CGeometry.hpp @@ -1683,11 +1683,14 @@ class CGeometry { virtual std::unique_ptr ComputeViscousWallADT(const CConfig *config) const { return nullptr; } /*! - * \brief Set the wall distance based on an previously constructed ADT - * \param[in] config - Definition of the particular problem. - * \param[in] WallADT - The ADT to compute the wall distance - */ - virtual void SetWallDistance(const CConfig *config, CADTElemClass* WallADT) {} + * \brief Reduce the wall distance based on an previously constructed ADT. + * \details The ADT might belong to another zone, giving rise to lower wall distances + * than those already stored. + * \param[in] WallADT - The ADT to reduce the wall distance + * \param[in] config - Config of this geometry (not the ADT zone's geometry) + * \param[in] iZone - Zone whose markers made the ADT + */ + virtual void SetWallDistance(CADTElemClass* WallADT, const CConfig* config, unsigned short iZone = numeric_limits::max()) {} /*! 
* \brief Set wall distances a specific value diff --git a/Common/include/geometry/CPhysicalGeometry.hpp b/Common/include/geometry/CPhysicalGeometry.hpp index 4f8866adca3..21e1ec02b7f 100644 --- a/Common/include/geometry/CPhysicalGeometry.hpp +++ b/Common/include/geometry/CPhysicalGeometry.hpp @@ -104,9 +104,6 @@ class CPhysicalGeometry final : public CGeometry { unsigned long *Elem_ID_BoundTria_Linear{nullptr}; unsigned long *Elem_ID_BoundQuad_Linear{nullptr}; - vector GlobalMarkerStorageDispl; - vector GlobalRoughness_Height; - su2double Streamwise_Periodic_RefNode[MAXNDIM] = {0}; /*!< \brief Coordinates of the reference node [m] on the receiving periodic marker, for recovered pressure/temperature computation only.*/ public: @@ -767,10 +764,14 @@ class CPhysicalGeometry final : public CGeometry { std::unique_ptr ComputeViscousWallADT(const CConfig *config) const override; /*! - * \brief Set the wall distance based on an previously constructed ADT - * \param[in] WallADT - The ADT to compute the wall distance + * \brief Reduce the wall distance based on an previously constructed ADT. + * \details The ADT might belong to another zone, giving rise to lower wall distances + * than those already stored. + * \param[in] WallADT - The ADT to reduce the wall distance + * \param[in] config - ignored + * \param[in] iZone - zone whose markers made the ADT */ - void SetWallDistance(const CConfig *config, CADTElemClass* WallADT) override; + void SetWallDistance(CADTElemClass* WallADT, const CConfig* config, unsigned short iZone) override; /*! * \brief Set wall distances a specific value @@ -781,11 +782,6 @@ class CPhysicalGeometry final : public CGeometry { } } - /*! - * \brief Set roughness values for markers in a global array. - */ - void SetGlobalMarkerRoughness(const CConfig* config); - /*! * \brief For streamwise periodicity, find & store a unique reference node on the designated periodic inlet. * \param[in] config - Definition of the particular problem. diff --git a/Common/include/geometry/dual_grid/CPoint.hpp b/Common/include/geometry/dual_grid/CPoint.hpp index 1ef38f5cd24..417b88974ff 100644 --- a/Common/include/geometry/dual_grid/CPoint.hpp +++ b/Common/include/geometry/dual_grid/CPoint.hpp @@ -32,6 +32,7 @@ #include "../../containers/container_decorators.hpp" #include "../../toolboxes/graph_toolbox.hpp" #include +#include "../../toolboxes/ndflattener.hpp" using namespace std; @@ -88,7 +89,14 @@ class CPoint { su2vector Agglomerate; /*!< \brief This flag indicates if the element has been agglomerated. */ su2vector nNeighbor; /*!< \brief Number of neighbors, needed by some numerical methods. */ + + /*--- Closest element on a viscous wall, and distance to it. ---*/ su2activevector Wall_Distance; /*!< \brief Distance to the nearest wall. */ + su2vector ClosestWall_Rank; /*!< \brief Rank of process holding the closest wall element. */ + su2vector ClosestWall_Zone; /*!< \brief Zone index of closest wall element. */ + su2vector ClosestWall_Marker; /*!< \brief Marker index of closest wall element, for given rank and zone index. */ + su2vector ClosestWall_Elem; /*!< \brief Element index of closest wall element, for givenrank, zone and marker index. */ + su2activevector SharpEdge_Distance; /*!< \brief Distance to a sharp edge. */ su2activevector Curvature; /*!< \brief Value of the surface curvature (SU2_GEO). */ su2activevector MaxLength; /*!< \brief The maximum cell-center to cell-center length. */ @@ -418,7 +426,19 @@ class CPoint { * \brief Set the value of the distance to the nearest wall. 
* \param[in] iPoint - Index of the point. * \param[in] distance - Value of the distance. - */ + * \param[in] rankID - Rank of process holding the closest wall element. + * \param[in] zoneID - Zone index of closest wall element. + * \param[in] markerID - Marker index of closest wall element. + * \param[in] elemID - Element index of closest wall element. + */ + inline void SetWall_Distance(unsigned long iPoint, su2double distance, int rankID, unsigned short zoneID, + unsigned short markerID, unsigned long elemID) { + Wall_Distance(iPoint) = distance; + ClosestWall_Rank(iPoint) = rankID; + ClosestWall_Zone(iPoint) = zoneID; + ClosestWall_Marker(iPoint) = markerID; + ClosestWall_Elem(iPoint) = elemID; + } inline void SetWall_Distance(unsigned long iPoint, su2double distance) { Wall_Distance(iPoint) = distance; } /*! @@ -857,4 +877,19 @@ class CPoint { */ void SetIndex(unsigned long iPoint, bool input); + /*! + * \brief Set wall roughnesses according to stored closest wall information. + * \param[in] roughness - Mapping [rank][zone][marker] -> roughness + */ + template + void SetWallRoughness(Roughness_type const& roughness){ + for (unsigned long iPoint=0; iPoint= 0){ + SetRoughnessHeight(iPoint, roughness[rankID][zoneID][markerID]); + } + } + } }; diff --git a/Common/include/geometry/primal_grid/CPrimalGrid.hpp b/Common/include/geometry/primal_grid/CPrimalGrid.hpp index 5697b6fbf07..1a1d4fa1482 100644 --- a/Common/include/geometry/primal_grid/CPrimalGrid.hpp +++ b/Common/include/geometry/primal_grid/CPrimalGrid.hpp @@ -33,6 +33,7 @@ #include #include #include +#include #include "../../CConfig.hpp" diff --git a/Common/include/parallelization/mpi_structure.cpp b/Common/include/parallelization/mpi_structure.cpp index 3f4b540211e..451d03178c5 100644 --- a/Common/include/parallelization/mpi_structure.cpp +++ b/Common/include/parallelization/mpi_structure.cpp @@ -123,41 +123,41 @@ void CBaseMPIWrapper::Error(std::string ErrorMsg, std::string FunctionName){ Abort(currentComm, 0); } -void CBaseMPIWrapper::CopyData(const void* sendbuf, void* recvbuf, int size, Datatype datatype) { +void CBaseMPIWrapper::CopyData(const void* sendbuf, void* recvbuf, int size, Datatype datatype, int recvshift, int sendshift) { switch (datatype) { case MPI_DOUBLE: for (int i = 0; i < size; i++) { - static_cast(recvbuf)[i] = static_cast(sendbuf)[i]; + static_cast(recvbuf)[i+recvshift] = static_cast(sendbuf)[i+sendshift]; } break; case MPI_UNSIGNED_LONG: for (int i = 0; i < size; i++) { - static_cast(recvbuf)[i] = static_cast(sendbuf)[i]; + static_cast(recvbuf)[i+recvshift] = static_cast(sendbuf)[i+sendshift]; } break; case MPI_LONG: for (int i = 0; i < size; i++) { - static_cast(recvbuf)[i] = static_cast(sendbuf)[i]; + static_cast(recvbuf)[i+recvshift] = static_cast(sendbuf)[i+sendshift]; } break; case MPI_UNSIGNED_SHORT: for (int i = 0; i < size; i++) { - static_cast(recvbuf)[i] = static_cast(sendbuf)[i]; + static_cast(recvbuf)[i+recvshift] = static_cast(sendbuf)[i+sendshift]; } break; case MPI_CHAR: for (int i = 0; i < size; i++) { - static_cast(recvbuf)[i] = static_cast(sendbuf)[i]; + static_cast(recvbuf)[i+recvshift] = static_cast(sendbuf)[i+sendshift]; } break; case MPI_SHORT: for (int i = 0; i < size; i++) { - static_cast(recvbuf)[i] = static_cast(sendbuf)[i]; + static_cast(recvbuf)[i+recvshift] = static_cast(sendbuf)[i+sendshift]; } break; case MPI_INT: for (int i = 0; i < size; i++) { - static_cast(recvbuf)[i] = static_cast(sendbuf)[i]; + static_cast(recvbuf)[i+recvshift] = static_cast(sendbuf)[i+sendshift]; } 
break; default: diff --git a/Common/include/parallelization/mpi_structure.hpp b/Common/include/parallelization/mpi_structure.hpp index 3ae5de48abd..a6d2524bb7f 100644 --- a/Common/include/parallelization/mpi_structure.hpp +++ b/Common/include/parallelization/mpi_structure.hpp @@ -503,7 +503,7 @@ class CBaseMPIWrapper { static int Rank, Size; static Comm currentComm; - static void CopyData(const void* sendbuf, void* recvbuf, int size, Datatype datatype); + static void CopyData(const void* sendbuf, void* recvbuf, int size, Datatype datatype, int recvshift=0, int sendshift=0); public: static void Error(std::string ErrorMsg, std::string FunctionName); @@ -570,7 +570,7 @@ class CBaseMPIWrapper { static inline void Allgatherv(const void* sendbuf, int sendcnt, Datatype sendtype, void* recvbuf, const int* recvcnt, const int* displs, Datatype recvtype, Comm comm) { - CopyData(sendbuf, recvbuf, sendcnt, sendtype); + CopyData(sendbuf, recvbuf, sendcnt, sendtype, displs[0]); } static inline void Allgather(const void* sendbuf, int sendcnt, Datatype sendtype, void* recvbuf, int recvcnt, @@ -596,7 +596,7 @@ class CBaseMPIWrapper { static inline void Alltoallv(const void* sendbuf, const int* sendcounts, const int* sdispls, Datatype sendtype, void* recvbuf, const int* recvcounts, const int* recvdispls, Datatype recvtype, Comm comm) { - CopyData(sendbuf, recvbuf, recvcounts[0], recvtype); + CopyData(sendbuf, recvbuf, recvcounts[0], recvtype, recvdispls[0], sdispls[0]); } static inline void Probe(int source, int tag, Comm comm, Status* status) {} diff --git a/Common/include/toolboxes/ndflattener.hpp b/Common/include/toolboxes/ndflattener.hpp new file mode 100644 index 00000000000..8898c65e64a --- /dev/null +++ b/Common/include/toolboxes/ndflattener.hpp @@ -0,0 +1,828 @@ +/*! + * \file ndflattener.hpp + * \brief Flatten pointer-to-pointer-... arrays for MPI communication + * \author M. Aehle + * \version 7.1.1 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2021, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "../containers/C2DContainer.hpp" +#include "../parallelization/mpi_structure.hpp" + +// --- Usage +/*! \page ndflattener_usage NdFlattener usage + * + * # What is an NdFlattener? + * + * NdFlattener is a container to store a deep copy of multidimensional data in a single object that provides a + * generic []...[] interface. The data and their structure are stored in few contiguous arrays, allowing for an + * efficient MPI gathering operation. + * + * # Which problem does it solve? + * In SU2, there is a lot of multidimensional data: data depending on K>1 indices, like a wall property + * depending on the rank, zone index, and marker index. 
+ * + * The data is often stored non-contiguously as a "tree": + * - an array of pointers, each one of which points to + * - (repeat the previous line another (K-2) times) + * - an array of data. + * + * The individual arrays' sizes are usually unrelated to each other. Therefore in addition to this tree of + * pointers-to-...-to-data, usually there is a tree of pointers-to-...-to-size for each lower level. + * Instead of arrays, there might be getter functions for the data and the sizes. + * + * In order to use multidimensional data behind such a non-generic interface as an argument in a function call, + * we usually make the callee aware of the interface and expose all the trees: Either as separate pointers to arrays + * (or functions), which is tedious, or by handing an object in which they naturally live, which exposes much more + * information. + * + * If the callee is an MPI communication operation, this is not possible. Here, a frequent pattern is to copy the data + * into a single contiguous array, which is then communicated alongside with the structure (lengths of the arrays). + * + * The NdFlattener container is a systematic way to store multidimensional data in a single contiguous array, and + * the structure in few additional contiguous arrays. It exposes them through a generic []...[] interface and allows + * for an efficient MPI_Allgatherv-like operation. + * + * To demonstrate the usage of NdFlattener, let us store a property of type su2double that depends on + * the rank, zone index, and marker index. + * + * # Form the local NdFlattener + * by a "recursive function" like this: + * + * auto f_local = + * make_pair( nZone, [=](unsigned long iZone){ + * return make_pair( (Geometry of iZone)->GetnMarker() , [=](unsigned long iMarker){ + * return YOUR_PROPERTY(iZone, iMarker); + * }); + * }); + * NdFlattener<2> nd_local(f_local); + * + * It might be safer to explicitly capture what you need in the lambda expressions. + * + * You can also construct an NdFlattener without any arguments, and use NdFlattener::initialize(f_local). + * + * f_local is + * - a pair of an (exclusive) upper bound for the first index and a function that maps the first index to + * - a pair of an (exclusive) upper bound for the second index and a function that maps the second index to + * - (iterate this if there are more layers) + * - the value corresponding to these indices. + * The template parameter "2" is the number of indices. The data type (default: su2double) and the index type + * (default: unsigned long) are further optional template parameters. + * + * # Form the global NdFlattener + * by "collective communication" like this: + * + * NdFlattener<3> nd_global(Nd_MPI_Environment(), &nd_local); + * + * nd_global's first index is the rank, then the indices of nd_local follow. + * + * The struct Nd_MPI_Environment contains the relevant information about the parallel environment. The default + * values are suitable to communicate su2double data. In order to communicate data of another MPI datatype, + * use e.g. Nd_MPI_Environment(MPI_UNSIGNED_LONG). + * + * You can also construct an NdFlattener without any arguments, and use NdFlattener::initialize(mpi_env, &nd_local). + * + * # Refreshing + * If only the data but not the indices (i.e. the structure of sublists' lengths) have changed, you can refresh + * them with the call + * + * nd_local.refresh(f_local); // , or + * nd_global(Nd_MPI_Environment(), &nd_local); // respectively. 
+ * + * An NdFlattener constructed or initialized from a "recursive function" or "collective communication" must be + * refreshed in the same way. + * + * We also provide a function NdFlattener::initialize_or_refresh, that initializes if not initialized, and refreshes + * otherwise. + * + * # Look-up + * You can access the NdFlattener via a "[]...[]" interface that resembles multidimensional arrays + * or pointer-to-pointer-... arrays. If you provide all of the K indices, this returns a reference + * to the data element. If you provide less than K indices, you obtain an IndexAccumulator object + * to which you can later pass more indices. With respect to the above example: + * + * std::cout << nd_global[rank][zone][marker]; // e.g. 4.0 + * auto nd_g_rank = nd_global[rank]; + * nd_g_rank[zone][marker] *= 2.0; + * std::cout << nd_g_rank[zone][marker]; // e.g. 8.0 + * + * You may use the IndexAccumulator repeatedly as long as the accessed NdFlattener exists. + * As NdFlatteners are always deep copies, the above block would not change nd_local or the non-contiguous + * data from which it was copied. + * + * Each index is checked to lie in the correct range dictated by the NdFlattener and the + * previous indices. You can obtain the minimal overflowing index with `size`: + * + * auto nd_g_rank = nd_global[rank]; + * std::cout << nd_g_rank[zone].size(); + * + * When all indices except the last one are fixed, the corresponding data is stored contiguously and a pointer + * to this 1D array can be retrieved by + * + * nd_global[rank][zone].data() + * + * # Constness + * The reference returned by the last `operator[]`, and the pointer returned by `data()`, are const if + * - the accessed NdFlattener object was const, or + * - one of the intermediate IndexAccumulator objects was qualified as const. + * + * This reflects the semantic meaning of const: + * + * NdFlattener<2> nd2(...); + * + * template set(T& t) { t[0] = 1.0; } + * set( nd2[0] ); // OK + * + * template set_const(T const& t) { t[0] = 1.0; } + * set_const( nd2[0] ); // does not compile + * + * # Unit tests + * The interface described here is tested in UnitTests/Common/toolboxes/ndflattener_tests.cpp, which may also + * serve as an addition example of how to use the NdFlattener. + */ + +// --- Implementation details--- +/*! \page ndflattener_implementationdetails Implementation details of NdFlattener + * If your array has K indices, instantiate this class with template parameter K, + * which is derived recursively from this class with template parameter (K-1). + * In each layer, there is an array: Of type Data for K=1, and of type Index for K>1. + * + * The data array of K=1 contains the values of the array A in lexicographic ordering: + * [0]...[0][0][0], [0]...[0][0][1], ..., [0]...[0][0][something], + * [0]...[0][1][0], [0]...[0][1][1], ..., [0]...[0][1][something], + * ..., + * [0]...[1][0][0], [0]...[1][0][1], ..., [0]...[1][0][something], + * ... ..., + * [1]...[0][0][0], [1]...[0][0][1], ..., [1]...[0][0][something], + * ... ... ... + * Let us call each row in this representation a "last-index list". + * Note that they might have different lengths, so "something" might stand for different + * values here. Last-index lists can also be empty. + * + * The indices array of K=2 contains the indices of the (K=1)-data array at which a new + * last-index list starts. If a last-index list is empty, the are repetitive entries in the + * indices array. 
+ * + * The indices array of K=3 contains the indices of the (K=2)-indices array at which the + * last-but-two index increases by one or drops to zero. If the last-but-two index is increased + * by more than one, there are repetitive entries. + * + * Etc. etc, up to the indices array at layer K. + * + * To form such a structure, we typically iterate twice through the pointer-to-pointer-... + * array: The first time we get to know how much space to reserve in each layer, then we + * allocate it and during the second iteration we copy the data. + */ + +template +class NdFlattener; + +/*! \struct Nd_MPI_Environment + * \brief Contains information for the collective communication of NdFlatteners. + * + * The default arguments in the constructor are chosen in a sensible way: + * - To communicate su2double data, just call Nd_MPI_Environment(). + * - To communicate unsigned long data, call Nd_MPI_Environment(MPI_UNSIGNED_LONG). + */ +struct Nd_MPI_Environment { + using MPI_Allgather_t = decltype(&(SU2_MPI::Allgather)); + using MPI_Allgatherv_t = decltype(&(SU2_MPI::Allgatherv)); + using MPI_Datatype_t = typename SU2_MPI::Datatype; + using MPI_Communicator_t = typename SU2_MPI::Comm; + + const MPI_Datatype_t mpi_data; + const MPI_Datatype_t mpi_index; + MPI_Communicator_t comm; + MPI_Allgather_t MPI_Allgather_fun; + MPI_Allgatherv_t MPI_Allgatherv_fun; + const int rank; + const int size; + + Nd_MPI_Environment(MPI_Datatype_t mpi_data = MPI_DOUBLE, + MPI_Datatype_t mpi_index = MPI_UNSIGNED_LONG, + MPI_Communicator_t comm = SU2_MPI::GetComm(), + MPI_Allgather_t MPI_Allgather_fun = &(SU2_MPI::Allgather), + MPI_Allgatherv_t MPI_Allgatherv_fun = &(SU2_MPI::Allgatherv)) + : mpi_data(mpi_data), + mpi_index(mpi_index), + comm(comm), + MPI_Allgather_fun(MPI_Allgather_fun), + MPI_Allgatherv_fun(MPI_Allgatherv_fun), + rank(SU2_MPI::GetRank()), + size(SU2_MPI::GetSize()) {} +}; + +namespace helpers { +/*! \class IndexAccumulator + * \brief Data structure holding an offset for the NdFlattener, to provide a []...[]-interface. + * \details Derived from IndexAccumulator_Base, specifying the operator[] method: + * - For N==1, the structure has already read all indices but one. So after this method has read the last + * index, return a (possibly const) reference to the data. An additional function data() should be provided. + * - For N>1, more indices have to be read, return an IndexAccumulator. Nd_t_lower is the + * NdFlattener type with one index less. Nd_t_lower is const-qualified if Nd_t is const-qualified, or if the + * index look-up is performed in a const IndexAccumulator. + * \tparam N - Number of missing parameters + * \tparam Nd_t - Type of the accessed NdFlattener, should be NdFlattener + * \tparam Check - if true, raise error if index to operator[] is not in range + */ +/*! \class IndexAccumulator_Base + * \brief Parent class of IndexAccumulator. + * \details IndexAccumulator provides the operator[] method. + */ +template +class IndexAccumulator_Base { + public: + static constexpr size_t N = N_; + using Nd_t = Nd_t_; + using Index_t = typename Nd_t::Index_t; + + /*! \brief Return exclusive upper bound for next index. + */ + Index_t size() const { return size_; } + + protected: + Nd_t& nd; /*!< \brief The accessed NdFlattener. */ + const Index_t offset; /*!< \brief Index in the currently accessed layer. */ + const Index_t size_; /*!< \brief Exclusive upper bound for the next index. 
*/ + + IndexAccumulator_Base(Nd_t& nd, Index_t offset, Index_t size) : nd(nd), offset(offset), size_(size) {} + + FORCEINLINE void CheckBound(Index_t i) const { + assert(i < size()); + if (Check) { + if (i >= size()) SU2_MPI::Error("NdFlattener: Index out of range.", CURRENT_FUNCTION); + } + } +}; + +template +class IndexAccumulator : public IndexAccumulator_Base { + public: + using Base = IndexAccumulator_Base; + static constexpr size_t N = N_; + using Nd_t = Nd_t_; + using Index_t = typename Nd_t::Index_t; + + IndexAccumulator(Nd_t& nd, Index_t offset, Index_t size) : Base(nd, offset, size) {} + + /*! The Base of NdFlattener is NdFlattener, but do also preserve constness. + */ + using Nd_Base_t = su2conditional_t< + std::is_const::value, + const typename Nd_t::Base, + typename Nd_t::Base + >; + /*! Return type of operator[]. */ + using LookupType = IndexAccumulator; + /*! Return type of operator[] const. */ + using LookupType_const = IndexAccumulator; + using Base::nd; + using Base::offset; + using Base::size; + using Base::CheckBound; + + /*! \brief Read one more index, checking whether it is in the range dictated by the NdFlattener and + * previous indices. Non-const version. + * \param[in] i - Index. + */ + LookupType operator[](Index_t i) { + CheckBound(i); + const Index_t new_offset = nd.GetIndices()[offset + i]; + const Index_t new_size = nd.GetIndices()[offset + i + 1] - new_offset; + return LookupType(nd, new_offset, new_size); + } + /*! \brief Read one more index, const version. + * \param[in] i - Index. + */ + LookupType_const operator[](Index_t i) const { + CheckBound(i); + const Index_t new_offset = nd.GetIndices()[offset + i]; + const Index_t new_size = nd.GetIndices()[offset + i + 1] - new_offset; + return LookupType_const(nd, new_offset, new_size); + } +}; + +template +class IndexAccumulator<1, Nd_t_, Check> : public IndexAccumulator_Base<1, Nd_t_, Check> { + public: + using Base = IndexAccumulator_Base<1, Nd_t_, Check>; + static constexpr size_t N = 1; + using Nd_t = Nd_t_; + using Index_t = typename Nd_t::Index_t; + + IndexAccumulator(Nd_t& nd, Index_t offset, Index_t size) : Base(nd, offset, size) {} + + /*! Return type of operator[]. + * \details Data type of NdFlattener, but do also preserve constness. + */ + using LookupType = su2conditional_t< + std::is_const::value, + const typename Nd_t::Data_t, + typename Nd_t::Data_t + >; + using LookupType_const = const typename Nd_t::Data_t; + using Base::nd; + using Base::offset; + using Base::size; + using Base::CheckBound; + + /*! \brief Return (possibly const) reference to the corresponding data element, checking if the index is in its range. + * Non-const version. + * \details The returned reference is const if and only if the type of the accessed NdFlattener is const-qualified. + * Even though the original NdFlattener might be non-const, marking any IndexAccumulator as const during index look-up + * will invoke the const version of operator[], which turns the NdFlattener type to const. + * \param[in] i - Last index. + */ + LookupType& operator[](Index_t i) { + CheckBound(i); + return nd.data()[offset + i]; + } + /*! \brief Return const reference to the corresponding data element, checking if the index is in its range. + * \param[in] i - Last index. + */ + LookupType_const& operator[](Index_t i) const { + CheckBound(i); + return nd.data()[offset + i]; + } + + /*! \brief Return (possibly const) pointer to data, non-const version. 
+ * \details If all indices except the last one are fixed, the corresponding data + * is stored contiguously. Return a pointer to the beginning of the + * block. If this IndexAccumulator was generated from a non-const NdFlattener, the + * pointer is non-const, otherwise it is const. + */ + LookupType* data() { return &(this->operator[](0)); } + /*! \brief Return const pointer to data. + */ + LookupType_const* data() const { return &(this->operator[](0)); } +}; + +} // namespace helpers + +/*! + * \class NdFlattener + * \brief Serialize pointer-to-pointer-... array into one 1D array, keeping track + * of the offsets in few additional 1D arrays. + * + * The pointer-to-pointer-... array can be provided by a nested lambda function + * ('recursive function') or by gathering such arrays from MPI processes ('collective + * communication'). After initializing an NdFlattener with either of these data, + * it can be refreshed in the same way after the the pointer-to-pointer-... array's + * values (but not its structure) have changed. + * + * \tparam K - number of indices + * \tparam Data_t - Type of stored array data + * \tparam Index - Type of index + */ + +template +class NdFlattener : public NdFlattener { + public: + static constexpr size_t K = K_; + using Data_t = Data_t_; + using Index_t = Index_t_; + + using Base = NdFlattener; + using CurrentLayer = NdFlattener; + using LowestLayer = typename Base::LowestLayer; // the K=1 class + + private: + /*! \brief Number of nodes in this layer. + * + * For the layer K=1, nNodes will be the number of data points. + * For a layer K>1, nNodes will be the number of sublists. + */ + Index_t nNodes = 0; + + /*! \brief Iterator used at construction, runs from 0 to (nNodes-1). */ + Index_t iNode = 0; + + /*! \brief Indices in the lower layer's indices or data array */ + std::vector indices; + + /*=== Getters ===*/ + public: + Index_t* GetIndices() { return indices.data(); } + const Index_t* GetIndices() const { return indices.data(); } + + /*=== Outputting ===*/ + + /*! \brief Write in Python-list style. + * + * Like this: [[1, 2], [10, 20, 30]] + */ + friend std::ostream& operator<<(std::ostream& output, NdFlattener const& nd) { + nd.toPythonString_fromto(output, 0, nd.size()); + return output; + } + + protected: + /*! \brief Write to stream in Python-list style, using the data of the + * indices array between 'from' (inclusive) and 'to' (exclusive). + * + * Like this: [[1, 2], [10, 20, 30]] + * \param[in] output - Stream + * \param[in] from - Beginning of the representation in the indices array. + * \param[in] to - Ending of the representation in the indices array. + */ + void toPythonString_fromto(std::ostream& output, Index_t from, Index_t to) const { + output << "["; + for (Index_t i = from; i < to;) { + Base::toPythonString_fromto(output, indices[i], indices[i + 1]); + if (++i < to) output << ", "; + } + output << "]"; + } + + public: + /*! \brief Basic constructor. Afterwards, initialization can be done with initialize_or_refresh. + * + * Called recursively when a derived class (higher K) is constructed. + */ + NdFlattener() {} + + /*! \brief Constructor which calls initialize_or_refresh. + * + */ + template + NdFlattener(ARGS const&... args) { + initialize_or_refresh(args...); + } + + /*! \brief Initialize or refresh the NdFlattener. + * \details Either a 'recursive function' or 'collective communication' + * may be used. 
When the NdFlattener does not hold data yet, it is + * initialized, meaning that the data are collected and the indices arrays + * are allocated and filled. Otherwise it is refreshed, meaning that the data are + * recollected under the assumption that the indices arrays did not change. + */ + template + void initialize_or_refresh(ARGS const&... args) { + if (initialized()) { + refresh(args...); + } else { + initialize(args...); + } + } + + /*! \brief Initialization status of the NdFlattener. + * \returns true if the NdFlattener has been initialized + */ + bool initialized() { return nNodes > 0; } + + protected: + /*! \brief Allocate the indices array after \a nNodes has been determined. + */ + void allocate() { + indices.resize(nNodes + 1); + indices[0] = 0; + Base::allocate(); + } + + /*! \brief Set \a iNode to 0 in all layers. + */ + void reset_iNode() { + iNode = 0; + Base::reset_iNode(); + } + + /*=== Construct from 'recursive function' ===*/ + public: + /*! \brief Initialize from a 'recursive function'. + * + * The function should return a pair. Its first entry is the number of children. + * Its second entry is a function with the same meaning, recursively + * one layer down. + * \param f - the 'recursive function' + */ + template + void initialize(f_type f) { + count_f(f); + allocate(); + set_f(f, false); + } + + /*! \brief Refresh the data according to the 'recursive function' + * + * The NdFlattener must have been constructed with a 'recursive function'. + * Now refresh the values with another 'recursive function'. The subarray lengths + * resulting from both 'recursive functions' must coincide, as the indices arrays + * are not changed. + * + * \param f - the 'recursive function' + * \tparam f_type - to allow for any type of the 'recursive function' + */ + template + void refresh(f_type f) { + reset_iNode(); + set_f(f, true); + } + + protected: + /*! \brief Determine the space required for reading the 'recursive function'. + * + * \param f - the 'recursive function' + */ + template + void count_f(f_type f) { + Index_t nChild = f.first; + for (Index_t iChild = 0; iChild < nChild; iChild++) { + nNodes++; + Base::count_f(f.second(iChild)); + } + } + + /*! \brief Read the 'recursive function' into the allocated arrays. + * + * \param f - the 'recursive function' + * \param refresh - if true, the object is already initialized and only the data + * in layer 1 have to be overwritten + * \tparam f_type - to allow for any type of the 'recursive function' + */ + template + void set_f(f_type f, bool refresh) { + Index_t nChild = f.first; + for (Index_t iChild = 0; iChild < nChild; iChild++) { + Base::set_f(f.second(iChild), refresh); + if (!refresh) { + indices[iNode + 1] = indices[iNode] + f.second(iChild).first; + } else { + if (indices[iNode + 1] != indices[iNode] + f.second(iChild).first) { + SU2_MPI::Error("NdFlattener: Structure has changed, cannot refresh.", CURRENT_FUNCTION); + } + } + iNode++; + } + } + + /*=== Construct with Allgatherv ===*/ + + public: + /*! \brief Initialize a flattener with K indices by combining distributed flatteners with (K-1) indices each. + * + * The new first index will encode the rank of the process. Data is exchanged in MPI::Allgatherv-style + * collective communication. + * \param[in] mpi_env - The MPI environment used for communication. + * \param[in] local_version - The local NdFlattener structure with (K-1) indices. 
+ */ + template + void initialize(MPI_Environment_type const& mpi_env, Base const& local_version) { + // [k][r] is number of all nodes in layer (k+1), rank r in the new structure + su2matrix Nodes_all(K, mpi_env.size); + // the first index decides on the rank, so there is exactly one node per rank + for (int r = 0; r < mpi_env.size; r++) { + nNodes += Nodes_all[K - 1][r] = 1; + } + // set the lower layers' nNodes and Nodes_all[k] + Base::count_g(mpi_env, Nodes_all, local_version); + + allocate(); + + indices[0] = 0; + for (int r = 0; r < mpi_env.size; r++) { + indices[r + 1] = indices[r] + Nodes_all[K - 2][r]; + } + Base::set_g(mpi_env, Nodes_all, local_version); + } + + /*! \brief Refresh the data by MPI collective communication. + * + * The NdFlattener must have been constructed by MPI collective communication. + * Now refresh the values with another collective communication. The subarray lengths + * resulting from both collective communications must coincide, as the indices arrays + * are not changed. + * \param[in] mpi_env - The MPI environment used for communication. + * \param[in] local_version - The local NdFlattener structure. + */ + template + void refresh(MPI_Environment_type const& mpi_env, Base const& local_version) { + su2matrix Nodes_all_0(1, mpi_env.size); + LowestLayer::count_g(mpi_env, Nodes_all_0, local_version); + LowestLayer::set_g(mpi_env, Nodes_all_0, local_version); + } + + protected: + /*! \brief Count the distributed flatteners' numbers of nodes, and set nNodes. + * + * \param[in] mpi_env - MPI environment for communication + * \param[out] Nodes_all - [k][r] is set to number of nodes in layer (k+1), rank r. + * \param[in] local_version - local instance to be send to the other processes + */ + void count_g(Nd_MPI_Environment const& mpi_env, su2matrix& Nodes_all, CurrentLayer const& local_version) { + nNodes = 0; + // gather numbers of nodes in the current layer from all processes + mpi_env.MPI_Allgather_fun(&(local_version.nNodes), 1, mpi_env.mpi_index, Nodes_all[K - 1], 1, mpi_env.mpi_index, + mpi_env.comm); + for (int r = 0; r < mpi_env.size; r++) { + nNodes += Nodes_all[K - 1][r]; + } + Base::count_g(mpi_env, Nodes_all, local_version); + } + + /*! \brief Gather the distributed flatteners' data and index arrays into the allocated arrays. + * + * \param[in] mpi_env - MPI environment for communication + * \param[in] Nodes_all - [k][r] is the number of nodes in layer (k+1), rank r. + * \param[in] local_version - local instance to be sent to the other processes + */ + void set_g(Nd_MPI_Environment const& mpi_env, su2matrix const& Nodes_all, + CurrentLayer const& local_version) { + std::vector Nodes_all_K_as_int(mpi_env.size); + std::vector Nodes_all_k_cumulated( mpi_env.size + 1); + //< [r] is the number of nodes in the current layer, summed over all processes with rank below r, **plus one**. + // Used as displacements in Allgatherv, as we do not want to transfer the initial zeros, but we want to transfer the + // last element of indices, which is the local nNodes of the layer below. Note that MPI needs indices of type 'int'. 
+ Nodes_all_k_cumulated[0] = 1; + for (int r = 0; r < mpi_env.size; r++) { + Nodes_all_k_cumulated[r + 1] = Nodes_all_k_cumulated[r] + Nodes_all[K - 1][r]; + Nodes_all_K_as_int[r] = Nodes_all[K - 1][r]; + } + mpi_env.MPI_Allgatherv_fun(local_version.indices.data() + 1, Nodes_all[K - 1][mpi_env.rank], mpi_env.mpi_index, + indices.data(), Nodes_all_K_as_int.data(), Nodes_all_k_cumulated.data(), + mpi_env.mpi_index, mpi_env.comm); + // shift indices + for (int r = 1; r < mpi_env.size; r++) { + Index_t first_entry_to_be_shifted = Nodes_all_k_cumulated[r]; + Index_t last_entry_to_be_shifted = Nodes_all_k_cumulated[r + 1] - 1; + Index_t shift = indices[first_entry_to_be_shifted - 1]; + for (Index_t i = first_entry_to_be_shifted; i <= last_entry_to_be_shifted; i++) { + indices[i] += shift; + } + } + + Base::set_g(mpi_env, Nodes_all, local_version); + } + + /*== Data access ==*/ + public: + Index_t size() const { // should not be called by recursion, is incorrect in lower layers! + return nNodes; + } + + /*! \brief Look-up with IndexAccumulator, non-const version. + */ + helpers::IndexAccumulator > operator[](Index_t i0) { + return helpers::IndexAccumulator >(*this, 0, size())[i0]; + } + /*! \brief Look-up with IndexAccumulator, const version. + */ + helpers::IndexAccumulator > operator[](Index_t i0) const { + return helpers::IndexAccumulator >(*this, 0, size())[i0]; + } +}; + +template +class NdFlattener<1, Data_t_, Index_t_> { + public: + static constexpr size_t K = 1; + using Data_t = Data_t_; + using Index_t = Index_t_; + + using CurrentLayer = NdFlattener<1, Data_t, Index_t>; + using LowestLayer = CurrentLayer; + + private: + Index_t nNodes = 0; + Index_t iNode = 0; + std::vector data_; + + /*=== Outputting ===*/ + protected: + void toPythonString_fromto(std::ostream& output, Index_t from, Index_t to) const { + output << "["; + for (Index_t i = from; i < to;) { + output << data_[i]; + if (++i < to) output << ", "; + } + output << "]"; + } + + public: + NdFlattener(void) {} + + template + NdFlattener(ARGS const&... args) { + initialize_or_refresh(args...); + } + + template + void initialize_or_refresh(ARGS const&... args) { + if (initialized()) { + refresh(args...); + } else { + initialize(args...); + } + } + + bool initialized() { return nNodes > 0; } + + // Functionality to initialize/refresh from a recursive function + // could be desirable also for N=1, in order to gather from such + // NdFlatteners an NdFlattener with N=2. + // Gathering an NdFlattener with N=1 is not meaningful however. 
+ + template + void initialize(f_type f) { + count_f(f); + allocate(); + set_f(f, false); + } + + template + void refresh(f_type f) { + reset_iNode(); + set_f(f, true); + } + + protected: + void allocate() { data_.resize(nNodes); } + + void reset_iNode() { iNode = 0; } + + /*=== Construct from 'recursive function' ===*/ + protected: + template + void count_f(f_type f) { + nNodes += f.first; + } + + template + void set_f(f_type f, bool refresh) { + Index_t nChild = f.first; + for (Index_t iChild = 0; iChild < nChild; iChild++) { + data_[iNode] = f.second(iChild); + iNode++; + } + } + + /*=== Construct with Allgatherv ===*/ + protected: + void count_g(Nd_MPI_Environment const& mpi_env, su2matrix& Nodes_all, CurrentLayer const& local_version) { + nNodes = 0; + // gather numbers of nodes in the current layer from all processes + mpi_env.MPI_Allgather_fun(&(local_version.nNodes), 1, mpi_env.mpi_index, Nodes_all[0], 1, mpi_env.mpi_index, + mpi_env.comm); + for (int r = 0; r < mpi_env.size; r++) { + nNodes += Nodes_all[0][r]; + } + } + + void set_g(Nd_MPI_Environment const& mpi_env, su2matrix const& Nodes_all, + CurrentLayer const& local_version) { + std::vector Nodes_all_0_as_int(mpi_env.size); + std::vector Nodes_all_0_cumulated(mpi_env.size + 1); + Nodes_all_0_cumulated[0] = 0; + for (int r = 0; r < mpi_env.size; r++) { + Nodes_all_0_cumulated[r + 1] = Nodes_all_0_cumulated[r] + Nodes_all[0][r]; + Nodes_all_0_as_int[r] = Nodes_all[0][r]; + } + + mpi_env.MPI_Allgatherv_fun(local_version.data_.data(), Nodes_all[0][mpi_env.rank], mpi_env.mpi_data, data_.data(), + Nodes_all_0_as_int.data(), Nodes_all_0_cumulated.data(), mpi_env.mpi_data, mpi_env.comm); + } + + /*== Access to data ==*/ + // Calling the following functions is a bit strange, because if you have only one level, + // you do not really benefit from NdFlattener's functionality. + public: + Index_t size() const { // should not be called by recursion, is incorrect in lower layers! + return nNodes; + } + /*! \brief Data look-up, non-const version. + */ + Data_t& operator[](Index_t i0) { return data_[i0]; } + /*! \brief Data look-up, const version. 
+ */ + const Data_t& operator[](Index_t i0) const { return data_[i0]; } + + Data_t* data() { return data_.data(); } + + const Data_t* data() const { return data_.data(); } +}; diff --git a/Common/src/fem/fem_wall_distance.cpp b/Common/src/fem/fem_wall_distance.cpp index b4d2c70c1a0..d8830797e5f 100644 --- a/Common/src/fem/fem_wall_distance.cpp +++ b/Common/src/fem/fem_wall_distance.cpp @@ -172,7 +172,7 @@ void CMeshFEM_DG::SetWallDistance(su2double val){ } } -void CMeshFEM_DG::SetWallDistance(const CConfig *config, CADTElemClass *WallADT){ +void CMeshFEM_DG::SetWallDistance(CADTElemClass *WallADT, const CConfig* config, unsigned short iZone){ /*--------------------------------------------------------------------------*/ /*--- Step 3: Determine the wall distance of the integration points of ---*/ diff --git a/Common/src/geometry/CGeometry.cpp b/Common/src/geometry/CGeometry.cpp index cf708b8a112..3f1c9066792 100644 --- a/Common/src/geometry/CGeometry.cpp +++ b/Common/src/geometry/CGeometry.cpp @@ -29,6 +29,7 @@ #include "../../include/geometry/elements/CElement.hpp" #include "../../include/parallelization/omp_structure.hpp" #include "../../include/toolboxes/geometry_toolbox.hpp" +#include "../../include/toolboxes/ndflattener.hpp" CGeometry::CGeometry(void) : size(SU2_MPI::GetSize()), @@ -3922,15 +3923,14 @@ void CGeometry::ComputeWallDistance(const CConfig* const* config_container, CGeo /*--- Loop over all zones and compute the ADT based on the viscous walls in that zone ---*/ for (int iZone = 0; iZone < nZone; iZone++){ - CGeometry *geometry = geometry_container[iZone][iInst][MESH_0]; - unique_ptr WallADT = geometry->ComputeViscousWallADT(config_container[iZone]); + unique_ptr WallADT = geometry_container[iZone][iInst][MESH_0]->ComputeViscousWallADT(config_container[iZone]); if (WallADT && !WallADT->IsEmpty()){ allEmpty = false; /*--- Inner loop over all zones to update the wall distances. * It might happen that there is a closer viscous wall in zone iZone for points in zone jZone. ---*/ for (int jZone = 0; jZone < nZone; jZone++){ if (wallDistanceNeeded[jZone]) - geometry_container[jZone][iInst][MESH_0]->SetWallDistance(config_container[jZone], WallADT.get()); + geometry_container[jZone][iInst][MESH_0]->SetWallDistance(WallADT.get(), config_container[jZone], iZone); } } } @@ -3942,5 +3942,28 @@ void CGeometry::ComputeWallDistance(const CConfig* const* config_container, CGeo geometry->SetWallDistance(0.0); } } + /*--- Otherwise, set wall roughnesses. ---*/ + if(!allEmpty){ + /*--- Store all wall roughnesses in a common data structure. 
---*/ + // [iZone][iMarker] -> roughness, for this rank + auto roughness_f = + make_pair( nZone, [config_container,geometry_container,iInst](unsigned long iZone){ + const CConfig* config = config_container[iZone]; + const auto nMarker = geometry_container[iZone][iInst][MESH_0]->GetnMarker(); + + return make_pair( nMarker, [config](unsigned long iMarker){ + return config->GetWallRoughnessProperties(config->GetMarker_All_TagBound(iMarker)).second; + }); + }); + NdFlattener<2> roughness_local(roughness_f); + // [rank][iZone][iMarker] -> roughness + NdFlattener<3> roughness_global(Nd_MPI_Environment(), roughness_local); + // use it to update roughnesses + for(int jZone=0; jZoneGetnRoughWall()>0){ + geometry_container[jZone][iInst][MESH_0]->nodes->SetWallRoughness(roughness_global); + } + } + } } } diff --git a/Common/src/geometry/CPhysicalGeometry.cpp b/Common/src/geometry/CPhysicalGeometry.cpp index 75db70bc909..5bebaded5f0 100644 --- a/Common/src/geometry/CPhysicalGeometry.cpp +++ b/Common/src/geometry/CPhysicalGeometry.cpp @@ -11017,16 +11017,13 @@ std::unique_ptr CPhysicalGeometry::ComputeViscousWallADT(const CC } -void CPhysicalGeometry::SetWallDistance(const CConfig *config, CADTElemClass *WallADT) { +void CPhysicalGeometry::SetWallDistance(CADTElemClass* WallADT, const CConfig* config, unsigned short iZone) { /*--------------------------------------------------------------------------*/ /*--- Step 3: Loop over all interior mesh nodes and compute minimum ---*/ /*--- distance to a solid wall element ---*/ /*--------------------------------------------------------------------------*/ - /*--- Store marker list and roughness in a global array. ---*/ - if (config->GetnRoughWall() > 0) SetGlobalMarkerRoughness(config); - SU2_OMP_PARALLEL if (!WallADT->IsEmpty()) { /*--- Solid wall boundary nodes are present. Compute the wall @@ -11041,49 +11038,11 @@ void CPhysicalGeometry::SetWallDistance(const CConfig *config, CADTElemClass *Wa WallADT->DetermineNearestElement(nodes->GetCoord(iPoint), dist, markerID, elemID, rankID); - nodes->SetWall_Distance(iPoint, min(dist,nodes->GetWall_Distance(iPoint))); - - if (config->GetnRoughWall() > 0) { - auto index = GlobalMarkerStorageDispl[rankID] + markerID; - auto localRoughness = GlobalRoughness_Height[index]; - nodes->SetRoughnessHeight(iPoint, localRoughness); + if(dist < nodes->GetWall_Distance(iPoint)){ + nodes->SetWall_Distance(iPoint, dist, rankID, iZone, markerID, elemID); } } END_SU2_OMP_FOR - } END_SU2_OMP_PARALLEL } - -void CPhysicalGeometry::SetGlobalMarkerRoughness(const CConfig* config) { - - const auto nMarker_All = config->GetnMarker_All(); - - vector recvCounts(size); - auto sizeLocal = static_cast(nMarker_All); // number of local markers - - /*--- Communicate size of local marker array and make an array large enough to hold all data. ---*/ - SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1, MPI_INT, SU2_MPI::GetComm()); - - /*--- Set the global array of displacements, needed to access the correct roughness element. ---*/ - GlobalMarkerStorageDispl.resize(size); - GlobalMarkerStorageDispl[0] = 0; - for (int iRank = 1; iRank < size; iRank++) - GlobalMarkerStorageDispl[iRank] = GlobalMarkerStorageDispl[iRank-1] + recvCounts[iRank-1]; - - /*--- Total size ---*/ - const auto sizeGlobal = GlobalMarkerStorageDispl[size-1] + recvCounts[size-1]; - - /*--- Allocate local and global arrays to hold roughness. 
---*/ - vector localRough(nMarker_All); // local number of markers - GlobalRoughness_Height.resize(sizeGlobal); // all markers including send recieve - - for (auto iMarker = 0u; iMarker < nMarker_All; iMarker++) { - auto wallprop = config->GetWallRoughnessProperties(config->GetMarker_All_TagBound(iMarker)); - localRough[iMarker] = wallprop.second; - } - - /*--- Finally, gather the roughness of all markers. ---*/ - SU2_MPI::Allgatherv(localRough.data(), sizeLocal, MPI_DOUBLE, GlobalRoughness_Height.data(), - recvCounts.data(), GlobalMarkerStorageDispl.data(), MPI_DOUBLE, SU2_MPI::GetComm()); -} diff --git a/Common/src/geometry/dual_grid/CPoint.cpp b/Common/src/geometry/dual_grid/CPoint.cpp index bc568662729..6fd84b1afde 100644 --- a/Common/src/geometry/dual_grid/CPoint.cpp +++ b/Common/src/geometry/dual_grid/CPoint.cpp @@ -130,7 +130,13 @@ void CPoint::FullAllocation(unsigned short imesh, const CConfig *config) { nNeighbor.resize(npoint) = 0; MaxLength.resize(npoint) = su2double(0.0); Curvature.resize(npoint) = su2double(0.0); + Wall_Distance.resize(npoint) = su2double(0.0); + ClosestWall_Rank.resize(npoint) = -1; + ClosestWall_Zone.resize(npoint) = numeric_limits::max(); + ClosestWall_Marker = ClosestWall_Zone; + ClosestWall_Elem.resize(npoint) = numeric_limits::max(); + RoughnessHeight.resize(npoint) = su2double(0.0); SharpEdge_Distance.resize(npoint) = su2double(0.0); @@ -209,3 +215,4 @@ void CPoint::SetCoord_Old() { } void CPoint::SetCoord_SumZero() { parallelSet(Coord_Sum.size(), 0.0, Coord_Sum.data()); } + diff --git a/UnitTests/Common/toolboxes/ndflattener_tests.cpp b/UnitTests/Common/toolboxes/ndflattener_tests.cpp new file mode 100644 index 00000000000..9ec4440e8e6 --- /dev/null +++ b/UnitTests/Common/toolboxes/ndflattener_tests.cpp @@ -0,0 +1,140 @@ +/*! + * \file ndflattener_tests.cpp + * \brief Unit tests for NdFlattener template classes. + * \author M. Aehle + * \version 7.1.1 "Blackbird" + * + * SU2 Project Website: https://su2code.github.io + * + * The SU2 Project is maintained by the SU2 Foundation + * (http://su2foundation.org) + * + * Copyright 2012-2021, SU2 Contributors (cf. AUTHORS.md) + * + * SU2 is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * SU2 is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with SU2. If not, see . 
+ */ + +#include "catch.hpp" +#include "../../Common/include/toolboxes/ndflattener.hpp" + +TEST_CASE("NdFlattener Test", "[NdFlattener]"){ + + int rank; SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank); + int size; SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size); + + /*-- Provide non-flat array --*/ + su2double** A = new su2double*[2]; + A[0] = new su2double[2]; A[0][0] = 0.0; A[0][1] = 1.0; + A[1] = new su2double[3+rank]; + for(int i=0; i<3+rank; i++) + A[1][i] = 2.0 + rank + i; + + /*-- Accessor --*/ + auto f = std::make_pair( (size_t)2, [rank,A](int i) { + return std::make_pair( (size_t)(i==0?2:(3+rank)), [rank,A,i](int j){ + return A[i][j]; + }); + }); + + /*-- Read into flattening structure --*/ + NdFlattener<2> nd2; + nd2.initialize_or_refresh(f); + + /*-- Modify A -> this should not alter nd2 at this point --*/ + A[0][0] = 0.5; + + /*-- Check structure --*/ + REQUIRE( nd2.size() == 2 ); + REQUIRE( nd2[0][0] == 0.0 ); + REQUIRE( nd2[0][1] == 1.0 ); + REQUIRE( nd2[1].size() == 3 + rank ); + for(int i=0; i<3+rank; i++){ + REQUIRE( nd2[1][i] == 2.0 + rank + i ); + } + + /*-- Modify structure. --*/ + nd2[0][0] = 0.7; + nd2[0].data()[1] = 1.7; + + /*-- gather flattening structures of all processes --*/ + NdFlattener<3> nd3(Nd_MPI_Environment(), nd2); + + /*-- Check gathered structure, non-const look-up. --*/ + REQUIRE( nd3.size() == size ); + for(int r=0; r& nd3_const = nd3; + REQUIRE( nd3_const.size() == size ); + for(int r=0; r a1( + std::make_pair( (size_t)(3+rank), [rank,A](int i) { + return (unsigned long)(2+rank+i); + }) + ); + const NdFlattener<1, unsigned long>& a1_const = a1; + REQUIRE( a1.size() == 3 + rank ); + for(int i=0; i<3+rank; i++){ + REQUIRE( a1[i] == 2 + rank + i ); + REQUIRE( a1_const[i] == 2 + rank + i ); + } + a1[0] = 1; + REQUIRE( a1_const.data()[0] == 1 ); + a1.data()[0] = 2 + rank; + REQUIRE( a1_const[0] == 2 + rank ); + const NdFlattener<2, unsigned long> a2_const(Nd_MPI_Environment(MPI_UNSIGNED_LONG), a1); + REQUIRE( a2_const.size() == size ); + for(int r=0; r