From d1a716e371d7cc8a5f23a0f645edd57eb9383e66 Mon Sep 17 00:00:00 2001
From: Matteo Cicuttin <datafl4sh@toxicnet.eu>
Date: Wed, 18 Aug 2021 19:02:24 +0200
Subject: [PATCH] reference_element is MPI-ready.

---
 include/entity.h            |   9 +++
 include/gmsh_io.h           |  85 +++-----------------
 include/gmsh_mpi.h          | 156 +++++++++++++++++++++++++++++++++---
 include/physical_element.h  |   2 -
 include/point.hpp           |  12 +--
 include/reference_element.h |   9 +++
 src/entity.cpp              |  13 +++
 src/gmsh_io.cpp             |  52 ++++++++++--
 src/gmsh_mpi.cpp            |   5 +-
 src/reference_element.cpp   |  35 ++++++++
 10 files changed, 274 insertions(+), 104 deletions(-)

diff --git a/include/entity.h b/include/entity.h
index 7dfabde..17aaa5d 100644
--- a/include/entity.h
+++ b/include/entity.h
@@ -2,6 +2,10 @@
 
 #include <iostream>
 
+#ifdef USE_MPI
+#include "gmsh_mpi.h"
+#endif /* USE_MPI */
+
 #include "reference_element.h"
 #include "physical_element.h"
 #include "connectivity.h"
@@ -112,6 +116,11 @@ public:
     std::vector<physical_element>::const_iterator begin() const { return physical_cells.begin(); }
     std::vector<physical_element>::const_iterator end() const { return physical_cells.end(); }
 
+#ifdef USE_MPI
+    void mpi_send(int dst, MPI_Comm comm);
+    void mpi_recv(int src, MPI_Comm comm);
+#endif /* USE_MPI */
+
 };
 
 std::pair<std::vector<point_3d>, std::vector<quadrature_point_3d>>
diff --git a/include/gmsh_io.h b/include/gmsh_io.h
index 9ab503d..885dcf0 100644
--- a/include/gmsh_io.h
+++ b/include/gmsh_io.h
@@ -6,7 +6,7 @@
 #include <type_traits>
 
 #ifdef USE_MPI
-#include <mpi/mpi.h>
+#include "gmsh_mpi.h"
 #endif /* USE_MPI */
 
 #include "entity.h"
@@ -62,83 +62,12 @@ struct boundary_descriptor
     {}
 };
 
-#if ((!defined(NDEBUG)) && defined(USE_MPI))
-  #define ASSERT_MPI_RANK_0 { \
-    int __rank; \
-    MPI_Comm_rank(MPI_COMM_WORLD, &__rank); \
-    assert(__rank == 0); \
-  }
-#else
-  #define ASSERT_MPI_RANK_0 (void) 0
-#endif
+
 
 #define IMPLIES(a, b) ((not(a)) or (b))
 #define IFF(a,b) (IMPLIES(a,b) and IMPLIES(b,a))
 
-template<typename T>
-void
-priv_MPI_Bcast(std::vector<T>& vec, int root, MPI_Comm comm)
-{
-    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
-
-    int rank;
-    MPI_Comm_rank(comm, &rank);
-    if (rank == root)
-    {
-        size_t vsize = vec.size();
-        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
-    }
-    else
-    {
-        size_t vsize;
-        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-        vec.resize(vsize);
-        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
-    }
-}
-
-template<typename T1, typename T2>
-void
-priv_MPI_Bcast(std::map<T1, std::vector<T2>>& map, int root, MPI_Comm comm)
-{
-    static_assert(std::is_trivially_copyable<T1>::value, "Type must be trivially copyable");
-    static_assert(std::is_trivially_copyable<T2>::value, "Type must be trivially copyable");
-
-    int rank;
-    MPI_Comm_rank(comm, &rank);
-
-    if (rank == root)
-    {
-        size_t msize = map.size();
-        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-        for (auto& [l, rv] : map)
-        {
-            auto ll = l;
-            MPI_Bcast(&ll, sizeof(T1), MPI_PACKED, root, comm);
-            size_t vsize = rv.size();
-            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
-        }
-    }
-    else
-    {
-        size_t msize;
-        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-        for (size_t i = 0; i < msize; i++)
-        {
-            T1 l;
-            MPI_Bcast(&l, sizeof(T1), MPI_PACKED, root, comm);
-            size_t vsize;
-            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-            std::vector<T2> rv;
-            rv.resize(vsize);
-            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
-            map[l] = std::move(rv);
-        }
-    }
-}
 
 class model
 {
@@ -146,8 +75,13 @@ class model
     int approximation_order;
     int num_partitions;
 
-    std::map<int, std::vector<element_key>>     boundary_map;
-    std::map<int, std::vector<int>>             comm_map;
+    /* Map from boundary tag to all the faces of the entity with that tag */
+    std::map<int, std::vector<element_key>>     boundary_map;
+
+    /* Map from boundary tag to all the partitions it belongs to */
+    std::map<int, std::vector<int>>             comm_map;
+
+    /* Map from the partition number to the entities belonging to that partition */
     std::map<int, std::vector<int>>             partition_map;
 
     std::vector<entity> entities;
@@ -168,6 +102,7 @@ class model
 #endif
 
     void    make_boundary_to_faces_map(void);
+    void    make_partition_to_entities_map(void);
 
 public:
     model();
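The ASSERT_MPI_RANK_0 macro and the priv_MPI_Bcast overloads removed from gmsh_io.h are not dropped: they move verbatim into include/gmsh_mpi.h in the next file of this patch, where all the MPI plumbing now lives. The broadcast helpers are meant to be called symmetrically on every rank. A minimal usage sketch for the newly documented maps (illustration only, not part of the patch; the tag and entity numbers are made up):

    std::map<int, std::vector<int>> partition_map;
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0)
        partition_map[1] = {10, 11, 12};    /* partition 1 -> entity tags */
    /* every rank makes the same call; non-root ranks receive the contents */
    priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);
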
diff --git a/include/gmsh_mpi.h b/include/gmsh_mpi.h
index c428de0..28361d6 100644
--- a/include/gmsh_mpi.h
+++ b/include/gmsh_mpi.h
@@ -1,20 +1,152 @@
 #pragma once
 
+#include <vector>
+#include <map>
+#include <array>
+
 #include <mpi/mpi.h>
-#include "gmsh.h"
 
-namespace g    = gmsh;
-namespace go   = gmsh::option;
-namespace gm   = gmsh::model;
-namespace gmm  = gmsh::model::mesh;
-namespace gmo  = gmsh::model::occ;
-namespace gmg  = gmsh::model::geo;
-namespace gv   = gmsh::view;
+#include "eigen.h"
 
-using gvp_t = gmsh::vectorpair;
+#if ((!defined(NDEBUG)) && defined(USE_MPI))
+  #define ASSERT_MPI_RANK_0 { \
+    int __rank; \
+    MPI_Comm_rank(MPI_COMM_WORLD, &__rank); \
+    assert(__rank == 0); \
+  }
+#else
+  #define ASSERT_MPI_RANK_0 (void) 0
+#endif
 
-/* Macros used to annotate GMSH integer inputs */
-#define DIMENSION(x) x
+template<typename T>
+void
+priv_MPI_Send(Eigen::Matrix<T, Eigen::Dynamic, 1>& vec, int dst, MPI_Comm comm)
+{
+    Eigen::Index vsize = vec.size();
+    MPI_Send(&vsize, sizeof(Eigen::Index), MPI_PACKED, dst, 0, comm);
+    MPI_Send(vec.data(), vec.size()*sizeof(T), MPI_PACKED, dst, 0, comm);
+}
 
-void MPIGMSH_getEntities(gvp_t&, int, MPI_Comm);
+template<typename T>
+void
+priv_MPI_Recv(Eigen::Matrix<T, Eigen::Dynamic, 1>& vec, int src, MPI_Comm comm)
+{
+    MPI_Status status;
+    Eigen::Index vsize;
+    MPI_Recv(&vsize, sizeof(Eigen::Index), MPI_PACKED, src, 0, comm, &status);
+    vec.resize(vsize);
+    MPI_Recv(vec.data(), vec.size()*sizeof(T), MPI_PACKED, src, 0, comm, &status);
+}
+
+template<typename T>
+void
+priv_MPI_Send(Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>& mat,
+              int dst, MPI_Comm comm)
+{
+    std::array<Eigen::Index, 2> msizes;
+    msizes[0] = mat.rows();
+    msizes[1] = mat.cols();
+    MPI_Send(&msizes, sizeof(msizes), MPI_PACKED, dst, 0, comm);
+    MPI_Send(mat.data(), mat.rows()*mat.cols()*sizeof(T), MPI_PACKED, dst, 0, comm);
+}
+
+template<typename T>
+void
+priv_MPI_Recv(Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>& mat,
+              int src, MPI_Comm comm)
+{
+    MPI_Status status;
+    std::array<Eigen::Index, 2> msizes;
+    MPI_Recv(&msizes, sizeof(msizes), MPI_PACKED, src, 0, comm, &status);
+    mat.resize(msizes[0], msizes[1]);
+    MPI_Recv(mat.data(), mat.rows()*mat.cols()*sizeof(T), MPI_PACKED, src, 0, comm, &status);
+}
+
+template<typename T>
+void
+priv_MPI_Send(std::vector<T>& vec, int dst, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
+    size_t vsize = vec.size();
+    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
+    MPI_Send(vec.data(), vec.size()*sizeof(T), MPI_PACKED, dst, 0, comm);
+}
+
+template<typename T>
+void
+priv_MPI_Recv(std::vector<T>& vec, int src, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
+    MPI_Status status;
+    size_t vsize;
+    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
+    vec.resize(vsize);
+    MPI_Recv(vec.data(), vec.size()*sizeof(T), MPI_PACKED, src, 0, comm, &status);
+}
+
+template<typename T>
+void
+priv_MPI_Bcast(std::vector<T>& vec, int root, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
+
+    int rank;
+    MPI_Comm_rank(comm, &rank);
+
+    if (rank == root)
+    {
+        size_t vsize = vec.size();
+        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
+    }
+    else
+    {
+        size_t vsize;
+        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        vec.resize(vsize);
+        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
+    }
+}
+
+template<typename T1, typename T2>
+void
+priv_MPI_Bcast(std::map<T1, std::vector<T2>>& map, int root, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T1>::value, "Type must be trivially copyable");
+    static_assert(std::is_trivially_copyable<T2>::value, "Type must be trivially copyable");
+
+    int rank;
+    MPI_Comm_rank(comm, &rank);
+
+    if (rank == root)
+    {
+        size_t msize = map.size();
+        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        for (auto& [l, rv] : map)
+        {
+            auto ll = l;
+            MPI_Bcast(&ll, sizeof(T1), MPI_PACKED, root, comm);
+            size_t vsize = rv.size();
+            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
+        }
+    }
+    else
+    {
+        size_t msize;
+        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        for (size_t i = 0; i < msize; i++)
+        {
+            T1 l;
+            MPI_Bcast(&l, sizeof(T1), MPI_PACKED, root, comm);
+            size_t vsize;
+            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+            std::vector<T2> rv;
+            rv.resize(vsize);
+            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
+            map[l] = std::move(rv);
+        }
+    }
+}
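All of these helpers ship objects as untyped byte streams: the element count travels in a first message, then the payload goes out as count*sizeof(T) bytes tagged MPI_PACKED, so sender and receiver must agree exactly on type layout. The vector overloads additionally assume that size_t has the width of unsigned long long. A sketch of the intended send/receive pairing between two ranks (illustration only, not part of the patch):

    std::vector<double> v;
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0)
    {
        v = {1.0, 2.0, 3.0};
        priv_MPI_Send(v, 1, MPI_COMM_WORLD);  /* size first, then payload */
    }
    else if (rank == 1)
    {
        priv_MPI_Recv(v, 0, MPI_COMM_WORLD);  /* resizes v, then fills it */
    }
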
diff --git a/include/physical_element.h b/include/physical_element.h
index ebd07e1..b8dad8c 100644
--- a/include/physical_element.h
+++ b/include/physical_element.h
@@ -111,8 +111,6 @@ public:
 
 class element_key
 {
-    /* Objects of this class get MPI_Bcast'ed as MPI_PACKED,
-     * beware of data types you use below. */
     int                     m_dim;
     int                     m_elemType;
    std::array<size_t, 8>   m_key_data{};
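The comment deleted above was the only reminder that element_key objects cross ranks as raw MPI_PACKED bytes. A compile-time guard would keep enforcing what the comment merely documented; this is a suggestion, not something the patch adds:

    #include <type_traits>

    /* element_key travels through priv_MPI_Bcast as raw bytes; a member that
     * is not trivially copyable would silently corrupt the transfer. */
    static_assert(std::is_trivially_copyable<element_key>::value,
                  "element_key is MPI_Bcast'ed as MPI_PACKED raw bytes");
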
diff --git a/include/point.hpp b/include/point.hpp
index ed47586..0f8a0c3 100644
--- a/include/point.hpp
+++ b/include/point.hpp
@@ -22,7 +22,7 @@
 #include <iostream>
 #include <array>
 #include <cmath>
-
+#include <type_traits>
 
 template<typename T, size_t DIM>
 class point
@@ -38,15 +38,9 @@ public:
             m_coords[i] = T(0);
     }
 
-    point(const point& other)
-        : m_coords(other.m_coords)
-    {}
 
-    point operator=(const point& other)
-    {
-        m_coords = other.m_coords;
-        return *this;
-    }
+    point(const point&) = default;
+    point& operator=(const point&) = default;
 
     template<typename U = T>
     point(const typename std::enable_if<DIM == 1, U>::type& x)
diff --git a/include/reference_element.h b/include/reference_element.h
index 19961b4..ab261fc 100644
--- a/include/reference_element.h
+++ b/include/reference_element.h
@@ -1,5 +1,9 @@
 #pragma once
 
+#ifdef USE_MPI
+#include "gmsh_mpi.h"
+#endif /* USE_MPI */
+
 #include "eigen.h"
 #include "types.h"
 
@@ -35,6 +39,11 @@ public:
     matxd mass_matrix(size_t iQp) const;
     matxd mass_matrices(void) const;
 
+#ifdef USE_MPI
+    void mpi_send(int dst, MPI_Comm comm);
+    void mpi_recv(int src, MPI_Comm comm);
+#endif /* USE_MPI */
+
     /* This class is not intended to be constructed
      * by the user but by the appropriate factory */
     friend class reference_elements_factory;
diff --git a/src/entity.cpp b/src/entity.cpp
index 4eb1f23..39cb7bf 100644
--- a/src/entity.cpp
+++ b/src/entity.cpp
@@ -879,7 +879,20 @@ entity::populate_entity_data(entity_data_cpu& ed, const model& mod) const
     }
 }
 
+#ifdef USE_MPI
+void
+entity::mpi_send(int dst, MPI_Comm comm)
+{
+    for (auto& re : reference_cells)
+        re.mpi_send(dst, comm);
+    for (auto& rf : reference_faces)
+        rf.mpi_send(dst, comm);
+
+    priv_MPI_Send(faceTags, dst, comm);
+    priv_MPI_Send(faceNodesTags, dst, comm);
+}
+#endif /* USE_MPI */
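entity.h declares an mpi_recv counterpart that this patch does not yet implement. A minimal sketch of what it could look like, under the assumption that reference_cells and reference_faces have already been sized to match the sender (entity::mpi_send transmits no counts for them; faceTags and faceNodesTags need no pre-sizing because priv_MPI_Recv resizes on arrival):

    #ifdef USE_MPI
    void
    entity::mpi_recv(int src, MPI_Comm comm)
    {
        /* assumes both vectors already hold the sender's element counts */
        for (auto& re : reference_cells)
            re.mpi_recv(src, comm);
        for (auto& rf : reference_faces)
            rf.mpi_recv(src, comm);

        priv_MPI_Recv(faceTags, src, comm);
        priv_MPI_Recv(faceNodesTags, src, comm);
    }
    #endif /* USE_MPI */
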
diff --git a/src/gmsh_io.cpp b/src/gmsh_io.cpp
index dca52a1..faf396d 100644
--- a/src/gmsh_io.cpp
+++ b/src/gmsh_io.cpp
@@ -102,6 +102,38 @@ model::make_boundary_to_faces_map(void)
     assert( IFF(comm_map.size() == 0, num_partitions == 1) );
 }
 
+void
+model::make_partition_to_entities_map()
+{
+    ASSERT_MPI_RANK_0;
+
+    gvp_t gmsh_entities;
+
+    gm::getEntities(gmsh_entities, DIMENSION(3));
+    for (auto [dim, tag] : gmsh_entities)
+    {
+        if (num_partitions > 1)
+        {
+            /* If the model is partitioned and the entity has no parent,
+             * it means that there are no elements in the entity on which
+             * we will compute. Skip it. */
+            int pdim, ptag;
+            gmsh::model::getParent(dim, tag, pdim, ptag);
+
+            if (ptag == -1)
+                continue;
+
+            std::vector<int> parts;
+            gm::getPartitions(dim, tag, parts);
+            assert(parts.size() == 1);
+            partition_map[ parts[0] ].push_back(tag);
+        }
+    }
+
+    for (auto& [pn, tags] : partition_map)
+        std::sort(tags.begin(), tags.end()); /* For lookups */
+}
+
 void
 model::update_connectivity(const entity& e, size_t entity_index)
 {
@@ -259,8 +291,8 @@ model::populate_from_gmsh_rank0()
     ASSERT_MPI_RANK_0;
 
     gmm::setOrder( geometric_order );
-
     make_boundary_to_faces_map();
+    make_partition_to_entities_map();
 
 #ifdef USE_MPI
     /* At this point, the following members are populated:
@@ -278,8 +310,13 @@ model::populate_from_gmsh_rank0()
     priv_MPI_Bcast(comm_map, 0, MPI_COMM_WORLD);
     priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);
 
-    for (auto& [tag, vec] : comm_map)
-        std::cout << tag << ": " << vec.size() << " " << vec[0] << std::endl;
+    for (auto& [tag, vec] : partition_map)
+    {
+        std::cout << tag << ": " << vec.size() << " -> ";
+        for (auto& v : vec)
+            std::cout << v << " ";
+        std::cout << std::endl;
+    }
 
 #endif /* USE_MPI */
@@ -300,8 +337,13 @@ model::populate_from_gmsh_rankN()
     priv_MPI_Bcast(comm_map, 0, MPI_COMM_WORLD);
     priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);
 
-    for (auto& [tag, vec] : comm_map)
-        std::cout << tag << ": " << vec.size() << " " << vec[0] << std::endl;
+    for (auto& [tag, vec] : partition_map)
+    {
+        std::cout << tag << ": " << vec.size() << " -> ";
+        for (auto& v : vec)
+            std::cout << v << " ";
+        std::cout << std::endl;
+    }
 
 }
 #endif /* USE_MPI */
diff --git a/src/gmsh_mpi.cpp b/src/gmsh_mpi.cpp
index f644e3a..07f9c3c 100644
--- a/src/gmsh_mpi.cpp
+++ b/src/gmsh_mpi.cpp
@@ -1,5 +1,6 @@
 #include "gmsh_mpi.h"
 
+#if 0
 template<typename T1, typename T2>
 void
 zip(const std::vector<T1>& firsts, const std::vector<T2>& seconds,
     std::vector<std::pair<T1, T2>>& pairs)
@@ -89,4 +90,6 @@ MPIGMSH_getElementTypes(std::vector<int>& elemTypes, int dim, int tag, MPI_Comm
         elemTypes.resize(num_eT);
         MPI_Bcast(elemTypes.data(), num_eT, MPI_INT, 0, comm);
     }
-}
\ No newline at end of file
+}
+
+#endif
\ No newline at end of file
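make_partition_to_entities_map sorts each partition's entity tags precisely so that later queries can binary-search instead of scanning. A sketch of the lookup pattern this enables (the helper name is hypothetical; nothing like it exists in the patch):

    #include <algorithm>
    #include <map>
    #include <vector>

    /* Hypothetical membership test over the sorted per-partition tag lists. */
    static bool
    partition_owns_entity(const std::map<int, std::vector<int>>& pmap,
                          int partition, int tag)
    {
        auto it = pmap.find(partition);
        if (it == pmap.end())
            return false;
        return std::binary_search(it->second.begin(), it->second.end(), tag);
    }
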
diff --git a/src/reference_element.cpp b/src/reference_element.cpp
index 7d6ad5c..157babf 100644
--- a/src/reference_element.cpp
+++ b/src/reference_element.cpp
@@ -80,7 +80,42 @@ reference_element::integration_points(void) const
     return m_quadpoints;
 }
 
+#ifdef USE_MPI
+void
+reference_element::mpi_send(int dst, MPI_Comm comm)
+{
+    MPI_Send(&m_approx_order, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_geometric_order, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_dimension, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_orientation, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_gmsh_elem_type, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_parent_entity_tag, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_num_bf, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
+    priv_MPI_Send(m_basis_functions, dst, comm);
+    priv_MPI_Send(m_basis_gradients, dst, comm);
+    priv_MPI_Send(m_mass_matrix, dst, comm);
+    priv_MPI_Send(m_mass_matrices, dst, comm);
+    priv_MPI_Send(m_quadpoints, dst, comm);
+}
+void
+reference_element::mpi_recv(int src, MPI_Comm comm)
+{
+    MPI_Status status;
+    MPI_Recv(&m_approx_order, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_geometric_order, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_dimension, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_orientation, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_gmsh_elem_type, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_parent_entity_tag, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_num_bf, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
+    priv_MPI_Recv(m_basis_functions, src, comm);
+    priv_MPI_Recv(m_basis_gradients, src, comm);
+    priv_MPI_Recv(m_mass_matrix, src, comm);
+    priv_MPI_Recv(m_mass_matrices, src, comm);
+    priv_MPI_Recv(m_quadpoints, src, comm);
+}
+#endif /* USE_MPI */
-- 
GitLab
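The new send/receive pair moves each scalar member in its own MPI message. Since the six int fields always travel together and in a fixed order, they could be batched into a single message. A possible refinement, not part of this patch, under the assumption that sender and receiver keep the field order identical (the _header method names are hypothetical):

    /* Hypothetical batched variant for the scalar header of reference_element. */
    void
    reference_element::mpi_send_header(int dst, MPI_Comm comm)
    {
        int hdr[6] = { m_approx_order, m_geometric_order, m_dimension,
                       m_orientation, m_gmsh_elem_type, m_parent_entity_tag };
        MPI_Send(hdr, 6, MPI_INT, dst, 0, comm);
        MPI_Send(&m_num_bf, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
    }

    void
    reference_element::mpi_recv_header(int src, MPI_Comm comm)
    {
        MPI_Status status;
        int hdr[6];
        MPI_Recv(hdr, 6, MPI_INT, src, 0, comm, &status);
        m_approx_order      = hdr[0];
        m_geometric_order   = hdr[1];
        m_dimension         = hdr[2];
        m_orientation       = hdr[3];
        m_gmsh_elem_type    = hdr[4];
        m_parent_entity_tag = hdr[5];
        MPI_Recv(&m_num_bf, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
    }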