Commit d1a716e3 authored by Matteo Cicuttin's avatar Matteo Cicuttin

reference_element is MPI-ready.

parent f2f6d748
@@ -2,6 +2,10 @@
 #include <iostream>
 
+#ifdef USE_MPI
+#include "gmsh_mpi.h"
+#endif /* USE_MPI */
+
 #include "reference_element.h"
 #include "physical_element.h"
 #include "connectivity.h"
@@ -112,6 +116,11 @@ public:
     std::vector<physical_element>::const_iterator begin() const { return physical_cells.begin(); }
     std::vector<physical_element>::const_iterator end() const { return physical_cells.end(); }
 
+#ifdef USE_MPI
+    void mpi_send(int dst, MPI_Comm comm);
+    void mpi_recv(int src, MPI_Comm comm);
+#endif /* USE_MPI */
+
 };
 
 std::pair<std::vector<point_3d>, std::vector<quadrature_point_3d>>
...
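The mpi_send()/mpi_recv() pair declared above gives each entity a point-to-point transfer protocol. A minimal usage sketch, illustrative only: the driver function, the header name and the rank numbers are assumptions, not part of this commit.

    #include "entity.h"   /* assumed header name for the class above */

    /* Hypothetical driver: rank 0 ships an entity to rank 1. */
    void ship_entity(entity& e, MPI_Comm comm)
    {
        int rank;
        MPI_Comm_rank(comm, &rank);
        if (rank == 0)
            e.mpi_send(1, comm);   /* sender side: ordered sequence of MPI_Send calls */
        else if (rank == 1)
            e.mpi_recv(0, comm);   /* receiver side: the matching MPI_Recv sequence */
    }
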
@@ -6,7 +6,7 @@
 #include <type_traits>
 
 #ifdef USE_MPI
-#include <mpi/mpi.h>
+#include "gmsh_mpi.h"
 #endif /* USE_MPI */
 
 #include "entity.h"
@@ -62,83 +62,12 @@ struct boundary_descriptor
     {}
 };
 
-#if ((!defined(NDEBUG)) && defined(USE_MPI))
-#define ASSERT_MPI_RANK_0 { \
-    int __rank; \
-    MPI_Comm_rank(MPI_COMM_WORLD, &__rank); \
-    assert(__rank == 0); \
-}
-#else
-#define ASSERT_MPI_RANK_0 (void) 0
-#endif
-
 #define IMPLIES(a, b) ((not(a)) or (b))
 #define IFF(a,b) (IMPLIES(a,b) and IMPLIES(b,a))
 
-template<typename T>
-void
-priv_MPI_Bcast(std::vector<T>& vec, int root, MPI_Comm comm)
-{
-    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
-
-    int rank;
-    MPI_Comm_rank(comm, &rank);
-
-    if (rank == root)
-    {
-        size_t vsize = vec.size();
-        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
-    }
-    else
-    {
-        size_t vsize;
-        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-        vec.resize(vsize);
-        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
-    }
-}
-
-template<typename T1, typename T2>
-void
-priv_MPI_Bcast(std::map<T1, std::vector<T2>>& map, int root, MPI_Comm comm)
-{
-    static_assert(std::is_trivially_copyable<T1>::value, "Type must be trivially copyable");
-    static_assert(std::is_trivially_copyable<T2>::value, "Type must be trivially copyable");
-
-    int rank;
-    MPI_Comm_rank(comm, &rank);
-
-    if (rank == root)
-    {
-        size_t msize = map.size();
-        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-        for (auto& [l, rv] : map)
-        {
-            auto ll = l;
-            MPI_Bcast(&ll, sizeof(T1), MPI_PACKED, root, comm);
-            size_t vsize = rv.size();
-            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
-        }
-    }
-    else
-    {
-        size_t msize;
-        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-        for (size_t i = 0; i < msize; i++)
-        {
-            T1 l;
-            MPI_Bcast(&l, sizeof(T1), MPI_PACKED, root, comm);
-            size_t vsize;
-            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
-            std::vector<T2> rv;
-            rv.resize(vsize);
-            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
-            map[l] = std::move(rv);
-        }
-    }
-}
-
 class model
 {
@@ -146,8 +75,13 @@ class model
     int approximation_order;
     int num_partitions;
 
+    /* Map from boundary tag to all the faces of the entity with that tag */
     std::map<int, std::vector<element_key>> boundary_map;
+
+    /* Map from boundary tag to all the partitions it belongs to */
     std::map<int, std::vector<int>> comm_map;
+
+    /* Map from the partition number to the entities belonging to that partition */
     std::map<int, std::vector<int>> partition_map;
 
     std::vector<entity> entities;
@@ -168,6 +102,7 @@ class model
 #endif
 
     void make_boundary_to_faces_map(void);
+    void make_partition_to_entities_map(void);
 
 public:
     model();
...
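partition_map is kept sorted per partition (see make_partition_to_entities_map() further down, which sorts each tag vector "for lookups"), so membership tests can use binary search. A sketch of such a lookup, under the assumption that the sorted invariant holds; the helper below is illustrative and not part of the commit.

    #include <map>
    #include <vector>
    #include <algorithm>

    /* Does entity `tag` belong to partition `pn`? O(log n) thanks to the sort. */
    bool entity_in_partition(const std::map<int, std::vector<int>>& pmap,
                             int pn, int tag)
    {
        auto it = pmap.find(pn);
        return it != pmap.end() &&
               std::binary_search(it->second.begin(), it->second.end(), tag);
    }
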
 #pragma once
 
+#include <vector>
+#include <map>
+#include <array>
+
 #include <mpi/mpi.h>
-#include "gmsh.h"
 
-namespace g = gmsh;
-namespace go = gmsh::option;
-namespace gm = gmsh::model;
-namespace gmm = gmsh::model::mesh;
-namespace gmo = gmsh::model::occ;
-namespace gmg = gmsh::model::geo;
-namespace gv = gmsh::view;
-
-using gvp_t = gmsh::vectorpair;
-
-/* Macros used to annotate GMSH integer inputs */
-#define DIMENSION(x) x
-
-void MPIGMSH_getEntities(gvp_t&, int, MPI_Comm);
+#include "eigen.h"
+
+#if ((!defined(NDEBUG)) && defined(USE_MPI))
+#define ASSERT_MPI_RANK_0 { \
+    int __rank; \
+    MPI_Comm_rank(MPI_COMM_WORLD, &__rank); \
+    assert(__rank == 0); \
+}
+#else
+#define ASSERT_MPI_RANK_0 (void) 0
+#endif
+
+template<typename T>
+void
+priv_MPI_Send(Eigen::Matrix<T, Eigen::Dynamic, 1>& vec, int dst, MPI_Comm comm)
+{
+    Eigen::Index vsize = vec.size();
+    MPI_Send(&vsize, sizeof(Eigen::Index), MPI_PACKED, dst, 0, comm);
+    MPI_Send(vec.data(), vec.size()*sizeof(T), MPI_PACKED, dst, 0, comm);
+}
+
+template<typename T>
+void
+priv_MPI_Recv(Eigen::Matrix<T, Eigen::Dynamic, 1>& vec, int src, MPI_Comm comm)
+{
+    MPI_Status status;
+    Eigen::Index vsize;
+    MPI_Recv(&vsize, sizeof(Eigen::Index), MPI_PACKED, src, 0, comm, &status);
+    vec.resize(vsize);
+    MPI_Recv(vec.data(), vec.size()*sizeof(T), MPI_PACKED, src, 0, comm, &status);
+}
+
+template<typename T>
+void
+priv_MPI_Send(Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>& mat,
+    int dst, MPI_Comm comm)
+{
+    std::array<Eigen::Index, 2> msizes;
+    msizes[0] = mat.rows();
+    msizes[1] = mat.cols();
+    MPI_Send(&msizes, sizeof(msizes), MPI_PACKED, dst, 0, comm);
+    MPI_Send(mat.data(), mat.rows()*mat.cols()*sizeof(T), MPI_PACKED, dst, 0, comm);
+}
+
+template<typename T>
+void
+priv_MPI_Recv(Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>& mat,
+    int src, MPI_Comm comm)
+{
+    MPI_Status status;
+    std::array<Eigen::Index, 2> msizes;
+    MPI_Recv(&msizes, sizeof(msizes), MPI_PACKED, src, 0, comm, &status);
+    mat.resize(msizes[0], msizes[1]);
+    MPI_Recv(mat.data(), mat.rows()*mat.cols()*sizeof(T), MPI_PACKED, src, 0, comm, &status);
+}
+
+template<typename T>
+void
+priv_MPI_Send(std::vector<T>& vec, int dst, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
+
+    size_t vsize = vec.size();
+    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
+    MPI_Send(vec.data(), vec.size()*sizeof(T), MPI_PACKED, dst, 0, comm);
+}
+
+template<typename T>
+void
+priv_MPI_Recv(std::vector<T>& vec, int src, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
+
+    MPI_Status status;
+    size_t vsize;
+    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
+    vec.resize(vsize);
+    MPI_Recv(vec.data(), vec.size()*sizeof(T), MPI_PACKED, src, 0, comm, &status);
+}
+
+template<typename T>
+void
+priv_MPI_Bcast(std::vector<T>& vec, int root, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
+
+    int rank;
+    MPI_Comm_rank(comm, &rank);
+
+    if (rank == root)
+    {
+        size_t vsize = vec.size();
+        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
+    }
+    else
+    {
+        size_t vsize;
+        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        vec.resize(vsize);
+        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
+    }
+}
+
+template<typename T1, typename T2>
+void
+priv_MPI_Bcast(std::map<T1, std::vector<T2>>& map, int root, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T1>::value, "Type must be trivially copyable");
+    static_assert(std::is_trivially_copyable<T2>::value, "Type must be trivially copyable");
+
+    int rank;
+    MPI_Comm_rank(comm, &rank);
+
+    if (rank == root)
+    {
+        size_t msize = map.size();
+        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        for (auto& [l, rv] : map)
+        {
+            auto ll = l;
+            MPI_Bcast(&ll, sizeof(T1), MPI_PACKED, root, comm);
+            size_t vsize = rv.size();
+            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
+        }
+    }
+    else
+    {
+        size_t msize;
+        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        for (size_t i = 0; i < msize; i++)
+        {
+            T1 l;
+            MPI_Bcast(&l, sizeof(T1), MPI_PACKED, root, comm);
+            size_t vsize;
+            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+            std::vector<T2> rv;
+            rv.resize(vsize);
+            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
+            map[l] = std::move(rv);
+        }
+    }
+}
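All of these helpers follow the same two-message protocol: a size header first, then the payload sent as raw bytes with type MPI_PACKED, so the count argument is a byte count rather than an element count. The static_asserts restrict the std::vector overloads to trivially copyable element types, for which a raw byte copy is a faithful serialization. A minimal sketch of a matched send/receive pair; the driver function, ranks and data are illustrative, not part of the commit.

    #include <vector>
    #include "gmsh_mpi.h"   /* assumed location of the helpers above */

    void roundtrip_example(int rank)
    {
        std::vector<double> v;
        if (rank == 0)
        {
            v = {1.0, 2.0, 3.0};
            priv_MPI_Send(v, 1, MPI_COMM_WORLD);  /* size header, then 3*sizeof(double) bytes */
        }
        else if (rank == 1)
        {
            priv_MPI_Recv(v, 0, MPI_COMM_WORLD);  /* reads size, resizes, receives the bytes */
        }
    }
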
@@ -111,8 +111,6 @@ public:
 class element_key
 {
-    /* Objects of this class get MPI_Bcast'ed as MPI_PACKED,
-     * beware of data types you use below. */
     int m_dim;
     int m_elemType;
     std::array<size_t, 8> m_key_data{};
...
@@ -22,7 +22,7 @@
 #include <iostream>
 #include <array>
 #include <cmath>
+#include <type_traits>
 
 template<typename T, size_t DIM>
 class point
 {
@@ -38,15 +38,9 @@ public:
             m_coords[i] = T(0);
     }
 
-    point(const point& other)
-        : m_coords(other.m_coords)
-    {}
-
-    point operator=(const point& other)
-    {
-        m_coords = other.m_coords;
-        return *this;
-    }
+    point(const point&) = default;
+    point& operator=(const point&) = default;
 
     template<typename U = T>
     point(const typename std::enable_if<DIM == 1, U>::type& x)
...
 #pragma once
 
+#ifdef USE_MPI
+#include "gmsh_mpi.h"
+#endif /* USE_MPI */
+
 #include "eigen.h"
 #include "types.h"
@@ -35,6 +39,11 @@ public:
     matxd mass_matrix(size_t iQp) const;
     matxd mass_matrices(void) const;
 
+#ifdef USE_MPI
+    void mpi_send(int dst, MPI_Comm comm);
+    void mpi_recv(int src, MPI_Comm comm);
+#endif /* USE_MPI */
+
     /* This class is not intended to be constructed
      * by the user but by the appropriate factory */
     friend class reference_elements_factory;
...
@@ -879,7 +879,20 @@ entity::populate_entity_data(entity_data_cpu& ed, const model& mod) const
     }
 }
 
+#ifdef USE_MPI
+void
+entity::mpi_send(int dst, MPI_Comm comm)
+{
+    for (auto& re : reference_cells)
+        re.mpi_send(dst, comm);
+
+    for (auto& rf : reference_faces)
+        rf.mpi_send(dst, comm);
+
+    priv_MPI_Send(faceTags, dst, comm);
+    priv_MPI_Send(faceNodesTags, dst, comm);
+}
+#endif /* USE_MPI */
...
@@ -102,6 +102,38 @@ model::make_boundary_to_faces_map(void)
     assert( IFF(comm_map.size() == 0, num_partitions == 1) );
 }
 
+void
+model::make_partition_to_entities_map()
+{
+    ASSERT_MPI_RANK_0;
+
+    gvp_t gmsh_entities;
+    gm::getEntities(gmsh_entities, DIMENSION(3));
+
+    for (auto [dim, tag] : gmsh_entities)
+    {
+        if (num_partitions > 1)
+        {
+            /* If the model is partitioned and the entity has no parent,
+             * there are no elements in the entity on which we will
+             * compute. Skip it. */
+            int pdim, ptag;
+            gmsh::model::getParent(dim, tag, pdim, ptag);
+            if (ptag == -1)
+                continue;
+
+            std::vector<int> parts;
+            gm::getPartitions(dim, tag, parts);
+            assert(parts.size() == 1); /* check after getPartitions() has filled parts */
+            partition_map[ parts[0] ].push_back(tag);
+        }
+    }
+
+    for (auto& [pn, tags] : partition_map)
+        std::sort(tags.begin(), tags.end()); /* For lookups */
+}
 void
 model::update_connectivity(const entity& e, size_t entity_index)
 {
@@ -259,8 +291,8 @@ model::populate_from_gmsh_rank0()
     ASSERT_MPI_RANK_0;
 
     gmm::setOrder( geometric_order );
 
     make_boundary_to_faces_map();
+    make_partition_to_entities_map();
 
 #ifdef USE_MPI
     /* At this point, the following members are populated:
@@ -278,8 +310,13 @@ model::populate_from_gmsh_rank0()
     priv_MPI_Bcast(comm_map, 0, MPI_COMM_WORLD);
     priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);
 
-    for (auto& [tag, vec] : comm_map)
-        std::cout << tag << ": " << vec.size() << " " << vec[0] << std::endl;
+    for (auto& [tag, vec] : partition_map)
+    {
+        std::cout << tag << ": " << vec.size() << " -> ";
+        for (auto& v : vec)
+            std::cout << v << " ";
+        std::cout << std::endl;
+    }
 #endif /* USE_MPI */
@@ -300,8 +337,13 @@ model::populate_from_gmsh_rankN()
     priv_MPI_Bcast(comm_map, 0, MPI_COMM_WORLD);
     priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);
 
-    for (auto& [tag, vec] : comm_map)
-        std::cout << tag << ": " << vec.size() << " " << vec[0] << std::endl;
+    for (auto& [tag, vec] : partition_map)
+    {
+        std::cout << tag << ": " << vec.size() << " -> ";
+        for (auto& v : vec)
+            std::cout << v << " ";
+        std::cout << std::endl;
+    }
 }
 #endif /* USE_MPI */
...
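Note that priv_MPI_Bcast() wraps MPI_Bcast, a collective operation: populate_from_gmsh_rank0() and populate_from_gmsh_rankN() must issue the same broadcasts on the same maps in the same order, which is exactly what the two hunks above do. A sketch of the intended dispatch; the driver, the header name and the assumption that these members are callable from it are illustrative.

    #include <mpi/mpi.h>
    #include "model.h"   /* assumed header name for class model */

    void populate(model& mod)
    {
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        if (rank == 0)
            mod.populate_from_gmsh_rank0();  /* reads the mesh, broadcasts the maps */
        else
            mod.populate_from_gmsh_rankN();  /* issues the matching broadcasts */
    }
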
#include "gmsh_mpi.h" #include "gmsh_mpi.h"
#if 0
template<typename T1, typename T2> template<typename T1, typename T2>
void zip(const std::vector<T1>& firsts, const std::vector<T2>& seconds, void zip(const std::vector<T1>& firsts, const std::vector<T2>& seconds,
std::vector<std::pair<T1, T2>>& pairs) std::vector<std::pair<T1, T2>>& pairs)
...@@ -89,4 +90,6 @@ MPIGMSH_getElementTypes(std::vector<int>& elemTypes, int dim, int tag, MPI_Comm ...@@ -89,4 +90,6 @@ MPIGMSH_getElementTypes(std::vector<int>& elemTypes, int dim, int tag, MPI_Comm
elemTypes.resize(num_eT); elemTypes.resize(num_eT);
MPI_Bcast(elemTypes.data(), num_eT, MPI_INT, 0, comm); MPI_Bcast(elemTypes.data(), num_eT, MPI_INT, 0, comm);
} }
} }
\ No newline at end of file
#endif
\ No newline at end of file
@@ -80,7 +80,42 @@ reference_element::integration_points(void) const
     return m_quadpoints;
 }
 
+#ifdef USE_MPI
+void
+reference_element::mpi_send(int dst, MPI_Comm comm)
+{
+    MPI_Send(&m_approx_order, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_geometric_order, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_dimension, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_orientation, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_gmsh_elem_type, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_parent_entity_tag, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&m_num_bf, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
+    priv_MPI_Send(m_basis_functions, dst, comm);
+    priv_MPI_Send(m_basis_gradients, dst, comm);
+    priv_MPI_Send(m_mass_matrix, dst, comm);
+    priv_MPI_Send(m_mass_matrices, dst, comm);
+    priv_MPI_Send(m_quadpoints, dst, comm);
+}
+
+void
+reference_element::mpi_recv(int src, MPI_Comm comm)
+{
+    MPI_Status status;
+    MPI_Recv(&m_approx_order, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_geometric_order, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_dimension, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_orientation, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_gmsh_elem_type, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_parent_entity_tag, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&m_num_bf, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
+    priv_MPI_Recv(m_basis_functions, src, comm);
+    priv_MPI_Recv(m_basis_gradients, src, comm);
+    priv_MPI_Recv(m_mass_matrix, src, comm);
+    priv_MPI_Recv(m_mass_matrices, src, comm);
+    priv_MPI_Recv(m_quadpoints, src, comm);
+}
+#endif /* USE_MPI */
...
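reference_element::mpi_send() and mpi_recv() above must stay in lockstep: every field sent must be received with the same MPI datatype, in the same order, or the point-to-point messages will be matched against the wrong fields. A sketch of the round trip; the driver function and rank numbers are illustrative, and it assumes a reference_element instance is accessible on both ranks (the class is normally produced by its factory).

    void exchange(reference_element& re, int rank, MPI_Comm comm)
    {
        if (rank == 0)
            re.mpi_send(1, comm);   /* seven scalar headers, then five buffers */
        else if (rank == 1)
            re.mpi_recv(0, comm);   /* the same sequence on the receive side */
    }
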