Commit f2f6d748 authored by Matteo Cicuttin

Broadcasts before entity import.

parent fa247c55
@@ -3,6 +3,11 @@
 #include <vector>
 #include <array>
 #include <gmsh.h>
+#include <type_traits>
+
+#ifdef USE_MPI
+#include <mpi/mpi.h>
+#endif /* USE_MPI */
 #include "entity.h"
 #include "physical_element.h"
@@ -26,11 +31,12 @@ std::string basis_grad_name(int);
 using face_key = std::array<size_t, 3>;

-enum class face_type
+enum class face_type : int
 {
-    NONE,
+    NONE = 0,
     INTERFACE,
-    BOUNDARY
+    BOUNDARY,
+    INTERPROCESS_BOUNDARY
 };

 struct boundary_descriptor
@@ -56,15 +62,95 @@ struct boundary_descriptor
     {}
 };
+#if ((!defined(NDEBUG)) && defined(USE_MPI))
+#define ASSERT_MPI_RANK_0 { \
+    int __rank; \
+    MPI_Comm_rank(MPI_COMM_WORLD, &__rank); \
+    assert(__rank == 0); \
+}
+#else
+#define ASSERT_MPI_RANK_0 (void) 0
+#endif
+
+#define IMPLIES(a, b) ((not(a)) or (b))
+#define IFF(a,b) (IMPLIES(a,b) and IMPLIES(b,a))
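
Under USE_MPI in debug builds, ASSERT_MPI_RANK_0 expands to a block that queries the calling process's rank and asserts it is zero; in every other configuration it becomes the harmless expression (void) 0, so a trailing semicolon is valid either way. IMPLIES and IFF encode material implication and equivalence, which lets invariants read like logic, as in the check this commit adds to make_boundary_to_faces_map():

    /* The communication map is empty if and only if the mesh is unpartitioned. */
    assert( IFF(comm_map.size() == 0, num_partitions == 1) );
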
+template<typename T>
+void
+priv_MPI_Bcast(std::vector<T>& vec, int root, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
+
+    int rank;
+    MPI_Comm_rank(comm, &rank);
+
+    if (rank == root)
+    {
+        size_t vsize = vec.size();
+        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
+    }
+    else
+    {
+        size_t vsize;
+        MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        vec.resize(vsize);
+        MPI_Bcast(vec.data(), vec.size()*sizeof(T), MPI_PACKED, root, comm);
+    }
+}
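
This overload uses a two-step protocol: the root broadcasts the element count, then the payload as vec.size()*sizeof(T) raw bytes; receivers resize before the second broadcast. The byte-wise transfer is only well defined for trivially copyable T, which the static_assert enforces. A minimal usage sketch, assuming priv_MPI_Bcast is in scope (everything else here is illustrative):

    #include <vector>
    #include <mpi/mpi.h>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        std::vector<double> data;
        if (rank == 0)
            data = { 1.0, 2.0, 3.0 };   /* only the root has the payload */

        priv_MPI_Bcast(data, 0, MPI_COMM_WORLD);
        /* every rank now holds { 1.0, 2.0, 3.0 } */

        MPI_Finalize();
        return 0;
    }
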
+template<typename T1, typename T2>
+void
+priv_MPI_Bcast(std::map<T1, std::vector<T2>>& map, int root, MPI_Comm comm)
+{
+    static_assert(std::is_trivially_copyable<T1>::value, "Type must be trivially copyable");
+    static_assert(std::is_trivially_copyable<T2>::value, "Type must be trivially copyable");
+
+    int rank;
+    MPI_Comm_rank(comm, &rank);
+
+    if (rank == root)
+    {
+        size_t msize = map.size();
+        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        for (auto& [l, rv] : map)
+        {
+            auto ll = l;
+            MPI_Bcast(&ll, sizeof(T1), MPI_PACKED, root, comm);
+            size_t vsize = rv.size();
+            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
+        }
+    }
+    else
+    {
+        size_t msize;
+        MPI_Bcast(&msize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+        for (size_t i = 0; i < msize; i++)
+        {
+            T1 l;
+            MPI_Bcast(&l, sizeof(T1), MPI_PACKED, root, comm);
+            size_t vsize;
+            MPI_Bcast(&vsize, 1, MPI_UNSIGNED_LONG_LONG, root, comm);
+            std::vector<T2> rv;
+            rv.resize(vsize);
+            MPI_Bcast(rv.data(), vsize*sizeof(T2), MPI_PACKED, root, comm);
+            map[l] = std::move(rv);
+        }
+    }
+}
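
The map overload replays the same protocol once per entry: key bytes, vector size, vector payload. Root and receivers stay in lockstep because std::map iterates in key order on the root while the other ranks simply accept msize entries in arrival order, so every process issues an identical sequence of collective calls. A short sketch mirroring how comm_map is broadcast below (the variable name and contents are illustrative):

    std::map<int, std::vector<int>> neighbours;
    if (rank == 0)
        neighbours[42] = { 0, 1, 3 };   /* root: boundary tag -> partitions */

    priv_MPI_Bcast(neighbours, 0, MPI_COMM_WORLD);
    /* every rank now holds { 42 -> {0, 1, 3} } */
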
 class model
 {
     int geometric_order;
     int approximation_order;
     int num_partitions;
+
+    std::map<int, std::vector<element_key>> boundary_map;
+    std::map<int, std::vector<int>> comm_map;
+    std::map<int, std::vector<int>> partition_map;
+
     std::vector<entity> entities;
-    std::map<size_t, std::vector<element_key>> boundary_map;
-    std::map<size_t, std::vector<int>> comm_map;
     std::vector<boundary_descriptor> bnd_descriptors;
     element_connectivity<element_key> conn;
@@ -77,7 +163,7 @@ class model
     void populate_from_gmsh_rank0(void);
-#ifdef HAVE_MPI
+#ifdef USE_MPI
     void populate_from_gmsh_rankN(void);
 #endif
...
@@ -111,6 +111,8 @@ public:
 class element_key
 {
+    /* Objects of this class get MPI_Bcast'ed as MPI_PACKED,
+     * beware of data types you use below. */
     int m_dim;
     int m_elemType;
     std::array<size_t, 8> m_key_data{};
...
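
The new comment is load-bearing: an element_key travels as raw bytes, so the broadcast is only well defined while every member remains trivially copyable, here two ints and a fixed-size std::array, which is exactly what the static_asserts in priv_MPI_Bcast check at compile time. A member such as std::vector or std::string would make element_key non-trivially-copyable and trip those static_asserts rather than silently transmitting dangling pointers.
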
@@ -5,9 +5,9 @@
 #include "gmsh_io.h"
 #include "entity.h"

-#ifdef HAVE_MPI
+#ifdef USE_MPI
 #include <mpi/mpi.h>
-#endif /* HAVE_MPI */
+#endif /* USE_MPI */

 std::string
 quadrature_name(int order)
@@ -57,7 +57,7 @@ model::~model()
 void
 model::make_boundary_to_faces_map(void)
 {
-    assert(num_partitions > 1);
+    ASSERT_MPI_RANK_0;

     gvp_t gmsh_entities;
@@ -98,11 +98,15 @@ model::make_boundary_to_faces_map(void)
             boundary_map[tag] = ekf.get_keys();
         }
     }
+
+    assert( IFF(comm_map.size() == 0, num_partitions == 1) );
 }

 void
 model::update_connectivity(const entity& e, size_t entity_index)
 {
+    ASSERT_MPI_RANK_0;
+
     for (size_t iT = 0; iT < e.num_cells(); iT++)
     {
         for (size_t iF = 0; iF < e.num_faces_per_elem(); iF++)
@@ -180,6 +184,8 @@ model::import_gmsh_entities(void)
 void
 model::map_boundaries(void)
 {
+    ASSERT_MPI_RANK_0;
+
     /* Make a vector mapping element_key to entity tag */
     using bfk_t = std::pair<element_key, int>;
     std::vector<bfk_t> bfk;
@@ -227,64 +233,91 @@ model::map_boundaries(void)
         fbase += e.num_faces();
     }

+/*
     size_t normal = 0;
     size_t interface = 0;
     size_t boundary = 0;
-    for (auto& bd : boundary_descriptors)
+    size_t ipc_boundary = 0;
+    for (auto& bd : bnd_descriptors)
     {
         switch (bd.type)
         {
             case face_type::NONE: normal++; break;
             case face_type::INTERFACE: interface++; break;
             case face_type::BOUNDARY: boundary++; break;
+            case face_type::INTERPROCESS_BOUNDARY: ipc_boundary++; break;
         }
     }
-    std::cout << bfk.size() << " " << boundary_descriptors.size() << std::endl;
-    std::cout << normal << " " << interface << " " << boundary << std::endl;
+    std::cout << bfk.size() << " " << bnd_descriptors.size() << std::endl;
+    std::cout << normal << " " << interface << " " << boundary << " " << ipc_boundary << std::endl;
+*/
 }
 void
 model::populate_from_gmsh_rank0()
 {
+    ASSERT_MPI_RANK_0;
+
     gmm::setOrder( geometric_order );
     make_boundary_to_faces_map();

-    for (auto& [tag, parts] : comm_map)
-    {
-        std::cout << tag << ": ";
-        for (auto& part : parts)
-            std::cout << part << " ";
-        std::cout << std::endl;
-    }
+#ifdef USE_MPI
+    /* At this point, the following members are populated:
+     *  - geometric_order, approximation_order and num_partitions
+     *  - boundary_map
+     *  - comm_map
+     *  - partition_map
+     * We broadcast them to the other processes. */
+    MPI_Bcast(&geometric_order, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&approximation_order, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&num_partitions, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    priv_MPI_Bcast(boundary_map, 0, MPI_COMM_WORLD);
+    priv_MPI_Bcast(comm_map, 0, MPI_COMM_WORLD);
+    priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);
+
+    for (auto& [tag, vec] : comm_map)
+        std::cout << tag << ": " << vec.size() << " " << vec[0] << std::endl;
+#endif /* USE_MPI */

     entities.clear();
     import_gmsh_entities();
     map_boundaries(); /* This must happen after import_gmsh_entities(). */
 }
-#ifdef HAVE_MPI
+#ifdef USE_MPI
 void
 model::populate_from_gmsh_rankN()
-{}
-#endif /* HAVE_MPI */
+{
+    MPI_Bcast(&geometric_order, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&approximation_order, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    MPI_Bcast(&num_partitions, 1, MPI_INT, 0, MPI_COMM_WORLD);
+    priv_MPI_Bcast(boundary_map, 0, MPI_COMM_WORLD);
+    priv_MPI_Bcast(comm_map, 0, MPI_COMM_WORLD);
+    priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);
+
+    for (auto& [tag, vec] : comm_map)
+        std::cout << tag << ": " << vec.size() << " " << vec[0] << std::endl;
+}
+#endif /* USE_MPI */
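
MPI_Bcast is collective, so the USE_MPI block in populate_from_gmsh_rank0() and the body of populate_from_gmsh_rankN() must issue the same broadcasts, in the same order, against the same root; if either side drops or reorders a call, every rank blocks. Keeping the two sequences textually identical, as this commit does, is the simplest way to preserve that pairing.
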
 void
 model::populate_from_gmsh()
 {
-#ifdef HAVE_MPI
+#ifdef USE_MPI
     int rank;
     MPI_Comm_rank(MPI_COMM_WORLD, &rank);

     if (rank == 0)
         populate_from_gmsh_rank0();
     else
         populate_from_gmsh_rankN();
-#else /* HAVE_MPI */
+#else /* USE_MPI */
     populate_from_gmsh_rank0();
-#endif /* HAVE_MPI */
+#endif /* USE_MPI */
 }
 void
...