Commit 47cd04ea authored by Matteo Cicuttin

entity is MPI-ready.

parent 9bc6b58d
@@ -894,15 +894,88 @@ entity::populate_entity_data(entity_data_cpu& ed, const model& mod) const
void
entity::mpi_send(int dst, MPI_Comm comm)
{
    /* Here (and in the nested objects) we do a lot of small MPI_Send()
     * calls, which could be quite inefficient. This code, however, runs
     * only once, at the beginning of the simulation, so for now it stays
     * as is. If it turns out to be a problem, we'll deal with it then
     * (a batched alternative for the scalar header is sketched after this
     * diff). For now, no premature optimization. */
    MPI_Send(&dim, 1, MPI_INT, dst, 0, comm);
    MPI_Send(&tag, 1, MPI_INT, dst, 0, comm);
    MPI_Send(&elemType, 1, MPI_INT, dst, 0, comm);
    MPI_Send(&elemType_2D, 1, MPI_INT, dst, 0, comm);
    MPI_Send(&g_order, 1, MPI_INT, dst, 0, comm);
    MPI_Send(&a_order, 1, MPI_INT, dst, 0, comm);
    MPI_Send(&parent_dim, 1, MPI_INT, dst, 0, comm);
    MPI_Send(&parent_tag, 1, MPI_INT, dst, 0, comm);

    priv_MPI_Send(cur_elem_ordering, dst, comm);

    size_t vsize;

    vsize = reference_cells.size();
    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
    for (auto& re : reference_cells)
        re.mpi_send(dst, comm);

    vsize = physical_cells.size();
    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
    for (auto& pe : physical_cells)
        pe.mpi_send(dst, comm);

    vsize = reference_faces.size();
    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
    for (auto& rf : reference_faces)
        rf.mpi_send(dst, comm);

    vsize = physical_faces.size();
    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
    for (auto& pf : physical_faces)
        pf.mpi_send(dst, comm);

    priv_MPI_Send(faceTags, dst, comm);
    priv_MPI_Send(faceNodesTags, dst, comm);
}
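A note on the helpers: priv_MPI_Send() and priv_MPI_Recv() are used above but are not part of this hunk. Below is a minimal sketch of what they might look like, assuming they serialize a std::vector by sending its length followed by its contiguous storage; the template form and the MPI_BYTE transfer are assumptions, not the actual implementation.

#include <mpi.h>
#include <vector>

/* Hypothetical reconstruction, not code from this commit: ship a
 * std::vector as its length followed by its raw contiguous storage.
 * Assumes T is trivially copyable. */
template<typename T>
void priv_MPI_Send(const std::vector<T>& vec, int dst, MPI_Comm comm)
{
    size_t vsize = vec.size();
    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
    MPI_Send(vec.data(), static_cast<int>(vsize*sizeof(T)), MPI_BYTE, dst, 0, comm);
}

template<typename T>
void priv_MPI_Recv(std::vector<T>& vec, int src, MPI_Comm comm)
{
    MPI_Status status;
    size_t vsize;
    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
    vec.resize(vsize);
    MPI_Recv(vec.data(), static_cast<int>(vsize*sizeof(T)), MPI_BYTE, src, 0, comm, &status);
}

Whatever the real implementation does, it has to follow the same length-then-payload convention the diff itself uses for the cell and face vectors.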
void
entity::mpi_recv(int src, MPI_Comm comm)
{
    MPI_Status status;

    MPI_Recv(&dim, 1, MPI_INT, src, 0, comm, &status);
    MPI_Recv(&tag, 1, MPI_INT, src, 0, comm, &status);
    MPI_Recv(&elemType, 1, MPI_INT, src, 0, comm, &status);
    MPI_Recv(&elemType_2D, 1, MPI_INT, src, 0, comm, &status);
    MPI_Recv(&g_order, 1, MPI_INT, src, 0, comm, &status);
    MPI_Recv(&a_order, 1, MPI_INT, src, 0, comm, &status);
    MPI_Recv(&parent_dim, 1, MPI_INT, src, 0, comm, &status);
    MPI_Recv(&parent_tag, 1, MPI_INT, src, 0, comm, &status);

    priv_MPI_Recv(cur_elem_ordering, src, comm);

    size_t vsize;

    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
    reference_cells.resize(vsize);
    for (auto& re : reference_cells)
        re.mpi_recv(src, comm);

    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
    physical_cells.resize(vsize);
    for (auto& pe : physical_cells)
        pe.mpi_recv(src, comm);

    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
    reference_faces.resize(vsize);
    for (auto& rf : reference_faces)
        rf.mpi_recv(src, comm);

    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
    physical_faces.resize(vsize);
    for (auto& pf : physical_faces)
        pf.mpi_recv(src, comm);

    priv_MPI_Recv(faceTags, src, comm);
    priv_MPI_Recv(faceNodesTags, src, comm);
}
#endif /* USE_MPI */
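The comment at the top of mpi_send() defers batching the eight scalar header sends. As a hedged sketch of what that batching could look like: the scalars are all MPI_INT, so they could travel in a single message each way. mpi_send_header/mpi_recv_header are hypothetical names; only the member fields come from the diff.

/* Hypothetical batched variant of the scalar header exchange. */
void entity::mpi_send_header(int dst, MPI_Comm comm)
{
    /* One message instead of eight: gather the scalars... */
    int header[8] = { dim, tag, elemType, elemType_2D,
                      g_order, a_order, parent_dim, parent_tag };
    MPI_Send(header, 8, MPI_INT, dst, 0, comm);
}

void entity::mpi_recv_header(int src, MPI_Comm comm)
{
    /* ...and scatter them back on the receiving side, in the same order. */
    int header[8];
    MPI_Status status;
    MPI_Recv(header, 8, MPI_INT, src, 0, comm, &status);
    dim        = header[0]; tag         = header[1];
    elemType   = header[2]; elemType_2D = header[3];
    g_order    = header[4]; a_order     = header[5];
    parent_dim = header[6]; parent_tag  = header[7];
}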
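Finally, a hedged usage sketch, not part of the commit, of how the pair is meant to be driven: the destination rank must call mpi_recv() with a matching source rank and communicator, and the nested objects arrive in exactly the order they were sent. distribute_entity is a hypothetical helper name.

#include <mpi.h>

/* Rank 0 owns a fully populated entity and ships it to rank 1. */
void distribute_entity(entity& e, MPI_Comm comm)
{
    int rank;
    MPI_Comm_rank(comm, &rank);
    if (rank == 0)
        e.mpi_send(1, comm);    /* send to rank 1 */
    else if (rank == 1)
        e.mpi_recv(0, comm);    /* receive from rank 0 into an empty entity */
}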