diff --git a/src/entity.cpp b/src/entity.cpp
index 52e86df42ff6f0dbffe268cee912be858025f988..0fd29eba6270af2b757b324123a114883492c200 100644
--- a/src/entity.cpp
+++ b/src/entity.cpp
@@ -894,15 +894,88 @@ entity::populate_entity_data(entity_data_cpu& ed, const model& mod) const
 
 void
 entity::mpi_send(int dst, MPI_Comm comm)
 {
+    /* Here (and in the nested objects) we issue many small MPI_Send()
+     * calls, which could be quite inefficient. However, this code runs
+     * only at the beginning of the simulation, so for now it stays as
+     * is. If it turns out to be a problem we will revisit it. For now,
+     * no premature optimization. */
+    MPI_Send(&dim, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&tag, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&elemType, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&elemType_2D, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&g_order, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&a_order, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&parent_dim, 1, MPI_INT, dst, 0, comm);
+    MPI_Send(&parent_tag, 1, MPI_INT, dst, 0, comm);
+
+    priv_MPI_Send(cur_elem_ordering, dst, comm);
+
+    size_t vsize;
+
+    vsize = reference_cells.size();
+    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
     for (auto& re : reference_cells)
         re.mpi_send(dst, comm);
 
+    vsize = physical_cells.size();
+    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
+    for (auto& pe : physical_cells)
+        pe.mpi_send(dst, comm);
+
+    vsize = reference_faces.size();
+    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
     for (auto& rf : reference_faces)
         rf.mpi_send(dst, comm);
 
+    vsize = physical_faces.size();
+    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
+    for (auto& pf : physical_faces)
+        pf.mpi_send(dst, comm);
+
     priv_MPI_Send(faceTags, dst, comm);
     priv_MPI_Send(faceNodesTags, dst, comm);
 }
+
+void
+entity::mpi_recv(int src, MPI_Comm comm)
+{
+    MPI_Status status;
+    MPI_Recv(&dim, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&tag, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&elemType, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&elemType_2D, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&g_order, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&a_order, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&parent_dim, 1, MPI_INT, src, 0, comm, &status);
+    MPI_Recv(&parent_tag, 1, MPI_INT, src, 0, comm, &status);
+
+    priv_MPI_Recv(cur_elem_ordering, src, comm);
+
+    size_t vsize;
+
+    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
+    reference_cells.resize( vsize );
+    for (auto& re : reference_cells)
+        re.mpi_recv(src, comm);
+
+    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
+    physical_cells.resize( vsize );
+    for (auto& pe : physical_cells)
+        pe.mpi_recv(src, comm);
+
+    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
+    reference_faces.resize( vsize );
+    for (auto& rf : reference_faces)
+        rf.mpi_recv(src, comm);
+
+    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
+    physical_faces.resize( vsize );
+    for (auto& pf : physical_faces)
+        pf.mpi_recv(src, comm);
+
+    priv_MPI_Recv(faceTags, src, comm);
+    priv_MPI_Recv(faceNodesTags, src, comm);
+}
 
 #endif /* USE_MPI */
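
Note: the priv_MPI_Send()/priv_MPI_Recv() helpers used for cur_elem_ordering, faceTags and faceNodesTags are not part of this hunk. As a minimal sketch (an assumption, not the repository's actual implementation), vector helpers of this kind could follow the same size-then-payload pattern the diff uses for the nested cell/face objects: send the element count as MPI_UNSIGNED_LONG_LONG (matching the convention above for size_t), then the contents as raw bytes via MPI_BYTE. The real helpers may be entity members and may use typed MPI datatypes instead.

/* Sketch only: assumes a trivially copyable element type and that size_t
 * matches MPI_UNSIGNED_LONG_LONG, as the surrounding diff already does. */
#include <mpi.h>
#include <cstddef>
#include <type_traits>
#include <vector>

template<typename T>
void priv_MPI_Send(const std::vector<T>& v, int dst, MPI_Comm comm)
{
    static_assert(std::is_trivially_copyable<T>::value,
        "raw-byte transfer requires a trivially copyable element type");
    size_t vsize = v.size();
    MPI_Send(&vsize, 1, MPI_UNSIGNED_LONG_LONG, dst, 0, comm);
    if (vsize > 0)
        /* MPI_Send() takes an int count, so the payload must fit in an int */
        MPI_Send(v.data(), static_cast<int>(vsize*sizeof(T)), MPI_BYTE, dst, 0, comm);
}

template<typename T>
void priv_MPI_Recv(std::vector<T>& v, int src, MPI_Comm comm)
{
    MPI_Status status;
    size_t vsize;
    MPI_Recv(&vsize, 1, MPI_UNSIGNED_LONG_LONG, src, 0, comm, &status);
    v.resize(vsize);
    if (vsize > 0)
        MPI_Recv(v.data(), static_cast<int>(vsize*sizeof(T)), MPI_BYTE, src, 0, comm, &status);
}

Sending the size first is what lets the receiving side resize the destination before the payload arrives, which is exactly how entity::mpi_recv() handles the cell and face vectors above. As the in-code comment notes, the many small point-to-point messages could be packed into fewer, larger ones, but since this exchange happens only once at simulation start the simpler form is kept.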