Commit 06657626 authored by Boris Martin

Topology aware distributed formulation

parent 2d80c095
Pipeline #12700 failed
@@ -14,6 +14,7 @@
#include <CSVio.h>
#include <Options.h>
#include <gmsh.h>
#include <numeric>
#ifdef HAVE_MPI
#include <mpi.h>
@@ -178,7 +179,39 @@ namespace gmshfem::field
            MPI_COMM_WORLD);
      }
      else {
        const auto &ranks = *neighboringRanks;
        std::vector< unsigned long long > recvSizes(ranks.size());
        unsigned long long toSend = local.size();
        // 1) Send my local size to all my neighbors
        std::vector< MPI_Request > sendRequests(ranks.size()), receiveRequests(ranks.size());
        for (size_t k = 0; k < ranks.size(); ++k) {
          MPI_Isend(&toSend, 1, MPI_UNSIGNED_LONG_LONG, ranks[k], rank, MPI_COMM_WORLD, &sendRequests[k]);
          MPI_Irecv(&recvSizes[k], 1, MPI_UNSIGNED_LONG_LONG, ranks[k], ranks[k], MPI_COMM_WORLD, &receiveRequests[k]);
        }
        MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE);
        MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE);
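        // 2) Total number of entries expected from the neighbors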
        size_t total_size = std::reduce(recvSizes.begin(), recvSizes.end());
        // Does global need to contain local ?
        global.resize(total_size);
        sendRequests.clear(); sendRequests.resize(ranks.size());
        receiveRequests.clear(); receiveRequests.resize(ranks.size());
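        // 3) Exchange the entries themselves: each neighbor's payload is received at its own offset in global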
        size_t currentOffset = 0;
        for (size_t k = 0; k < ranks.size(); ++k) {
          MPI_Isend(local.data(), local.size(), mpi_struct_type, ranks[k], rank, MPI_COMM_WORLD, &sendRequests[k]);
          MPI_Irecv(global.data() + currentOffset, recvSizes[k], mpi_struct_type, ranks[k], ranks[k], MPI_COMM_WORLD, &receiveRequests[k]);
          currentOffset += recvSizes[k];
        }
        MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE);
        MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE);
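        // Append the locally owned entries after the data received from the neighbors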
        for (auto entry: local) {
          global.push_back(entry);
        }
      }
      // Free the type
...
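Side note (not part of this commit): the hand-rolled Isend/Irecv exchange above could also be expressed with MPI-3 neighborhood collectives on a distributed graph communicator. The sketch below is only an illustration under assumptions: the helper name neighborGather and the parameter entryType are invented for the example, while local, global and the list of neighboring ranks mirror the variables used in the diff.

// Illustrative sketch, not gmshfem code: neighbor-only gather via MPI-3 neighborhood collectives.
#include <mpi.h>
#include <cstddef>
#include <numeric>
#include <vector>

template< typename T >
std::vector< T > neighborGather(const std::vector< T > &local,
                                const std::vector< int > &ranks, // neighboring ranks (assumed symmetric)
                                MPI_Datatype entryType)          // committed datatype describing one entry of local
{
  // Build a communicator whose neighborhood is exactly the given ranks.
  MPI_Comm neighborComm;
  const int degree = static_cast< int >(ranks.size());
  MPI_Dist_graph_create_adjacent(MPI_COMM_WORLD,
                                 degree, ranks.data(), MPI_UNWEIGHTED,
                                 degree, ranks.data(), MPI_UNWEIGHTED,
                                 MPI_INFO_NULL, 0 /* no reorder */, &neighborComm);

  // 1) Exchange sizes with the neighbors only.
  int sendCount = static_cast< int >(local.size());
  std::vector< int > recvCounts(ranks.size());
  MPI_Neighbor_allgather(&sendCount, 1, MPI_INT,
                         recvCounts.data(), 1, MPI_INT, neighborComm);

  // 2) Exchange the entries themselves, one contiguous slot per neighbor.
  std::vector< int > displs(ranks.size());
  std::exclusive_scan(recvCounts.begin(), recvCounts.end(), displs.begin(), 0);
  std::vector< T > global(std::accumulate(recvCounts.begin(), recvCounts.end(), std::size_t{ 0 }));
  MPI_Neighbor_allgatherv(local.data(), sendCount, entryType,
                          global.data(), recvCounts.data(), displs.data(),
                          entryType, neighborComm);

  MPI_Comm_free(&neighborComm);
  return global;
}

Unlike the committed code, this sketch does not append the local entries to the gathered result; the caller would do that if needed.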
@@ -1151,7 +1151,7 @@ namespace gmshfem::problem
    // Make all fields synchronize ownerships
    for(auto [tag, fieldInterfacePtr] : _unknownFields) {
      msg::debug << '[' << rank << "] does MPI prepro of field with tag " << tag << '.' << msg::endl;
      fieldInterfacePtr->preProMPI(neighboringRanks);
      localOwnedNumDof += fieldInterfacePtr->getAllOwnedDofs().size();
      localNonOwnedNumDof += fieldInterfacePtr->getNonOwnedDofs().size();
    }
...