Skip to content
Snippets Groups Projects
Commit 6e764f7c authored by Boris Martin's avatar Boris Martin
Browse files

seemingly working P2P setup

parent 74d396b9
No related branches found
No related tags found
No related merge requests found
Pipeline #12711 failed
...@@ -429,17 +429,16 @@ namespace gmshfem::field ...@@ -429,17 +429,16 @@ namespace gmshfem::field
myOwnedDofs.push_back({dof->numType(), dof->entity(), dof->numGlobalDof()}); myOwnedDofs.push_back({dof->numType(), dof->entity(), dof->numGlobalDof()});
} }
if(!neighboringRanks) {
// Get the sizes and offset of the global array // Get the sizes and offset of the global array
int total_size = 0; int total_size = 0;
std::vector< int > sizes(commSize), displs(commSize); std::vector< int > sizes(commSize), displs(commSize);
int loc_size = myOwnedDofs.size(); int loc_size = myOwnedDofs.size();
MPI_Allgather(&loc_size, 1, MPI_INT, sizes.data(), 1, MPI_INT, MPI_COMM_WORLD); MPI_Allgather(&loc_size, 1, MPI_INT, sizes.data(), 1, MPI_INT, MPI_COMM_WORLD);
for(unsigned i = 0; i < commSize; ++i) { for(unsigned i = 0; i < commSize; ++i) {
displs[i] = total_size; displs[i] = total_size;
total_size += sizes[i]; total_size += sizes[i];
} }
// Synchronize the full array // Synchronize the full array
allSharedDofsArray.resize(total_size); allSharedDofsArray.resize(total_size);
...@@ -451,6 +450,36 @@ namespace gmshfem::field ...@@ -451,6 +450,36 @@ namespace gmshfem::field
msg::info << "Gathered allSharedDofsArray. Total size is " << allSharedDofsArray.size() << msg::endl; msg::info << "Gathered allSharedDofsArray. Total size is " << allSharedDofsArray.size() << msg::endl;
} }
MPI_Barrier(MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD);
}
else {
// Peer-to-peer path: the set of neighboring ranks is known, so exchange owned
// dofs only with those neighbors instead of a collective over MPI_COMM_WORLD.
const auto& ranks = *neighboringRanks;
std::vector< unsigned long long > recvSizes(ranks.size());
unsigned long long toSendSize = myOwnedDofs.size();
// 1) Send my local size to all my neighbors
// Tag convention: each message is tagged with the *sender's* rank, so my sends
// use `rank` and the receive from neighbor ranks[k] expects tag ranks[k].
std::vector<MPI_Request> sendRequests(ranks.size()), receiveRequests(ranks.size());
for (size_t k = 0; k < ranks.size(); ++k) {
MPI_Isend(&toSendSize, 1, MPI_UNSIGNED_LONG_LONG, ranks[k], rank, MPI_COMM_WORLD, &sendRequests[k]);
MPI_Irecv(&recvSizes[k], 1, MPI_UNSIGNED_LONG_LONG, ranks[k], ranks[k], MPI_COMM_WORLD, &receiveRequests[k]);
}
// NOTE(review): MPI_Waitall takes an int count; sendRequests.size() is size_t
// and is narrowed implicitly — harmless here since the neighbor count is small.
MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE);
MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE);
// Total number of dofs to receive from all neighbors (std::reduce needs <numeric> —
// TODO confirm it is included at the top of this file).
size_t total_size = std::reduce(recvSizes.begin(), recvSizes.end());
allSharedDofsArray.resize(total_size);
// Reset the request vectors for the second (payload) exchange round.
sendRequests.clear(); sendRequests.resize(ranks.size());
receiveRequests.clear(); receiveRequests.resize(ranks.size());
// 2) Exchange the actual dof arrays: send my full owned-dof list to every
// neighbor, and receive each neighbor's list into its contiguous slice of
// allSharedDofsArray, laid out in neighbor order via currentOffset.
// NOTE(review): the MPI count arguments (myOwnedDofs.size(), recvSizes[k]) are
// narrowed from 64-bit to int — overflows for > 2^31-1 entries; verify this is
// an acceptable bound for the intended problem sizes.
size_t currentOffset = 0;
for (size_t k = 0; k < ranks.size(); ++k) {
MPI_Isend(myOwnedDofs.data(), myOwnedDofs.size(), mpi_struct_type, ranks[k], rank, MPI_COMM_WORLD, &sendRequests[k]);
MPI_Irecv(allSharedDofsArray.data() + currentOffset, recvSizes[k], mpi_struct_type, ranks[k], ranks[k], MPI_COMM_WORLD, &receiveRequests[k]);
currentOffset += recvSizes[k];
}
MPI_Waitall(sendRequests.size(), sendRequests.data(), MPI_STATUSES_IGNORE);
MPI_Waitall(receiveRequests.size(), receiveRequests.data(), MPI_STATUSES_IGNORE);
// NOTE(review): the Waitall pair above already guarantees local completion;
// this global barrier mirrors the collective branch but is likely redundant — confirm.
MPI_Barrier(MPI_COMM_WORLD);
if(rank == 0) {
msg::info << "Gathered allSharedDofsArray with peer-to-peer comms." << msg::endl;
}
}
std::unordered_map< DofIndex, unsigned long long, HashBySecond, std::equal_to<DofIndex> > allSharedDofs; std::unordered_map< DofIndex, unsigned long long, HashBySecond, std::equal_to<DofIndex> > allSharedDofs;
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment