Commit 70422531 authored by Bastien Gorissen

Removes "Get vertex arrays" from the remote menu, and broadcasts the options string to all MPI nodes so that it is parsed on every node before the vertex arrays are computed.

parent 7cefc036
@@ -133,6 +133,39 @@ static void addToVertexArrays(int length, const char* bytes, int swap)
   delete toAdd;
 }
 
+static void gatherAndSendVertexArrays(GmshClient* client, bool swap) {
+  int rank = Msg::GetCommRank();
+  int nbDaemon = Msg::GetCommSize();
+  // tell every node to start computing
+  int mpi_msg = MPI_GMSH_COMPUTE_VIEW;
+  MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
+  // fill the arrays on the master node
+  for(unsigned int i = 0; i < PView::list.size(); i++)
+    PView::list[i]->fillVertexArrays();
+  // wait for and receive the data from every other node
+  for (int i = 0; i < nbDaemon - 1; i++) {
+    int nbArrays;
+    MPI_Status status;
+    MPI_Recv(&nbArrays, 1, MPI_INT, MPI_ANY_SOURCE,
+             MPI_GMSH_DATA_READY, MPI_COMM_WORLD, &status);
+    int source = status.MPI_SOURCE;
+    // get each varray in turn, then add it to the varrays of
+    // the master node
+    for (int j = 0; j < nbArrays; j++) {
+      int len;
+      MPI_Status status2;
+      MPI_Recv(&len, 1, MPI_INT, status.MPI_SOURCE,
+               MPI_GMSH_VARRAY_LEN, MPI_COMM_WORLD, &status2);
+      char str[len];
+      MPI_Recv(str, len, MPI_CHAR, status.MPI_SOURCE,
+               MPI_GMSH_VARRAY, MPI_COMM_WORLD, &status2);
+      addToVertexArrays(len, str, swap);
+    }
+  }
+  computeAndSendVertexArrays(client, false);
+}
+
 int GmshRemote()
 {
   GmshClient *client = Msg::GetClient();
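For context (not part of this commit): the MPI_Recv calls in gatherAndSendVertexArrays() imply that, after the MPI_GMSH_COMPUTE_VIEW broadcast, each non-master rank fills its own vertex arrays and sends them back to rank 0 with the same tags. A minimal sketch of that daemon-side sender, under the assumption that the vertex arrays have already been serialized into byte strings (the serialization and the function name are not shown in this diff):

// Hypothetical daemon-side counterpart of gatherAndSendVertexArrays().
// 'packed' is assumed to hold one serialized vertex array per entry, in the
// byte layout that addToVertexArrays() parses on rank 0.
static void sendVertexArraysToMaster(const std::vector<std::string> &packed)
{
  // announce how many arrays will follow (matched by the master's MPI_Recv
  // with tag MPI_GMSH_DATA_READY)
  int nbArrays = (int)packed.size();
  MPI_Send(&nbArrays, 1, MPI_INT, 0, MPI_GMSH_DATA_READY, MPI_COMM_WORLD);
  for(unsigned int i = 0; i < packed.size(); i++){
    // each array goes out as a length (tag MPI_GMSH_VARRAY_LEN) followed by
    // the raw bytes (tag MPI_GMSH_VARRAY), exactly what the master receives
    // and feeds to addToVertexArrays()
    int len = (int)packed[i].size();
    MPI_Send(&len, 1, MPI_INT, 0, MPI_GMSH_VARRAY_LEN, MPI_COMM_WORLD);
    MPI_Send((void*)packed[i].data(), len, MPI_CHAR, 0, MPI_GMSH_VARRAY,
             MPI_COMM_WORLD);
  }
}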
@@ -143,6 +176,7 @@ int GmshRemote()
   if(!client && rank == 0) return 0;
 
   if(client && nbDaemon < 2) computeAndSendVertexArrays(client);
+  else if(client && nbDaemon >= 2 && rank == 0) gatherAndSendVertexArrays(client, false);
 
   while(1){
@@ -177,39 +211,17 @@ int GmshRemote()
         break;
       }
       else if(type == GmshSocket::GMSH_VERTEX_ARRAY){
+#if !defined(HAVE_MPI)
         ParseString(msg);
-#if !defined(HAVE_MPI)
         computeAndSendVertexArrays(client);
 #else
-        // FIXME should parse options on each node before computing varrays!
-        // tell every node to start computing
-        int mpi_msg = MPI_GMSH_COMPUTE_VIEW;
+        int mpi_msg = MPI_GMSH_PARSE_STRING;
         MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
-        // fill the arrays on the master node
-        for(unsigned int i = 0; i < PView::list.size(); i++)
-          PView::list[i]->fillVertexArrays();
-        // wait and send the data from every other node
-        for (int i = 0; i < nbDaemon - 1; i++) {
-          int nbArrays;
-          MPI_Status status;
-          MPI_Recv(&nbArrays, 1, MPI_INT, MPI_ANY_SOURCE,
-                   MPI_GMSH_DATA_READY, MPI_COMM_WORLD, &status);
-          int source = status.MPI_SOURCE;
-          // get each varray in turn, then add it to the varrays of
-          // the master node
-          for (int j = 0; j < nbArrays; j++) {
-            int len;
-            MPI_Status status2;
-            MPI_Recv(&len, 1, MPI_INT, status.MPI_SOURCE,
-                     MPI_GMSH_VARRAY_LEN, MPI_COMM_WORLD, &status2);
-            char str[len];
-            MPI_Recv(str, len, MPI_CHAR, status.MPI_SOURCE,
-                     MPI_GMSH_VARRAY, MPI_COMM_WORLD, &status2);
-            addToVertexArrays(len, str, swap);
-          }
-        }
-        computeAndSendVertexArrays(client, false);
+        MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
+        MPI_Bcast(msg, length, MPI_CHAR, 0, MPI_COMM_WORLD);
+        gatherAndSendVertexArrays(client, swap);
 #endif
       }
       else if(type == GmshSocket::GMSH_MERGE_FILE){
@@ -221,6 +233,7 @@ int GmshRemote()
         MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
         MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
         MPI_Bcast(msg, length, MPI_CHAR, 0, MPI_COMM_WORLD);
+        gatherAndSendVertexArrays(client, swap);
 #endif
       }
       else if(type == GmshSocket::GMSH_PARSE_STRING){
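The "broadcasts options to be parsed" part of the commit message corresponds to the MPI_GMSH_PARSE_STRING broadcasts above. The receiving side on the other ranks is not part of this diff; a minimal sketch of what it presumably does, mirroring the three broadcasts and then parsing the string locally before the vertex arrays are filled (everything here except the MPI calls and ParseString is an assumption):

// Hypothetical fragment of the non-master daemon loop (not in this diff):
int mpi_msg;
MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
if(mpi_msg == MPI_GMSH_PARSE_STRING){
  // receive the option string broadcast by rank 0
  int length;
  MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
  std::vector<char> buf(length);
  MPI_Bcast(&buf[0], length, MPI_CHAR, 0, MPI_COMM_WORLD);
  // parse it locally so this node uses the same view options as the master
  ParseString(std::string(buf.begin(), buf.end()));
}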
...
@@ -183,9 +183,6 @@ static void file_remote_cb(Fl_Widget *w, void *data)
         FlGui::instance()->updateViews();
         drawContext::global()->draw();
       }
-      else if(str == "varrays"){
-        server->SendString(GmshSocket::GMSH_VERTEX_ARRAY, " ");
-      }
       else if(str == "test"){
         server->SendString(GmshSocket::GMSH_SPEED_TEST, "Speed test");
       }
@@ -2206,7 +2203,6 @@ static Fl_Menu_Item bar_table[] = {
     {"Start...", 0, (Fl_Callback *)file_remote_cb, (void*)"start"},
     {"Merge...", 0, (Fl_Callback *)file_remote_cb, (void*)"merge"},
     {"Clear", 0, (Fl_Callback *)file_remote_cb, (void*)"clear"},
-    {"Get vertex arrays", 0, (Fl_Callback *)file_remote_cb, (void*)"varrays"},
     {"Stop", 0, (Fl_Callback *)file_remote_cb, (void*)"stop"},
     {0},
     {"New Window", 0, (Fl_Callback *)file_window_cb, (void*)"new"},
...