From 61a06de0734c10d513bcf066f60c90b2feb37f4a Mon Sep 17 00:00:00 2001
From: Jonathan Lambrechts <jonathan.lambrechts@uclouvain.be>
Date: Tue, 21 Jun 2011 10:50:46 +0000
Subject: [PATCH] dg : parallel implicit CG

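Add dofManager::scatterSolution(): after a parallel solve, each rank sends
the values of the unknowns it owns to the ranks holding ghost copies of them
(using the parentByProc/ghostByProc tables built by _parallelFinalize), and
stores the values received for its own ghost dofs so that getDofValue()
returns them transparently. Also move normInfRightHandSide() up to
linearSystemBase so the residual norm can be queried without knowing the
scalar type.

Sketch of the intended call sequence (the backend choice and the entity/type
arguments are illustrative only):

    linearSystemPETSc<double> sys;        // any linearSystem backend
    dofManager<double> dofs(&sys, true);  // isParallel = true
    // ... numberDof()/numberGhostDof() and assembly ...
    sys.systemSolve();
    dofs.scatterSolution();               // fetch ghost values from owners
    double v;
    dofs.getDofValue(ent, type, v);       // now also valid for ghost dofs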
---
 Solver/CMakeLists.txt |  1 +
 Solver/dofManager.cpp | 33 ++++++++++++++++++
 Solver/dofManager.h   | 80 +++++++++++++++++++------------------------
 Solver/linearSystem.h |  2 +-
 4 files changed, 70 insertions(+), 46 deletions(-)
 create mode 100644 Solver/dofManager.cpp

diff --git a/Solver/CMakeLists.txt b/Solver/CMakeLists.txt
index 1a003c3749..dd798487d2 100644
--- a/Solver/CMakeLists.txt
+++ b/Solver/CMakeLists.txt
@@ -7,6 +7,7 @@ set(SRC
   linearSystem.cpp
   linearSystemCSR.cpp
   linearSystemPETSc.cpp
+  dofManager.cpp
   groupOfElements.cpp
   elasticityTerm.cpp
   elasticitySolver.cpp
diff --git a/Solver/dofManager.cpp b/Solver/dofManager.cpp
new file mode 100644
index 0000000000..2ce4b596a4
--- /dev/null
+++ b/Solver/dofManager.cpp
@@ -0,0 +1,33 @@
+#include "dofManager.h"
+template<>
+void dofManager<double>::scatterSolution()
+{
+#ifdef HAVE_MPI
+  if (!_parallelFinalized) {
+    _parallelFinalize();
+  }
+  MPI_Status status;
+  std::vector<std::vector<double> > sendBuf(Msg::GetCommSize()), recvBuf(Msg::GetCommSize());
+  std::vector<MPI_Request> reqRecv(Msg::GetCommSize()), reqSend(Msg::GetCommSize());
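+  // receive values of our ghost dofs from their owners; send values of our dofs that other ranks ghost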
+  for (int i = 0; i<Msg::GetCommSize(); i++) {
+    reqRecv[i] = reqSend[i] = MPI_REQUEST_NULL;
+    if (!ghostByProc[i].empty()) {
+      recvBuf[i].resize(ghostByProc[i].size());
+      MPI_Irecv (&recvBuf[i][0], recvBuf[i].size(), MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &reqRecv[i]);
+    }
+    if (!parentByProc[i].empty()) {
+      getDofValue(parentByProc[i], sendBuf[i]);
+      MPI_Isend(&sendBuf[i][0], sendBuf[i].size(), MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &reqSend[i]);
+    }
+  }
+  int index;
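+  // as each receive completes, store the incoming values for our ghost dofs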
+  while (MPI_Waitany (Msg::GetCommSize(), &reqRecv[0], &index, &status) == 0 && index != MPI_UNDEFINED) {
+    if (status.MPI_TAG == 0)
+      for (size_t j = 0; j < recvBuf[index].size(); j++)
+        ghostValue[ghostByProc[index][j]] = recvBuf[index][j];
+  }
+  MPI_Waitall (Msg::GetCommSize(), &reqSend[0], MPI_STATUSES_IGNORE);
+#endif
+}
diff --git a/Solver/dofManager.h b/Solver/dofManager.h
index 22a39f7a15..d6fbe7a26f 100644
--- a/Solver/dofManager.h
+++ b/Solver/dofManager.h
@@ -124,12 +124,15 @@ class dofManager{
  private :
  // these dofs are local images of ghost dofs located on another proc (proc id given by the map).
   // this is a first try, maybe not the final implementation
-  std::map<Dof, int > ghosted;  // dof => procId
+  std::map<Dof, std::pair<int, int> > ghostByDof;  // dof => procId, globalId
+  std::map<Dof, T> ghostValue;
+  std::vector<std::vector<Dof> > ghostByProc, parentByProc;
   int _localSize;
   bool _parallelFinalized;
   bool _isParallel;
-  public:
   void _parallelFinalize();
+  public:
+  void scatterSolution();
 
  public:
   dofManager(linearSystem<dataMat> *l, bool isParallel=false)
@@ -175,14 +178,14 @@ class dofManager{
   inline void numberGhostDof (Dof key, int procId) {
     if (fixed.find(key) != fixed.end()) return;
     if (constraints.find(key) != constraints.end()) return;
-    if (ghosted.find(key) != ghosted.end()) return;
-    ghosted[key] = procId;
+    if (ghostByDof.find(key) != ghostByDof.end()) return;
+    ghostByDof[key] = std::make_pair(procId, 0);
   }
   inline void numberDof(Dof key)
   {
     if (fixed.find(key) != fixed.end()) return;
     if (constraints.find(key) != constraints.end()) return;
-    if (ghosted.find(key) != ghosted.end()) return;
+    if (ghostByDof.find(key) != ghostByDof.end()) return;
 
     std::map<Dof, int> :: iterator it = unknown.find(key);
     if (it == unknown.end()) {
@@ -208,8 +211,8 @@ class dofManager{
   virtual inline void getDofValue(Dof key,  dataVec &val) const
   {
     {
-      typename std::map<Dof, dataVec>::const_iterator it = fixed.find(key);
-      if (it != fixed.end()) {
+      typename std::map<Dof, dataVec>::const_iterator it = ghostValue.find(key);
+      if (it != ghostValue.end()) {
         val =  it->second;
         return;
       }
@@ -221,6 +224,13 @@ class dofManager{
         return;
       }
     }
+    {
+      typename std::map<Dof, dataVec>::const_iterator it = fixed.find(key);
+      if (it != fixed.end()) {
+        val =  it->second;
+        return;
+      }
+    }
     {
       typename std::map<Dof, DofAffineConstraint< dataVec > >::const_iterator it =
         constraints.find(key);
@@ -239,36 +249,7 @@ class dofManager{
   }
   inline void getDofValue(int ent, int type, dataVec &v) const
   {
-    Dof key(ent, type);
-    {
-      typename std::map<Dof, dataVec>::const_iterator it = fixed.find(key);
-      if (it != fixed.end()){
-        v = it->second;
-        return;
-      }
-    }
-    {
-      std::map<Dof, int>::const_iterator it = unknown.find(key);
-      if (it != unknown.end()){
-        _current->getFromSolution(it->second, v);
-        return;
-      }
-    }
-    {
-      typename std::map<Dof, DofAffineConstraint< dataVec > >::const_iterator it =
-        constraints.find(key);
-      if (it != constraints.end()){
-        v = it->second.shift;
-        dataVec tmp(v);
-        for (unsigned i = 0; i < (it->second).linear.size(); i++){
-          std::map<Dof, int>::const_iterator itu = unknown.find
-            (((it->second).linear[i]).first);
-          getDofValue(((it->second).linear[i]).first, tmp);
-          dofTraits<T>::gemm(v, ((it->second).linear[i]).second, tmp, 1, 1);
-        }
-        return;
-      }
-    }
+    getDofValue(Dof(ent, type), v);
   }
   inline void getDofValue(MVertex *v, int iComp, int iField, dataVec &value) const
   {
@@ -576,6 +557,8 @@ void dofManager<T>::_parallelFinalize()
   int _numStart;
   int _numTotal;
   MPI_Status status;
+  parentByProc.resize(Msg::GetCommSize());
+  ghostByProc.resize(Msg::GetCommSize());
   if (Msg::GetCommRank() == 0){
     _numStart = 0;
   }
@@ -592,8 +575,10 @@ void dofManager<T>::_parallelFinalize()
   int *nRequested = new int[Msg::GetCommSize()];
   for (int i = 0; i<Msg::GetCommSize(); i++)
     nRequest[i] = 0;
-  for (std::map <Dof, int >::iterator it = ghosted.begin(); it != ghosted.end(); it++)
-    nRequest[it->second] ++;
+  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
+    int procId = it->second.first;
+    it->second.second = nRequest[procId]++;
+  }
   MPI_Alltoall(nRequest, 1, MPI_INT, nRequested, 1, MPI_INT, MPI_COMM_WORLD);
   long int **recv0 = new long int*[Msg::GetCommSize()];
   int **recv1 = new int*[Msg::GetCommSize()];
@@ -609,13 +594,16 @@ void dofManager<T>::_parallelFinalize()
     send1[i] = new int[nRequested[i]];
     recv1[i] = new int[nRequest[i]];
     reqSend0[i] = reqSend1[i] = reqRecv0[i] = reqRecv1[i] = MPI_REQUEST_NULL;
+    parentByProc[i].resize(nRequested[i], Dof(0,0));
+    ghostByProc[i].resize(nRequest[i], Dof(0,0));
   }
   for (int i = 0; i<Msg::GetCommSize(); i++)
     nRequest [i] = 0;
-  for (std::map <Dof, int >::iterator it = ghosted.begin(); it != ghosted.end(); it++) {
-    int proc = it->second;
+  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
+    int proc = it->second.first;
     send0 [proc] [nRequest[proc]*2] = it->first.getEntity();
     send0 [proc] [nRequest[proc]*2+1] = it->first.getType();
+    ghostByProc[proc][nRequest[proc]] = it->first;
     nRequest [proc] ++;
   }
   for (int i = 0; i<Msg::GetCommSize(); i++) {
@@ -632,20 +620,22 @@ void dofManager<T>::_parallelFinalize()
          index != MPI_UNDEFINED) {
     if (status.MPI_TAG == 0) {
       for (int j = 0; j < nRequested[index]; j++) {
-        std::map<Dof, int>::iterator it = unknown.find
-          (Dof(recv0[index][j*2], recv0[index][j*2+1]));
+        Dof d(recv0[index][j*2], recv0[index][j*2+1]);
+        std::map<Dof, int>::iterator it = unknown.find(d);
         if (it == unknown.end ())
           Msg::Error ("ghost Dof does not exist on parent process");
         send1[index][j] = it->second;
+        parentByProc[index][j] = d;
       }
       MPI_Isend(send1[index], nRequested[index], MPI_INT, index, 1,
                 MPI_COMM_WORLD, &reqSend1[index]);
     }
   }
+  // renumber our ghost dofs with the global ids received from their parent procs
   for (int i = 0; i<Msg::GetCommSize(); i++)
     nRequest[i] = 0;
-  for (std::map <Dof, int>::iterator it = ghosted.begin(); it != ghosted.end(); it++) {
-    int proc = it->second;
+  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
+    int proc = it->second.first;
     unknown[it->first] = recv1 [proc][nRequest[proc] ++];
   }
   MPI_Waitall (Msg::GetCommSize(), reqSend0, MPI_STATUS_IGNORE);
diff --git a/Solver/linearSystem.h b/Solver/linearSystem.h
index 80767984ef..6b3b9f2708 100644
--- a/Solver/linearSystem.h
+++ b/Solver/linearSystem.h
@@ -23,6 +23,7 @@ class linearSystemBase {
   virtual int systemSolve() = 0;
   void setParameter (std::string key, std::string value);
   virtual void insertInSparsityPattern(int _row, int _col){};
+  virtual double normInfRightHandSide() const = 0;
 };
 
 template <class scalar>
@@ -35,7 +36,6 @@ class linearSystem : public linearSystemBase {
   virtual void addToRightHandSide(int _row, const scalar &val) = 0;
   virtual void getFromRightHandSide(int _row, scalar &val) const = 0;
   virtual void getFromSolution(int _row, scalar &val) const = 0;
-  virtual double normInfRightHandSide() const = 0;
 };
 
 #endif
-- 
GitLab