diff --git a/src/problem/Formulation.cpp b/src/problem/Formulation.cpp
index f4982bbeefeb4446e83c0c3adae7199527dc78b1..ba242a33cca9a82a328dedeb998d78a5334b738d 100644
--- a/src/problem/Formulation.cpp
+++ b/src/problem/Formulation.cpp
@@ -1123,31 +1123,42 @@ namespace gmshddm
       return time;
     }
 
-    // TODO
     template< class T_Scalar >
     gmshfem::algebra::MatrixCCS< T_Scalar > Formulation< T_Scalar >::computeMatrix()
     {
       const int outerVerbosity = gmshfem::common::Options::instance()->verbose;
       const int innerVerbosity = (outerVerbosity != 0 ? outerVerbosity - 1 : 0);
 
-      if(mpi::getMPISize() != 1) {
-        throw gmshfem::common::Exception("Formulation< T_Scalar >::computeMatrix() does not work on multiple processes");
-      }
       Mat A;
-      Vec X, Y;
+      Vec X, Y, Y_seq;
+      VecScatter ctx;
+
+      // Create distributed work vectors with the same parallel layout as the RHS:
+      // Y receives the operator output, X carries the probing unit vector
+      PetscInt locSize = _rhs.size();
+      VecCreateMPI(MPI_COMM_WORLD, locSize, PETSC_DETERMINE, &Y);
+      VecDuplicate(Y, &X);
+
+      // Create a scatter that gathers the distributed output vector into a sequential
+      // vector replicated on every rank, needed to read a full matrix column at once;
+      // VecScatterCreateToAll allocates Y_seq itself
+      PetscInt globSize;
+      VecGetSize(Y, &globSize);
+      VecScatterCreateToAll(Y, &ctx, &Y_seq);
+
+      if(mpi::getMPIRank() == 0) {
+        gmshfem::msg::info << "Local size: " << locSize << ", global size: " << globSize << "." << gmshfem::msg::endl;
+      }
 
-      VecDuplicate(_rhs.getPetsc(), &X);
-      VecDuplicate(_rhs.getPetsc(), &Y);
 
-      MatCreateShell(MPI_COMM_SELF, _rhs.size(), _rhs.size(), _rhs.size(), _rhs.size(), this, &A);
-      MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProductA< T_Scalar >);
+      // Set up A as a matrix-free PETSc shell operator
+      MatCreateShell(MPI_COMM_WORLD, locSize, locSize, globSize, globSize, this, &A);
+      MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProductI_A< T_Scalar >);
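+      // MatMult on A is now redirected to the matrix-free routine MatVectProductI_A,
+      // so the operator can be applied without ever storing its entries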
 
       _physicalCommutator = false;
       _artificialCommutator = true;
       togglePhysicalAndArtificialSourceTerms();
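+      // Probe with physical sources disabled and artificial sources enabled,
+      // so that the physical right-hand side does not enter the operator columns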
 
-
-
+
+      // Compute the subdomain matrices, then disable the bilinear terms so that
+      // subsequent assemblies only build the RHS
       for(auto idom = 0ULL; idom < _volume.size(); ++idom) {
         _volume[idom]->setAttribute("ddm::physicalCommutator", _physicalCommutator);
         _volume[idom]->setAttribute("ddm::artificialCommutator", _artificialCommutator);
@@ -1161,24 +1172,47 @@ namespace gmshddm
       }
 
 
-      gmshfem::algebra::Vector< T_Scalar > vec(_rhs.size());
-      std::vector< PetscInt > aiVector(_rhs.size());
-      for(auto i = 0ULL; i < _rhs.size(); ++i) {
+      std::vector< PetscInt > aiVector(globSize);
+      for(PetscInt i = 0; i < globSize; ++i) {
         aiVector[i] = i;
       }
-      std::vector< PetscScalar > aVector(_rhs.size());
+      std::vector< PetscScalar > aVector(globSize);
 
-      std::vector< unsigned long long > aj(_rhs.size() + 1, 0);
+      std::vector< unsigned long long > aj(globSize + 1, 0);
       std::vector< unsigned long long > ai;
-      ai.reserve(_rhs.size());
+      ai.reserve(globSize);
       std::vector< T_Scalar > a;
-      a.reserve(_rhs.size());
-      for(auto i = 0ULL; i < _rhs.size(); ++i) {
-        gmshfem::msg::info << "Column number " << i << " over " << _rhs.size() << "." << gmshfem::msg::endl;
-        vec[i] = 1.;
-        vec.removePetsc();
-        MatVectProductI_A< T_Scalar >(A, vec.getPetsc(), Y);
-        VecGetValues(Y, _rhs.size(), &aiVector[0], &aVector[0]);
+      a.reserve(globSize);
+
+      // Probe the operator column by column: apply A to each canonical basis vector e_i
+      // and collect the nonzero entries of the resulting column
+      for(PetscInt i = 0; i < globSize; ++i) {
+        // Build the canonical basis vector e_i as the input
+        VecSet(X, 0.0);
+        VecSetValue(X, i, 1.0, INSERT_VALUES);
+        VecAssemblyBegin(X);
+        VecAssemblyEnd(X);
+
+        if(mpi::getMPIRank() == 0) {
+          gmshfem::msg::info << "Column " << i << " of " << globSize << "." << gmshfem::msg::endl;
+        }
+        MatMult(A, X, Y);
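+        // Y now holds the i-th column of the shell operator, distributed across the ranks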
+
+        // Gather the distributed column into Y_seq, replicated on every rank
+        VecScatterBegin(ctx, Y, Y_seq, INSERT_VALUES, SCATTER_FORWARD);
+        VecScatterEnd(ctx, Y, Y_seq, INSERT_VALUES, SCATTER_FORWARD);
+
+        VecGetValues(Y_seq, globSize, &aiVector[0], &aVector[0]);
+
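+        // Append the nonzeros of this column to the CCS arrays:
+        // aj holds the column pointers, ai the row indices, a the values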
         aj[i + 1] = aj[i];
         for(auto j = 0ULL; j < aVector.size(); ++j) {
           if(aVector[j] != 0) {
@@ -1186,16 +1220,26 @@ namespace gmshddm
             ai.push_back(j);
             a.push_back(T_Scalar(aVector[j]));
           }
         }
-        ai.reserve(aj.size() + _rhs.size());
-        a.reserve(a.size() + _rhs.size());
-        vec[i] = 0.;
+
+        ai.reserve(ai.size() + globSize);
+        a.reserve(a.size() + globSize);
       }
 
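+      // Re-enable the bilinear terms that were disabled above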
       for(auto idom = 0ULL; idom < _volume.size(); ++idom) {
         _activateBilinear(idom);
       }
 
+      // Clean up the PETSc objects created for the probing
+      VecScatterDestroy(&ctx);
+      VecDestroy(&Y_seq);
+      VecDestroy(&X);
+      VecDestroy(&Y);
+      MatDestroy(&A);
 
       gmshfem::algebra::MatrixCCS< T_Scalar > mat(std::move(ai), std::move(aj), std::move(a));
       return mat;