diff --git a/src/common/resources.cpp b/src/common/resources.cpp
index 6c50778e1c2072874180222699c446d6695d6271..d5fb21010fd172d999bdfa04db8fd05aa847fa26 100644
--- a/src/common/resources.cpp
+++ b/src/common/resources.cpp
@@ -6,8 +6,9 @@
 #include <cstdio>
 
 #if defined(_WIN32) && !defined(__CYGWIN__)
+// note: psapi.h relies on types from windows.h, so windows.h must stay first
 #include <windows.h>
 #include <psapi.h>
 #endif
 
 #if !defined(WIN32) || defined(__CYGWIN__)
@@ -20,6 +21,21 @@
 #include <mach/mach.h>
 #endif
 
+#include "GmshDdm.h"
+#include "MPIInterface.h"
+#include "Options.h"
+
+#include <gmshfem/Message.h>
+#include <gmshfem/Timer.h>
+
+#ifdef HAVE_MPI
+#include <mpi.h>
+#endif
+
+#ifdef HAVE_PETSC
+#include <petsc.h>
+#endif
+
 namespace gmshddm
 {
 
@@ -66,6 +82,46 @@ namespace gmshddm
     }
 
 
+    void s_printResources(const std::string &step, bool first) // default argument is declared in resources.h
+    {
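+      // the static reference timer is started on the first call; "time" below measures wall time since then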
+      static gmshfem::common::Timer timeRef;
+      if(first) timeRef.tick();
+      gmshfem::common::Timer time = timeRef;
+      time.tock();
+
+      double cpuTime, peakRSS, currentRSS;
+      common::getResources(cpuTime, peakRSS, currentRSS);
+      if(mpi::getMPISize() > 1) {
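+        // reduce to per-rank min/max; only rank 0 prints the ranges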
+        double val[3] = {cpuTime, peakRSS, currentRSS};
+        double min[3] = {val[0], val[1], val[2]};
+        double max[3] = {val[0], val[1], val[2]};
+        MPI_Reduce(val, min, 3, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
+        MPI_Reduce(val, max, 3, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
+        if(mpi::getMPIRank() == 0) {
+          if(gmshddm::common::Options::instance()->memory) {
+            gmshfem::msg::info << "* Resources @ " << step << ": Wall = " << time << "s, "
+                               << "CPU = [" << min[0] << "s, " << max[0] << "s], "
+                               << "PeakRSS = [" << min[1] << "MB, " << max[1] << "MB], "
+                               << "CurrentRSS = [" << min[2] << "MB, " << max[2] << "MB]"
+                               << gmshfem::msg::endl;
+          }
+        }
+        MPI_Barrier(MPI_COMM_WORLD); // this helper only depends on MPI, not PETSc
+      }
+      else {
+        if(gmshddm::common::Options::instance()->memory) {
+          gmshfem::msg::info << "* Resources @ " << step << ": Wall = " << time << "s, "
+                             << "CPU = " << cpuTime << "s, "
+                             << "PeakRSS = " << peakRSS << "MB, "
+                             << "CurrentRSS = " << currentRSS << "MB"
+                             << gmshfem::msg::endl;
+        }
+      }
+    }
+
+
   } // namespace common
 
 
diff --git a/src/common/resources.h b/src/common/resources.h
index 64070be9ac0286d3f9cf427eb00eacc8a583b291..e78256e61a04e3872d5a2f7f0a3b1927d4a78a47 100644
--- a/src/common/resources.h
+++ b/src/common/resources.h
@@ -14,6 +14,7 @@ namespace gmshddm
   {
 
     void getResources(double &cpuTime, double &peakRSS, double &currentRSS);
+    void s_printResources(const std::string &step, bool first = false);
 
   }
 
diff --git a/src/problem/CMakeLists.txt b/src/problem/CMakeLists.txt
index 5769d63d4e158c0526da70794d92e2c5b13c2067..fa9bc69926cf89322358f410090cd1e99c513253 100644
--- a/src/problem/CMakeLists.txt
+++ b/src/problem/CMakeLists.txt
@@ -5,6 +5,7 @@
 
 set(SRC
   Formulation.cpp
+  Solvers.cpp
 )
 
 file(GLOB HDR RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.h)
diff --git a/src/problem/Formulation.cpp b/src/problem/Formulation.cpp
index 83a1ad8bbce968e5dfde1134e1a42b4f18f576fc..6bdeb20df0df2f14b02622579feaecdf743c8796 100644
--- a/src/problem/Formulation.cpp
+++ b/src/problem/Formulation.cpp
@@ -4,6 +4,8 @@
 // issues on https://gitlab.onelab.info/gmsh/ddm/issues
 
 #include "Formulation.h"
+#include "Solvers.h"
 
 #include "GmshDdm.h"
 #include "MPIInterface.h"
@@ -33,46 +35,12 @@
   namespace problem
   {
 
-    static void s_printResources(const std::string &step, bool first = false)
-    {
-      static gmshfem::common::Timer timeRef;
-      if(first) timeRef.tick();
-      gmshfem::common::Timer time = timeRef;
-      time.tock();
-
-      double cpuTime, peakRSS, currentRSS;
-      common::getResources(cpuTime, peakRSS, currentRSS);
-      if(mpi::getMPISize() > 1) {
-        double val[3] = {cpuTime, peakRSS, currentRSS};
-        double min[3] = {val[0], val[1], val[2]};
-        double max[3] = {val[0], val[1], val[2]};
-        MPI_Reduce(val, min, 3, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
-        MPI_Reduce(val, max, 3, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
-        if(mpi::getMPIRank() == 0) {
-          if(gmshddm::common::Options::instance()->memory) {
-            gmshfem::msg::info << "* Resources @ " << step << ": Wall = " << time << "s, "
-                               << "CPU = [" << min[0] << "s, " << max[0] << "s], "
-                               << "PeakRSS = [" << min[1] << "Mb, " << max[1] << "Mb], "
-                               << "CurrentRSS = [" << min[2] << "Mb, " << max[2] << "Mb]"
-                               << gmshfem::msg::endl;
-          }
-        }
-        MPI_Barrier(PETSC_COMM_WORLD);
-      }
-      else {
-        if(gmshddm::common::Options::instance()->memory) {
-          gmshfem::msg::info << "* Resources @ " << step << ": Wall = " << time << "s, "
-                             << "CPU = " << cpuTime << "s, "
-                             << "PeakRSS = " << peakRSS << "Mb, "
-                             << "CurrentRSS = " << currentRSS << "Mb"
-                             << gmshfem::msg::endl;
-        }
-      }
-    }
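+    // s_printResources now lives in gmshddm::common (see common/resources.h)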
+    using gmshddm::common::s_printResources;
 
     template< class T_Scalar >
     Formulation< T_Scalar >::Formulation(const std::string &name, const std::vector< std::vector< unsigned int > > &topology) :
-      _name(name), _volume(), _surface(topology.size()), _interfaceFields(), _interfaceMapping(), _rhs(), _absoluteResidual(), _relativeResidual(), _numberOfIterations(0), _physicalCommutator(false), _artificialCommutator(false), _IA(false)
+      _name(name), _volume(), _surface(topology.size()), _interfaceFields(), _interfaceMapping(), _rhs(), _numberOfIterations(0), _physicalCommutator(false), _artificialCommutator(false), _IA(false)
     {
       for(auto i = 0ULL; i < topology.size(); ++i) {
         gmshfem::msg::debug << "Create volume formulation (" << i << ")." << gmshfem::msg::endl;
@@ -87,7 +53,7 @@ namespace gmshddm
 
     template< class T_Scalar >
     Formulation< T_Scalar >::Formulation(const std::string &name, const std::vector< gmshfem::problem::Formulation< T_Scalar > * > &volume, const std::vector< std::unordered_map< unsigned int, gmshfem::problem::Formulation< T_Scalar > * > > &surface) :
-      _name(name), _volume(volume), _surface(surface), _interfaceFields(), _interfaceMapping(), _rhs(), _absoluteResidual(), _relativeResidual(), _numberOfIterations(0), _physicalCommutator(false), _artificialCommutator(false), _IA(false)
+      _name(name), _volume(volume), _surface(surface), _interfaceFields(), _interfaceMapping(), _rhs(), _numberOfIterations(0), _physicalCommutator(false), _artificialCommutator(false), _IA(false)
     {
     }
 
@@ -104,20 +70,6 @@ namespace gmshddm
       }
     }
 
-    template< class T_Scalar >
-    void Formulation< T_Scalar >::_runIterationOperations(const unsigned int iteration, const gmshfem::scalar::Precision< T_Scalar > rnorm)
-    {
-      _absoluteResidual.push_back(rnorm);
-      _relativeResidual.push_back(rnorm / _absoluteResidual[0]);
-      _numberOfIterations = iteration;
-
-      const unsigned int MPI_Rank = mpi::getMPIRank();
-      if(MPI_Rank == 0) {
-        gmshfem::msg::info << " - Iteration " << iteration << ": absolute residual = " << _absoluteResidual.back() << ", relative residual = " << _relativeResidual.back() << gmshfem::msg::endl;
-      }
-      s_printResources("DDM Iteration");
-    }
-
     template< class T_Scalar >
     gmshfem::algebra::Vector< T_Scalar > Formulation< T_Scalar >::_sortDofValues(const gmshfem::algebra::Vector< T_Scalar > &vector, const unsigned int fieldTag)
     {
@@ -944,7 +898,24 @@
     }
 
     template< class T_Scalar >
     gmshfem::common::Timer Formulation< T_Scalar >::solve(const std::string &solver, const double tolerance, const int iterMax, const bool sameMatrixWithArtificialAndPhysicalSources, const bool skipFinalSolutionComputation)
+    {
+      std::unique_ptr< AbstractIterativeSolver< T_Scalar > > solverObject;
+      // For backwards compatibility.
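+      // ("jacobi" selects the Jacobi fixed-point solver; any other name is
+      // forwarded to PETSc as a KSP type, e.g. "gmres", "bcgs", "gmres+mgs".)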
+      if(solver == "jacobi") {
+        solverObject = std::make_unique< JacobiIterativeSolver< T_Scalar > >();
+      }
+      else {
+        solverObject = std::make_unique< KSPIterativeSolver< T_Scalar > >(solver);
+      }
+
+      return solve(*solverObject, tolerance, iterMax, sameMatrixWithArtificialAndPhysicalSources, skipFinalSolutionComputation);
+    }
+
+    template< class T_Scalar >
+    gmshfem::common::Timer Formulation< T_Scalar >::solve(AbstractIterativeSolver< T_Scalar > &solver, const double tolerance, const int iterMax, const bool sameMatrixWithArtificialAndPhysicalSources, const bool skipFinalSolutionComputation)
     {
       // sameMatrixWithArtificialAndPhysicalSources could be automatically set
       // to true if bilinear terms of volume formulations do not use the
@@ -962,7 +933,7 @@
       time.tick();
 
       if(MPI_Rank == 0) {
-        gmshfem::msg::info << "Solving " << _name << " using " << solver << "..." << gmshfem::msg::endl;
+        gmshfem::msg::info << "Solving " << _name << " using " << solver.solverName() << "..." << gmshfem::msg::endl;
       }
       s_printResources("DDM iterative solver begin");
 
@@ -987,7 +958,7 @@
       VecDuplicate(RHS, &Y); // new iterate
 
       MatCreateShell(PETSC_COMM_WORLD, locSize, locSize, globSize, globSize, this, &A);
-      MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProduct< T_Scalar >);
+      MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProduct< T_Scalar >); // PETSc calls MATOP_MULT with three arguments; the wrapper forwards to MatVectProductOverride with _IA
 
       _physicalCommutator = false;
       _artificialCommutator = true;
@@ -1045,67 +1016,13 @@
         }
       }
 
-      // Extract solver and modifiers from "solver"
-      std::string solverBase;
-      std::set<std::string> solverMod;
-      {
-        const std::regex sep("\\+");
-        std::list<std::string> split;
-        std::copy(std::sregex_token_iterator(solver.begin(), solver.end(), sep, -1),
-                  std::sregex_token_iterator(),
-                  std::back_inserter(split));
-
-        if(!split.empty()){
-          std::list<std::string>::iterator it = split.begin();
-          solverBase = *(it++);
-          for(; it != split.end(); it++)
-            solverMod.insert(*it);
-        }
-      }
 
-      // Solver
-      if(solverBase == "jacobi") {
-        _IA = false;
-        Vec W; // residual
-        VecDuplicate(RHS, &W);
-
-        VecSet(X, 0.);
-        PetscReal norm = 1;
-        int i = 0;
-        do { // jacobi
-          MatVectProduct< T_Scalar >(A, X, Y);
-          VecAYPX(Y, 1., RHS);
-          VecWAXPY(W, -1., X, Y);
-          VecCopy(Y, X);
-
-          VecNorm(W, NORM_2, &norm);
-          _runIterationOperations(i, norm);
-          i++;
-        } while(_relativeResidual.back() > tolerance && i < iterMax);
-        VecDestroy(&W);
-      }
-      else { // gmres, bcgs, bcgsl, ...
-        _IA = true;
-        KSP ksp;
-        PC pc;
-        KSPCreate(PETSC_COMM_WORLD, &ksp);
-        KSPSetFromOptions(ksp);
-        KSPSetOperators(ksp, A, A);
-        KSPSetType(ksp, solverBase.c_str());
-        KSPMonitorSet(ksp, PetscInterface< T_Scalar, PetscScalar, PetscInt >::kspPrint, this, PETSC_NULL);
-        KSPSetTolerances(ksp, tolerance, PETSC_DEFAULT, PETSC_DEFAULT, iterMax);
-        KSPGetPC(ksp, &pc);
-        PCSetType(pc, PCNONE);
-        if(solverBase == "gmres"){
-          KSPGMRESSetRestart(ksp, iterMax);
-          if(solverMod.count("mgs"))
-             KSPGMRESSetOrthogonalization(ksp, KSPGMRESModifiedGramSchmidtOrthogonalization);
-        }
-        else if(solverBase == "bcgsl")
-          KSPBCGSLSetEll(ksp, 6);
-        KSPSolve(ksp, RHS, Y);
-        KSPDestroy(&ksp);
-      }
+      // Delegate the iteration to the AbstractIterativeSolver object. KSP
+      // solvers apply the operator in (I - A) form through the shell matrix,
+      // so enable _IA; the Jacobi solver calls MatVectProductOverride with
+      // IA = false directly and ignores the flag.
+      _IA = true;
+      solver.solve(A, X, Y, RHS, tolerance, iterMax);
 
       // ******************************************************
       // Final solution
@@ -1194,7 +1111,7 @@
       VecDuplicate(_rhs.getPetsc(), &Y);
 
       MatCreateShell(PETSC_COMM_SELF, _rhs.size(), _rhs.size(), _rhs.size(), _rhs.size(), this, &A);
-      MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProduct< T_Scalar >);
+      MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProduct< T_Scalar >); // PETSc calls MATOP_MULT with three arguments; the wrapper forwards to MatVectProductOverride with _IA
 
       for(auto idom = 0ULL; idom < _volume.size(); ++idom) {
         _volume[idom]->removeSystem();
@@ -1261,27 +1178,27 @@
     {
       return _rhs;
     }
 
     template< class T_Scalar >
-    std::vector< gmshfem::scalar::Precision< T_Scalar > > Formulation< T_Scalar >::absoluteResidual() const
+    unsigned int Formulation< T_Scalar >::numberOfIterations() const
     {
-      return _absoluteResidual;
+      return _numberOfIterations;
     }
 
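+    // Wrapper registered as the shell-matrix MATOP_MULT callback: it fetches
+    // the Formulation from the matrix context and forwards to the 4-argument
+    // MatVectProductOverride with the formulation's _IA flag.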
     template< class T_Scalar >
-    std::vector< gmshfem::scalar::Precision< T_Scalar > > Formulation< T_Scalar >::relativeResidual() const
+    int MatVectProduct(Mat A, Vec X, Vec Y)
     {
-      return _relativeResidual;
-    }
+      Formulation< T_Scalar > *formulation;
+      MatShellGetContext(A, (void **)&formulation);
 
-    template< class T_Scalar >
-    unsigned int Formulation< T_Scalar >::numberOfIterations() const
-    {
-      return _numberOfIterations;
+      return MatVectProductOverride< T_Scalar >(A, X, Y, formulation->_IA);
     }
 
     template< class T_Scalar >
-    int MatVectProduct(Mat A, Vec X, Vec Y)
+    int MatVectProductOverride(Mat A, Vec X, Vec Y, bool IA)
     {
       PetscFunctionBegin;
 
@@ -1360,7 +1277,7 @@
       }
 
       VecCopy(g_local.getPetsc(), Y);
-      if(formulation->_IA) {
+      if(IA) {
         VecAYPX(Y, -1., X);
       }
 
diff --git a/src/problem/Formulation.h b/src/problem/Formulation.h
index d9ba1bf627ab4f56dd834e2f3c87e003b6e07fc5..2828273f94872aedaeaee89cc71df66f58ac0225 100644
--- a/src/problem/Formulation.h
+++ b/src/problem/Formulation.h
@@ -27,7 +27,13 @@ namespace gmshddm
   {
 
     template< class T_Scalar >
-    static int MatVectProduct(Mat A, Vec X, Vec Y);
+    int MatVectProduct(Mat A, Vec X, Vec Y);
+    template< class T_Scalar >
+    int MatVectProductOverride(Mat A, Vec X, Vec Y, bool IA);
+
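+    // Iterative solver hierarchy, defined in Solvers.h.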
+    template< class T_Scalar >
+    class AbstractIterativeSolver;
 
     template< class T_Scalar >
     class Formulation
@@ -39,8 +44,7 @@ namespace gmshddm
       std::unordered_map< field::InterfaceFieldInterface< T_Scalar > *, field::InterfaceFieldInterface< T_Scalar > * > _interfaceFields;
       std::unordered_map< unsigned int, std::vector< unsigned long long > > _interfaceMapping;
       gmshfem::algebra::Vector< T_Scalar > _rhs;
-      std::vector< gmshfem::scalar::Precision< T_Scalar > > _absoluteResidual;
-      std::vector< gmshfem::scalar::Precision< T_Scalar > > _relativeResidual;
+
       unsigned int _numberOfIterations;
       bool _physicalCommutator;
       bool _artificialCommutator;
@@ -79,26 +83,22 @@ namespace gmshddm
       gmshfem::common::Timer pre();
 
       gmshfem::common::Timer solve(const std::string &solver, const double tolerance = 1e-6, const int iterMax = 1000, const bool sameMatrixWithArtificialAndPhysicalSources = false, const bool skipFinalSolutionComputation = false);
+      gmshfem::common::Timer solve(AbstractIterativeSolver< T_Scalar > &solver, const double tolerance = 1e-6, const int iterMax = 1000, const bool sameMatrixWithArtificialAndPhysicalSources = false, const bool skipFinalSolutionComputation = false);
       gmshfem::algebra::MatrixCCS< T_Scalar > computeMatrix();
 
       void setSolutionIntoInterfaceFields(const std::vector< T_Scalar > &solution);
       const gmshfem::algebra::Vector< T_Scalar > &getRHS() const;
 
-      std::vector< gmshfem::scalar::Precision< T_Scalar > > absoluteResidual() const;
-      std::vector< gmshfem::scalar::Precision< T_Scalar > > relativeResidual() const;
       unsigned int numberOfIterations() const;
 
       friend int MatVectProduct< T_Scalar >(Mat A, Vec X, Vec Y);
+      friend int MatVectProductOverride< T_Scalar >(Mat A, Vec X, Vec Y, bool IA);
 
       // T_Scalar, PetscScalar, T_Interger
       template< class T_Scalar1, class T_Scalar2, class T_Integer >
       struct PetscInterface {
-        static int kspPrint(KSP ksp, T_Integer it, gmshfem::scalar::Precision< T_Scalar2 > rnorm, void *mctx)
-        {
-          Formulation< T_Scalar1 > *formulation = static_cast< Formulation< T_Scalar1 > * >(mctx);
-          formulation->_runIterationOperations(it, rnorm);
-          return 0;
-        }
 
         static const T_Scalar1 *arrayInterface(const T_Scalar2 *array, const T_Integer size)
         {
@@ -115,13 +115,7 @@
           delete[] array;
         }
       };
 
       template< class T_Scalar1, class T_Integer >
       struct PetscInterface< T_Scalar1, T_Scalar1, T_Integer > {
-        static int kspPrint(KSP ksp, T_Integer it, gmshfem::scalar::Precision< T_Scalar1 > rnorm, void *mctx)
-        {
-          Formulation< T_Scalar1 > *formulation = static_cast< Formulation< T_Scalar1 > * >(mctx);
-          formulation->_runIterationOperations(it, rnorm);
-          return 0;
-        }
 
diff --git a/src/problem/Solvers.cpp b/src/problem/Solvers.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1ebfd843616eed42f7218599ad3370248d5c3ce3
--- /dev/null
+++ b/src/problem/Solvers.cpp
@@ -0,0 +1,169 @@
+// GmshDDM - Copyright (C) 2019-2021, A. Royer, C. Geuzaine, B. Martin, Université de Liège
+// GmshDDM - Copyright (C) 2022, A. Royer, C. Geuzaine, B. Martin, Université de Liège
+//
+// See the LICENSE.txt file for license information. Please report all
+// issues on https://gitlab.onelab.info/gmsh/ddm/issues
+
+#include "Solvers.h"
+
+#include "Formulation.h"
+#include "MPIInterface.h"
+#include "resources.h"
+
+#include <gmshfem/Message.h>
+
+#include <algorithm>
+#include <complex>
+#include <iterator>
+#include <list>
+#include <regex>
+#include <set>
+
+
+#ifdef HAVE_PETSC
+#include <petsc.h>
+#endif
+
+
+namespace gmshddm
+{
+
+
+  namespace problem
+  {
+
+
+    template< class T_Scalar >
+    std::vector< gmshfem::scalar::Precision< T_Scalar > > AbstractIterativeSolver< T_Scalar >::absoluteResidual() const
+    {
+      return _absoluteResidual;
+    }
+
+    template< class T_Scalar >
+    std::vector< gmshfem::scalar::Precision< T_Scalar > > AbstractIterativeSolver< T_Scalar >::relativeResidual() const
+    {
+      return _relativeResidual;
+    }
+
+    template< class T_Scalar >
+    void AbstractIterativeSolver< T_Scalar >::_runIterationOperations(const unsigned int iteration, const gmshfem::scalar::Precision< T_Scalar > rnorm)
+    {
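+      // The first recorded residual defines the normalisation: the relative
+      // residual at iteration 0 is therefore 1 by construction.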
+      _absoluteResidual.push_back(rnorm);
+      _relativeResidual.push_back(rnorm / _absoluteResidual[0]);
+      // TODO: propagate the iteration count back to the Formulation, whose
+      // _numberOfIterations member is no longer updated here.
+
+      const unsigned int MPI_Rank = mpi::getMPIRank();
+      if(MPI_Rank == 0) {
+        gmshfem::msg::info << " - Iteration " << iteration << ": absolute residual = " << _absoluteResidual.back() << ", relative residual = " << _relativeResidual.back() << gmshfem::msg::endl;
+      }
+      gmshddm::common::s_printResources("DDM Iteration");
+    }
+
+    template< class T_Scalar >
+    void JacobiIterativeSolver< T_Scalar >::solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol, int maxIter)
+    {
+#ifndef HAVE_PETSC
+      gmshfem::msg::error << "PETSc is required for the Jacobi iterative solver." << gmshfem::msg::endl;
+#else
+      // Stop once the fixed-point residual drops below relTol * ||RHS||.
+      double r0;
+      VecNorm(RHS, NORM_2, &r0);
+      const double stopCriterion = relTol * r0;
+
+      Vec W; // residual
+      VecDuplicate(RHS, &W);
+
+      VecSet(X, 0.);
+      PetscReal norm = 1;
+      int i = 0;
+      do { // jacobi
+        MatVectProductOverride< T_Scalar >(A, X, Y, false);
+        VecAYPX(Y, 1., RHS);
+        VecWAXPY(W, -1., X, Y);
+        VecCopy(Y, X);
+
+        VecNorm(W, NORM_2, &norm);
+        this->_runIterationOperations(i, norm);
+        i++;
+      } while(norm > stopCriterion && i < maxIter);
+      VecDestroy(&W);
+#endif
+    }
+
+    template< class T_Scalar >
+    std::string JacobiIterativeSolver< T_Scalar >::solverName() const
+    {
+      return "jacobi";
+    }
+
+    template< class T_Scalar >
+    void KSPIterativeSolver< T_Scalar >::solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol, int maxIter)
+    {
+#ifndef HAVE_PETSC
+      gmshfem::msg::error << "PETSc is required for KSP iterative solvers." << gmshfem::msg::endl;
+#else
+      // Split the solver name into the KSP base name and its "+"-separated
+      // modifiers, e.g. "gmres+mgs" -> base "gmres" with modifier "mgs".
+      std::string solverBase = _solverName;
+      std::set< std::string > solverMod;
+      {
+        const std::regex sep("\\+");
+        std::list< std::string > split;
+        std::copy(std::sregex_token_iterator(_solverName.begin(), _solverName.end(), sep, -1),
+                  std::sregex_token_iterator(),
+                  std::back_inserter(split));
+
+        if(!split.empty()) {
+          std::list< std::string >::iterator it = split.begin();
+          solverBase = *(it++);
+          for(; it != split.end(); it++)
+            solverMod.insert(*it);
+        }
+      }
+      gmshfem::msg::debug << "KSP solver base: " << solverBase << " (full name: " << _solverName << ")" << gmshfem::msg::endl;
+      KSP ksp;
+      PC pc;
+      KSPCreate(PETSC_COMM_WORLD, &ksp);
+      KSPSetFromOptions(ksp);
+      KSPSetOperators(ksp, A, A);
+      KSPSetType(ksp, solverBase.c_str());
+      KSPMonitorSet(ksp, PetscInterface< T_Scalar, PetscScalar, PetscInt >::kspPrint, this, PETSC_NULL);
+      KSPSetTolerances(ksp, relTol, PETSC_DEFAULT, PETSC_DEFAULT, maxIter);
+      KSPGetPC(ksp, &pc);
+      PCSetType(pc, PCNONE);
+      if(solverBase == "gmres") {
+        KSPGMRESSetRestart(ksp, maxIter);
+        if(solverMod.count("mgs")) {
+          KSPGMRESSetOrthogonalization(ksp, KSPGMRESModifiedGramSchmidtOrthogonalization);
+          gmshfem::msg::print << "Using modified Gram-Schmidt in GMRES" << gmshfem::msg::endl;
+        }
+      }
+      else if(solverBase == "bcgsl")
+        KSPBCGSLSetEll(ksp, 6);
+      KSPSolve(ksp, RHS, Y);
+      KSPDestroy(&ksp);
+#endif
+    }
+
+    template< class T_Scalar >
+    std::string KSPIterativeSolver< T_Scalar >::solverName() const
+    {
+      return this->_solverName;
+    }
+
+    // Instantiate the required templates. The explicit instantiations must
+    // come after the member definitions so that all members are emitted.
+    template class AbstractIterativeSolver< std::complex< double > >;
+    template class AbstractIterativeSolver< std::complex< float > >;
+    template class JacobiIterativeSolver< std::complex< double > >;
+    template class JacobiIterativeSolver< std::complex< float > >;
+    template class KSPIterativeSolver< std::complex< double > >;
+    template class KSPIterativeSolver< std::complex< float > >;
+
+  } // namespace problem
+
+
+} // namespace gmshddm
diff --git a/src/problem/Solvers.h b/src/problem/Solvers.h
new file mode 100644
index 0000000000000000000000000000000000000000..2518180341e3ca06eb47822d74a3b4cf2b4af2b1
--- /dev/null
+++ b/src/problem/Solvers.h
@@ -0,0 +1,91 @@
+// GmshDDM - Copyright (C) 2019-2021, A. Royer, C. Geuzaine, Université de Liège
+// GmshDDM - Copyright (C) 2022, A. Royer, C. Geuzaine, B. Martin, Université de Liège
+//
+// See the LICENSE.txt file for license information. Please report all
+// issues on https://gitlab.onelab.info/gmsh/ddm/issues
+
+#ifndef H_GMSHDDM_SOLVERS
+#define H_GMSHDDM_SOLVERS
+
+#include "InterfaceCompoundField.h"
+#include "InterfaceField.h"
+#include "SubdomainCompoundField.h"
+#include "SubdomainField.h"
+
+#include <gmshfem/Formulation.h>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+typedef struct _p_Mat *Mat;
+typedef struct _p_Vec *Vec;
+typedef struct _p_KSP *KSP;
+
+namespace gmshddm
+{
+
+
+  namespace problem
+  {
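+    // Interface for the outer DDM iteration: implementations drive the
+    // fixed-point iterate through the shell matrix A and record the residual
+    // history via _runIterationOperations().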
+    template< class T_Scalar >
+    class AbstractIterativeSolver
+    {
+
+     public:
+      virtual ~AbstractIterativeSolver() = default;
+      virtual void solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol = 1e-6, int maxIter = 100) = 0;
+      virtual std::string solverName() const = 0;
+
+      std::vector< gmshfem::scalar::Precision< T_Scalar > > absoluteResidual() const;
+      std::vector< gmshfem::scalar::Precision< T_Scalar > > relativeResidual() const;
+
+     protected:
+      std::vector< gmshfem::scalar::Precision< T_Scalar > > _absoluteResidual;
+      std::vector< gmshfem::scalar::Precision< T_Scalar > > _relativeResidual;
+      void _runIterationOperations(const unsigned int iteration, const gmshfem::scalar::Precision< T_Scalar > rnorm);
+    };
+
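+    // Classical Jacobi (fixed-point) iteration on the interface unknowns.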
+    template< class T_Scalar >
+    class JacobiIterativeSolver : public AbstractIterativeSolver< T_Scalar >
+    {
+     public:
+      virtual void solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol = 1e-6, int maxIter = 100) override;
+      virtual std::string solverName() const override;
+    };
+
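+    // Wrapper around the PETSc KSP solvers; "name" is the KSP type, optionally
+    // followed by "+" modifiers (e.g. "gmres+mgs" for modified Gram-Schmidt).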
+    template< class T_Scalar >
+    class KSPIterativeSolver : public AbstractIterativeSolver< T_Scalar >
+    {
+     public:
+      KSPIterativeSolver(const std::string &name) :
+        _solverName(name) {}
+      virtual void solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol = 1e-6, int maxIter = 100) override;
+      virtual std::string solverName() const override;
+
+     private:
+      std::string _solverName;
+
+      // T_Scalar, PetscScalar, T_Integer
+      template< class T_Scalar1, class T_Scalar2, class T_Integer >
+      struct PetscInterface {
+        static int kspPrint(KSP ksp, T_Integer it, gmshfem::scalar::Precision< T_Scalar2 > rnorm, void *mctx)
+        {
+          KSPIterativeSolver< T_Scalar1 > *solver = static_cast< KSPIterativeSolver< T_Scalar1 > * >(mctx);
+          solver->_runIterationOperations(it, rnorm);
+          return 0;
+        }
+      };
+    };
+
+  } // namespace problem
+
+} // namespace gmshddm
+
+
+#endif // H_GMSHDDM_SOLVERS