Commit c75f266b authored by Anthony Royer

Merge branch 'refactor_solvers' into 'master'

Refactoring iterative interface solvers

See merge request !26
parents 6655115a 8970b21a
Pipeline #10238 failed
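
For context, the refactoring moves the iterative solver loop out of Formulation::solve and into pluggable solver objects (JacobiIterativeSolver, KSPIterativeSolver) deriving from AbstractIterativeSolver, and the residual histories move from the Formulation to the solver. A minimal usage sketch of the new interface (hypothetical driver code, not part of this diff; it only uses the classes and signatures introduced below):

#include "Formulation.h"
#include "Solvers.h"

using namespace gmshddm::problem;

void solveWithGmres(Formulation< std::complex< double > > &formulation)
{
  // New interface: the solver is an object that owns the residual history.
  KSPIterativeSolver< std::complex< double > > gmres("gmres+mgs");
  formulation.solve(gmres, 1e-6, 1000);
  auto relRes = gmres.relativeResidual(); // per-iteration relative residuals

  // Old string-based interface is kept for backwards compatibility and
  // simply dispatches to a JacobiIterativeSolver or KSPIterativeSolver.
  formulation.solve("jacobi", 1e-6, 1000);
}
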
@@ -6,8 +6,8 @@
#include <cstdio>
#if defined(_WIN32) && !defined(__CYGWIN__)
#include <windows.h>
#include <psapi.h>
#include <windows.h>
#endif
#if !defined(WIN32) || defined(__CYGWIN__)
@@ -20,6 +20,21 @@
#include <mach/mach.h>
#endif
#include "GmshDdm.h"
#include "MPIInterface.h"
#include "Options.h"
#include <gmshfem/Message.h>
#include <gmshfem/Timer.h>
#ifdef HAVE_MPI
#include <mpi.h>
#endif
#ifdef HAVE_PETSC
#include <petsc.h>
#endif
namespace gmshddm
{
@@ -66,6 +81,44 @@ namespace gmshddm
}
void s_printResources(const std::string &step, bool first = false)
{
static gmshfem::common::Timer timeRef;
if(first) timeRef.tick();
gmshfem::common::Timer time = timeRef;
time.tock();
double cpuTime, peakRSS, currentRSS;
common::getResources(cpuTime, peakRSS, currentRSS);
if(mpi::getMPISize() > 1) {
double val[3] = {cpuTime, peakRSS, currentRSS};
double min[3] = {val[0], val[1], val[2]};
double max[3] = {val[0], val[1], val[2]};
MPI_Reduce(val, min, 3, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(val, max, 3, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
if(mpi::getMPIRank() == 0) {
if(gmshddm::common::Options::instance()->memory) {
gmshfem::msg::info << "* Resources @ " << step << ": Wall = " << time << "s, "
<< "CPU = [" << min[0] << "s, " << max[0] << "s], "
<< "PeakRSS = [" << min[1] << "Mb, " << max[1] << "Mb], "
<< "CurrentRSS = [" << min[2] << "Mb, " << max[2] << "Mb]"
<< gmshfem::msg::endl;
}
}
MPI_Barrier(PETSC_COMM_WORLD);
}
else {
if(gmshddm::common::Options::instance()->memory) {
gmshfem::msg::info << "* Resources @ " << step << ": Wall = " << time << "s, "
<< "CPU = " << cpuTime << "s, "
<< "PeakRSS = " << peakRSS << "Mb, "
<< "CurrentRSS = " << currentRSS << "Mb"
<< gmshfem::msg::endl;
}
}
}
} // namespace common
......
@@ -14,6 +14,7 @@ namespace gmshddm
{
void getResources(double &cpuTime, double &peakRSS, double &currentRSS);
void s_printResources(const std::string& step, bool first = false);
}
......
@@ -5,6 +5,7 @@
set(SRC
Formulation.cpp
Solvers.cpp
)
file(GLOB HDR RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.h)
......
@@ -4,6 +4,7 @@
// issues on https://gitlab.onelab.info/gmsh/ddm/issues
#include "Formulation.h"
#include "Solvers.h"
#include "GmshDdm.h"
#include "MPIInterface.h"
@@ -33,46 +34,11 @@ namespace gmshddm
namespace problem
{
static void s_printResources(const std::string &step, bool first = false)
{
static gmshfem::common::Timer timeRef;
if(first) timeRef.tick();
gmshfem::common::Timer time = timeRef;
time.tock();
double cpuTime, peakRSS, currentRSS;
common::getResources(cpuTime, peakRSS, currentRSS);
if(mpi::getMPISize() > 1) {
double val[3] = {cpuTime, peakRSS, currentRSS};
double min[3] = {val[0], val[1], val[2]};
double max[3] = {val[0], val[1], val[2]};
MPI_Reduce(val, min, 3, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
MPI_Reduce(val, max, 3, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
if(mpi::getMPIRank() == 0) {
if(gmshddm::common::Options::instance()->memory) {
gmshfem::msg::info << "* Resources @ " << step << ": Wall = " << time << "s, "
<< "CPU = [" << min[0] << "s, " << max[0] << "s], "
<< "PeakRSS = [" << min[1] << "Mb, " << max[1] << "Mb], "
<< "CurrentRSS = [" << min[2] << "Mb, " << max[2] << "Mb]"
<< gmshfem::msg::endl;
}
}
MPI_Barrier(PETSC_COMM_WORLD);
}
else {
if(gmshddm::common::Options::instance()->memory) {
gmshfem::msg::info << "* Resources @ " << step << ": Wall = " << time << "s, "
<< "CPU = " << cpuTime << "s, "
<< "PeakRSS = " << peakRSS << "Mb, "
<< "CurrentRSS = " << currentRSS << "Mb"
<< gmshfem::msg::endl;
}
}
}
using gmshddm::common::s_printResources;
template< class T_Scalar >
Formulation< T_Scalar >::Formulation(const std::string &name, const std::vector< std::vector< unsigned int > > &topology) :
_name(name), _volume(), _surface(topology.size()), _interfaceFields(), _interfaceMapping(), _rhs(), _absoluteResidual(), _relativeResidual(), _numberOfIterations(0), _physicalCommutator(false), _artificialCommutator(false), _IA(false)
_name(name), _volume(), _surface(topology.size()), _interfaceFields(), _interfaceMapping(), _rhs(), _numberOfIterations(0), _physicalCommutator(false), _artificialCommutator(false), _IA(false)
{
for(auto i = 0ULL; i < topology.size(); ++i) {
gmshfem::msg::debug << "Create volume formulation (" << i << ")." << gmshfem::msg::endl;
@@ -87,7 +53,7 @@ namespace gmshddm
template< class T_Scalar >
Formulation< T_Scalar >::Formulation(const std::string &name, const std::vector< gmshfem::problem::Formulation< T_Scalar > * > &volume, const std::vector< std::unordered_map< unsigned int, gmshfem::problem::Formulation< T_Scalar > * > > &surface) :
_name(name), _volume(volume), _surface(surface), _interfaceFields(), _interfaceMapping(), _rhs(), _absoluteResidual(), _relativeResidual(), _numberOfIterations(0), _physicalCommutator(false), _artificialCommutator(false), _IA(false)
_name(name), _volume(volume), _surface(surface), _interfaceFields(), _interfaceMapping(), _rhs(), _numberOfIterations(0), _physicalCommutator(false), _artificialCommutator(false), _IA(false)
{
}
@@ -104,20 +70,6 @@ namespace gmshddm
}
}
template< class T_Scalar >
void Formulation< T_Scalar >::_runIterationOperations(const unsigned int iteration, const gmshfem::scalar::Precision< T_Scalar > rnorm)
{
_absoluteResidual.push_back(rnorm);
_relativeResidual.push_back(rnorm / _absoluteResidual[0]);
_numberOfIterations = iteration;
const unsigned int MPI_Rank = mpi::getMPIRank();
if(MPI_Rank == 0) {
gmshfem::msg::info << " - Iteration " << iteration << ": absolute residual = " << _absoluteResidual.back() << ", relative residual = " << _relativeResidual.back() << gmshfem::msg::endl;
}
s_printResources("DDM Iteration");
}
template< class T_Scalar >
gmshfem::algebra::Vector< T_Scalar > Formulation< T_Scalar >::_sortDofValues(const gmshfem::algebra::Vector< T_Scalar > &vector, const unsigned int fieldTag)
{
@@ -945,6 +897,21 @@ namespace gmshddm
template< class T_Scalar >
gmshfem::common::Timer Formulation< T_Scalar >::solve(const std::string& solver, const double tolerance, const int iterMax, const bool sameMatrixWithArtificialAndPhysicalSources, const bool skipFinalSolutionComputation)
{
std::unique_ptr < AbstractIterativeSolver< T_Scalar >> solverObject;
// For backwards compatibility.
if(solver == "jacobi") {
solverObject = std::make_unique< JacobiIterativeSolver< T_Scalar > >();
}
else {
solverObject = std::make_unique< KSPIterativeSolver< T_Scalar > >(solver);
}
return solve(*solverObject, tolerance, iterMax, sameMatrixWithArtificialAndPhysicalSources, skipFinalSolutionComputation);
}
template< class T_Scalar >
gmshfem::common::Timer Formulation< T_Scalar >::solve(AbstractIterativeSolver<T_Scalar>& solver, const double tolerance, const int iterMax, const bool sameMatrixWithArtificialAndPhysicalSources, const bool skipFinalSolutionComputation)
{
// sameMatrixWithArtificialAndPhysicalSources could be automatically set
// to true if bilinear terms of volume formulations do not use the
@@ -962,7 +929,7 @@
time.tick();
if(MPI_Rank == 0) {
gmshfem::msg::info << "Solving " << _name << " using " << solver << "..." << gmshfem::msg::endl;
gmshfem::msg::info << "Solving " << _name << " using " << solver.solverName() << "..." << gmshfem::msg::endl;
}
s_printResources("DDM iterative solver begin");
@@ -987,7 +954,7 @@
VecDuplicate(RHS, &Y); // new iterate
MatCreateShell(PETSC_COMM_WORLD, locSize, locSize, globSize, globSize, this, &A);
MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProduct< T_Scalar >);
MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProductOverride< T_Scalar >);
_physicalCommutator = false;
_artificialCommutator = true;
@@ -1045,67 +1012,10 @@
}
}
// Extract solver and modifiers from "solver"
std::string solverBase;
std::set<std::string> solverMod;
{
const std::regex sep("\\+");
std::list<std::string> split;
std::copy(std::sregex_token_iterator(solver.begin(), solver.end(), sep, -1),
std::sregex_token_iterator(),
std::back_inserter(split));
if(!split.empty()){
std::list<std::string>::iterator it = split.begin();
solverBase = *(it++);
for(; it != split.end(); it++)
solverMod.insert(*it);
}
}
// Solver
if(solverBase == "jacobi") {
_IA = false;
Vec W; // residual
VecDuplicate(RHS, &W);
VecSet(X, 0.);
PetscReal norm = 1;
int i = 0;
do { // jacobi
MatVectProduct< T_Scalar >(A, X, Y);
VecAYPX(Y, 1., RHS);
VecWAXPY(W, -1., X, Y);
VecCopy(Y, X);
VecNorm(W, NORM_2, &norm);
_runIterationOperations(i, norm);
i++;
} while(_relativeResidual.back() > tolerance && i < iterMax);
VecDestroy(&W);
}
else { // gmres, bcgs, bcgsl, ...
_IA = true;
KSP ksp;
PC pc;
KSPCreate(PETSC_COMM_WORLD, &ksp);
KSPSetFromOptions(ksp);
KSPSetOperators(ksp, A, A);
KSPSetType(ksp, solverBase.c_str());
KSPMonitorSet(ksp, PetscInterface< T_Scalar, PetscScalar, PetscInt >::kspPrint, this, PETSC_NULL);
KSPSetTolerances(ksp, tolerance, PETSC_DEFAULT, PETSC_DEFAULT, iterMax);
KSPGetPC(ksp, &pc);
PCSetType(pc, PCNONE);
if(solverBase == "gmres"){
KSPGMRESSetRestart(ksp, iterMax);
if(solverMod.count("mgs"))
KSPGMRESSetOrthogonalization(ksp, KSPGMRESModifiedGramSchmidtOrthogonalization);
}
else if(solverBase == "bcgsl")
KSPBCGSLSetEll(ksp, 6);
KSPSolve(ksp, RHS, Y);
KSPDestroy(&ksp);
}
// Call the AbstractIterativeSolver object
solver.solve(A, X, Y, RHS, tolerance, iterMax);
// ******************************************************
// Final solution
@@ -1194,7 +1104,7 @@
VecDuplicate(_rhs.getPetsc(), &Y);
MatCreateShell(PETSC_COMM_SELF, _rhs.size(), _rhs.size(), _rhs.size(), _rhs.size(), this, &A);
MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProduct< T_Scalar >);
MatShellSetOperation(A, MATOP_MULT, (void (*)(void)) & MatVectProductOverride< T_Scalar >);
for(auto idom = 0ULL; idom < _volume.size(); ++idom) {
_volume[idom]->removeSystem();
@@ -1263,25 +1173,21 @@
}
template< class T_Scalar >
std::vector< gmshfem::scalar::Precision< T_Scalar > > Formulation< T_Scalar >::absoluteResidual() const
unsigned int Formulation< T_Scalar >::numberOfIterations() const
{
return _absoluteResidual;
return _numberOfIterations;
}
template< class T_Scalar >
std::vector< gmshfem::scalar::Precision< T_Scalar > > Formulation< T_Scalar >::relativeResidual() const
int MatVectProduct(Mat A, Vec X, Vec Y)
{
return _relativeResidual;
}
Formulation< T_Scalar > *formulation;
MatShellGetContext(A, (void **)&formulation);
template< class T_Scalar >
unsigned int Formulation< T_Scalar >::numberOfIterations() const
{
return _numberOfIterations;
return MatVectProductOverride<T_Scalar>(A, X, Y, formulation->_IA);
}
template< class T_Scalar >
int MatVectProduct(Mat A, Vec X, Vec Y)
int MatVectProductOverride(Mat A, Vec X, Vec Y, bool IA)
{
PetscFunctionBegin;
@@ -1360,7 +1266,7 @@
}
VecCopy(g_local.getPetsc(), Y);
if(formulation->_IA) {
if(IA) {
VecAYPX(Y, -1., X);
}
......
@@ -27,7 +27,12 @@ namespace gmshddm
{
template< class T_Scalar >
static int MatVectProduct(Mat A, Vec X, Vec Y);
int MatVectProduct(Mat A, Vec X, Vec Y);
template< class T_Scalar >
int MatVectProductOverride(Mat A, Vec X, Vec Y, bool IA);
template< class T_Scalar >
class AbstractIterativeSolver;
template< class T_Scalar >
class Formulation
@@ -39,8 +44,7 @@ namespace gmshddm
std::unordered_map< field::InterfaceFieldInterface< T_Scalar > *, field::InterfaceFieldInterface< T_Scalar > * > _interfaceFields;
std::unordered_map< unsigned int, std::vector< unsigned long long > > _interfaceMapping;
gmshfem::algebra::Vector< T_Scalar > _rhs;
std::vector< gmshfem::scalar::Precision< T_Scalar > > _absoluteResidual;
std::vector< gmshfem::scalar::Precision< T_Scalar > > _relativeResidual;
unsigned int _numberOfIterations;
bool _physicalCommutator;
bool _artificialCommutator;
@@ -79,26 +83,22 @@ namespace gmshddm
gmshfem::common::Timer pre();
gmshfem::common::Timer solve(const std::string &solver, const double tolerance = 1e-6, const int iterMax = 1000, const bool sameMatrixWithArtificialAndPhysicalSources = false, const bool skipFinalSolutionComputation = false);
gmshfem::common::Timer solve(AbstractIterativeSolver<T_Scalar> &solver, const double tolerance = 1e-6, const int iterMax = 1000, const bool sameMatrixWithArtificialAndPhysicalSources = false, const bool skipFinalSolutionComputation = false);
gmshfem::algebra::MatrixCCS< T_Scalar > computeMatrix();
void setSolutionIntoInterfaceFields(const std::vector< T_Scalar > &solution);
const gmshfem::algebra::Vector< T_Scalar > &getRHS() const;
std::vector< gmshfem::scalar::Precision< T_Scalar > > absoluteResidual() const;
std::vector< gmshfem::scalar::Precision< T_Scalar > > relativeResidual() const;
unsigned int numberOfIterations() const;
friend int MatVectProduct< T_Scalar >(Mat A, Vec X, Vec Y);
friend int MatVectProductOverride< T_Scalar >(Mat A, Vec X, Vec Y, bool IA);
//*
// T_Scalar, PetscScalar, T_Integer
template< class T_Scalar1, class T_Scalar2, class T_Integer >
struct PetscInterface {
static int kspPrint(KSP ksp, T_Integer it, gmshfem::scalar::Precision< T_Scalar2 > rnorm, void *mctx)
{
Formulation< T_Scalar1 > *formulation = static_cast< Formulation< T_Scalar1 > * >(mctx);
formulation->_runIterationOperations(it, rnorm);
return 0;
}
static const T_Scalar1 *arrayInterface(const T_Scalar2 *array, const T_Integer size)
{
@@ -115,13 +115,14 @@ namespace gmshddm
delete[] array;
}
};
//*/
/*
template< class T_Scalar1, class T_Integer >
struct PetscInterface< T_Scalar1, T_Scalar1, T_Integer > {
static int kspPrint(KSP ksp, T_Integer it, gmshfem::scalar::Precision< T_Scalar1 > rnorm, void *mctx)
{
Formulation< T_Scalar1 > *formulation = static_cast< Formulation< T_Scalar1 > * >(mctx);
formulation->_runIterationOperations(it, rnorm);
//formulation->_runIterationOperations(it, rnorm);
return 0;
}
@@ -133,7 +134,7 @@ namespace gmshddm
static void freeArray(const T_Scalar1 *array)
{
}
};
};*/
};
......
// GmshDDM - Copyright (C) 2019-2021, A. Royer, C. Geuzaine, B. Martin, Université de Liège
// GmshDDM - Copyright (C) 2022, A. Royer, C. Geuzaine, B. Martin, Université de Liège
//
// See the LICENSE.txt file for license information. Please report all
// issues on https://gitlab.onelab.info/gmsh/ddm/issues
#include "Solvers.h"
#include "Formulation.h"
#include "MPIInterface.h"
#include "resources.h"
#include <gmshfem/Message.h>
#include <regex>
#ifdef HAVE_PETSC
#include <petsc.h>
#endif
namespace gmshddm
{
namespace problem
{
// Instantiate the required templates
template class AbstractIterativeSolver< std::complex< double > >;
template class AbstractIterativeSolver< std::complex< float > >;
template class JacobiIterativeSolver< std::complex< double > >;
template class JacobiIterativeSolver< std::complex< float > >;
template class KSPIterativeSolver< std::complex< double > >;
template class KSPIterativeSolver< std::complex< float > >;
template< class T_Scalar >
std::vector< gmshfem::scalar::Precision< T_Scalar > > AbstractIterativeSolver< T_Scalar >::absoluteResidual() const
{
return _absoluteResidual;
}
template< class T_Scalar >
std::vector< gmshfem::scalar::Precision< T_Scalar > > AbstractIterativeSolver< T_Scalar >::relativeResidual() const
{
return _relativeResidual;
}
template< class T_Scalar >
void AbstractIterativeSolver< T_Scalar >::_runIterationOperations(const unsigned int iteration, const gmshfem::scalar::Precision< T_Scalar > rnorm)
{
_absoluteResidual.push_back(rnorm);
_relativeResidual.push_back(rnorm / _absoluteResidual[0]);
//_numberOfIterations = iteration;
const unsigned int MPI_Rank = mpi::getMPIRank();
if(MPI_Rank == 0) {
gmshfem::msg::info << " - Iteration " << iteration << ": absolute residual = " << _absoluteResidual.back() << ", relative residual = " << _relativeResidual.back() << gmshfem::msg::endl;
}
gmshddm::common::s_printResources("DDM Iteration");
}
template< class T_Scalar >
void JacobiIterativeSolver< T_Scalar >::solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol, int maxIter)
{
double r0;
VecNorm(RHS, NORM_2, &r0);
const double stopCriterion = relTol * r0;
#ifndef HAVE_PETSC
gmshfem::msg::error << "PETSc is required for the Jacobi iterative solver." << gmshfem::msg::endl;
#endif
Vec W; // residual
VecDuplicate(RHS, &W);
VecSet(X, 0.);
PetscReal norm = 1;
int i = 0;
do { // jacobi
MatVectProductOverride< T_Scalar >(A, X, Y, false);
VecAYPX(Y, 1., RHS);
VecWAXPY(W, -1., X, Y);
VecCopy(Y, X);
VecNorm(W, NORM_2, &norm);
this->_runIterationOperations(i, norm);
i++;
} while(norm > stopCriterion && i < maxIter);
VecDestroy(&W);
}
template< class T_Scalar >
std::string JacobiIterativeSolver< T_Scalar >::solverName() const
{
return "jacobi";
}
template< class T_Scalar >
void KSPIterativeSolver< T_Scalar >::solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol, int maxIter)
{
#ifndef HAVE_PETSC
gmshfem::msg::error << "PETSc is required for KSP iterative solvers." << gmshfem::msg::endl;
#endif
std::string solverBase = _solverName;
std::set< std::string > solverMod;
{
const std::regex sep("\\+");
std::list< std::string > split;
std::copy(std::sregex_token_iterator(_solverName.begin(), _solverName.end(), sep, -1),
std::sregex_token_iterator(),
std::back_inserter(split));
if(!split.empty()) {
std::list< std::string >::iterator it = split.begin();
solverBase = *(it++);
for(; it != split.end(); it++)
solverMod.insert(*it);
}
}
gmshfem::msg::print << "solverBase: " << solverBase << "solverName : " << _solverName << gmshfem::msg::endl;
KSP ksp;
PC pc;
KSPCreate(PETSC_COMM_WORLD, &ksp);
KSPSetFromOptions(ksp);
KSPSetOperators(ksp, A, A);
KSPSetType(ksp, solverBase.c_str());
KSPMonitorSet(ksp, PetscInterface< T_Scalar, PetscScalar, PetscInt >::kspPrint, this, PETSC_NULL);
KSPSetTolerances(ksp, relTol, PETSC_DEFAULT, PETSC_DEFAULT, maxIter);
KSPGetPC(ksp, &pc);
PCSetType(pc, PCNONE);
if(solverBase == "gmres") {
KSPGMRESSetRestart(ksp, maxIter);
if(solverMod.count("mgs")) {
KSPGMRESSetOrthogonalization(ksp, KSPGMRESModifiedGramSchmidtOrthogonalization);
gmshfem::msg::print << "Using modified Gram-Schmidt in GMRES" << gmshfem::msg::endl;
}
}
else if(solverBase == "bcgsl")
KSPBCGSLSetEll(ksp, 6);
KSPSolve(ksp, RHS, Y);
KSPDestroy(&ksp);
}
template< class T_Scalar >
std::string KSPIterativeSolver< T_Scalar >::solverName() const
{
return this->_solverName;
}
} // namespace problem
} // namespace gmshddm
// GmshDDM - Copyright (C) 2019-2021, A. Royer, C. Geuzaine, Université de Liège
// GmshDDM - Copyright (C) 2022, A. Royer, C. Geuzaine, B. Martin, Université de Liège
//
// See the LICENSE.txt file for license information. Please report all
// issues on https://gitlab.onelab.info/gmsh/ddm/issues
#ifndef H_GMSHDDM_SOLVERS
#define H_GMSHDDM_SOLVERS
#include "InterfaceCompoundField.h"
#include "InterfaceField.h"
#include "SubdomainCompoundField.h"
#include "SubdomainField.h"
#include <gmshfem/Formulation.h>
#include <unordered_map>
#include <vector>
typedef struct _p_Mat *Mat;
typedef struct _p_Vec *Vec;
typedef struct _p_KSP *KSP;
namespace gmshddm
{
namespace problem
{
template< class T_Scalar >
class AbstractIterativeSolver
{
public:
virtual ~AbstractIterativeSolver() = default;
virtual void solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol=1e-6, int maxIter=100) = 0;
virtual std::string solverName() const = 0;
std::vector< gmshfem::scalar::Precision< T_Scalar > > absoluteResidual() const;
std::vector< gmshfem::scalar::Precision< T_Scalar > > relativeResidual() const;
protected:
std::vector< gmshfem::scalar::Precision< T_Scalar > > _absoluteResidual;
std::vector< gmshfem::scalar::Precision< T_Scalar > > _relativeResidual;
void _runIterationOperations(const unsigned int iteration, const gmshfem::scalar::Precision< T_Scalar > rnorm);
};
template< class T_Scalar >
class JacobiIterativeSolver : public AbstractIterativeSolver< T_Scalar >
{
public:
virtual void solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol=1e-6, int maxIter=100) override;
virtual std::string solverName() const override;
};
template< class T_Scalar >
class KSPIterativeSolver : public AbstractIterativeSolver< T_Scalar >
{
public:
KSPIterativeSolver(const std::string &name) :
_solverName(name) {}
virtual void solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol=1e-6, int maxIter=100) override;
virtual std::string solverName() const override;
private:
std::string _solverName;
// T_Scalar, PetscScalar, T_Integer
template< class T_Scalar1, class T_Scalar2, class T_Integer >
struct PetscInterface {
static int kspPrint(KSP ksp, T_Integer it, gmshfem::scalar::Precision< T_Scalar2 > rnorm, void *mctx)
{
KSPIterativeSolver< T_Scalar1 > *solver = static_cast< KSPIterativeSolver< T_Scalar1 > * >(mctx);
solver->_runIterationOperations(it, rnorm);
return 0;
}
};
};
} // namespace problem
} // namespace gmshddm
#endif // H_GMSHDDM_SOLVERS
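
Since solve() is virtual and _runIterationOperations() is protected, user code can also plug in its own iteration scheme. Below is a minimal sketch of a damped (under-relaxed) Jacobi variant, patterned on JacobiIterativeSolver::solve above (hypothetical code: the class name and the _omega parameter are illustrative, and PETSc is assumed to be available):

#include "Formulation.h" // declares MatVectProductOverride
#include "Solvers.h"
#include <petsc.h>

template< class T_Scalar >
class RelaxedJacobiIterativeSolver : public gmshddm::problem::AbstractIterativeSolver< T_Scalar >
{
 public:
  explicit RelaxedJacobiIterativeSolver(const double omega) : _omega(omega) {}

  virtual void solve(Mat A, Vec X, Vec Y, Vec RHS, double relTol = 1e-6, int maxIter = 100) override
  {
    double r0;
    VecNorm(RHS, NORM_2, &r0);
    const double stopCriterion = relTol * r0;
    Vec W; // update between two successive iterates
    VecDuplicate(RHS, &W);
    VecSet(X, 0.);
    PetscReal norm = 1.;
    int i = 0;
    do {
      gmshddm::problem::MatVectProductOverride< T_Scalar >(A, X, Y, false); // Y = F(X)
      VecAYPX(Y, 1., RHS);    // Y = F(X) + RHS (full Jacobi iterate)
      VecWAXPY(W, -1., X, Y); // W = Y - X
      VecAXPY(X, _omega, W);  // X += omega * (Y - X) (under-relaxed update)
      VecNorm(W, NORM_2, &norm);
      this->_runIterationOperations(i, norm);
      ++i;
    } while(norm > stopCriterion && i < maxIter);
    VecDestroy(&W);
  }

  virtual std::string solverName() const override { return "relaxed-jacobi"; }

 private:
  double _omega;
};
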