Commit 9813098e authored by Boris Martin
Fixed missing ifdefs for builds without MPI/without PETSc

parent 7c9fd841
1 merge request: !90 Fixed missing ifdefs for builds without MPI/without PETSc
Pipeline #12029 failed
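
The change applies the same guard pattern throughout the touched sources (the GmshFem MPI helpers, DistributedField, Formulation, the DistributedContext PETSc implementation, RestrictionLowOrder, and the Solver implementation and header): MPI- and PETSc-specific includes, types and calls are wrapped in #ifdef HAVE_MPI / #ifdef HAVE_PETSC, and in builds without that dependency the affected functions throw a common::Exception at runtime instead of breaking compilation or linking. A minimal, self-contained sketch of the pattern, modelled on GmshFem::reductionSum from the diff below (std::runtime_error stands in for gmshfem's common::Exception; the free function is illustrative, not part of the commit):

// Sketch of the guard pattern (illustrative, not part of the commit):
#ifdef HAVE_MPI
#include <mpi.h>
#endif

#include <stdexcept> // stand-in for gmshfem's common::Exception in this sketch

double reductionSum(double x)
{
#ifdef HAVE_MPI
  // Sum the local contributions of all ranks.
  double sum;
  MPI_Allreduce(&x, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  return sum;
#else
  // Without MPI the function still compiles and links, but fails loudly if used.
  (void)x;
  throw std::runtime_error("MPI is not available");
#endif
}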
@@ -12,7 +12,10 @@
 #include "Options.h"
 #include "Timer.h"
 #include "gmshfemDefines.h"
+#ifdef HAVE_MPI
 #include <mpi.h>
+#endif
 #include <gmsh.h>
@@ -261,16 +264,25 @@ namespace gmshfem::common
   double GmshFem::reductionSum(double x)
   {
+#ifdef HAVE_MPI
     double sum;
     MPI_Allreduce(&x, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
     return sum;
+#else
+    throw common::Exception("MPI is not available");
+#endif
   }

   int GmshFem::reductionInt(int x)
   {
+#ifdef HAVE_MPI
     int sum;
     MPI_Allreduce(&x, &sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
     return sum;
+#else
+    throw common::Exception("MPI is not available");
+#endif
   }

   void GmshFem::BarrierMPI()
...
@@ -9,11 +9,15 @@
 #include "GmshFem.h"
 #include "Timer.h"
 #include "instantiate.h"
+#include "gmshfemDefines.h"
 #include <CSVio.h>
 #include <Options.h>
 #include <gmsh.h>
+#ifdef HAVE_MPI
 #include <mpi.h>
+#endif
 #include <unordered_set>
@@ -121,7 +125,7 @@ namespace gmshfem::field
   * - _toRead contains all the non owned Dofs
   * - _toSend is NOT filled
   * */
+#ifdef HAVE_MPI
   struct ShareDofInfo {
     unsigned long long type;
@@ -206,11 +210,17 @@
         _toRead.erase(dof);
       }
     }
+#else
+    throw common::Exception("MPI is not available");
+#endif
   }

   template< class T_Scalar, field::Form T_Form >
   void DistributedField< T_Scalar, T_Form >::_computeToSend()
   {
+#ifdef HAVE_MPI
     // Send all owned interfaces and inner dofs that are read by someone else
     std::vector< DofGlobalIndex > local, global;
     local.reserve(_sharedDofs.size());
@@ -269,6 +279,9 @@
         _toSend.insert(dof);
       }
     }
+#else
+    throw common::Exception("MPI is not available");
+#endif
   }
@@ -305,6 +318,8 @@
   template< class T_Scalar, field::Form T_Form >
   void DistributedField< T_Scalar, T_Form >::syncGlobalDofs(std::vector< unsigned long long > &localToGlobal, std::vector< unsigned long long > &readIDs)
   {
+#ifdef HAVE_MPI
     // To be called AFTER local dofs get numbered
     unsigned rank = gmshfem::common::GmshFem::getMPIRank();
     unsigned commSize = gmshfem::common::GmshFem::getMPISize();
@@ -391,6 +406,10 @@
     msg::debug << msg::endl
                << "Leaving syncGlobalDofs" << msg::endl;
     MPI_Barrier(MPI_COMM_WORLD);
+#else
+    throw common::Exception("MPI is not available");
+#endif
   }

   template< class T_Scalar, field::Form T_Form >
...
@@ -1088,6 +1088,9 @@ namespace gmshfem::problem
   template< class T_Scalar >
   common::Timer Formulation< T_Scalar >::preDistributed()
   {
+#ifndef HAVE_MPI
+    throw common::Exception("preDistributed without MPI support");
+#else
     unsigned rank = gmshfem::common::GmshFem::getMPIRank();
     unsigned commSize = gmshfem::common::GmshFem::getMPISize();
     common::GmshFem::BarrierMPI();
@@ -1185,6 +1188,7 @@
     _distributedContext = std::make_unique<system::DistributedContext<T_Scalar>>(std::move(localToGlobalMap), std::move(localIdOfOwned), std::move(localIdOfNonOwned));
     return time;
+#endif
   }

   template< class T_Scalar >
@@ -1810,6 +1814,9 @@
   template< class T_Scalar >
   common::Timer Formulation< T_Scalar >::solveForPETSc(Vec x, Vec y, const bool reusePreconditioner) const
   {
+#ifndef HAVE_PETSC
+    throw common::Exception("solveForPETSc called but PETSc is not available");
+#else
     msg::info << "Solving with a PETSc vector..." << msg::endl;
     common::Timer time;
     time.tick();
@@ -1827,11 +1834,15 @@
     time.tock();
     return time;
+#endif
   }

   template< class T_Scalar >
   common::Timer Formulation< T_Scalar >::solveForPETSc(Mat x, Mat y, const bool reusePreconditioner) const
   {
+#ifndef HAVE_PETSC
+    throw common::Exception("solveForPETSc called but PETSc is not available");
+#else
     msg::info << "Solving with a PETSc matrix..." << msg::endl;
     common::Timer time;
     time.tick();
@@ -1849,6 +1860,7 @@
     time.tock();
     return time;
+#endif
   }

   template< class T_Scalar >
...
@@ -19,17 +19,18 @@ namespace gmshfem::system
   class DistributedContextImpl
   {
   private:
+#ifdef HAVE_PETSC
     IS _globalIds;
     Vec _distributedExtendedVector;
     Vec _distributedVector;
     VecScatter _scatter;
-    //Vec _globalVec;
-    //Vec _localVec;
+#endif
     DistributedContext< T_Scalar > &ctx;

   public:
     DistributedContextImpl(DistributedContext<T_Scalar>& ctx) : ctx(ctx) {
-      // TODO: check petsc available
+#ifndef HAVE_PETSC
+      throw gmshfem::common::Exception("PETSC is not available");
+#else
       unsigned nLoc = ctx.localIDofOwned().size();
       unsigned nOverlap = ctx.localIDofNonOwned().size();
@@ -46,18 +47,25 @@
       ISCreateGeneral(PETSC_COMM_WORLD, nLoc+nOverlap, mapping.data(), PETSC_COPY_VALUES, &_globalIds);
       VecScatterCreate(_distributedVector, _globalIds, _distributedExtendedVector, nullptr, &_scatter);
+#endif
     }

     Vec doScatter(Vec distributedSol) {
+#ifndef HAVE_PETSC
+      throw gmshfem::common::Exception("PETSC is not available");
+#else
       VecScatterBegin(_scatter, distributedSol, _distributedExtendedVector, INSERT_VALUES, SCATTER_FORWARD);
       VecScatterEnd(_scatter, distributedSol, _distributedExtendedVector, INSERT_VALUES, SCATTER_FORWARD);
       return _distributedExtendedVector;
+#endif
     }

     ~DistributedContextImpl() {
+#ifdef HAVE_PETSC
       VecDestroy(&_distributedExtendedVector);
       VecDestroy(&_distributedVector);
       VecScatterDestroy(&_scatter);
       ISDestroy(&_globalIds);
+#endif
     }
   };
@@ -81,6 +89,7 @@
   template< class T_Scalar >
   void DistributedContext< T_Scalar >::readScatteredData(std::vector<T_Scalar>& values, Vec sol) const
   {
+#ifdef HAVE_PETSC
     if (!_impl)
       throw common::Exception("Uninitialized DistributedContextImpl");
     Vec distSol = _impl->doScatter(sol);
@@ -91,7 +100,7 @@
     VecGetArray(distSol, &data);
     interface(values, data, static_cast<unsigned long long>(_localToGlobal.size()));
     VecRestoreArray(distSol, &data);
-    //T_Scalar* data =
+#endif
   }

 #ifdef HAVE_PETSC
...
@@ -7,13 +7,16 @@
 #include "instantiate.h"

+#ifdef HAVE_PETSC
 #include "petsc.h"
+#endif

 namespace gmshfem::system
 {
   template< class T_Scalar >
   RestrictionLowOrder< T_Scalar >::RestrictionLowOrder(problem::Formulation< T_Scalar > &formulation)
   {
+#ifdef HAVE_PETSC
     // Find all the coarse dofs that are owned.
     std::vector< dofs::UnknownDof * > ownedCoarseDofs;
     for(auto &[tag, fieldPtr] : formulation._unknownFields) {
@@ -39,8 +42,7 @@
     // Create the restriction matrix
     MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD, ownedCoarseDofs.size(), formulation.getDistributedContext()->localToGlobal().size(), PETSC_DETERMINE, PETSC_DETERMINE, ai.data(), aj.data(), values.data(), &_globalRestriction);
+#endif
-    //MatView(_globalRestriction, PETSC_VIEWER_DRAW_WORLD);
   }

   INSTANTIATE_CLASS(RestrictionLowOrder, 4, TEMPLATE_ARGS(std::complex< double >, std::complex< float >, double, float))
...
@@ -59,6 +59,8 @@ namespace gmshfem::system
   };
 #endif // HAVE_PETSC

+#ifdef HAVE_PETSC
   template< class T_Scalar >
   void Solver< T_Scalar >::_loadPetscSolution(Vec x, std::vector< T_Scalar > &values)
   {
@@ -170,6 +172,7 @@
     PetscFunctionReturn(0);
   }
+#endif // HAVE_PETSC

   template< class T_Scalar >
   const char *Solver< T_Scalar >::defaultSparseSolver()
@@ -268,8 +271,8 @@ return "mkl_pardiso";
   template< class T_Scalar >
   common::Memory Solver< T_Scalar >::getEstimatedFactorizationDistributedMemoryUsage(const DistributedContext<T_Scalar>& distributedContext, bool sum) const
   {
-    Mat mat = _A->getPetscDistributed(distributedContext);
 #if defined(HAVE_PETSC) && defined(PETSC_HAVE_MUMPS)
+    Mat mat = _A->getPetscDistributed(distributedContext);
     const bool isCholesky = _isCholesky< T_Scalar >(_A->getOptions().symmetric(), _A->getOptions().hermitian());
     Mat matFac;
     MatGetFactor(mat, MATSOLVERMUMPS, (_isCholesky< T_Scalar >(_A->getOptions().symmetric(), _A->getOptions().hermitian()) ? MAT_FACTOR_CHOLESKY : MAT_FACTOR_LU), &matFac);
@@ -314,17 +317,17 @@ return "mkl_pardiso";
       throw;
     }
   }

+  // Only implemented those if HAVE_PETSC is defined
+#ifdef HAVE_PETSC
   template< class T_Scalar >
   void Solver< T_Scalar >::solveForPETSc(Vec b, Vec x, const bool reusePreconditioner)
   {
+    PetscFunctionBeginUser;
     //*******************
     // Solve :
     // A x = b for given and already allocated PETSc vectors
     //*******************
     try {
-#ifdef HAVE_PETSC
-      PetscFunctionBeginUser;
       if(_A->getModule()->name() != "A" && _A->getModule()->name() != "AFrequency") {
         throw common::Exception("A separately assembled system can not be solved");
@@ -377,15 +380,14 @@ return "mkl_pardiso";
       PetscCallVoid(KSPSolve(_ksp, b, x));
       PetscFunctionReturnVoid();
-#else
-      msg::error << "Unable to solve the linear system without PETSc" << msg::endl;
-#endif
     }
     catch(const std::exception &exc) {
       msg::error << "Error in solveForPETSc: " << exc.what() << msg::endl;
       throw;
     }
   }
+#endif

   template< class T_Scalar >
   void Solver< T_Scalar >::solveAll(std::vector< std::vector< T_Scalar > > &values, const bool reusePreconditioner)
@@ -433,17 +435,16 @@ PetscFunctionBeginUser;
 #endif
   }

+#ifdef HAVE_PETSC
   template< class T_Scalar >
   void Solver< T_Scalar >::solveForPETSc(Mat B, Mat X, const bool reusePreconditioner)
   {
+    PetscFunctionBeginUser;
     //*******************
     // Solve :
     // A x = b for given and already allocated PETSc vectors
     //*******************
     try {
-#ifdef HAVE_PETSC
-      PetscFunctionBeginUser;
       if(_A->getModule()->name() != "A" && _A->getModule()->name() != "AFrequency") {
         throw common::Exception("A separately assembled system can not be solved");
       }
@@ -498,16 +499,15 @@ PetscFunctionBeginUser;
       PetscCallVoid(KSPMatSolve(_ksp, B, X));
-      PetscFunctionReturnVoid();
-#else
-      msg::error << "Unable to solve the linear system without PETSc" << msg::endl;
-#endif
     }
     catch(const std::exception &exc) {
       msg::error << "Error in solveForPETSc: " << exc.what() << msg::endl;
       throw;
     }
+    PetscFunctionReturnVoid();
   }
+#endif

   template< class T_Scalar >
   void Solver< T_Scalar >::solveDistributed(std::vector< T_Scalar > &values, system::DISTRIBUTED_SOLVE_TYPE solveType, const DistributedContext<T_Scalar>& distributedContext, const bool reusePreconditioner)
...
@@ -56,13 +56,16 @@ namespace gmshfem::system
     void _loadPetscSolutionAllDistributed(Mat x, std::vector< std::vector< T_Scalar > > &values, const DistributedContext< T_Scalar > &distributedContext);
     int _ensureCorrectOperator(Mat *A, Mat *B, bool reusePrec);
     int _ensureCorrectOperatorDistributed(Mat *A, Mat *B, bool reusePrec, const DistributedContext< T_Scalar > &distributedContext);
-    static const char* defaultSparseSolver();
-#endif
-    static inline constexpr bool isComplexWithRealPetsc() {
+    static inline constexpr bool isComplexWithRealPetsc()
+    {
       return scalar::IsComplex< T_Scalar >::value == true && scalar::IsComplex< PetscScalar >::value == false;
     }
+    static const char *defaultSparseSolver();

+  public:
+    void solveForPETSc(Vec x, Vec y, const bool reusePreconditioner = false);
+    void solveForPETSc(Mat x, Mat y, const bool reusePreconditioner = false);
+#endif

   public:
@@ -80,8 +83,6 @@
     void solve(std::vector< T_Scalar > &values, const bool reusePreconditioner = false, unsigned rhsIdx = 0);
     void solveAll(std::vector< std::vector< T_Scalar > > &values, const bool reusePreconditioner = false);
-    void solveForPETSc(Vec x, Vec y, const bool reusePreconditioner = false);
-    void solveForPETSc(Mat x, Mat y, const bool reusePreconditioner = false);
     void solveDistributed(std::vector< T_Scalar > &values, DISTRIBUTED_SOLVE_TYPE, const DistributedContext< T_Scalar > &distributedContext, const bool reusePreconditioner);
     void solveAllDistributed(std::vector< std::vector< T_Scalar > > &values, DISTRIBUTED_SOLVE_TYPE, const DistributedContext<T_Scalar>& distributedContext, const bool reusePreconditioner = false);
     void eigensolve(std::vector< scalar::ComplexPrecision< T_Scalar > > &eigenvalues, std::vector< std::vector< scalar::ComplexPrecision< T_Scalar > > > &eigenvectors, const bool computeEigenvectors, const unsigned int numberOfEigenvalues, const scalar::ComplexPrecision< T_Scalar > target);
...
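
On the caller side, the effect of this commit is that a build without MPI or PETSc still compiles and links, but the guarded entry points throw at runtime. A hypothetical caller-side sketch follows; the helper function, the "Message.h" header name, and the assumption that reductionSum is callable on a GmshFem instance and that common::Exception derives from std::exception (as the catch blocks in the diff suggest) are all assumptions, not part of the commit:

#include "GmshFem.h"
#include "Message.h" // assumed header providing msg::info / msg::endl

// Hypothetical helper: reduce a value across ranks when MPI is available,
// and fall back to the local value otherwise by catching the exception
// introduced by this commit.
double reduceOrKeepLocal(gmshfem::common::GmshFem &gmshFem, double localValue)
{
  try {
    return gmshFem.reductionSum(localValue); // MPI_Allreduce under the hood
  }
  catch(const std::exception &e) {
    gmshfem::msg::info << "MPI not available, keeping local value ("
                       << e.what() << ")" << gmshfem::msg::endl;
    return localValue;
  }
}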