diff --git a/Fltk/FlGui.cpp b/Fltk/FlGui.cpp
index abd35b0f0b0dd31e6ed7ab72d133d4ff60828fc4..3f1dc378c2a6170539886dffb390e9f29d2eaf5a 100644
--- a/Fltk/FlGui.cpp
+++ b/Fltk/FlGui.cpp
@@ -44,6 +44,15 @@
 #include "Generator.h"
 #include "gl2ps.h"
 
+// check (now!) if there are any pending events, and process them
+void FlGui::check(){ Fl::check(); }
+
+// wait (possibly indefinitely) for any events, then process them
+void FlGui::wait(){ Fl::wait(); }
+
+// wait (at most time seconds) for any events, then process them
+void FlGui::wait(double time){ Fl::wait(time); }
+
 class drawContextFltk : public drawContextGlobal{
  public:
   void draw()
diff --git a/Fltk/FlGui.h b/Fltk/FlGui.h
index b79330756304d47e259158f3fabc4c589c9b292b..3388bb5a8b6876754468fd2aed94ef9eec977384 100644
--- a/Fltk/FlGui.h
+++ b/Fltk/FlGui.h
@@ -8,7 +8,6 @@
 #include <string>
 #include <vector>
-#include <FL/Fl.H>
 
 #define GMSH_WINDOW_BOX FL_FLAT_BOX
 #define NB_BUTT_SCROLL 25
@@ -33,6 +32,7 @@ class geometryContextWindow;
 class meshContextWindow;
 class aboutWindow;
 class onelabWindow;
+class Fl_Widget;
 
 class GVertex;
 class GEdge;
@@ -76,11 +76,11 @@ class FlGui{
   // run the GUI until there's no window left
   static int run();
   // check (now!) if there are any pending events, and process them
-  static void check(){ Fl::check(); }
+  static void check();
   // wait (possibly indefinitely) for any events, then process them
-  static void wait(){ Fl::wait(); }
+  static void wait();
   // wait (at most time seconds) for any events, then process them
-  static void wait(double time){ Fl::wait(time); }
+  static void wait(double time);
   // is a file opened through the Mac Finder?
   void setOpenedThroughMacFinder(bool val){ _openedThroughMacFinder = val; }
   bool getOpenedThroughMacFinder(){ return _openedThroughMacFinder; }
diff --git a/Geo/GRbf.cpp b/Geo/GRbf.cpp
index cd8af08eff2cce8b1a0728f08877f4c39eb37ef2..d6094e6dbcfa16b1b25aa109a746b3faacde1dee 100644
--- a/Geo/GRbf.cpp
+++ b/Geo/GRbf.cpp
@@ -153,10 +153,12 @@ GRbf::GRbf (double sizeBox, int variableEps, int rbfFun, std::map<MVertex*, SVec
 
 GRbf::~GRbf(){
 #if defined (HAVE_ANN)
+  ANNpointArray XYZNodes = XYZkdtree->thePoints();
+  ANNpointArray UVNodes = UVkdtree->thePoints();
+  annDeallocPts(XYZNodes);
+  annDeallocPts(UVNodes);
   delete XYZkdtree;
   delete UVkdtree;
-  annDeallocPts(XYZnodes);
-  annDeallocPts(UVnodes);
 #endif
 }
 
@@ -164,7 +166,7 @@ GRbf::~GRbf(){
 
 void GRbf::buildXYZkdtree(){
 #if defined (HAVE_ANN)
-  XYZnodes = annAllocPts(nbNodes, 3);
+  ANNpointArray XYZnodes = annAllocPts(nbNodes, 3);
   for(int i = 0; i < nbNodes; i++){
     XYZnodes[i][0] = centers(i,0);
     XYZnodes[i][1] = centers(i,1);
@@ -821,7 +823,7 @@ void GRbf::solveHarmonicMap(fullMatrix<double> Oper,
   //ANN UVtree
   double dist_min = 1.e6;
 #if defined (HAVE_ANN)
-  UVnodes = annAllocPts(nbNodes, 3);
+  ANNpointArray UVnodes = annAllocPts(nbNodes, 3);
   for(int i = 0; i < nbNodes; i++){
     UVnodes[i][0] = UV(i,0);
     UVnodes[i][1] = UV(i,1);
diff --git a/Geo/GRbf.h b/Geo/GRbf.h
index c3a565e77fe719080f9b697aff8f8b3ef4832dc3..6d2f89c23acabe6f606b72d9eb2cbfd7d9d567a9 100644
--- a/Geo/GRbf.h
+++ b/Geo/GRbf.h
@@ -9,7 +9,7 @@
 #include "MVertex.h"
 #include "Context.h"
 #if defined(HAVE_ANN)
-#include <ANN/ANN.h>
+class ANNkd_tree;
 #endif
 
@@ -57,8 +57,6 @@ class GRbf {
 #if defined (HAVE_ANN)
   ANNkd_tree *XYZkdtree;
   ANNkd_tree *UVkdtree;
-  ANNpointArray XYZnodes;
-  ANNpointArray UVnodes;
 #endif
 
  public:
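The FlGui change above is a compile-time firewall: `FlGui.h` drops `<FL/Fl.H>`, forward-declares `Fl_Widget`, and the three inline methods that actually call into FLTK move to the .cpp. A minimal sketch of the same pattern (file and class names here are illustrative, not from the patch):

```cpp
// gui.h -- clients of this header no longer pull in any FLTK headers
class Fl_Widget;          // forward declaration suffices for pointers/references
class Gui {
 public:
  static void check();    // declared only; FLTK stays out of the header
};

// gui.cpp -- the single translation unit that sees FLTK
#include "gui.h"
#include <FL/Fl.H>
void Gui::check(){ Fl::check(); }
```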
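The GRbf change relies on `ANNkd_tree` keeping (but not owning) the point array it was built on: `thePoints()` hands the array back, so the class no longer needs to cache `ANNpointArray` members just to free them. A sketch of the full allocate/build/tear-down cycle, assuming ANN 1.1's interface and that the trees are always built (the destructor above would crash on a null kd-tree pointer):

```cpp
#include <ANN/ANN.h>

void annLifeCycle(int n)
{
  ANNpointArray pts = annAllocPts(n, 3);        // allocate n points in 3-D
  // ... fill pts[i][0], pts[i][1], pts[i][2] ...
  ANNkd_tree *tree = new ANNkd_tree(pts, n, 3); // tree stores the pointer, no copy
  // ... nearest-neighbour queries ...
  annDeallocPts(pts);                           // takes a reference, hence the
                                                // locals in the destructor above
  delete tree;                                  // the tree never frees the points
  annClose();                                   // release ANN's global state
}
```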
diff --git a/Mesh/Levy3D.cpp b/Mesh/Levy3D.cpp
index e6d88c023a8ec88b90ba65d6d4752d450b4c0e74..61d146b059fccc37c213d5c1af3431cd9484992d 100755
--- a/Mesh/Levy3D.cpp
+++ b/Mesh/Levy3D.cpp
@@ -17,6 +17,13 @@
 #include "MElement.h"
 #include "MElementOctree.h"
 #include "meshGRegion.h"
+#include "ap.h"
+#include "alglibinternal.h"
+#include "alglibmisc.h"
+#include "linalg.h"
+#include "optimization.h"
+
+void call_back(const alglib::real_1d_array&,double&,alglib::real_1d_array&,void*);
 
 /*********class VoronoiVertex*********/
diff --git a/Mesh/Levy3D.h b/Mesh/Levy3D.h
index 8118deca98e36c9e4509aa4778e4f8c1da81803e..42e164ea5d457f50576fbc7ec0f1450ec98854a6 100755
--- a/Mesh/Levy3D.h
+++ b/Mesh/Levy3D.h
@@ -10,12 +10,7 @@
 #include <list>
 #include "fullMatrix.h"
 #include "GRegion.h"
-#include "MElementOctree.h"
-#include "ap.h"
-#include "alglibinternal.h"
-#include "alglibmisc.h"
-#include "linalg.h"
-#include "optimization.h"
+class MElementOctree;
 
 /*********class VoronoiVertex*********/
 
@@ -191,4 +186,3 @@ class Wrap{
 /*********functions*********/
 
 bool inside_domain(MElementOctree*,double,double,double);
-void call_back(const alglib::real_1d_array&,double&,alglib::real_1d_array&,void*);
diff --git a/Mesh/meshGFaceLloyd.cpp b/Mesh/meshGFaceLloyd.cpp
index 897dd7fbc426f9971cf73e8ff8eb7f8b68fb659a..2f81ae95ecc4b570c70eb47e6cb40c980c9411a4 100644
--- a/Mesh/meshGFaceLloyd.cpp
+++ b/Mesh/meshGFaceLloyd.cpp
@@ -28,6 +28,47 @@
 #include "polynomialBasis.h"
 #include "MElementOctree.h"
 
+bool domain_search(MElementOctree* octree,double x,double y){
+  MElement* element;
+
+  element = (MElement*)octree->find(x,y,0.0,2,true);
+  if(element!=NULL) return 1;
+  else return 0;
+}
+
+
+class wrapper{
+ private :
+  int p;
+  int dimension;
+  GFace* gf;
+  int iteration;
+  int max;
+  double start;
+  DocRecord* triangulator;
+  MElementOctree* octree;
+ public :
+  wrapper();
+  ~wrapper();
+  int get_p();
+  void set_p(int);
+  int get_dimension();
+  void set_dimension(int);
+  GFace* get_face();
+  void set_face(GFace*);
+  int get_iteration();
+  void set_iteration(int);
+  int get_max();
+  void set_max(int);
+  double get_start();
+  void set_start(double);
+  DocRecord* get_triangulator();
+  void set_triangulator(DocRecord*);
+  MElementOctree* get_octree();
+  void set_octree(MElementOctree*);
+};
+
 /****************fonction callback****************/
@@ -147,15 +188,6 @@ void callback(const alglib::real_1d_array& x,double& func,alglib::real_1d_array&
   }
 }
 
-bool domain_search(MElementOctree* octree,double x,double y){
-  MElement* element;
-
-  element = (MElement*)octree->find(x,y,0.0,2,true);
-  if(element!=NULL) return 1;
-  else return 0;
-}
-
-
 /****************class smoothing****************/
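Both Levy3D and meshGFaceLloyd shed their alglib and octree includes the same way: the headers keep only a forward declaration (`class MElementOctree;`), and the alglib-typed callback declaration moves next to its definition in the .cpp. A sketch of the rule of thumb, with hypothetical file names (the `optimization.h` include and the callback signature are taken from the patch itself):

```cpp
// mesher.h -- the public interface mentions third-party types by pointer only
class MElementOctree;                               // forward declaration
bool inside_domain(MElementOctree*, double, double, double);

// mesher.cpp -- alglib is an implementation detail of this file alone
#include "mesher.h"
#include "optimization.h"                           // alglib headers
void call_back(const alglib::real_1d_array &x, double &func,
               alglib::real_1d_array &grad, void *ptr)
{
  // ... evaluate the objective and its gradient ...
}
```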
diff --git a/Mesh/meshGFaceLloyd.h b/Mesh/meshGFaceLloyd.h
index 59f62124b623589bbd112753c3d7d5eb28b2944f..8e80fc7103aed9195cb517fa06747b24376fc102 100644
--- a/Mesh/meshGFaceLloyd.h
+++ b/Mesh/meshGFaceLloyd.h
@@ -13,12 +13,6 @@
 #include "fullMatrix.h"
 #include "DivideAndConquer.h"
 #include <queue>
-#include "ap.h"
-#include "alglibinternal.h"
-#include "alglibmisc.h"
-#include "linalg.h"
-#include "optimization.h"
-#include "MElementOctree.h"
 
 class GFace;
 class voronoi_vertex;
@@ -28,8 +22,6 @@ class segment;
 class segment_list;
 class metric;
 
-void callback(const alglib::real_1d_array&,double&,alglib::real_1d_array&,void*);
-bool domain_search(MElementOctree*,double,double);
 
 class smoothing{
   int ITER_MAX;
@@ -216,37 +208,6 @@ class segment_list{
   bool add_segment(segment);
 };
 
-class wrapper{
- private :
-  int p;
-  int dimension;
-  GFace* gf;
-  int iteration;
-  int max;
-  double start;
-  DocRecord* triangulator;
-  MElementOctree* octree;
- public :
-  wrapper();
-  ~wrapper();
-  int get_p();
-  void set_p(int);
-  int get_dimension();
-  void set_dimension(int);
-  GFace* get_face();
-  void set_face(GFace*);
-  int get_iteration();
-  void set_iteration(int);
-  int get_max();
-  void set_max(int);
-  double get_start();
-  void set_start(double);
-  DocRecord* get_triangulator();
-  void set_triangulator(DocRecord*);
-  MElementOctree* get_octree();
-  void set_octree(MElementOctree*);
-};
-
 #endif
 #endif
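The `wrapper` class moved into the .cpp unchanged; its whole job is to carry typed state through the `void *ptr` that alglib passes back to the callback. A self-contained sketch of that pattern with a stand-in driver (nothing here is alglib's real API):

```cpp
#include <cstdio>

struct wrapper { int iteration; double start; };   // the state to smuggle through

// C-style callback: the optimizer returns the opaque pointer untouched
void callback(double x, void *ptr)
{
  wrapper *w = static_cast<wrapper*>(ptr);         // recover the typed state
  w->iteration++;
  std::printf("iteration %d at x = %g\n", w->iteration, x);
}

// stand-in for the optimizer's "pass void* through" contract
void optimize(void (*cb)(double, void*), void *ptr)
{
  for (double x = 0.0; x < 3.0; x += 1.0) cb(x, ptr);
}

int main()
{
  wrapper w = { 0, 0.0 };
  optimize(callback, &w);                          // &w round-trips as void*
  return 0;
}
```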
diff --git a/Solver/dofManager.cpp b/Solver/dofManager.cpp
index 2ce4b596a4e1fc7acaf1626333d71967fb386be7..a2a6c14253bfd4f3a088bcce07a1f29c41f6116a 100644
--- a/Solver/dofManager.cpp
+++ b/Solver/dofManager.cpp
@@ -1,4 +1,8 @@
 #include <dofManager.h>
+#include "GmshConfig.h"
+#ifdef HAVE_MPI
+#include "mpi.h"
+#endif
 
 template<>
 void dofManager<double>::scatterSolution()
@@ -31,3 +35,112 @@ void dofManager<double>::scatterSolution()
   MPI_Waitall (Msg::GetCommSize(), &reqSend[0], MPI_STATUS_IGNORE);
 #endif
 }
+
+void dofManagerBase::_parallelFinalize()
+{
+  _localSize = unknown.size();
+#ifdef HAVE_MPI
+  int numStart;
+  int numTotal;
+  MPI_Status status;
+  parentByProc.resize(Msg::GetCommSize());
+  ghostByProc.resize(Msg::GetCommSize());
+  if (Msg::GetCommRank() == 0){
+    numStart = 0;
+  }
+  else
+    MPI_Recv (&numStart, 1, MPI_INT, Msg::GetCommRank()-1, 0, MPI_COMM_WORLD, &status);
+  numTotal = numStart + _localSize;
+  if (Msg::GetCommRank() != Msg::GetCommSize()-1)
+    MPI_Send (&numTotal, 1, MPI_INT, Msg::GetCommRank()+1, 0, MPI_COMM_WORLD);
+  MPI_Bcast(&numTotal, 1, MPI_INT, Msg::GetCommSize()-1, MPI_COMM_WORLD);
+  for (std::map <Dof, int> ::iterator it = unknown.begin(); it!= unknown.end(); it++)
+    it->second += numStart;
+  std::vector<std::list<Dof> > ghostedByProc;
+  int *nRequest = new int[Msg::GetCommSize()];
+  int *nRequested = new int[Msg::GetCommSize()];
+  for (int i = 0; i<Msg::GetCommSize(); i++)
+    nRequest[i] = 0;
+  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
+    int procId = it->second.first;
+    it->second.second = nRequest[procId]++;
+  }
+  MPI_Alltoall(nRequest, 1, MPI_INT, nRequested, 1, MPI_INT, MPI_COMM_WORLD);
+  long int **recv0 = new long int*[Msg::GetCommSize()];
+  int **recv1 = new int*[Msg::GetCommSize()];
+  long int **send0 = new long int*[Msg::GetCommSize()];
+  int **send1 = new int*[Msg::GetCommSize()];
+  MPI_Request *reqRecv0 = new MPI_Request[2*Msg::GetCommSize()];
+  MPI_Request *reqRecv1 = reqRecv0 + Msg::GetCommSize();
+  MPI_Request *reqSend0 = new MPI_Request[Msg::GetCommSize()];
+  MPI_Request *reqSend1 = new MPI_Request[Msg::GetCommSize()];
+  for (int i = 0; i < Msg::GetCommSize(); i++) {
+    send0[i] = new long int[nRequest[i]*2];
+    recv0[i] = new long int[nRequested[i]*2];
+    send1[i] = new int[nRequested[i]];
+    recv1[i] = new int[nRequest[i]];
+    reqSend0[i] = reqSend1[i] = reqRecv0[i] = reqRecv1[i] = MPI_REQUEST_NULL;
+    parentByProc[i].resize(nRequested[i], Dof(0,0));
+    ghostByProc[i].resize(nRequest[i], Dof(0,0));
+  }
+  for (int i = 0; i<Msg::GetCommSize(); i++)
+    nRequest[i] = 0;
+  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
+    int proc = it->second.first;
+    send0 [proc] [nRequest[proc]*2] = it->first.getEntity();
+    send0 [proc] [nRequest[proc]*2+1] = it->first.getType();
+    ghostByProc[proc][nRequest[proc]] = it->first;
+    nRequest [proc] ++;
+  }
+  for (int i = 0; i<Msg::GetCommSize(); i++) {
+    if (nRequested[i] > 0) {
+      MPI_Irecv (recv0[i], 2*nRequested[i], MPI_LONG, i, 0, MPI_COMM_WORLD, &reqRecv0[i]);
+    }
+    if (nRequest[i] > 0) {
+      MPI_Irecv (recv1[i], nRequest[i], MPI_INT, i, 1, MPI_COMM_WORLD, &reqRecv1[i]);
+      MPI_Isend (send0[i], 2*nRequest[i], MPI_LONG, i, 0, MPI_COMM_WORLD, &reqSend0[i]);
+    }
+  }
+  int index;
+  while (MPI_Waitany (2*Msg::GetCommSize(), reqRecv0, &index, &status) == 0 &&
+         index != MPI_UNDEFINED) {
+    if (status.MPI_TAG == 0) {
+      for (int j = 0; j < nRequested[index]; j++) {
+        Dof d(recv0[index][j*2], recv0[index][j*2+1]);
+        std::map<Dof, int>::iterator it = unknown.find(d);
+        if (it == unknown.end ())
+          Msg::Error ("ghost Dof does not exist on parent process");
+        send1[index][j] = it->second;
+        parentByProc[index][j] = d;
+      }
+      MPI_Isend(send1[index], nRequested[index], MPI_INT, index, 1,
+                MPI_COMM_WORLD, &reqSend1[index]);
+    }
+  }
+  for (int i = 0; i<Msg::GetCommSize(); i++)
+    nRequest[i] = 0;
+  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
+    int proc = it->second.first;
+    unknown[it->first] = recv1 [proc][nRequest[proc] ++];
+  }
+  MPI_Waitall (Msg::GetCommSize(), reqSend0, MPI_STATUS_IGNORE);
+  MPI_Waitall (Msg::GetCommSize(), reqSend1, MPI_STATUS_IGNORE);
+  for (int i = 0; i < Msg::GetCommSize(); i++) {
+    delete [] send0[i];
+    delete [] send1[i];
+    delete [] recv0[i];
+    delete [] recv1[i];
+  }
+  delete [] send0;
+  delete [] send1;
+  delete [] recv0;
+  delete [] recv1;
+  delete [] reqSend0;
+  delete [] reqSend1;
+  delete [] reqRecv0;
+  delete [] nRequest;
+  delete [] nRequested;
+#endif
+  _parallelFinalized = true;
+}
+
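In `_parallelFinalize` the numbering offset is computed with a rank-by-rank Recv/Send chain followed by a Bcast from the last rank, i.e. a serialized exclusive prefix sum of the local sizes. The same two values can be obtained collectively; a sketch of the usual idiom (standard MPI calls; variable names match the function above):

```cpp
#include <mpi.h>

// numStart = sum of localSize on all ranks below ours (exclusive prefix sum);
// numTotal = sum of localSize over all ranks
void numberUnknowns(int localSize, int &numStart, int &numTotal)
{
  numStart = 0;  // MPI_Exscan leaves the receive buffer undefined on rank 0
  MPI_Exscan(&localSize, &numStart, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&localSize, &numTotal, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
```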
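The ghost exchange itself follows the classic two-phase scheme: an `MPI_Alltoall` on the per-destination counts tells every rank how many requests to expect, after which matching nonblocking sends and receives can be posted. A reduced sketch of just the counts handshake:

```cpp
#include <mpi.h>
#include <vector>

// nRequest[i] = number of ghost dofs we will ask rank i for;
// returns nRequested, where nRequested[i] = number rank i will ask us for
std::vector<int> exchangeCounts(std::vector<int> &nRequest)
{
  int size;
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  std::vector<int> nRequested(size);
  MPI_Alltoall(nRequest.data(), 1, MPI_INT,
               nRequested.data(), 1, MPI_INT, MPI_COMM_WORLD);
  return nRequested;
}
```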
diff --git a/Solver/dofManager.h b/Solver/dofManager.h
index 4783b3eee3b259df5a19110e13e1c56e2452ba7d..87ebbedc1a9a9e7f614d87c41cc20cb1877b5db4 100644
--- a/Solver/dofManager.h
+++ b/Solver/dofManager.h
@@ -12,15 +12,10 @@
 #include <map>
 #include <list>
 #include <iostream>
-#include "GmshConfig.h"
 #include "MVertex.h"
 #include "linearSystem.h"
 #include "fullMatrix.h"
 
-#if defined(HAVE_MPI)
-#include "mpi.h"
-#endif
-
 class Dof{
  protected:
   // v(x) = \sum_f \sum_i v_{fi} s^f_i(x)
@@ -88,18 +83,41 @@ class DofAffineConstraint{
   typename dofTraits<T>::VecType shift;
 };
 
+// non-template part that can be implemented in the cxx file (and so avoids
+// including mpi.h in the header)
+class dofManagerBase{
+ protected:
+  // numbering of unknown dof blocks
+  std::map<Dof, int> unknown;
+
+  // associations (not used?)
+  std::map<Dof, Dof> associatedWith;
+
+  // parallel section
+  // these dofs are images of ghosts located on another proc (id given by the
+  // map); this is a first try, maybe not the final implementation
+  std::map<Dof, std::pair<int, int> > ghostByDof; // dof => procId, globalId
+  std::vector<std::vector<Dof> > ghostByProc, parentByProc;
+  int _localSize;
+  bool _parallelFinalized;
+  bool _isParallel;
+  void _parallelFinalize();
+  dofManagerBase(bool isParallel) {
+    _isParallel = isParallel;
+    _parallelFinalized = false;
+  }
+};
+
 // A manager for degrees of freedoms, templated on the value of a dof
 // (what the functional returns): float, double, complex<double>,
 // fullVecor<double>, ...
 template <class T>
-class dofManager{
+class dofManager : public dofManagerBase{
  public:
   typedef typename dofTraits<T>::VecType dataVec;
   typedef typename dofTraits<T>::MatType dataMat;
 protected:
   // general affine constraint on sub-blocks, treated by adding
   // equations:
-  //   Dof = \sum_i dataMat_i x Dof_i + dataVec
   std::map<Dof, DofAffineConstraint< dataVec > > constraints;
@@ -110,38 +128,22 @@ class dofManager{
   // initial conditions (not used ?)
   std::map<Dof, std::vector<dataVec> > initial;
 
-  // numbering of unknown dof blocks
-  std::map<Dof, int> unknown;
-
-  // associatations (not used ?)
-  std::map<Dof, Dof> associatedWith;
-
   // linearSystems
-  std::map<const std::string, linearSystem<dataMat>*> _linearSystems;
   linearSystem<dataMat> *_current;
+  std::map<const std::string, linearSystem<dataMat>*> _linearSystems;
 
-  // parallel section
- private :
-  // those dof are images of ghost located on another proc (id givent by the map).
-  // this is a first try, maybe not the final implementation
-  std::map<Dof, std::pair<int, int> > ghostByDof; // dof => procId, globalId
   std::map<Dof, T> ghostValue;
-  std::vector<std::vector<Dof> > ghostByProc, parentByProc;
-  int _localSize;
-  bool _parallelFinalized;
-  bool _isParallel;
-  void _parallelFinalize();
  public:
   void scatterSolution();
 
  public:
   dofManager(linearSystem<dataMat> *l, bool isParallel=false)
-    : _current(l), _isParallel(isParallel), _parallelFinalized(false)
+    : dofManagerBase(isParallel), _current(l)
   {
     _linearSystems["A"] = l;
   }
   dofManager(linearSystem<dataMat> *l1, linearSystem<dataMat> *l2)
-    : _current(l1), _isParallel(false), _parallelFinalized(false)
+    : dofManagerBase(false), _current(l1)
   {
     _linearSystems.insert(std::make_pair("A", l1));
     _linearSystems.insert(std::make_pair("B", l2));
@@ -557,112 +559,4 @@
   };
 };
-template<class T>
-void dofManager<T>::_parallelFinalize()
-{
-  _localSize = unknown.size();
-#ifdef HAVE_MPI
-  int _numStart;
-  int _numTotal;
-  MPI_Status status;
-  parentByProc.resize(Msg::GetCommSize());
-  ghostByProc.resize(Msg::GetCommSize());
-  if (Msg::GetCommRank() == 0){
-    _numStart = 0;
-  }
-  else
-    MPI_Recv (&_numStart, 1, MPI_INT, Msg::GetCommRank()-1, 0, MPI_COMM_WORLD, &status);
-  _numTotal = _numStart + _localSize;
-  if (Msg::GetCommRank() != Msg::GetCommSize()-1)
-    MPI_Send (&_numTotal, 1, MPI_INT, Msg::GetCommRank()+1, 0, MPI_COMM_WORLD);
-  MPI_Bcast(&_numTotal, 1, MPI_INT, Msg::GetCommSize()-1, MPI_COMM_WORLD);
-  for (std::map <Dof, int> ::iterator it = unknown.begin(); it!= unknown.end(); it++)
-    it->second += _numStart;
-  std::vector<std::list<Dof> > ghostedByProc;
-  int *nRequest = new int[Msg::GetCommSize()];
-  int *nRequested = new int[Msg::GetCommSize()];
-  for (int i = 0; i<Msg::GetCommSize(); i++)
-    nRequest[i] = 0;
-  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
-    int procId = it->second.first;
-    it->second.second = nRequest[procId]++;
-  }
-  MPI_Alltoall(nRequest, 1, MPI_INT, nRequested, 1, MPI_INT, MPI_COMM_WORLD);
-  long int **recv0 = new long int*[Msg::GetCommSize()];
-  int **recv1 = new int*[Msg::GetCommSize()];
-  long int **send0 = new long int*[Msg::GetCommSize()];
-  int **send1 = new int*[Msg::GetCommSize()];
-  MPI_Request *reqRecv0 = new MPI_Request[2*Msg::GetCommSize()];
-  MPI_Request *reqRecv1 = reqRecv0 + Msg::GetCommSize();
-  MPI_Request *reqSend0 = new MPI_Request[Msg::GetCommSize()];
-  MPI_Request *reqSend1 = new MPI_Request[Msg::GetCommSize()];
-  for (int i = 0; i < Msg::GetCommSize(); i++) {
-    send0[i] = new long int[nRequest[i]*2];
-    recv0[i] = new long int[nRequested[i]*2];
-    send1[i] = new int[nRequested[i]];
-    recv1[i] = new int[nRequest[i]];
-    reqSend0[i] = reqSend1[i] = reqRecv0[i] = reqRecv1[i] = MPI_REQUEST_NULL;
-    parentByProc[i].resize(nRequested[i], Dof(0,0));
-    ghostByProc[i].resize(nRequest[i], Dof(0,0));
-  }
-  for (int i = 0; i<Msg::GetCommSize(); i++)
-    nRequest [i] = 0;
-  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
-    int proc = it->second.first;
-    send0 [proc] [nRequest[proc]*2] = it->first.getEntity();
-    send0 [proc] [nRequest[proc]*2+1] = it->first.getType();
-    ghostByProc[proc][nRequest[proc]] = it->first;
-    nRequest [proc] ++;
-  }
-  for (int i = 0; i<Msg::GetCommSize(); i++) {
-    if (nRequested[i] > 0) {
-      MPI_Irecv (recv0[i], 2*nRequested[i], MPI_LONG, i, 0, MPI_COMM_WORLD, &reqRecv0[i]);
-    }
-    if (nRequest[i] > 0) {
-      MPI_Irecv (recv1[i], 2*nRequest[i], MPI_INT, i, 1, MPI_COMM_WORLD, &reqRecv1[i]);
-      MPI_Isend (send0[i], 2*nRequest[i], MPI_LONG, i, 0, MPI_COMM_WORLD, &reqSend0[i]);
-    }
-  }
-  int index;
-  while (MPI_Waitany (2*Msg::GetCommSize(), reqRecv0, &index, &status) == 0 &&
-         index != MPI_UNDEFINED) {
-    if (status.MPI_TAG == 0) {
-      for (int j = 0; j < nRequested[index]; j++) {
-        Dof d(recv0[index][j*2], recv0[index][j*2+1]);
-        std::map<Dof, int>::iterator it = unknown.find(d);
-        if (it == unknown.end ())
-          Msg::Error ("ghost Dof does not exist on parent process");
-        send1[index][j] = it->second;
-        parentByProc[index][j] = d;
-      }
-      MPI_Isend(send1[index], nRequested[index], MPI_INT, index, 1,
-                MPI_COMM_WORLD, &reqSend1[index]);
-    }
-  }
-  for (int i = 0; i<Msg::GetCommSize(); i++)
-    for (int i = 0; i<Msg::GetCommSize(); i++)
-      nRequest[i] = 0;
-  for (std::map <Dof, std::pair<int, int> >::iterator it = ghostByDof.begin(); it != ghostByDof.end(); it++) {
-    int proc = it->second.first;
-    unknown[it->first] = recv1 [proc][nRequest[proc] ++];
-  }
-  MPI_Waitall (Msg::GetCommSize(), reqSend0, MPI_STATUS_IGNORE);
-  MPI_Waitall (Msg::GetCommSize(), reqSend1, MPI_STATUS_IGNORE);
-  for (int i = 0; i < Msg::GetCommSize(); i++) {
-    delete [] send0[i];
-    delete [] send1[i];
-    delete [] recv0[i];
-    delete [] recv1[i];
-  }
-  delete [] send0;
-  delete [] send1;
-  delete [] recv0;
-  delete [] recv1;
-  delete [] reqSend0;
-  delete [] reqSend1;
-  delete [] reqRecv0;
-#endif
-  _parallelFinalized = true;
-}
-
 #endif
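`dofManagerBase` is the standard cure for template headers that drag in heavy dependencies: everything independent of `T` is hoisted into a non-template base whose member functions are defined once in a .cpp, so `mpi.h` is included exactly there and nowhere else. A minimal sketch of the split (hypothetical names, not the patch's full class):

```cpp
// manager.h -- no MPI header needed here
class managerBase {
 protected:
  int _localSize;
  bool _parallelFinalized;
  managerBase() : _localSize(0), _parallelFinalized(false) {}
  void _parallelFinalize();      // defined out of line, may use MPI freely
};

template <class T>
class manager : public managerBase {
 public:
  void finalize() { if (!_parallelFinalized) _parallelFinalize(); }
  // ... only the T-dependent interface stays inline in the header ...
};

// manager.cpp -- the only translation unit that includes mpi.h
// #include <mpi.h>
void managerBase::_parallelFinalize() { /* ... */ _parallelFinalized = true; }
```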
diff --git a/gmshpy/CMakeLists.txt b/gmshpy/CMakeLists.txt
index 4933e7db41169735efc143d123f106ff5c1b71bf..436155784f8c805e9a42c97fca5c59c6cee0250f 100644
--- a/gmshpy/CMakeLists.txt
+++ b/gmshpy/CMakeLists.txt
@@ -56,6 +56,11 @@ MACRO(SWIG_GET_WRAPPER_DEPENDENCIES swigFile genWrapper language DEST_VARIABLE)
   ENDIF(NOT ${swig_getdeps_error} EQUAL 0)
 ENDMACRO(SWIG_GET_WRAPPER_DEPENDENCIES)
 
+option(ENABLE_PYTHON_LIB_API "Export all C header files needed to build the python library" OFF)
+if(ENABLE_PYTHON_LIB_API)
+  set(GMSH_API ${GMSH_API} Geo/Curvature.h Mesh/Generator.h Mesh/highOrderTools.h Mesh/meshGFaceLloyd.h Numeric/DivideAndConquer.h Post/PViewFactory.h Solver/function.h Solver/functionDerivator.h Solver/functionPython.h Solver/functionNumpy.h Solver/linearSystemPETSc.h Fltk/FlGui.h Solver/functionSpace.h Solver/STensor43.h Solver/sparsityPattern.h Solver/SElement.h Solver/groupOfElements.h PARENT_SCOPE)
+endif(ENABLE_PYTHON_LIB_API)
+
 if(HAVE_SWIG)
   include(${SWIG_USE_FILE})
   include_directories(${PYTHON_INCLUDE_DIR})
diff --git a/gmshpy/gmshCommon.i b/gmshpy/gmshCommon.i
index 6d4dc71764d429dc1656c322819a707dac8f33f7..24f4ebd197393e2a2f4cc71bb63f73c8ec8edeb6 100644
--- a/gmshpy/gmshCommon.i
+++ b/gmshpy/gmshCommon.i
@@ -9,7 +9,6 @@
 
   #include "GmshConfig.h"
   #include "Context.h"
-  #include "DefaultOptions.h"
   #include "Gmsh.h"
   #include "GmshDefines.h"
   #include "GmshMessage.h"
@@ -46,7 +45,6 @@ namespace std {
 
 %include "GmshConfig.h"
 %include "Context.h"
-#include "DefaultOptions.h"
 #if defined(HAVE_FLTK)
 %include "FlGui.h"
 #endif
diff --git a/gmshpy/gmshSolver.i b/gmshpy/gmshSolver.i
index 97832aa66a0c13a42f954b253acd2f401ca53efd..e0aaa6982e780ab08a2ce1de907b9ba828476628 100644
--- a/gmshpy/gmshSolver.i
+++ b/gmshpy/gmshSolver.i
@@ -41,5 +41,7 @@ namespace std {
 %template(linearSystemTAUCSDouble) linearSystemCSRTaucs<double>;
 %include "linearSystemFull.h"
 %template(linearSystemFullDouble) linearSystemFull<double> ;
+#ifdef HAVE_PETSC
 %include "linearSystemPETSc.h"
 %template(linearSystemPETScDouble) linearSystemPETSc<double>;
+#endif