diff --git a/CMakeLists.txt b/CMakeLists.txt
index d1e2aa46effff6bf6c8e9bd87e0bae84944d3c62..bdb9756229e3a195e77806162486f685cf540b7d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -255,7 +255,7 @@ if (OPT_EXPENSIVE_ASSERTS)
     add_definitions(-DEXPENSIVE_ASSERTS)
 endif()
 
-option(TM_NEW_CURLS "Use the new method for computing curls" OFF)
+option(TM_NEW_CURLS "Use the new method for computing curls" ON)
 if (TM_NEW_CURLS)
     add_compile_definitions(TM_NEW_CURLS)
 endif()
diff --git a/doc/changelog.md b/doc/changelog.md
index ce901833968c496fc6b41dfa446f2af1eb8e6ca1..b1e3e04bafbae523c135603f0d652f489451c82e 100644
--- a/doc/changelog.md
+++ b/doc/changelog.md
@@ -33,7 +33,8 @@ Unavailable features:
 - Don't regenerate mesh if input is already `.msh` (7ba32158)
 - Code to allow comparison with GMSH-FEM (a5068e3a)
 - Licensed as AGPLv3 (2c471367)
-- Eigen and Sol symbols hidden by linker script (02ac8c5d)
-- Support GMSH physical groups (be6606d7)
+- Eigen and Sol symbols hidden by linker script (Linux only for now) (02ac8c5d)
+- Support GMSH physical groups (be6606d7, ad399724, 0dcb0e72)
 - Fix performance issues in the evaluation of boundary sources (a247f57e)
 - Don't alloc memory for RK4 auxiliary vectors if RK4 not used (cc387be4)
+- New kernels for direct computation of curls (5b1986be and subsequent commits)
diff --git a/doc/compile.md b/doc/compile.md
index 9d00afe4b649cb9f2a3e1a3563394a47424e41cd..2807da6024407385debc89ec90ba5b3b00ec00ad 100644
--- a/doc/compile.md
+++ b/doc/compile.md
@@ -2,8 +2,8 @@
 
 ## Supported systems
 This code is developed under Linux and Mac OS. GPU support is available
-only under Linux. A Windows port should be possible, but for the moment
-is not in the plans.
+only under Linux. A native Windows port should be possible, but for the moment
+it is not in the plans. If you really need to stick to Windows, [Windows Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install) is the tool you want to look at.
 
 ## Code dependencies
 The code is designed to keep the number of dependencies at the minimum, however it
@@ -24,14 +24,31 @@ ccmake .. -DEigen3_DIR=/your/eigen/install/path/eigen/cmake/
 - **Lua**: All the simulation configurations are written in Lua.
 - **SILO/VisIt**: SILO is a scientific database used to store simulation results, VisIt is a scientific data visualization tool
-- **CUDA**: if you want GPU support
-- **OpenMPI**: if you want parallel processing
-
-### Package installation on Debian
+- **CUDA** (optional): if you want GPU support
+- **OpenMPI** (optional): if you want parallel processing
+### Package installation on Debian/Ubuntu
+On Debian/Ubuntu it should be sufficient to run
 ```
 apt install libeigen3-dev libsilo-dev libsiloh5-0 liblua5.3-dev
 ```
+Concerning GMSH, it is suggested to install it from source, as the versions provided by the distributions are usually slightly outdated. Once you have GMSH compiled and installed, you can use the `GMSH_ROOT` environment variable to specify its path to CMake ([see below](#compilation)).
+
+If you want parallel processing support, you need to install OpenMPI with:
+```
+apt install libopenmpi-dev
+```
+The code will very likely also work with other MPI implementations, but this is currently not tested.
+
+If you want GPU support, you need to install CUDA with:
+```
+apt install nvidia-cuda-dev
+```
+If you install the packages this way, CMake should find everything automagically. Otherwise, if you compile them from source, you will need to specify their installation paths via the appropriate environment variable.
+For example, if you installed Silo by hand:
+```
+export SILO_ROOT=<path to silo>
+ccmake ..
+```
 
 ### Package installation on Mac OS
 ```
@@ -44,10 +61,11 @@ brew install datafl4sh/code/silo
 ## Compilation
 The code uses CMake to handle the builds. The whole procedure should be
 ```
-git clone --recursive https://gitlab.onelab.info/mcicuttin/gmsh_gpu_dg.git
-cd gmsh_gpu_dg
+git clone --recursive https://gitlab.onelab.info/gmsh/dg.git
+cd dg
 mkdir build
 cd build
+export GMSH_ROOT=<your path to gmsh, if installed from source>
 ccmake ..
 make
 ```
diff --git a/doc/lua_api.md b/doc/lua_api.md
index 2d01e98f347e174750129f32c53fdcb31caff73e..ceae956755187f26be3d29a365f13381e8afd46d 100644
--- a/doc/lua_api.md
+++ b/doc/lua_api.md
@@ -2,10 +2,12 @@
 
 The solver employs the Lua programming language for its **configuration**. Lua was chosen first of all for simplicity: it is extremely lightweight and carries almost no dependencies, and this allow to keep the solver small and compact. Secondly, Lua was chosen to *deliberately* limit the possibilities of what the user can do in the configuration files. If configurations become full-fledged programs, it means that something is missing in the solver core or that the solver is being used in the wrong way. Third, Lua is *fast*: evaluating user-defined Lua functions is not much slower than native C.
 
-This file documents the Lua API available in the solver. API has a *general* part and a *problem-specific* part.
+This file documents the Lua API available in the solver. The API has a *general* part and a *problem-specific* part.
 The *general* part has to do with configuration not related to a specific problem (i.e. timestep, geometry file), whereas the *problem-specific* part configures all the parameter that make sense only on a given problem (i.e. materials and sources). This separation is reflected also in how the configuration is handled internally.
 
+The Lua configuration script is the first thing loaded by the solver and is immediately executed. Therefore, some functions are not available immediately, but only after the GMSH model has been loaded. Notable examples are the functions handling [the physical groups](#handling-of-gmsh-physical-groups).
+
 ## API version
 This document describes the API available on the version v0.4 of the solver.
@@ -13,8 +15,8 @@
 
 ### Simulation variables
 
-- `sim.name` (string): name of the simulation. Used also as the oname of the output directory.
-- `sim.dt` (real): timestep duration.
+- `sim.name` (string): name of the simulation. Used also as the name of the output directory.
+- `sim.dt` (real): timestep duration in seconds.
 - `sim.timesteps` (integer): number of timesteps to do.
 - `sim.gmsh_model` (string): name of the file containing the GMSH model.
 - `sim.use_gpu` (0/1): enable/disable GPU usage.
@@ -43,17 +45,20 @@ The parallel solver runs as separate MPI processes. As such, each process loads
 - `enable_volume_sources()`: takes a boolean parameter that specifies if volumetric sources should be enabled or not
 
 #### Handling of GMSH physical groups
-- `volume_physical_group_entities(pg)`: takes the tag of a volume physical group (`dim == 3` in GMSH parlance) and returns the tags of all the entities included in the physical group. This function is available only after mesh loading, therefore it is usually called inside some callback.
-- `surface_physical_group_entities(pg)`: takes the tag of a surface physical group (`dim == 2` in GMSH parlance) and returns the tags of all the entities included in the physical group. This function is available only after mesh loading, therefore it is usually called inside some callback.
+- `volume_physical_group_entities(pg)`: takes the tag of a volume physical group (`dim == 3` in GMSH parlance) and returns the tags of all the entities included in the physical group. This function is available **only after mesh loading**, therefore it is usually called inside some callback, most probably inside `on_mesh_loaded()` ([see below](#callable-functions-1)).
+- `surface_physical_group_entities(pg)`: takes the tag of a surface physical group (`dim == 2` in GMSH parlance) and returns the tags of all the entities included in the physical group. This function is available **only after mesh loading**, therefore it is usually called inside some callback, most probably inside `on_mesh_loaded()` ([see below](#callable-functions-1)).
 
 ### Callbacks
-- `on_exit()`: this callback is called at the exit of the program, just before the internal Lua context gets destroyed.
+- `on_exit()`: this callback is called at the exit of the program, just before the internal Lua context gets destroyed. This can be useful, for example, to close a file that was previously opened by the script.
 
 ## Maxwell solver interface
 
 ### Materials
 Materials are specified populating the `materials` table exposed by the solver. The material parameters are the relative electric permittivity `epsilon`, the relative magnetic permeability `mu` and the conducibility `sigma`. The empty space parameters are stored in `const.eps0` and `const.mu0`.
+#### A note about entity tags
+GMSH entities (volumes, surfaces, ...) are identified by an integer named *tag*. That tag can change for several reasons (version and type of the geometric kernel, GMSH version, ...), therefore GMSH provides *physical groups* in order to have a more stable numbering and to be able to give a single tag to multiple entities/subdomains. GMSH/DG was originally based on entity tags, but partial support for physical groups is being added. Support for physical groups is currently somewhat inconsistent and is available only for materials and boundary conditions.
+
 It is possible either to provide a function describing the material properties in the whole domain or specify the materials subdomain by subdomain. In the first case, appropriate callbacks have to be installed in the material table, whereas in the second case the appropriate variables must be set.
 If, for example, the model has two subdomains with tags `1` and `2`, the electric permeability can be specified as
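Put together, the Lua API documented above yields a configuration of the following shape. This is only an illustrative sketch, not a file from the repository: the model name `example.geo` and the volume physical group tag `1` are assumptions, while `sim`, `materials`, `volume_physical_group_entities()` and the `on_mesh_loaded()` callback are the items described in `doc/lua_api.md`.

```lua
-- Illustrative configuration sketch: general settings plus per-subdomain
-- materials assigned through a volume physical group (tag 1, assumed).
sim.name         = "example"       -- also used as the output directory name
sim.dt           = 1e-12           -- timestep duration in seconds
sim.timesteps    = 1000            -- number of timesteps to do
sim.gmsh_model   = "example.geo"   -- GMSH model file (hypothetical)
sim.use_gpu      = 0               -- 0: cpu, 1: gpu
sim.approx_order = 1               -- approximation order

-- Physical group queries work only after the mesh has been loaded, so the
-- materials table is populated inside the on_mesh_loaded() callback.
function on_mesh_loaded()
    local bulk = volume_physical_group_entities(1)
    for i, v in ipairs(bulk) do
        materials[v] = {}
        materials[v].epsilon = 1   -- relative electric permittivity
        materials[v].mu      = 1   -- relative magnetic permeability
        materials[v].sigma   = 0   -- conductivity
    end
end
```

The same pattern appears in the validation configuration below.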
diff --git a/share/validation/params_twomat.lua b/share/validation/params_twomat.lua
index 7b640ee1a5b34e19cfc53d6a35a35dba9ad8a9d2..c99b42cbeff87412d3b4ffe499e25a9dd43ac6c7 100644
--- a/share/validation/params_twomat.lua
+++ b/share/validation/params_twomat.lua
@@ -1,15 +1,15 @@
 -- Validation test: simulate a plane wave propagating in a parallel plate
 -- waveguide with a discontinuity in materials.
 
-sim.name = "twomat"           -- simulation name
-sim.dt = 1e-12                -- timestep size
-sim.timesteps = 10000         -- num of iterations
+sim.name = "twomat"             -- simulation name
+sim.dt = 1e-12                  -- timestep size
+sim.timesteps = 20000           -- num of iterations
 sim.gmsh_model = "twomat.geo"   -- gmsh model filename
-sim.use_gpu = 0               -- 0: cpu, 1: gpu
-sim.approx_order = 1          -- approximation order
+sim.use_gpu = 0                 -- 0: cpu, 1: gpu
+sim.approx_order = 1            -- approximation order
 sim.time_integrator = "leapfrog"
 
 postpro.silo_output_rate = 100
-postpro.cycle_print_rate = 100 -- console print rate
+postpro.cycle_print_rate = 10   -- console print rate
 
 postpro["E"].mode = "nodal"
@@ -17,13 +17,21 @@ debug = {};
 debug.dump_cell_ranks = true;
 
 function setup_materials()
-    local bulk = volume_physical_group_entities(1);
-    for i,v in ipairs(bulk) do
+    local bulk_m1 = volume_physical_group_entities(1);
+    for i,v in ipairs(bulk_m1) do
         materials[v] = {}
         materials[v].epsilon = 1
         materials[v].mu = 1
         materials[v].sigma = 0
     end
+
+    local bulk_m2 = volume_physical_group_entities(2);
+    for i,v in ipairs(bulk_m2) do
+        materials[v] = {}
+        materials[v].epsilon = 4
+        materials[v].mu = 1
+        materials[v].sigma = 0
+    end
 end
 
 local freq = 3e8
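The two nearly identical loops in `setup_materials()` could be collapsed with a small helper. The sketch below only illustrates that pattern and is not part of the shipped configuration; `assign_material` is a hypothetical name, and the only solver API used is the documented `volume_physical_group_entities()` function together with the `materials` table.

```lua
-- Possible refactoring sketch for setup_materials(); assign_material is a
-- hypothetical helper, not part of the solver API.
local function assign_material(pg_tag, epsilon, mu, sigma)
    for i, v in ipairs(volume_physical_group_entities(pg_tag)) do
        materials[v] = { epsilon = epsilon, mu = mu, sigma = sigma }
    end
end

function setup_materials()
    assign_material(1, 1, 1, 0)   -- bulk_m1: empty space
    assign_material(2, 4, 1, 0)   -- bulk_m2: dielectric with relative permittivity 4
end
```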
diff --git a/share/validation/twomat.geo b/share/validation/twomat.geo
index c37a400240b677633590270f1a20f5dd8972a8c5..27d1aaed35b36aa00465ab812b2f924548c139b9 100644
--- a/share/validation/twomat.geo
+++ b/share/validation/twomat.geo
@@ -3,7 +3,8 @@
 Box(1) = {0, 0, 0, 1, 1, 0.1};
 Box(2) = {1, 0, 0, 1, 1, 0.1};
 Coherence;
-Physical Volume("bulk", 1) = {1,2};
+Physical Volume("bulk_m1", 1) = {1};
+Physical Volume("bulk_m2", 2) = {2};
 Physical Surface("source", 10) = {1};
 Physical Surface("pmc", 11) = {3,4,8,9};
 Physical Surface("pec", 12) = {5,6,10,11};
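As a quick sanity check of the validation setup: taking the `freq = 3e8` value from `params_twomat.lua` as the source frequency in Hz, the wavelength is roughly 1 m in the first box (relative permittivity 1) and roughly 0.5 m in the second box (relative permittivity 4), and the 20000 timesteps of 1e-12 s amount to 20 ns of simulated time. The standalone Lua sketch below reproduces that arithmetic; it is not solver code.

```lua
-- Standalone arithmetic sketch for the twomat validation case.
local c0   = 299792458                         -- speed of light in vacuum [m/s]
local freq = 3e8                               -- source frequency [Hz]

local lambda_m1 = c0 / (freq * math.sqrt(1))   -- eps_r = 1: roughly 1.0 m
local lambda_m2 = c0 / (freq * math.sqrt(4))   -- eps_r = 4: roughly 0.5 m

local sim_time = 1e-12 * 20000                 -- sim.dt * sim.timesteps = 20 ns
print(lambda_m1, lambda_m2, sim_time)
```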