Skip to content
GitLab
Explore
Sign in
Register
Primary navigation
Search or go to…
Project
dg
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Snippets
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Package registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
GitLab community forum
Contribute to GitLab
Provide feedback
Terms and privacy
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
gmsh
dg
Commits
f2f6d748
Commit
f2f6d748
authored
Aug 18, 2021
by
Matteo Cicuttin
Browse files
Options
Downloads
Patches
Plain Diff
Broadcasts before entity import.
parent
fa247c55
No related branches found
No related tags found
No related merge requests found
Changes
3
Show whitespace changes
Inline
Side-by-side
Showing
3 changed files
include/gmsh_io.h
+92
-6
92 additions, 6 deletions
include/gmsh_io.h
include/physical_element.h
+2
-0
2 additions, 0 deletions
include/physical_element.h
src/gmsh_io.cpp
+54
-21
54 additions, 21 deletions
src/gmsh_io.cpp
with
148 additions
and
27 deletions
include/gmsh_io.h
+
92
−
6
View file @
f2f6d748
...
...
@@ -3,6 +3,11 @@
#include
<vector>
#include
<array>
#include
<gmsh.h>
#include
<type_traits>
#ifdef USE_MPI
#include
<mpi/mpi.h>
#endif
/* USE_MPI */
#include
"entity.h"
#include
"physical_element.h"
...
...
@@ -26,11 +31,12 @@ std::string basis_grad_name(int);
/* A face is keyed by (up to) three node numbers. */
using face_key = std::array<size_t, 3>;

/* Kind of a mesh face. Underlying type is pinned to int because these
 * values may travel across process boundaries. */
enum class face_type : int
{
    NONE = 0,
    INTERFACE,
    BOUNDARY,
    INTERPROCESS_BOUNDARY
};
struct
boundary_descriptor
...
...
@@ -56,15 +62,95 @@ struct boundary_descriptor
{}
};
/* In MPI debug builds, assert that the calling process is rank 0.
 * Expands to a no-op when NDEBUG is defined or MPI is not in use.
 * Wrapped in do { } while (0) so it behaves as a single statement
 * (safe inside if/else without braces); the brace-block form broke
 * `if (c) ASSERT_MPI_RANK_0; else ...`. The local is named rank_
 * because identifiers containing a double underscore (__rank) are
 * reserved to the implementation. */
#if ((!defined(NDEBUG)) && defined(USE_MPI))
#define ASSERT_MPI_RANK_0 do { \
    int rank_; \
    MPI_Comm_rank(MPI_COMM_WORLD, &rank_); \
    assert(rank_ == 0); \
    } while (0)
#else
#define ASSERT_MPI_RANK_0 (void) 0
#endif

/* Logical implication and equivalence, intended for use inside assert(). */
#define IMPLIES(a, b) ((not(a)) or (b))
#define IFF(a,b) (IMPLIES(a,b) and IMPLIES(b,a))
template
<
typename
T
>
void
priv_MPI_Bcast
(
std
::
vector
<
T
>&
vec
,
int
root
,
MPI_Comm
comm
)
{
static_assert
(
std
::
is_trivially_copyable
<
T
>::
value
,
"Type must be trivially copyable"
);
int
rank
;
MPI_Comm_rank
(
comm
,
&
rank
);
if
(
rank
==
root
)
{
size_t
vsize
=
vec
.
size
();
MPI_Bcast
(
&
vsize
,
1
,
MPI_UNSIGNED_LONG_LONG
,
root
,
comm
);
MPI_Bcast
(
vec
.
data
(),
vec
.
size
()
*
sizeof
(
T
),
MPI_PACKED
,
root
,
comm
);
}
else
{
size_t
vsize
;
MPI_Bcast
(
&
vsize
,
1
,
MPI_UNSIGNED_LONG_LONG
,
root
,
comm
);
vec
.
resize
(
vsize
);
MPI_Bcast
(
vec
.
data
(),
vec
.
size
()
*
sizeof
(
T
),
MPI_PACKED
,
root
,
comm
);
}
}
template
<
typename
T1
,
typename
T2
>
void
priv_MPI_Bcast
(
std
::
map
<
T1
,
std
::
vector
<
T2
>>&
map
,
int
root
,
MPI_Comm
comm
)
{
static_assert
(
std
::
is_trivially_copyable
<
T1
>::
value
,
"Type must be trivially copyable"
);
static_assert
(
std
::
is_trivially_copyable
<
T2
>::
value
,
"Type must be trivially copyable"
);
int
rank
;
MPI_Comm_rank
(
comm
,
&
rank
);
if
(
rank
==
root
)
{
size_t
msize
=
map
.
size
();
MPI_Bcast
(
&
msize
,
1
,
MPI_UNSIGNED_LONG_LONG
,
root
,
comm
);
for
(
auto
&
[
l
,
rv
]
:
map
)
{
auto
ll
=
l
;
MPI_Bcast
(
&
ll
,
sizeof
(
T1
),
MPI_PACKED
,
root
,
comm
);
size_t
vsize
=
rv
.
size
();
MPI_Bcast
(
&
vsize
,
1
,
MPI_UNSIGNED_LONG_LONG
,
root
,
comm
);
MPI_Bcast
(
rv
.
data
(),
vsize
*
sizeof
(
T2
),
MPI_PACKED
,
root
,
comm
);
}
}
else
{
size_t
msize
;
MPI_Bcast
(
&
msize
,
1
,
MPI_UNSIGNED_LONG_LONG
,
root
,
comm
);
for
(
size_t
i
=
0
;
i
<
msize
;
i
++
)
{
T1
l
;
MPI_Bcast
(
&
l
,
sizeof
(
T1
),
MPI_PACKED
,
root
,
comm
);
size_t
vsize
;
MPI_Bcast
(
&
vsize
,
1
,
MPI_UNSIGNED_LONG_LONG
,
root
,
comm
);
std
::
vector
<
T2
>
rv
;
rv
.
resize
(
vsize
);
MPI_Bcast
(
rv
.
data
(),
vsize
*
sizeof
(
T2
),
MPI_PACKED
,
root
,
comm
);
map
[
l
]
=
std
::
move
(
rv
);
}
}
}
class
model
{
int
geometric_order
;
int
approximation_order
;
int
num_partitions
;
std
::
map
<
int
,
std
::
vector
<
element_key
>>
boundary_map
;
std
::
map
<
int
,
std
::
vector
<
int
>>
comm_map
;
std
::
map
<
int
,
std
::
vector
<
int
>>
partition_map
;
std
::
vector
<
entity
>
entities
;
std
::
map
<
size_t
,
std
::
vector
<
element_key
>>
boundary_map
;
std
::
map
<
size_t
,
std
::
vector
<
int
>>
comm_map
;
std
::
vector
<
boundary_descriptor
>
bnd_descriptors
;
element_connectivity
<
element_key
>
conn
;
...
...
@@ -77,7 +163,7 @@ class model
void
populate_from_gmsh_rank0
(
void
);
#ifdef
HAV
E_MPI
#ifdef
US
E_MPI
void
populate_from_gmsh_rankN
(
void
);
#endif
...
...
This diff is collapsed.
Click to expand it.
include/physical_element.h
+
2
−
0
View file @
f2f6d748
...
...
@@ -111,6 +111,8 @@ public:
class
element_key
{
/* Objects of this class get MPI_Bcast'ed as MPI_PACKED,
* beware of data types you use below. */
int
m_dim
;
int
m_elemType
;
std
::
array
<
size_t
,
8
>
m_key_data
{};
...
...
This diff is collapsed.
Click to expand it.
src/gmsh_io.cpp
+
54
−
21
View file @
f2f6d748
...
...
@@ -5,9 +5,9 @@
#include
"gmsh_io.h"
#include
"entity.h"
#ifdef
HAV
E_MPI
#ifdef
US
E_MPI
#include
<mpi/mpi.h>
#endif
/*
HAV
E_MPI */
#endif
/*
US
E_MPI */
std
::
string
quadrature_name
(
int
order
)
...
...
@@ -57,7 +57,7 @@ model::~model()
void
model
::
make_boundary_to_faces_map
(
void
)
{
assert
(
num_partitions
>
1
)
;
ASSERT_MPI_RANK_0
;
gvp_t
gmsh_entities
;
...
...
@@ -98,11 +98,15 @@ model::make_boundary_to_faces_map(void)
boundary_map
[
tag
]
=
ekf
.
get_keys
();
}
}
assert
(
IFF
(
comm_map
.
size
()
==
0
,
num_partitions
==
1
)
);
}
void
model
::
update_connectivity
(
const
entity
&
e
,
size_t
entity_index
)
{
ASSERT_MPI_RANK_0
;
for
(
size_t
iT
=
0
;
iT
<
e
.
num_cells
();
iT
++
)
{
for
(
size_t
iF
=
0
;
iF
<
e
.
num_faces_per_elem
();
iF
++
)
...
...
@@ -180,6 +184,8 @@ model::import_gmsh_entities(void)
void
model
::
map_boundaries
(
void
)
{
ASSERT_MPI_RANK_0
;
/* Make a vector mapping element_key to entity tag */
using
bfk_t
=
std
::
pair
<
element_key
,
int
>
;
std
::
vector
<
bfk_t
>
bfk
;
...
...
@@ -227,64 +233,91 @@ model::map_boundaries(void)
fbase
+=
e
.
num_faces
();
}
/*
size_t
normal
=
0
;
size_t
interface
=
0
;
size_t
boundary
=
0
;
for (auto& bd : boundary_descriptors)
size_t
ipc_boundary
=
0
;
for
(
auto
&
bd
:
bnd_descriptors
)
{
switch
(
bd
.
type
)
{
case
face_type
::
NONE
:
normal
++
;
break
;
case
face_type
::
INTERFACE
:
interface
++
;
break
;
case
face_type
::
BOUNDARY
:
boundary
++
;
break
;
case
face_type
::
INTERPROCESS_BOUNDARY
:
ipc_boundary
++
;
break
;
}
}
std::cout << bfk.size() << " " << boundary_descriptors.size() << std::endl;
std::cout << normal << " " << interface << " " << boundary << std::endl;
*/
std
::
cout
<<
bfk
.
size
()
<<
" "
<<
bnd_descriptors
.
size
()
<<
std
::
endl
;
std
::
cout
<<
normal
<<
" "
<<
interface
<<
" "
<<
boundary
<<
" "
<<
ipc_boundary
<<
std
::
endl
;
}
/* Build the model on MPI rank 0 (or in a serial run): read the mesh from
 * gmsh, build the boundary/communication maps and, when MPI is enabled,
 * broadcast the shared metadata to the other ranks before importing the
 * entities locally. */
void
model::populate_from_gmsh_rank0()
{
    ASSERT_MPI_RANK_0;
    gmm::setOrder( geometric_order );
    make_boundary_to_faces_map();

    /* Debug dump of the interprocess communication map. */
    for (auto& [tag, parts] : comm_map)
    {
        std::cout << tag << ": ";
        for (auto& part : parts)
            std::cout << part << " ";
        std::cout << std::endl;
    }

#ifdef USE_MPI
    /* At this point, the following members are populated:
     * - geometric_order, approximation_order and num_partitions
     * - boundary_map
     * - comm_map
     * - partition_map
     * We broadcast them to the other processes. The call sequence must
     * stay in lockstep with populate_from_gmsh_rankN(). */
    MPI_Bcast(&geometric_order, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&approximation_order, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&num_partitions, 1, MPI_INT, 0, MPI_COMM_WORLD);
    priv_MPI_Bcast(boundary_map, 0, MPI_COMM_WORLD);
    priv_MPI_Bcast(comm_map, 0, MPI_COMM_WORLD);
    priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);

    for (auto& [tag, vec] : comm_map)
    {
        std::cout << tag << ": " << vec.size();
        if (not vec.empty())    /* vec[0] on an empty vector is UB */
            std::cout << " " << vec[0];
        std::cout << std::endl;
    }
#endif /* USE_MPI */

    entities.clear();
    import_gmsh_entities();
    map_boundaries(); /* This must happen after import_gmsh_entities(). */
}
#ifdef USE_MPI
/* Receive side of populate_from_gmsh_rank0() for ranks != 0: obtain the
 * model metadata broadcast by rank 0. The MPI_Bcast/priv_MPI_Bcast call
 * sequence must stay in exact lockstep with the sender, or the collective
 * operations fall out of step. */
void
model::populate_from_gmsh_rankN()
{
    MPI_Bcast(&geometric_order, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&approximation_order, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&num_partitions, 1, MPI_INT, 0, MPI_COMM_WORLD);
    priv_MPI_Bcast(boundary_map, 0, MPI_COMM_WORLD);
    priv_MPI_Bcast(comm_map, 0, MPI_COMM_WORLD);
    priv_MPI_Bcast(partition_map, 0, MPI_COMM_WORLD);

    /* Debug dump of the received communication map. */
    for (auto& [tag, vec] : comm_map)
    {
        std::cout << tag << ": " << vec.size();
        if (not vec.empty())    /* vec[0] on an empty vector is UB */
            std::cout << " " << vec[0];
        std::cout << std::endl;
    }
}
#endif /* USE_MPI */
void
model
::
populate_from_gmsh
()
{
#ifdef
HAV
E_MPI
#ifdef
US
E_MPI
int
rank
;
MPI_Comm_rank
(
MPI_COMM_WORLD
,
&
rank
);
if
(
rank
==
0
)
populate_from_gmsh_rank0
();
else
populate_from_gmsh_rankN
();
#else
/*
HAV
E_MPI */
#else
/*
US
E_MPI */
populate_from_gmsh_rank0
();
#endif
/*
HAV
E_MPI */
#endif
/*
US
E_MPI */
}
void
...
...
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment