Skip to content
Snippets Groups Projects
Commit c04d927c authored by Matteo Cicuttin's avatar Matteo Cicuttin
Browse files

Corrected DOF ordering on IPC boundary. Works OK.

parent 92cf7fe6
No related branches found
No related tags found
No related merge requests found
Subproject commit 79794aae8124bbae25f7b2a13e0f675a37bd7b04
Subproject commit 06edde267d96c353e8d46002bc64c0c7bb8ad616
......@@ -156,6 +156,9 @@ class model
using entofs_pair = std::pair<size_t, size_t>;
std::map<size_t, entofs_pair> etag_to_entity_offset;
using bfk_t = std::pair<element_key, int>;
std::vector<bfk_t> get_facekey_tag_pairs(void);
void map_boundaries(void);
void import_gmsh_entities_rank0(void);
void update_connectivity(const entity&, size_t);
......@@ -170,6 +173,7 @@ class model
void make_partition_to_entities_map(void);
void map_interprocess_boundaries(void);
void make_comm_descriptors(void);
std::vector<bfk_t> get_ip_facekey_tag_pairs(void);
#endif /* USE_MPI */
void make_boundary_to_faces_map(void);
......
......@@ -31,8 +31,22 @@ public:
{
return (wtf_int == other.wtf_int) and (wtf_uint == other.wtf_uint);
}
/* Strict weak ordering for bf_key: compare the integer part first,
 * then fall back to the unsigned part on ties. Lexicographic order,
 * consistent with operator==. */
bool operator<(const bf_key& other) const
{
    if (wtf_int != other.wtf_int)
        return wtf_int < other.wtf_int;
    return wtf_uint < other.wtf_uint;
}
friend std::ostream& operator<<(std::ostream& os, const bf_key&);
};
/* Print a bf_key as "{wtf_int, wtf_uint}" for debugging/logging.
 * Returns the stream to allow the usual insertion chaining. */
inline std::ostream& operator<<(std::ostream& os, const bf_key& bk)
{
    return os << "{" << bk.wtf_int << ", " << bk.wtf_uint << "}";
}
class physical_element
{
size_t m_original_position; /* Position in GMSH ordering (relative to entity) */
......
......@@ -383,15 +383,11 @@ model::is_interprocess_boundary(int tag)
#endif /* USE_MPI */
void
model::map_boundaries(void)
std::vector<model::bfk_t>
model::get_facekey_tag_pairs(void)
{
#ifdef USE_MPI
//ASSERT_MPI_RANK_0;
#endif /* USE_MPI */
/* Make a vector mapping element_key to entity tag */
using bfk_t = std::pair<element_key, int>;
/* Make a vector mapping an element_key of a boundary face
* to its entity tag. Sort it to allow fast lookups. */
std::vector<bfk_t> bfk;
for (auto& [tag, keys] : boundary_map)
for (auto& k : keys)
......@@ -403,6 +399,17 @@ model::map_boundaries(void)
};
std::sort(bfk.begin(), bfk.end(), comp);
return bfk;
}
void
model::map_boundaries(void)
{
#ifdef USE_MPI
//ASSERT_MPI_RANK_0;
#endif /* USE_MPI */
std::vector<bfk_t> bfk = get_facekey_tag_pairs();
bnd_descriptors.resize( num_faces() );
size_t fbase = 0;
/* For each entity */
......@@ -477,11 +484,11 @@ model::map_boundaries(void)
}
#ifdef USE_MPI
void
model::map_interprocess_boundaries(void)
std::vector<model::bfk_t>
model::get_ip_facekey_tag_pairs(void)
{
/* Make a vector mapping element_key to entity tag */
using bfk_t = std::pair<element_key, int>;
/* Make a vector mapping an element_key of an interprocess boundary
* face to its entity tag. Sort it to allow fast lookups. */
std::vector<bfk_t> bfk;
for (auto& [tag, keys] : boundary_map)
{
......@@ -497,6 +504,14 @@ model::map_interprocess_boundaries(void)
};
std::sort(bfk.begin(), bfk.end(), comp);
return bfk;
}
void
model::map_interprocess_boundaries(void)
{
std::vector<bfk_t> bfk = get_ip_facekey_tag_pairs();
auto lbcomp = [](const bfk_t& a, const element_key& b) -> bool {
return a.first < b;
};
......@@ -545,39 +560,27 @@ model::map_interprocess_boundaries(void)
void
model::make_comm_descriptors(void)
{
/* Make a vector mapping element_key to entity tag */
using bfk_t = std::pair<element_key, int>;
std::vector<bfk_t> bfk;
for (auto& [tag, keys] : boundary_map)
{
if ( not is_interprocess_boundary(tag) )
continue;
for (auto& k : keys)
bfk.push_back( std::make_pair(k, tag) );
}
/* Sort it for lookups */
auto comp = [](const bfk_t& a, const bfk_t& b) -> bool {
return a.first < b.first;
};
std::sort(bfk.begin(), bfk.end(), comp);
/* The idea here is to build a vector (cd.dof_mapping) that tracks
* all the degrees of freedom on a given interprocess boundary.
* That vector is subsequently used to build the vectors that will
* be transmitted to (and received from) the neighbouring partitions
* to compute the jumps. On both sides of the IPC boundary we need
* the DOFs in cd.dof_mapping listed in the exact same order.
* */
std::vector<bfk_t> bfk = get_ip_facekey_tag_pairs();
auto lbcomp = [](const bfk_t& a, const element_key& b) -> bool {
return a.first < b;
};
/* Sorry for this. Basically I need to have the faces I will
* find in the next loop in the exact same order on both sides
* of the IPC boundary. */
struct lazy_hack {
element_key fk;
size_t fbs;
size_t base;
size_t ibtag;
struct dof_position_info {
element_key fk;
size_t fbs;
size_t ibtag;
std::vector<size_t> offsets;
};
std::vector<lazy_hack> lhs;
std::vector<dof_position_info> dpis;
size_t flux_base = 0;
/* For each entity */
......@@ -597,32 +600,34 @@ model::make_comm_descriptors(void)
continue;
auto ibtag = (*itor).second;
auto fbs = rf.num_basis_functions();
/*
auto& cd = ipc_boundary_comm_table[ibtag];
std::vector< std::pair<bf_key, size_t> > bf_order(fbs);
auto bf_keys = pf.bf_keys();
for (size_t i = 0; i < fbs; i++)
bf_order[i] = std::make_pair( bf_keys[i], i );
std::sort(bf_order.begin(), bf_order.end());
dof_position_info dpi;
dpi.fk = fk;
dpi.fbs = fbs;
for (size_t i = 0; i < fbs; i++)
cd.dof_mapping.push_back(flux_base + fbs*iF + i);
cd.fks.push_back(fk);
*/
lazy_hack lh;
lh.fk = fk;
lh.fbs = fbs;
lh.base = flux_base + fbs*iF;
lh.ibtag = ibtag;
lhs.push_back( std::move(lh) );
dpi.offsets.push_back(flux_base + fbs*iF + bf_order[i].second);
dpi.ibtag = ibtag;
dpis.push_back( std::move(dpi) );
}
flux_base += e.num_fluxes();
}
auto lhcomp = [](const lazy_hack& a, const lazy_hack& b) -> bool {
auto dpicomp = [](const dof_position_info& a, const dof_position_info& b) -> bool {
return a.fk < b.fk;
};
std::sort(lhs.begin(), lhs.end(), lhcomp);
for (auto& lh : lhs)
std::sort(dpis.begin(), dpis.end(), dpicomp);
for (auto& dpi : dpis)
{
auto& cd = ipc_boundary_comm_table[lh.ibtag];
for (size_t i = 0; i < lh.fbs; i++)
cd.dof_mapping.push_back( lh.base+i );
auto& cd = ipc_boundary_comm_table[dpi.ibtag];
for (size_t i = 0; i < dpi.fbs; i++)
cd.dof_mapping.push_back( dpi.offsets[i] );
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment