nonmanifold_coupling_test.C
// libMesh includes
#include <libmesh/dof_map.h>
#include <libmesh/elem.h>
#include <libmesh/equation_systems.h>
#include <libmesh/linear_implicit_system.h>
#include <libmesh/non_manifold_coupling.h>
#include <libmesh/partitioner.h>
#include <libmesh/replicated_mesh.h>
#include <libmesh/sides_to_elem_map.h>

// Unit test includes
#include "test_comm.h"
#include "libmesh_cppunit.h"

// C++ includes
#include <memory>

using namespace libMesh;

// We use a custom partitioner for this test to help ensure we're
// testing the cases we want. In particular, we're going to ensure
// that elements attached to the "shared" side are on different
// processors from each other, so we know that algebraic and coupling
// ghosting will be required between the two subdomains.
class NonManifoldTestPartitioner : public Partitioner
{
public:

  NonManifoldTestPartitioner () = default;
  NonManifoldTestPartitioner (const NonManifoldTestPartitioner &) = default;
  NonManifoldTestPartitioner (NonManifoldTestPartitioner &&) = default;
  NonManifoldTestPartitioner & operator= (const NonManifoldTestPartitioner &) = default;
  NonManifoldTestPartitioner & operator= (NonManifoldTestPartitioner &&) = default;
  virtual ~NonManifoldTestPartitioner() = default;

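  // Virtual "copy constructor" required by the Partitioner interface:
  // returns a heap-allocated copy of this partitioner.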
  virtual std::unique_ptr<Partitioner> clone () const override
  {
    return std::make_unique<NonManifoldTestPartitioner>(*this);
  }

protected:

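  // Partitioner interface: assign the Elems attached to the single
  // non-manifold edge round-robin to processors 0..n-1, and leave all
  // other Elems on processor 0.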
  virtual void _do_partition (MeshBase & mesh,
                              const unsigned int n) override
  {
    // If we're on one partition, then everyone gets to be on that partition
    if (n == 1)
      this->single_partition_range (mesh.active_elements_begin(), mesh.active_elements_end());
    else
      {
        // Assign a round-robin processor id to each Elem attached to
        // the "non-manifold" edge in this Mesh. In this case, the
        // test Meshes were created so that they contain a single
        // non-manifold edge, so we can stop partitioning once we have
        // found that one edge.
        libmesh_assert_greater (n, 0);

        // Map from each side of the Mesh to the Elems connected to it
        MeshTools::SidesToElemMap stem = MeshTools::SidesToElemMap::build(mesh);

        auto success = [&]() -> bool
        {
          for (const auto & elem : mesh.element_ptr_range())
            for (auto s : elem->side_index_range())
              {
                const auto [side_neighbors_begin, side_neighbors_end] =
                  stem.get_connected_elems(elem, s);

                if (std::distance(side_neighbors_begin, side_neighbors_end) == 5)
                  {
                    for (auto [e, it] = std::make_tuple(0u, side_neighbors_begin);
                         it != side_neighbors_end; ++e, ++it)
                      {
                        // If there are more Elems attached to the
                        // non-manifold edge than processors, just wrap
                        // around back to 0. Setting the processor id is a
                        // bit convoluted because the SidesToElemMap only
                        // has const pointers, but since we have a
                        // non-const reference to the mesh, we can work
                        // around that.
                        Elem * elem = mesh.elem_ptr((*it)->id());
                        elem->processor_id() = e % n;

                        // Debugging
                        // libMesh::out << "Partitioning Elem " << elem->id()
                        //              << " onto proc " << elem->processor_id()
                        //              << std::endl;
                      }

                    // Stop once we have partitioned elements attached to
                    // the non-manifold Side with 5 neighbors. This should
                    // effectively leave all other Elems assigned to
                    // processor 0.
                    return true;
                  }
              }

          // If we made it here, then we didn't find a Side shared by
          // 5 elements and we'll throw an error later
          return false;
        }();

        libmesh_error_msg_if(!success, "Did not find expected non-manifold edge.");
      }
  }
};


// Common functionality for all the different tests we'll be running
class NonManifoldCouplingTestBase
{
protected:

  std::unique_ptr<MeshBase> _mesh;

  std::unique_ptr<EquationSystems> _es;

  void read_mesh(const std::string & mesh_filename)
  {
    // We are making assumptions in various places about the presence
    // of the elements on the current processor, so we're restricting to
    // ReplicatedMesh for now.
    _mesh = std::make_unique<ReplicatedMesh>(*TestCommWorld);
    _mesh->read(mesh_filename);

    // Use our custom partitioner that assigns non-manifold edge
    // neighbors to different processors.
    _mesh->partitioner() = std::make_unique<NonManifoldTestPartitioner>();

    _mesh->prepare_for_use();

    // Debugging: print processor ids determined by the partitioner
    // for (const auto & elem : _mesh->element_ptr_range())
    //   libMesh::out << "Elem " << elem->id() << " is on processor " << elem->processor_id() << std::endl;
  }

  void init_es()
  {
    _es = std::make_unique<EquationSystems>(*_mesh);

    // Add a System with a single linear Lagrange variable on it.
    LinearImplicitSystem & sys = _es->add_system<LinearImplicitSystem> ("SimpleSystem");
    sys.add_variable("u", FIRST, LAGRANGE);

    // Attach the ghosting functor to the System. We use the std::shared_ptr
    // interface so that the DofMap takes ownership of the object. Note: if you
    // comment out this line, then the test _can_ fail when run in parallel, but
    // it depends on the number of processors used. For small numbers of procs,
    // sometimes we can get "lucky" and wind up with all the non-manifold
    // neighbor DOFs either local to or ghosted on all procs where they are
    // needed. In practice, this test should be run on 3 or more procs to make
    // it fail when the NonManifoldGhostingFunctor is not used.
    auto ghosting_functor = std::make_shared<NonManifoldGhostingFunctor>(*_mesh);
    sys.get_dof_map().add_coupling_functor(ghosting_functor, /*to_mesh=*/true);
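    // Note: passing to_mesh=true also registers the functor with the MeshBase,
    // so the ghost elements it requests are retained when remote elements are
    // deleted or the mesh is repartitioned.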

    // Initialize the DofMap, etc. for all Systems
    _es->init();
  }
};

// This class defines the actual unit tests
class NonManifoldGhostingFunctorTest : public CppUnit::TestCase,
                                       public NonManifoldCouplingTestBase
{
public:

  LIBMESH_CPPUNIT_TEST_SUITE( NonManifoldGhostingFunctorTest );

  // These tests all require Exodus
#if defined(LIBMESH_HAVE_EXODUS_API)
  CPPUNIT_TEST( verifySendListEntries0 );
  CPPUNIT_TEST( verifySendListEntries1 );
  CPPUNIT_TEST( verifySendListEntries2 );
  CPPUNIT_TEST( verifySendListEntries3 );
#endif

  CPPUNIT_TEST_SUITE_END();

public:
  void setUp()
  {
  }

  void tearDown()
  {
  }

  void verifySendListEntries0() { this->verify_send_list_entries_helper("meshes/non_manifold_junction0.exo"); }
  void verifySendListEntries1() { this->verify_send_list_entries_helper("meshes/non_manifold_junction1.exo"); }
  void verifySendListEntries2() { this->verify_send_list_entries_helper("meshes/non_manifold_junction2.exo"); }
  void verifySendListEntries3() { this->verify_send_list_entries_helper("meshes/non_manifold_junction3.exo"); }

  // We call this helper function (which takes the mesh filename as an argument)
  // from the CPPUNIT_TEST macro functions, which cannot take arguments themselves.
  void verify_send_list_entries_helper(const std::string & mesh_filename)
  {
    // Call base class implementations
    this->read_mesh(mesh_filename);
    this->init_es();

    // Get references to the System, its DofMap, and the send_list for this processor
    System & system = _es->get_system("SimpleSystem");
    DofMap & dof_map = system.get_dof_map();
    const std::vector<dof_id_type> & send_list = dof_map.get_send_list();
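    // Note: the send_list is sorted and uniquified by DofMap::prepare_send_list()
    // during init(), which is what lets us search it with Utility::binary_find()
    // below.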

    // Map from each side of the Mesh to the Elems connected to it
    MeshTools::SidesToElemMap stem = MeshTools::SidesToElemMap::build(*_mesh);

    // Use the SidesToElemMap to get a range of Elem pointers attached
    // to the non-manifold edge that we intentionally partitioned onto
    // different processors.
    MeshTools::SidesToElemMap::ElemIter beg, end;
    auto side_neighbors_found = [&]() -> bool
    {
      for (const auto & elem : _mesh->element_ptr_range())
        for (auto s : elem->side_index_range())
          {
            auto range = stem.get_connected_elems(elem, s);
            if (std::distance(range.first, range.second) == 5)
              {
                beg = range.first;
                end = range.second;
                return true;
              }
          }

      // If we made it here, then we didn't find a Side shared by
      // 5 elements, and the assertion below will fail.
      return false;
    }();

    // Require that neighbors were found
    CPPUNIT_ASSERT(side_neighbors_found);

    // For every pair of elements (e,f) on this edge owned by processors
    // (proc_e, proc_f) respectively, check that:
    // 1.) The DOFs of e are either local to, or in the send_list of, proc_f
    // 2.) The DOFs of f are either local to, or in the send_list of, proc_e
    for (auto it_e = beg; it_e != end; ++it_e)
      for (auto it_f = std::next(it_e); it_f != end; ++it_f)
        {
          // Lambda to be used for error checking
          auto check_dofs = [&](const Elem * elem)
          {
            std::vector<dof_id_type> dof_indices;
            dof_map.dof_indices(elem, dof_indices);

            for (const auto & dof : dof_indices)
              {
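                // A DOF is locally owned if it falls in this processor's
                // half-open ownership range [first_dof(), end_dof()).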
                bool is_local = (dof >= dof_map.first_dof()) && (dof < dof_map.end_dof());
                bool is_in_send_list = (Utility::binary_find(send_list.begin(), send_list.end(), dof) != send_list.end());
                CPPUNIT_ASSERT(is_local || is_in_send_list);
              }
          };

          const Elem * elem_e = *it_e;
          const Elem * elem_f = *it_f;

          if (_mesh->comm().rank() == elem_e->processor_id())
            check_dofs(elem_f);

          if (_mesh->comm().rank() == elem_f->processor_id())
            check_dofs(elem_e);
        }
  }
};

CPPUNIT_TEST_SUITE_REGISTRATION( NonManifoldGhostingFunctorTest );