libMesh
mesh_communication.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2019 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 
20 // Local Includes
21 #include "libmesh/boundary_info.h"
22 #include "libmesh/distributed_mesh.h"
23 #include "libmesh/elem.h"
24 #include "libmesh/ghosting_functor.h"
25 #include "libmesh/libmesh_config.h"
26 #include "libmesh/libmesh_common.h"
27 #include "libmesh/libmesh_logging.h"
28 #include "libmesh/mesh_base.h"
29 #include "libmesh/mesh_communication.h"
30 #include "libmesh/mesh_inserter_iterator.h"
31 #include "libmesh/mesh_tools.h"
32 #include "libmesh/parallel.h"
33 #include "libmesh/parallel_elem.h"
34 #include "libmesh/parallel_node.h"
35 #include "libmesh/parallel_ghost_sync.h"
36 #include "libmesh/utility.h"
37 #include "libmesh/remote_elem.h"
38 #include "libmesh/int_range.h"
39 
40 // C++ Includes
41 #include <numeric>
42 #include <set>
43 #include <unordered_set>
44 #include <unordered_map>
45 
46 
47 
48 //-----------------------------------------------
49 // anonymous namespace for implementation details
50 namespace {
51 
52 using namespace libMesh;
53 
54 struct SyncNeighbors
55 {
56  typedef std::vector<dof_id_type> datum;
57 
58  SyncNeighbors(MeshBase & _mesh) :
59  mesh(_mesh) {}
60 
61  MeshBase & mesh;
62 
63  // Find the neighbor ids for each requested element
64  void gather_data (const std::vector<dof_id_type> & ids,
65  std::vector<datum> & neighbors) const
66  {
67  neighbors.resize(ids.size());
68 
69  for (auto i : index_range(ids))
70  {
71  // Look for this element in the mesh
72  // We'd better find every element we're asked for
73  const Elem & elem = mesh.elem_ref(ids[i]);
74 
75  // Return the element's neighbors
76  const unsigned int n_neigh = elem.n_neighbors();
77  neighbors[i].resize(n_neigh);
78  for (unsigned int n = 0; n != n_neigh; ++n)
79  {
80  const Elem * neigh = elem.neighbor_ptr(n);
81  if (neigh)
82  {
83  libmesh_assert_not_equal_to(neigh, remote_elem);
84  neighbors[i][n] = neigh->id();
85  }
86  else
87  neighbors[i][n] = DofObject::invalid_id;
88  }
89  }
90  }
91 
92  void act_on_data (const std::vector<dof_id_type> & ids,
93  const std::vector<datum> & neighbors) const
94  {
95  for (auto i : index_range(ids))
96  {
97  Elem & elem = mesh.elem_ref(ids[i]);
98 
99  const datum & new_neigh = neighbors[i];
100 
101  const unsigned int n_neigh = elem.n_neighbors();
102  libmesh_assert_equal_to (n_neigh, new_neigh.size());
103 
104  for (unsigned int n = 0; n != n_neigh; ++n)
105  {
106  const dof_id_type new_neigh_id = new_neigh[n];
107  const Elem * old_neigh = elem.neighbor_ptr(n);
108  if (old_neigh && old_neigh != remote_elem)
109  {
110  libmesh_assert_equal_to(old_neigh->id(), new_neigh_id);
111  }
112  else if (new_neigh_id == DofObject::invalid_id)
113  {
114  libmesh_assert (!old_neigh);
115  }
116  else
117  {
118  Elem * neigh = mesh.query_elem_ptr(new_neigh_id);
119  if (neigh)
120  elem.set_neighbor(n, neigh);
121  else
122  elem.set_neighbor(n, const_cast<RemoteElem *>(remote_elem));
123  }
124  }
125  }
126  }
127 };
128 
129 
130 }
131 
132 
133 
134 namespace libMesh
135 {
136 
137 
139  processor_id_type pid,
142  std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
143 {
144  for (auto & gf :
147  {
148  GhostingFunctor::map_type elements_to_ghost;
149  libmesh_assert(gf);
150  (*gf)(elem_it, elem_end, pid, elements_to_ghost);
151 
152  // We can ignore the CouplingMatrix in ->second, but we
153  // need to ghost all the elements in ->first.
154  for (auto & pr : elements_to_ghost)
155  {
156  const Elem * elem = pr.first;
157  libmesh_assert(elem != remote_elem);
158  connected_elements.insert(elem);
159  }
160  }
161 
162  // The GhostingFunctors won't be telling us about the elements from
163  // pid; we need to add those ourselves.
164  for (; elem_it != elem_end; ++elem_it)
165  connected_elements.insert(*elem_it);
166 }
167 
168 
172  std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
173 {
174  // None of these parameters are used when !LIBMESH_ENABLE_AMR.
175  libmesh_ignore(mesh, elem_it, elem_end, connected_elements);
176 
177 #ifdef LIBMESH_ENABLE_AMR
178  // Our XdrIO output needs inactive local elements to not have any
179  // remote_elem children. Let's make sure that doesn't happen.
180  //
181  for (const auto & elem : as_range(elem_it, elem_end))
182  {
183  if (elem->has_children())
184  for (auto & child : elem->child_ref_range())
185  if (&child != remote_elem)
186  connected_elements.insert(&child);
187  }
188 #endif // LIBMESH_ENABLE_AMR
189 }
190 
191 
// Augment connected_elements so that every element is accompanied by
// its ancestors, and every active element by its subactive
// descendants, giving the receiving processor a complete family tree.
void connect_families(std::set<const Elem *, CompareElemIdsByLevel> & connected_elements)
{
  // This parameter is not used when !LIBMESH_ENABLE_AMR.
  libmesh_ignore(connected_elements);

#ifdef LIBMESH_ENABLE_AMR

  // Because our set is sorted by ascending level, we can traverse it
  // in reverse order, adding parents as we go, and end up with all
  // ancestors added.  This is safe for std::set where insert doesn't
  // invalidate iterators.
  //
  // This only works because we do *not* cache
  // connected_elements.rend(), whose value can change when we insert
  // elements which are sorted before the original rend.
  //
  // We're also going to get subactive descendants here, when any
  // exist.  We're iterating in the wrong direction to do that
  // non-recursively, so we'll cop out and rely on total_family_tree.
  // Iterating backwards does mean that we won't be querying the newly
  // inserted subactive elements redundantly.

  std::set<const Elem *, CompareElemIdsByLevel>::reverse_iterator
    elem_rit = connected_elements.rbegin();

  for (; elem_rit != connected_elements.rend(); ++elem_rit)
    {
      const Elem * elem = *elem_rit;
      libmesh_assert(elem);
      const Elem * parent = elem->parent();

      // We let ghosting functors worry about only active elements,
      // but the remote processor needs all its semilocal elements'
      // ancestors and active semilocal elements' descendants too.
      if (parent)
        connected_elements.insert (parent);

      // Active element with (necessarily subactive) children: pull in
      // the whole subactive subtree for data structure consistency.
      if (elem->active() && elem->has_children())
        {
          std::vector<const Elem *> subactive_family;
          elem->total_family_tree(subactive_family);
          for (const auto & f : subactive_family)
            {
              connected_elements.insert(f);
            }
        }
    }

# ifdef DEBUG
  // Let's be paranoid and make sure that all our ancestors
  // really did get inserted.  I screwed this up the first time
  // by caching rend, and I can easily imagine screwing it up in
  // the future by changing containers.
  for (const auto & elem : connected_elements)
    {
      libmesh_assert(elem);
      const Elem * parent = elem->parent();
      if (parent)
        libmesh_assert(connected_elements.count(parent));
    }
# endif // DEBUG

#endif // LIBMESH_ENABLE_AMR
}
257 
258 
259 void reconnect_nodes (const std::set<const Elem *, CompareElemIdsByLevel> & connected_elements,
260  std::set<const Node *> & connected_nodes)
261 {
262  // We're done using the nodes list for element decisions; now
263  // let's reuse it for nodes of the elements we've decided on.
264  connected_nodes.clear();
265 
266  for (const auto & elem : connected_elements)
267  for (auto & n : elem->node_ref_range())
268  connected_nodes.insert(&n);
269 }
270 
271 
272 
273 
274 // ------------------------------------------------------------
275 // MeshCommunication class members
277 {
278  // _neighboring_processors.clear();
279 }
280 
281 
282 
283 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
284 // ------------------------------------------------------------
286 {
287  // no MPI == one processor, no redistribution
288  return;
289 }
290 
291 #else
292 // ------------------------------------------------------------
294  bool newly_coarsened_only) const
295 {
296  // This method will be called after a new partitioning has been
297  // assigned to the elements. This partitioning was defined in
298  // terms of the active elements, and "trickled down" to the
299  // parents and nodes as to be consistent.
300  //
301  // The point is that the entire concept of local elements is
302  // kinda shaky in this method. Elements which were previously
303  // local may now be assigned to other processors, so we need to
304  // send those off. Similarly, we need to accept elements from
305  // other processors.
306 
307  // This method is also useful in the more limited case of
308  // post-coarsening redistribution: if elements are only ghosting
309  // neighbors of their active elements, but adaptive coarsening
310  // causes an inactive element to become active, then we may need a
311  // copy of that inactive element's neighbors.
312 
313  // The approach is as follows:
314  // (1) send all relevant elements we have stored to their proper homes
315  // (2) receive elements from all processors, watching for duplicates
316  // (3) deleting all nonlocal elements
317  // (4) obtaining required ghost elements from neighboring processors
318  libmesh_parallel_only(mesh.comm());
322 
323  LOG_SCOPE("redistribute()", "MeshCommunication");
324 
325  // Get a few unique message tags to use in communications; we'll
326  // default to some numbers around pi*1000
327  Parallel::MessageTag
328  nodestag = mesh.comm().get_unique_tag(3141),
329  elemstag = mesh.comm().get_unique_tag(3142);
330 
331  // Figure out how many nodes and elements we have which are assigned to each
332  // processor. send_n_nodes_and_elem_per_proc contains the number of nodes/elements
333  // we will be sending to each processor, recv_n_nodes_and_elem_per_proc contains
334  // the number of nodes/elements we will be receiving from each processor.
335  // Format:
336  // send_n_nodes_and_elem_per_proc[2*pid+0] = number of nodes to send to pid
337  // send_n_nodes_and_elem_per_proc[2*pid+1] = number of elements to send to pid
338  std::vector<dof_id_type> send_n_nodes_and_elem_per_proc(2*mesh.n_processors(), 0);
339 
340  std::vector<Parallel::Request>
341  node_send_requests, element_send_requests;
342 
343  // We're going to sort elements-to-send by pid in one pass, to avoid
344  // sending predicated iterators through the whole mesh N_p times
345  std::unordered_map<processor_id_type, std::vector<Elem *>> send_to_pid;
346 
347  const MeshBase::const_element_iterator send_elems_begin =
348 #ifdef LIBMESH_ENABLE_AMR
349  newly_coarsened_only ?
351 #endif
353 
354  const MeshBase::const_element_iterator send_elems_end =
355 #ifdef LIBMESH_ENABLE_AMR
356  newly_coarsened_only ?
358 #endif
360 
361  for (auto & elem : as_range(send_elems_begin, send_elems_end))
362  send_to_pid[elem->processor_id()].push_back(elem);
363 
364  // If we don't have any just-coarsened elements to send to a
365  // pid, then there won't be any nodes or any elements pulled
366  // in by ghosting either, and we're done with this pid.
367  for (auto pair : send_to_pid)
368  {
369  const processor_id_type pid = pair.first;
370  // don't send to ourselves!!
371  if (pid == mesh.processor_id())
372  continue;
373 
374  // Build up a list of nodes and elements to send to processor pid.
375  // We will certainly send all the elements assigned to this processor,
376  // but we will also ship off any elements which are required
377  // to be ghosted and any nodes which are used by any of the
378  // above.
379 
380  const auto & p_elements = pair.second;
381  libmesh_assert(!p_elements.empty());
382 
383  Elem * const * elempp = p_elements.data();
384  Elem * const * elemend = elempp + p_elements.size();
385 
386 #ifndef LIBMESH_ENABLE_AMR
387  // This parameter is not used when !LIBMESH_ENABLE_AMR.
388  libmesh_ignore(newly_coarsened_only);
389  libmesh_assert(!newly_coarsened_only);
390 #endif
391 
394  (elempp, elemend, Predicates::NotNull<Elem * const *>());
395 
396  const MeshBase::const_element_iterator elem_end =
398  (elemend, elemend, Predicates::NotNull<Elem * const *>());
399 
400  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
401 
402  // See which to-be-ghosted elements we need to send
403  query_ghosting_functors (mesh, pid, elem_it, elem_end,
404  elements_to_send);
405 
406  // The inactive elements we need to send should have their
407  // immediate children present.
409  mesh.pid_elements_end(pid),
410  elements_to_send);
411 
412  // The elements we need should have their ancestors and their
413  // subactive children present too.
414  connect_families(elements_to_send);
415 
416  std::set<const Node *> connected_nodes;
417  reconnect_nodes(elements_to_send, connected_nodes);
418 
419  // the number of nodes we will ship to pid
420  send_n_nodes_and_elem_per_proc[2*pid+0] =
421  cast_int<dof_id_type>(connected_nodes.size());
422 
423  // send any nodes off to the destination processor
424  libmesh_assert (!connected_nodes.empty());
425  node_send_requests.push_back(Parallel::request());
426 
427  mesh.comm().send_packed_range (pid, &mesh,
428  connected_nodes.begin(),
429  connected_nodes.end(),
430  node_send_requests.back(),
431  nodestag);
432 
433  // the number of elements we will send to this processor
434  send_n_nodes_and_elem_per_proc[2*pid+1] =
435  cast_int<dof_id_type>(elements_to_send.size());
436 
437  // send the elements off to the destination processor
438  libmesh_assert (!elements_to_send.empty());
439  element_send_requests.push_back(Parallel::request());
440 
441  mesh.comm().send_packed_range (pid, &mesh,
442  elements_to_send.begin(),
443  elements_to_send.end(),
444  element_send_requests.back(),
445  elemstag);
446  }
447 
448  std::vector<dof_id_type> recv_n_nodes_and_elem_per_proc(send_n_nodes_and_elem_per_proc);
449 
450  mesh.comm().alltoall (recv_n_nodes_and_elem_per_proc);
451 
452  // In general we will only need to communicate with a subset of the other processors.
453  // I can't immediately think of a case where we will send elements but not nodes, but
454  // these are only bools and we have the information anyway...
455  std::vector<bool>
456  send_node_pair(mesh.n_processors(),false), send_elem_pair(mesh.n_processors(),false),
457  recv_node_pair(mesh.n_processors(),false), recv_elem_pair(mesh.n_processors(),false);
458 
459  unsigned int
460  n_send_node_pairs=0, n_send_elem_pairs=0,
461  n_recv_node_pairs=0, n_recv_elem_pairs=0;
462 
463  for (auto pid : IntRange<processor_id_type>(0, mesh.n_processors()))
464  {
465  if (send_n_nodes_and_elem_per_proc[2*pid+0]) // we have nodes to send
466  {
467  send_node_pair[pid] = true;
468  n_send_node_pairs++;
469  }
470 
471  if (send_n_nodes_and_elem_per_proc[2*pid+1]) // we have elements to send
472  {
473  send_elem_pair[pid] = true;
474  n_send_elem_pairs++;
475  }
476 
477  if (recv_n_nodes_and_elem_per_proc[2*pid+0]) // we have nodes to receive
478  {
479  recv_node_pair[pid] = true;
480  n_recv_node_pairs++;
481  }
482 
483  if (recv_n_nodes_and_elem_per_proc[2*pid+1]) // we have elements to receive
484  {
485  recv_elem_pair[pid] = true;
486  n_recv_elem_pairs++;
487  }
488  }
489  libmesh_assert_equal_to (n_send_node_pairs, node_send_requests.size());
490  libmesh_assert_equal_to (n_send_elem_pairs, element_send_requests.size());
491 
492  // Receive nodes.
493  // We now know how many processors will be sending us nodes.
494  for (unsigned int node_comm_step=0; node_comm_step<n_recv_node_pairs; node_comm_step++)
495  // but we don't necessarily want to impose an ordering, so
496  // just process whatever message is available next.
497  mesh.comm().receive_packed_range (Parallel::any_source,
498  &mesh,
500  (Node**)nullptr,
501  nodestag);
502 
503  // Receive elements.
504  // Similarly we know how many processors are sending us elements,
505  // but we don't really care in what order we receive them.
506  for (unsigned int elem_comm_step=0; elem_comm_step<n_recv_elem_pairs; elem_comm_step++)
507  mesh.comm().receive_packed_range (Parallel::any_source,
508  &mesh,
510  (Elem**)nullptr,
511  elemstag);
512 
513  // Wait for all sends to complete
514  Parallel::wait (node_send_requests);
515  Parallel::wait (element_send_requests);
516 
517  // Check on the redistribution consistency
518 #ifdef DEBUG
520 
522 #endif
523 
524  // If we had a point locator, it's invalid now that there are new
525  // elements it can't locate.
527 
528  // We now have all elements and nodes redistributed; our ghosting
529  // functors should be ready to redistribute and/or recompute any
530  // cached data they use too.
532  gf->redistribute();
533 }
534 #endif // LIBMESH_HAVE_MPI
535 
536 
537 
538 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
539 // ------------------------------------------------------------
541 {
542  // no MPI == one processor, no need for this method...
543  return;
544 }
545 #else
546 // ------------------------------------------------------------
548 {
549  // Don't need to do anything if there is
550  // only one processor.
551  if (mesh.n_processors() == 1)
552  return;
553 
554  // This function must be run on all processors at once
555  libmesh_parallel_only(mesh.comm());
556 
557  LOG_SCOPE("gather_neighboring_elements()", "MeshCommunication");
558 
559  //------------------------------------------------------------------
560  // The purpose of this function is to provide neighbor data structure
561  // consistency for a parallel, distributed mesh. In libMesh we require
562  // that each local element have access to a full set of valid face
563  // neighbors. In some cases this requires us to store "ghost elements" -
564  // elements that belong to other processors but we store to provide
565  // data structure consistency. Also, it is assumed that any element
566  // with a nullptr neighbor resides on a physical domain boundary. So,
567  // even our "ghost elements" must have non-nullptr neighbors. To handle
568  // this the concept of "RemoteElem" is used - a special construct which
569  // is used to denote that an element has a face neighbor, but we do
570  // not actually store detailed information about that neighbor. This
571  // is required to prevent data structure explosion.
572  //
573  // So when this method is called we should have only local elements.
574  // These local elements will then find neighbors among the local
575  // element set. After this is completed, any element with a nullptr
576  // neighbor has either (i) a face on the physical boundary of the mesh,
577  // or (ii) a neighboring element which lives on a remote processor.
578  // To handle case (ii), we communicate the global node indices connected
579  // to all such faces to our neighboring processors. They then send us
580  // all their elements with a nullptr neighbor that are connected to any
581  // of the nodes in our list.
582  //------------------------------------------------------------------
583 
584  // Let's begin with finding consistent neighbor data information
585  // for all the elements we currently have. We'll use a clean
586  // slate here - clear any existing information, including RemoteElem's.
587  mesh.find_neighbors (/* reset_remote_elements = */ true,
588  /* reset_current_list = */ true);
589 
590  // Get a unique message tag to use in communications
591  Parallel::MessageTag
592  element_neighbors_tag = mesh.comm().get_unique_tag();
593 
594  // Now any element with a nullptr neighbor either
595  // (i) lives on the physical domain boundary, or
596  // (ii) lives on an inter-processor boundary.
597  // We will now gather all the elements from adjacent processors
598  // which are of the same state, which should address all the type (ii)
599  // elements.
600 
601  // A list of all the processors which *may* contain neighboring elements.
602  // (for development simplicity, just make this the identity map)
603  std::vector<processor_id_type> adjacent_processors;
604  for (auto pid : IntRange<processor_id_type>(0, mesh.n_processors()))
605  if (pid != mesh.processor_id())
606  adjacent_processors.push_back (pid);
607 
608 
609  const processor_id_type n_adjacent_processors =
610  cast_int<processor_id_type>(adjacent_processors.size());
611 
612  //-------------------------------------------------------------------------
613  // Let's build a list of all nodes which live on nullptr-neighbor sides.
614  // For simplicity, we will use a set to build the list, then transfer
615  // it to a vector for communication.
616  std::vector<dof_id_type> my_interface_node_list;
617  std::vector<const Elem *> my_interface_elements;
618  {
619  std::set<dof_id_type> my_interface_node_set;
620 
621  // Pull objects out of the loop to reduce heap operations
622  std::unique_ptr<const Elem> side;
623 
624  // since parent nodes are a subset of children nodes, this should be sufficient
625  for (const auto & elem : mesh.active_local_element_ptr_range())
626  {
627  libmesh_assert(elem);
628 
629  if (elem->on_boundary()) // denotes *any* side has a nullptr neighbor
630  {
631  my_interface_elements.push_back(elem); // add the element, but only once, even
632  // if there are multiple nullptr neighbors
633  for (auto s : elem->side_index_range())
634  if (elem->neighbor_ptr(s) == nullptr)
635  {
636  elem->build_side_ptr(side, s);
637 
638  for (auto n : IntRange<unsigned int>(0, side->n_vertices()))
639  my_interface_node_set.insert (side->node_id(n));
640  }
641  }
642  }
643 
644  my_interface_node_list.reserve (my_interface_node_set.size());
645  my_interface_node_list.insert (my_interface_node_list.end(),
646  my_interface_node_set.begin(),
647  my_interface_node_set.end());
648  }
649 
650  // we will now send my_interface_node_list to all of the adjacent processors.
651  // note that for the time being we will copy the list to a unique buffer for
652  // each processor so that we can use a nonblocking send and not access the
653  // buffer again until the send completes. it is my understanding that the
654  // MPI 2.1 standard seeks to remove this restriction as unnecessary, so in
655  // the future we should change this to send the same buffer to each of the
656  // adjacent processors. - BSK 11/17/2008
657  std::vector<std::vector<dof_id_type>>
658  my_interface_node_xfer_buffers (n_adjacent_processors, my_interface_node_list);
659  std::map<processor_id_type, unsigned char> n_comm_steps;
660 
661  std::vector<Parallel::Request> send_requests (3*n_adjacent_processors);
662  unsigned int current_request = 0;
663 
664  for (unsigned int comm_step=0; comm_step<n_adjacent_processors; comm_step++)
665  {
666  n_comm_steps[adjacent_processors[comm_step]]=1;
667  mesh.comm().send (adjacent_processors[comm_step],
668  my_interface_node_xfer_buffers[comm_step],
669  send_requests[current_request++],
670  element_neighbors_tag);
671  }
672 
673  //-------------------------------------------------------------------------
674  // processor pairings are symmetric - I expect to receive an interface node
675  // list from each processor in adjacent_processors as well!
676  // now we will catch an incoming node list for each of our adjacent processors.
677  //
678  // we are done with the adjacent_processors list - note that it is in general
679  // a superset of the processors we truly share elements with. so let's
680  // clear the superset list, and we will fill it with the true list.
681  adjacent_processors.clear();
682 
683  std::vector<dof_id_type> common_interface_node_list;
684 
685  // we expect two classes of messages -
686  // (1) incoming interface node lists, to which we will reply with our elements
687  // touching nodes in the list, and
688  // (2) replies from the requests we sent off previously.
689  // (2.a) - nodes
690  // (2.b) - elements
691  // so we expect 3 communications from each adjacent processor.
692  // by structuring the communication in this way we hopefully impose no
693  // order on the handling of the arriving messages. in particular, we
694  // should be able to handle the case where we receive a request and
695  // all replies from processor A before even receiving a request from
696  // processor B.
697 
698  for (unsigned int comm_step=0; comm_step<3*n_adjacent_processors; comm_step++)
699  {
700  //------------------------------------------------------------------
701  // catch incoming node list
702  Parallel::Status
703  status(mesh.comm().probe (Parallel::any_source,
704  element_neighbors_tag));
705  const processor_id_type
706  source_pid_idx = cast_int<processor_id_type>(status.source()),
707  dest_pid_idx = source_pid_idx;
708 
709  //------------------------------------------------------------------
710  // first time - incoming request
711  if (n_comm_steps[source_pid_idx] == 1)
712  {
713  n_comm_steps[source_pid_idx]++;
714 
715  mesh.comm().receive (source_pid_idx,
716  common_interface_node_list,
717  element_neighbors_tag);
718 
719  // const std::size_t
720  // their_interface_node_list_size = common_interface_node_list.size();
721 
722  // we now have the interface node list from processor source_pid_idx.
723  // now we can find all of our elements which touch any of these nodes
724  // and send copies back to this processor. however, we can make our
725  // search more efficient by first excluding all the nodes in
726  // their list which are not also contained in
727  // my_interface_node_list. we can do this in place as a set
728  // intersection.
729  common_interface_node_list.erase
730  (std::set_intersection (my_interface_node_list.begin(),
731  my_interface_node_list.end(),
732  common_interface_node_list.begin(),
733  common_interface_node_list.end(),
734  common_interface_node_list.begin()),
735  common_interface_node_list.end());
736 
737  // if (false)
738  // libMesh::out << "[" << mesh.processor_id() << "] "
739  // << "my_interface_node_list.size()=" << my_interface_node_list.size()
740  // << ", [" << source_pid_idx << "] "
741  // << "their_interface_node_list.size()=" << their_interface_node_list_size
742  // << ", common_interface_node_list.size()=" << common_interface_node_list.size()
743  // << std::endl;
744 
745  // Now we need to see which of our elements touch the nodes in the list.
746  // We will certainly send all the active elements which intersect source_pid_idx,
747  // but we will also ship off the other elements in the same family tree
748  // as the active ones for data structure consistency.
749  //
750  // FIXME - shipping full family trees is unnecessary and inefficient.
751  //
752  // We also ship any nodes connected to these elements. Note
753  // some of these nodes and elements may be replicated from
754  // other processors, but that is OK.
755  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
756  std::set<const Node *> connected_nodes;
757 
758  // Check for quick return?
759  if (common_interface_node_list.empty())
760  {
761  // let's try to be smart here - if we have no nodes in common,
762  // we cannot share elements. so post the messages expected
763  // from us here and go on about our business.
764  // note that even though these are nonblocking sends
765  // they should complete essentially instantly, because
766  // in all cases the send buffers are empty
767  mesh.comm().send_packed_range (dest_pid_idx,
768  &mesh,
769  connected_nodes.begin(),
770  connected_nodes.end(),
771  send_requests[current_request++],
772  element_neighbors_tag);
773 
774  mesh.comm().send_packed_range (dest_pid_idx,
775  &mesh,
776  elements_to_send.begin(),
777  elements_to_send.end(),
778  send_requests[current_request++],
779  element_neighbors_tag);
780 
781  continue;
782  }
783  // otherwise, this really *is* an adjacent processor.
784  adjacent_processors.push_back(source_pid_idx);
785 
786  std::vector<const Elem *> family_tree;
787 
788  for (auto & elem : my_interface_elements)
789  {
790  std::size_t n_shared_nodes = 0;
791 
792  for (auto n : IntRange<unsigned int>(0, elem->n_vertices()))
793  if (std::binary_search (common_interface_node_list.begin(),
794  common_interface_node_list.end(),
795  elem->node_id(n)))
796  {
797  n_shared_nodes++;
798 
799  // TBD - how many nodes do we need to share
800  // before we care? certainly 2, but 1? not
801  // sure, so let's play it safe...
802  if (n_shared_nodes > 0) break;
803  }
804 
805  if (n_shared_nodes) // share at least one node?
806  {
807  elem = elem->top_parent();
808 
809  // avoid a lot of duplicated effort -- if we already have elem
810  // in the set its entire family tree is already in the set.
811  if (!elements_to_send.count(elem))
812  {
813 #ifdef LIBMESH_ENABLE_AMR
814  elem->family_tree(family_tree);
815 #else
816  family_tree.clear();
817  family_tree.push_back(elem);
818 #endif
819  for (const auto & f : family_tree)
820  {
821  elem = f;
822  elements_to_send.insert (elem);
823 
824  for (auto & n : elem->node_ref_range())
825  connected_nodes.insert (&n);
826  }
827  }
828  }
829  }
830 
831  // The elements_to_send and connected_nodes sets now contain all
832  // the elements and nodes we need to send to this processor.
833  // All that remains is to pack up the objects (along with
834  // any boundary conditions) and send the messages off.
835  {
836  libmesh_assert (connected_nodes.empty() || !elements_to_send.empty());
837  libmesh_assert (!connected_nodes.empty() || elements_to_send.empty());
838 
839  // send the nodes off to the destination processor
840  mesh.comm().send_packed_range (dest_pid_idx,
841  &mesh,
842  connected_nodes.begin(),
843  connected_nodes.end(),
844  send_requests[current_request++],
845  element_neighbors_tag);
846 
847  // send the elements off to the destination processor
848  mesh.comm().send_packed_range (dest_pid_idx,
849  &mesh,
850  elements_to_send.begin(),
851  elements_to_send.end(),
852  send_requests[current_request++],
853  element_neighbors_tag);
854  }
855  }
856  //------------------------------------------------------------------
857  // second time - reply of nodes
858  else if (n_comm_steps[source_pid_idx] == 2)
859  {
860  n_comm_steps[source_pid_idx]++;
861 
862  mesh.comm().receive_packed_range (source_pid_idx,
863  &mesh,
865  (Node**)nullptr,
866  element_neighbors_tag);
867  }
868  //------------------------------------------------------------------
869  // third time - reply of elements
870  else if (n_comm_steps[source_pid_idx] == 3)
871  {
872  n_comm_steps[source_pid_idx]++;
873 
874  mesh.comm().receive_packed_range (source_pid_idx,
875  &mesh,
877  (Elem**)nullptr,
878  element_neighbors_tag);
879  }
880  //------------------------------------------------------------------
881  // fourth time - shouldn't happen
882  else
883  {
884  libMesh::err << "ERROR: unexpected number of replies: "
885  << n_comm_steps[source_pid_idx]
886  << std::endl;
887  }
888  } // done catching & processing replies associated with tag ~ 100,000pi
889 
890  // allow any pending requests to complete
891  Parallel::wait (send_requests);
892 
893  // If we had a point locator, it's invalid now that there are new
894  // elements it can't locate.
896 
897  // We can now find neighbor information for the interfaces between
898  // local elements and ghost elements.
899  mesh.find_neighbors (/* reset_remote_elements = */ true,
900  /* reset_current_list = */ false);
901 
902  // Ghost elements may not have correct remote_elem neighbor links,
903  // and we may not be able to locally infer correct neighbor links to
904  // remote elements. So we synchronize ghost element neighbor links.
905  SyncNeighbors nsync(mesh);
906 
908  (mesh.comm(), mesh.elements_begin(), mesh.elements_end(), nsync);
909 }
910 #endif // LIBMESH_HAVE_MPI
911 
912 
913 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
914 // ------------------------------------------------------------
916 {
917  // no MPI == one processor, no need for this method...
918  return;
919 }
920 #else
922 {
923 
924  // Don't need to do anything if all processors already ghost all non-local
925  // elements.
926  if (mesh.is_serial())
927  return;
928 
929  // This algorithm uses the MeshBase::flagged_pid_elements_begin/end iterators
930  // which are only available when AMR is enabled.
931 #ifndef LIBMESH_ENABLE_AMR
932  libmesh_error_msg("Calling MeshCommunication::send_coarse_ghosts() requires AMR to be enabled. "
933  "Please configure libmesh with --enable-amr.");
934 #else
935  // When we coarsen elements on a DistributedMesh, we make their
936  // parents active. This may increase the ghosting requirements on
937  // the processor which owns the newly-activated parent element. To
938  // ensure ghosting requirements are satisfied, processors which
939  // coarsen an element will send all the associated ghosted elements
940  // to all processors which own any of the coarsened-away-element's
941  // siblings.
942  typedef std::unordered_map<processor_id_type, std::vector<Elem *>> ghost_map;
943  ghost_map coarsening_elements_to_ghost;
944 
945  const processor_id_type proc_id = mesh.processor_id();
946  // Look for just-coarsened elements
947  for (auto elem : as_range(mesh.flagged_pid_elements_begin(Elem::COARSEN, proc_id),
949  {
950  // If it's flagged for coarsening it had better have a parent
951  libmesh_assert(elem->parent());
952 
953  // On a distributed mesh:
954  // If we don't own this element's parent but we do own it, then
955  // there is a chance that we are aware of ghost elements which
956  // the parent's owner needs us to send them.
957  const processor_id_type their_proc_id = elem->parent()->processor_id();
958  if (their_proc_id != proc_id)
959  coarsening_elements_to_ghost[their_proc_id].push_back(elem);
960  }
961 
962  const processor_id_type n_proc = mesh.n_processors();
963 
964  // Get a few unique message tags to use in communications; we'll
965  // default to some numbers around pi*1000
966  Parallel::MessageTag
967  nodestag = mesh.comm().get_unique_tag(3141),
968  elemstag = mesh.comm().get_unique_tag(3142);
969 
970  std::vector<Parallel::Request> send_requests;
971 
972  // Using unsigned char instead of bool since it'll get converted for
973  // MPI use later anyway
974  std::vector<unsigned char> send_to_proc(n_proc, 0);
975 
976  for (processor_id_type p=0; p != n_proc; ++p)
977  {
978  if (p == proc_id)
979  break;
980 
981  // We'll send these asynchronously, but their data will be
982  // packed into Parallel:: buffers so it will be okay when the
983  // original containers are destructed before the sends complete.
984  std::set<const Elem *, CompareElemIdsByLevel> elements_to_send;
985  std::set<const Node *> nodes_to_send;
986 
987  const ghost_map::const_iterator it =
988  coarsening_elements_to_ghost.find(p);
989  if (it != coarsening_elements_to_ghost.end())
990  {
991  const std::vector<Elem *> & elems = it->second;
992  libmesh_assert(elems.size());
993 
994  // Make some fake element iterators defining this vector of
995  // elements
996  Elem * const * elempp = const_cast<Elem * const *>(elems.data());
997  Elem * const * elemend = elempp+elems.size();
998  const MeshBase::const_element_iterator elem_it =
1000  const MeshBase::const_element_iterator elem_end =
1002 
1003  for (auto & gf : as_range(mesh.ghosting_functors_begin(),
1005  {
1006  GhostingFunctor::map_type elements_to_ghost;
1007  libmesh_assert(gf);
1008  (*gf)(elem_it, elem_end, p, elements_to_ghost);
1009 
1010  // We can ignore the CouplingMatrix in ->second, but we
1011  // need to ghost all the elements in ->first.
1012  for (auto & pr : elements_to_ghost)
1013  {
1014  const Elem * elem = pr.first;
1015  libmesh_assert(elem);
1016  while (elem)
1017  {
1018  libmesh_assert(elem != remote_elem);
1019  elements_to_send.insert(elem);
1020  for (auto & n : elem->node_ref_range())
1021  nodes_to_send.insert(&n);
1022  elem = elem->parent();
1023  }
1024  }
1025  }
1026 
1027  send_requests.push_back(Parallel::request());
1028 
1029  mesh.comm().send_packed_range (p, &mesh,
1030  nodes_to_send.begin(),
1031  nodes_to_send.end(),
1032  send_requests.back(),
1033  nodestag);
1034 
1035  send_requests.push_back(Parallel::request());
1036 
1037  send_to_proc[p] = 1; // true
1038 
1039  mesh.comm().send_packed_range (p, &mesh,
1040  elements_to_send.begin(),
1041  elements_to_send.end(),
1042  send_requests.back(),
1043  elemstag);
1044  }
1045  }
1046 
1047  // Find out how many other processors are sending us elements+nodes.
1048  std::vector<unsigned char> recv_from_proc(send_to_proc);
1049  mesh.comm().alltoall(recv_from_proc);
1050 
1051  const processor_id_type n_receives = cast_int<processor_id_type>
1052  (std::count(recv_from_proc.begin(), recv_from_proc.end(), 1));
1053 
1054  // Receive nodes first since elements will need to attach to them
1055  for (processor_id_type recv_i = 0; recv_i != n_receives; ++recv_i)
1056  {
1057  mesh.comm().receive_packed_range
1058  (Parallel::any_source,
1059  &mesh,
1061  (Node**)nullptr,
1062  nodestag);
1063  }
1064 
1065  for (processor_id_type recv_i = 0; recv_i != n_receives; ++recv_i)
1066  {
1067  mesh.comm().receive_packed_range
1068  (Parallel::any_source,
1069  &mesh,
1071  (Elem**)nullptr,
1072  elemstag);
1073  }
1074 
1075  // Wait for all sends to complete
1076  Parallel::wait (send_requests);
1077 #endif // LIBMESH_ENABLE_AMR
1078 }
1079 
1080 #endif // LIBMESH_HAVE_MPI
1081 
1082 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
1083 // ------------------------------------------------------------
1085 {
1086  // no MPI == one processor, no need for this method...
1087  return;
1088 }
1089 #else
1090 // ------------------------------------------------------------
1092 {
1093  // Don't need to do anything if there is
1094  // only one processor.
1095  if (mesh.n_processors() == 1)
1096  return;
1097 
1098  // This function must be run on all processors at once
1099  libmesh_parallel_only(mesh.comm());
1100 
1101  LOG_SCOPE("broadcast()", "MeshCommunication");
1102 
1103  // Explicitly clear the mesh on all but processor 0.
1104  if (mesh.processor_id() != 0)
1105  mesh.clear();
1106 
1107  // We may have set extra data only on processor 0 in a read()
1108  mesh.comm().broadcast(mesh._node_integer_names);
1109  mesh.comm().broadcast(mesh._elem_integer_names);
1110 
1111  // We may have set mapping data only on processor 0 in a read()
1112  unsigned char map_type = mesh.default_mapping_type();
1113  unsigned char map_data = mesh.default_mapping_data();
1114  mesh.comm().broadcast(map_type);
1115  mesh.comm().broadcast(map_data);
1117  mesh.set_default_mapping_data(map_data);
1118 
1119  // Broadcast nodes
1120  mesh.comm().broadcast_packed_range(&mesh,
1121  mesh.nodes_begin(),
1122  mesh.nodes_end(),
1123  &mesh,
1125 
1126  // Broadcast elements from coarsest to finest, so that child
1127  // elements will see their parents already in place.
1128  //
1129  // When restarting from a checkpoint, we may have elements which are
1130  // assigned to a processor but which have not yet been sent to that
1131  // processor, so we need to use a paranoid n_levels() count and not
1132  // the usual fast algorithm.
1133  const unsigned int n_levels = MeshTools::paranoid_n_levels(mesh);
1134 
1135  for (unsigned int l=0; l != n_levels; ++l)
1136  mesh.comm().broadcast_packed_range(&mesh,
1139  &mesh,
1141 
1142  // Make sure mesh_dimension and elem_dimensions are consistent.
1144 
1145  // Broadcast all of the named entity information
1146  mesh.comm().broadcast(mesh.set_subdomain_name_map());
1149 
1150  // If we had a point locator, it's invalid now that there are new
1151  // elements it can't locate.
1153 
1154  libmesh_assert (mesh.comm().verify(mesh.n_elem()));
1155  libmesh_assert (mesh.comm().verify(mesh.n_nodes()));
1156 
1157 #ifdef DEBUG
1158  MeshTools::libmesh_assert_valid_procids<Elem>(mesh);
1159  MeshTools::libmesh_assert_valid_procids<Node>(mesh);
1160 #endif
1161 }
1162 #endif // LIBMESH_HAVE_MPI
1163 
1164 
1165 
1166 #ifndef LIBMESH_HAVE_MPI // avoid spurious gcc warnings
1167 // ------------------------------------------------------------
1169 {
1170  // no MPI == one processor, no need for this method...
1171  return;
1172 }
1173 #else
1174 // ------------------------------------------------------------
1176 {
1177  // Check for quick return
1178  if (mesh.n_processors() == 1)
1179  return;
1180 
1181  // This function must be run on all processors at once
1182  libmesh_parallel_only(mesh.comm());
1183 
1184  LOG_SCOPE("(all)gather()", "MeshCommunication");
1185 
1186  (root_id == DofObject::invalid_processor_id) ?
1187 
1188  mesh.comm().allgather_packed_range (&mesh,
1189  mesh.nodes_begin(),
1190  mesh.nodes_end(),
1192 
1193  mesh.comm().gather_packed_range (root_id,
1194  &mesh,
1195  mesh.nodes_begin(),
1196  mesh.nodes_end(),
1198 
1199  // Gather elements from coarsest to finest, so that child
1200  // elements will see their parents already in place.
1201  const unsigned int n_levels = MeshTools::n_levels(mesh);
1202 
1203  for (unsigned int l=0; l != n_levels; ++l)
1204  (root_id == DofObject::invalid_processor_id) ?
1205 
1206  mesh.comm().allgather_packed_range (&mesh,
1210 
1211  mesh.comm().gather_packed_range (root_id,
1212  &mesh,
1216 
1217  // If we had a point locator, it's invalid now that there are new
1218  // elements it can't locate.
1220 
1221  // If we are doing an allgather(), perform sanity check on the result.
1222  if (root_id == DofObject::invalid_processor_id)
1223  {
1224  libmesh_assert (mesh.comm().verify(mesh.n_elem()));
1225  libmesh_assert (mesh.comm().verify(mesh.n_nodes()));
1226  }
1227 
1228  // Inform new elements of their neighbors,
1229  // while resetting all remote_elem links on
1230  // the ranks which did the gather.
1232  root_id == mesh.processor_id());
1233 
1234  // All done, but let's make sure it's done correctly
1235 
1236 #ifdef DEBUG
1238 #endif
1239 }
1240 #endif // LIBMESH_HAVE_MPI
1241 
1242 
1243 
1244 // Functor for make_elems_parallel_consistent and
1245 // make_node_ids_parallel_consistent
1246 namespace {
1247 
// Functor for Parallel::sync_dofobject_data_by_id(): makes ghost
// DofObject ids agree with the owning processor's ids by renumbering
// through a MeshBase member-function pointer (renumber_node or
// renumber_elem).
1248 struct SyncIds
1249 {
1250  typedef dof_id_type datum;
1251  typedef void (MeshBase::*renumber_obj)(dof_id_type, dof_id_type);
1252 
1253  SyncIds(MeshBase & _mesh, renumber_obj _renumberer) :
1254  mesh(_mesh),
1255  renumber(_renumberer) {}
1256 
// NOTE(review): original line 1257 was lost in this extraction; the
// constructor initializes a `mesh` member, so presumably
// `MeshBase & mesh;` was declared here — confirm against upstream.
1258  renumber_obj renumber;
1259  // renumber_obj & renumber;
1260 
1261  // Find the id of each requested DofObject -
1262  // Parallel::sync_* already did the work for us
1263  void gather_data (const std::vector<dof_id_type> & ids,
1264  std::vector<datum> & ids_out) const
1265  {
1266  ids_out = ids;
1267  }
1268 
// Renumber any local object whose id disagrees with the id the
// owning processor reported for it.
1269  void act_on_data (const std::vector<dof_id_type> & old_ids,
1270  const std::vector<datum> & new_ids) const
1271  {
1272  for (auto i : index_range(old_ids))
1273  if (old_ids[i] != new_ids[i])
1274  (mesh.*renumber)(old_ids[i], new_ids[i]);
1275  }
1276 };
1277 
1278 
// Functor for iterative node-id synchronization: unlike SyncIds, a
// reply here is only authoritative ("definitive") if it came from the
// node's owner, so act_on_data() reports whether another sync round is
// still needed.
1279 struct SyncNodeIds
1280 {
1281  typedef dof_id_type datum;
1282 
1283  SyncNodeIds(MeshBase & _mesh) :
1284  mesh(_mesh) {}
1285 
1286  MeshBase & mesh;
1287 
1288  // We only know a Node id() is definitive if we own the Node or if
1289  // we're told it's definitive. We keep track of the latter cases by
1290  // putting definitively id'd ghost nodes into this set.
1291  typedef std::unordered_set<const Node *> uset_type;
1292  uset_type definitive_ids;
1293 
1294  // We should never be told two different definitive ids for the same
1295  // node, but let's check on that in debug mode.
1296 #ifdef DEBUG
1297  typedef std::unordered_map<dof_id_type, dof_id_type> umap_type;
// NOTE(review): original line 1298 was lost in this extraction; the
// uses of `definitive_renumbering` below imply it declared
// `umap_type definitive_renumbering;` — confirm against upstream.
1299 #endif
1300 
1301  // Find the id of each requested DofObject -
1302  // Parallel::sync_* already tried to do the work for us, but we can
1303  // only say the result is definitive if we own the DofObject or if
1304  // we were given the definitive result from another processor.
1305  void gather_data (const std::vector<dof_id_type> & ids,
1306  std::vector<datum> & ids_out) const
1307  {
// Entries left as invalid_id mean "I cannot answer definitively".
1308  ids_out.clear();
1309  ids_out.resize(ids.size(), DofObject::invalid_id);
1310 
1311  for (auto i : index_range(ids))
1312  {
1313  const dof_id_type id = ids[i];
1314  const Node * node = mesh.query_node_ptr(id);
1315  if (node && (node->processor_id() == mesh.processor_id() ||
1316  definitive_ids.count(node)))
1317  ids_out[i] = id;
1318  }
1319  }
1320 
// Returns true if any data changed (or is still unresolved), i.e. the
// caller's sync loop must run again.
1321  bool act_on_data (const std::vector<dof_id_type> & old_ids,
1322  const std::vector<datum> & new_ids)
1323  {
1324  bool data_changed = false;
1325  for (auto i : index_range(old_ids))
1326  {
1327  const dof_id_type new_id = new_ids[i];
1328 
1329  const dof_id_type old_id = old_ids[i];
1330 
1331  Node * node = mesh.query_node_ptr(old_id);
1332 
1333  // If we can't find the node we were asking about, another
1334  // processor must have already given us the definitive id
1335  // for it
1336  if (!node)
1337  {
1338  // But let's check anyway in debug mode
1339 #ifdef DEBUG
// NOTE(review): original line 1340 was lost in this extraction;
// given line 1341's parenthesized argument it was presumably a
// `libmesh_assert` split across two lines — confirm upstream.
1341  (definitive_renumbering.count(old_id));
1342  libmesh_assert_equal_to
1343  (new_id, definitive_renumbering[old_id]);
1344 #endif
1345  continue;
1346  }
1347 
1348  // If we asked for an id but there's no definitive id ready
1349  // for us yet, then we can't quit trying to sync yet.
1350  if (new_id == DofObject::invalid_id)
1351  {
1352  // But we might have gotten a definitive id from a
1353  // different request
1354  if (!definitive_ids.count(mesh.node_ptr(old_id)))
1355  data_changed = true;
1356  }
1357  else
1358  {
// A reply for a ghost node is definitive by construction (only the
// owner, or someone told definitively, answers) — remember that.
1359  if (node->processor_id() != mesh.processor_id())
1360  definitive_ids.insert(node);
1361  if (old_id != new_id)
1362  {
1363 #ifdef DEBUG
// NOTE(review): original line 1364 was lost in this extraction;
// presumably a `libmesh_assert` split across two lines, matching
// the pattern at lines 1340-1341 — confirm upstream.
1365  (!definitive_renumbering.count(old_id));
1366  definitive_renumbering[old_id] = new_id;
1367 #endif
1368  mesh.renumber_node(old_id, new_id);
1369  data_changed = true;
1370  }
1371  }
1372  }
1373  return data_changed;
1374  }
1375 };
1376 
1377 
1378 #ifdef LIBMESH_ENABLE_AMR
1379 struct SyncPLevels
1380 {
1381  typedef unsigned char datum;
1382 
1383  SyncPLevels(MeshBase & _mesh) :
1384  mesh(_mesh) {}
1385 
1386  MeshBase & mesh;
1387 
1388  // Find the p_level of each requested Elem
1389  void gather_data (const std::vector<dof_id_type> & ids,
1390  std::vector<datum> & ids_out) const
1391  {
1392  ids_out.reserve(ids.size());
1393 
1394  for (const auto & id : ids)
1395  {
1396  Elem & elem = mesh.elem_ref(id);
1397  ids_out.push_back(cast_int<unsigned char>(elem.p_level()));
1398  }
1399  }
1400 
1401  void act_on_data (const std::vector<dof_id_type> & old_ids,
1402  const std::vector<datum> & new_p_levels) const
1403  {
1404  for (auto i : index_range(old_ids))
1405  {
1406  Elem & elem = mesh.elem_ref(old_ids[i]);
1407  elem.set_p_level(new_p_levels[i]);
1408  }
1409  }
1410 };
1411 #endif // LIBMESH_ENABLE_AMR
1412 
1413 
1414 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1415 template <typename DofObjSubclass>
1416 struct SyncUniqueIds
1417 {
1418  typedef unique_id_type datum;
1419  typedef DofObjSubclass* (MeshBase::*query_obj)(const dof_id_type);
1420 
1421  SyncUniqueIds(MeshBase &_mesh, query_obj _querier) :
1422  mesh(_mesh),
1423  query(_querier) {}
1424 
1425  MeshBase & mesh;
1426  query_obj query;
1427 
1428  // Find the id of each requested DofObject -
1429  // Parallel::sync_* already did the work for us
1430  void gather_data (const std::vector<dof_id_type> & ids,
1431  std::vector<datum> & ids_out) const
1432  {
1433  ids_out.reserve(ids.size());
1434 
1435  for (const auto & id : ids)
1436  {
1437  DofObjSubclass * d = (mesh.*query)(id);
1438  libmesh_assert(d);
1439  ids_out.push_back(d->unique_id());
1440  }
1441  }
1442 
1443  void act_on_data (const std::vector<dof_id_type> & ids,
1444  const std::vector<datum> & unique_ids) const
1445  {
1446  for (auto i : index_range(ids))
1447  {
1448  DofObjSubclass * d = (mesh.*query)(ids[i]);
1449  libmesh_assert(d);
1450  d->set_unique_id() = unique_ids[i];
1451  }
1452  }
1453 };
1454 #endif // LIBMESH_ENABLE_UNIQUE_ID
1455 }
1456 
1457 
1458 
1459 // ------------------------------------------------------------
1461 {
1462  // This function must be run on all processors at once
1463  libmesh_parallel_only(mesh.comm());
1464 
1465  // We need to agree on which processor owns every node, but we can't
1466  // easily assert that here because we don't currently agree on which
1467  // id every node has, and some of our temporary ids on unrelated
1468  // nodes will "overlap".
1469 //#ifdef DEBUG
1470 // MeshTools::libmesh_assert_parallel_consistent_procids<Node> (mesh);
1471 //#endif // DEBUG
1472 
1473  LOG_SCOPE ("make_node_ids_parallel_consistent()", "MeshCommunication");
1474 
1475  SyncNodeIds syncids(mesh);
1479 
1480  // At this point, with both ids and processor ids synced, we can
1481  // finally check for topological consistency of node processor ids.
1482 #ifdef DEBUG
1484 #endif
1485 }
1486 
1487 
1488 
1490 {
1491  // Avoid unused variable warnings if unique ids aren't enabled.
1493 
1494  // This function must be run on all processors at once
1495  libmesh_parallel_only(mesh.comm());
1496 
1497 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1498  LOG_SCOPE ("make_node_unique_ids_parallel_consistent()", "MeshCommunication");
1499 
1500  SyncUniqueIds<Node> syncuniqueids(mesh, &MeshBase::query_node_ptr);
1502  mesh.nodes_begin(),
1503  mesh.nodes_end(),
1504  syncuniqueids);
1505 
1506 #endif
1507 }
1508 
1509 
1510 
1511 
1512 // ------------------------------------------------------------
1514 {
1515  // This function must be run on all processors at once
1516  libmesh_parallel_only(mesh.comm());
1517 
1518  LOG_SCOPE ("make_elems_parallel_consistent()", "MeshCommunication");
1519 
1520  SyncIds syncids(mesh, &MeshBase::renumber_elem);
1523  mesh.active_elements_end(), syncids);
1524 
1525 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1526  SyncUniqueIds<Elem> syncuniqueids(mesh, &MeshBase::query_elem_ptr);
1529  mesh.active_elements_end(), syncuniqueids);
1530 #endif
1531 }
1532 
1533 
1534 
1535 // ------------------------------------------------------------
1536 #ifdef LIBMESH_ENABLE_AMR
1538 {
1539  // This function must be run on all processors at once
1540  libmesh_parallel_only(mesh.comm());
1541 
1542  LOG_SCOPE ("make_p_levels_parallel_consistent()", "MeshCommunication");
1543 
1544  SyncPLevels syncplevels(mesh);
1547  syncplevels);
1548 }
1549 #endif // LIBMESH_ENABLE_AMR
1550 
1551 
1552 
1553 // Functors for make_node_proc_ids_parallel_consistent
1554 namespace {
1555 
// Functor for iterative node processor-id synchronization: lowers each
// node's processor_id toward the smallest valid id reported by any
// processor, and reports whether anything changed (so the caller knows
// to iterate again).
1556 struct SyncProcIds
1557 {
1558  typedef processor_id_type datum;
1559 
1560  SyncProcIds(MeshBase & _mesh) : mesh(_mesh) {}
1561 
1562  MeshBase & mesh;
1563 
1564  // ------------------------------------------------------------
1565  void gather_data (const std::vector<dof_id_type> & ids,
1566  std::vector<datum> & data)
1567  {
1568  // Find the processor id of each requested node
1569  data.resize(ids.size());
1570 
1571  for (auto i : index_range(ids))
1572  {
1573  // Look for this point in the mesh
1574  if (ids[i] != DofObject::invalid_id)
1575  {
1576  Node & node = mesh.node_ref(ids[i]);
1577 
1578  // Return the node's correct processor id,
1579  data[i] = node.processor_id();
1580  }
1581  else
// NOTE(review): original line 1582 (the else-branch body) was lost in
// this extraction; presumably it assigned a sentinel such as
// `data[i] = DofObject::invalid_processor_id;` — confirm upstream.
1583  }
1584  }
1585 
1586  // ------------------------------------------------------------
// NOTE(review): `proc_ids` is taken by value here, which copies the
// vector on every call; gather_data's counterpart takes a reference,
// so an `&` may have been lost in extraction — confirm upstream.
1587  bool act_on_data (const std::vector<dof_id_type> & ids,
1588  const std::vector<datum> proc_ids)
1589  {
1590  bool data_changed = false;
1591 
1592  // Set the ghost node processor ids we've now been informed of
1593  for (auto i : index_range(ids))
1594  {
1595  Node & node = mesh.node_ref(ids[i]);
1596 
1597  // We may not have ids synched when this synchronization is done, so we
1598  // *can't* use id to load-balance processor id properly; we have to use
1599  // the old heuristic of choosing the smallest valid processor id.
1600  //
1601  // If someone tells us our node processor id is too low, then
1602  // they're wrong. If they tell us our node processor id is
1603  // too high, then we're wrong.
1604  if (node.processor_id() > proc_ids[i])
1605  {
1606  data_changed = true;
1607  node.processor_id() = proc_ids[i];
1608  }
1609  }
1610 
1611  return data_changed;
1612  }
1613 };
1614 
1615 
1616 struct ElemNodesMaybeNew
1617 {
1618  ElemNodesMaybeNew() {}
1619 
1620  bool operator() (const Elem * elem) const
1621  {
1622  // If this element was just refined then it may have new nodes we
1623  // need to work on
1624 #ifdef LIBMESH_ENABLE_AMR
1625  if (elem->refinement_flag() == Elem::JUST_REFINED)
1626  return true;
1627 #endif
1628 
1629  // If this element has remote_elem neighbors then there may have
1630  // been refinement of those neighbors that affect its nodes'
1631  // processor_id()
1632  for (auto neigh : elem->neighbor_ptr_range())
1633  if (neigh == remote_elem)
1634  return true;
1635  return false;
1636  }
1637 };
1638 
1639 
// Records, at construction time, which nodes counted as "new"; later
// queried per element-node by the sync machinery (and directly via
// `was_new` at lines 1750-1754 above) to restrict work to new nodes.
1640 struct NodeWasNew
1641 {
1642  NodeWasNew(const MeshBase & mesh)
1643  {
1644  for (const auto & node : mesh.node_ptr_range())
// NOTE(review): original line 1645 was lost in this extraction; as
// written every node would be inserted, so line 1645 presumably held a
// filtering condition (e.g. testing for an unpartitioned/invalid
// processor_id, given this struct's name and its use for "truly new"
// nodes above) — confirm against upstream.
1646  was_new.insert(node);
1647  }
1648 
// True if the given local node of \p elem was recorded as new.
1649  bool operator() (const Elem * elem, unsigned int local_node_num) const
1650  {
1651  if (was_new.count(elem->node_ptr(local_node_num)))
1652  return true;
1653  return false;
1654  }
1655 
1656  std::unordered_set<const Node *> was_new;
1657 };
1658 
1659 }
1660 
1661 
1662 
1663 // ------------------------------------------------------------
1665 {
1666  LOG_SCOPE ("make_node_proc_ids_parallel_consistent()", "MeshCommunication");
1667 
1668  // This function must be run on all processors at once
1669  libmesh_parallel_only(mesh.comm());
1670 
1671  // When this function is called, each section of a parallelized mesh
1672  // should be in the following state:
1673  //
1674  // All nodes should have the exact same physical location on every
1675  // processor where they exist.
1676  //
1677  // Local nodes should have unique authoritative ids,
1678  // and processor ids consistent with all processors which own
1679  // an element touching them.
1680  //
1681  // Ghost nodes touching local elements should have processor ids
1682  // consistent with all processors which own an element touching
1683  // them.
1684  SyncProcIds sync(mesh);
1688 }
1689 
1690 
1691 
1692 // ------------------------------------------------------------
1694 {
1695  LOG_SCOPE ("make_new_node_proc_ids_parallel_consistent()", "MeshCommunication");
1696 
1697  // This function must be run on all processors at once
1698  libmesh_parallel_only(mesh.comm());
1699 
1700  // When this function is called, each section of a parallelized mesh
1701  // should be in the following state:
1702  //
1703  // Local nodes should have unique authoritative ids,
1704  // and new nodes should be unpartitioned.
1705  //
1706  // New ghost nodes touching local elements should be unpartitioned.
1707 
1708  // We may not have consistent processor ids for new nodes (because a
1709  // node may be old and partitioned on one processor but new and
1710  // unpartitioned on another) when we start
1711 #ifdef DEBUG
1713  // MeshTools::libmesh_assert_parallel_consistent_new_node_procids(mesh);
1714 #endif
1715 
1716  // We have two kinds of new nodes. *NEW* nodes are unpartitioned on
1717  // all processors: we need to use a id-independent (i.e. dumb)
1718  // heuristic to partition them. But "new" nodes are newly created
1719  // on some processors (when ghost elements are refined) yet
1720  // correspond to existing nodes on other processors: we need to use
1721  // the existing processor id for them.
1722  //
1723  // A node which is "new" on one processor will be associated with at
1724  // least one ghost element, and we can just query that ghost
1725  // element's owner to find out the correct processor id.
1726 
1727  auto node_unpartitioned =
1728  [](const Elem * elem, unsigned int local_node_num)
1729  { return elem->node_ref(local_node_num).processor_id() ==
1731 
1732  SyncProcIds sync(mesh);
1733 
1737  node_unpartitioned, sync);
1738 
1739  // Nodes should now be unpartitioned iff they are truly new; those
1740  // are the *only* nodes we will touch.
1741 #ifdef DEBUG
1743 #endif
1744 
1745  NodeWasNew node_was_new(mesh);
1746 
1747  // Set the lowest processor id we can on truly new nodes
1748  for (auto & elem : mesh.element_ptr_range())
1749  for (auto & node : elem->node_ref_range())
1750  if (node_was_new.was_new.count(&node))
1751  {
1752  processor_id_type & pid = node.processor_id();
1753  pid = std::min(pid, elem->processor_id());
1754  }
1755 
1756  // Then finally see if other processors have a lower option
1759  ElemNodesMaybeNew(), node_was_new, sync);
1760 
1761  // We should have consistent processor ids when we're done.
1762 #ifdef DEBUG
1765 #endif
1766 }
1767 
1768 
1769 
1770 // ------------------------------------------------------------
1772 {
1773  // This function must be run on all processors at once
1774  libmesh_parallel_only(mesh.comm());
1775 
1776  // When this function is called, each section of a parallelized mesh
1777  // should be in the following state:
1778  //
1779  // All nodes should have the exact same physical location on every
1780  // processor where they exist.
1781  //
1782  // Local nodes should have unique authoritative ids,
1783  // and processor ids consistent with all processors which own
1784  // an element touching them.
1785  //
1786  // Ghost nodes touching local elements should have processor ids
1787  // consistent with all processors which own an element touching
1788  // them.
1789  //
1790  // Ghost nodes should have ids which are either already correct
1791  // or which are in the "unpartitioned" id space.
1792 
1793  // First, let's sync up processor ids. Some of these processor ids
1794  // may be "wrong" from coarsening, but they're right in the sense
1795  // that they'll tell us who has the authoritative dofobject ids for
1796  // each node.
1797 
1799 
1800  // Second, sync up dofobject ids.
1802 
1803  // Third, sync up dofobject unique_ids if applicable.
1805 
1806  // Finally, correct the processor ids to make DofMap happy
1808 }
1809 
1810 
1811 
1812 // ------------------------------------------------------------
1814 {
1815  // This function must be run on all processors at once
1816  libmesh_parallel_only(mesh.comm());
1817 
1818  // When this function is called, each section of a parallelized mesh
1819  // should be in the following state:
1820  //
1821  // All nodes should have the exact same physical location on every
1822  // processor where they exist.
1823  //
1824  // Local nodes should have unique authoritative ids,
1825  // and new nodes should be unpartitioned.
1826  //
1827  // New ghost nodes touching local elements should be unpartitioned.
1828  //
1829  // New ghost nodes should have ids which are either already correct
1830  // or which are in the "unpartitioned" id space.
1831  //
1832  // Non-new nodes should have correct ids and processor ids already.
1833 
1834  // First, let's sync up new nodes' processor ids.
1835 
1837 
1838  // Second, sync up dofobject ids.
1840 
1841  // Third, sync up dofobject unique_ids if applicable.
1843 
1844  // Finally, correct the processor ids to make DofMap happy
1846 }
1847 
1848 
1849 
1850 // ------------------------------------------------------------
1851 void
1853  const std::set<Elem *> & extra_ghost_elem_ids) const
1854 {
1855  // The mesh should know it's about to be parallelized
1857 
1858  LOG_SCOPE("delete_remote_elements()", "MeshCommunication");
1859 
1860 #ifdef DEBUG
1861  // We expect maximum ids to be in sync so we can use them to size
1862  // vectors
1863  libmesh_assert(mesh.comm().verify(mesh.max_node_id()));
1864  libmesh_assert(mesh.comm().verify(mesh.max_elem_id()));
1865  const dof_id_type par_max_node_id = mesh.parallel_max_node_id();
1866  const dof_id_type par_max_elem_id = mesh.parallel_max_elem_id();
1867  libmesh_assert_equal_to (par_max_node_id, mesh.max_node_id());
1868  libmesh_assert_equal_to (par_max_elem_id, mesh.max_elem_id());
1869 #endif
1870 
1871  std::set<const Elem *, CompareElemIdsByLevel> elements_to_keep;
1872 
1873  // Don't delete elements that we were explicitly told not to
1874  for (const auto & elem : extra_ghost_elem_ids)
1875  {
1876  std::vector<const Elem *> active_family;
1877 #ifdef LIBMESH_ENABLE_AMR
1878  if (!elem->subactive())
1879  elem->active_family_tree(active_family);
1880  else
1881 #endif
1882  active_family.push_back(elem);
1883 
1884  for (const auto & f : active_family)
1885  elements_to_keep.insert(f);
1886  }
1887 
1888  // See which elements we still need to keep ghosted, given that
1889  // we're keeping local and unpartitioned elements.
1891  (mesh, mesh.processor_id(),
1894  elements_to_keep);
1899  elements_to_keep);
1900 
1901  // The inactive elements we need to send should have their
1902  // immediate children present.
1905  elements_to_keep);
1909  elements_to_keep);
1910 
1911  // The elements we need should have their ancestors and their
1912  // subactive children present too.
1913  connect_families(elements_to_keep);
1914 
1915  // Don't delete nodes that our semilocal elements need
1916  std::set<const Node *> connected_nodes;
1917  reconnect_nodes(elements_to_keep, connected_nodes);
1918 
1919  // Delete all the elements we have no reason to save,
1920  // starting with the most refined so that the mesh
1921  // is valid at all intermediate steps
1922  unsigned int n_levels = MeshTools::n_levels(mesh);
1923 
1924  for (int l = n_levels - 1; l >= 0; --l)
1925  for (auto & elem : as_range(mesh.level_elements_begin(l),
1927  {
1928  libmesh_assert (elem);
1929  // Make sure we don't leave any invalid pointers
1930  const bool keep_me = elements_to_keep.count(elem);
1931 
1932  if (!keep_me)
1933  elem->make_links_to_me_remote();
1934 
1935  // delete_elem doesn't currently invalidate element
1936  // iterators... that had better not change
1937  if (!keep_me)
1938  mesh.delete_elem(elem);
1939  }
1940 
1941  // Delete all the nodes we have no reason to save
1942  for (auto & node : mesh.node_ptr_range())
1943  {
1944  libmesh_assert(node);
1945  if (!connected_nodes.count(node))
1946  mesh.delete_node(node);
1947  }
1948 
1949  // If we had a point locator, it's invalid now that some of the
1950  // elements it pointed to have been deleted.
1952 
1953  // Much of our boundary info may have been for now-remote parts of
1954  // the mesh, in which case we don't want to keep local copies.
1956 
1957  // We now have all remote elements and nodes deleted; our ghosting
1958  // functors should be ready to delete any now-redundant cached data
1959  // they use too.
1961  gf->delete_remote_elements();
1962 
1963 #ifdef DEBUG
1965 #endif
1966 }
1967 
1968 } // namespace libMesh
libMesh::MeshBase::pid_elements_begin
virtual element_iterator pid_elements_begin(processor_id_type proc_id)=0
Iterate over all elements with a specified processor id.
libMesh::MeshBase::flagged_elements_begin
virtual element_iterator flagged_elements_begin(unsigned char rflag)=0
Iterate over all elements with a specified refinement flag.
libMesh::connect_children
void connect_children(const MeshBase &mesh, MeshBase::const_element_iterator elem_it, MeshBase::const_element_iterator elem_end, std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
Definition: mesh_communication.C:169
libMesh::MeshBase::ghosting_functors_begin
std::set< GhostingFunctor * >::const_iterator ghosting_functors_begin() const
Beginning of range of ghosting functors.
Definition: mesh_base.h:1113
libMesh::dof_id_type
uint8_t dof_id_type
Definition: id_types.h:67
libMesh::ElemInternal::family_tree
void family_tree(T elem, std::vector< T > &family, bool reset=true)
Definition: elem_internal.h:41
libMesh::MeshBase::ghosting_functors_end
std::set< GhostingFunctor * >::const_iterator ghosting_functors_end() const
End of range of ghosting functors.
Definition: mesh_base.h:1119
libMesh::MeshBase::delete_node
virtual void delete_node(Node *n)=0
Removes the Node n from the mesh.
libMesh::MeshTools::paranoid_n_levels
unsigned int paranoid_n_levels(const MeshBase &mesh)
Definition: mesh_tools.C:680
libMesh::MeshCommunication::delete_remote_elements
void delete_remote_elements(DistributedMesh &, const std::set< Elem * > &) const
This method takes an input DistributedMesh which may be distributed among all the processors.
Definition: mesh_communication.C:1852
libMesh::Elem::total_family_tree
void total_family_tree(std::vector< const Elem * > &family, bool reset=true) const
Same as the family_tree() member, but also adds any subactive descendants.
Definition: elem.C:1457
libMesh::MeshBase::default_mapping_type
ElemMappingType default_mapping_type() const
Returns the default master space to physical space mapping basis functions to be used on newly added ...
Definition: mesh_base.h:708
was_new
std::unordered_set< const Node * > was_new
Definition: mesh_communication.C:1656
libMesh::MeshTools::n_elem
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Count up the number of elements of a specific type (as defined by an iterator range).
Definition: mesh_tools.C:705
libMesh::Elem::JUST_REFINED
Definition: elem.h:1172
libMesh::Elem::on_boundary
bool on_boundary() const
Definition: elem.h:2313
libMesh::MeshBase::flagged_pid_elements_end
virtual element_iterator flagged_pid_elements_end(unsigned char rflag, processor_id_type pid)=0
libMesh::Elem::build_side_ptr
virtual std::unique_ptr< Elem > build_side_ptr(const unsigned int i, bool proxy=true)=0
libMesh::MeshBase::flagged_pid_elements_begin
virtual element_iterator flagged_pid_elements_begin(unsigned char rflag, processor_id_type pid)=0
Iterate over all elements with a specified refinement flag on a specified processor.
libMesh::MeshBase::active_pid_elements_end
virtual element_iterator active_pid_elements_end(processor_id_type proc_id)=0
libMesh::unique_id_type
uint8_t unique_id_type
Definition: id_types.h:86
libMesh::MeshBase::get_boundary_info
const BoundaryInfo & get_boundary_info() const
The information about boundary ids on the mesh.
Definition: mesh_base.h:132
libMesh::MeshTools::libmesh_assert_valid_boundary_ids
void libmesh_assert_valid_boundary_ids(const MeshBase &mesh)
A function for verifying that boundary condition ids match across processors.
Definition: mesh_tools.C:1396
libMesh::MeshBase::is_serial
virtual bool is_serial() const
Definition: mesh_base.h:159
libMesh::Elem::child_ref_range
SimpleRange< ChildRefIter > child_ref_range()
Returns a range with all children of a parent element, usable in range-based for loops.
Definition: elem.h:1839
libMesh::MeshBase::delete_elem
virtual void delete_elem(Elem *e)=0
Removes element e from the mesh.
libMesh::MeshBase::active_local_element_ptr_range
virtual SimpleRange< element_iterator > active_local_element_ptr_range()=0
libMesh::Elem::COARSEN
Definition: elem.h:1169
libMesh::Elem::n_neighbors
unsigned int n_neighbors() const
Definition: elem.h:631
libMesh::MeshBase::pid_elements_end
virtual element_iterator pid_elements_end(processor_id_type proc_id)=0
libMesh::MeshBase::n_elem
virtual dof_id_type n_elem() const =0
libMesh::MeshBase::elem_ref
virtual const Elem & elem_ref(const dof_id_type i) const
Definition: mesh_base.h:521
libMesh::MeshCommunication::gather_neighboring_elements
void gather_neighboring_elements(DistributedMesh &) const
Definition: mesh_communication.C:540
libMesh::index_range
IntRange< std::size_t > index_range(const std::vector< T > &vec)
Helper function that returns an IntRange<std::size_t> representing all the indices of the passed-in v...
Definition: int_range.h:106
libMesh::MeshBase::max_elem_id
virtual dof_id_type max_elem_id() const =0
libMesh
The libMesh namespace provides an interface to certain functionality in the library.
Definition: factoryfunction.C:55
libMesh::Elem::set_p_level
void set_p_level(const unsigned int p)
Sets the value of the p-refinement level for the element.
Definition: elem_refinement.C:41
libMesh::MeshTools::libmesh_assert_parallel_consistent_new_node_procids
void libmesh_assert_parallel_consistent_new_node_procids(const MeshBase &mesh)
A function for verifying that processor assignment is parallel consistent (every processor agrees on ...
Definition: mesh_tools.C:1870
libMesh::MeshCommunication::make_p_levels_parallel_consistent
void make_p_levels_parallel_consistent(MeshBase &)
Copy p levels of ghost elements from their local processors.
Definition: mesh_communication.C:1537
libMesh::MeshBase::set_default_mapping_data
void set_default_mapping_data(const unsigned char data)
Set the default master space to physical space mapping basis functions to be used on newly added elem...
Definition: mesh_base.h:732
libMesh::reconnect_nodes
void reconnect_nodes(const std::set< const Elem *, CompareElemIdsByLevel > &connected_elements, std::set< const Node * > &connected_nodes)
Definition: mesh_communication.C:259
libMesh::ParallelObject::comm
const Parallel::Communicator & comm() const
Definition: parallel_object.h:94
libMesh::Elem::set_neighbor
void set_neighbor(const unsigned int i, Elem *n)
Assigns n as the neighbor.
Definition: elem.h:2105
libMesh::MeshBase::set_subdomain_name_map
std::map< subdomain_id_type, std::string > & set_subdomain_name_map()
Definition: mesh_base.h:1631
libMesh::GhostingFunctor::map_type
std::unordered_map< const Elem *, const CouplingMatrix * > map_type
What elements do we care about and what variables do we care about on each element?
Definition: ghosting_functor.h:171
libMesh::Elem::p_level
unsigned int p_level() const
Definition: elem.h:2512
libMesh::Elem::neighbor_ptr_range
SimpleRange< NeighborPtrIter > neighbor_ptr_range()
Returns a range with all neighbors of an element, usable in range-based for loops.
Definition: elem.h:2917
mesh
MeshBase & mesh
Definition: mesh_communication.C:1257
libMesh::MeshBase::node_ptr
virtual const Node * node_ptr(const dof_id_type i) const =0
libMesh::MeshBase::not_local_elements_begin
virtual element_iterator not_local_elements_begin()=0
libMesh::MeshBase::max_node_id
virtual dof_id_type max_node_id() const =0
libMesh::MeshBase::renumber_elem
virtual void renumber_elem(dof_id_type old_id, dof_id_type new_id)=0
Changes the id of element old_id, both by changing elem(old_id)->id() and by moving elem(old_id) in t...
libMesh::MeshBase::active_pid_elements_begin
virtual element_iterator active_pid_elements_begin(processor_id_type proc_id)=0
libMesh::DofObject::processor_id
processor_id_type processor_id() const
Definition: dof_object.h:829
libMesh::MeshBase::elements_begin
virtual element_iterator elements_begin()=0
Iterate over all the elements in the Mesh.
libMesh::Elem::active
bool active() const
Definition: elem.h:2345
libMesh::MeshTools::n_levels
unsigned int n_levels(const MeshBase &mesh)
Definition: mesh_tools.C:656
libMesh::MeshCommunication::make_node_ids_parallel_consistent
void make_node_ids_parallel_consistent(MeshBase &)
Assuming all ids on local nodes are globally unique, and assuming all processor ids are parallel cons...
Definition: mesh_communication.C:1460
libMesh::MeshCommunication::make_nodes_parallel_consistent
void make_nodes_parallel_consistent(MeshBase &)
Copy processor_ids and ids on ghost nodes from their local processors.
Definition: mesh_communication.C:1771
libMesh::MeshCommunication::send_coarse_ghosts
void send_coarse_ghosts(MeshBase &) const
Examine a just-coarsened mesh, and for any newly-coarsened elements, send the associated ghosted elem...
Definition: mesh_communication.C:915
libMesh::MeshBase::query_elem_ptr
virtual const Elem * query_elem_ptr(const dof_id_type i) const =0
libMesh::Elem::make_links_to_me_remote
void make_links_to_me_remote()
Resets this element's neighbors' appropriate neighbor pointers and its parent's and children's approp...
Definition: elem.C:1085
libMesh::MeshCommunication::make_node_proc_ids_parallel_consistent
void make_node_proc_ids_parallel_consistent(MeshBase &)
Assuming all processor ids on nodes touching local elements are parallel consistent,...
Definition: mesh_communication.C:1664
libMesh::MeshBase::element_ptr_range
virtual SimpleRange< element_iterator > element_ptr_range()=0
libMesh::libmesh_assert
libmesh_assert(ctx)
libMesh::MeshCommunication::broadcast
void broadcast(MeshBase &) const
Definition: mesh_communication.C:1084
libMesh::IntRange
The IntRange templated class is intended to make it easy to loop over integers which are indices of a...
Definition: int_range.h:53
libMesh::MeshCommunication::redistribute
void redistribute(DistributedMesh &mesh, bool newly_coarsened_only=false) const
This method takes a parallel distributed mesh and redistributes the elements.
Definition: mesh_communication.C:285
libMesh::MeshCommunication::make_node_unique_ids_parallel_consistent
void make_node_unique_ids_parallel_consistent(MeshBase &)
Assuming all unique_ids on local nodes are globally unique, and assuming all processor ids are parall...
Definition: mesh_communication.C:1489
libMesh::Elem::subactive
bool subactive() const
Definition: elem.h:2363
libMesh::MeshBase
This is the MeshBase class.
Definition: mesh_base.h:78
libMesh::MeshBase::active_elements_begin
virtual element_iterator active_elements_begin()=0
Active, local, and negation forms of the element iterators described above.
libMesh::Elem::node_ref_range
SimpleRange< NodeRefIter > node_ref_range()
Returns a range with all nodes of an element, usable in range-based for loops.
Definition: elem.h:2152
libMesh::Parallel::sync_node_data_by_element_id
void sync_node_data_by_element_id(MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id,...
Definition: parallel_ghost_sync.h:754
libMesh::MeshBase::level_elements_begin
virtual element_iterator level_elements_begin(unsigned int level)=0
Iterate over elements of a given level.
libMesh::MeshCommunication::gather
void gather(const processor_id_type root_id, DistributedMesh &) const
This method takes an input DistributedMesh which may be distributed among all the processors.
Definition: mesh_communication.C:1168
libMesh::MeshTools::libmesh_assert_topology_consistent_procids< Node >
void libmesh_assert_topology_consistent_procids< Node >(const MeshBase &mesh)
Definition: mesh_tools.C:1819
libMesh::MeshBase::_elem_integer_names
std::vector< std::string > _elem_integer_names
The array of names for integer data associated with each element in the mesh.
Definition: mesh_base.h:1780
libMesh::ParallelObject::n_processors
processor_id_type n_processors() const
Definition: parallel_object.h:100
libMesh::MeshBase::node_ptr_range
virtual SimpleRange< node_iterator > node_ptr_range()=0
libMesh::MeshBase::clear_point_locator
void clear_point_locator()
Releases the current PointLocator object.
Definition: mesh_base.C:696
libMesh::ParallelObject::processor_id
processor_id_type processor_id() const
Definition: parallel_object.h:106
libMesh::libmesh_ignore
void libmesh_ignore(const Args &...)
Definition: libmesh_common.h:526
libMesh::DofObject::invalid_id
static const dof_id_type invalid_id
An invalid id to distinguish an uninitialized DofObject.
Definition: dof_object.h:421
libMesh::processor_id_type
uint8_t processor_id_type
Definition: id_types.h:104
libMesh::MeshCommunication::make_new_nodes_parallel_consistent
void make_new_nodes_parallel_consistent(MeshBase &)
Copy processor_ids and ids on new nodes from their local processors.
Definition: mesh_communication.C:1813
libMesh::Elem::n_vertices
virtual unsigned int n_vertices() const =0
libMesh::Node
A Node is like a Point, but with more information.
Definition: node.h:52
query
query_obj query
Definition: mesh_communication.C:1426
libMesh::MeshTools::libmesh_assert_equal_n_systems
void libmesh_assert_equal_n_systems(const MeshBase &mesh)
A function for testing that all DofObjects within a mesh have the same n_systems count.
Definition: mesh_tools.C:1168
libMesh::as_range
SimpleRange< IndexType > as_range(const std::pair< IndexType, IndexType > &p)
Helper function that allows us to treat a homogeneous pair as a range.
Definition: simple_range.h:57
libMesh::Elem::JUST_COARSENED
Definition: elem.h:1173
libMesh::BoundaryInfo::regenerate_id_sets
void regenerate_id_sets()
Clears and regenerates the cached sets of ids.
Definition: boundary_info.C:159
libMesh::MeshBase::const_element_iterator
The definition of the const_element_iterator struct.
Definition: mesh_base.h:1891
libMesh::MeshCommunication::clear
void clear()
Clears all data structures and resets to a pristine state.
Definition: mesh_communication.C:276
libMesh::query_ghosting_functors
void query_ghosting_functors(const MeshBase &mesh, processor_id_type pid, MeshBase::const_element_iterator elem_it, MeshBase::const_element_iterator elem_end, std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
Definition: mesh_communication.C:138
libMesh::MeshBase::_node_integer_names
std::vector< std::string > _node_integer_names
The array of names for integer data associated with each node in the mesh.
Definition: mesh_base.h:1786
libMesh::MeshBase::cache_elem_dims
void cache_elem_dims()
Search the mesh and cache the different dimensions of the elements present in the mesh.
Definition: mesh_base.C:753
libMesh::MeshBase::nodes_end
virtual node_iterator nodes_end()=0
libMesh::Elem::family_tree
void family_tree(std::vector< const Elem * > &family, bool reset=true) const
Fills the vector family with the children of this element, recursively.
Definition: elem.C:1441
libMesh::MeshBase::node_ref
virtual const Node & node_ref(const dof_id_type i) const
Definition: mesh_base.h:451
libMesh::mesh_inserter_iterator
A class for templated methods that expect output iterator arguments, which adds objects to the Mesh.
Definition: mesh_inserter_iterator.h:49
libMesh::Elem::parent
const Elem * parent() const
Definition: elem.h:2434
libMesh::MeshBase::n_nodes
virtual dof_id_type n_nodes() const =0
libMesh::Elem::refinement_flag
RefinementState refinement_flag() const
Definition: elem.h:2614
libMesh::connect_families
void connect_families(std::set< const Elem *, CompareElemIdsByLevel > &connected_elements)
Definition: mesh_communication.C:192
libMesh::Parallel::sync_dofobject_data_by_id
void sync_dofobject_data_by_id(const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
Request data about a range of ghost dofobjects uniquely identified by their id.
Definition: parallel_ghost_sync.h:338
definitive_renumbering
umap_type definitive_renumbering
Definition: mesh_communication.C:1298
libMesh::MeshBase::nodes_begin
virtual node_iterator nodes_begin()=0
Iterate over all the nodes in the Mesh.
definitive_ids
uset_type definitive_ids
Definition: mesh_communication.C:1292
libMesh::BoundaryInfo::set_nodeset_name_map
std::map< boundary_id_type, std::string > & set_nodeset_name_map()
Definition: boundary_info.h:882
libMesh::ElemMappingType
ElemMappingType
Enumeration of possible element master->physical mapping types.
Definition: enum_elem_type.h:82
libMesh::MeshCommunication::make_elems_parallel_consistent
void make_elems_parallel_consistent(MeshBase &)
Copy ids of ghost elements from their local processors.
Definition: mesh_communication.C:1513
libMesh::DofObject::id
dof_id_type id() const
Definition: dof_object.h:767
libMesh::Elem::active_family_tree
void active_family_tree(std::vector< const Elem * > &active_family, bool reset=true) const
Same as the family_tree() member, but only adds the active children.
Definition: elem.C:1473
libMesh::MeshBase::elements_end
virtual element_iterator elements_end()=0
libMesh::MeshBase::set_default_mapping_type
void set_default_mapping_type(const ElemMappingType type)
Set the default master space to physical space mapping basis functions to be used on newly added elem...
Definition: mesh_base.h:716
libMesh::Elem::has_children
bool has_children() const
Definition: elem.h:2383
libMesh::Elem::side_index_range
IntRange< unsigned short > side_index_range() const
Definition: elem.h:2188
libMesh::DofObject::invalid_processor_id
static const processor_id_type invalid_processor_id
An invalid processor_id to distinguish DoFs that have not been assigned to a processor.
Definition: dof_object.h:432
libMesh::Elem
This is the base class from which all geometric element types are derived.
Definition: elem.h:100
data
IterBase * data
Ideally this private member data should have protected access.
Definition: variant_filter_iterator.h:337
libMesh::Parallel::sync_element_data_by_parent_id
void sync_element_data_by_parent_id(MeshBase &mesh, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
Request data about a range of ghost elements uniquely identified by their parent id and which child t...
Definition: parallel_ghost_sync.h:431
libMesh::Parallel::sync_node_data_by_element_id_once
bool sync_node_data_by_element_id_once(MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id,...
Definition: parallel_ghost_sync.h:548
libMesh::MeshTools::libmesh_assert_valid_refinement_tree
void libmesh_assert_valid_refinement_tree(const MeshBase &mesh)
A function for verifying that elements on this processor have valid descendants and consistent active...
Definition: mesh_tools.C:2026
libMesh::Parallel::SyncEverything
Definition: parallel_ghost_sync.h:219
libMesh::MeshBase::level_elements_end
virtual element_iterator level_elements_end(unsigned int level)=0
libMesh::err
OStreamProxy err
libMesh::MeshBase::default_mapping_data
unsigned char default_mapping_data() const
Returns any default data value used by the master space to physical space mapping.
Definition: mesh_base.h:724
libMesh::Elem::neighbor_ptr
const Elem * neighbor_ptr(unsigned int i) const
Definition: elem.h:2085
libMesh::Elem::node_id
dof_id_type node_id(const unsigned int i) const
Definition: elem.h:1977
libMesh::Elem::node_ref
const Node & node_ref(const unsigned int i) const
Definition: elem.h:2031
libMesh::MeshBase::renumber_node
virtual void renumber_node(dof_id_type old_id, dof_id_type new_id)=0
Changes the id of node old_id, both by changing node(old_id)->id() and by moving node(old_id) in the ...
libMesh::MeshBase::clear
virtual void clear()
Deletes all the element and node data that is currently stored.
Definition: mesh_base.C:429
libMesh::MeshBase::not_local_elements_end
virtual element_iterator not_local_elements_end()=0
libMesh::MeshBase::query_node_ptr
virtual const Node * query_node_ptr(const dof_id_type i) const =0
libMesh::MeshBase::active_elements_end
virtual element_iterator active_elements_end()=0
libMesh::Elem::node_ptr
const Node * node_ptr(const unsigned int i) const
Definition: elem.h:2009
renumber
renumber_obj renumber
Definition: mesh_communication.C:1258
libMesh::MeshCommunication::make_new_node_proc_ids_parallel_consistent
void make_new_node_proc_ids_parallel_consistent(MeshBase &)
Assuming all processor ids on nodes touching local elements are parallel consistent,...
Definition: mesh_communication.C:1693
libMesh::DistributedMesh
The DistributedMesh class is derived from the MeshBase class, and is intended to provide identical fu...
Definition: distributed_mesh.h:50
libMesh::BoundaryInfo::set_sideset_name_map
std::map< boundary_id_type, std::string > & set_sideset_name_map()
Definition: boundary_info.h:874
libMesh::MeshBase::find_neighbors
virtual void find_neighbors(const bool reset_remote_elements=false, const bool reset_current_list=true)=0
Locate element face (edge in 2D) neighbors.
libMesh::MeshBase::flagged_elements_end
virtual element_iterator flagged_elements_end(unsigned char rflag)=0
libMesh::remote_elem
const RemoteElem * remote_elem
Definition: remote_elem.C:57
libMesh::MeshTools::correct_node_proc_ids
void correct_node_proc_ids(MeshBase &)
Changes the processor ids on each node so be the same as the id of the lowest element touching that n...
Definition: mesh_tools.C:2252
libMesh::MeshBase::unpartitioned_elements_end
virtual element_iterator unpartitioned_elements_end()=0
libMesh::MeshBase::unpartitioned_elements_begin
virtual element_iterator unpartitioned_elements_begin()=0
Iterate over unpartitioned elements in the Mesh.
libMesh::Predicates::NotNull
Used to iterate over non-nullptr entries in a container.
Definition: multi_predicates.h:137
libMesh::Elem::top_parent
const Elem * top_parent() const
Definition: elem.h:2460
libMesh::MeshTools::libmesh_assert_parallel_consistent_procids< Node >
void libmesh_assert_parallel_consistent_procids< Node >(const MeshBase &mesh)
Definition: mesh_tools.C:1908