libMesh::Parallel Namespace Reference

Namespaces

 Has_buffer_type
 
 PackingMixedType
 
 Utils
 

Classes

class  BinSorter
 Perform a parallel sort using a bin-sort method. More...
 
class  Communicator
 
struct  DefaultBufferType
 
struct  DefaultBufferType< T, typename std::enable_if< Has_buffer_type< Packing< T > >::value >::type >
 
struct  DefaultBufferType< T, typename std::enable_if<!Has_buffer_type< Packing< T > >::value >::type >
 
struct  DefaultValueType
 
struct  DefaultValueType< std::pair< const K, V > >
 
class  Has_buffer_type
 
class  Histogram
 Defines a histogram to be used in parallel in conjunction with a BinSorter. More...
 
class  Packing
 
class  Packing< const Elem * >
 
class  Packing< const Elem *const >
 
class  Packing< const Node * >
 
class  Packing< const Node *const >
 
class  Packing< Eigen::Matrix< Scalar, Rows, Cols, Options, MaxRows, MaxCols > >
 
class  Packing< Elem * >
 
class  Packing< Elem *const >
 
class  Packing< Node * >
 
class  Packing< Node *const >
 
class  Packing< std::array< T, N >, typename std::enable_if< Has_buffer_type< Packing< T > >::value >::type >
 
class  Packing< std::basic_string< T >, typename std::enable_if< TIMPI::StandardType< T >::is_fixed_type >::type >
 
class  Packing< std::pair< T1, T2 >, typename std::enable_if< PairHasPacking< T1, T2 >::value >::type >
 
class  Packing< std::tuple< T, Types... >, typename std::enable_if< TupleHasPacking< T, Types... >::value >::type >
 
class  Packing< std::tuple<>, Enable >
 
class  Packing< std::unique_ptr< int > >
 
struct  PackingMixedType
 
class  PackingRange
 
struct  PairBufferTypeHelper
 
struct  PairBufferTypeHelper< T1, false, T2, false >
 
struct  PairBufferTypeHelper< T1, false, T2, true >
 
struct  PairBufferTypeHelper< T1, true, T2, false >
 
struct  PairBufferTypeHelper< T1, true, T2, true >
 
struct  PairHasPacking
 
class  Sort
 The parallel sorting method is templated on the type of data which is to be sorted. More...
 
struct  SyncEverything
 
struct  TupleBufferType
 
struct  TupleBufferType< T1 >
 
struct  TupleBufferType< T1, MoreTypes... >
 
struct  TupleBufferTypeHelper
 
struct  TupleBufferTypeHelper< false, true, T1, MoreTypes... >
 
struct  TupleBufferTypeHelper< true, false, T1, MoreTypes... >
 
struct  TupleBufferTypeHelper< true, MoreTypes_have_buffer_Type, T1 >
 
struct  TupleBufferTypeHelper< true, true, T1, MoreTypes... >
 
struct  TupleHasPacking
 
struct  TupleHasPacking< T, Types... >
 
struct  TupleHasPacking<>
 

Typedefs

typedef std::pair< Hilbert::HilbertIndices, unique_id_type > DofObjectKey
 

Functions

constexpr int get_packed_len_entries ()
 
void put_packed_len (unsigned int len, Iter data_out)
 
unsigned int get_packed_len (typename std::vector< buffer_type >::const_iterator in)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::vector< T TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::list< T TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::map< K TIMPI_P_COMMA T TIMPI_P_COMMA C TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::multimap< K TIMPI_P_COMMA T TIMPI_P_COMMA C TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::multiset< K TIMPI_P_COMMA C TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::set< K TIMPI_P_COMMA C TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::unordered_map< K TIMPI_P_COMMA T TIMPI_P_COMMA H TIMPI_P_COMMA KE TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::unordered_multimap< K TIMPI_P_COMMA T TIMPI_P_COMMA H TIMPI_P_COMMA KE TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::unordered_multiset< K TIMPI_P_COMMA H TIMPI_P_COMMA KE TIMPI_P_COMMA A >)
 
 TIMPI_PACKING_RANGE_SUBCLASS (std::unordered_set< K TIMPI_P_COMMA H TIMPI_P_COMMA KE TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::vector< T TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::list< T TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::map< K TIMPI_P_COMMA T TIMPI_P_COMMA C TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::multimap< K TIMPI_P_COMMA T TIMPI_P_COMMA C TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::multiset< K TIMPI_P_COMMA C TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::set< K TIMPI_P_COMMA C TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::unordered_map< K TIMPI_P_COMMA T TIMPI_P_COMMA H TIMPI_P_COMMA KE TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::unordered_multimap< K TIMPI_P_COMMA T TIMPI_P_COMMA H TIMPI_P_COMMA KE TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::unordered_multiset< K TIMPI_P_COMMA H TIMPI_P_COMMA KE TIMPI_P_COMMA A >)
 
 TIMPI_DECL_PACKING_RANGE_SUBCLASS (std::unordered_set< K TIMPI_P_COMMA H TIMPI_P_COMMA KE TIMPI_P_COMMA A >)
 
template<typename Iterator , typename DofObjType , typename SyncFunctor >
void sync_dofobject_data_by_xyz (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, LocationMap< DofObjType > &location_map, SyncFunctor &sync)
 Request data about a range of ghost nodes uniquely identified by their xyz location or a range of active ghost elements uniquely identified by their vertex averages' xyz location. More...
 
template<typename Iterator , typename SyncFunctor >
void sync_dofobject_data_by_id (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
 Request data about a range of ghost dofobjects uniquely identified by their id. More...
 
template<typename Iterator , typename DofObjectCheckFunctor , typename SyncFunctor >
void sync_dofobject_data_by_id (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, const DofObjectCheckFunctor &dofobj_check, SyncFunctor &sync)
 Request data about a range of ghost dofobjects uniquely identified by their id. More...
 
template<typename Iterator , typename SyncFunctor >
void sync_element_data_by_parent_id (MeshBase &mesh, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
 Request data about a range of ghost elements uniquely identified by their parent id and which child they are. More...
 
template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
bool sync_node_data_by_element_id_once (MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
 Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, assuming a single synchronization pass is necessary. More...
 
template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
void sync_node_data_by_element_id (MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
 Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, iterating until data is completely in sync and further synchronization passes cause no changes. More...
 

Typedef Documentation

◆ DofObjectKey

typedef std::pair<Hilbert::HilbertIndices, unique_id_type> libMesh::Parallel::DofObjectKey

Definition at line 80 of file parallel_hilbert.h.

Function Documentation

◆ sync_dofobject_data_by_id() [1/2]

template<typename Iterator , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_id ( const Communicator &  comm,
const Iterator &  range_begin,
const Iterator &  range_end,
SyncFunctor &  sync 
)

Request data about a range of ghost dofobjects uniquely identified by their id.

Fulfill requests with sync.gather_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data) by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.
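
As a concrete illustration, a minimal functor satisfying this interface might look like the following sketch. It is not part of libMesh; the class name SyncMyFlag and the external map of per-object values are assumptions made only for this example.

  // Hypothetical sketch: keep a per-DofObject flag, stored in an external map
  // keyed by DofObject id, consistent with the values on the owning processors.
  // Only the datum typedef and the gather_data()/act_on_data() members are
  // required by sync_dofobject_data_by_id().
  #include <map>
  #include <vector>
  #include "libmesh/id_types.h"

  struct SyncMyFlag
  {
    // Type exchanged per requested id; must be a built-in type or have a
    // Parallel::StandardType specialization.
    typedef unsigned int datum;

    // Locally known values, keyed by DofObject id (assumed filled for the
    // objects this processor owns).
    std::map<libMesh::dof_id_type, datum> & flags;

    explicit SyncMyFlag(std::map<libMesh::dof_id_type, datum> & f) : flags(f) {}

    // Called for ids this processor owns: resize and fill the data vector.
    void gather_data(const std::vector<libMesh::dof_id_type> & ids,
                     std::vector<datum> & data)
    {
      data.resize(ids.size());
      for (std::size_t i = 0; i != ids.size(); ++i)
        data[i] = flags[ids[i]];
    }

    // Called with the answers to our own requests: store them locally.
    void act_on_data(const std::vector<libMesh::dof_id_type> & ids,
                     const std::vector<datum> & data)
    {
      for (std::size_t i = 0; i != ids.size(); ++i)
        flags[ids[i]] = data[i];
    }
  };

A call such as sync_dofobject_data_by_id(mesh.comm(), mesh.nodes_begin(), mesh.nodes_end(), sync), with mesh a MeshBase, then makes each ghost node's flag agree with the value held by its owning processor.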

Definition at line 357 of file parallel_ghost_sync.h.

Referenced by libMesh::Partitioner::_find_global_index_by_pid_map(), libMesh::VariationalSmootherSystem::assembly(), libMesh::MeshTools::correct_node_proc_ids(), libMesh::MeshRefinement::make_coarsening_compatible(), libMesh::MeshCommunication::make_elems_parallel_consistent(), libMesh::MeshRefinement::make_flags_parallel_consistent(), libMesh::MeshCommunication::make_node_bcids_parallel_consistent(), libMesh::MeshCommunication::make_node_unique_ids_parallel_consistent(), libMesh::MeshCommunication::make_p_levels_parallel_consistent(), libMesh::FEMSystem::mesh_position_set(), libMesh::LaplaceMeshSmoother::smooth(), libMesh::VariationalMeshSmoother::smooth(), and libMesh::MeshRefinement::uniformly_coarsen().

361 {
362  sync_dofobject_data_by_id(comm, range_begin, range_end, SyncEverything(), sync);
363 }

◆ sync_dofobject_data_by_id() [2/2]

template<typename Iterator , typename DofObjectCheckFunctor , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_id ( const Communicator &  comm,
const Iterator &  range_begin,
const Iterator &  range_end,
const DofObjectCheckFunctor &  dofobj_check,
SyncFunctor &  sync 
)

Request data about a range of ghost dofobjects uniquely identified by their id.

Elements within the range can be excluded from the request by returning false from dofobj_check(dof_object).
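
For illustration only (not a libMesh class), a check functor restricting the request to nodes on a particular boundary might look like this sketch; the boundary id used below is an arbitrary assumption:

  // Hypothetical sketch: skip every node that is not on the given boundary.
  // Any callable taking the iterated pointer type and returning bool works.
  #include "libmesh/boundary_info.h"
  #include "libmesh/mesh_base.h"
  #include "libmesh/node.h"

  struct OnBoundaryCheck
  {
    const libMesh::MeshBase & mesh;
    libMesh::boundary_id_type boundary_id;

    bool operator()(const libMesh::Node * node) const
    {
      return mesh.get_boundary_info().has_boundary_id(node, boundary_id);
    }
  };

Passing OnBoundaryCheck{mesh, 2} as dofobj_check would then restrict the sync to nodes on boundary id 2.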

Definition at line 368 of file parallel_ghost_sync.h.

References libMesh::DofObject::id(), libMesh::DofObject::invalid_processor_id, libMesh::libmesh_assert(), libMesh::DofObject::processor_id(), pull_parallel_vector_data(), and TIMPI::Communicator::rank().

373 {
374  // This function must be run on all processors at once
375  libmesh_parallel_only(comm);
376 
377  // Count the objects to ask each processor about
378  std::map<processor_id_type, dof_id_type>
379  ghost_objects_from_proc;
380 
381  for (Iterator it = range_begin; it != range_end; ++it)
382  {
383  const DofObject * obj = *it;
384  libmesh_assert (obj);
385 
386  // We may want to pass Elem* or Node* to the check function, not
387  // just DofObject*
388  if (!dofobj_check(*it))
389  continue;
390 
391  processor_id_type obj_procid = obj->processor_id();
392  if (obj_procid != DofObject::invalid_processor_id)
393  ghost_objects_from_proc[obj_procid]++;
394  }
395 
396  // Request sets to send to each processor
397  std::map<processor_id_type, std::vector<dof_id_type>>
398  requested_objs_id;
399 
400  // We know how many objects live on each processor, so reserve()
401  // space for each.
402  for (auto pair : ghost_objects_from_proc)
403  {
404  const processor_id_type p = pair.first;
405  if (p != comm.rank())
406  requested_objs_id[p].reserve(pair.second);
407  }
408 
409  for (Iterator it = range_begin; it != range_end; ++it)
410  {
411  const DofObject * obj = *it;
412 
413  if (!dofobj_check(*it))
414  continue;
415 
416  processor_id_type obj_procid = obj->processor_id();
417  if (obj_procid == comm.rank() ||
418  obj_procid == DofObject::invalid_processor_id)
419  continue;
420 
421  requested_objs_id[obj_procid].push_back(obj->id());
422  }
423 
424  auto gather_functor =
425  [&sync]
426  (processor_id_type, const std::vector<dof_id_type> & ids,
427  std::vector<typename SyncFunctor::datum> & data)
428  {
429  sync.gather_data(ids, data);
430  };
431 
432  auto action_functor =
433  [&sync]
434  (processor_id_type, const std::vector<dof_id_type> & ids,
435  const std::vector<typename SyncFunctor::datum> & data)
436  {
437  // Let the user process the results
438  sync.act_on_data(ids, data);
439  };
440 
441  // Trade requests with other processors
442  typename SyncFunctor::datum * ex = nullptr;
443  pull_parallel_vector_data
444  (comm, requested_objs_id, gather_functor, action_functor, ex);
445 }

◆ sync_dofobject_data_by_xyz()

template<typename Iterator , typename DofObjType , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_xyz ( const Communicator &  comm,
const Iterator &  range_begin,
const Iterator &  range_end,
LocationMap< DofObjType > &  location_map,
SyncFunctor &  sync 
)

Request data about a range of ghost nodes uniquely identified by their xyz location or a range of active ghost elements uniquely identified by their vertex averages' xyz location.

Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data) by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type. The user-provided location_map must already be filled.

This method may fail in cases of overlapping nodes or vertex averages, e.g. with slit meshes and/or overset meshes.
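
A hedged usage sketch, assuming the SyncMyFlag functor from the example above; the helper function name is an assumption for illustration:

  // Hypothetical sketch: fill a LocationMap over the mesh's nodes, then sync
  // per-node flags by xyz location.
  #include <map>
  #include "libmesh/id_types.h"
  #include "libmesh/location_maps.h"
  #include "libmesh/mesh_base.h"
  #include "libmesh/node.h"
  #include "libmesh/parallel_ghost_sync.h"

  void sync_flags_by_location(libMesh::MeshBase & mesh,
                              std::map<libMesh::dof_id_type, unsigned int> & flags)
  {
    SyncMyFlag sync(flags);

    // The location map must be filled before the call.
    libMesh::LocationMap<libMesh::Node> location_map;
    location_map.init(mesh);

    libMesh::Parallel::sync_dofobject_data_by_xyz
      (mesh.comm(), mesh.nodes_begin(), mesh.nodes_end(), location_map, sync);
  }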

Definition at line 240 of file parallel_ghost_sync.h.

References libMesh::LocationMap< T >::empty(), libMesh::LocationMap< T >::find(), libMesh::DofObject::invalid_processor_id, libMesh::libmesh_assert(), TIMPI::Communicator::max(), libMesh::LocationMap< T >::point_of(), pull_parallel_vector_data(), and TIMPI::Communicator::rank().

Referenced by ParallelGhostSyncTest::testByXYZ().

245 {
246  // This function must be run on all processors at once
247  libmesh_parallel_only(comm);
248 
249  // We need a valid location_map
250 #ifdef DEBUG
251  bool need_map_update = (range_begin != range_end && location_map.empty());
252  comm.max(need_map_update);
253  libmesh_assert(!need_map_update);
254 #endif
255 
256  // Count the objects to ask each processor about
257  std::map<processor_id_type, dof_id_type>
258  ghost_objects_from_proc;
259 
260  for (Iterator it = range_begin; it != range_end; ++it)
261  {
262  DofObjType * obj = *it;
263  libmesh_assert (obj);
264  processor_id_type obj_procid = obj->processor_id();
265  if (obj_procid != DofObject::invalid_processor_id)
266  ghost_objects_from_proc[obj_procid]++;
267  }
268 
269  // Request sets to send to each processor
270  std::map<processor_id_type, std::vector<Point>>
271  requested_objs_pt;
272  // Corresponding ids to keep track of
273  std::map<processor_id_type, std::vector<dof_id_type>>
274  requested_objs_id;
275 
276  // We know how many objects live on each processor, so reserve()
277  // space for each.
278  for (auto pair : ghost_objects_from_proc)
279  {
280  const processor_id_type p = pair.first;
281  if (p != comm.rank())
282  {
283  requested_objs_pt[p].reserve(pair.second);
284  requested_objs_id[p].reserve(pair.second);
285  }
286  }
287 
288  for (Iterator it = range_begin; it != range_end; ++it)
289  {
290  DofObjType * obj = *it;
291  processor_id_type obj_procid = obj->processor_id();
292  if (obj_procid == comm.rank() ||
293  obj_procid == DofObject::invalid_processor_id)
294  continue;
295 
296  Point p = location_map.point_of(*obj);
297  requested_objs_pt[obj_procid].push_back(p);
298  requested_objs_id[obj_procid].push_back(obj->id());
299  }
300 
301  std::map<const std::vector<Point> *, processor_id_type>
302  requested_objs_pt_inv;
303  for (auto & pair : requested_objs_pt)
304  requested_objs_pt_inv[&pair.second] = pair.first;
305 
306  auto gather_functor =
307  [&location_map, &sync]
308  (processor_id_type /*pid*/, const std::vector<Point> & pts,
309  std::vector<typename SyncFunctor::datum> & data)
310  {
311  // Find the local id of each requested object
312  std::size_t query_size = pts.size();
313  std::vector<dof_id_type> query_id(query_size);
314  for (std::size_t i=0; i != query_size; ++i)
315  {
316  Point pt = pts[i];
317 
318  // Look for this object in the multimap
319  DofObjType * obj = location_map.find(pt);
320 
321  // We'd better find every object we're asked for
322  libmesh_assert (obj);
323 
324  // Return the object's correct processor id,
325  // and our (correct if it's local) id for it.
326  query_id[i] = obj->id();
327  }
328 
329  // Gather whatever data the user wants
330  sync.gather_data(query_id, data);
331  };
332 
333  auto action_functor =
334  [&sync, &requested_objs_id,
335  &requested_objs_pt_inv]
336  (processor_id_type /* pid */, const std::vector<Point> & point_request,
337  const std::vector<typename SyncFunctor::datum> & data)
338  {
339  // With splits working on more pids than ranks, query_pid may not equal pid
340  const processor_id_type query_pid =
341  requested_objs_pt_inv[&point_request];
342 
343  // Let the user process the results
344  sync.act_on_data(requested_objs_id[query_pid], data);
345  };
346 
347  // Trade requests with other processors
348  typename SyncFunctor::datum * ex = nullptr;
349  pull_parallel_vector_data
350  (comm, requested_objs_pt, gather_functor, action_functor, ex);
351 }

◆ sync_element_data_by_parent_id()

template<typename Iterator , typename SyncFunctor >
void libMesh::Parallel::sync_element_data_by_parent_id ( MeshBase &  mesh,
const Iterator &  range_begin,
const Iterator &  range_end,
SyncFunctor &  sync 
)

Request data about a range of ghost elements uniquely identified by their parent id and which child they are.

Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data) by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.
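
A hedged usage sketch, again borrowing the illustrative SyncMyFlag functor from the earlier example (here keyed by element ids rather than node ids); the wrapper function name is an assumption:

  // Hypothetical sketch: request data for ghost active elements, which the
  // owning processor identifies by parent id and child number.
  #include "libmesh/mesh_base.h"
  #include "libmesh/parallel_ghost_sync.h"

  void sync_child_flags(libMesh::MeshBase & mesh, SyncMyFlag & sync)
  {
    libMesh::Parallel::sync_element_data_by_parent_id
      (mesh, mesh.active_elements_begin(), mesh.active_elements_end(), sync);
  }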

Definition at line 453 of file parallel_ghost_sync.h.

References libMesh::Elem::active(), libMesh::Elem::child_ptr(), libMesh::Elem::has_children(), libMesh::DofObject::id(), libMesh::DofObject::invalid_processor_id, libMesh::libmesh_assert(), mesh, libMesh::Elem::parent(), libMesh::DofObject::processor_id(), pull_parallel_vector_data(), and libMesh::Elem::which_child_am_i().

Referenced by libMesh::MeshCommunication::make_elems_parallel_consistent().

457 {
458  const Communicator & comm (mesh.comm());
459 
460  // This function must be run on all processors at once
461  libmesh_parallel_only(comm);
462 
463  // Count the objects to ask each processor about
464  std::map<processor_id_type, dof_id_type>
465  ghost_objects_from_proc;
466 
467  for (Iterator it = range_begin; it != range_end; ++it)
468  {
469  Elem * elem = *it;
470  processor_id_type obj_procid = elem->processor_id();
471  if (obj_procid == comm.rank() ||
472  obj_procid == DofObject::invalid_processor_id)
473  continue;
474  const Elem * parent = elem->parent();
475  if (!parent || !elem->active())
476  continue;
477 
478  ghost_objects_from_proc[obj_procid]++;
479  }
480 
481  // Request sets to send to each processor
482  std::map<processor_id_type, std::vector<dof_id_type>>
483  requested_objs_id;
484  std::map<processor_id_type, std::vector<std::pair<dof_id_type,unsigned char>>>
485  requested_objs_parent_id_child_num;
486 
487  // We know how many objects live on each processor, so reserve()
488  // space for each.
489  for (auto pair : ghost_objects_from_proc)
490  {
491  const processor_id_type p = pair.first;
492  if (p != comm.rank())
493  {
494  requested_objs_id[p].reserve(pair.second);
495  requested_objs_parent_id_child_num[p].reserve(pair.second);
496  }
497  }
498 
499  for (Iterator it = range_begin; it != range_end; ++it)
500  {
501  Elem * elem = *it;
502  processor_id_type obj_procid = elem->processor_id();
503  if (obj_procid == comm.rank() ||
504  obj_procid == DofObject::invalid_processor_id)
505  continue;
506  const Elem * parent = elem->parent();
507  if (!parent || !elem->active())
508  continue;
509 
510  requested_objs_id[obj_procid].push_back(elem->id());
511  requested_objs_parent_id_child_num[obj_procid].emplace_back
512  (parent->id(), cast_int<unsigned char>(parent->which_child_am_i(elem)));
513  }
514 
515  std::map<const std::vector<std::pair<dof_id_type,unsigned char>> *, processor_id_type>
516  requested_objs_parent_id_child_num_inv;
517  for (auto & pair : requested_objs_parent_id_child_num)
518  requested_objs_parent_id_child_num_inv[&pair.second] = pair.first;
519 
520  auto gather_functor =
521  [&mesh, &sync]
522  (processor_id_type,
523  const std::vector<std::pair<dof_id_type, unsigned char>> & parent_id_child_num,
524  std::vector<typename SyncFunctor::datum> & data)
525  {
526  // Find the id of each requested element
527  std::size_t query_size = parent_id_child_num.size();
528  std::vector<dof_id_type> query_id(query_size);
529  for (std::size_t i=0; i != query_size; ++i)
530  {
531  Elem & parent = mesh.elem_ref(parent_id_child_num[i].first);
532  libmesh_assert(parent.has_children());
533  Elem * child = parent.child_ptr(parent_id_child_num[i].second);
534  libmesh_assert(child);
535  libmesh_assert(child->active());
536  query_id[i] = child->id();
537  }
538 
539  // Gather whatever data the user wants
540  sync.gather_data(query_id, data);
541  };
542 
543  auto action_functor =
544  [&sync, &requested_objs_id,
545  &requested_objs_parent_id_child_num_inv]
546  (processor_id_type /* pid */,
547  const std::vector<std::pair<dof_id_type, unsigned char>> & parent_id_child_num_request,
548  const std::vector<typename SyncFunctor::datum> & data)
549  {
550  // With splits working on more pids than ranks, query_pid may not equal pid
551  const processor_id_type query_pid =
552  requested_objs_parent_id_child_num_inv[&parent_id_child_num_request];
553 
554  // Let the user process the results
555  sync.act_on_data(requested_objs_id[query_pid], data);
556  };
557 
558  // Trade requests with other processors
559  typename SyncFunctor::datum * ex = nullptr;
560  pull_parallel_vector_data
561  (comm, requested_objs_parent_id_child_num, gather_functor,
562  action_functor, ex);
563 }

◆ sync_node_data_by_element_id()

template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
void libMesh::Parallel::sync_node_data_by_element_id ( MeshBase &  mesh,
const MeshBase::const_element_iterator &  range_begin,
const MeshBase::const_element_iterator &  range_end,
const ElemCheckFunctor &  elem_check,
const NodeCheckFunctor &  node_check,
SyncFunctor &  sync 
)

Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, iterating until data is completely in sync and further synchronization passes cause no changes.

Imagine a vertex surrounded by triangles, each on a different processor, with a ghosting policy that includes only face neighbors and not point neighbors. Then the only way for authoritative information to trickle out from that vertex is by being passed along, one neighbor at a time, to processors that mostly don't even see the node's true owner!

Data for all nodes connected to elements in the given range of element iterators will be requested.

Elements can be further excluded from the request by returning false from elem_check(elem).

Nodes can be further excluded from the request by returning false from node_check(elem, local_node_num).

Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data) by resizing and setting the values of the data vector. Respond to fulfillment with bool sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), which should return true iff the response changed any data.

The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.
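
For illustration only (not a libMesh class), a functor suited to this interface might look like the sketch below. The external map node_values, the sentinel choice, and the class name are assumptions; the important point is that act_on_data() returns true iff it changed anything, which is what drives the iteration:

  // Hypothetical sketch: sync a per-node value keyed by node id. Because this
  // sync may return answers for queries the responder could not resolve, a
  // sentinel marks "no answer" and such entries are ignored.
  #include <limits>
  #include <map>
  #include <vector>
  #include "libmesh/id_types.h"

  struct SyncNodeValue
  {
    typedef double datum;

    std::map<libMesh::dof_id_type, datum> & node_values;

    explicit SyncNodeValue(std::map<libMesh::dof_id_type, datum> & vals)
      : node_values(vals) {}

    // Answer queries; ids we cannot resolve locally get the sentinel.
    void gather_data(const std::vector<libMesh::dof_id_type> & ids,
                     std::vector<datum> & data)
    {
      data.resize(ids.size(), std::numeric_limits<datum>::max());
      for (std::size_t i = 0; i != ids.size(); ++i)
        {
          auto it = node_values.find(ids[i]);
          if (it != node_values.end())
            data[i] = it->second;
        }
    }

    // Accept answers; return true iff any local value actually changed.
    bool act_on_data(const std::vector<libMesh::dof_id_type> & ids,
                     const std::vector<datum> & data)
    {
      bool changed = false;
      for (std::size_t i = 0; i != ids.size(); ++i)
        if (data[i] != std::numeric_limits<datum>::max() &&
            node_values[ids[i]] != data[i])
          {
            node_values[ids[i]] = data[i];
            changed = true;
          }
      return changed;
    }
  };

A typical call passes SyncEverything() for both check functors, e.g. sync_node_data_by_element_id(mesh, mesh.elements_begin(), mesh.elements_end(), SyncEverything(), SyncEverything(), sync).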

Definition at line 780 of file parallel_ghost_sync.h.

References mesh, and sync_node_data_by_element_id_once().

Referenced by libMesh::MeshCommunication::make_new_node_proc_ids_parallel_consistent(), libMesh::MeshCommunication::make_node_ids_parallel_consistent(), libMesh::MeshCommunication::make_node_proc_ids_parallel_consistent(), and libMesh::Partitioner::set_node_processor_ids().

786 {
787  // This function must be run on all processors at once
788  libmesh_parallel_only(mesh.comm());
789 
790  bool need_sync = false;
791 
792  do
793  {
794  need_sync =
795  sync_node_data_by_element_id_once
796  (mesh, range_begin, range_end, elem_check, node_check,
797  sync);
798  } while (need_sync);
799 }

◆ sync_node_data_by_element_id_once()

template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
bool libMesh::Parallel::sync_node_data_by_element_id_once ( MeshBase &  mesh,
const MeshBase::const_element_iterator &  range_begin,
const MeshBase::const_element_iterator &  range_end,
const ElemCheckFunctor &  elem_check,
const NodeCheckFunctor &  node_check,
SyncFunctor &  sync 
)

Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, assuming a single synchronization pass is necessary.

Data for all nodes connected to elements in the given range of element iterators will be requested.

Elements can be further excluded from the request by returning false from elem_check(elem).

Nodes can be further excluded from the request by returning false from node_check(elem, local_node_num).

Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data) by resizing and setting the values of the data vector. Respond to fulfillment with bool sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), which should return true iff the response changed any data.

The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.

This method returns true iff the sync pass changed any data on any processor.
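
A hedged single-pass usage sketch, using the illustrative SyncNodeValue functor from the example above; the wrapper function name is an assumption:

  // Hypothetical sketch: one sync pass over all elements. The return value
  // reports whether any processor's data changed in this pass.
  #include "libmesh/mesh_base.h"
  #include "libmesh/parallel_ghost_sync.h"

  bool sync_node_values_once(libMesh::MeshBase & mesh, SyncNodeValue & sync)
  {
    return libMesh::Parallel::sync_node_data_by_element_id_once
      (mesh, mesh.elements_begin(), mesh.elements_end(),
       libMesh::Parallel::SyncEverything(), libMesh::Parallel::SyncEverything(),
       sync);
  }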

Definition at line 580 of file parallel_ghost_sync.h.

References libMesh::as_range(), libMesh::DofObject::id(), libMesh::DofObject::invalid_id, libMesh::DofObject::invalid_processor_id, libMesh::libmesh_assert(), mesh, libMesh::Elem::n_nodes(), libMesh::Elem::node_ref(), libMesh::DofObject::processor_id(), and pull_parallel_vector_data().

Referenced by libMesh::MeshCommunication::make_new_node_proc_ids_parallel_consistent(), and sync_node_data_by_element_id().

586 {
587  const Communicator & comm (mesh.comm());
588 
589  // Count the objects to ask each processor about
590  std::map<processor_id_type, dof_id_type>
591  ghost_objects_from_proc;
592 
593  for (const auto & elem : as_range(range_begin, range_end))
594  {
595  libmesh_assert (elem);
596 
597  if (!elem_check(elem))
598  continue;
599 
600  const processor_id_type proc_id = elem->processor_id();
601 
602  bool i_have_elem =
603  (proc_id == comm.rank() ||
604  proc_id == DofObject::invalid_processor_id);
605 
606  if (elem->active() && i_have_elem)
607  continue;
608 
609  for (auto n : elem->node_index_range())
610  {
611  if (!node_check(elem, n))
612  continue;
613 
614  const processor_id_type node_pid =
615  elem->node_ref(n).processor_id();
616 
617  if (i_have_elem && (node_pid == comm.rank()))
618  continue;
619 
620  if (i_have_elem)
621  {
622  libmesh_assert_not_equal_to
623  (node_pid, DofObject::invalid_processor_id);
624  ghost_objects_from_proc[node_pid]++;
625  }
626  else
627  {
628  const processor_id_type request_pid =
629  (node_pid == DofObject::invalid_processor_id) ?
630  proc_id : node_pid;
631  ghost_objects_from_proc[request_pid]++;
632  }
633  }
634  }
635 
636  // Now repeat that iteration, filling request sets this time.
637 
638  // Request sets to send to each processor
639  std::map<processor_id_type, std::vector<std::pair<dof_id_type, unsigned char>>>
640  requested_objs_elem_id_node_num;
641 
642  // We know how many objects live on each processor, so reserve()
643  // space for each.
644  for (auto pair : ghost_objects_from_proc)
645  {
646  const processor_id_type p = pair.first;
647  if (p != comm.rank())
648  requested_objs_elem_id_node_num[p].reserve(ghost_objects_from_proc[p]);
649  }
650 
651  for (const auto & elem : as_range(range_begin, range_end))
652  {
653  libmesh_assert (elem);
654 
655  if (!elem_check(elem))
656  continue;
657 
658  const processor_id_type proc_id = elem->processor_id();
659 
660  bool i_have_elem =
661  (proc_id == comm.rank() ||
662  proc_id == DofObject::invalid_processor_id);
663 
664  if (elem->active() && i_have_elem)
665  continue;
666 
667  const dof_id_type elem_id = elem->id();
668 
669  for (auto n : elem->node_index_range())
670  {
671  if (!node_check(elem, n))
672  continue;
673 
674  const Node & node = elem->node_ref(n);
675  const processor_id_type node_pid = node.processor_id();
676 
677  if (i_have_elem && (node_pid == comm.rank()))
678  continue;
679 
680  if (i_have_elem)
681  {
682  libmesh_assert_not_equal_to
683  (node_pid, DofObject::invalid_processor_id);
684  requested_objs_elem_id_node_num[node_pid].emplace_back
685  (elem_id, cast_int<unsigned char>(n));
686  }
687  else
688  {
689  const processor_id_type request_pid =
690  (node_pid == DofObject::invalid_processor_id) ?
691  proc_id : node_pid;
692  requested_objs_elem_id_node_num[request_pid].emplace_back
693  (elem_id,cast_int<unsigned char>(n));
694  }
695  }
696  }
697 
698  auto gather_functor =
699  [&mesh, &sync]
700  (processor_id_type,
701  const std::vector<std::pair<dof_id_type, unsigned char>> & elem_id_node_num,
702  std::vector<typename SyncFunctor::datum> & data)
703  {
704  // Find the id of each requested element
705  std::size_t request_size = elem_id_node_num.size();
706  std::vector<dof_id_type> query_id(request_size);
707  for (std::size_t i=0; i != request_size; ++i)
708  {
709  // We might now get queries about remote elements, in which
710  // case we'll have to ignore them and wait for the query
711  // answer to filter to the querier via another source.
712  const Elem * elem = mesh.query_elem_ptr(elem_id_node_num[i].first);
713 
714  if (elem)
715  {
716  const unsigned int n = elem_id_node_num[i].second;
717  libmesh_assert_less (n, elem->n_nodes());
718 
719  const Node & node = elem->node_ref(n);
720 
721  // This isn't a safe assertion in the case where we're
722  // syncing processor ids
723  // libmesh_assert_equal_to (node->processor_id(), comm.rank());
724 
725  query_id[i] = node.id();
726  }
727  else
728  query_id[i] = DofObject::invalid_id;
729  }
730 
731  // Gather whatever data the user wants
732  sync.gather_data(query_id, data);
733  };
734 
735  bool data_changed = false;
736 
737  auto action_functor =
738  [&sync, &mesh, &data_changed]
739  (processor_id_type /* pid */,
740  const std::vector<std::pair<dof_id_type, unsigned char>> & elem_id_node_num,
741  const std::vector<typename SyncFunctor::datum> & data)
742  {
743  const std::size_t data_size = data.size();
744 
745  libmesh_assert_equal_to(elem_id_node_num.size(), data_size);
746 
747  std::vector<dof_id_type> requested_objs_id(data.size());
748 
749  for (auto i : IntRange<std::size_t>(0,data_size))
750  {
751  const Elem & elem = mesh.elem_ref(elem_id_node_num[i].first);
752  const Node & node = elem.node_ref(elem_id_node_num[i].second);
753  requested_objs_id[i] = node.id();
754  }
755 
756  // Let the user process the results. If any of the results
757  // were different than what the user expected, then we may
758  // need to sync again just in case this processor has to
759  // pass on the changes to yet another processor.
760  if (sync.act_on_data(requested_objs_id, data))
761  data_changed = true;
762  };
763 
764  // Trade requests with other processors
765  typename SyncFunctor::datum * ex = nullptr;
766  pull_parallel_vector_data
767  (comm, requested_objs_elem_id_node_num, gather_functor,
768  action_functor, ex);
769 
770  comm.max(data_changed);
771 
772  return data_changed;
773 }