libMesh::Parallel Namespace Reference

The Parallel namespace provides wrapper functions for common general parallel synchronization tasks. More...

Namespaces

 Utils
 

Classes

struct  Attributes
 
class  BinSorter
 Perform a parallel sort using a bin-sort method. More...
 
struct  BuildStandardTypeVector
 
struct  BuildStandardTypeVector< 0 >
 
class  Communicator
 Encapsulates the MPI_Comm object. More...
 
struct  data_type
 
class  DataPlusInt
 Types combined with an int. More...
 
class  DataType
 Encapsulates the MPI_Datatype. More...
 
struct  FillDisplacementArray
 
struct  FillDisplacementArray< 0 >
 
class  Histogram
 Defines a histogram to be used in parallel in conjunction with a BinSorter. More...
 
class  MessageTag
 Encapsulates the MPI tag integers. More...
 
class  OpFunction
 Templated class to provide the appropriate MPI reduction operations for use with built-in C types or simple C++ constructions. More...
 
class  OpFunction< Point >
 
class  OpFunction< TypeVector< T > >
 
class  OpFunction< VectorValue< T > >
 
struct  opfunction_dependent_false
 
class  Packing
 Define data types and (un)serialization functions for use when encoding a potentially-variable-size object of type T. More...
 
class  Packing< const Elem * >
 
class  Packing< const Node * >
 
class  Packing< Elem * >
 
class  Packing< Node * >
 
struct  PostWaitCopyBuffer
 
struct  PostWaitDeleteBuffer
 
struct  PostWaitDereferenceSharedPtr
 
struct  PostWaitDereferenceTag
 
struct  PostWaitFreeBuffer
 
struct  PostWaitUnpackBuffer
 
struct  PostWaitWork
An abstract base class that can be subclassed to allow other code to perform work after an MPI_Wait succeeds. More...
 
class  Request
 Encapsulates the MPI_Request. More...
 
struct  request
 
class  Sort
 The parallel sorting method is templated on the type of data which is to be sorted. More...
 
class  StandardType
 Templated class to provide the appropriate MPI datatype for use with built-in C types or simple C++ constructions. More...
 
class  StandardType< Hilbert::HilbertIndices >
 
class  StandardType< Point >
 
class  StandardType< std::complex< T > >
 
class  StandardType< std::pair< T1, T2 > >
 
class  StandardType< std::tuple< Types... > >
 
class  StandardType< TensorValue< T > >
 
class  StandardType< TypeTensor< T > >
 
class  StandardType< TypeVector< T > >
 
class  StandardType< VectorValue< T > >
 
struct  standardtype_dependent_false
 
class  Status
 Encapsulates the MPI_Status struct. More...
 
struct  status
 
struct  SyncEverything
 
class  TypeVectorOpFunction
 

Typedefs

typedef MPI_Comm communicator
 Communicator object for talking with subsets of processors. More...
 
typedef MPI_Datatype data_type
 Data types for communication. More...
 
typedef std::pair< Hilbert::HilbertIndices, unique_id_type > DofObjectKey
 
typedef MPI_Request request
 Request object for non-blocking I/O. More...
 
typedef MPI_Status status
 Status object for querying messages. More...
 

Functions

 LIBMESH_INT_TYPE (char)
 
 LIBMESH_INT_TYPE (signed char)
 
 LIBMESH_INT_TYPE (unsigned char)
 
 LIBMESH_INT_TYPE (short int)
 
 LIBMESH_INT_TYPE (unsigned short int)
 
 LIBMESH_INT_TYPE (int)
 
 LIBMESH_INT_TYPE (long)
 
 LIBMESH_INT_TYPE (unsigned long long)
 
 LIBMESH_FLOAT_TYPE (float)
 
 LIBMESH_FLOAT_TYPE (double)
 
 LIBMESH_FLOAT_TYPE (long double)
 
template<typename T , typename C , typename A >
 LIBMESH_CONTAINER_TYPE (std::set< T LIBMESH_ATTRIBUTES_COMMA C LIBMESH_ATTRIBUTES_COMMA A >)
 
template<typename T , typename A >
 LIBMESH_CONTAINER_TYPE (std::vector< T LIBMESH_ATTRIBUTES_COMMA A >)
 
 LIBMESH_PARALLEL_INTEGER_OPS (char)
 
 LIBMESH_PARALLEL_INTEGER_OPS (signed char)
 
 LIBMESH_PARALLEL_INTEGER_OPS (unsigned char)
 
 LIBMESH_PARALLEL_INTEGER_OPS (short int)
 
 LIBMESH_PARALLEL_INTEGER_OPS (unsigned short int)
 
 LIBMESH_PARALLEL_INTEGER_OPS (int)
 
 LIBMESH_PARALLEL_INTEGER_OPS (long)
 
 LIBMESH_PARALLEL_INTEGER_OPS (unsigned long long)
 
 LIBMESH_PARALLEL_FLOAT_OPS (float)
 
 LIBMESH_PARALLEL_FLOAT_OPS (double)
 
 LIBMESH_PARALLEL_FLOAT_OPS (long double)
 
template<typename Context , typename buffertype , typename OutputIter , typename T >
void unpack_range (const typename std::vector< buffertype > &buffer, Context *context, OutputIter out, const T *output_type)
 Decode a range of potentially-variable-size objects from a data array. More...
 
template<typename Context , typename buffertype , typename Iter >
Iter pack_range (const Context *context, Iter range_begin, const Iter range_end, typename std::vector< buffertype > &buffer, std::size_t approx_buffer_size=1000000)
 Encode a range of potentially-variable-size objects to a data array. More...
 
template<typename Context , typename Iter >
std::size_t packed_range_size (const Context *context, Iter range_begin, const Iter range_end)
 Return the total buffer size needed to encode a range of potentially-variable-size objects to a data array. More...
 
template<typename Context , typename buffertype , typename OutputIter , typename T >
void unpack_range (const std::vector< buffertype > &buffer, Context *context, OutputIter out_iter, const T *)
 Helper function for range unpacking. More...
 
template<typename Iterator , typename DofObjType , typename SyncFunctor >
void sync_dofobject_data_by_xyz (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, LocationMap< DofObjType > *location_map, SyncFunctor &sync)
 Request data about a range of ghost nodes uniquely identified by their xyz location or a range of active ghost elements uniquely identified by their centroids' xyz location. More...
 
template<typename Iterator , typename SyncFunctor >
void sync_dofobject_data_by_id (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
 Request data about a range of ghost dofobjects uniquely identified by their id. More...
 
template<typename Iterator , typename DofObjectCheckFunctor , typename SyncFunctor >
void sync_dofobject_data_by_id (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, const DofObjectCheckFunctor &dofobj_check, SyncFunctor &sync)
 Request data about a range of ghost dofobjects uniquely identified by their id. More...
 
template<typename Iterator , typename SyncFunctor >
void sync_element_data_by_parent_id (MeshBase &mesh, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
 Request data about a range of ghost elements uniquely identified by their parent id and which child they are. More...
 
template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
bool sync_node_data_by_element_id_once (MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
 Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, assuming a single synchronization pass is necessary. More...
 
template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
void sync_node_data_by_element_id (MeshBase &mesh, const MeshBase::const_element_iterator &range_begin, const MeshBase::const_element_iterator &range_end, const ElemCheckFunctor &elem_check, const NodeCheckFunctor &node_check, SyncFunctor &sync)
Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, iterating until data is completely in sync and further synchronization passes cause no changes. More...
 
template<typename Iterator , typename DofObjType , typename SyncFunctor >
void sync_dofobject_data_by_xyz (const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, LocationMap< DofObjType > &location_map, SyncFunctor &sync)
 
template<typename T >
data_type dataplusint_type ()
 Templated function to return the appropriate MPI datatype for use with built-in C types when combined with an int. More...
 
template<>
data_type dataplusint_type< short int > ()
 
template<>
data_type dataplusint_type< int > ()
 
template<>
data_type dataplusint_type< long > ()
 
template<>
data_type dataplusint_type< float > ()
 
template<>
data_type dataplusint_type< double > ()
 
template<>
data_type dataplusint_type< long double > ()
 
template<typename MapToVectors , typename RequestContainer , typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapToVectors &data, RequestContainer &reqs, ActionFunctor &act_on_data)
 Send and receive and act on vectors of data. More...
 
template<typename MapToVectors , typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapToVectors &data, ActionFunctor &act_on_data)
 Send and receive and act on vectors of data. More...
 
template<typename datum , typename MapToVectors , typename RequestContainer , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, RequestContainer &reqs, GatherFunctor &gather_data, ActionFunctor &act_on_data, const datum *example)
 Send query vectors, receive and answer them with vectors of data, then act on those answers. More...
 
template<typename datum , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, GatherFunctor &gather_data, ActionFunctor &act_on_data, const datum *example)
 Send query vectors, receive and answer them with vectors of data, then act on those answers. More...
 
template<template< typename, typename, typename ... > class MapType, typename KeyType , typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename RequestContainer , typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &data, RequestContainer &reqs, ActionFunctor &act_on_data)
 
template<template< typename, typename, typename ... > class MapType, typename KeyType , typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &data, ActionFunctor &act_on_data)
 
template<typename datum , typename A , typename MapToVectors , typename RequestContainer , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, RequestContainer &reqs, GatherFunctor &gather_data, ActionFunctor &act_on_data, const std::vector< datum, A > *example)
 
template<template< typename, typename, typename ... > class MapType, typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename RequestContainer , typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &data, RequestContainer &reqs, ActionFunctor &act_on_data)
 
template<template< typename, typename, typename ... > class MapType, typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename ActionFunctor >
void push_parallel_vector_data (const Communicator &comm, const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &data, ActionFunctor &act_on_data)
 
Status wait (Request &r)
 Wait for a non-blocking send or receive to finish. More...
 
void wait (std::vector< Request > &r)
 Wait for all non-blocking operations to finish. More...
 
std::size_t waitany (std::vector< Request > &r)
 Wait for at least one non-blocking operation to finish. More...
 
 LIBMESH_STANDARD_TYPE (char, MPI_CHAR)
 
 LIBMESH_STANDARD_TYPE (signed char, MPI_SIGNED_CHAR)
 
 LIBMESH_STANDARD_TYPE (unsigned char, MPI_UNSIGNED_CHAR)
 
 LIBMESH_STANDARD_TYPE (short int, MPI_SHORT)
 
 LIBMESH_STANDARD_TYPE (unsigned short int, MPI_UNSIGNED_SHORT)
 
 LIBMESH_STANDARD_TYPE (int, MPI_INT)
 
 LIBMESH_STANDARD_TYPE (unsigned int, MPI_UNSIGNED)
 
 LIBMESH_STANDARD_TYPE (long, MPI_LONG)
 
 LIBMESH_STANDARD_TYPE (long long, MPI_LONG_LONG_INT)
 
 LIBMESH_STANDARD_TYPE (unsigned long, MPI_UNSIGNED_LONG)
 
 LIBMESH_STANDARD_TYPE (unsigned long long, MPI_UNSIGNED_LONG_LONG)
 
 LIBMESH_STANDARD_TYPE (float, MPI_FLOAT)
 
 LIBMESH_STANDARD_TYPE (double, MPI_DOUBLE)
 
 LIBMESH_STANDARD_TYPE (long double, MPI_LONG_DOUBLE)
 

Variables

const unsigned int any_source
 Processor id meaning "Accept from any source". More...
 
const MessageTag any_tag = MessageTag(MPI_ANY_TAG)
 Default message tag ids. More...
 
const MessageTag no_tag = MessageTag(0)
 

Detailed Description

The Parallel namespace provides wrapper functions for common general parallel synchronization tasks.

For MPI 1.1 compatibility, temporary buffers are used instead of MPI 2's MPI_IN_PLACE.
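
For illustration, a minimal sketch of the wrapper style (not taken from this page; it assumes only the standard LibMeshInit startup): the Communicator methods stand in for raw MPI calls such as MPI_Allreduce.

#include "libmesh/libmesh.h"
#include "libmesh/parallel.h"

using namespace libMesh;

int main (int argc, char ** argv)
{
  LibMeshInit init (argc, argv);
  const Parallel::Communicator & comm = init.comm();

  // Each processor contributes its rank; sum() wraps the
  // allreduce, so every processor sees the same total.
  unsigned int x = comm.rank();
  comm.sum(x);

  return 0;
}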

Typedef Documentation

◆ communicator

typedef MPI_Comm libMesh::Parallel::communicator

Communicator object for talking with subsets of processors.

Definition at line 57 of file communicator.h.

◆ data_type

typedef MPI_Datatype libMesh::Parallel::data_type

Data types for communication.

Definition at line 46 of file data_type.h.

◆ DofObjectKey

typedef std::pair< Hilbert::HilbertIndices, unique_id_type > libMesh::Parallel::DofObjectKey

Definition at line 69 of file parallel_hilbert.h.

◆ request

typedef MPI_Request libMesh::Parallel::request

Request object for non-blocking I/O.

Definition at line 40 of file request.h.

◆ status

typedef MPI_Status libMesh::Parallel::status

Status object for querying messages.

Definition at line 41 of file status.h.

Function Documentation

◆ dataplusint_type()

template<typename T >
data_type libMesh::Parallel::dataplusint_type ( )

Templated function to return the appropriate MPI datatype for use with built-in C types when combined with an int.
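
For instance, the returned type can describe a DataPlusInt<double> in an MPI_MAXLOC reduction, which locates a global maximum together with the rank owning it. A hedged sketch, assuming a valid Communicator comm; local_value is hypothetical:

DataPlusInt<double> data_in, data_out;
data_in.val  = local_value;   // this processor's candidate (hypothetical)
data_in.rank = comm.rank();

// dataplusint_type<double>() is MPI_DOUBLE_INT, matching the
// {double, int} layout of DataPlusInt<double>.
MPI_Allreduce (&data_in, &data_out, 1,
               dataplusint_type<double>(),
               MPI_MAXLOC, comm.get());

// data_out.val is the global maximum; data_out.rank owned it.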

◆ dataplusint_type< double >()

template<>
data_type libMesh::Parallel::dataplusint_type< double > ( )

Definition at line 167 of file parallel_implementation.h.

{ return MPI_DOUBLE_INT; }

◆ dataplusint_type< float >()

template<>
data_type libMesh::Parallel::dataplusint_type< float > ( )

Definition at line 164 of file parallel_implementation.h.

{ return MPI_FLOAT_INT; }

◆ dataplusint_type< int >()

template<>
data_type libMesh::Parallel::dataplusint_type< int > ( )

Definition at line 158 of file parallel_implementation.h.

{ return MPI_2INT; }

◆ dataplusint_type< long >()

template<>
data_type libMesh::Parallel::dataplusint_type< long > ( )

Definition at line 161 of file parallel_implementation.h.

{ return MPI_LONG_INT; }

◆ dataplusint_type< long double >()

template<>
data_type libMesh::Parallel::dataplusint_type< long double > ( )

Definition at line 170 of file parallel_implementation.h.

{ return MPI_LONG_DOUBLE_INT; }

◆ dataplusint_type< short int >()

template<>
data_type libMesh::Parallel::dataplusint_type< short int > ( )

Definition at line 155 of file parallel_implementation.h.

{ return MPI_SHORT_INT; }

◆ LIBMESH_CONTAINER_TYPE() [1/2]

template<typename T , typename C , typename A >
libMesh::Parallel::LIBMESH_CONTAINER_TYPE ( std::set< T LIBMESH_ATTRIBUTES_COMMA C LIBMESH_ATTRIBUTES_COMMA A >  )

◆ LIBMESH_CONTAINER_TYPE() [2/2]

template<typename T , typename A >
libMesh::Parallel::LIBMESH_CONTAINER_TYPE ( std::vector< T LIBMESH_ATTRIBUTES_COMMA A >  )

◆ LIBMESH_FLOAT_TYPE() [1/3]

libMesh::Parallel::LIBMESH_FLOAT_TYPE ( float )

◆ LIBMESH_FLOAT_TYPE() [2/3]

libMesh::Parallel::LIBMESH_FLOAT_TYPE ( double )

◆ LIBMESH_FLOAT_TYPE() [3/3]

libMesh::Parallel::LIBMESH_FLOAT_TYPE ( long double )

◆ LIBMESH_INT_TYPE() [1/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( char )

◆ LIBMESH_INT_TYPE() [2/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( signed char )

◆ LIBMESH_INT_TYPE() [3/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( unsigned char )

◆ LIBMESH_INT_TYPE() [4/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( short int )

◆ LIBMESH_INT_TYPE() [5/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( unsigned short int )

◆ LIBMESH_INT_TYPE() [6/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( int )

◆ LIBMESH_INT_TYPE() [7/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( long )

◆ LIBMESH_INT_TYPE() [8/8]

libMesh::Parallel::LIBMESH_INT_TYPE ( unsigned long long )

◆ LIBMESH_PARALLEL_FLOAT_OPS() [1/3]

libMesh::Parallel::LIBMESH_PARALLEL_FLOAT_OPS ( float )

◆ LIBMESH_PARALLEL_FLOAT_OPS() [2/3]

libMesh::Parallel::LIBMESH_PARALLEL_FLOAT_OPS ( double )

◆ LIBMESH_PARALLEL_FLOAT_OPS() [3/3]

libMesh::Parallel::LIBMESH_PARALLEL_FLOAT_OPS ( long double )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [1/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( char )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [2/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( signed char )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [3/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( unsigned char )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [4/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( short int )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [5/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( unsigned short int )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [6/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( int )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [7/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( long )

◆ LIBMESH_PARALLEL_INTEGER_OPS() [8/8]

libMesh::Parallel::LIBMESH_PARALLEL_INTEGER_OPS ( unsigned long long )

◆ LIBMESH_STANDARD_TYPE() [1/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( char, MPI_CHAR )

◆ LIBMESH_STANDARD_TYPE() [2/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( signed char, MPI_SIGNED_CHAR )

◆ LIBMESH_STANDARD_TYPE() [3/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned char, MPI_UNSIGNED_CHAR )

◆ LIBMESH_STANDARD_TYPE() [4/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( short int, MPI_SHORT )

◆ LIBMESH_STANDARD_TYPE() [5/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned short int, MPI_UNSIGNED_SHORT )

◆ LIBMESH_STANDARD_TYPE() [6/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( int, MPI_INT )

◆ LIBMESH_STANDARD_TYPE() [7/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned int, MPI_UNSIGNED )

◆ LIBMESH_STANDARD_TYPE() [8/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( long, MPI_LONG )

◆ LIBMESH_STANDARD_TYPE() [9/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( long long, MPI_LONG_LONG_INT )

◆ LIBMESH_STANDARD_TYPE() [10/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned long, MPI_UNSIGNED_LONG )

◆ LIBMESH_STANDARD_TYPE() [11/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( unsigned long long, MPI_UNSIGNED_LONG_LONG )

◆ LIBMESH_STANDARD_TYPE() [12/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( float, MPI_FLOAT )

◆ LIBMESH_STANDARD_TYPE() [13/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( double, MPI_DOUBLE )

◆ LIBMESH_STANDARD_TYPE() [14/14]

libMesh::Parallel::LIBMESH_STANDARD_TYPE ( long double, MPI_LONG_DOUBLE )

◆ pack_range()

template<typename Context , typename buffertype , typename Iter >
Iter libMesh::Parallel::pack_range ( const Context *  context,
Iter  range_begin,
const Iter  range_end,
typename std::vector< buffertype > &  buffer,
std::size_t  approx_buffer_size = 1000000 
)

Encode a range of potentially-variable-size objects to a data array.

Helper function for range packing.

The data will be buffered in vectors with lengths that do not exceed the sum of approx_buffer_size and the size of an individual packed object.

Definition at line 139 of file packing.h.

References libMesh::Parallel::Packing< T >::pack(), libMesh::Parallel::Packing< T >::packable_size(), and libMesh::Parallel::Packing< T >::packed_size().

Referenced by libMesh::Parallel::Communicator::allgather_packed_range(), libMesh::Parallel::Communicator::broadcast_packed_range(), libMesh::Parallel::Communicator::gather_packed_range(), libMesh::Parallel::Communicator::nonblocking_send_packed_range(), libMesh::Parallel::Communicator::send_packed_range(), and libMesh::Parallel::Communicator::send_receive_packed_range().

{
  typedef typename std::iterator_traits<Iter>::value_type T;

  // Count the total size of and preallocate buffer for efficiency.
  // Prepare to stop early if the buffer would be too large.
  std::size_t buffer_size = 0;
  Iter range_stop = range_begin;
  for (; range_stop != range_end && buffer_size < approx_buffer_size;
       ++range_stop)
    {
      std::size_t next_buffer_size =
        Parallel::Packing<T>::packable_size(*range_stop, context);
      buffer_size += next_buffer_size;
    }
  buffer.reserve(buffer.size() + buffer_size);

  // Pack the objects into the buffer
  for (; range_begin != range_stop; ++range_begin)
    {
#ifndef NDEBUG
      std::size_t old_size = buffer.size();
#endif

      Parallel::Packing<T>::pack
        (*range_begin, back_inserter(buffer), context);

#ifndef NDEBUG
      unsigned int my_packable_size =
        Parallel::Packing<T>::packable_size(*range_begin, context);
      unsigned int my_packed_size =
        Parallel::Packing<T>::packed_size (buffer.begin() + old_size);
      libmesh_assert_equal_to (my_packable_size, my_packed_size);
      libmesh_assert_equal_to (buffer.size(), old_size + my_packable_size);
#endif
    }

  return range_stop;
}
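
A usage sketch (assuming an existing MeshBase mesh, and that largest_id_type is the buffer type expected by the Packing<Elem *> specialization): pack a range of elements in chunks, then decode it with unpack_range().

std::vector<largest_id_type> buffer;

// Encode the range; pack_range() returns where it stopped, so an
// oversized range can be packed (and, e.g., sent) chunk by chunk.
MeshBase::element_iterator       it  = mesh.elements_begin();
const MeshBase::element_iterator end = mesh.elements_end();
while (it != end)
  it = Parallel::pack_range (&mesh, it, end, buffer);

// Decode into a container of element pointers; the null pointer
// argument only fixes the output type.
std::vector<Elem *> elements;
Parallel::unpack_range (buffer, &mesh,
                        std::back_inserter(elements),
                        (Elem **)nullptr);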

◆ packed_range_size()

template<typename Context , typename Iter >
std::size_t libMesh::Parallel::packed_range_size ( const Context *  context,
Iter  range_begin,
const Iter  range_end 
)

Return the total buffer size needed to encode a range of potentially-variable-size objects to a data array.

Helper function for range packing.

Definition at line 118 of file packing.h.

References libMesh::Parallel::Packing< T >::packable_size().

Referenced by libMesh::Parallel::Communicator::send_packed_range().

{
  typedef typename std::iterator_traits<Iter>::value_type T;

  std::size_t buffer_size = 0;
  for (Iter range_count = range_begin;
       range_count != range_end;
       ++range_count)
    {
      buffer_size += Parallel::Packing<T>::packable_size(*range_count, context);
    }
  return buffer_size;
}

◆ pull_parallel_vector_data() [1/3]

template<typename datum , typename MapToVectors , typename RequestContainer , typename GatherFunctor , typename ActionFunctor >
void libMesh::Parallel::pull_parallel_vector_data ( const Communicator &  comm,
const MapToVectors &  queries,
RequestContainer &  reqs,
GatherFunctor &  gather_data,
ActionFunctor &  act_on_data,
const datum *  example 
)

Send query vectors, receive and answer them with vectors of data, then act on those answers.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of query ids to send.

Query data which is received from other processors will be operated on by gather_data(processor_id_type pid, const std::vector<id> & ids, std::vector<datum> & data)

Answer data which is received from other processors will be operated on by act_on_data(processor_id_type pid, const std::vector<id> & ids, const std::vector<datum> & data);

The example pointer may be null; it merely needs to be of the correct type. It's just here because function overloading in C++ is easy, whereas SFINAE is hard and partial template specialization of functions is impossible.

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All receives and actions are completed before this function returns.

Not all sends may have completed yet. The supplied container of Request objects, reqs, has more requests inserted, one for each of the data sends. These requests must be waited on before the data map is deleted.

Definition at line 472 of file parallel_sync.h.

References libMesh::Parallel::Communicator::get_unique_tag(), push_parallel_vector_data(), libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), libMesh::Parallel::Communicator::size(), wait(), and waitany().

Referenced by pull_parallel_vector_data(), sync_dofobject_data_by_id(), sync_dofobject_data_by_xyz(), sync_element_data_by_parent_id(), and sync_node_data_by_element_id_once().

{
  typedef typename MapToVectors::mapped_type query_type;

  std::map<processor_id_type, std::vector<datum> >
    response_data, received_data;
  std::vector<Request> response_reqs;

  StandardType<datum> datatype;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(105);

  auto gather_functor =
    [&comm, &gather_data, &response_data, &response_reqs, &datatype, &tag]
    (processor_id_type pid, query_type query)
    {
      gather_data(pid, query, response_data[pid]);
      libmesh_assert_equal_to(query.size(), response_data[pid].size());

      // Just act on data later if the user requested a send-to-self
      if (pid != comm.rank())
        {
          Request sendreq;
          comm.send(pid, response_data[pid], datatype, sendreq, tag);
          response_reqs.push_back(sendreq);
        }
    };

  push_parallel_vector_data (comm, queries, reqs, gather_functor);

  // Every outgoing query should now have an incoming response.
  // Post all of the receives, non-blocking
  std::vector<Request> receive_reqs;
  std::vector<processor_id_type> receive_procids;
  for (auto & querypair : queries)
    {
      processor_id_type proc_id = querypair.first;
      libmesh_assert_less(proc_id, comm.size());

      if (proc_id == comm.rank())
        {
          libmesh_assert(queries.count(proc_id));
          libmesh_assert_equal_to(queries.at(proc_id).size(),
                                  response_data.at(proc_id).size());
          act_on_data(proc_id, queries.at(proc_id), response_data.at(proc_id));
        }
      else
        {
          auto & querydata = querypair.second;
          Request req;
          auto & incoming_data = received_data[proc_id];
          incoming_data.resize(querydata.size());
          comm.receive(proc_id, incoming_data, datatype, req, tag);
          receive_reqs.push_back(req);
          receive_procids.push_back(proc_id);
        }
    }

  while(receive_reqs.size())
    {
      std::size_t completed = waitany(receive_reqs);
      processor_id_type proc_id = receive_procids[completed];
      receive_reqs.erase(receive_reqs.begin() + completed);
      receive_procids.erase(receive_procids.begin() + completed);

      libmesh_assert(queries.count(proc_id));
      libmesh_assert_equal_to(queries.at(proc_id).size(),
                              received_data[proc_id].size());
      act_on_data(proc_id, queries.at(proc_id), received_data[proc_id]);
      received_data.erase(proc_id);
    }

  wait(response_reqs);
}
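
A usage sketch of this overload (local_value_for() and store_answer() are hypothetical application code): each processor asks owners about a vector of dof ids and receives one double per id.

std::map<processor_id_type, std::vector<dof_id_type>> queries;
// ... fill queries[pid] with the ids to ask processor pid about ...

auto gather_functor =
  [] (processor_id_type /* pid */,
      const std::vector<dof_id_type> & ids,
      std::vector<double> & data)
  {
    // Owners answer: one datum per queried id.
    data.resize(ids.size());
    for (std::size_t i = 0; i != ids.size(); ++i)
      data[i] = local_value_for(ids[i]);   // hypothetical
  };

auto action_functor =
  [] (processor_id_type /* pid */,
      const std::vector<dof_id_type> & ids,
      const std::vector<double> & data)
  {
    // Requesters consume the answers.
    for (std::size_t i = 0; i != ids.size(); ++i)
      store_answer(ids[i], data[i]);       // hypothetical
  };

std::vector<Parallel::Request> send_reqs;
const double * ex = nullptr;   // only fixes the datum type
Parallel::pull_parallel_vector_data
  (comm, queries, send_reqs, gather_functor, action_functor, ex);

// Per the note above: wait on the send requests before the
// queries map (or anything they reference) goes away.
Parallel::wait (send_reqs);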

◆ pull_parallel_vector_data() [2/3]

template<typename datum , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void libMesh::Parallel::pull_parallel_vector_data ( const Communicator &  comm,
const MapToVectors &  queries,
GatherFunctor &  gather_data,
ActionFunctor &  act_on_data,
const datum *  example 
)

Send query vectors, receive and answer them with vectors of data, then act on those answers.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of query ids to send.

Query data which is received from other processors will be operated on by gather_data(processor_id_type pid, const std::vector<id> & ids, std::vector<datum> & data)

Answer data which is received from other processors will be operated on by act_on_data(processor_id_type pid, const std::vector<id> & ids, const std::vector<datum> & data);

The example pointer may be null; it merely needs to be of the correct type. It's just here because function overloading in C++ is easy, whereas SFINAE is hard and partial template specialization of functions is impossible.

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All communication and actions are complete when this function returns.

Definition at line 559 of file parallel_sync.h.

References pull_parallel_vector_data(), and wait().

{
  std::vector<Request> requests;

  pull_parallel_vector_data(comm, queries, requests, gather_data,
                            act_on_data, example);

  wait(requests);
}

◆ pull_parallel_vector_data() [3/3]

template<typename datum , typename A , typename MapToVectors , typename RequestContainer , typename GatherFunctor , typename ActionFunctor >
void libMesh::Parallel::pull_parallel_vector_data ( const Communicator &  comm,
const MapToVectors &  queries,
RequestContainer &  reqs,
GatherFunctor &  gather_data,
ActionFunctor &  act_on_data,
const std::vector< datum, A > *  example 
)

Definition at line 580 of file parallel_sync.h.

References any_source, libMesh::Parallel::Communicator::get_unique_tag(), libMesh::Parallel::Communicator::probe(), push_parallel_vector_data(), libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), and wait().

{
  typedef typename MapToVectors::mapped_type query_type;

  std::map<processor_id_type, std::vector<std::vector<datum,A>>>
    response_data;
  std::vector<Request> response_reqs;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(105);

  auto gather_functor =
    [&comm, &gather_data, &act_on_data,
     &response_data, &response_reqs, &tag]
    (processor_id_type pid, query_type query)
    {
      gather_data(pid, query, response_data[pid]);
      libmesh_assert_equal_to(query.size(),
                              response_data[pid].size());

      // Just act on data if the user requested a send-to-self
      if (pid == comm.rank())
        {
          act_on_data(pid, query, response_data[pid]);
        }
      else
        {
          Request sendreq;
          comm.send(pid, response_data[pid], sendreq, tag);
          response_reqs.push_back(sendreq);
        }
    };

  push_parallel_vector_data (comm, queries, reqs, gather_functor);

  // Every outgoing query should now have an incoming response.
  //
  // Post all of the receives.
  //
  // Use blocking API here since we can't use the pre-sized
  // non-blocking APIs with this data type.
  //
  // FIXME - implement Derek's API from #1684, switch to that!
  std::vector<Request> receive_reqs;
  std::vector<processor_id_type> receive_procids;
  for (std::size_t i = 0,
         n_queries = queries.size() - queries.count(comm.rank());
       i != n_queries; ++i)
    {
      Status stat(comm.probe(any_source, tag));
      const processor_id_type
        proc_id = cast_int<processor_id_type>(stat.source());

      std::vector<std::vector<datum,A>> received_data;
      comm.receive(proc_id, received_data, tag);

      libmesh_assert(queries.count(proc_id));
      auto & querydata = queries.at(proc_id);
      libmesh_assert_equal_to(querydata.size(), received_data.size());
      act_on_data(proc_id, querydata, received_data);
    }

  wait(response_reqs);
}

◆ push_parallel_vector_data() [1/6]

template<typename MapToVectors , typename RequestContainer , typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator &  comm,
const MapToVectors &  data,
RequestContainer &  reqs,
ActionFunctor &  act_on_data 
)

Send and receive and act on vectors of data.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of data to send.

Data which is received from other processors will be operated on by act_on_data(processor_id_type pid, const std::vector<datum> & data)

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All receives and actions are completed before this function returns.

Not all sends may have completed yet. The supplied container of Request objects, reqs, has more requests inserted, one for each of the data sends. These requests must be waited on before the data map is deleted.

Definition at line 239 of file parallel_sync.h.

References libMesh::Parallel::Communicator::alltoall(), data, libMesh::Parallel::Communicator::get_unique_tag(), libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), libMesh::Parallel::Communicator::size(), and waitany().

Referenced by pull_parallel_vector_data(), and push_parallel_vector_data().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  processor_id_type num_procs = comm.size();

  // Size of vectors to send to each processor
  std::vector<std::size_t> will_send_to(num_procs, 0);
  processor_id_type num_sends = 0;
  for (auto & datapair : data)
    {
      // Don't try to send anywhere that doesn't exist
      libmesh_assert_less(datapair.first, num_procs);

      // Don't give us empty vectors to send
      libmesh_assert_greater(datapair.second.size(), 0);

      will_send_to[datapair.first] = datapair.second.size();
      num_sends++;
    }

  // Tell everyone about where everyone will send to
  comm.alltoall(will_send_to);

  // will_send_to now represents who we'll receive from
  // give it a good name
  auto & will_receive_from = will_send_to;

  // This function only works for "flat" data that we can pre-size
  // receive buffers for: a map to vectors-of-standard-types, not e.g.
  // vectors-of-vectors.
  //
  // Trying to instantiate a StandardType<T> gives us a compiler error
  // where otherwise we would have had a runtime error.
  //
  // Creating a StandardType<T> manually also saves our APIs from
  // having to do a bunch of automatic creations later.
  //
  // This object will be free'd before all non-blocking communications
  // complete, but the MPI standard for MPI_Type_free specifies "Any
  // communication that is currently using this datatype will
  // complete normally." so we're cool.
  typedef decltype(data.begin()->second.front()) ref_type;
  typedef typename std::remove_reference<ref_type>::type nonref_type;
  StandardType<typename std::remove_const<nonref_type>::type> datatype;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(1225);

  MapToVectors received_data;

  // Post all of the sends, non-blocking
  for (auto & datapair : data)
    {
      processor_id_type destid = datapair.first;
      libmesh_assert_less(destid, num_procs);
      auto & datum = datapair.second;

      // Just act on data if the user requested a send-to-self
      if (destid == comm.rank())
        act_on_data(destid, datum);
      else
        {
          Request sendreq;
          comm.send(destid, datum, datatype, sendreq, tag);
          reqs.insert(reqs.end(), sendreq);
        }
    }

  // Post all of the receives, non-blocking
  std::vector<Request> receive_reqs;
  std::vector<processor_id_type> receive_procids;
  for (processor_id_type proc_id = 0; proc_id < num_procs; proc_id++)
    if (will_receive_from[proc_id] && proc_id != comm.rank())
      {
        Request req;
        auto & incoming_data = received_data[proc_id];
        incoming_data.resize(will_receive_from[proc_id]);
        comm.receive(proc_id, incoming_data, datatype, req, tag);
        receive_reqs.push_back(req);
        receive_procids.push_back(proc_id);
      }

  while(receive_reqs.size())
    {
      std::size_t completed = waitany(receive_reqs);
      processor_id_type proc_id = receive_procids[completed];
      receive_reqs.erase(receive_reqs.begin() + completed);
      receive_procids.erase(receive_procids.begin() + completed);

      act_on_data(proc_id, received_data[proc_id]);
      received_data.erase(proc_id);
    }
}
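
A usage sketch of the fully blocking overload (note_id_from() is hypothetical application code): push a vector of dof ids to each destination and act on whatever arrives.

std::map<processor_id_type, std::vector<dof_id_type>> data_to_send;
// ... fill data_to_send[pid] with the ids destined for processor pid ...

auto action_functor =
  [] (processor_id_type pid,
      const std::vector<dof_id_type> & received)
  {
    // Called once per sending processor, in arrival order.
    for (const dof_id_type id : received)
      note_id_from(pid, id);   // hypothetical
  };

// Collective: every processor must call this, even with an empty map.
Parallel::push_parallel_vector_data (comm, data_to_send, action_functor);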

◆ push_parallel_vector_data() [2/6]

template<typename MapToVectors , typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator &  comm,
const MapToVectors &  data,
ActionFunctor &  act_on_data 
)

Send and receive and act on vectors of data.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of data to send.

Data which is received from other processors will be operated on by act_on_data(processor_id_type pid, const std::vector<datum> & data);

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All communication and actions are complete when this function returns.

Definition at line 435 of file parallel_sync.h.

References data, push_parallel_vector_data(), and wait().

{
  std::vector<Request> requests;

  push_parallel_vector_data(comm, data, requests, act_on_data);

  wait(requests);
}

◆ push_parallel_vector_data() [3/6]

template<template< typename, typename, typename ... > class MapType, typename KeyType , typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename RequestContainer , typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator &  comm,
const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &  data,
RequestContainer &  reqs,
ActionFunctor &  act_on_data 
)

Definition at line 348 of file parallel_sync.h.

References libMesh::Parallel::Communicator::alltoall(), any_source, data, libMesh::Parallel::Communicator::get_unique_tag(), libMesh::Parallel::Communicator::probe(), libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), and libMesh::Parallel::Communicator::size().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  processor_id_type num_procs = comm.size();

  // Size of vectors to send to each processor
  std::vector<std::size_t> will_send_to(num_procs, 0);
  processor_id_type num_sends = 0;
  for (auto & datapair : data)
    {
      // Don't try to send anywhere that doesn't exist
      libmesh_assert_less(datapair.first, num_procs);

      // Don't give us empty vectors to send
      libmesh_assert_greater(datapair.second.size(), 0);

      will_send_to[datapair.first] = datapair.second.size();
      num_sends++;
    }

  // Tell everyone about where everyone will send to
  comm.alltoall(will_send_to);

  // will_send_to now represents who we'll receive from
  // give it a good name
  auto & will_receive_from = will_send_to;

  processor_id_type n_receives = 0;
  for (processor_id_type proc_id = 0; proc_id < num_procs; proc_id++)
    if (will_receive_from[proc_id])
      n_receives++;

  // We'll construct a datatype once for repeated use
  StandardType<ValueType> datatype;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(1225);

  // Post all of the sends, non-blocking
  for (auto & datapair : data)
    {
      processor_id_type destid = datapair.first;
      libmesh_assert_less(destid, num_procs);
      auto & datum = datapair.second;

      // Just act on data if the user requested a send-to-self
      if (destid == comm.rank())
        {
          act_on_data(destid, datum);
          n_receives--;
        }
      else
        {
          Request sendreq;
          comm.send(destid, datum, datatype, sendreq, tag);
          reqs.insert(reqs.end(), sendreq);
        }
    }

  // Post all of the receives.
  //
  // Use blocking API here since we can't use the pre-sized
  // non-blocking APIs with this data type.
  //
  // FIXME - implement Derek's API from #1684, switch to that!
  for (processor_id_type i = 0; i != n_receives; ++i)
    {
      Status stat(comm.probe(any_source, tag));
      const processor_id_type
        proc_id = cast_int<processor_id_type>(stat.source());

      std::vector<std::vector<ValueType,A1>,A2> received_data;
      comm.receive(proc_id, received_data, datatype, tag);
      act_on_data(proc_id, received_data);
    }
}

◆ push_parallel_vector_data() [4/6]

template<template< typename, typename, typename ... > class MapType, typename KeyType , typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator &  comm,
const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &  data,
ActionFunctor &  act_on_data 
)

Definition at line 454 of file parallel_sync.h.

References data, push_parallel_vector_data(), and wait().

{
  std::vector<Request> requests;

  push_parallel_vector_data(comm, data, requests, act_on_data);

  wait(requests);
}

◆ push_parallel_vector_data() [5/6]

template<template< typename, typename, typename ... > class MapType, typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename RequestContainer , typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator &  comm,
const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &  data,
RequestContainer &  reqs,
ActionFunctor &  act_on_data 
)

Definition at line 348 of file parallel_sync.h.

References libMesh::Parallel::Communicator::alltoall(), any_source, data, libMesh::Parallel::Communicator::get_unique_tag(), libMesh::Parallel::Communicator::probe(), libMesh::Parallel::Communicator::rank(), libMesh::Parallel::Communicator::receive(), libMesh::Parallel::Communicator::send(), and libMesh::Parallel::Communicator::size().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  processor_id_type num_procs = comm.size();

  // Size of vectors to send to each processor
  std::vector<std::size_t> will_send_to(num_procs, 0);
  processor_id_type num_sends = 0;
  for (auto & datapair : data)
    {
      // Don't try to send anywhere that doesn't exist
      libmesh_assert_less(datapair.first, num_procs);

      // Don't give us empty vectors to send
      libmesh_assert_greater(datapair.second.size(), 0);

      will_send_to[datapair.first] = datapair.second.size();
      num_sends++;
    }

  // Tell everyone about where everyone will send to
  comm.alltoall(will_send_to);

  // will_send_to now represents who we'll receive from
  // give it a good name
  auto & will_receive_from = will_send_to;

  processor_id_type n_receives = 0;
  for (processor_id_type proc_id = 0; proc_id < num_procs; proc_id++)
    if (will_receive_from[proc_id])
      n_receives++;

  // We'll construct a datatype once for repeated use
  StandardType<ValueType> datatype;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag(1225);

  // Post all of the sends, non-blocking
  for (auto & datapair : data)
    {
      processor_id_type destid = datapair.first;
      libmesh_assert_less(destid, num_procs);
      auto & datum = datapair.second;

      // Just act on data if the user requested a send-to-self
      if (destid == comm.rank())
        {
          act_on_data(destid, datum);
          n_receives--;
        }
      else
        {
          Request sendreq;
          comm.send(destid, datum, datatype, sendreq, tag);
          reqs.insert(reqs.end(), sendreq);
        }
    }

  // Post all of the receives.
  //
  // Use blocking API here since we can't use the pre-sized
  // non-blocking APIs with this data type.
  //
  // FIXME - implement Derek's API from #1684, switch to that!
  for (processor_id_type i = 0; i != n_receives; ++i)
    {
      Status stat(comm.probe(any_source, tag));
      const processor_id_type
        proc_id = cast_int<processor_id_type>(stat.source());

      std::vector<std::vector<ValueType,A1>,A2> received_data;
      comm.receive(proc_id, received_data, datatype, tag);
      act_on_data(proc_id, received_data);
    }
}

◆ push_parallel_vector_data() [6/6]

template<template< typename, typename, typename ... > class MapType, typename ValueType , typename A1 , typename A2 , typename ... ExtraTypes, typename ActionFunctor >
void libMesh::Parallel::push_parallel_vector_data ( const Communicator &  comm,
const MapType< processor_id_type, std::vector< std::vector< ValueType, A1 >, A2 >, ExtraTypes... > &  data,
ActionFunctor &  act_on_data 
)

Definition at line 454 of file parallel_sync.h.

References data, push_parallel_vector_data(), and wait().

{
  std::vector<Request> requests;

  push_parallel_vector_data(comm, data, requests, act_on_data);

  wait(requests);
}

◆ sync_dofobject_data_by_id() [1/2]

template<typename Iterator , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_id ( const Communicator &  comm,
const Iterator &  range_begin,
const Iterator &  range_end,
SyncFunctor &  sync 
)

Request data about a range of ghost dofobjects uniquely identified by their id.

Fulfill requests with sync.gather_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data), by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<dof_id_type> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.

Definition at line 336 of file parallel_ghost_sync.h.

{
  sync_dofobject_data_by_id(comm, range_begin, range_end, SyncEverything(), sync);
}

◆ sync_dofobject_data_by_id() [2/2]

template<typename Iterator , typename DofObjectCheckFunctor , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_id ( const Communicator &  comm,
const Iterator &  range_begin,
const Iterator &  range_end,
const DofObjectCheckFunctor &  dofobj_check,
SyncFunctor &  sync 
)

Request data about a range of ghost dofobjects uniquely identified by their id.

Elements within the range can be excluded from the request by returning false from dofobj_check(dof_object).

Definition at line 347 of file parallel_ghost_sync.h.

References data, libMesh::DofObject::id(), libMesh::DofObject::invalid_processor_id, libMesh::DofObject::processor_id(), pull_parallel_vector_data(), libMesh::Parallel::Communicator::rank(), and libMesh::Parallel::Communicator::size().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  // Count the objects to ask each processor about
  std::vector<dof_id_type>
    ghost_objects_from_proc(comm.size(), 0);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObject * obj = *it;
      libmesh_assert (obj);

      // We may want to pass Elem* or Node* to the check function, not
      // just DofObject*
      if (!dofobj_check(*it))
        continue;

      processor_id_type obj_procid = obj->processor_id();
      if (obj_procid != DofObject::invalid_processor_id)
        ghost_objects_from_proc[obj_procid]++;
    }

  // Request sets to send to each processor
  std::map<processor_id_type, std::vector<dof_id_type>>
    requested_objs_id;

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type p=0; p != comm.size(); ++p)
    if (p != comm.rank() && ghost_objects_from_proc[p])
      requested_objs_id[p].reserve(ghost_objects_from_proc[p]);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObject * obj = *it;

      if (!dofobj_check(*it))
        continue;

      processor_id_type obj_procid = obj->processor_id();
      if (obj_procid == comm.rank() ||
          obj_procid == DofObject::invalid_processor_id)
        continue;

      requested_objs_id[obj_procid].push_back(obj->id());
    }

  auto gather_functor =
    [&sync]
    (processor_id_type, const std::vector<dof_id_type> & ids,
     std::vector<typename SyncFunctor::datum> & data)
    {
      sync.gather_data(ids, data);
    };

  auto action_functor =
    [&sync]
    (processor_id_type, const std::vector<dof_id_type> & ids,
     const std::vector<typename SyncFunctor::datum> & data)
    {
      // Let the user process the results
      sync.act_on_data(ids, data);
    };

  // Trade requests with other processors
  typename SyncFunctor::datum * ex = nullptr;
  pull_parallel_vector_data
    (comm, requested_objs_id, gather_functor, action_functor, ex);
}
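
For reference, a minimal SyncFunctor sketch satisfying the interface described above (hypothetical; my_local_value() and set_my_value() stand in for application storage), synchronizing one double per ghost object:

struct SyncMyData
{
  typedef double datum;   // non-built-in types also need a StandardType

  // Owners fill in the requested values, one per id.
  void gather_data (const std::vector<dof_id_type> & ids,
                    std::vector<datum> & data)
  {
    data.resize(ids.size());
    for (std::size_t i = 0; i != ids.size(); ++i)
      data[i] = my_local_value(ids[i]);   // hypothetical
  }

  // Requesters store the returned values.
  void act_on_data (const std::vector<dof_id_type> & ids,
                    const std::vector<datum> & data)
  {
    for (std::size_t i = 0; i != ids.size(); ++i)
      set_my_value(ids[i], data[i]);      // hypothetical
  }
};

// Called collectively on every processor, e.g. over the local nodes:
//   SyncMyData sync;
//   Parallel::sync_dofobject_data_by_id
//     (mesh.comm(), mesh.nodes_begin(), mesh.nodes_end(), sync);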

◆ sync_dofobject_data_by_xyz() [1/2]

template<typename Iterator , typename DofObjType , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_xyz ( const Communicator &  comm,
const Iterator &  range_begin,
const Iterator &  range_end,
LocationMap< DofObjType > *  location_map,
SyncFunctor &  sync 
)

Request data about a range of ghost nodes uniquely identified by their xyz location or a range of active ghost elements uniquely identified by their centroids' xyz location.

Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type. The user-provided location_map will be used and left unchanged if it is provided, or filled and cleared if it is empty.

◆ sync_dofobject_data_by_xyz() [2/2]

template<typename Iterator , typename DofObjType , typename SyncFunctor >
void libMesh::Parallel::sync_dofobject_data_by_xyz ( const Communicator &  comm,
const Iterator &  range_begin,
const Iterator &  range_end,
LocationMap< DofObjType > &  location_map,
SyncFunctor &  sync 
)

Definition at line 232 of file parallel_ghost_sync.h.

References data, libMesh::LocationMap< T >::empty(), libMesh::LocationMap< T >::find(), libMesh::DofObject::invalid_processor_id, libMesh::Parallel::Communicator::max(), libMesh::LocationMap< T >::point_of(), pull_parallel_vector_data(), libMesh::Parallel::Communicator::rank(), and libMesh::Parallel::Communicator::size().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  // We need a valid location_map
#ifdef DEBUG
  bool need_map_update = (range_begin != range_end && location_map.empty());
  comm.max(need_map_update);
  libmesh_assert(!need_map_update);
#endif

  // Count the objects to ask each processor about
  std::vector<dof_id_type>
    ghost_objects_from_proc(comm.size(), 0);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObjType * obj = *it;
      libmesh_assert (obj);
      processor_id_type obj_procid = obj->processor_id();
      if (obj_procid != DofObject::invalid_processor_id)
        ghost_objects_from_proc[obj_procid]++;
    }

  // Request sets to send to each processor
  std::map<processor_id_type, std::vector<Point>>
    requested_objs_pt;
  // Corresponding ids to keep track of
  std::map<processor_id_type, std::vector<dof_id_type>>
    requested_objs_id;

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type p=0; p != comm.size(); ++p)
    if (p != comm.rank() && ghost_objects_from_proc[p])
      {
        requested_objs_pt[p].reserve(ghost_objects_from_proc[p]);
        requested_objs_id[p].reserve(ghost_objects_from_proc[p]);
      }

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      DofObjType * obj = *it;
      processor_id_type obj_procid = obj->processor_id();
      if (obj_procid == comm.rank() ||
          obj_procid == DofObject::invalid_processor_id)
        continue;

      Point p = location_map.point_of(*obj);
      requested_objs_pt[obj_procid].push_back(p);
      requested_objs_id[obj_procid].push_back(obj->id());
    }

  auto gather_functor =
    [&location_map, &sync]
    (processor_id_type /*pid*/, const std::vector<Point> & pts,
     std::vector<typename SyncFunctor::datum> & data)
    {
      // Find the local id of each requested object
      std::size_t query_size = pts.size();
      std::vector<dof_id_type> query_id(query_size);
      for (std::size_t i=0; i != query_size; ++i)
        {
          Point pt = pts[i];

          // Look for this object in the multimap
          DofObjType * obj = location_map.find(pt);

          // We'd better find every object we're asked for
          libmesh_assert (obj);

          // Return the object's correct processor id,
          // and our (correct if it's local) id for it.
          query_id[i] = obj->id();
        }

      // Gather whatever data the user wants
      sync.gather_data(query_id, data);
    };

  auto action_functor =
    [&sync, &requested_objs_id]
    (processor_id_type pid, const std::vector<Point> &,
     const std::vector<typename SyncFunctor::datum> & data)
    {
      // Let the user process the results
      sync.act_on_data(requested_objs_id[pid], data);
    };

  // Trade requests with other processors
  typename SyncFunctor::datum * ex = nullptr;
  pull_parallel_vector_data
    (comm, requested_objs_pt, gather_functor, action_functor, ex);
}
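A hypothetical call site for the overload above, assuming the SyncProcIds sketch from earlier and a pre-filled LocationMap (in DEBUG this overload asserts a non-empty map whenever the range is non-empty):

LocationMap<Node> loc_map;
loc_map.init(mesh); // fill the map up front; this overload requires it
SyncProcIds sync(mesh);
Parallel::sync_dofobject_data_by_xyz
  (mesh.comm(), mesh.nodes_begin(), mesh.nodes_end(), loc_map, sync);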

◆ sync_element_data_by_parent_id()

template<typename Iterator , typename SyncFunctor >
void libMesh::Parallel::sync_element_data_by_parent_id ( MeshBase &  mesh,
const Iterator &  range_begin,
const Iterator &  range_end,
SyncFunctor &  sync 
)

Request data about a range of ghost elements uniquely identified by their parent id and which child they are.

Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), by resizing and setting the values of the data vector. Respond to fulfillment with sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data). The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.
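
A hypothetical call site; GatherChildData stands in for a user functor implementing the gather_data()/act_on_data() interface described above, and the iterator names are one plausible choice of range:

// GatherChildData is illustrative, not a libMesh class.
GatherChildData sync(mesh);
Parallel::sync_element_data_by_parent_id
  (mesh,
   mesh.active_not_local_elements_begin(),
   mesh.active_not_local_elements_end(),
   sync);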

Definition at line 429 of file parallel_ghost_sync.h.

References libMesh::Elem::active(), libMesh::Elem::child_ptr(), libMesh::ParallelObject::comm(), data, libMesh::MeshBase::elem_ref(), libMesh::Elem::has_children(), libMesh::DofObject::id(), libMesh::DofObject::invalid_processor_id, libMesh::Elem::parent(), libMesh::DofObject::processor_id(), pull_parallel_vector_data(), and libMesh::Elem::which_child_am_i().

{
  const Communicator & comm (mesh.comm());

  // This function must be run on all processors at once
  libmesh_parallel_only(comm);

  // Count the objects to ask each processor about
  std::vector<dof_id_type>
    ghost_objects_from_proc(comm.size(), 0);

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      Elem * elem = *it;
      processor_id_type obj_procid = elem->processor_id();
      if (obj_procid == comm.rank() ||
          obj_procid == DofObject::invalid_processor_id)
        continue;
      const Elem * parent = elem->parent();
      if (!parent || !elem->active())
        continue;

      ghost_objects_from_proc[obj_procid]++;
    }

  // Request sets to send to each processor
  std::map<processor_id_type, std::vector<dof_id_type>>
    requested_objs_id;
  std::map<processor_id_type, std::vector<std::pair<dof_id_type,unsigned char>>>
    requested_objs_parent_id_child_num;

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type p=0; p != comm.size(); ++p)
    if (p != comm.rank() && ghost_objects_from_proc[p])
      {
        requested_objs_id[p].reserve(ghost_objects_from_proc[p]);
        requested_objs_parent_id_child_num[p].reserve(ghost_objects_from_proc[p]);
      }

  for (Iterator it = range_begin; it != range_end; ++it)
    {
      Elem * elem = *it;
      processor_id_type obj_procid = elem->processor_id();
      if (obj_procid == comm.rank() ||
          obj_procid == DofObject::invalid_processor_id)
        continue;
      const Elem * parent = elem->parent();
      if (!parent || !elem->active())
        continue;

      requested_objs_id[obj_procid].push_back(elem->id());
      requested_objs_parent_id_child_num[obj_procid].push_back
        (std::make_pair
          (parent->id(),
           cast_int<unsigned char>
             (parent->which_child_am_i(elem))));
    }

  auto gather_functor =
    [&mesh, &sync]
    (processor_id_type,
     const std::vector<std::pair<dof_id_type, unsigned char>> & parent_id_child_num,
     std::vector<typename SyncFunctor::datum> & data)
    {
      // Find the id of each requested element
      std::size_t query_size = parent_id_child_num.size();
      std::vector<dof_id_type> query_id(query_size);
      for (std::size_t i=0; i != query_size; ++i)
        {
          Elem & parent = mesh.elem_ref(parent_id_child_num[i].first);
          libmesh_assert(parent.has_children());
          Elem * child = parent.child_ptr(parent_id_child_num[i].second);
          libmesh_assert(child);
          libmesh_assert(child->active());
          query_id[i] = child->id();
        }

      // Gather whatever data the user wants
      sync.gather_data(query_id, data);
    };

  auto action_functor =
    [&sync, &requested_objs_id]
    (processor_id_type pid,
     const std::vector<std::pair<dof_id_type, unsigned char>> &,
     const std::vector<typename SyncFunctor::datum> & data)
    {
      // Let the user process the results
      sync.act_on_data(requested_objs_id[pid], data);
    };

  // Trade requests with other processors
  typename SyncFunctor::datum * ex = nullptr;
  pull_parallel_vector_data
    (comm, requested_objs_parent_id_child_num, gather_functor,
     action_functor, ex);
}

◆ sync_node_data_by_element_id()

template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
void libMesh::Parallel::sync_node_data_by_element_id ( MeshBase &  mesh,
const MeshBase::const_element_iterator &  range_begin,
const MeshBase::const_element_iterator &  range_end,
const ElemCheckFunctor &  elem_check,
const NodeCheckFunctor &  node_check,
SyncFunctor &  sync 
)

Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, iterating until data is completely in sync and further synchronization passes cause no changes.

Imagine a vertex surrounded by triangles, each on a different processor, with a ghosting policy that includes only face neighbors and not point neighbors. Then the only way for authoritative information to trickle out from that vertex is to be passed along, one neighbor at a time, to processors most of which don't even see the node's true owner!

Data for all nodes connected to elements in the given range of element iterators will be requested.

Elements can be further excluded from the request by returning false from elem_check(elem).

Nodes can be further excluded from the request by returning false from node_check(elem, local_node_num).

Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), by resizing and setting the values of the data vector. Respond to fulfillment with bool sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), which should return true iff the response changed any data.

The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.
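
A minimal sketch of the two check functors, under the assumption that we only want active elements and only nodes carrying a boundary id; neither struct name is part of libMesh:

// Illustrative checks. elem_check receives the element; node_check
// receives the element and a local node number.
struct ActiveElemCheck
{
  bool operator()(const Elem * elem) const
  { return elem->active(); }
};

struct BoundaryNodeCheck
{
  const BoundaryInfo & boundary_info;
  explicit BoundaryNodeCheck(const BoundaryInfo & bi) : boundary_info(bi) {}

  bool operator()(const Elem * elem, unsigned int local_node_num) const
  { return boundary_info.n_boundary_ids(elem->node_ptr(local_node_num)) > 0; }
};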

Definition at line 690 of file parallel_ghost_sync.h.

References libMesh::ParallelObject::comm(), and sync_node_data_by_element_id_once().

{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  bool need_sync = false;

  do
    {
      need_sync =
        sync_node_data_by_element_id_once
          (mesh, range_begin, range_end, elem_check, node_check,
           sync);
    } while (need_sync);
}

◆ sync_node_data_by_element_id_once()

template<typename ElemCheckFunctor , typename NodeCheckFunctor , typename SyncFunctor >
bool libMesh::Parallel::sync_node_data_by_element_id_once ( MeshBase &  mesh,
const MeshBase::const_element_iterator &  range_begin,
const MeshBase::const_element_iterator &  range_end,
const ElemCheckFunctor &  elem_check,
const NodeCheckFunctor &  node_check,
SyncFunctor &  sync 
)

Synchronize data about a range of ghost nodes uniquely identified by an element id and local node id, assuming a single synchronization pass is sufficient.

Data for all nodes connected to elements in the given range of element iterators will be requested.

Elements can be further excluded from the request by returning false from elem_check(elem).

Nodes can be further excluded from the request by returning false from node_check(elem, local_node_num).

Fulfill requests with sync.gather_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), by resizing and setting the values of the data vector. Respond to fulfillment with bool sync.act_on_data(const std::vector<unsigned int> & ids, std::vector<sync::datum> & data), which should return true iff the response changed any data.

The user must define Parallel::StandardType<sync::datum> if sync::datum isn't a built-in type.

This method returns true iff the sync pass changed any data on any processor.
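
For the iterative variant above to terminate, act_on_data must report honestly whether anything changed. A sketch of such a member, assuming a node-owned processor-id payload as in the earlier SyncProcIds example:

// Returns true iff any local ghost copy was actually updated, so the
// caller's do/while loop knows another pass may be needed.
bool act_on_data(const std::vector<dof_id_type> & ids,
                 const std::vector<datum> & data)
{
  bool changed = false;
  for (std::size_t i = 0; i != ids.size(); ++i)
    if (mesh.node_ref(ids[i]).processor_id() != data[i])
      {
        mesh.node_ref(ids[i]).processor_id() = data[i];
        changed = true;
      }
  return changed;
}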

Definition at line 546 of file parallel_ghost_sync.h.

References libMesh::as_range(), libMesh::ParallelObject::comm(), data, libMesh::MeshBase::elem_ref(), libMesh::DofObject::id(), libMesh::DofObject::invalid_processor_id, libMesh::Elem::n_nodes(), libMesh::Elem::node_ref(), and pull_parallel_vector_data().

Referenced by sync_node_data_by_element_id().

{
  const Communicator & comm (mesh.comm());

  // Count the objects to ask each processor about
  std::vector<dof_id_type>
    ghost_objects_from_proc(comm.size(), 0);

  for (const auto & elem : as_range(range_begin, range_end))
    {
      libmesh_assert (elem);

      if (!elem_check(elem))
        continue;

      const processor_id_type proc_id = elem->processor_id();
      if (proc_id == comm.rank() ||
          proc_id == DofObject::invalid_processor_id)
        continue;

      for (auto n : elem->node_index_range())
        {
          if (!node_check(elem, n))
            continue;

          ghost_objects_from_proc[proc_id]++;
        }
    }

  // Now repeat that iteration, filling request sets this time.

  // Request sets to send to each processor
  std::map<processor_id_type, std::vector<std::pair<dof_id_type, unsigned char>>>
    requested_objs_elem_id_node_num;

  // Keep track of current local ids for each too
  std::map<processor_id_type, std::vector<dof_id_type>>
    requested_objs_id;

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type p=0; p != comm.size(); ++p)
    if (p != comm.rank() && ghost_objects_from_proc[p])
      {
        requested_objs_elem_id_node_num[p].reserve(ghost_objects_from_proc[p]);
        requested_objs_id[p].reserve(ghost_objects_from_proc[p]);
      }

  for (const auto & elem : as_range(range_begin, range_end))
    {
      libmesh_assert (elem);

      if (!elem_check(elem))
        continue;

      const processor_id_type proc_id = elem->processor_id();
      if (proc_id == comm.rank() ||
          proc_id == DofObject::invalid_processor_id)
        continue;

      const dof_id_type elem_id = elem->id();

      for (auto n : elem->node_index_range())
        {
          if (!node_check(elem, n))
            continue;

          const Node & node = elem->node_ref(n);
          const dof_id_type node_id = node.id();

          requested_objs_elem_id_node_num[proc_id].push_back
            (std::make_pair
              (elem_id,
               cast_int<unsigned char>(n)));
          requested_objs_id[proc_id].push_back(node_id);
        }
    }

  auto gather_functor =
    [&mesh, &sync]
    (processor_id_type,
     const std::vector<std::pair<dof_id_type, unsigned char>> & elem_id_node_num,
     std::vector<typename SyncFunctor::datum> & data)
    {
      // Find the id of each requested element
      std::size_t request_size = elem_id_node_num.size();
      std::vector<dof_id_type> query_id(request_size);
      for (std::size_t i=0; i != request_size; ++i)
        {
          const Elem & elem = mesh.elem_ref(elem_id_node_num[i].first);

          const unsigned int n = elem_id_node_num[i].second;
          libmesh_assert_less (n, elem.n_nodes());

          const Node & node = elem.node_ref(n);

          // This isn't a safe assertion in the case where we're
          // syncing processor ids
          // libmesh_assert_equal_to (node->processor_id(), comm.rank());

          query_id[i] = node.id();
        }

      // Gather whatever data the user wants
      sync.gather_data(query_id, data);
    };

  bool data_changed = false;

  auto action_functor =
    [&sync, &requested_objs_id, &data_changed]
    (processor_id_type pid,
     const std::vector<std::pair<dof_id_type, unsigned char>> &,
     const std::vector<typename SyncFunctor::datum> & data)
    {
      // Let the user process the results. If any of the results
      // were different than what the user expected, then we may
      // need to sync again just in case this processor has to
      // pass on the changes to yet another processor.
      if (sync.act_on_data(requested_objs_id[pid], data))
        data_changed = true;
    };

  // Trade requests with other processors
  typename SyncFunctor::datum * ex = nullptr;
  pull_parallel_vector_data
    (comm, requested_objs_elem_id_node_num, gather_functor,
     action_functor, ex);

  comm.max(data_changed);

  return data_changed;
}

◆ unpack_range() [1/2]

template<typename Context , typename buffertype , typename OutputIter , typename T >
void libMesh::Parallel::unpack_range ( const typename std::vector< buffertype > &  buffer,
Context *  context,
OutputIter  out,
const T *  output_type 
)

◆ unpack_range() [2/2]

template<typename Context , typename buffertype , typename OutputIter , typename T >
void libMesh::Parallel::unpack_range ( const std::vector< buffertype > &  buffer,
Context *  context,
OutputIter  out_iter,
const T *   
)

Helper function for range unpacking.

Definition at line 194 of file packing.h.

References libMesh::Parallel::Packing< T >::packed_size(), and libMesh::Parallel::Packing< T >::unpack().

{
  // Loop through the buffer and unpack each object, returning the
  // object pointer via the output iterator
  typename std::vector<buffertype>::const_iterator
    next_object_start = buffer.begin();

  while (next_object_start < buffer.end())
    {
      *out_iter++ = Parallel::Packing<T>::unpack(next_object_start, context);
      next_object_start +=
        Parallel::Packing<T>::packed_size(next_object_start);
    }

  // We should have used up the exact amount of data in the buffer
  libmesh_assert (next_object_start == buffer.end());
}
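
A hypothetical round trip, assuming the buffer was filled by the matching Parallel::pack_range() call with the Packing<Elem *> specialization in play; the mesh serves as the unpacking context, and the trailing pointer argument only selects the output type T:

std::vector<largest_id_type> buffer;
// ... buffer filled elsewhere via Parallel::pack_range() ...
std::vector<Elem *> received;
Parallel::unpack_range(buffer, &mesh, std::back_inserter(received),
                       (Elem **)nullptr);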

◆ wait() [1/2]

Status libMesh::Parallel::wait ( Request &  r)

Wait for a non-blocking send or receive to finish.

Definition at line 129 of file request.h.

References libMesh::Parallel::Request::wait().

Referenced by pull_parallel_vector_data(), and push_parallel_vector_data().

{ return r.wait(); }
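
A minimal usage sketch, assuming an existing Communicator comm; a non-blocking receive is started via the overload taking a Request, then completed with wait():

Parallel::Request req;
std::vector<double> incoming;
comm.receive(Parallel::any_source, incoming, req);
// ... overlap communication with computation ...
Parallel::Status stat = Parallel::wait(req); // blocks until the receive finishes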

◆ wait() [2/2]

void libMesh::Parallel::wait ( std::vector< Request > &  r)

Wait for all non-blocking operations to finish.

◆ waitany()

std::size_t libMesh::Parallel::waitany ( std::vector< Request > &  r)

Wait for at least one non-blocking operation to finish.

Return the index of the request which completed.

Referenced by pull_parallel_vector_data(), and push_parallel_vector_data().
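
An illustrative pattern for draining several outstanding requests, under the assumption that a completed request is nulled out and skipped by subsequent waitany() calls (as with MPI_Waitany):

std::vector<Parallel::Request> reqs; // filled by earlier non-blocking calls
for (std::size_t n_pending = reqs.size(); n_pending; --n_pending)
  {
    const std::size_t completed = Parallel::waitany(reqs);
    // process the message matching reqs[completed] ...
  }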

Variable Documentation

◆ any_source

const unsigned int libMesh::Parallel::any_source
Initial value: static_cast<unsigned int>(MPI_ANY_SOURCE)

Processor id meaning "Accept from any source".

Definition at line 70 of file communicator.h.

Referenced by pull_parallel_vector_data(), and push_parallel_vector_data().

◆ any_tag

const MessageTag libMesh::Parallel::any_tag = MessageTag(MPI_ANY_TAG)

Default message tag ids.

Definition at line 115 of file message_tag.h.

◆ no_tag

const MessageTag libMesh::Parallel::no_tag = MessageTag(0)

Definition at line 120 of file message_tag.h.