TIMPI Namespace Reference

Namespaces

 detail
 

Classes

struct  Attributes
 
struct  BuildStandardTypeVector
 
struct  BuildStandardTypeVector< 0 >
 
struct  casting_compare
 
struct  CheckAllFixedTypes
 
struct  CheckAllFixedTypes< Head >
 
class  Communicator
 Encapsulates the MPI_Comm object. More...
 
struct  data_type
 
class  DataPlusInt
 Types combined with an int. More...
 
class  DataType
 Encapsulates the MPI_Datatype. More...
 
struct  FillDisplacementArray
 
struct  FillDisplacementArray< 0 >
 
struct  InnermostType
 
struct  InnermostType< std::list< T, A > >
 
struct  InnermostType< std::map< K, T, C, A > >
 
struct  InnermostType< std::multimap< K, T, C, A > >
 
struct  InnermostType< std::multiset< T, C, A > >
 
struct  InnermostType< std::set< T, C, A > >
 
struct  InnermostType< std::unordered_map< K, T, C, A > >
 
struct  InnermostType< std::unordered_multimap< K, T, C, A > >
 
struct  InnermostType< std::unordered_multiset< T, C, A > >
 
struct  InnermostType< std::unordered_set< T, C, A > >
 
struct  InnermostType< std::vector< T, A > >
 
class  ManageOp
 
class  ManageType
 
struct  MaybeADataType
 
struct  MaybeADataType< false >
 
class  MessageTag
 Encapsulates the MPI tag integers. More...
 
class  NotADataType
 StandardType<T>'s which do not define a way to MPI_Type T should inherit from this class. More...
 
class  OpFunction
 Templated class to provide the appropriate MPI reduction operations for use with built-in C types or simple C++ constructions. More...
 
class  OpFunction< std::pair< T, U > >
 
class  OpFunction< TIMPI_DEFAULT_SCALAR_TYPE >
 
struct  PostWaitCopyBuffer
 
struct  PostWaitDeleteBuffer
 
struct  PostWaitDereferenceSharedPtr
 
struct  PostWaitDereferenceTag
 
struct  PostWaitFreeBuffer
 
struct  PostWaitUnpackBuffer
 
struct  PostWaitUnpackNestedBuffer
 
struct  PostWaitWork
 An abstract base class that can be subclassed to allow other code to perform work after a MPI_Wait succeeds. More...
 
class  Request
 Encapsulates the MPI_Request. More...
 
class  SemiPermanent
 The SemiPermanent "class" is basically just a place for a destructor vtable. More...
 
class  StandardType
 Templated class to provide the appropriate MPI datatype for use with built-in C types or simple C++ constructions. More...
 
class  StandardType< std::array< T, N >, typename std::enable_if< StandardType< T >::is_fixed_type >::type >
 
class  StandardType< std::complex< T > >
 
class  StandardType< std::pair< T1, T2 >, typename std::enable_if< StandardType< typename std::remove_const< T1 >::type >::is_fixed_type &&StandardType< T2 >::is_fixed_type >::type >
 
class  StandardType< std::set< T > >
 
class  StandardType< std::tuple< Types... >, typename std::enable_if< CheckAllFixedTypes< Types... >::is_fixed_type >::type >
 
class  StandardType< TIMPI_DEFAULT_SCALAR_TYPE >
 
struct  standardtype_dependent_false
 
class  Status
 Encapsulates the MPI_Status struct. More...
 
struct  status
 
class  TIMPIInit
 The TIMPIInit class, when constructed, initializes any dependent libraries (e.g. More...
 

Typedefs

typedef uint8_t processor_id_type
 
typedef MPI_Info info
 Info object used by some MPI-3 methods. More...
 
typedef MPI_Datatype data_type
 Data types for communication. More...
 
typedef MPI_Aint DispType
 
typedef MPI_Request request
 Request object for non-blocking I/O. More...
 
typedef MPI_Status status
 Status object for querying messages. More...
 
typedef MPI_Count CountType
 

Functions

template<typename datum , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, GatherFunctor &gather_data, const ActionFunctor &act_on_data, const datum *example)
 Send query vectors, receive and answer them with vectors of data, then act on those answers. More...
 
template<typename datum , typename A , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, GatherFunctor &gather_data, ActionFunctor &act_on_data, const std::vector< datum, A > *example)
 
template<typename MapToContainers , typename ActionFunctor , typename Context >
void push_parallel_packed_range (const Communicator &comm, MapToContainers &&data, Context *context, const ActionFunctor &act_on_data)
 
template<typename datum , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void pull_parallel_vector_data (const Communicator &comm, const MapToVectors &queries, GatherFunctor &gather_data, ActionFunctor &act_on_data, const datum *)
 
 TIMPI_INT_TYPE (char)
 
 TIMPI_INT_TYPE (signed char)
 
 TIMPI_INT_TYPE (unsigned char)
 
 TIMPI_INT_TYPE (short int)
 
 TIMPI_INT_TYPE (unsigned short int)
 
 TIMPI_INT_TYPE (int)
 
 TIMPI_INT_TYPE (long)
 
 TIMPI_INT_TYPE (unsigned long long)
 
 TIMPI_FLOAT_TYPE (float)
 
 TIMPI_FLOAT_TYPE (double)
 
 TIMPI_FLOAT_TYPE (long double)
 
 TIMPI_FLOAT_TYPE (TIMPI_DEFAULT_SCALAR_TYPE)
 
template<typename T , typename C , typename A >
 TIMPI_CONTAINER_TYPE (std::set< T TIMPI_ATTRIBUTES_COMMA C TIMPI_ATTRIBUTES_COMMA A >)
 
template<typename T , typename A >
 TIMPI_CONTAINER_TYPE (std::vector< T TIMPI_ATTRIBUTES_COMMA A >)
 
 TIMPI_PARALLEL_INTEGER_OPS (char)
 
 TIMPI_PARALLEL_INTEGER_OPS (signed char)
 
 TIMPI_PARALLEL_INTEGER_OPS (unsigned char)
 
 TIMPI_PARALLEL_INTEGER_OPS (short int)
 
 TIMPI_PARALLEL_INTEGER_OPS (unsigned short int)
 
 TIMPI_PARALLEL_INTEGER_OPS (int)
 
 TIMPI_PARALLEL_INTEGER_OPS (long)
 
 TIMPI_PARALLEL_INTEGER_OPS (unsigned long long)
 
 TIMPI_PARALLEL_FLOAT_OPS (float)
 
 TIMPI_PARALLEL_FLOAT_OPS (double)
 
 TIMPI_PARALLEL_FLOAT_OPS (long double)
 
 TIMPI_PARALLEL_FLOAT_OPS (TIMPI_DEFAULT_SCALAR_TYPE)
 
template<typename Context , typename Iter >
std::size_t packed_range_size (const Context *context, Iter range_begin, const Iter range_end)
 Helper function for range packing. More...
 
template<typename Context , typename buffertype , typename Iter >
Iter pack_range (const Context *context, Iter range_begin, const Iter range_end, std::vector< buffertype > &buffer, std::size_t approx_buffer_size)
 Helper function for range packing. More...
 
template<typename Context , typename buffertype , typename OutputIter , typename T >
OutputIter unpack_range (const std::vector< buffertype > &buffer, Context *context, OutputIter out_iter, const T *)
 Helper function for range unpacking. More...
 
template<typename Context , typename buffertype , typename OutputIter , typename T >
OutputIter unpack_range (const typename std::vector< buffertype > &buffer, Context *context, OutputIter out_iter, const T *output_type)
 Decode a range of potentially-variable-size objects from a data array. More...
 
template<typename T >
data_type dataplusint_type ()
 Templated function to return the appropriate MPI datatype for use with built-in C types when combined with an int, or MPI_DATATYPE_NULL for types which have no predefined datatype. More...
 
template<>
data_type dataplusint_type< short int > ()
 
template<>
data_type dataplusint_type< int > ()
 
template<>
data_type dataplusint_type< long > ()
 
template<>
data_type dataplusint_type< float > ()
 
template<>
data_type dataplusint_type< double > ()
 
template<>
data_type dataplusint_type< long double > ()
 
template<>
data_type dataplusint_type< TIMPI_DEFAULT_SCALAR_TYPE > ()
 
template<typename T >
std::pair< data_type, std::unique_ptr< StandardType< std::pair< T, int > > > > dataplusint_type_acquire ()
 
Status wait (Request &r)
 Wait for a non-blocking send or receive to finish. More...
 
void wait (std::vector< Request > &r)
 Wait for all non-blocking operations to finish. More...
 
std::size_t waitany (std::vector< Request > &r)
 Wait for at least one non-blocking operation to finish. More...
 
template<typename T >
StandardType< T > build_standard_type (const T *example=nullptr)
 
template<typename T , typename A >
StandardType< typename InnermostType< T >::type > build_standard_type (const std::vector< T, A > *example=nullptr)
 
 TIMPI_STANDARD_TYPE (char, MPI_CHAR)
 
 TIMPI_STANDARD_TYPE (signed char, MPI_SIGNED_CHAR)
 
 TIMPI_STANDARD_TYPE (unsigned char, MPI_UNSIGNED_CHAR)
 
 TIMPI_STANDARD_TYPE (short int, MPI_SHORT)
 
 TIMPI_STANDARD_TYPE (unsigned short int, MPI_UNSIGNED_SHORT)
 
 TIMPI_STANDARD_TYPE (int, MPI_INT)
 
 TIMPI_STANDARD_TYPE (unsigned int, MPI_UNSIGNED)
 
 TIMPI_STANDARD_TYPE (long, MPI_LONG)
 
 TIMPI_STANDARD_TYPE (long long, MPI_LONG_LONG_INT)
 
 TIMPI_STANDARD_TYPE (unsigned long, MPI_UNSIGNED_LONG)
 
 TIMPI_STANDARD_TYPE (unsigned long long, MPI_UNSIGNED_LONG_LONG)
 
 TIMPI_STANDARD_TYPE (float, MPI_FLOAT)
 
 TIMPI_STANDARD_TYPE (double, MPI_DOUBLE)
 
 TIMPI_STANDARD_TYPE (long double, MPI_LONG_DOUBLE)
 
 TIMPI_STANDARD_TYPE (TIMPI_DEFAULT_SCALAR_TYPE,)
 
void report_here (const char *file, int line, const char *date, const char *time)
 
void report_error (const char *file, int line, const char *date, const char *time)
 
template<class ... Args>
void ignore (const Args &...)
 
template<class ... Args>
void timpi_ignore (const Args &...)
 
template<typename Tnew , typename Told >
Tnew cast_int (Told oldvar)
 
void timpi_version_stdout ()
 
std::string timpi_version_string ()
 
int get_timpi_version ()
 
template<typename MapToVectors , typename ActionFunctor , typename std::enable_if< std::is_base_of< DataType, StandardType< typename InnermostType< typename std::remove_const< typename std::remove_reference< MapToVectors >::type::mapped_type::value_type >::type >::type >>::value, int >::type = 0>
void push_parallel_vector_data (const Communicator &comm, MapToVectors &&data, const ActionFunctor &act_on_data)
 Send and receive and act on vectors of data. More...
 
template<typename MapToVectors , typename ActionFunctor , typename Context >
void push_parallel_packed_range (const Communicator &comm, MapToVectors &&data, Context *context, const ActionFunctor &act_on_data)
 Send and receive and act on vectors of data. More...
 

Variables

typedef MPI_Comm communicator
 Communicator object for talking with subsets of processors. More...
 
const unsigned int any_source
 Processor id meaning "Accept from any source". More...
 
const MessageTag any_tag = MessageTag(MPI_ANY_TAG)
 Default message tag ids. More...
 
const MessageTag no_tag = MessageTag(0)
 

Typedef Documentation

◆ CountType

typedef MPI_Count TIMPI::CountType

Definition at line 47 of file status.h.

◆ data_type

typedef MPI_Datatype TIMPI::data_type

Data types for communication.

Definition at line 33 of file data_type.h.

◆ DispType

typedef MPI_Aint TIMPI::DispType

Definition at line 222 of file parallel_implementation.h.

◆ info

typedef MPI_Info TIMPI::info

Info object used by some MPI-3 methods.

Definition at line 78 of file communicator.h.

◆ processor_id_type

typedef uint8_t TIMPI::processor_id_type

Definition at line 53 of file communicator.h.

◆ request

typedef MPI_Request TIMPI::request

Request object for non-blocking I/O.

Definition at line 41 of file request.h.

◆ status

typedef MPI_Status TIMPI::status

Status object for querying messages.

Definition at line 44 of file status.h.

Function Documentation

◆ build_standard_type() [1/2]

template<typename T >
StandardType<T> TIMPI::build_standard_type ( const T *  example = nullptr)

Definition at line 181 of file standard_type.h.

Referenced by build_standard_type(), and push_parallel_vector_data().

{
  StandardType<T> returnval(example);
  return returnval;
}

◆ build_standard_type() [2/2]

template<typename T , typename A >
StandardType<typename InnermostType<T>::type> TIMPI::build_standard_type ( const std::vector< T, A > *  example = nullptr)

Definition at line 195 of file standard_type.h.

References build_standard_type().

{
  const T * inner_example = (example && !example->empty()) ? &(*example)[0] : nullptr;
  return build_standard_type(inner_example);
}

◆ cast_int()

template<typename Tnew , typename Told >
Tnew TIMPI::cast_int ( Told  oldvar)
inline

Definition at line 297 of file timpi_assert.h.

{
  timpi_assert_equal_to
    (oldvar, static_cast<Told>(static_cast<Tnew>(oldvar)));

  return(static_cast<Tnew>(oldvar));
}

◆ dataplusint_type()

template<typename T >
data_type TIMPI::dataplusint_type ( )
inline

Templated function to return the appropriate MPI datatype for use with built-in C types when combined with an int, or MPI_DATATYPE_NULL for types which have no predefined datatype.
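
These "data plus int" pairings correspond to MPI's MINLOC/MAXLOC types, which reduce a value together with an integer payload (typically the owning rank). A minimal sketch of what such a type is used for, written against the raw MPI API rather than the TIMPI wrappers; the function and variable names are illustrative only:

#include <mpi.h>

// Find the global maximum of a per-rank value and which rank owns it, using
// the same MPI_DOUBLE_INT layout that dataplusint_type<double>() returns.
void max_and_owner(MPI_Comm comm, double my_value)
{
  int my_rank;
  MPI_Comm_rank(comm, &my_rank);

  struct DoubleInt { double val; int rank; };
  DoubleInt local = {my_value, my_rank};
  DoubleInt global = {0., 0};

  MPI_Allreduce(&local, &global, 1, MPI_DOUBLE_INT, MPI_MAXLOC, comm);
  // global.val is the maximum over all ranks; global.rank is the rank holding it.
}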

◆ dataplusint_type< double >()

template<>
data_type TIMPI::dataplusint_type< double > ( )
inline

Definition at line 193 of file parallel_implementation.h.

{ return MPI_DOUBLE_INT; }

◆ dataplusint_type< float >()

template<>
data_type TIMPI::dataplusint_type< float > ( )
inline

Definition at line 190 of file parallel_implementation.h.

{ return MPI_FLOAT_INT; }

◆ dataplusint_type< int >()

template<>
data_type TIMPI::dataplusint_type< int > ( )
inline

Definition at line 184 of file parallel_implementation.h.

{ return MPI_2INT; }

◆ dataplusint_type< long >()

template<>
data_type TIMPI::dataplusint_type< long > ( )
inline

Definition at line 187 of file parallel_implementation.h.

{ return MPI_LONG_INT; }

◆ dataplusint_type< long double >()

template<>
data_type TIMPI::dataplusint_type< long double > ( )
inline

Definition at line 196 of file parallel_implementation.h.

{ return MPI_LONG_DOUBLE_INT; }

◆ dataplusint_type< short int >()

template<>
data_type TIMPI::dataplusint_type< short int > ( )
inline

Definition at line 181 of file parallel_implementation.h.

{ return MPI_SHORT_INT; }

◆ dataplusint_type< TIMPI_DEFAULT_SCALAR_TYPE >()

template<>
data_type TIMPI::dataplusint_type< TIMPI_DEFAULT_SCALAR_TYPE > ( )
inline

Definition at line 201 of file parallel_implementation.h.

{ return MPI_DATATYPE_NULL; }

◆ dataplusint_type_acquire()

template<typename T >
std::pair<data_type, std::unique_ptr<StandardType<std::pair<T,int> > > > TIMPI::dataplusint_type_acquire ( )
inline

Definition at line 207 of file parallel_implementation.h.

{
  std::pair<data_type, std::unique_ptr<StandardType<std::pair<T,int>>>> return_val;
  return_val.first = dataplusint_type<T>();
  if (return_val.first == MPI_DATATYPE_NULL)
    {
      return_val.second.reset(new StandardType<std::pair<T,int>>());
      return_val.first = *return_val.second;
    }
  return return_val;
}

◆ get_timpi_version()

int TIMPI::get_timpi_version ( )

Definition at line 53 of file timpi_version.C.

Referenced by testVersionNumber(), and timpi_version_string().

{
  /* Note: return format follows the versioning convention xx.yy.zz where

     xx = major version number
     yy = minor version number
     zz = micro version number

     For example:
     v.  0.23   -> 002300 = 2300
     v   0.23.1 -> 002301 = 2301
     v. 10.23.2 -> 102302 */

  int major_version = 0;
  int minor_version = 0;
  int micro_version = 0;

#ifdef TIMPI_MAJOR_VERSION
  major_version = TIMPI_MAJOR_VERSION;
#endif

#ifdef TIMPI_MINOR_VERSION
  minor_version = TIMPI_MINOR_VERSION;
#endif

#ifdef TIMPI_MICRO_VERSION
  micro_version = TIMPI_MICRO_VERSION;
#endif

  return major_version*10000 + minor_version*100 + micro_version;
}

◆ ignore()

template<class ... Args>
void TIMPI::ignore ( const Args &  ...)
inline

◆ pack_range()

template<typename Context , typename buffertype , typename Iter >
Iter TIMPI::pack_range ( const Context *  context,
Iter  range_begin,
const Iter  range_end,
typename std::vector< buffertype > &  buffer,
std::size_t  approx_buffer_size = 1000000 
)
inline

Helper function for range packing.

Encode a range of potentially-variable-size objects to a data array.

The data will be buffered in vectors with lengths that do not exceed the sum of approx_buffer_size and the size of an individual packed object.

Definition at line 1044 of file packing.h.

References libMesh::Parallel::Packing< T, Enable >::pack(), libMesh::Parallel::Packing< T, Enable >::packable_size(), and libMesh::Parallel::Packing< T, Enable >::packed_size().

Referenced by TIMPI::Communicator::allgather_packed_range(), TIMPI::Communicator::broadcast_packed_range(), TIMPI::Communicator::gather_packed_range(), TIMPI::Communicator::nonblocking_send_packed_range(), TIMPI::Communicator::send_packed_range(), and TIMPI::Communicator::send_receive_packed_range().

{
  typedef typename std::iterator_traits<Iter>::value_type T;

  // Count the total size of and preallocate buffer for efficiency.
  // Prepare to stop early if the buffer would be too large.
  std::size_t buffer_size = 0;
  Iter range_stop = range_begin;
  for (; range_stop != range_end && buffer_size < approx_buffer_size;
       ++range_stop)
    {
      std::size_t next_buffer_size =
        Packing<T>::packable_size(*range_stop, context);
      buffer_size += next_buffer_size;
    }
  buffer.reserve(buffer.size() + buffer_size);

  // Pack the objects into the buffer
  for (; range_begin != range_stop; ++range_begin)
    {
#ifndef NDEBUG
      std::size_t old_size = buffer.size();
#endif

      Packing<T>::pack
        (*range_begin, std::back_inserter(buffer), context);

#ifndef NDEBUG
      unsigned int my_packable_size =
        Packing<T>::packable_size(*range_begin, context);
      unsigned int my_packed_size =
        Packing<T>::packed_size (buffer.begin() + old_size);
      timpi_assert_equal_to (my_packable_size, my_packed_size);
      timpi_assert_equal_to (buffer.size(), old_size + my_packable_size);
#endif
    }

  return range_stop;
}
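
Because pack_range() stops once approx_buffer_size is exceeded and returns the iterator it reached, a large range is normally packed in a loop. A minimal sketch of that pattern, not taken from the TIMPI sources; the process_buffer callback and the pack_in_chunks helper are hypothetical stand-ins for whatever consumes each chunk (e.g. a send):

#include <cstddef>
#include <vector>
#include "timpi/packing.h"

// Pack [range_begin, range_end) into size-limited chunks, handing each chunk
// to process_buffer before packing the next one.  buffertype must be given
// explicitly and must be compatible with the element type's Packing.
template <typename buffertype, typename Context, typename Iter, typename BufferConsumer>
void pack_in_chunks(const Context * context,
                    Iter range_begin, const Iter range_end,
                    std::size_t approx_buffer_size,
                    const BufferConsumer & process_buffer)
{
  while (range_begin != range_end)
    {
      std::vector<buffertype> buffer;
      range_begin = TIMPI::pack_range(context, range_begin, range_end,
                                      buffer, approx_buffer_size);
      process_buffer(buffer);
    }
}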

◆ packed_range_size()

template<typename Context , typename Iter >
std::size_t TIMPI::packed_range_size ( const Context *  context,
Iter  range_begin,
const Iter  range_end 
)
inline

Helper function for range packing.

Return the total buffer size needed to encode a range of potentially-variable-size objects to a data array.

Definition at line 1023 of file packing.h.

References libMesh::Parallel::Packing< T, Enable >::packable_size().

Referenced by TIMPI::Communicator::send_packed_range().

{
  typedef typename std::iterator_traits<Iter>::value_type T;

  std::size_t buffer_size = 0;
  for (Iter range_count = range_begin;
       range_count != range_end;
       ++range_count)
    {
      buffer_size += Packing<T>::packable_size(*range_count, context);
    }
  return buffer_size;
}

◆ pull_parallel_vector_data() [1/3]

template<typename datum , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void TIMPI::pull_parallel_vector_data ( const Communicator comm,
const MapToVectors &  queries,
GatherFunctor &  gather_data,
const ActionFunctor &  act_on_data,
const datum *  example 
)

Send query vectors, receive and answer them with vectors of data, then act on those answers.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of query ids to send. For processors to which no data should be sent, there should be no map entry; this will avoid any unnecessary communication. Unless NDEBUG is enabled, TIMPI will assert that no empty map entries exist. In any case empty map entries will not be gathered or acted on.

Queries will be operated on by the queried processor by gather_data(processor_id_type pid, const std::vector<id> & ids, std::vector<datum> & data)

Answer data from each query will be operated on by act_on_data(processor_id_type pid, const std::vector<id> & ids, std::vector<datum> && data);

If a query vector exists for the local processor in the map, gather_data will be called on it directly, and act_on_data will be called on the response directly, without any network operations.

The example pointer may be null; it merely needs to be of the correct type. It's just here because function overloading in C++ is easy, whereas SFINAE is hard and partial template specialization of functions is impossible.

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All receives and actions are completed before this function returns.

Referenced by testPull(), testPullImpl(), testPullPacked(), and testPullVecVecImpl().
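
A minimal usage sketch, not taken from the TIMPI test suite: each rank queries the next rank for a few ids, and each answer is simply twice the id. The include paths assume the usual timpi/ prefix; the container contents and lambdas are illustrative only, chosen to show the required functor signatures.

#include <map>
#include <vector>
#include "timpi/communicator.h"
#include "timpi/parallel_sync.h"

void pull_example(const TIMPI::Communicator & comm)
{
  using TIMPI::processor_id_type;

  // Which ids we want answered, keyed by the rank that should answer them.
  std::map<processor_id_type, std::vector<unsigned int>> queries;
  queries[(comm.rank() + 1) % comm.size()] = {1, 2, 3};

  // Called on the *queried* rank: fill one datum per incoming id.
  auto gather_data = [](processor_id_type /*pid*/,
                        const std::vector<unsigned int> & ids,
                        std::vector<unsigned int> & data)
  {
    data.clear();
    for (const auto id : ids)
      data.push_back(2 * id);
  };

  // Called on the *querying* rank with the answers, in the same order as ids.
  auto act_on_data = [](processor_id_type /*pid*/,
                        const std::vector<unsigned int> & ids,
                        const std::vector<unsigned int> & data)
  {
    // ids[i] was answered with data[i]; use the answers here.
    (void)ids; (void)data;
  };

  const unsigned int * example = nullptr; // only its type matters
  TIMPI::pull_parallel_vector_data(comm, queries, gather_data, act_on_data, example);
}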

◆ pull_parallel_vector_data() [2/3]

template<typename datum , typename A , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void TIMPI::pull_parallel_vector_data ( const Communicator comm,
const MapToVectors &  queries,
GatherFunctor &  gather_data,
ActionFunctor &  act_on_data,
const std::vector< datum, A > *  example 
)

Definition at line 960 of file parallel_sync.h.

References any_source, TIMPI::Communicator::get_unique_tag(), TIMPI::Communicator::probe(), push_parallel_vector_data(), TIMPI::Communicator::rank(), TIMPI::Communicator::receive(), TIMPI::Communicator::send(), and wait().

{
  typedef typename MapToVectors::mapped_type query_type;

  // First index: order of creation, irrelevant
  std::vector<std::vector<std::vector<datum,A>>> response_data;
  std::vector<Request> response_requests;

  // We'll grab a tag so we can overlap request sends and receives
  // without confusing one for the other
  MessageTag tag = comm.get_unique_tag();

  auto gather_functor =
    [&comm, &gather_data, &act_on_data,
     &response_data, &response_requests, &tag]
    (processor_id_type pid, query_type query)
    {
      std::vector<std::vector<datum,A>> response;
      gather_data(pid, query, response);
      timpi_assert_equal_to(query.size(),
                            response.size());

      // Just act on data if the user requested a send-to-self
      if (pid == comm.rank())
        {
          act_on_data(pid, query, response);
        }
      else
        {
          Request sendreq;
          comm.send(pid, response, sendreq, tag);
          response_requests.push_back(sendreq);
          response_data.push_back(std::move(response));
        }
    };

  push_parallel_vector_data (comm, queries, gather_functor);

  // Every outgoing query should now have an incoming response.
  //
  // Post all of the receives.
  //
  // Use blocking API here since we can't use the pre-sized
  // non-blocking APIs with this data type.
  //
  // FIXME - implement Derek's API from #1684, switch to that!
  std::vector<Request> receive_requests;
  std::vector<processor_id_type> receive_procids;
  for (std::size_t i = 0,
       n_queries = queries.size() - queries.count(comm.rank());
       i != n_queries; ++i)
    {
      Status stat(comm.probe(any_source, tag));
      const processor_id_type
        proc_id = cast_int<processor_id_type>(stat.source());

      std::vector<std::vector<datum,A>> received_data;
      comm.receive(proc_id, received_data, tag);

      timpi_assert(queries.count(proc_id));
      auto & querydata = queries.at(proc_id);
      timpi_assert_equal_to(querydata.size(), received_data.size());
      act_on_data(proc_id, querydata, received_data);
    }

  wait(response_requests);
}

◆ pull_parallel_vector_data() [3/3]

template<typename datum , typename MapToVectors , typename GatherFunctor , typename ActionFunctor >
void TIMPI::pull_parallel_vector_data ( const Communicator comm,
const MapToVectors &  queries,
GatherFunctor &  gather_data,
ActionFunctor &  act_on_data,
const datum *   
)

Definition at line 866 of file parallel_sync.h.

References push_parallel_vector_data(), TIMPI::Communicator::SENDRECEIVE, TIMPI::Communicator::size(), and TIMPI::Communicator::sync_type().

{
  typedef typename MapToVectors::mapped_type query_type;

  std::multimap<processor_id_type, std::vector<datum> >
    response_data;

#ifndef NDEBUG
  processor_id_type max_pid = 0;
  for (auto p : queries)
    max_pid = std::max(max_pid, p.first);

  // Our SENDRECEIVE implementation doesn't preserve ordering, but we
  // need ordering preserved for the multimap trick here to work.
  if (comm.sync_type() == Communicator::SENDRECEIVE &&
      max_pid > comm.size())
    timpi_not_implemented();
#endif

  auto gather_functor =
    [&gather_data, &response_data]
    (processor_id_type pid, query_type query)
    {
      auto new_data_it =
        response_data.emplace(pid, std::vector<datum>());
      gather_data(pid, query, new_data_it->second);
      timpi_assert_equal_to(query.size(), new_data_it->second.size());
    };

  push_parallel_vector_data (comm, queries, gather_functor);

  std::map<processor_id_type, unsigned int> responses_acted_on;

  const processor_id_type num_procs = comm.size();

  auto action_functor =
    [&act_on_data, &queries, &responses_acted_on,
#ifndef NDEBUG
     max_pid,
#endif
     num_procs
    ]
    (processor_id_type pid, const std::vector<datum> & data)
    {
      // We rely on responses coming in the same order as queries
      const unsigned int nth_query = responses_acted_on[pid]++;

      auto q_pid_its = queries.equal_range(pid);
      auto query_it = q_pid_its.first;

      // In an oversized pull we might not have any queries addressed
      // to the *base* pid, but only to pid+N*num_procs for some N>1
      // timpi_assert(query_it != q_pid_its.second);
      while (query_it == q_pid_its.second)
        {
          pid += num_procs;
          q_pid_its = queries.equal_range(pid);
          timpi_assert_less_equal(pid, max_pid);
          query_it = q_pid_its.first;
        }

      for (unsigned int i=0; i != nth_query; ++i)
        {
          query_it++;
          if (query_it == q_pid_its.second)
            {
              do
                {
                  pid += num_procs;
                  q_pid_its = queries.equal_range(pid);
                  timpi_assert_less_equal(pid, max_pid);
                } while (q_pid_its.first == q_pid_its.second);
              query_it = q_pid_its.first;
            }
        }

      act_on_data(pid, query_it->second, data);
    };

  push_parallel_vector_data (comm, response_data, action_functor);
}

◆ push_parallel_packed_range() [1/2]

template<typename MapToVectors , typename ActionFunctor , typename Context >
void TIMPI::push_parallel_packed_range ( const Communicator comm,
MapToVectors &&  data,
Context *  context,
const ActionFunctor &  act_on_data 
)

Send and receive and act on vectors of data.

Similar to push_parallel_vector_data, except the vectors are packed and unpacked using the Parallel::Packing routines.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of data to send. For processors to which no data should be sent, there should be no map entry; this will avoid any unnecessary communication. Unless NDEBUG is enabled, TIMPI will assert that no empty map entries exist. In any case empty map entries will not be acted on.

Data which is received from other processors will be operated on by act_on_data(processor_id_type pid, std::vector<datum> && data)

If data exists for the local processor in the map, it will be acted on directly, without any network operations. This also avoids packing and unpacking the data, so no side effects of those operations should be assumed.

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All receives and actions are completed before this function returns.

If you wish to use move semantics within the data received in act_on_data, pass data itself as an rvalue reference.

Referenced by testPushPackedImpl(), testPushPackedImplMove(), and testPushPackedNested().
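
A minimal usage sketch, not taken from the TIMPI test suite. It assumes the usual timpi/ include prefix, that the element type has a Parallel::Packing specialization (TIMPI provides one for std::basic_string in packing.h), and that a null context pointer is acceptable for that specialization; the map contents are illustrative only.

#include <map>
#include <string>
#include <vector>
#include "timpi/communicator.h"
#include "timpi/parallel_sync.h"

// Push a few variable-length strings to the next rank.
void push_packed_example(const TIMPI::Communicator & comm)
{
  using TIMPI::processor_id_type;

  std::map<processor_id_type, std::vector<std::string>> data;
  data[(comm.rank() + 1) % comm.size()] =
    {"hello", "from rank " + std::to_string(comm.rank())};

  auto act_on_data = [](processor_id_type /*pid*/,
                        const std::vector<std::string> & received)
  {
    // received holds the strings that were pushed to this rank.
    (void)received;
  };

  void * context = nullptr; // string packing needs no context
  TIMPI::push_parallel_packed_range(comm, data, context, act_on_data);
}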

◆ push_parallel_packed_range() [2/2]

template<typename MapToContainers , typename ActionFunctor , typename Context >
void TIMPI::push_parallel_packed_range ( const Communicator comm,
MapToContainers &&  data,
Context *  context,
const ActionFunctor &  act_on_data 
)

Definition at line 658 of file parallel_sync.h.

References TIMPI::Communicator::ALLTOALL_COUNTS, TIMPI::Communicator::NBX, TIMPI::Communicator::nonblocking_receive_packed_range(), TIMPI::Communicator::nonblocking_send_packed_range(), TIMPI::Communicator::packed_range_probe(), TIMPI::Communicator::possibly_receive_packed_range(), TIMPI::detail::push_parallel_alltoall_helper(), TIMPI::detail::push_parallel_nbx_helper(), TIMPI::detail::push_parallel_roundrobin_helper(), TIMPI::Communicator::send_receive_packed_range(), TIMPI::Communicator::SENDRECEIVE, TIMPI::Communicator::sync_type(), and TIMPI::Request::wait().

{
  typedef typename std::remove_reference<MapToContainers>::type::mapped_type container_type;
  typedef typename container_type::value_type nonref_type;
  typename std::remove_const<nonref_type>::type * output_type = nullptr;

  switch (comm.sync_type()) {
  case Communicator::NBX:
    {
      auto send_functor = [&context, &comm](const processor_id_type dest_pid,
                                            const container_type & datum,
                                            Request & send_request,
                                            const MessageTag tag) {
        comm.nonblocking_send_packed_range(dest_pid, context, datum.begin(), datum.end(), send_request, tag);
      };

      auto possibly_receive_functor = [&context, &output_type, &comm](unsigned int & current_src_proc,
                                                                      container_type & current_incoming_data,
                                                                      Request & current_request,
                                                                      const MessageTag tag) {
        return comm.possibly_receive_packed_range(
          current_src_proc,
          context,
          std::inserter(current_incoming_data, current_incoming_data.end()),
          output_type,
          current_request,
          tag);
      };

      detail::push_parallel_nbx_helper
        (comm, data, send_functor, possibly_receive_functor, act_on_data);
    }
    break;
  case Communicator::ALLTOALL_COUNTS:
    {
      auto send_functor = [&context, &comm](const processor_id_type dest_pid,
                                            const container_type & datum,
                                            Request & send_request,
                                            const MessageTag tag) {
        comm.nonblocking_send_packed_range(dest_pid, context, datum.begin(), datum.end(), send_request, tag);
      };

      auto receive_functor = [&context, &output_type, &comm](unsigned int current_src_proc,
                                                             container_type & current_incoming_data,
                                                             const MessageTag tag) {
        bool flag = false;
        Status stat(comm.packed_range_probe<container_type>(current_src_proc, tag, flag));
        timpi_assert(flag);

        Request req;
        comm.nonblocking_receive_packed_range(current_src_proc, context,
                                              std::inserter(current_incoming_data, current_incoming_data.end()),
                                              output_type, req, stat, tag);
        req.wait();
      };

      detail::push_parallel_alltoall_helper
        (comm, data, send_functor, receive_functor, act_on_data);
    }
    break;
  case Communicator::SENDRECEIVE:
    {
      auto sendreceive_functor = [&context, &output_type, &comm]
        (const processor_id_type dest_pid,
         const container_type & data_to_send,
         const processor_id_type src_pid,
         container_type & received_data,
         const MessageTag tag) {
        comm.send_receive_packed_range(dest_pid, context,
                                       data_to_send.begin(),
                                       data_to_send.end(), src_pid,
                                       context,
                                       std::inserter(received_data,
                                                     received_data.end()),
                                       output_type, tag, tag);
      };

      detail::push_parallel_roundrobin_helper
        (comm, data, sendreceive_functor, act_on_data);
    }
    break;
  default:
    timpi_error_msg("Invalid sync_type setting " << comm.sync_type());
  }
}

◆ push_parallel_vector_data()

template<typename MapToVectors , typename ActionFunctor , typename std::enable_if< std::is_base_of< DataType, StandardType< typename InnermostType< typename std::remove_const< typename std::remove_reference< MapToVectors >::type::mapped_type::value_type >::type >::type >>::value, int >::type = 0>
void TIMPI::push_parallel_vector_data ( const Communicator comm,
MapToVectors &&  data,
const ActionFunctor &  act_on_data 
)

Send and receive and act on vectors of data.

The data map is indexed by processor ids as keys, and for each processor id in the map there should be a vector of data to send. For processors to which no data should be sent, there should be no map entry; this will avoid any unnecessary communication. Unless NDEBUG is enabled, TIMPI will assert that no empty map entries exist. In any case empty map entries will not be acted on.

Data which is received from other processors will be operated on by act_on_data(processor_id_type pid, std::vector<datum> && data)

If data exists for the local processor in the map, it will be acted on directly, without any network operations.

No guarantee about operation ordering is made - this function will attempt to act on data in the order in which it is received.

All receives and actions are completed before this function returns.

If you wish to use move semantics within the data received in act_on_data, pass data itself as an rvalue reference.

This overload should be automatically selected for data which has a StandardType specialization defined, so that we can directly send it without serializing it into buffers beforehand.

This overload should be automatically selected for data which has a Packing specialization defined, where we must serialize it into buffers before sending.

Definition at line 755 of file parallel_sync.h.

References TIMPI::Communicator::ALLTOALL_COUNTS, build_standard_type(), TIMPI::Communicator::NBX, TIMPI::Communicator::possibly_receive(), TIMPI::detail::push_parallel_alltoall_helper(), TIMPI::detail::push_parallel_nbx_helper(), TIMPI::detail::push_parallel_roundrobin_helper(), TIMPI::Communicator::receive(), TIMPI::Communicator::send(), TIMPI::Communicator::send_receive(), TIMPI::Communicator::SENDRECEIVE, and TIMPI::Communicator::sync_type().

Referenced by pull_parallel_vector_data(), testEmptyEntry(), testPush(), testPushImpl(), testPushMove(), testPushMultimapImpl(), testPushMultimapVecVecImpl(), testPushPackedDispatch(), testPushPackedFailureCase(), testPushPackedOneTuple(), and testPushVecVecImpl().

{
  typedef typename std::remove_reference<MapToVectors>::type::mapped_type container_type;
  typedef typename container_type::value_type nonref_type;
  typedef typename std::remove_const<nonref_type>::type nonconst_nonref_type;

  // We'll construct the StandardType once rather than inside a loop.
  // We can't pass in example data here, because we might have
  // data.empty() on some ranks, so we'll need StandardType to be able
  // to construct the user's data type without an example.
  auto type = build_standard_type(static_cast<nonconst_nonref_type *>(nullptr));

  switch (comm.sync_type()) {
  case Communicator::NBX:
    {
      auto send_functor = [&type, &comm](const processor_id_type dest_pid,
                                         const container_type & datum,
                                         Request & send_request,
                                         const MessageTag tag) {
        comm.send(dest_pid, datum, type, send_request, tag);
      };

      auto possibly_receive_functor = [&type, &comm](unsigned int & current_src_proc,
                                                     container_type & current_incoming_data,
                                                     Request & current_request,
                                                     const MessageTag tag) {
        return comm.possibly_receive(
          current_src_proc, current_incoming_data, type, current_request, tag);
      };

      detail::push_parallel_nbx_helper
        (comm, data, send_functor, possibly_receive_functor, act_on_data);
    }
    break;
  case Communicator::ALLTOALL_COUNTS:
    {
#ifdef TIMPI_HAVE_MPI // We should never hit these functors in serial
      auto send_functor = [&type, &comm](const processor_id_type dest_pid,
                                         const container_type & datum,
                                         Request & send_request,
                                         const MessageTag tag) {
        comm.send(dest_pid, datum, type, send_request, tag);
      };

      auto receive_functor = [&type, &comm](unsigned int current_src_proc,
                                            container_type & current_incoming_data,
                                            const MessageTag tag) {
        comm.receive(current_src_proc, current_incoming_data, type, tag);
      };
#else
      auto send_functor = [](const processor_id_type,
                             const container_type &,
                             Request &,
                             const MessageTag) {
        timpi_error(); // We should never hit these in serial
      };

      auto receive_functor = [](unsigned int,
                                container_type &,
                                const MessageTag) {
        timpi_error();
      };
#endif

      detail::push_parallel_alltoall_helper
        (comm, data, send_functor, receive_functor, act_on_data);
    }
    break;
  case Communicator::SENDRECEIVE:
    {
      auto sendreceive_functor = [&comm](const processor_id_type dest_pid,
                                         const container_type & data_to_send,
                                         const processor_id_type src_pid,
                                         container_type & received_data,
                                         const MessageTag tag) {
        comm.send_receive(dest_pid, data_to_send,
                          src_pid, received_data, tag, tag);
      };

      detail::push_parallel_roundrobin_helper
        (comm, data, sendreceive_functor, act_on_data);
    }
    break;
  default:
    timpi_error_msg("Invalid sync_type setting " << comm.sync_type());
  }
}
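
A minimal usage sketch for this overload (fixed-size data sent directly via StandardType), not taken from the TIMPI test suite; include paths assume the usual timpi/ prefix and the payload values are illustrative only.

#include <cstddef>
#include <map>
#include <vector>
#include "timpi/communicator.h"
#include "timpi/parallel_sync.h"

// Push a small vector of ints to the next rank and count what we receive.
void push_example(const TIMPI::Communicator & comm)
{
  using TIMPI::processor_id_type;

  std::map<processor_id_type, std::vector<int>> data;
  data[(comm.rank() + 1) % comm.size()] = {1, 2, 3};

  std::size_t n_received = 0;
  auto act_on_data = [&n_received](processor_id_type /*pid*/,
                                   const std::vector<int> & received)
  {
    n_received += received.size();
  };

  TIMPI::push_parallel_vector_data(comm, data, act_on_data);
  // All receives and actions have completed by the time we get here.
}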

◆ report_error()

void TIMPI::report_error ( const char *  file,
int  line,
const char *  date,
const char *  time 
)

Definition at line 43 of file timpi_assert.C.

References report_here().

{
  // It is possible to have an error *inside* report_error; e.g. when
  // we start using a TIMPI::print_trace. Don't infinitely recurse.
  static bool reporting_error = false;
  if (reporting_error)
    {
      // I heard you like error reporting, so we put an error report
      // in report_error() so you can report errors from the report.
      std::cerr << "TIMPI encountered an error while attempting to report_error." << std::endl;
      return;
    }
  reporting_error = true;

  report_here(file, line, date, time);

  reporting_error = false;
}

◆ report_here()

void TIMPI::report_here ( const char *  file,
int  line,
const char *  date,
const char *  time 
)

Definition at line 29 of file timpi_assert.C.

References TIMPI::Communicator::rank().

Referenced by report_error().

{
  std::ostringstream here_msg; // Build in one buffer to reduce interleaving
#ifdef TIMPI_HAVE_MPI
  TIMPI::Communicator commworld(MPI_COMM_WORLD);
  const std::size_t proc_id = commworld.rank();
  here_msg << "[" << proc_id << "] ";
#endif
  here_msg << file << ", line " << line << ", compiled "
           << date << " at " << time << std::endl;
  std::cerr << here_msg.str();
}

◆ TIMPI_CONTAINER_TYPE() [1/2]

template<typename T , typename C , typename A >
TIMPI::TIMPI_CONTAINER_TYPE ( std::set< T TIMPI_ATTRIBUTES_COMMA C TIMPI_ATTRIBUTES_COMMA A >  )

◆ TIMPI_CONTAINER_TYPE() [2/2]

template<typename T , typename A >
TIMPI::TIMPI_CONTAINER_TYPE ( std::vector< T TIMPI_ATTRIBUTES_COMMA A >  )

◆ TIMPI_FLOAT_TYPE() [1/4]

TIMPI::TIMPI_FLOAT_TYPE ( float  )

◆ TIMPI_FLOAT_TYPE() [2/4]

TIMPI::TIMPI_FLOAT_TYPE ( double  )

◆ TIMPI_FLOAT_TYPE() [3/4]

TIMPI::TIMPI_FLOAT_TYPE ( long  double)

◆ TIMPI_FLOAT_TYPE() [4/4]

TIMPI::TIMPI_FLOAT_TYPE ( TIMPI_DEFAULT_SCALAR_TYPE  )

◆ timpi_ignore()

template<class ... Args>
void TIMPI::timpi_ignore ( const Args &  ...)
inline

◆ TIMPI_INT_TYPE() [1/8]

TIMPI::TIMPI_INT_TYPE ( char  )

◆ TIMPI_INT_TYPE() [2/8]

TIMPI::TIMPI_INT_TYPE ( signed  char)

◆ TIMPI_INT_TYPE() [3/8]

TIMPI::TIMPI_INT_TYPE ( unsigned  char)

◆ TIMPI_INT_TYPE() [4/8]

TIMPI::TIMPI_INT_TYPE ( short  int)

◆ TIMPI_INT_TYPE() [5/8]

TIMPI::TIMPI_INT_TYPE ( unsigned short  int)

◆ TIMPI_INT_TYPE() [6/8]

TIMPI::TIMPI_INT_TYPE ( int  )

◆ TIMPI_INT_TYPE() [7/8]

TIMPI::TIMPI_INT_TYPE ( long  )

◆ TIMPI_INT_TYPE() [8/8]

TIMPI::TIMPI_INT_TYPE ( unsigned long  long)

◆ TIMPI_PARALLEL_FLOAT_OPS() [1/4]

TIMPI::TIMPI_PARALLEL_FLOAT_OPS ( float  )

◆ TIMPI_PARALLEL_FLOAT_OPS() [2/4]

TIMPI::TIMPI_PARALLEL_FLOAT_OPS ( double  )

◆ TIMPI_PARALLEL_FLOAT_OPS() [3/4]

TIMPI::TIMPI_PARALLEL_FLOAT_OPS ( long  double)

◆ TIMPI_PARALLEL_FLOAT_OPS() [4/4]

TIMPI::TIMPI_PARALLEL_FLOAT_OPS ( TIMPI_DEFAULT_SCALAR_TYPE  )

◆ TIMPI_PARALLEL_INTEGER_OPS() [1/8]

TIMPI::TIMPI_PARALLEL_INTEGER_OPS ( char  )

◆ TIMPI_PARALLEL_INTEGER_OPS() [2/8]

TIMPI::TIMPI_PARALLEL_INTEGER_OPS ( signed  char)

◆ TIMPI_PARALLEL_INTEGER_OPS() [3/8]

TIMPI::TIMPI_PARALLEL_INTEGER_OPS ( unsigned  char)

◆ TIMPI_PARALLEL_INTEGER_OPS() [4/8]

TIMPI::TIMPI_PARALLEL_INTEGER_OPS ( short  int)

◆ TIMPI_PARALLEL_INTEGER_OPS() [5/8]

TIMPI::TIMPI_PARALLEL_INTEGER_OPS ( unsigned short  int)

◆ TIMPI_PARALLEL_INTEGER_OPS() [6/8]

TIMPI::TIMPI_PARALLEL_INTEGER_OPS ( int  )

◆ TIMPI_PARALLEL_INTEGER_OPS() [7/8]

TIMPI::TIMPI_PARALLEL_INTEGER_OPS ( long  )

◆ TIMPI_PARALLEL_INTEGER_OPS() [8/8]

TIMPI::TIMPI_PARALLEL_INTEGER_OPS ( unsigned long  long)

◆ TIMPI_STANDARD_TYPE() [1/15]

TIMPI::TIMPI_STANDARD_TYPE ( char  ,
MPI_CHAR   
)

◆ TIMPI_STANDARD_TYPE() [2/15]

TIMPI::TIMPI_STANDARD_TYPE ( signed  char,
MPI_SIGNED_CHAR   
)

◆ TIMPI_STANDARD_TYPE() [3/15]

TIMPI::TIMPI_STANDARD_TYPE ( unsigned  char,
MPI_UNSIGNED_CHAR   
)

◆ TIMPI_STANDARD_TYPE() [4/15]

TIMPI::TIMPI_STANDARD_TYPE ( short  int,
MPI_SHORT   
)

◆ TIMPI_STANDARD_TYPE() [5/15]

TIMPI::TIMPI_STANDARD_TYPE ( unsigned short  int,
MPI_UNSIGNED_SHORT   
)

◆ TIMPI_STANDARD_TYPE() [6/15]

TIMPI::TIMPI_STANDARD_TYPE ( int  ,
MPI_INT   
)

◆ TIMPI_STANDARD_TYPE() [7/15]

TIMPI::TIMPI_STANDARD_TYPE ( unsigned  int,
MPI_UNSIGNED   
)

◆ TIMPI_STANDARD_TYPE() [8/15]

TIMPI::TIMPI_STANDARD_TYPE ( long  ,
MPI_LONG   
)

◆ TIMPI_STANDARD_TYPE() [9/15]

TIMPI::TIMPI_STANDARD_TYPE ( long  long,
MPI_LONG_LONG_INT   
)

◆ TIMPI_STANDARD_TYPE() [10/15]

TIMPI::TIMPI_STANDARD_TYPE ( unsigned  long,
MPI_UNSIGNED_LONG   
)

◆ TIMPI_STANDARD_TYPE() [11/15]

TIMPI::TIMPI_STANDARD_TYPE ( unsigned long  long,
MPI_UNSIGNED_LONG_LONG   
)

◆ TIMPI_STANDARD_TYPE() [12/15]

TIMPI::TIMPI_STANDARD_TYPE ( float  ,
MPI_FLOAT   
)

◆ TIMPI_STANDARD_TYPE() [13/15]

TIMPI::TIMPI_STANDARD_TYPE ( double  ,
MPI_DOUBLE   
)

◆ TIMPI_STANDARD_TYPE() [14/15]

TIMPI::TIMPI_STANDARD_TYPE ( long  double,
MPI_LONG_DOUBLE   
)

◆ TIMPI_STANDARD_TYPE() [15/15]

TIMPI::TIMPI_STANDARD_TYPE ( TIMPI_DEFAULT_SCALAR_TYPE  )

◆ timpi_version_stdout()

void TIMPI::timpi_version_stdout ( )

Definition at line 26 of file timpi_version.C.

References timpi_version_string().

Referenced by main().

{
  std::cout << timpi_version_string() << std::flush;
}

◆ timpi_version_string()

std::string TIMPI::timpi_version_string ( )

Definition at line 31 of file timpi_version.C.

References get_timpi_version().

Referenced by testVersionString(), and timpi_version_stdout().

{
  std::ostringstream oss;

  oss << "--------------------------------------------------------" << std::endl;
  oss << "TIMPI Package: Version = " << TIMPI_LIB_VERSION;
  oss << " (" << get_timpi_version() << ")" << std::endl << std::endl;

  oss << TIMPI_LIB_RELEASE << std::endl << std::endl;

  oss << "Build Date = " << TIMPI_BUILD_DATE << std::endl;
  oss << "Build Host = " << TIMPI_BUILD_HOST << std::endl;
  oss << "Build User = " << TIMPI_BUILD_USER << std::endl;
  oss << "Build Arch = " << TIMPI_BUILD_ARCH << std::endl;
  oss << "Build Rev = " << TIMPI_BUILD_VERSION << std::endl << std::endl;

  oss << "C++ Config = " << TIMPI_CXX << " " << TIMPI_CXXFLAGS << std::endl;
  oss << "--------------------------------------------------------" << std::endl;

  return oss.str();
}

◆ unpack_range() [1/2]

template<typename Context , typename buffertype , typename OutputIter , typename T >
OutputIter TIMPI::unpack_range ( const typename std::vector< buffertype > &  buffer,
Context *  context,
OutputIter  out_iter,
const T *  output_type 
)
inline

Decode a range of potentially-variable-size objects from a data array.

We take out_iter by value for maximum compatibility, but we return it afterward for the use of code that needs to unpack multiple buffers to the same output iterator.

◆ unpack_range() [2/2]

template<typename Context , typename buffertype , typename OutputIter , typename T >
OutputIter TIMPI::unpack_range ( const std::vector< buffertype > &  buffer,
Context *  context,
OutputIter  out_iter,
const T *   
)
inline

Helper function for range unpacking.

We take out_iter by value for maximum compatibility, but we return it afterward for the use of code that needs to unpack multiple buffers to the same output iterator.

Definition at line 1103 of file packing.h.

References libMesh::Parallel::Packing< T, Enable >::packed_size(), and libMesh::Parallel::Packing< T, Enable >::unpack().

Referenced by TIMPI::Communicator::allgather_packed_range(), TIMPI::Communicator::broadcast_packed_range(), TIMPI::Communicator::gather_packed_range(), TIMPI::Communicator::receive_packed_range(), TIMPI::PostWaitUnpackBuffer< Container, Context, OutputIter, T >::run(), and TIMPI::Communicator::send_receive_packed_range().

{
  // Loop through the buffer and unpack each object, returning the
  // object pointer via the output iterator
  typename std::vector<buffertype>::const_iterator
    next_object_start = buffer.begin();

  while (next_object_start < buffer.end())
    {
      *out_iter++ = Packing<T>::unpack(next_object_start, context);
      next_object_start +=
        Packing<T>::packed_size(next_object_start);
    }

  // We should have used up the exact amount of data in the buffer
  timpi_assert (next_object_start == buffer.end());

  return out_iter;
}
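
A round-trip sketch showing pack_range() and unpack_range() used together. It is written as a template so that it assumes nothing beyond the signatures documented here: the element type T must have a Packing specialization compatible with the chosen buffertype, and both of those choices (and the round_trip name itself) are hypothetical, left to the caller.

#include <iterator>
#include <vector>
#include "timpi/packing.h"

// Encode a vector of T into a flat buffer, then decode it back again.
template <typename buffertype, typename T, typename Context>
std::vector<T> round_trip(const std::vector<T> & input, Context * context)
{
  std::vector<buffertype> buffer;

  // Encode; with a generous approx_buffer_size the whole range fits in one pass.
  auto stop = TIMPI::pack_range(context, input.begin(), input.end(),
                                buffer, 1000000);
  // stop == input.end() once everything has been packed
  (void)stop;

  std::vector<T> output;
  TIMPI::unpack_range(buffer, context,
                      std::back_inserter(output),
                      static_cast<T *>(nullptr)); // tag argument selects T
  return output;
}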

◆ wait() [1/2]

Status TIMPI::wait ( Request r)
inline

Wait for a non-blocking send or receive to finish.

Definition at line 135 of file request.h.

References TIMPI::Request::wait().

Referenced by pull_parallel_vector_data(), testIrecvSend(), testIsendRecv(), testRecvIsendSets(), and testRecvIsendVecVecs().

{ return r.wait(); }

◆ wait() [2/2]

void TIMPI::wait ( std::vector< Request > &  r)

Wait for all non-blocking operations to finish.

Definition at line 213 of file request.C.

{
  for (auto & req : r)
    req.wait();
}
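
A sketch of the intended usage pattern: start a non-blocking send, do other work (here, a blocking receive from the other direction), then wait on the outstanding request(s). This is not from the TIMPI tests; the ring pattern and payload are illustrative, and the include paths assume the usual timpi/ prefix.

#include <vector>
#include "timpi/communicator.h"
#include "timpi/message_tag.h"
#include "timpi/request.h"

// Each rank sends to its right neighbor and receives from its left one.
void ring_exchange(const TIMPI::Communicator & comm)
{
  const TIMPI::processor_id_type dest =
    (comm.rank() + 1) % comm.size();
  const TIMPI::processor_id_type src =
    (comm.rank() + comm.size() - 1) % comm.size();

  const TIMPI::MessageTag tag = comm.get_unique_tag();

  std::vector<unsigned int> outgoing = {comm.rank(), 42u};
  std::vector<unsigned int> incoming;

  std::vector<TIMPI::Request> requests(1);
  comm.send(dest, outgoing, requests[0], tag);  // non-blocking send
  comm.receive(src, incoming, tag);             // blocking receive

  TIMPI::wait(requests);                        // complete the send(s)
}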

◆ waitany()

std::size_t TIMPI::waitany ( std::vector< Request > &  r)

Wait for at least one non-blocking operation to finish.

Return the index of the request which completed.

Definition at line 219 of file request.C.

References TIMPI::Request::_prior_request, TIMPI::Request::_request, TIMPI::Request::get(), TIMPI::Request::null_request, and TIMPI::Request::post_wait_work.

Referenced by testNonblockingWaitany().

{
  timpi_assert(!r.empty());

  int r_size = cast_int<int>(r.size());
  std::vector<request> raw(r_size);
  int non_null = r_size;
  for (int i=0; i != r_size; ++i)
    {
      Request * root = &r[i];
      // If we have prior requests, we need to complete the first one
      // first
      while (root->_prior_request.get())
        root = root->_prior_request.get();
      raw[i] = *root->get();

      if (raw[i] != Request::null_request)
        non_null = std::min(non_null,i);
    }

  if (non_null == r_size)
    return std::size_t(-1);

  int index = non_null;

#ifdef TIMPI_HAVE_MPI
  bool only_priors_completed = false;
  Request * next;

  do
    {
      timpi_call_mpi
        (MPI_Waitany(r_size, raw.data(), &index, MPI_STATUS_IGNORE));

      timpi_assert_not_equal_to(index, MPI_UNDEFINED);

      timpi_assert_less(index, r_size);

      Request * completed = &r[index];
      next = completed;

      // If we completed a prior request, we're not really done yet,
      // so find the next in that line to try again.
      while (completed->_prior_request.get())
        {
          only_priors_completed = true;
          next = completed;
          completed = completed->_prior_request.get();
        }

      // MPI sets a completed MPI_Request to MPI_REQUEST_NULL; we want
      // to preserve that
      completed->_request = raw[index];

      // Do any post-wait work for the completed request
      if (completed->post_wait_work)
        for (auto & item : completed->post_wait_work->first)
          {
            // The user should never try to give us non-existent work or try
            // to wait() twice.
            timpi_assert (item);
            item->run();
            delete item;
            item = nullptr;
          }

      next->_prior_request.reset(nullptr);
      raw[index] = *next->get();

    } while(only_priors_completed);
#else
  r[index]._request = Request::null_request;
#endif

  return index;
}
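
A sketch of processing completions in arrival order with waitany(): keep calling it until it reports, via the std::size_t(-1) return visible in the listing above, that every request has already been exhausted. The drain function and handle_completion callback are hypothetical.

#include <cstddef>
#include <vector>
#include "timpi/request.h"

// Drain a set of outstanding requests as they complete, in completion order.
template <typename Handler>
void drain(std::vector<TIMPI::Request> & requests, const Handler & handle_completion)
{
  while (true)
    {
      const std::size_t index = TIMPI::waitany(requests);
      if (index == std::size_t(-1))
        break;                      // nothing left to wait on

      // requests[index] has completed and its post-wait work has run.
      handle_completion(index);
    }
}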

Variable Documentation

◆ any_source

const unsigned int TIMPI::any_source

◆ any_tag

const MessageTag TIMPI::any_tag = MessageTag(MPI_ANY_TAG)

Default message tag ids.

Definition at line 114 of file message_tag.h.

◆ communicator

typedef MPI_Comm TIMPI::communicator

Communicator object for talking with subsets of processors.

Definition at line 73 of file communicator.h.

◆ no_tag

const MessageTag TIMPI::no_tag = MessageTag(0)

Definition at line 119 of file message_tag.h.