// Local includes
#include "libmesh/parallel_sort.h"

#include "libmesh/libmesh_common.h"
#include "libmesh/parallel_bin_sorter.h"
#include "libmesh/parallel_hilbert.h"

// C++ includes
#include <algorithm>
#include <cstddef>
#include <limits>
#include <map>
#include <vector>
46 template <
typename KeyType,
typename IdxType>
48 std::vector<KeyType> & d) :
52 _bin_is_sorted(false),
63 template <
typename KeyType,
typename IdxType>
69 IdxType global_data_size = cast_int<IdxType>(_data.size());
71 this->comm().sum (global_data_size);
73 if (global_data_size < 2)
79 this->comm().allgather (static_cast<IdxType>(_my_bin.size()),
84 if (this->n_processors() > 1)
87 this->communicate_bins();
92 this->sort_local_bin();
96 _bin_is_sorted =
true;
101 template <
typename KeyType,
typename IdxType>
106 std::vector<KeyType> global_min_max(2);
109 global_min_max[0] = -_data.front();
110 global_min_max[1] = _data.back();
114 this->comm().max(global_min_max);
117 global_min_max[0] *= -1;
121 bs.
binsort(_n_procs, global_min_max[1], global_min_max[0]);
#if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)

// Full specialization for Hilbert-index keys: the negation trick used
// by the generic binsort() to find min and max with one reduction does
// not apply to this key type, so the global extrema are computed with
// custom MPI reduction operations instead.
template <>
void Sort<Parallel::DofObjectKey,unsigned int>::binsort()
{
  // Find the global min and max from all the
  // processors.
  Parallel::DofObjectKey
    local_min, local_max,
    global_min, global_max;

  if (_data.empty())
    {
      // An empty rank contributes an "identity" range: min at the
      // largest representable key, max at the smallest, so it cannot
      // influence the reductions below.
#ifdef LIBMESH_ENABLE_UNIQUE_ID
      local_min.first.rack0 = local_min.first.rack1 = local_min.first.rack2 = static_cast<Hilbert::inttype>(-1);
      local_min.second = std::numeric_limits<unique_id_type>::max();
      local_max.first.rack0 = local_max.first.rack1 = local_max.first.rack2 = 0;
      local_max.second = 0;
#else
      local_min.rack0 = local_min.rack1 = local_min.rack2 = static_cast<Hilbert::inttype>(-1);
      local_max.rack0 = local_max.rack1 = local_max.rack2 = 0;
#endif
    }
  else
    {
      // _data is locally sorted, so the extrema sit at the ends
      local_min = _data.front();
      local_max = _data.back();
    }

  MPI_Op hilbert_max, hilbert_min;

  // Custom reduction ops for DofObjectKey — presumably declared in
  // parallel_hilbert.h; TODO(review) confirm the operator names.
  MPI_Op_create ((MPI_User_function*)dofobjectkey_max_op, true, &hilbert_max);
  MPI_Op_create ((MPI_User_function*)dofobjectkey_min_op, true, &hilbert_min);

  // Communicate to determine the global
  // min and max for all processors
  MPI_Allreduce(&local_min,
                &global_min,
                1,
                Parallel::StandardType<Parallel::DofObjectKey>(&local_min),
                hilbert_min,
                this->comm().get());

  MPI_Allreduce(&local_max,
                &global_max,
                1,
                Parallel::StandardType<Parallel::DofObjectKey>(&local_max),
                hilbert_max,
                this->comm().get());

  // Release the user-defined reduction operations
  MPI_Op_free (&hilbert_max);
  MPI_Op_free (&hilbert_min);

  // Bin-Sort based on the global min and max
  Parallel::BinSorter<Parallel::DofObjectKey> bs(this->comm(), _data);
  bs.binsort(_n_procs, global_max, global_min);

  // Now save the local bin sizes in a vector so
  // we don't have to keep around the BinSorter.
  for (processor_id_type i=0; i<_n_procs; ++i)
    _local_bin_sizes[i] = bs.sizeof_bin(i);
}

#endif // #if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)
199 template <
typename KeyType,
typename IdxType>
202 #ifdef LIBMESH_HAVE_MPI
204 IdxType local_offset = 0;
205 std::map<processor_id_type, std::vector<KeyType> > pushed_keys, received_keys;
209 IdxType next_offset = local_offset + _local_bin_sizes[i];
210 if (_local_bin_sizes[i])
212 auto begin = _data.begin() + local_offset;
213 auto end = _data.begin() + next_offset;
214 pushed_keys[i].assign(begin,
end);
217 local_offset = next_offset;
220 auto keys_action_functor =
223 const std::vector<KeyType> & keys)
225 received_keys[pid] = keys;
228 Parallel::push_parallel_vector_data
229 (this->comm(), pushed_keys, keys_action_functor);
231 std::size_t my_bin_size = 0;
232 for (
auto & p : received_keys)
233 my_bin_size += p.second.size();
236 _my_bin.reserve(my_bin_size);
238 for (
auto & p : received_keys)
239 _my_bin.insert(_my_bin.end(), p.second.begin(), p.second.end());
242 std::vector<IdxType> global_bin_sizes = _local_bin_sizes;
244 this->comm().sum(global_bin_sizes);
246 libmesh_assert_equal_to
247 (global_bin_sizes[this->processor_id()], _my_bin.size());
250 #endif // LIBMESH_HAVE_MPI
255 template <
typename KeyType,
typename IdxType>
258 std::sort(_my_bin.begin(), _my_bin.end());
263 template <
typename KeyType,
typename IdxType>
268 libMesh::out <<
"Warning! Bin is not yet sorted!" << std::endl;
281 #if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)