21 #include "libmesh/mesh_communication.h" 24 #include "libmesh/libmesh_config.h" 25 #include "libmesh/libmesh_common.h" 26 #include "libmesh/libmesh_logging.h" 27 #include "libmesh/mesh_base.h" 28 #include "libmesh/mesh_tools.h" 29 #include "libmesh/parallel_hilbert.h" 30 #include "libmesh/parallel_sort.h" 31 #include "libmesh/elem.h" 32 #include "libmesh/elem_range.h" 33 #include "libmesh/node_range.h" 40 #ifdef LIBMESH_HAVE_LIBHILBERT 45 #if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI) 57 p(0) = (bbox.first(0) == bbox.second(0)) ? 0. :
58 1/(bbox.second(0)-bbox.first(0));
61 p(1) = (bbox.first(1) == bbox.second(1)) ? 0. :
62 1/(bbox.second(1)-bbox.first(1));
66 p(2) = (bbox.first(2) == bbox.second(2)) ? 0. :
67 1/(bbox.second(2)-bbox.first(2));
// Map the physical point p into [0,1]^3 using the mesh bounding box and its
// precomputed inverse extents, then discretize each coordinate onto the
// integer range libHilbert works with.
void get_hilbert_coords (const Point & p,
                         const libMesh::BoundingBox & bbox,
                         const Point & bboxinv,
                         CFixBitVec icoords[3])
{
  static const Hilbert::inttype max_inttype = static_cast<Hilbert::inttype>(-1);

  const Real
    x = (p(0)-bbox.first(0)) * bboxinv(0),
    y = (p(1)-bbox.first(1)) * bboxinv(1),
    z = (p(2)-bbox.first(2)) * bboxinv(2);

  icoords[0] = static_cast<Hilbert::inttype>(x*max_inttype);
  icoords[1] = static_cast<Hilbert::inttype>(y*max_inttype);
  icoords[2] = static_cast<Hilbert::inttype>(z*max_inttype);
}
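// Note: Hilbert::inttype is an unsigned integer type, so the
// static_cast<Hilbert::inttype>(-1) above yields its maximum representable
// value; the unit-cube coordinates are therefore scaled onto the full range
// [0, 2^(8*sizeof(Hilbert::inttype)) - 1] before being handed to libHilbert.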
// Compute the key for an element, based on its vertex average.
Parallel::DofObjectKey
get_dofobject_key (const Elem & e,
                   const libMesh::BoundingBox & bbox,
                   const Point & bboxinv)
{
  static const unsigned int sizeof_inttype = sizeof(Hilbert::inttype);

  Hilbert::HilbertIndices index;
  CFixBitVec icoords[3];
  Hilbert::BitVecType bv;
  get_hilbert_coords (e.vertex_average(), bbox, bboxinv, icoords);
  Hilbert::coordsToIndex (icoords, 8*sizeof_inttype, 3, bv);
  index = bv;

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  return std::make_pair(index, e.unique_id());
#else
  return index;
#endif
}
// Compute the key for a node, based on its location.
Parallel::DofObjectKey
get_dofobject_key (const Node & n,
                   const libMesh::BoundingBox & bbox,
                   const Point & bboxinv)
{
  static const unsigned int sizeof_inttype = sizeof(Hilbert::inttype);

  Hilbert::HilbertIndices index;
  CFixBitVec icoords[3];
  Hilbert::BitVecType bv;
  get_hilbert_coords (n, bbox, bboxinv, icoords);
  Hilbert::coordsToIndex (icoords, 8*sizeof_inttype, 3, bv);
  index = bv;

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  return std::make_pair(index, n.unique_id());
#else
  return index;
#endif
}
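// When unique ids are enabled, Parallel::DofObjectKey is a
// std::pair<Hilbert::HilbertIndices, unique_id_type>, so two geometrically
// coincident objects still compare as distinct keys; without unique ids the
// key is the Hilbert index alone.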
// Functor for computing Hilbert keys over ranges of nodes or elements in
// threaded loops.
class ComputeHilbertKeys
{
public:
  ComputeHilbertKeys (const libMesh::BoundingBox & bbox,
                      const Point & bboxinv,
                      std::vector<Parallel::DofObjectKey> & keys) :
    _bbox(bbox), _bboxinv(bboxinv), _keys(keys) {}

  // Compute the keys for a range of nodes
  void operator() (const ConstNodeRange & range) const
  {
    std::size_t pos = range.first_idx();
    for (const auto & node : range)
      {
        libmesh_assert_less (pos, _keys.size());
        _keys[pos++] = get_dofobject_key (*node, _bbox, _bboxinv);
      }
  }

  // Compute the keys for a range of elements
  void operator() (const ConstElemRange & range) const
  {
    std::size_t pos = range.first_idx();
    for (const auto & elem : range)
      {
        libmesh_assert_less (pos, _keys.size());
        _keys[pos++] = get_dofobject_key (*elem, _bbox, _bboxinv);
      }
  }

private:
  const libMesh::BoundingBox & _bbox;
  const Point _bboxinv;
  std::vector<Parallel::DofObjectKey> & _keys;
};
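// A sketch of the intended use (the driving code is not shown in this
// excerpt): the functor is run over the local ranges in parallel, e.g.
//
//   Threads::parallel_for (nr, ComputeHilbertKeys (bbox, bboxinv, node_keys));
//   Threads::parallel_for (er, ComputeHilbertKeys (bbox, bboxinv, elem_keys));
//
// where nr/er are the ConstNodeRange/ConstElemRange built below and the key
// vectors have already been resized to match.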
#endif // defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)



#if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)

// Assign globally unique, partition-agnostic ids to the nodes and elements of
// the mesh, following their order along a Hilbert space-filling curve.
void MeshCommunication::assign_global_indices (MeshBase & mesh) const
{
  LOG_SCOPE ("assign_global_indices()", "MeshCommunication");
  const Parallel::Communicator & communicator = mesh.comm();

  // (elided: the global bounding box `bbox` of the mesh is computed here)
  const Point bboxinv = invert_bbox(bbox);

  // Hilbert keys of the local nodes and elements
  std::vector<Parallel::DofObjectKey>
    node_keys, elem_keys;

  ConstNodeRange nr (mesh.local_nodes_begin(),
                     mesh.local_nodes_end());
  node_keys.resize (nr.size());
  // ... (node_keys are filled here, e.g. via ComputeHilbertKeys)

  ConstElemRange er (mesh.local_elements_begin(),
                     mesh.local_elements_end());
  elem_keys.resize (er.size());
  // ... (elem_keys are filled here)
  // Parallel-sort the keys; afterwards each processor holds one sorted,
  // contiguous bin of the global key sequence.  (The sorter setup is
  // reconstructed here from the bin() calls in this excerpt.)
  Parallel::Sort<Parallel::DofObjectKey> node_sorter (communicator, node_keys);
  node_sorter.sort();
  const std::vector<Parallel::DofObjectKey> & my_node_bin =
    node_sorter.bin();

  Parallel::Sort<Parallel::DofObjectKey> elem_sorter (communicator, elem_keys);
  elem_sorter.sort();
  const std::vector<Parallel::DofObjectKey> & my_elem_bin =
    elem_sorter.bin();

  // The largest key on each processor; these upper bounds let any key be
  // mapped to the processor whose bin contains it.
  std::vector<Parallel::DofObjectKey>
    node_upper_bounds(communicator.size()),
    elem_upper_bounds(communicator.size());

  std::vector<Parallel::DofObjectKey> recvbuf(2*communicator.size());
  std::vector<unsigned short int>
    empty_nodes(communicator.size()),
    empty_elem(communicator.size());
  std::vector<Parallel::DofObjectKey> my_max(2);

  communicator.allgather (static_cast<unsigned short int>(my_node_bin.empty()), empty_nodes);
  communicator.allgather (static_cast<unsigned short int>(my_elem_bin.empty()), empty_elem);

  if (!my_node_bin.empty()) my_max[0] = my_node_bin.back();
  if (!my_elem_bin.empty()) my_max[1] = my_elem_bin.back();

  // (elided: my_max is allgathered, so entries 2*p+0 / 2*p+1 below hold
  //  processor p's node / element maxima)
  for (auto p : make_range(communicator.size()))
    {
      node_upper_bounds[p] = my_max[2*p+0];
      elem_upper_bounds[p] = my_max[2*p+1];
    }

  // A processor with an empty bin inherits the bound of its predecessor, so
  // lower_bound searches over these vectors still resolve to a valid owner.
  for (processor_id_type p = 1; p != communicator.size(); ++p)
    {
      if (empty_nodes[p]) node_upper_bounds[p] = node_upper_bounds[p-1];
      if (empty_elem[p])  elem_upper_bounds[p] = elem_upper_bounds[p-1];
    }
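// From here on the owner of any key is found by a std::lower_bound search
// over node_upper_bounds / elem_upper_bounds, and a key's global index is the
// owning processor's bin offset plus the key's position inside that bin.
// This is exactly the lookup the request/reply loops below implement.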
  // Determine the global indices of the nodes.
  {
    // For each local node, ask the processor whose bin contains its key.
    std::map<processor_id_type, std::vector<Parallel::DofObjectKey>>
      requested_ids;
    std::map<dof_id_type, std::vector<dof_id_type>>
      filled_request;

    for (const auto & node : mesh.node_ptr_range())
      {
        const Parallel::DofObjectKey hi =
          get_dofobject_key (*node, bbox, bboxinv);
        const processor_id_type pid =
          cast_int<processor_id_type>
          (std::distance (node_upper_bounds.begin(),
                          std::lower_bound(node_upper_bounds.begin(),
                                           node_upper_bounds.end(),
                                           hi)));

        requested_ids[pid].push_back(hi);
      }

    // Offset of this processor's node bin in the global numbering.
    std::vector<dof_id_type> node_bin_sizes(communicator.size());
    communicator.allgather (static_cast<dof_id_type>(my_node_bin.size()), node_bin_sizes);

    dof_id_type my_offset = 0;
    for (auto pid : make_range(communicator.rank()))
      my_offset += node_bin_sizes[pid];
    // Gather functor: runs on the processor owning the requested keys and
    // translates each key into a global index.
    auto gather_functor =
      [&]  // (capture list abbreviated in this excerpt)
      (processor_id_type,
       const std::vector<Parallel::DofObjectKey> & keys,
       std::vector<dof_id_type> & global_ids)
      {
        const std::size_t keys_size = keys.size();
        global_ids.reserve(keys_size);
        for (std::size_t idx=0; idx != keys_size; idx++)
          {
            const Parallel::DofObjectKey & hi = keys[idx];
            libmesh_assert_less_equal (hi, node_upper_bounds[communicator.rank()]);

            // Find the requested key in my sorted node bin; it must be there.
            std::vector<Parallel::DofObjectKey>::const_iterator pos =
              std::lower_bound (my_node_bin.begin(), my_node_bin.end(), hi);
            libmesh_assert_equal_to (*pos, hi);

            // Global index = position within my bin + my bin's offset.
            global_ids.push_back(cast_int<dof_id_type>(std::distance(my_node_bin.begin(), pos) + my_offset));
          }
      };
    // Action functor: runs back on the requesting processor and stores the
    // returned indices, keyed by the responding processor.
    auto action_functor =
      [&filled_request]
      (processor_id_type pid,
       const std::vector<Parallel::DofObjectKey> &,
       const std::vector<dof_id_type> & global_ids)
      {
        filled_request[pid] = global_ids;
      };

    const dof_id_type * ex = nullptr;  // example pointer fixing the reply type
    Parallel::pull_parallel_vector_data
      (communicator, requested_ids, gather_functor, action_functor, ex);
    // Traverse the local nodes again, in the same order used when building the
    // requests, consuming one reply per node.
    std::map<dof_id_type, std::vector<dof_id_type>::const_iterator>
      next_obj_on_proc;
    for (auto & p : filled_request)
      next_obj_on_proc[p.first] = p.second.begin();

    for (auto & node : mesh.node_ptr_range())
      {
        const Parallel::DofObjectKey hi =
          get_dofobject_key (*node, bbox, bboxinv);
        const processor_id_type pid =
          cast_int<processor_id_type>
          (std::distance (node_upper_bounds.begin(),
                          std::lower_bound(node_upper_bounds.begin(),
                                           node_upper_bounds.end(),
                                           hi)));

        libmesh_assert (next_obj_on_proc[pid] != filled_request[pid].end());

        const dof_id_type global_index = *next_obj_on_proc[pid];
        libmesh_assert_less (global_index, mesh.n_nodes());
        node->set_id() = global_index;

        ++next_obj_on_proc[pid];
      }
  }
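// The same request/reply pattern repeats for the elements: build per-processor
// key requests, exchange them with Parallel::pull_parallel_vector_data, then
// walk the local elements in the same order to consume the replies.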
  // Determine the global indices of the elements.
  {
    std::map<processor_id_type, std::vector<Parallel::DofObjectKey>>
      requested_ids;
    std::map<dof_id_type, std::vector<dof_id_type>>
      filled_request;

    for (const auto & elem : mesh.element_ptr_range())
      {
        const Parallel::DofObjectKey hi =
          get_dofobject_key (*elem, bbox, bboxinv);
        const processor_id_type pid =
          cast_int<processor_id_type>
          (std::distance (elem_upper_bounds.begin(),
                          std::lower_bound(elem_upper_bounds.begin(),
                                           elem_upper_bounds.end(),
                                           hi)));

        requested_ids[pid].push_back(hi);
      }

    // Offset of this processor's element bin in the global numbering.
    std::vector<dof_id_type> elem_bin_sizes(communicator.size());
    communicator.allgather (static_cast<dof_id_type>(my_elem_bin.size()), elem_bin_sizes);

    dof_id_type my_offset = 0;
    for (auto pid : make_range(communicator.rank()))
      my_offset += elem_bin_sizes[pid];
    auto gather_functor =
      [&]  // (capture list abbreviated in this excerpt)
      (processor_id_type,
       const std::vector<Parallel::DofObjectKey> & keys,
       std::vector<dof_id_type> & global_ids)
      {
        const std::size_t keys_size = keys.size();
        global_ids.reserve(keys_size);
        for (std::size_t idx=0; idx != keys_size; idx++)
          {
            const Parallel::DofObjectKey & hi = keys[idx];
            libmesh_assert_less_equal (hi, elem_upper_bounds[communicator.rank()]);

            // Find the requested key in my sorted element bin.
            std::vector<Parallel::DofObjectKey>::const_iterator pos =
              std::lower_bound (my_elem_bin.begin(), my_elem_bin.end(), hi);
            libmesh_assert_equal_to (*pos, hi);

            // Global index = position within my bin + my bin's offset.
            global_ids.push_back (cast_int<dof_id_type>(std::distance(my_elem_bin.begin(), pos) + my_offset));
          }
      };
    auto action_functor =
      [&filled_request]
      (processor_id_type pid,
       const std::vector<Parallel::DofObjectKey> &,
       const std::vector<dof_id_type> & global_ids)
      {
        filled_request[pid] = global_ids;
      };

    const dof_id_type * ex = nullptr;
    Parallel::pull_parallel_vector_data
      (communicator, requested_ids, gather_functor, action_functor, ex);
    // Traverse the local elements again in the same order, consuming the replies.
    std::vector<std::vector<dof_id_type>::const_iterator>
      next_obj_on_proc;
    next_obj_on_proc.reserve(communicator.size());
    for (auto pid : make_range(communicator.size()))
      next_obj_on_proc.push_back(filled_request[pid].begin());

    for (auto & elem : mesh.element_ptr_range())
      {
        const Parallel::DofObjectKey hi =
          get_dofobject_key (*elem, bbox, bboxinv);
        const processor_id_type pid =
          cast_int<processor_id_type>
          (std::distance (elem_upper_bounds.begin(),
                          std::lower_bound(elem_upper_bounds.begin(),
                                           elem_upper_bounds.end(),
                                           hi)));

        libmesh_assert (next_obj_on_proc[pid] != filled_request[pid].end());

        const dof_id_type global_index = *next_obj_on_proc[pid];
        libmesh_assert_less (global_index, mesh.n_elem());
        elem->set_id() = global_index;

        ++next_obj_on_proc[pid];
      }
  }
}
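// Typical call site (a sketch, not part of this file): after building or
// repartitioning a distributed mesh one might call
//
//   MeshCommunication mc;
//   mc.assign_global_indices (mesh);
//
// after which node->id() and elem->id() are globally unique and independent
// of the current partitioning.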
#else // LIBMESH_HAVE_LIBHILBERT, LIBMESH_HAVE_MPI
// ... (fallback implementation, elided)
#endif // LIBMESH_HAVE_LIBHILBERT, LIBMESH_HAVE_MPI



#if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)

// Throw an error if any two local objects would receive the same key from
// assign_global_indices().
void MeshCommunication::check_for_duplicate_global_indices (MeshBase & mesh) const
{
  LOG_SCOPE ("check_for_duplicate_global_indices()", "MeshCommunication");
  // (elided: the global bounding box `bbox` of the mesh is computed here)
  const Point bboxinv = invert_bbox(bbox);

  std::vector<Parallel::DofObjectKey>
    node_keys, elem_keys;

  ConstNodeRange nr (mesh.local_nodes_begin(),
                     mesh.local_nodes_end());
  node_keys.resize (nr.size());
  // ... (node_keys are filled as in assign_global_indices)

  // Compare each local node key against every other local node key;
  // identical keys are fatal.
  auto nodei = mesh.local_nodes_begin();
  for (std::size_t i = 0; i != node_keys.size(); ++i, ++nodei)
    {
      auto nodej = mesh.local_nodes_begin();
      for (std::size_t j = 0; j != i; ++j, ++nodej)
        {
          if (node_keys[i] == node_keys[j])
            {
              CFixBitVec icoords[3], jcoords[3];
              get_hilbert_coords(**nodej, bbox, bboxinv, jcoords);
              libMesh::err <<
                "node " << (*nodej)->id() << ", " <<
                *(const Point *)(*nodej) << " has HilbertIndices " <<
                node_keys[j] << std::endl;
              get_hilbert_coords(**nodei, bbox, bboxinv, icoords);
              libMesh::err <<
                "node " << (*nodei)->id() << ", " <<
                *(const Point *)(*nodei) << " has HilbertIndices " <<
                node_keys[i] << std::endl;
              libmesh_error_msg("Error: nodes with duplicate Hilbert keys!");
            }
        }
    }

  ConstElemRange er (mesh.local_elements_begin(),
                     mesh.local_elements_end());
  elem_keys.resize (er.size());
  // ... (elem_keys are filled as in assign_global_indices)
  // Compare each local element key against every other local element key;
  // identical keys at the same refinement level are fatal.
  auto elemi = mesh.local_elements_begin();
  for (std::size_t i = 0; i != elem_keys.size(); ++i, ++elemi)
    {
      auto elemj = mesh.local_elements_begin();
      for (std::size_t j = 0; j != i; ++j, ++elemj)
        {
          if ((elem_keys[i] == elem_keys[j]) &&
              ((*elemi)->level() == (*elemj)->level()))
            {
              libMesh::err <<
                "level " << (*elemj)->level() << " elem\n" <<
                (**elemj) << " vertex average " <<
                (*elemj)->vertex_average() << " has HilbertIndices " <<
                elem_keys[j] << " or " <<
                get_dofobject_key((**elemj), bbox, bboxinv) << std::endl;
              libMesh::err <<
                "level " << (*elemi)->level() << " elem\n" <<
                (**elemi) << " vertex average " <<
                (*elemi)->vertex_average() << " has HilbertIndices " <<
                elem_keys[i] << " or " <<
                get_dofobject_key((**elemi), bbox, bboxinv) << std::endl;
              libmesh_error_msg("Error: level " << (*elemi)->level() <<
                                " elements with duplicate Hilbert keys!");
            }
        }
    }
}
#else // LIBMESH_HAVE_LIBHILBERT, LIBMESH_HAVE_MPI
// ... (fallback implementation, elided)
#endif // LIBMESH_HAVE_LIBHILBERT, LIBMESH_HAVE_MPI



#if defined(LIBMESH_HAVE_LIBHILBERT) && defined(LIBMESH_HAVE_MPI)

// Assign a locally unique, contiguous index (in Hilbert-key order) to each
// object in the input range.
template <typename ForwardIterator>
void MeshCommunication::find_local_indices (const libMesh::BoundingBox & bbox,
                                            const ForwardIterator & begin,
                                            const ForwardIterator & end,
                                            std::unordered_map<dof_id_type, dof_id_type> & index_map) const
{
  LOG_SCOPE ("find_local_indices()", "MeshCommunication");
  const Point bboxinv = invert_bbox(bbox);

  // Sorting the keys in a std::map visits the local objects in Hilbert order.
  std::map<Parallel::DofObjectKey, dof_id_type> hilbert_keys;
  {
    LOG_SCOPE("local_hilbert_indices", "MeshCommunication");
    for (ForwardIterator it=begin; it!=end; ++it)
      {
        const Parallel::DofObjectKey hi =
          get_dofobject_key (**it, bbox, bboxinv);
        hilbert_keys.emplace(hi, (*it)->id());
      }
  }

  // Number the objects consecutively in key order.
  dof_id_type cnt = 0;
  for (auto key_val : hilbert_keys)
    index_map[key_val.second] = cnt++;
}
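// find_local_indices() needs no communication: the map covers local objects
// only, so the resulting indices are contiguous on this processor but carry
// no global meaning.  find_global_indices() below adds the parallel sort and
// request/reply exchange needed to make the numbering consistent across
// processors.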
// Determine a globally unique, partition-agnostic index for each object in
// the input range.
template <typename ForwardIterator>
void MeshCommunication::find_global_indices (const Parallel::Communicator & communicator,
                                             const libMesh::BoundingBox & bbox,
                                             const ForwardIterator & begin,
                                             const ForwardIterator & end,
                                             std::vector<dof_id_type> & index_map) const
{
  LOG_SCOPE ("find_global_indices()", "MeshCommunication");
  // Number of objects in the range
  const std::size_t n_objects = std::distance (begin, end);

  index_map.clear();
  index_map.reserve(n_objects);

  const Point bboxinv = invert_bbox(bbox);

  // Compute the Hilbert key of every object in the range; keys of objects this
  // processor is responsible for also go into the vector to be parallel-sorted.
  std::vector<Parallel::DofObjectKey>
    hilbert_keys, sorted_hilbert_keys;

  sorted_hilbert_keys.reserve(n_objects);
  hilbert_keys.reserve(n_objects);

  {
    LOG_SCOPE("compute_hilbert_indices()", "MeshCommunication");
    for (ForwardIterator it=begin; it!=end; ++it)
      {
        const Parallel::DofObjectKey hi =
          get_dofobject_key (**it, bbox, bboxinv);
        hilbert_keys.push_back(hi);

        if ((*it)->processor_id() == communicator.rank())
          sorted_hilbert_keys.push_back(hi);

        // Someone has to take care of unpartitioned objects; let rank 0 do it.
        if ((communicator.rank() == 0) &&
            ((*it)->processor_id() == DofObject::invalid_processor_id))
          sorted_hilbert_keys.push_back(hi);
      }
  }
  // Parallel-sort the locally owned keys; each processor ends up holding one
  // sorted bin of the global key sequence.
  START_LOG ("parallel_sort()", "MeshCommunication");
  Parallel::Sort<Parallel::DofObjectKey> sorter (communicator,
                                                 sorted_hilbert_keys);
  sorter.sort();
  STOP_LOG ("parallel_sort()", "MeshCommunication");

  const std::vector<Parallel::DofObjectKey> & my_bin = sorter.bin();

  // The size of each processor's bin, and this processor's offset into the
  // global numbering.
  std::vector<unsigned int> bin_sizes(communicator.size());
  communicator.allgather (static_cast<unsigned int>(my_bin.size()), bin_sizes);

  unsigned int my_offset = 0;
  for (auto pid : make_range(communicator.rank()))
    my_offset += bin_sizes[pid];

  // Each processor's largest key; empty bins inherit the previous bound so
  // lower_bound searches still resolve to a valid owner.
  std::vector<Parallel::DofObjectKey>
    upper_bounds(1);

  if (!my_bin.empty())
    upper_bounds[0] = my_bin.back();
  // (elided: upper_bounds is allgathered, so it holds every processor's max key)

  for (processor_id_type p = 1; p != communicator.size(); ++p)
    if (!bin_sizes[p]) upper_bounds[p] = upper_bounds[p-1];
  // Request the global index of each object from the processor whose bin
  // contains its key; for now, index_map only records which processor that is.
  std::map<processor_id_type, std::vector<Parallel::DofObjectKey>>
    requested_ids;
  std::map<processor_id_type, std::vector<dof_id_type>>
    filled_request;

  std::vector<Parallel::DofObjectKey>::const_iterator hi =
    hilbert_keys.begin();

  for (ForwardIterator it = begin; it != end; ++it)
    {
      std::vector<Parallel::DofObjectKey>::iterator lb =
        std::lower_bound(upper_bounds.begin(), upper_bounds.end(),
                         *hi);

      const processor_id_type pid =
        cast_int<processor_id_type>
        (std::distance (upper_bounds.begin(), lb));

      requested_ids[pid].push_back(*hi);

      // Temporarily remember only the owning processor of this object.
      index_map.push_back(pid);

      ++hi;
    }
  // Gather functor: answer each requested key with its global index.  If a key
  // cannot be found in the local bin, invert the Hilbert mapping to report the
  // physical point it corresponds to.
  auto gather_functor =
    [&]  // (capture list abbreviated in this excerpt)
    (processor_id_type,
     const std::vector<Parallel::DofObjectKey> & keys,
     std::vector<dof_id_type> & global_ids)
    {
      const std::size_t keys_size = keys.size();
      global_ids.reserve(keys_size);
      for (std::size_t idx=0; idx != keys_size; idx++)
        {
          const Parallel::DofObjectKey & hilbert_indices = keys[idx];
          libmesh_assert_less_equal (hilbert_indices, upper_bounds[communicator.rank()]);

          // Find the requested index in my bin.
          std::vector<Parallel::DofObjectKey>::const_iterator pos =
            std::lower_bound (my_bin.begin(), my_bin.end(), hilbert_indices);

          // If we could not find the index, produce a diagnostic before erroring out.
          if (*pos != hilbert_indices)
            {
              Hilbert::BitVecType input;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
              input = hilbert_indices.first;
#else
              input = hilbert_indices;
#endif
              std::vector<CBigBitVec> output(3);
              Hilbert::indexToCoords(output.data(), 8*sizeof(Hilbert::inttype), 3, input);

              const Real max_int_as_real =
                static_cast<Real>(std::numeric_limits<Hilbert::inttype>::max());

              // The key's coordinates in the unit cube...
              Point p_hat(static_cast<Real>(output[0].racks()[0]) / max_int_as_real,
                          static_cast<Real>(output[1].racks()[0]) / max_int_as_real,
                          static_cast<Real>(output[2].racks()[0]) / max_int_as_real);

              // ...mapped back into the mesh bounding box.
              const Real
                xmin = bbox.first(0),
                xmax = bbox.second(0),
                ymin = bbox.first(1),
                ymax = bbox.second(1),
                zmin = bbox.first(2),
                zmax = bbox.second(2);

              Point p(xmin + (xmax-xmin)*p_hat(0),
                      ymin + (ymax-ymin)*p_hat(1),
                      zmin + (zmax-zmin)*p_hat(2));

              libmesh_error_msg("Could not find hilbert indices: "
                                << hilbert_indices
                                << " corresponding to point " << p);
            }

          // Global index = position within my bin + my bin's offset.
          global_ids.push_back (cast_int<dof_id_type>(std::distance(my_bin.begin(), pos) + my_offset));
        }
    };
  auto action_functor =
    [&filled_request]
    (processor_id_type pid,
     const std::vector<Parallel::DofObjectKey> &,
     const std::vector<dof_id_type> & global_ids)
    {
      filled_request[pid] = global_ids;
    };

  const dof_id_type * ex = nullptr;
  Parallel::pull_parallel_vector_data
    (communicator, requested_ids, gather_functor, action_functor, ex);
  // Walk the range again, replacing each object's stored owner with the global
  // index that owner returned.
  std::vector<std::vector<dof_id_type>::const_iterator>
    next_obj_on_proc;
  next_obj_on_proc.reserve(communicator.size());
  for (auto pid : make_range(communicator.size()))
    next_obj_on_proc.push_back(filled_request[pid].begin());

  std::size_t cnt = 0;
  for (ForwardIterator it = begin; it != end; ++it, cnt++)
    {
      const processor_id_type pid =
        cast_int<processor_id_type>(index_map[cnt]);

      libmesh_assert (next_obj_on_proc[pid] != filled_request[pid].end());

      const dof_id_type global_index = *next_obj_on_proc[pid];
      index_map[cnt] = global_index;

      ++next_obj_on_proc[pid];
    }

  libmesh_assert_equal_to(index_map.size(), n_objects);
}
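// Usage sketch (hypothetical caller, not part of this file), assuming a
// distributed mesh `mesh`:
//
//   std::vector<dof_id_type> node_indices;
//   MeshCommunication().find_global_indices
//     (mesh.comm(), MeshTools::create_bounding_box(mesh),
//      mesh.local_nodes_begin(), mesh.local_nodes_end(), node_indices);
//
// node_indices[k] is then the partition-agnostic global index of the k-th
// node in the iteration order of the range.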
#else // LIBMESH_HAVE_LIBHILBERT, LIBMESH_HAVE_MPI

// Without libHilbert/MPI, simply number the objects in traversal order.
template <typename ForwardIterator>
void MeshCommunication::find_local_indices (const libMesh::BoundingBox &,
                                            const ForwardIterator & begin,
                                            const ForwardIterator & end,
                                            std::unordered_map<dof_id_type, dof_id_type> & index_map) const
{
  dof_id_type index = 0;
  for (ForwardIterator it=begin; it!=end; ++it)
    index_map[(*it)->id()] = (index++);
}

template <typename ForwardIterator>
void MeshCommunication::find_global_indices (const Parallel::Communicator &,
                                             const libMesh::BoundingBox &,
                                             const ForwardIterator & begin,
                                             const ForwardIterator & end,
                                             std::vector<dof_id_type> & index_map) const
{
  unsigned int index = 0;
  for (ForwardIterator it=begin; it!=end; ++it)
    index_map.push_back(index++);
}
#endif // LIBMESH_HAVE_LIBHILBERT, LIBMESH_HAVE_MPI



//------------------------------------------------------------------
// Explicit instantiations
template LIBMESH_EXPORT
void MeshCommunication::find_global_indices<MeshBase::const_node_iterator> (const Parallel::Communicator &,
                                                                             const libMesh::BoundingBox &,
                                                                             const MeshBase::const_node_iterator &,
                                                                             const MeshBase::const_node_iterator &,
                                                                             std::vector<dof_id_type> &) const;

template LIBMESH_EXPORT
void MeshCommunication::find_global_indices<MeshBase::const_element_iterator> (const Parallel::Communicator &,
                                                                                const libMesh::BoundingBox &,
                                                                                const MeshBase::const_element_iterator &,
                                                                                const MeshBase::const_element_iterator &,
                                                                                std::vector<dof_id_type> &) const;

template LIBMESH_EXPORT
void MeshCommunication::find_global_indices<MeshBase::node_iterator> (const Parallel::Communicator &,
                                                                       const libMesh::BoundingBox &,
                                                                       const MeshBase::node_iterator &,
                                                                       const MeshBase::node_iterator &,
                                                                       std::vector<dof_id_type> &) const;

template LIBMESH_EXPORT
void MeshCommunication::find_global_indices<MeshBase::element_iterator> (const Parallel::Communicator &,
                                                                          const libMesh::BoundingBox &,
                                                                          const MeshBase::element_iterator &,
                                                                          const MeshBase::element_iterator &,
                                                                          std::vector<dof_id_type> &) const;

template LIBMESH_EXPORT
void MeshCommunication::find_local_indices<MeshBase::const_element_iterator> (const libMesh::BoundingBox &,
                                                                               const MeshBase::const_element_iterator &,
                                                                               const MeshBase::const_element_iterator &,
                                                                               std::unordered_map<dof_id_type, dof_id_type> &) const;