libMesh
partitioner.C
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2019 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 
20 // Local includes
21 #include "libmesh/partitioner.h"
22 
23 // libMesh includes
24 #include "libmesh/elem.h"
25 #include "libmesh/int_range.h"
26 #include "libmesh/libmesh_logging.h"
27 #include "libmesh/mesh_base.h"
28 #include "libmesh/mesh_tools.h"
29 #include "libmesh/mesh_communication.h"
30 #include "libmesh/parallel_ghost_sync.h"
31 
32 // TIMPI includes
33 #include "timpi/parallel_implementation.h"
34 #include "timpi/parallel_sync.h"
35 
36 // C/C++ includes
37 #ifdef LIBMESH_HAVE_PETSC
38 #include "libmesh/ignore_warnings.h"
39 #include "petscmat.h"
40 #include "libmesh/restore_warnings.h"
41 #endif
42 
43 namespace libMesh
44 {
45 
46 
47 
48 // ------------------------------------------------------------
49 // Partitioner static data
50 const dof_id_type Partitioner::communication_blocksize =
51  dof_id_type(1000000);
52 
53 
54 
55 // ------------------------------------------------------------
56 // Partitioner implementation
57 void Partitioner::partition (MeshBase & mesh)
58 {
59  this->partition(mesh,mesh.n_processors());
60 }
61 
62 
63 
64 void Partitioner::partition (MeshBase & mesh,
65  const unsigned int n)
66 {
67  libmesh_parallel_only(mesh.comm());
68 
69  // BSK - temporary fix while redistribution is integrated 6/26/2008
70  // Uncomment this to not repartition in parallel
71  // if (!mesh.is_serial())
72  // return;
73 
74  // we cannot partition into more pieces than we have
75  // active elements!
76  const unsigned int n_parts =
77  static_cast<unsigned int>
78  (std::min(mesh.n_active_elem(), static_cast<dof_id_type>(n)));
79 
80  // Set the number of partitions in the mesh
81  mesh.set_n_partitions()=n_parts;
82 
83  if (n_parts == 1)
84  {
85  this->single_partition (mesh);
86  return;
87  }
88 
89  // First assign a temporary partitioning to any unpartitioned elements
90  Partitioner::partition_unpartitioned_elements(mesh, n_parts);
91 
92  // Call the partitioning function
93  this->_do_partition(mesh,n_parts);
94 
95  // Set the parent's processor ids
96  Partitioner::set_parent_processor_ids(mesh);
97 
98  // Redistribute elements if necessary, before setting node processor
99  // ids, to make sure those will be set consistently
100  mesh.redistribute();
101 
102 #ifdef DEBUG
103  MeshTools::libmesh_assert_valid_remote_elems(mesh);
104 
105  // Messed up elem processor_id()s can leave us without the child
106  // elements we need to restrict vectors on a distributed mesh
107  MeshTools::libmesh_assert_valid_procids<Elem>(mesh);
108 #endif
109 
110  // Set the node's processor ids
111  Partitioner::set_node_processor_ids(mesh);
112 
113 #ifdef DEBUG
114  MeshTools::libmesh_assert_valid_procids<Elem>(mesh);
115 #endif
116 
117  // Give derived Mesh classes a chance to update any cached data to
118  // reflect the new partitioning
119  mesh.update_post_partitioning();
120 }
121 
122 
123 
124 void Partitioner::repartition (MeshBase & mesh)
125 {
126  this->repartition(mesh,mesh.n_processors());
127 }
128 
129 
130 
131 void Partitioner::repartition (MeshBase & mesh,
132  const unsigned int n)
133 {
134  // we cannot partition into more pieces than we have
135  // active elements!
136  const unsigned int n_parts =
137  static_cast<unsigned int>
138  (std::min(mesh.n_active_elem(), static_cast<dof_id_type>(n)));
139 
140  // Set the number of partitions in the mesh
141  mesh.set_n_partitions()=n_parts;
142 
143  if (n_parts == 1)
144  {
145  this->single_partition (mesh);
146  return;
147  }
148 
149  // First assign a temporary partitioning to any unpartitioned elements
150  Partitioner::partition_unpartitioned_elements(mesh, n_parts);
151 
152  // Call the partitioning function
153  this->_do_repartition(mesh,n_parts);
154 
155  // Set the parent's processor ids
156  Partitioner::set_parent_processor_ids(mesh);
157 
158  // Set the node's processor ids
159  Partitioner::set_node_processor_ids(mesh);
160 }
161 
162 
163 
164 
165 
166 void Partitioner::single_partition (MeshBase & mesh)
167 {
168  this->single_partition_range(mesh.elements_begin(),
169  mesh.elements_end());
170 
171  // Redistribute, in case someone (like our unit tests) is doing
172  // something silly (like moving a whole already-distributed mesh
173  // back onto rank 0).
174  mesh.redistribute();
175 }
176 
177 
178 
179 void Partitioner::single_partition_range (MeshBase::element_iterator it,
180  MeshBase::element_iterator end)
181 {
182  LOG_SCOPE("single_partition_range()", "Partitioner");
183 
184  for (auto & elem : as_range(it, end))
185  {
186  elem->processor_id() = 0;
187 
188  // Assign all this element's nodes to processor 0 as well.
189  for (Node & node : elem->node_ref_range())
190  node.processor_id() = 0;
191  }
192 }
193 
194 void Partitioner::partition_unpartitioned_elements (MeshBase & mesh)
195 {
196  Partitioner::partition_unpartitioned_elements(mesh, mesh.n_processors());
197 }
198 
199 
200 
201 void Partitioner::partition_unpartitioned_elements (MeshBase & mesh,
202  const unsigned int n_subdomains)
203 {
204  MeshBase::element_iterator it = mesh.unpartitioned_elements_begin();
205  const MeshBase::element_iterator end = mesh.unpartitioned_elements_end();
206 
207  const dof_id_type n_unpartitioned_elements = MeshTools::n_elem (it, end);
208 
209  // the unpartitioned elements must exist on all processors. If the range is empty on one
210  // it is empty on all, and we can quit right here.
211  if (!n_unpartitioned_elements)
212  return;
213 
214  // find the target subdomain sizes
215  std::vector<dof_id_type> subdomain_bounds(mesh.n_processors());
216 
217  for (auto pid : IntRange<processor_id_type>(0, mesh.n_processors()))
218  {
219  dof_id_type tgt_subdomain_size = 0;
220 
221  // watch out for the case that n_subdomains < n_processors
222  if (pid < n_subdomains)
223  {
224  tgt_subdomain_size = n_unpartitioned_elements/n_subdomains;
225 
226  if (pid < n_unpartitioned_elements%n_subdomains)
227  tgt_subdomain_size++;
228 
229  }
230 
231  //libMesh::out << "pid, #= " << pid << ", " << tgt_subdomain_size << std::endl;
232  if (pid == 0)
233  subdomain_bounds[0] = tgt_subdomain_size;
234  else
235  subdomain_bounds[pid] = subdomain_bounds[pid-1] + tgt_subdomain_size;
236  }
237 
238  libmesh_assert_equal_to (subdomain_bounds.back(), n_unpartitioned_elements);
239 
240  // create the unique mapping for all unpartitioned elements independent of partitioning
241  // determine the global indexing for all the unpartitioned elements
242  std::vector<dof_id_type> global_indices;
243 
244 // By calling this on all processors, a unique range in [0,n_unpartitioned_elements) is constructed.
245  // Only the indices for the elements we pass in are returned in the array.
246  MeshCommunication().find_global_indices (mesh.comm(),
247  MeshTools::create_bounding_box(mesh), it, end,
248  global_indices);
249 
250  dof_id_type cnt=0;
251  for (auto & elem : as_range(it, end))
252  {
253  libmesh_assert_less (cnt, global_indices.size());
254  const dof_id_type global_index =
255  global_indices[cnt++];
256 
257  libmesh_assert_less (global_index, subdomain_bounds.back());
258  libmesh_assert_less (global_index, n_unpartitioned_elements);
259 
260  const processor_id_type subdomain_id =
261  cast_int<processor_id_type>
262  (std::distance(subdomain_bounds.begin(),
263  std::upper_bound(subdomain_bounds.begin(),
264  subdomain_bounds.end(),
265  global_index)));
266  libmesh_assert_less (subdomain_id, n_subdomains);
267 
268  elem->processor_id() = subdomain_id;
269  //libMesh::out << "assigning " << global_index << " to " << subdomain_id << std::endl;
270  }
271 }
272 
273 
274 
275 void Partitioner::set_parent_processor_ids(MeshBase & mesh)
276 {
277  // Ignore the parameter when !LIBMESH_ENABLE_AMR
278  libmesh_ignore(mesh);
279 
280  LOG_SCOPE("set_parent_processor_ids()", "Partitioner");
281 
282 #ifdef LIBMESH_ENABLE_AMR
283 
284  // If the mesh is serial we have access to all the elements,
285  // in particular all the active ones. We can therefore set
286  // the parent processor ids indirectly through their children, and
287  // set the subactive processor ids while examining their active
288  // ancestors.
289  // By convention a parent is assigned to the minimum processor
290  // of all its children, and a subactive is assigned to the processor
291  // of its active ancestor.
292  if (mesh.is_serial())
293  {
294  for (auto & elem : mesh.active_element_ptr_range())
295  {
296  // First set descendents
297  std::vector<Elem *> subactive_family;
298  elem->total_family_tree(subactive_family);
299  for (const auto & f : subactive_family)
300  f->processor_id() = elem->processor_id();
301 
302  // Then set ancestors
303  Elem * parent = elem->parent();
304 
305  while (parent)
306  {
307  // invalidate the parent id, otherwise the min below
308  // will not work if the current parent id is less
309  // than all the children!
310  parent->invalidate_processor_id();
311 
312  for (auto & child : parent->child_ref_range())
313  {
314  libmesh_assert(!child.is_remote());
315  libmesh_assert_not_equal_to (child.processor_id(), DofObject::invalid_processor_id);
316  parent->processor_id() = std::min(parent->processor_id(),
317  child.processor_id());
318  }
319  parent = parent->parent();
320  }
321  }
322  }
323 
324  // When the mesh is parallel we cannot guarantee that parents have access to
325  // all their children.
326  else
327  {
328  // Setting subactive processor ids is easy: we can guarantee
329  // that children have access to all their parents.
330 
331  // Loop over all the active elements in the mesh
332  for (auto & child : mesh.active_element_ptr_range())
333  {
334  std::vector<Elem *> subactive_family;
335  child->total_family_tree(subactive_family);
336  for (const auto & f : subactive_family)
337  f->processor_id() = child->processor_id();
338  }
339 
340  // When the mesh is parallel we cannot guarantee that parents have access to
341  // all their children.
342 
343  // We will use a brute-force approach here. Each processor finds its parent
344  // elements and sets the parent pid to the minimum of its
345  // semilocal descendants.
346  // A global reduction is then performed to make sure the true minimum is found.
347  // As noted, this is required because we cannot guarantee that a parent has
348  // access to all its children on any single processor.
349  libmesh_parallel_only(mesh.comm());
350  libmesh_assert(MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
351  mesh.unpartitioned_elements_end()) == 0);
352 
353  const dof_id_type max_elem_id = mesh.max_elem_id();
354 
355  std::vector<processor_id_type>
356  parent_processor_ids (std::min(communication_blocksize,
357  max_elem_id));
358 
359  for (dof_id_type blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
360  {
361  last_elem_id =
362  std::min(static_cast<dof_id_type>((blk+1)*communication_blocksize),
363  max_elem_id);
364  const dof_id_type first_elem_id = blk*communication_blocksize;
365 
366  std::fill (parent_processor_ids.begin(),
367  parent_processor_ids.end(),
368  DofObject::invalid_processor_id);
369 
370  // first build up local contributions to parent_processor_ids
371  bool have_parent_in_block = false;
372 
373  for (auto & parent : as_range(mesh.ancestor_elements_begin(),
374  mesh.ancestor_elements_end()))
375  {
376  const dof_id_type parent_idx = parent->id();
377  libmesh_assert_less (parent_idx, max_elem_id);
378 
379  if ((parent_idx >= first_elem_id) &&
380  (parent_idx < last_elem_id))
381  {
382  have_parent_in_block = true;
383  processor_id_type parent_pid = DofObject::invalid_processor_id;
384 
385  std::vector<const Elem *> active_family;
386  parent->active_family_tree(active_family);
387  for (const auto & f : active_family)
388  parent_pid = std::min (parent_pid, f->processor_id());
389 
390  const dof_id_type packed_idx = parent_idx - first_elem_id;
391  libmesh_assert_less (packed_idx, parent_processor_ids.size());
392 
393  parent_processor_ids[packed_idx] = parent_pid;
394  }
395  }
396 
397  // then find the global minimum
398  mesh.comm().min (parent_processor_ids);
399 
400  // and assign the ids, if we have a parent in this block.
401  if (have_parent_in_block)
402  for (auto & parent : as_range(mesh.ancestor_elements_begin(),
403  mesh.ancestor_elements_end()))
404  {
405  const dof_id_type parent_idx = parent->id();
406 
407  if ((parent_idx >= first_elem_id) &&
408  (parent_idx < last_elem_id))
409  {
410  const dof_id_type packed_idx = parent_idx - first_elem_id;
411  libmesh_assert_less (packed_idx, parent_processor_ids.size());
412 
413  const processor_id_type parent_pid =
414  parent_processor_ids[packed_idx];
415 
416  libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_processor_id);
417 
418  parent->processor_id() = parent_pid;
419  }
420  }
421  }
422  }
423 
424 #endif // LIBMESH_ENABLE_AMR
425 }
426 
427 void
428 Partitioner::processor_pairs_to_interface_nodes(MeshBase & mesh,
429  std::map<std::pair<processor_id_type, processor_id_type>, std::set<dof_id_type>> & processor_pair_to_nodes)
430 {
431  // This function must be run on all processors at once
432  libmesh_parallel_only(mesh.comm());
433 
434  processor_pair_to_nodes.clear();
435 
436  std::set<dof_id_type> mynodes;
437  std::set<dof_id_type> neighbor_nodes;
438  std::vector<dof_id_type> common_nodes;
439 
440  // Loop over all the active elements
441  for (auto & elem : mesh.active_element_ptr_range())
442  {
443  libmesh_assert(elem);
444 
445  libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);
446 
447  auto n_nodes = elem->n_nodes();
448 
449  // prepare data for this element
450  mynodes.clear();
451  neighbor_nodes.clear();
452  common_nodes.clear();
453 
454  for (unsigned int inode = 0; inode < n_nodes; inode++)
455  mynodes.insert(elem->node_id(inode));
456 
457  for (auto i : elem->side_index_range())
458  {
459  auto neigh = elem->neighbor_ptr(i);
460  if (neigh && !neigh->is_remote() && neigh->processor_id() != elem->processor_id())
461  {
462  neighbor_nodes.clear();
463  common_nodes.clear();
464  auto neigh_n_nodes = neigh->n_nodes();
465  for (unsigned int inode = 0; inode < neigh_n_nodes; inode++)
466  neighbor_nodes.insert(neigh->node_id(inode));
467 
468  std::set_intersection(mynodes.begin(), mynodes.end(),
469  neighbor_nodes.begin(), neighbor_nodes.end(),
470  std::back_inserter(common_nodes));
471 
472  auto & map_set = processor_pair_to_nodes[std::make_pair(std::min(elem->processor_id(), neigh->processor_id()),
473  std::max(elem->processor_id(), neigh->processor_id()))];
474  for (auto global_node_id : common_nodes)
475  map_set.insert(global_node_id);
476  }
477  }
478  }
479 }
480 
481 void Partitioner::set_interface_node_processor_ids_linear(MeshBase & mesh)
482 {
483  // This function must be run on all processors at once
484  libmesh_parallel_only(mesh.comm());
485 
486  std::map<std::pair<processor_id_type, processor_id_type>, std::set<dof_id_type>> processor_pair_to_nodes;
487 
488  processor_pairs_to_interface_nodes(mesh, processor_pair_to_nodes);
489 
490  for (auto & pmap : processor_pair_to_nodes)
491  {
492  std::size_t n_own_nodes = pmap.second.size()/2, i = 0;
493 
494  for (dof_id_type id : pmap.second)
495  {
496  auto & node = mesh.node_ref(id);
497  if (i <= n_own_nodes)
498  node.processor_id() = pmap.first.first;
499  else
500  node.processor_id() = pmap.first.second;
501  i++;
502  }
503  }
504 }
505 
506 void Partitioner::set_interface_node_processor_ids_BFS(MeshBase & mesh)
507 {
508  // This function must be run on all processors at once
509  libmesh_parallel_only(mesh.comm());
510 
511  std::map<std::pair<processor_id_type, processor_id_type>, std::set<dof_id_type>> processor_pair_to_nodes;
512 
513  processor_pairs_to_interface_nodes(mesh, processor_pair_to_nodes);
514 
515  std::unordered_map<dof_id_type, std::vector<const Elem *>> nodes_to_elem_map;
516 
517  MeshTools::build_nodes_to_elem_map(mesh, nodes_to_elem_map);
518 
519  std::vector<const Node *> neighbors;
520  std::set<dof_id_type> neighbors_order;
521  std::vector<dof_id_type> common_nodes;
522  std::queue<dof_id_type> nodes_queue;
523  std::set<dof_id_type> visted_nodes;
524 
525  for (auto & pmap : processor_pair_to_nodes)
526  {
527  std::size_t n_own_nodes = pmap.second.size()/2;
528 
529  // Initialize node assignment
530  for (dof_id_type id : pmap.second)
531  mesh.node_ref(id).processor_id() = pmap.first.second;
532 
533  visted_nodes.clear();
534  for (dof_id_type id : pmap.second)
535  {
536  mesh.node_ref(id).processor_id() = pmap.first.second;
537 
538  if (visted_nodes.find(id) != visted_nodes.end())
539  continue;
540  else
541  {
542  nodes_queue.push(id);
543  visted_nodes.insert(id);
544  if (visted_nodes.size() >= n_own_nodes)
545  break;
546  }
547 
548  while (!nodes_queue.empty())
549  {
550  auto & node = mesh.node_ref(nodes_queue.front());
551  nodes_queue.pop();
552 
553  neighbors.clear();
554  MeshTools::find_nodal_neighbors(mesh, node, nodes_to_elem_map, neighbors);
555  neighbors_order.clear();
556  for (auto & neighbor : neighbors)
557  neighbors_order.insert(neighbor->id());
558 
559  common_nodes.clear();
560  std::set_intersection(pmap.second.begin(), pmap.second.end(),
561  neighbors_order.begin(), neighbors_order.end(),
562  std::back_inserter(common_nodes));
563 
564  for (auto c_node : common_nodes)
565  if (visted_nodes.find(c_node) == visted_nodes.end())
566  {
567  nodes_queue.push(c_node);
568  visted_nodes.insert(c_node);
569  if (visted_nodes.size() >= n_own_nodes)
570  goto queue_done;
571  }
572 
573  if (visted_nodes.size() >= n_own_nodes)
574  goto queue_done;
575  }
576  }
577  queue_done:
578  for (auto node : visted_nodes)
579  mesh.node_ref(node).processor_id() = pmap.first.first;
580  }
581 }
582 
583 void Partitioner::set_interface_node_processor_ids_petscpartitioner(MeshBase & mesh)
584 {
585  libmesh_ignore(mesh); // Only used if LIBMESH_HAVE_PETSC
586 
587  // This function must be run on all processors at once
588  libmesh_parallel_only(mesh.comm());
589 
590 #if LIBMESH_HAVE_PETSC
591  std::map<std::pair<processor_id_type, processor_id_type>, std::set<dof_id_type>> processor_pair_to_nodes;
592 
593  processor_pairs_to_interface_nodes(mesh, processor_pair_to_nodes);
594 
595  std::vector<std::vector<const Elem *>> nodes_to_elem_map;
596 
597  MeshTools::build_nodes_to_elem_map(mesh, nodes_to_elem_map);
598 
599  std::vector<const Node *> neighbors;
600  std::set<dof_id_type> neighbors_order;
601  std::vector<dof_id_type> common_nodes;
602 
603  std::vector<dof_id_type> rows;
604  std::vector<dof_id_type> cols;
605 
606  std::map<dof_id_type, dof_id_type> global_to_local;
607 
608  for (auto & pmap : processor_pair_to_nodes)
609  {
610  unsigned int i = 0;
611 
612  rows.clear();
613  rows.resize(pmap.second.size()+1);
614  cols.clear();
615  for (dof_id_type id : pmap.second)
616  global_to_local[id] = i++;
617 
618  i = 0;
619  for (auto id : pmap.second)
620  {
621  auto & node = mesh.node_ref(id);
622  neighbors.clear();
623  MeshTools::find_nodal_neighbors(mesh, node, nodes_to_elem_map, neighbors);
624  neighbors_order.clear();
625  for (auto & neighbor : neighbors)
626  neighbors_order.insert(neighbor->id());
627 
628  common_nodes.clear();
629  std::set_intersection(pmap.second.begin(), pmap.second.end(),
630  neighbors_order.begin(), neighbors_order.end(),
631  std::back_inserter(common_nodes));
632 
633  rows[i+1] = rows[i] + cast_int<dof_id_type>(common_nodes.size());
634 
635  for (auto c_node : common_nodes)
636  cols.push_back(global_to_local[c_node]);
637 
638  i++;
639  }
640 
641  Mat adj;
642  MatPartitioning part;
643  IS is;
644  PetscInt local_size, rows_size, cols_size;
645  PetscInt *adj_i, *adj_j;
646  const PetscInt *indices;
647  PetscCalloc1(rows.size(), &adj_i);
648  PetscCalloc1(cols.size(), &adj_j);
649  rows_size = cast_int<PetscInt>(rows.size());
650  for (PetscInt ii=0; ii<rows_size; ii++)
651  adj_i[ii] = rows[ii];
652 
653  cols_size = cast_int<PetscInt>(cols.size());
654  for (PetscInt ii=0; ii<cols_size; ii++)
655  adj_j[ii] = cols[ii];
656 
657  const PetscInt sz = cast_int<PetscInt>(pmap.second.size());
658  MatCreateMPIAdj(PETSC_COMM_SELF, sz, sz, adj_i, adj_j,nullptr,&adj);
659  MatPartitioningCreate(PETSC_COMM_SELF,&part);
660  MatPartitioningSetAdjacency(part,adj);
661  MatPartitioningSetNParts(part,2);
662  PetscObjectSetOptionsPrefix((PetscObject)part, "balance_");
663  MatPartitioningSetFromOptions(part);
664  MatPartitioningApply(part,&is);
665 
666  MatDestroy(&adj);
667  MatPartitioningDestroy(&part);
668 
669  ISGetLocalSize(is, &local_size);
670  ISGetIndices(is, &indices);
671  i = 0;
672  for (auto id : pmap.second)
673  {
674  auto & node = mesh.node_ref(id);
675  if (indices[i])
676  node.processor_id() = pmap.first.second;
677  else
678  node.processor_id() = pmap.first.first;
679 
680  i++;
681  }
682  ISRestoreIndices(is, &indices);
683  ISDestroy(&is);
684  }
685 #else
686  libmesh_error_msg("PETSc is required");
687 #endif
688 }
689 
690 
691 void Partitioner::set_node_processor_ids(MeshBase & mesh)
692 {
693  LOG_SCOPE("set_node_processor_ids()","Partitioner");
694 
695  // This function must be run on all processors at once
696  libmesh_parallel_only(mesh.comm());
697 
698  // If we have any unpartitioned elements at this
699  // stage there is a problem
700  libmesh_assert (MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
701  mesh.unpartitioned_elements_end()) == 0);
702 
703 
704  // const dof_id_type orig_n_local_nodes = mesh.n_local_nodes();
705 
706  // libMesh::err << "[" << mesh.processor_id() << "]: orig_n_local_nodes="
707  // << orig_n_local_nodes << std::endl;
708 
709  // Build up request sets. Each node is currently owned by a processor because
710  // it is connected to an element owned by that processor. However, during the
711  // repartitioning phase that element may have been assigned a new processor id, but
712  // it is still resident on the original processor. We need to know where to look
713  // for new ids before assigning new ids, otherwise we may be asking the wrong processors
714  // for the wrong information.
715  //
716  // The only remaining issue is what to do with unpartitioned nodes. Since they are required
717  // to live on all processors we can simply rely on ourselves to number them properly.
718  std::map<processor_id_type, std::vector<dof_id_type>>
719  requested_node_ids;
720 
721  // Loop over all the nodes, count the ones on each processor. We can skip ourself
722  std::map<processor_id_type, dof_id_type> ghost_nodes_from_proc;
723 
724  for (auto & node : mesh.node_ptr_range())
725  {
726  libmesh_assert(node);
727  const processor_id_type current_pid = node->processor_id();
728  if (current_pid != mesh.processor_id() &&
729  current_pid != DofObject::invalid_processor_id)
730  {
731  libmesh_assert_less (current_pid, mesh.n_processors());
732  ghost_nodes_from_proc[current_pid]++;
733  }
734  }
735 
736  // We know how many objects live on each processor, so reserve()
737  // space for each.
738  for (auto pair : ghost_nodes_from_proc)
739  requested_node_ids[pair.first].reserve(pair.second);
740 
741  // We need to get the new pid for each node from the processor
742  // which *currently* owns the node. We can safely skip ourself
743  for (auto & node : mesh.node_ptr_range())
744  {
745  libmesh_assert(node);
746  const processor_id_type current_pid = node->processor_id();
747  if (current_pid != mesh.processor_id() &&
748  current_pid != DofObject::invalid_processor_id)
749  {
750  libmesh_assert_less (requested_node_ids[current_pid].size(),
751  ghost_nodes_from_proc[current_pid]);
752  requested_node_ids[current_pid].push_back(node->id());
753  }
754 
755  // Unset any previously-set node processor ids
756  node->invalidate_processor_id();
757  }
758 
759  // Loop over all the active elements
760  for (auto & elem : mesh.active_element_ptr_range())
761  {
762  libmesh_assert(elem);
763 
764  libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);
765 
766  // Consider updating the processor id on this element's nodes
767  for (Node & node : elem->node_ref_range())
768  {
769  processor_id_type & pid = node.processor_id();
770  pid = node.choose_processor_id(pid, elem->processor_id());
771  }
772  }
773 
774  bool load_balanced_nodes_linear =
775  libMesh::on_command_line ("--load-balanced-nodes-linear");
776 
777  if (load_balanced_nodes_linear)
778  set_interface_node_processor_ids_linear(mesh);
779 
780  bool load_balanced_nodes_bfs =
781  libMesh::on_command_line ("--load-balanced-nodes-bfs");
782 
783  if (load_balanced_nodes_bfs)
784  set_interface_node_processor_ids_BFS(mesh);
785 
786  bool load_balanced_nodes_petscpartition =
787  libMesh::on_command_line ("--load_balanced_nodes_petscpartitioner");
788 
789  if (load_balanced_nodes_petscpartition)
790  set_interface_node_processor_ids_petscpartitioner(mesh);
791 
792  // And loop over the subactive elements, but don't reassign
793  // nodes that are already active on another processor.
794  for (auto & elem : as_range(mesh.subactive_elements_begin(),
795  mesh.subactive_elements_end()))
796  {
797  libmesh_assert(elem);
798 
799  libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);
800 
801  for (Node & node : elem->node_ref_range())
802  if (node.processor_id() == DofObject::invalid_processor_id)
803  node.processor_id() = elem->processor_id();
804  }
805 
806  // Same for the inactive elements -- we will have already gotten most of these
807  // nodes, *except* for the case of a parent with a subset of children which are
808  // ghost elements. In that case some of the parent nodes will not have been
809  // properly handled yet
810  for (auto & elem : as_range(mesh.not_active_elements_begin(),
811  mesh.not_active_elements_end()))
812  {
813  libmesh_assert(elem);
814 
815  libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);
816 
817  for (Node & node : elem->node_ref_range())
818  if (node.processor_id() == DofObject::invalid_processor_id)
819  node.processor_id() = elem->processor_id();
820  }
821 
822  // We can't assert that all nodes are connected to elements, because
823  // a DistributedMesh with NodeConstraints might have pulled in some
824  // remote nodes solely for evaluating those constraints.
825  // MeshTools::libmesh_assert_connected_nodes(mesh);
826 
827  // For such nodes, we'll do a sanity check later when making sure
828  // that we successfully reset their processor ids to something
829  // valid.
830 
831  auto gather_functor =
832  [& mesh]
833  (processor_id_type, const std::vector<dof_id_type> & ids,
834  std::vector<processor_id_type> & new_pids)
835  {
836  const std::size_t ids_size = ids.size();
837  new_pids.resize(ids_size);
838 
839  // Fill those requests in-place
840  for (std::size_t i=0; i != ids_size; ++i)
841  {
842  Node & node = mesh.node_ref(ids[i]);
843  const processor_id_type new_pid = node.processor_id();
844 
845  // We may have an invalid processor_id() on nodes that have been
846  // "detached" from coarsened-away elements but that have not yet
847  // themselves been removed.
848  // libmesh_assert_not_equal_to (new_pid, DofObject::invalid_processor_id);
849  // libmesh_assert_less (new_pid, mesh.n_partitions()); // this is the correct test --
850  new_pids[i] = new_pid; // the number of partitions may
851  } // not equal the number of processors
852  };
853 
854  auto action_functor =
855  [& mesh]
856  (processor_id_type,
857  const std::vector<dof_id_type> & ids,
858  const std::vector<processor_id_type> & new_pids)
859  {
860  const std::size_t ids_size = ids.size();
861  // Copy the pid changes we've now been informed of
862  for (std::size_t i=0; i != ids_size; ++i)
863  {
864  Node & node = mesh.node_ref(ids[i]);
865 
866  // this is the correct test -- the number of partitions may
867  // not equal the number of processors
868 
869  // But: we may have an invalid processor_id() on nodes that
870  // have been "detached" from coarsened-away elements but
871  // that have not yet themselves been removed.
872  // libmesh_assert_less (filled_request[i], mesh.n_partitions());
873 
874  node.processor_id(new_pids[i]);
875  }
876  };
877 
878  const processor_id_type * ex = nullptr;
879  Parallel::pull_parallel_vector_data
880  (mesh.comm(), requested_node_ids, gather_functor, action_functor, ex);
881 
882 #ifdef DEBUG
883  MeshTools::libmesh_assert_valid_procids<Node>(mesh);
884  //MeshTools::libmesh_assert_canonical_node_procids(mesh);
885 #endif
886 }
887 
888 
889 
890 struct SyncLocalIDs
891 {
892  typedef dof_id_type datum;
893 
894  typedef std::unordered_map<dof_id_type, dof_id_type> map_type;
895 
896  SyncLocalIDs(map_type & _id_map) : id_map(_id_map) {}
897 
898  map_type & id_map;
899 
900  void gather_data (const std::vector<dof_id_type> & ids,
901  std::vector<datum> & local_ids) const
902  {
903  local_ids.resize(ids.size());
904 
905  for (auto i : index_range(ids))
906  local_ids[i] = id_map[ids[i]];
907  }
908 
909  void act_on_data (const std::vector<dof_id_type> & ids,
910  const std::vector<datum> & local_ids)
911  {
912  for (auto i : index_range(local_ids))
913  id_map[ids[i]] = local_ids[i];
914  }
915 };
916 
917 void Partitioner::_find_global_index_by_pid_map(const MeshBase & mesh)
918 {
919  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();
920 
921  // Find the number of active elements on each processor. We cannot use
922  // mesh.n_active_elem_on_proc(pid) since that only returns the number of
923  // elements assigned to pid which are currently stored on the calling
924  // processor. This will not in general be correct for parallel meshes
925  // when (pid!=mesh.processor_id()).
926  _n_active_elem_on_proc.resize(mesh.n_processors());
927  mesh.comm().allgather(n_active_local_elem, _n_active_elem_on_proc);
928 
929  libMesh::BoundingBox bbox =
930  MeshTools::create_bounding_box(mesh);
931 
932  _global_index_by_pid_map.clear();
933 
934  // create the mapping which is contiguous by processor
935  MeshCommunication().find_local_indices (bbox,
936  mesh.active_local_elements_begin(),
937  mesh.active_local_elements_end(),
938  _global_index_by_pid_map);
939 
940  SyncLocalIDs sync(_global_index_by_pid_map);
941 
942  Parallel::sync_dofobject_data_by_id
943  (mesh.comm(), mesh.active_elements_begin(), mesh.active_elements_end(), sync);
944 
945  dof_id_type pid_offset=0;
946  for (auto pid : IntRange<processor_id_type>(0, mesh.n_processors()))
947  {
948  for (const auto & elem : as_range(mesh.active_pid_elements_begin(pid),
949  mesh.active_pid_elements_end(pid)))
950  {
951  libmesh_assert_less (_global_index_by_pid_map[elem->id()], _n_active_elem_on_proc[pid]);
952 
953  _global_index_by_pid_map[elem->id()] += pid_offset;
954  }
955 
956  pid_offset += _n_active_elem_on_proc[pid];
957  }
958 }
959 
960 void Partitioner::build_graph (const MeshBase & mesh)
961 {
962  LOG_SCOPE("build_graph()", "ParmetisPartitioner");
963 
964  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();
965  // If we have boundary elements in this mesh, we want to account for
966  // the connectivity between them and interior elements. We can find
967  // interior elements from boundary elements, but we need to build up
968  // a lookup map to do the reverse.
969  typedef std::unordered_multimap<const Elem *, const Elem *> map_type;
970  map_type interior_to_boundary_map;
971 
972  for (const auto & elem : mesh.active_element_ptr_range())
973  {
974  // If we don't have an interior_parent then there's nothing to look us
975  // up.
976  if ((elem->dim() >= LIBMESH_DIM) ||
977  !elem->interior_parent())
978  continue;
979 
980  // get all relevant interior elements
981  std::set<const Elem *> neighbor_set;
982  elem->find_interior_neighbors(neighbor_set);
983 
984  for (const auto & neighbor : neighbor_set)
985  interior_to_boundary_map.insert(std::make_pair(neighbor, elem));
986  }
987 
988 #ifdef LIBMESH_ENABLE_AMR
989  std::vector<const Elem *> neighbors_offspring;
990 #endif
991 
992  // This is costly, and we only need to do it if the mesh has
993  // changed since we last partitioned... but the mesh probably has
994  // changed since we last partitioned, and if it hasn't we don't
995  // have a reliable way to be sure of that.
996  _find_global_index_by_pid_map(mesh);
997 
998  dof_id_type first_local_elem = 0;
999  for (auto pid : IntRange<processor_id_type>(0, mesh.processor_id()))
1000  first_local_elem += _n_active_elem_on_proc[pid];
1001 
1002  _dual_graph.clear();
1003  _dual_graph.resize(n_active_local_elem);
1004  _local_id_to_elem.resize(n_active_local_elem);
1005 
1006  for (const auto & elem : mesh.active_local_element_ptr_range())
1007  {
1008  libmesh_assert (_global_index_by_pid_map.count(elem->id()));
1009  const dof_id_type global_index_by_pid =
1010  _global_index_by_pid_map[elem->id()];
1011 
1012  const dof_id_type local_index =
1013  global_index_by_pid - first_local_elem;
1014  libmesh_assert_less (local_index, n_active_local_elem);
1015 
1016  std::vector<dof_id_type> & graph_row = _dual_graph[local_index];
1017 
1018  // Save this off to make it easy to index later
1019  _local_id_to_elem[local_index] = elem;
1020 
1021  // Loop over the element's neighbors. An element
1022  // adjacency corresponds to a face neighbor
1023  for (auto neighbor : elem->neighbor_ptr_range())
1024  {
1025  if (neighbor != nullptr)
1026  {
1027  // If the neighbor is active treat it
1028  // as a connection
1029  if (neighbor->active())
1030  {
1031  libmesh_assert(_global_index_by_pid_map.count(neighbor->id()));
1032  const dof_id_type neighbor_global_index_by_pid =
1033  _global_index_by_pid_map[neighbor->id()];
1034 
1035  graph_row.push_back(neighbor_global_index_by_pid);
1036  }
1037 
1038 #ifdef LIBMESH_ENABLE_AMR
1039 
1040  // Otherwise we need to find all of the
1041  // neighbor's children that are connected to
1042  // us and add them
1043  else
1044  {
1045  // The side of the neighbor to which
1046  // we are connected
1047  const unsigned int ns =
1048  neighbor->which_neighbor_am_i (elem);
1049  libmesh_assert_less (ns, neighbor->n_neighbors());
1050 
1051  // Get all the active children (& grandchildren, etc...)
1052  // of the neighbor
1053 
1054  // FIXME - this is the wrong thing, since we
1055  // should be getting the active family tree on
1056  // our side only. But adding too many graph
1057  // links may cause hanging nodes to tend to be
1058  // on partition interiors, which would reduce
1059  // communication overhead for constraint
1060  // equations, so we'll leave it.
1061 
1062  neighbor->active_family_tree (neighbors_offspring);
1063 
1064  // Get all the neighbor's children that
1065  // live on that side and are thus connected
1066  // to us
1067  for (const auto & child : neighbors_offspring)
1068  {
1069  // This does not assume a level-1 mesh.
1070  // Note that since children have sides numbered
1071  // coincident with the parent then this is a sufficient test.
1072  if (child->neighbor_ptr(ns) == elem)
1073  {
1074  libmesh_assert (child->active());
1075  libmesh_assert (_global_index_by_pid_map.count(child->id()));
1076  const dof_id_type child_global_index_by_pid =
1077  _global_index_by_pid_map[child->id()];
1078 
1079  graph_row.push_back(child_global_index_by_pid);
1080  }
1081  }
1082  }
1083 
1084 #endif /* ifdef LIBMESH_ENABLE_AMR */
1085 
1086 
1087  }
1088  }
1089 
1090  if ((elem->dim() < LIBMESH_DIM) &&
1091  elem->interior_parent())
1092  {
1093  // get all relevant interior elements
1094  std::set<const Elem *> neighbor_set;
1095  elem->find_interior_neighbors(neighbor_set);
1096 
1097  for (const auto & neighbor : neighbor_set)
1098  {
1099  const dof_id_type neighbor_global_index_by_pid =
1100  _global_index_by_pid_map[neighbor->id()];
1101 
1102  graph_row.push_back(neighbor_global_index_by_pid);
1103  }
1104  }
1105 
1106  // Check for any boundary neighbors
1107  for (const auto & pr : as_range(interior_to_boundary_map.equal_range(elem)))
1108  {
1109  const Elem * neighbor = pr.second;
1110 
1111  const dof_id_type neighbor_global_index_by_pid =
1112  _global_index_by_pid_map[neighbor->id()];
1113 
1114  graph_row.push_back(neighbor_global_index_by_pid);
1115  }
1116  }
1117 
1118 }
1119 
1120 void Partitioner::assign_partitioning (const MeshBase & mesh, const std::vector<dof_id_type> & parts)
1121 {
1122  LOG_SCOPE("assign_partitioning()", "ParmetisPartitioner");
1123 
1124  // This function must be run on all processors at once
1125  libmesh_parallel_only(mesh.comm());
1126 
1127  dof_id_type first_local_elem = 0;
1128  for (auto pid : IntRange<processor_id_type>(0, mesh.processor_id()))
1129  first_local_elem += _n_active_elem_on_proc[pid];
1130 
1131 #ifndef NDEBUG
1132  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();
1133 #endif
1134 
1135  std::map<processor_id_type, std::vector<dof_id_type>>
1136  requested_ids;
1137 
1138  // Results to gather from each processor - kept in a map so we
1139  // do only one loop over elements after all receives are done.
1140  std::map<processor_id_type, std::vector<processor_id_type>>
1141  filled_request;
1142 
1143  for (auto & elem : mesh.active_element_ptr_range())
1144  {
1145  // we need to get the index from the owning processor
1146  // (note we cannot assign it now -- we are iterating
1147  // over elements again and this will be bad!)
1148  requested_ids[elem->processor_id()].push_back(elem->id());
1149  }
1150 
1151  auto gather_functor =
1152  [this,
1153  & parts,
1154 #ifndef NDEBUG
1155  & mesh,
1156  n_active_local_elem,
1157 #endif
1158  first_local_elem]
1159  (processor_id_type, const std::vector<dof_id_type> & ids,
1160  std::vector<processor_id_type> & data)
1161  {
1162  const std::size_t ids_size = ids.size();
1163  data.resize(ids.size());
1164 
1165  for (std::size_t i=0; i != ids_size; i++)
1166  {
1167  const dof_id_type requested_elem_index = ids[i];
1168 
1169  libmesh_assert(_global_index_by_pid_map.count(requested_elem_index));
1170 
1171  const dof_id_type global_index_by_pid =
1172  _global_index_by_pid_map[requested_elem_index];
1173 
1174  const dof_id_type local_index =
1175  global_index_by_pid - first_local_elem;
1176 
1177  libmesh_assert_less (local_index, parts.size());
1178  libmesh_assert_less (local_index, n_active_local_elem);
1179 
1180  const processor_id_type elem_procid =
1181  cast_int<processor_id_type>(parts[local_index]);
1182 
1183  libmesh_assert_less (elem_procid, mesh.n_partitions());
1184 
1185  data[i] = elem_procid;
1186  }
1187  };
1188 
1189  auto action_functor =
1190  [&filled_request]
1191  (processor_id_type pid,
1192  const std::vector<dof_id_type> &,
1193  const std::vector<processor_id_type> & new_procids)
1194  {
1195  filled_request[pid] = new_procids;
1196  };
1197 
1198  // Trade requests with other processors
1199  const processor_id_type * ex = nullptr;
1200  Parallel::pull_parallel_vector_data
1201  (mesh.comm(), requested_ids, gather_functor, action_functor, ex);
1202 
1203  // and finally assign the partitioning.
1204  // note we are iterating in exactly the same order
1205  // used to build up the request, so we can expect the
1206  // required entries to be in the proper sequence.
1207  std::vector<unsigned int> counters(mesh.n_processors(), 0);
1208  for (auto & elem : mesh.active_element_ptr_range())
1209  {
1210  const processor_id_type current_pid = elem->processor_id();
1211 
1212  libmesh_assert_less (counters[current_pid], requested_ids[current_pid].size());
1213 
1214  const processor_id_type elem_procid =
1215  filled_request[current_pid][counters[current_pid]++];
1216 
1217  libmesh_assert_less (elem_procid, mesh.n_partitions());
1218  elem->processor_id() = elem_procid;
1219  }
1220 }
1221 
1222 
1223 } // namespace libMesh
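A minimal usage sketch (not part of partitioner.C): partitioning normally happens automatically when a mesh is prepared, but it can also be requested explicitly through any concrete Partitioner subclass. The example below assumes libMesh's built-in LinearPartitioner, the standard mesh-generation helpers, and an arbitrary 10x10 quad mesh; partition(mesh) forwards to partition(mesh, mesh.n_processors()) exactly as implemented above.

#include "libmesh/libmesh.h"
#include "libmesh/replicated_mesh.h"
#include "libmesh/mesh_generation.h"
#include "libmesh/linear_partitioner.h"

int main (int argc, char ** argv)
{
  libMesh::LibMeshInit init (argc, argv);

  // Build a small 2D mesh to partition (sketch only; any mesh works here).
  libMesh::ReplicatedMesh mesh (init.comm());
  libMesh::MeshTools::Generation::build_square (mesh, 10, 10);

  // Partition onto one subdomain per MPI rank. partition(mesh) forwards to
  // partition(mesh, mesh.n_processors()), which assigns processor_id() to
  // every Elem and Node as shown in the implementation above.
  libMesh::LinearPartitioner partitioner;
  partitioner.partition (mesh);

  return 0;
}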
libMesh::Partitioner::single_partition_range
void single_partition_range(MeshBase::element_iterator it, MeshBase::element_iterator end)
Slightly generalized version of single_partition which acts on a range of elements defined by the pai...
Definition: partitioner.C:179
libMesh::dof_id_type
uint8_t dof_id_type
Definition: id_types.h:67
libMesh::MeshBase::element_iterator
The definition of the element_iterator struct.
Definition: mesh_base.h:1873
libMesh::Elem::total_family_tree
void total_family_tree(std::vector< const Elem * > &family, bool reset=true) const
Same as the family_tree() member, but also adds any subactive descendants.
Definition: elem.C:1457
libMesh::MeshTools::n_elem
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Count up the number of elements of a specific type (as defined by an iterator range).
Definition: mesh_tools.C:705
libMesh::Elem::child_ref_range
SimpleRange< ChildRefIter > child_ref_range()
Returns a range with all children of a parent element, usable in range-based for loops.
Definition: elem.h:1839
libMesh::SyncLocalIDs::act_on_data
void act_on_data(const std::vector< dof_id_type > &ids, const std::vector< datum > &local_ids)
Definition: partitioner.C:909
libMesh::Partitioner::partition_unpartitioned_elements
static void partition_unpartitioned_elements(MeshBase &mesh)
These functions assign processor IDs to newly-created elements (in parallel) which are currently assi...
Definition: partitioner.C:194
libMesh::Partitioner::_do_partition
virtual void _do_partition(MeshBase &mesh, const unsigned int n)=0
This is the actual partitioning method which must be overridden in derived classes.
libMesh::Partitioner::_find_global_index_by_pid_map
virtual void _find_global_index_by_pid_map(const MeshBase &mesh)
Construct contiguous global indices for the current partitioning.
Definition: partitioner.C:917
libMesh::BoundingBox
Defines a Cartesian bounding box by the two corner extremum.
Definition: bounding_box.h:40
libMesh::index_range
IntRange< std::size_t > index_range(const std::vector< T > &vec)
Helper function that returns an IntRange<std::size_t> representing all the indices of the passed-in v...
Definition: int_range.h:106
libMesh
The libMesh namespace provides an interface to certain functionality in the library.
Definition: factoryfunction.C:55
libMesh::Partitioner::set_parent_processor_ids
static void set_parent_processor_ids(MeshBase &mesh)
This function is called after partitioning to set the processor IDs for the inactive parent elements.
Definition: partitioner.C:275
libMesh::MeshCommunication::find_local_indices
void find_local_indices(const libMesh::BoundingBox &, const ForwardIterator &, const ForwardIterator &, std::unordered_map< dof_id_type, dof_id_type > &) const
This method determines a locally unique, contiguous index for each object in the input range.
Definition: mesh_communication_global_indices.C:674
libMesh::SyncLocalIDs::map_type
std::unordered_map< dof_id_type, dof_id_type > map_type
Definition: partitioner.C:894
libMesh::SyncLocalIDs::SyncLocalIDs
SyncLocalIDs(map_type &_id_map)
Definition: partitioner.C:896
end
IterBase * end
Also have a polymorphic pointer to the end object, this prevents iterating past the end.
Definition: variant_filter_iterator.h:343
libMesh::Partitioner::_n_active_elem_on_proc
std::vector< dof_id_type > _n_active_elem_on_proc
The number of active elements on each processor.
Definition: partitioner.h:281
libMesh::Partitioner::communication_blocksize
static const dof_id_type communication_blocksize
The blocksize to use when doing blocked parallel communication.
Definition: partitioner.h:244
mesh
MeshBase & mesh
Definition: mesh_communication.C:1257
libMesh::MeshTools::build_nodes_to_elem_map
void build_nodes_to_elem_map(const MeshBase &mesh, std::vector< std::vector< dof_id_type >> &nodes_to_elem_map)
After calling this function the input vector nodes_to_elem_map will contain the node to element conne...
Definition: mesh_tools.C:248
libMesh::SyncLocalIDs::datum
dof_id_type datum
Definition: partitioner.C:892
libMesh::DofObject::processor_id
processor_id_type processor_id() const
Definition: dof_object.h:829
libMesh::MeshBase::elements_begin
virtual element_iterator elements_begin()=0
Iterate over all the elements in the Mesh.
libMesh::Partitioner::partition
virtual void partition(MeshBase &mesh, const unsigned int n)
Partitions the MeshBase into n parts by setting processor_id() on Nodes and Elems.
Definition: partitioner.C:64
parallel_sync.h
libMesh::MeshCommunication::find_global_indices
void find_global_indices(const Parallel::Communicator &communicator, const libMesh::BoundingBox &, const ForwardIterator &, const ForwardIterator &, std::vector< dof_id_type > &) const
This method determines a globally unique, partition-agnostic index for each object in the input range...
Definition: mesh_communication_global_indices.C:710
libMesh::libmesh_assert
libmesh_assert(ctx)
libMesh::IntRange
The IntRange templated class is intended to make it easy to loop over integers which are indices of a...
Definition: int_range.h:53
libMesh::Partitioner::_global_index_by_pid_map
std::unordered_map< dof_id_type, dof_id_type > _global_index_by_pid_map
Maps active element ids into a contiguous range, as needed by parallel partitioner.
Definition: partitioner.h:272
libMesh::MeshTools::create_bounding_box
libMesh::BoundingBox create_bounding_box(const MeshBase &mesh)
The same functionality as the deprecated MeshTools::bounding_box().
Definition: mesh_tools.C:389
libMesh::MeshBase
This is the MeshBase class.
Definition: mesh_base.h:78
libMesh::Partitioner::set_interface_node_processor_ids_petscpartitioner
static void set_interface_node_processor_ids_petscpartitioner(MeshBase &mesh)
Nodes on the partitioning interface are partitioned into two groups using a PETSc partitioner for each...
Definition: partitioner.C:583
libMesh::Partitioner::_dual_graph
std::vector< std::vector< dof_id_type > > _dual_graph
A dual graph corresponding to the mesh; it is typically used by the partitioner.
Definition: partitioner.h:288
libMesh::libmesh_ignore
void libmesh_ignore(const Args &...)
Definition: libmesh_common.h:526
parallel_implementation.h
libMesh::SyncLocalIDs::gather_data
void gather_data(const std::vector< dof_id_type > &ids, std::vector< datum > &local_ids) const
Definition: partitioner.C:900
libMesh::processor_id_type
uint8_t processor_id_type
Definition: id_types.h:104
libMesh::SyncLocalIDs::id_map
map_type & id_map
Definition: partitioner.C:898
libMesh::Node
A Node is like a Point, but with more information.
Definition: node.h:52
libMesh::Partitioner::set_interface_node_processor_ids_BFS
static void set_interface_node_processor_ids_BFS(MeshBase &mesh)
Nodes on the partitioning interface are clustered into two groups using a BFS (Breadth First Search) scheme for...
Definition: partitioner.C:506
libMesh::as_range
SimpleRange< IndexType > as_range(const std::pair< IndexType, IndexType > &p)
Helper function that allows us to treat a homogeneous pair as a range.
Definition: simple_range.h:57
n_nodes
const dof_id_type n_nodes
Definition: tecplot_io.C:68
libMesh::MeshTools::find_nodal_neighbors
void find_nodal_neighbors(const MeshBase &mesh, const Node &n, const std::vector< std::vector< const Elem * >> &nodes_to_elem_map, std::vector< const Node * > &neighbors)
Given a mesh and a node in the mesh, the vector will be filled with every node directly attached to t...
Definition: mesh_tools.C:743
libMesh::Partitioner::single_partition
void single_partition(MeshBase &mesh)
Trivially "partitions" the mesh for one processor.
Definition: partitioner.C:166
libMesh::is
PetscErrorCode PetscInt const PetscInt IS * is
Definition: petsc_dm_wrapper.C:60
libMesh::Elem::parent
const Elem * parent() const
Definition: elem.h:2434
distance
Real distance(const Point &p)
Definition: subdomains_ex3.C:50
libMesh::Parallel::sync_dofobject_data_by_id
void sync_dofobject_data_by_id(const Communicator &comm, const Iterator &range_begin, const Iterator &range_end, SyncFunctor &sync)
Request data about a range of ghost dofobjects uniquely identified by their id.
Definition: parallel_ghost_sync.h:338
libMesh::Partitioner::set_node_processor_ids
static void set_node_processor_ids(MeshBase &mesh)
This function is called after partitioning to set the processor IDs for the nodes.
Definition: partitioner.C:691
libMesh::DofObject::id
dof_id_type id() const
Definition: dof_object.h:767
libMesh::MeshTools::libmesh_assert_valid_remote_elems
void libmesh_assert_valid_remote_elems(const MeshBase &mesh)
A function for verifying that active local elements' neighbors are never remote elements.
Definition: mesh_tools.C:1242
libMesh::Partitioner::build_graph
virtual void build_graph(const MeshBase &mesh)
Build a dual graph for partitioner.
Definition: partitioner.C:960
libMesh::SyncLocalIDs
Definition: partitioner.C:890
libMesh::DofObject::invalid_processor_id
static const processor_id_type invalid_processor_id
An invalid processor_id to distinguish DoFs that have not been assigned to a processor.
Definition: dof_object.h:432
libMesh::Partitioner::assign_partitioning
void assign_partitioning(const MeshBase &mesh, const std::vector< dof_id_type > &parts)
Assign the computed partitioning to the mesh.
Definition: partitioner.C:1120
libMesh::Elem
This is the base class from which all geometric element types are derived.
Definition: elem.h:100
libMesh::Partitioner::set_interface_node_processor_ids_linear
static void set_interface_node_processor_ids_linear(MeshBase &mesh)
Nodes on the partitioning interface are linearly assigned to each pair of processors.
Definition: partitioner.C:481
libMesh::on_command_line
bool on_command_line(std::string arg)
Definition: libmesh.C:898
data
IterBase * data
Ideally this private member data should have protected access.
Definition: variant_filter_iterator.h:337
libMesh::DofObject::invalidate_processor_id
void invalidate_processor_id()
Sets the processor id to invalid_processor_id.
Definition: dof_object.h:719
libMesh::MeshCommunication
This is the MeshCommunication class.
Definition: mesh_communication.h:50
libMesh::Partitioner::repartition
void repartition(MeshBase &mesh, const unsigned int n)
Repartitions the MeshBase into n parts.
Definition: partitioner.C:131
libMesh::Partitioner::_do_repartition
virtual void _do_repartition(MeshBase &mesh, const unsigned int n)
This is the actual re-partitioning method which can be overridden in derived classes.
Definition: partitioner.h:237
libMesh::Partitioner::processor_pairs_to_interface_nodes
static void processor_pairs_to_interface_nodes(MeshBase &mesh, std::map< std::pair< processor_id_type, processor_id_type >, std::set< dof_id_type >> &processor_pair_to_nodes)
On the partitioning interface, a surface is shared by two and only two processors.
Definition: partitioner.C:428
libMesh::Partitioner::_local_id_to_elem
std::vector< Elem * > _local_id_to_elem
Definition: partitioner.h:291
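The interface-node balancing branches in set_node_processor_ids() are opt-in at run time: each is entered only when libMesh::on_command_line() finds the corresponding flag, so any application that forwards argc/argv to LibMeshInit can enable them. A small sketch of the same queries, with the flag strings copied verbatim from the source above (note the underscore spelling of the PETSc flag):

#include "libmesh/libmesh.h"
#include <iostream>

int main (int argc, char ** argv)
{
  libMesh::LibMeshInit init (argc, argv);

  // The same flag strings queried in Partitioner::set_node_processor_ids().
  if (libMesh::on_command_line ("--load-balanced-nodes-linear"))
    std::cout << "linear interface-node balancing requested\n";
  if (libMesh::on_command_line ("--load-balanced-nodes-bfs"))
    std::cout << "BFS interface-node balancing requested\n";
  if (libMesh::on_command_line ("--load_balanced_nodes_petscpartitioner"))
    std::cout << "PETSc interface-node balancing requested\n";

  return 0;
}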