Line data Source code
1 : // The libMesh Finite Element Library.
2 : // Copyright (C) 2002-2025 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 :
4 : // This library is free software; you can redistribute it and/or
5 : // modify it under the terms of the GNU Lesser General Public
6 : // License as published by the Free Software Foundation; either
7 : // version 2.1 of the License, or (at your option) any later version.
8 :
9 : // This library is distributed in the hope that it will be useful,
10 : // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 : // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 : // Lesser General Public License for more details.
13 :
14 : // You should have received a copy of the GNU Lesser General Public
15 : // License along with this library; if not, write to the Free Software
16 : // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 :
18 :
19 :
20 : // Local includes
21 : #include "libmesh/distributed_mesh.h"
22 :
23 : // libMesh includes
24 : #include "libmesh/boundary_info.h"
25 : #include "libmesh/elem.h"
26 : #include "libmesh/libmesh_logging.h"
27 : #include "libmesh/mesh_communication.h"
28 : #include "libmesh/mesh_tools.h"
29 : #include "libmesh/partitioner.h"
30 : #include "libmesh/string_to_enum.h"
31 :
32 : // TIMPI includes
33 : #include "timpi/parallel_implementation.h"
34 : #include "timpi/parallel_sync.h"
35 :
36 :
37 : namespace libMesh
38 : {
39 :
40 : // ------------------------------------------------------------
41 : // DistributedMesh class member functions
// Constructor.  Builds an (initially empty, serial) DistributedMesh
// on communicator comm_in with spatial dimension d.
//
// The "_next_free_*" id counters are seeded so that ids handed out by
// different ranks interleave without communication: ids congruent to
// processor_id() (mod n_processors()+1) are reserved for rank-local
// objects, and ids congruent to n_processors() for unpartitioned
// objects (see update_parallel_id_counts()).
DistributedMesh::DistributedMesh (const Parallel::Communicator & comm_in,
                                  unsigned char d) :
  UnstructuredMesh (comm_in,d), _is_serial(true),
  _is_serial_on_proc_0(true),
  _deleted_coarse_elements(false),
  _n_nodes(0), _n_elem(0), _max_node_id(0), _max_elem_id(0),
  _next_free_local_node_id(this->processor_id()),
  _next_free_local_elem_id(this->processor_id()),
  _next_free_unpartitioned_node_id(this->n_processors()),
  _next_free_unpartitioned_elem_id(this->n_processors())
#ifdef LIBMESH_ENABLE_UNIQUE_ID
  , _next_unpartitioned_unique_id(this->n_processors())
#endif
{
#ifdef LIBMESH_ENABLE_UNIQUE_ID
  _next_unique_id = this->processor_id();
#endif

  // The partitioner defaults to ParMETIS but can be overridden on the
  // command line via --default-partitioner.
  const std::string default_partitioner = "parmetis";
  const std::string my_partitioner =
    libMesh::command_line_value("--default-partitioner",
                                default_partitioner);
  _partitioner = Partitioner::build
    (Utility::string_to_enum<PartitionerType>(my_partitioner));
}
67 :
// Move assignment.  Transfers all mesh data out of other_mesh; the
// ordering of the sub-moves below is significant.
DistributedMesh & DistributedMesh::operator= (DistributedMesh && other_mesh)
{
  LOG_SCOPE("operator=(&&)", "DistributedMesh");

  // Move assign as an UnstructuredMesh.
  this->UnstructuredMesh::operator=(std::move(other_mesh));

  // Nodes and elements belong to DistributedMesh and have to be
  // moved before we can move arbitrary GhostingFunctor, Partitioner,
  // etc. subclasses.
  this->move_nodes_and_elements(std::move(other_mesh));

  // But move_nodes_and_elems misses (or guesses about) some of our
  // subclass values, and we want more precision than a guess.
  _deleted_coarse_elements = other_mesh._deleted_coarse_elements;
  _extra_ghost_elems = std::move(other_mesh._extra_ghost_elems);

  // Handle remaining MeshBase moves.
  this->post_dofobject_moves(std::move(other_mesh));

  return *this;
}
90 :
91 270 : MeshBase & DistributedMesh::assign(MeshBase && other_mesh)
92 : {
93 270 : *this = std::move(cast_ref<DistributedMesh&>(other_mesh));
94 :
95 270 : return *this;
96 : }
97 :
// Local (no-communication) equality test for DistributedMesh-specific
// state.  Returns false if other_mesh_base is not a DistributedMesh,
// if any cached flag/count/max-id differs, if the node and element
// containers differ, or if the extra-ghost-element sets don't match
// element-for-element (matched by id).
bool DistributedMesh::subclass_locally_equals(const MeshBase & other_mesh_base) const
{
  const DistributedMesh * dist_mesh_ptr =
    dynamic_cast<const DistributedMesh *>(&other_mesh_base);
  if (!dist_mesh_ptr)
    return false;
  const DistributedMesh & other_mesh = *dist_mesh_ptr;

  if (_is_serial != other_mesh._is_serial ||
      _is_serial_on_proc_0 != other_mesh._is_serial_on_proc_0 ||
      _deleted_coarse_elements != other_mesh._deleted_coarse_elements ||
      _n_nodes != other_mesh._n_nodes ||
      _n_elem != other_mesh._n_elem ||
      _max_node_id != other_mesh._max_node_id ||
      _max_elem_id != other_mesh._max_elem_id ||
      // We expect these things to change in a prepare_for_use();
      // they're conceptually "mutable", so they are deliberately
      // excluded from the comparison...
      /*
      _next_free_local_node_id != other_mesh._next_free_local_node_id ||
      _next_free_local_elem_id != other_mesh._next_free_local_elem_id ||
      _next_free_unpartitioned_node_id != other_mesh._next_free_unpartitioned_node_id ||
      _next_free_unpartitioned_elem_id != other_mesh._next_free_unpartitioned_elem_id ||
#ifdef LIBMESH_ENABLE_UNIQUE_ID
      _next_unpartitioned_unique_id != other_mesh._next_unpartitioned_unique_id ||
#endif
      */
      !this->nodes_and_elements_equal(other_mesh))
    return false;

  if (_extra_ghost_elems.size() !=
      other_mesh._extra_ghost_elems.size())
    return false;
  for (auto & elem : _extra_ghost_elems)
    {
      libmesh_assert(this->query_elem_ptr(elem->id()) == elem);
      const Elem * other_elem = other_mesh.query_elem_ptr(elem->id());
      if (!other_elem ||
          !other_mesh._extra_ghost_elems.count(const_cast<Elem *>(other_elem)))
        return false;
    }

  return true;
}
141 :
// Destructor.  The qualified clear() call deliberately avoids virtual
// dispatch, which would be unsafe once derived-class parts have been
// destroyed.
DistributedMesh::~DistributedMesh ()
{
  this->DistributedMesh::clear(); // Free nodes and elements
}
146 :
147 :
148 : // This might be specialized later, but right now it's just here to
149 : // make sure the compiler doesn't give us a default (non-deep) copy
150 : // constructor instead.
DistributedMesh::DistributedMesh (const DistributedMesh & other_mesh) :
  DistributedMesh(static_cast<const MeshBase &>(other_mesh))
{
  // The delegated MeshBase copy constructor did the deep node/element
  // copy but made conservative guesses for cached state; overwrite
  // those guesses with other_mesh's exact values.
  _is_serial = other_mesh._is_serial;
  _is_serial_on_proc_0 = other_mesh._is_serial_on_proc_0;
  _deleted_coarse_elements = other_mesh._deleted_coarse_elements;

  _n_nodes = other_mesh.n_nodes();
  _n_elem = other_mesh.n_elem();
  _max_node_id = other_mesh.max_node_id();
  _max_elem_id = other_mesh.max_elem_id();
  _next_free_local_node_id =
    other_mesh._next_free_local_node_id;
  _next_free_local_elem_id =
    other_mesh._next_free_local_elem_id;
  _next_free_unpartitioned_node_id =
    other_mesh._next_free_unpartitioned_node_id;
  _next_free_unpartitioned_elem_id =
    other_mesh._next_free_unpartitioned_elem_id;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
  _next_unique_id =
    other_mesh._next_unique_id;
  _next_unpartitioned_unique_id =
    other_mesh._next_unpartitioned_unique_id;
#endif

  // Need to copy extra_ghost_elems, mapping each to our own copy of
  // the corresponding element (matched by id).
  for (auto & elem : other_mesh._extra_ghost_elems)
    _extra_ghost_elems.insert(this->elem_ptr(elem->id()));
}
181 :
182 :
183 :
// Deep-copy construction from an arbitrary MeshBase: copies nodes,
// elements, boundary info, subdomain names, constraint rows, and the
// source mesh's preparation policies.
DistributedMesh::DistributedMesh (const MeshBase & other_mesh) :
  UnstructuredMesh (other_mesh), _is_serial(other_mesh.is_serial()),
  _is_serial_on_proc_0(other_mesh.is_serial()),
  _deleted_coarse_elements(true), // better safe than sorry...
  _n_nodes(0), _n_elem(0), _max_node_id(0), _max_elem_id(0),
  _next_free_local_node_id(this->processor_id()),
  _next_free_local_elem_id(this->processor_id()),
  _next_free_unpartitioned_node_id(this->n_processors()),
  _next_free_unpartitioned_elem_id(this->n_processors())
{
  this->copy_nodes_and_elements(other_mesh, true);

  // Preserve the source mesh's preparation policies.
  this->allow_find_neighbors(other_mesh.allow_find_neighbors());
  this->allow_renumbering(other_mesh.allow_renumbering());
  this->allow_remote_element_removal(other_mesh.allow_remote_element_removal());
  this->skip_partitioning(other_mesh.skip_partitioning());

  // The prepare_for_use() in copy_nodes_and_elements() is going to be
  // tricky to remove without breaking backwards compatibility, but it
  // updates some things we want to just copy.
  this->copy_cached_data(other_mesh);

  this->copy_constraint_rows(other_mesh);

  this->_is_prepared = other_mesh.is_prepared();

  // Deep-copy boundary ids and names.
  auto & this_boundary_info = this->get_boundary_info();
  const auto & other_boundary_info = other_mesh.get_boundary_info();

  this_boundary_info = other_boundary_info;

  this->set_subdomain_name_map() = other_mesh.get_subdomain_name_map();

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  // Start our unique-id counters past everything in use in
  // other_mesh, interleaved by rank as in the default constructor.
  _next_unique_id = other_mesh.parallel_max_unique_id() +
    this->processor_id();
  _next_unpartitioned_unique_id = _next_unique_id +
    (this->n_processors() - this->processor_id());
#endif
  this->update_parallel_id_counts();
}
225 :
// Steals the node and element containers (and the associated cached
// counts and id counters) from other_meshbase, which must actually be
// a DistributedMesh.
void DistributedMesh::move_nodes_and_elements(MeshBase && other_meshbase)
{
  DistributedMesh & other_mesh = cast_ref<DistributedMesh&>(other_meshbase);

  // NOTE(review): n_nodes()/n_elem() are queried *after* moving the
  // containers out of other_mesh — this relies on those accessors
  // returning cached counts rather than recomputing; looks
  // intentional but worth confirming against MeshBase.
  this->_nodes = std::move(other_mesh._nodes);
  this->_n_nodes = other_mesh.n_nodes();

  this->_elements = std::move(other_mesh._elements);
  this->_n_elem = other_mesh.n_elem();

  _is_serial = other_mesh.is_serial();
  _is_serial_on_proc_0 = other_mesh.is_serial_on_zero();
  _deleted_coarse_elements = true; // Better safe than sorry

  _max_node_id = other_mesh.max_node_id();
  _max_elem_id = other_mesh.max_elem_id();

  _next_free_local_node_id = other_mesh._next_free_local_node_id;
  _next_free_local_elem_id = other_mesh._next_free_local_elem_id;
  _next_free_unpartitioned_node_id = other_mesh._next_free_unpartitioned_node_id;
  _next_free_unpartitioned_elem_id = other_mesh._next_free_unpartitioned_elem_id;

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  _next_unpartitioned_unique_id = other_mesh._next_unpartitioned_unique_id;
#endif
}
252 :
253 : // We use cached values for these so they can be called
254 : // from one processor without bothering the rest, but
255 : // we may need to update those caches before doing a full
256 : // renumbering
void DistributedMesh::update_parallel_id_counts()
{
  // This function must be run on all processors at once
  parallel_object_only();

  // Refresh the cached global counts and (exclusive) id maxima via
  // parallel reductions.
  _n_elem = this->parallel_n_elem();
  _n_nodes = this->parallel_n_nodes();
  _max_node_id = this->parallel_max_node_id();
  _max_elem_id = this->parallel_max_elem_id();

  // Round each "next free id" counter up to the first id at or past
  // _max_*_id that lies in its reserved residue class mod
  // (n_processors()+1): n_processors() for unpartitioned objects,
  // processor_id() for local ones.  This keeps future
  // locally-generated ids collision-free across ranks.
  if (_next_free_unpartitioned_elem_id < _max_elem_id)
    _next_free_unpartitioned_elem_id =
      ((_max_elem_id-1) / (this->n_processors() + 1) + 1) *
      (this->n_processors() + 1) + this->n_processors();
  if (_next_free_local_elem_id < _max_elem_id)
    _next_free_local_elem_id =
      ((_max_elem_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
      (this->n_processors() + 1) + this->processor_id();

  if (_next_free_unpartitioned_node_id < _max_node_id)
    _next_free_unpartitioned_node_id =
      ((_max_node_id-1) / (this->n_processors() + 1) + 1) *
      (this->n_processors() + 1) + this->n_processors();
  if (_next_free_local_node_id < _max_node_id)
    _next_free_local_node_id =
      ((_max_node_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
      (this->n_processors() + 1) + this->processor_id();

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  // Same residue-class rounding for the unique-id counters, seeded
  // from the global unique-id maximum.
  _next_unique_id = this->parallel_max_unique_id();
  _next_unpartitioned_unique_id =
    ((_next_unique_id-1) / (this->n_processors() + 1) + 1) *
    (this->n_processors() + 1) + this->n_processors();
  _next_unique_id =
    ((_next_unique_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
    (this->n_processors() + 1) + this->processor_id();
#endif
}
295 :
296 :
297 : // Or in debug mode we may want to test the uncached values without
298 : // changing the cache
299 1594162 : dof_id_type DistributedMesh::parallel_n_elem() const
300 : {
301 : // This function must be run on all processors at once
302 2416 : parallel_object_only();
303 :
304 1594162 : dof_id_type n_local = this->n_local_elem();
305 1594162 : this->comm().sum(n_local);
306 1594162 : n_local += this->n_unpartitioned_elem();
307 1594162 : return n_local;
308 : }
309 :
310 :
311 :
// Returns the exclusive upper bound on element ids in use anywhere,
// i.e. (largest element id)+1, reduced over all ranks; 0 if no rank
// has any elements.
dof_id_type DistributedMesh::parallel_max_elem_id() const
{
  // This function must be run on all processors at once
  parallel_object_only();

  dof_id_type max_local = 0;

  dofobject_container<Elem>::const_reverse_veclike_iterator
    rit = _elements.rbegin();

  const dofobject_container<Elem>::const_reverse_veclike_iterator
    rend = _elements.rend();

  // Look for the maximum element id. Search backwards through
  // elements so we can break out early. Beware of nullptr entries that
  // haven't yet been cleared from _elements.
  for (; rit != rend; ++rit)
    {
      const DofObject *d = *rit;
      if (d)
        {
          // Sanity check: the container files each object under its
          // own id.
          libmesh_assert(_elements[d->id()] == d);
          max_local = d->id() + 1;
          break;
        }
    }

  this->comm().max(max_local);
  return max_local;
}
342 :
343 :
344 :
345 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
346 1664011 : unique_id_type DistributedMesh::parallel_max_unique_id() const
347 : {
348 : // This function must be run on all processors at once
349 1704 : parallel_object_only();
350 :
351 3328022 : unique_id_type max_local = std::max(_next_unique_id,
352 1951512 : _next_unpartitioned_unique_id);
353 1664011 : this->comm().max(max_local);
354 1664011 : return max_local;
355 : }
356 :
357 :
358 :
// Ensures all future unique ids are >= id.  Both counters are rounded
// up into their own residue class mod (n_processors()+1):
// n_processors() for unpartitioned objects, processor_id() for
// rank-local ones (matching update_parallel_id_counts()).
void DistributedMesh::set_next_unique_id(unique_id_type id)
{
  _next_unique_id = id;
  _next_unpartitioned_unique_id =
    ((_next_unique_id-1) / (this->n_processors() + 1) + 1) *
    (this->n_processors() + 1) + this->n_processors();
  _next_unique_id =
    ((_next_unique_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
    (this->n_processors() + 1) + this->processor_id();
}
369 : #endif
370 :
371 :
372 :
373 1594162 : dof_id_type DistributedMesh::parallel_n_nodes() const
374 : {
375 : // This function must be run on all processors at once
376 2416 : parallel_object_only();
377 :
378 1594162 : dof_id_type n_local = this->n_local_nodes();
379 1594162 : this->comm().sum(n_local);
380 1594162 : n_local += this->n_unpartitioned_nodes();
381 1594162 : return n_local;
382 : }
383 :
384 :
385 :
// Returns the exclusive upper bound on node ids in use anywhere,
// i.e. (largest node id)+1, reduced over all ranks; 0 if no rank has
// any nodes.
dof_id_type DistributedMesh::parallel_max_node_id() const
{
  // This function must be run on all processors at once
  parallel_object_only();

  dof_id_type max_local = 0;

  dofobject_container<Node>::const_reverse_veclike_iterator
    rit = _nodes.rbegin();

  const dofobject_container<Node>::const_reverse_veclike_iterator
    rend = _nodes.rend();

  // Look for the maximum node id. Search backwards through
  // nodes so we can break out early. Beware of nullptr entries that
  // haven't yet been cleared from _nodes
  for (; rit != rend; ++rit)
    {
      const DofObject *d = *rit;
      if (d)
        {
          // Sanity check: the container files each object under its
          // own id.
          libmesh_assert(_nodes[d->id()] == d);
          max_local = d->id() + 1;
          break;
        }
    }

  this->comm().max(max_local);
  return max_local;
}
416 :
417 :
418 :
419 39540572 : const Point & DistributedMesh::point (const dof_id_type i) const
420 : {
421 39540572 : return this->node_ref(i);
422 : }
423 :
424 :
425 :
426 40674075 : const Node * DistributedMesh::node_ptr (const dof_id_type i) const
427 : {
428 159127 : libmesh_assert(_nodes[i]);
429 159127 : libmesh_assert_equal_to (_nodes[i]->id(), i);
430 :
431 40674075 : return _nodes[i];
432 : }
433 :
434 :
435 :
436 :
437 695164297 : Node * DistributedMesh::node_ptr (const dof_id_type i)
438 : {
439 526604 : libmesh_assert(_nodes[i]);
440 526604 : libmesh_assert_equal_to (_nodes[i]->id(), i);
441 :
442 695164297 : return _nodes[i];
443 : }
444 :
445 :
446 :
447 :
448 1597560 : const Node * DistributedMesh::query_node_ptr (const dof_id_type i) const
449 : {
450 1597560 : if (const auto it = _nodes.find(i);
451 645217 : it != _nodes.end())
452 : {
453 1318785 : const Node * n = *it;
454 366442 : libmesh_assert (!n || n->id() == i);
455 366442 : return n;
456 : }
457 :
458 278775 : return nullptr;
459 : }
460 :
461 :
462 :
463 :
464 336293818 : Node * DistributedMesh::query_node_ptr (const dof_id_type i)
465 : {
466 336293818 : if (auto it = _nodes.find(i);
467 157596 : it != _nodes.end())
468 : {
469 257275676 : Node * n = *it;
470 109943 : libmesh_assert (!n || n->id() == i);
471 109943 : return n;
472 : }
473 :
474 47653 : return nullptr;
475 : }
476 :
477 :
478 :
479 :
480 3030330 : const Elem * DistributedMesh::elem_ptr (const dof_id_type i) const
481 : {
482 9256 : libmesh_assert(_elements[i]);
483 9256 : libmesh_assert_equal_to (_elements[i]->id(), i);
484 :
485 3030330 : return _elements[i];
486 : }
487 :
488 :
489 :
490 :
491 272888297 : Elem * DistributedMesh::elem_ptr (const dof_id_type i)
492 : {
493 145212 : libmesh_assert(_elements[i]);
494 145212 : libmesh_assert_equal_to (_elements[i]->id(), i);
495 :
496 272888297 : return _elements[i];
497 : }
498 :
499 :
500 :
501 :
502 932585 : const Elem * DistributedMesh::query_elem_ptr (const dof_id_type i) const
503 : {
504 932585 : if (const auto it = _elements.find(i);
505 579015 : it != _elements.end())
506 : {
507 571856 : const Elem * e = *it;
508 222171 : libmesh_assert (!e || e->id() == i);
509 222171 : return e;
510 : }
511 :
512 356844 : return nullptr;
513 : }
514 :
515 :
516 :
517 :
518 564663222 : Elem * DistributedMesh::query_elem_ptr (const dof_id_type i)
519 : {
520 564663222 : if (auto it = _elements.find(i);
521 176775 : it != _elements.end())
522 : {
523 480339958 : Elem * e = *it;
524 173612 : libmesh_assert (!e || e->id() == i);
525 173612 : return e;
526 : }
527 :
528 3163 : return nullptr;
529 : }
530 :
531 :
532 :
533 :
// Adds element e to the mesh, taking ownership of the pointer.  If e
// has no id yet, one is drawn from the local or unpartitioned id
// counter depending on e's processor ownership; the counters, cached
// _max_elem_id/_n_elem, and unique-id bookkeeping are then advanced.
// Returns e (or the pre-existing identical entry for a repeat add).
Elem * DistributedMesh::add_elem (Elem * e)
{
  // Don't try to add nullptrs!
  libmesh_assert(e);

  // Trying to add an existing element is a no-op
  if (e->valid_id() && _elements[e->id()] == e)
    return e;

  const processor_id_type elem_procid = e->processor_id();

  if (!e->valid_id())
    {
      // We should only be creating new ids past the end of the range
      // of existing ids
      libmesh_assert_greater_equal(_next_free_unpartitioned_elem_id,
                                   _max_elem_id);
      libmesh_assert_greater_equal(_next_free_local_elem_id, _max_elem_id);

      // Use the unpartitioned ids for unpartitioned elems, and
      // temporarily for ghost elems
      dof_id_type * next_id = &_next_free_unpartitioned_elem_id;
      if (elem_procid == this->processor_id())
        next_id = &_next_free_local_elem_id;
      e->set_id (*next_id);
    }

  {
    // Advance next_ids up high enough that each is pointing to an
    // unused id and any subsequent increments will still point us
    // to unused ids
    _max_elem_id = std::max(_max_elem_id,
                            static_cast<dof_id_type>(e->id()+1));

    // Each counter stays in its own residue class mod
    // (n_processors()+1), so different ranks never hand out the
    // same id.
    if (_next_free_unpartitioned_elem_id < _max_elem_id)
      _next_free_unpartitioned_elem_id =
        ((_max_elem_id-1) / (this->n_processors() + 1) + 1) *
        (this->n_processors() + 1) + this->n_processors();
    if (_next_free_local_elem_id < _max_elem_id)
      _next_free_local_elem_id =
        ((_max_elem_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
        (this->n_processors() + 1) + this->processor_id();

#ifndef NDEBUG
    // We need a const dofobject_container so we don't inadvertently create
    // nullptr entries when testing for non-nullptr ones
    const dofobject_container<Elem> & const_elements = _elements;
#endif
    libmesh_assert(!const_elements[_next_free_unpartitioned_elem_id]);
    libmesh_assert(!const_elements[_next_free_local_elem_id]);
  }

  // Don't try to overwrite existing elems
  libmesh_assert (!_elements[e->id()]);

  _elements[e->id()] = e;

  // Try to make the cached elem data more accurate
  if (elem_procid == this->processor_id() ||
      elem_procid == DofObject::invalid_processor_id)
    _n_elem++;

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  if (!e->valid_unique_id())
    {
      // Hand out a fresh unique id from the appropriate interleaved
      // counter.
      if (processor_id() == e->processor_id())
        {
          e->set_unique_id(_next_unique_id);
          _next_unique_id += this->n_processors() + 1;
        }
      else
        {
          e->set_unique_id(_next_unpartitioned_unique_id);
          _next_unpartitioned_unique_id += this->n_processors() + 1;
        }
    }
  else
    {
      // Round our unique-id counter up past e's unique id, staying
      // in this rank's residue class.
      _next_unique_id = std::max(_next_unique_id, e->unique_id()+1);
      _next_unique_id =
        ((_next_unique_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
        (this->n_processors() + 1) + this->processor_id();
    }
#endif

  // Unpartitioned elems should be added on every processor
  // And shouldn't be added in the same batch as ghost elems
  // But we might be just adding on processor 0 to
  // broadcast later
  // #ifdef DEBUG
  //   if (elem_procid == DofObject::invalid_processor_id)
  //     {
  //       dof_id_type elem_id = e->id();
  //       this->comm().max(elem_id);
  //       libmesh_assert_equal_to (elem_id, e->id());
  //     }
  // #endif

  // Make sure any new element is given space for any extra integers
  // we've requested
  e->add_extra_integers(_elem_integer_names.size(),
                        _elem_integer_default_values);

  // And set mapping type and data on any new element
  e->set_mapping_type(this->default_mapping_type());
  e->set_mapping_data(this->default_mapping_data());

  return e;
}
643 :
644 :
645 :
646 13758695 : Elem * DistributedMesh::add_elem (std::unique_ptr<Elem> e)
647 : {
648 : // The mesh now takes ownership of the Elem. Eventually the guts of
649 : // add_elem() will get moved to a private helper function, and
650 : // calling add_elem() directly will be deprecated.
651 13758695 : return add_elem(e.release());
652 : }
653 :
654 :
655 :
// Inserts e at e->id(), first deleting any existing element filed
// under that id.  Assumes e's id is already set (it is read
// unconditionally below).  Takes ownership of the pointer.
Elem * DistributedMesh::insert_elem (Elem * e)
{
  // Replace, rather than collide with, any existing element at this id.
  if (_elements[e->id()])
    this->delete_elem(_elements[e->id()]);

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  if (!e->valid_unique_id())
    {
      // Hand out a fresh unique id from the appropriate interleaved
      // counter.
      if (processor_id() == e->processor_id())
        {
          e->set_unique_id(_next_unique_id);
          _next_unique_id += this->n_processors() + 1;
        }
      else
        {
          e->set_unique_id(_next_unpartitioned_unique_id);
          _next_unpartitioned_unique_id += this->n_processors() + 1;
        }
    }
  else
    {
      // Round our unique-id counter up past e's unique id, staying
      // in this rank's residue class.
      _next_unique_id = std::max(_next_unique_id, e->unique_id()+1);
      _next_unique_id =
        ((_next_unique_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
        (this->n_processors() + 1) + this->processor_id();
    }
#endif

  // Try to make the cached elem data more accurate
  processor_id_type elem_procid = e->processor_id();
  if (elem_procid == this->processor_id() ||
      elem_procid == DofObject::invalid_processor_id)
    _n_elem++;

  _elements[e->id()] = e;

  // Make sure any new element is given space for any extra integers
  // we've requested
  e->add_extra_integers(_elem_integer_names.size(),
                        _elem_integer_default_values);

  // And set mapping type and data on any new element
  e->set_mapping_type(this->default_mapping_type());
  e->set_mapping_data(this->default_mapping_data());

  return e;
}
703 :
704 3055727 : Elem * DistributedMesh::insert_elem (std::unique_ptr<Elem> e)
705 : {
706 : // The mesh now takes ownership of the Elem. Eventually the guts of
707 : // insert_elem(Elem*) will get moved to a private helper function, and
708 : // calling insert_elem(Elem*) directly will be deprecated.
709 3055727 : return insert_elem(e.release());
710 : }
711 :
712 :
// Removes e from the mesh and deletes it.  The container slot is set
// to nullptr rather than erased, so live iterators stay valid; stale
// entries are cleaned up later.
void DistributedMesh::delete_elem(Elem * e)
{
  libmesh_assert (e);

  // Try to make the cached elem data more accurate
  _n_elem--;

  // Was this a coarse element, not just a coarsening where we still
  // have some ancestor structure? Was it a *local* element, that we
  // might have been depending on as an owner of local nodes? We'll
  // have to be more careful with our nodes in contract() later; no
  // telling if we just locally orphaned a node that should be
  // globally retained.
  if (e->processor_id() == this->processor_id() &&
      !e->parent())
    _deleted_coarse_elements = true;

  // Delete the element from the BoundaryInfo object
  this->get_boundary_info().remove(e);

  // But not yet from the container; we might invalidate
  // an iterator that way!

  //_elements.erase(e->id());

  // Instead, we set it to nullptr for now

  _elements[e->id()] = nullptr;

  // delete the element
  delete e;
}
745 :
746 :
747 :
748 2675252 : void DistributedMesh::renumber_elem(const dof_id_type old_id,
749 : const dof_id_type new_id)
750 : {
751 : // This could be a no-op
752 2675252 : if (old_id == new_id)
753 0 : return;
754 :
755 2675252 : Elem * el = _elements[old_id];
756 1160 : libmesh_assert (el);
757 1160 : libmesh_assert_equal_to (el->id(), old_id);
758 :
759 2675252 : el->set_id(new_id);
760 1160 : libmesh_assert (!_elements[new_id]);
761 2675252 : _elements[new_id] = el;
762 2675252 : _elements.erase(old_id);
763 : }
764 :
765 :
766 :
767 39044320 : Node * DistributedMesh::add_point (const Point & p,
768 : const dof_id_type id,
769 : const processor_id_type proc_id)
770 : {
771 39044320 : Node * old_n = this->query_node_ptr(id);
772 :
773 39044320 : if (old_n)
774 : {
775 0 : *old_n = p;
776 0 : old_n->processor_id() = proc_id;
777 :
778 0 : return old_n;
779 : }
780 :
781 39004165 : Node * n = Node::build(p, id).release();
782 39044320 : n->processor_id() = proc_id;
783 :
784 39044320 : return DistributedMesh::add_node(n);
785 : }
786 :
787 :
// Takes local ownership of node n: detaches it from the container
// under its old id, marks this rank as its owner, and re-adds it so
// add_node() assigns it a fresh local id (and unique id).
void DistributedMesh::own_node (Node & n)
{
  // This had better be a node in our mesh
  libmesh_assert(_nodes[n.id()] == &n);

  // Detach from the container without deleting the node itself.
  _nodes[n.id()] = nullptr;
  _n_nodes--;

  // Invalidate the id so add_node() will generate a new one from the
  // local counter.
  n.set_id(DofObject::invalid_id);
  n.processor_id() = this->processor_id();

  this->add_node(&n);
}
801 :
802 :
// Adds node n to the mesh, taking ownership of the pointer.  Mirrors
// add_elem(): if n has no id yet, one is drawn from the local or
// unpartitioned counter depending on n's processor ownership; the
// counters, cached _max_node_id/_n_nodes, and unique-id bookkeeping
// are then advanced.  Returns n (or the pre-existing identical entry
// for a repeat add).
Node * DistributedMesh::add_node (Node * n)
{
  // Don't try to add nullptrs!
  libmesh_assert(n);

  // Trying to add an existing node is a no-op
  if (n->valid_id() && _nodes[n->id()] == n)
    return n;

  const processor_id_type node_procid = n->processor_id();

  if (!n->valid_id())
    {
      // We should only be creating new ids past the end of the range
      // of existing ids
      libmesh_assert_greater_equal(_next_free_unpartitioned_node_id,
                                   _max_node_id);
      libmesh_assert_greater_equal(_next_free_local_node_id, _max_node_id);

      // Use the unpartitioned ids for unpartitioned nodes,
      // and temporarily for ghost nodes
      dof_id_type * next_id = &_next_free_unpartitioned_node_id;
      if (node_procid == this->processor_id())
        next_id = &_next_free_local_node_id;
      n->set_id (*next_id);
    }

  {
    // Advance next_ids up high enough that each is pointing to an
    // unused id and any subsequent increments will still point us
    // to unused ids
    _max_node_id = std::max(_max_node_id,
                            static_cast<dof_id_type>(n->id()+1));

    // Each counter stays in its own residue class mod
    // (n_processors()+1), so different ranks never hand out the
    // same id.
    if (_next_free_unpartitioned_node_id < _max_node_id)
      _next_free_unpartitioned_node_id =
        ((_max_node_id-1) / (this->n_processors() + 1) + 1) *
        (this->n_processors() + 1) + this->n_processors();
    if (_next_free_local_node_id < _max_node_id)
      _next_free_local_node_id =
        ((_max_node_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
        (this->n_processors() + 1) + this->processor_id();

#ifndef NDEBUG
    // We need a const dofobject_container so we don't inadvertently create
    // nullptr entries when testing for non-nullptr ones
    const dofobject_container<Node> & const_nodes = _nodes;
#endif
    libmesh_assert(!const_nodes[_next_free_unpartitioned_node_id]);
    libmesh_assert(!const_nodes[_next_free_local_node_id]);
  }

  // Don't try to overwrite existing nodes
  libmesh_assert (!_nodes[n->id()]);

  _nodes[n->id()] = n;

  // Try to make the cached node data more accurate
  if (node_procid == this->processor_id() ||
      node_procid == DofObject::invalid_processor_id)
    _n_nodes++;

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  if (!n->valid_unique_id())
    {
      // Hand out a fresh unique id from the appropriate interleaved
      // counter.
      if (processor_id() == n->processor_id())
        {
          n->set_unique_id(_next_unique_id);
          _next_unique_id += this->n_processors() + 1;
        }
      else
        {
          n->set_unique_id(_next_unpartitioned_unique_id);
          _next_unpartitioned_unique_id += this->n_processors() + 1;
        }
    }
  else
    {
      // Round our unique-id counter up past n's unique id, staying
      // in this rank's residue class.
      _next_unique_id = std::max(_next_unique_id, n->unique_id()+1);
      _next_unique_id =
        ((_next_unique_id + this->n_processors() - 1) / (this->n_processors() + 1) + 1) *
        (this->n_processors() + 1) + this->processor_id();
    }
#endif

  // Make sure any new node is given space for any extra integers
  // we've requested
  n->add_extra_integers(_node_integer_names.size(),
                        _node_integer_default_values);

  // Unpartitioned nodes should be added on every processor
  // And shouldn't be added in the same batch as ghost nodes
  // But we might be just adding on processor 0 to
  // broadcast later
  // #ifdef DEBUG
  //   if (node_procid == DofObject::invalid_processor_id)
  //     {
  //       dof_id_type node_id = n->id();
  //       this->comm().max(node_id);
  //       libmesh_assert_equal_to (node_id, n->id());
  //     }
  // #endif

  return n;
}
906 :
907 29141316 : Node * DistributedMesh::add_node (std::unique_ptr<Node> n)
908 : {
909 : // The mesh now takes ownership of the Node. Eventually the guts of
910 : // add_node() will get moved to a private helper function, and
911 : // calling add_node() directly will be deprecated.
912 29141316 : return add_node(n.release());
913 : }
914 :
915 : #ifdef LIBMESH_ENABLE_DEPRECATED
916 :
917 0 : Node * DistributedMesh::insert_node(Node * n)
918 : {
919 : libmesh_deprecated();
920 0 : return DistributedMesh::add_node(n);
921 : }
922 :
923 0 : Node * DistributedMesh::insert_node(std::unique_ptr<Node> n)
924 : {
925 : libmesh_deprecated();
926 0 : return insert_node(n.release());
927 : }
928 :
929 : #endif
930 :
931 48301680 : void DistributedMesh::delete_node(Node * n)
932 : {
933 4271 : libmesh_assert(n);
934 4271 : libmesh_assert(_nodes[n->id()]);
935 :
936 : // Try to make the cached elem data more accurate
937 48301680 : _n_nodes--;
938 :
939 : // Delete the node from the BoundaryInfo object
940 48301680 : this->get_boundary_info().remove(n);
941 8542 : _constraint_rows.erase(n);
942 :
943 : // But not yet from the container; we might invalidate
944 : // an iterator that way!
945 :
946 : //_nodes.erase(n->id());
947 :
948 : // Instead, we set it to nullptr for now
949 :
950 48301680 : _nodes[n->id()] = nullptr;
951 :
952 : // delete the node
953 48301680 : delete n;
954 48301680 : }
955 :
956 :
957 :
// Move the node currently filed under \p old_id to container slot
// \p new_id, updating the node's stored id to match.
void DistributedMesh::renumber_node(const dof_id_type old_id,
                                    const dof_id_type new_id)
{
  // Renumbering a node onto its own id is a no-op
  if (old_id == new_id)
    return;

  Node * nd = _nodes[old_id];
  libmesh_assert (nd);
  libmesh_assert_equal_to (nd->id(), old_id);

  // If we have nodes shipped to this processor for NodeConstraints
  // use, then those nodes will exist in _nodes, but may not be
  // locatable via a TopologyMap due to the insufficiency of elements
  // connecting to them. If local refinement then wants to create a
  // *new* node in the same location, it will initially get a temporary
  // id, and then make_node_ids_parallel_consistent() will try to move
  // it to the canonical id. We need to account for this case to
  // avoid false positives and memory leaks.
#ifdef LIBMESH_ENABLE_NODE_CONSTRAINTS
  if (_nodes[new_id])
    {
      // The current occupant of new_id must be geometrically
      // coincident with the node being renumbered; anything else
      // would be a genuine id collision, not the special case above.
      libmesh_assert_equal_to (*(Point *)_nodes[new_id],
                               *(Point *)_nodes[old_id]);
      // NOTE(review): this drops the container entry without deleting
      // the duplicate Node; presumably its ownership/cleanup is
      // handled by the NodeConstraints shipping machinery — confirm
      // against make_node_ids_parallel_consistent().
      _nodes.erase(new_id);
    }
#else
  // If we aren't shipping nodes for NodeConstraints, there should be
  // no reason for renumbering one node onto another.
  libmesh_assert (!_nodes[new_id]);
#endif
  // Re-file the node under its new id and keep the DofObject id in
  // sync with the container index.
  _nodes[new_id] = nd;
  nd->set_id(new_id);

  _nodes.erase(old_id);
}
994 :
995 :
996 :
997 511863 : void DistributedMesh::clear ()
998 : {
999 : // Call parent clear function
1000 511863 : MeshBase::clear();
1001 :
1002 : // Clear our elements and nodes
1003 : // There is no need to remove them from
1004 : // the BoundaryInfo data structure since we
1005 : // already cleared it.
1006 511863 : this->DistributedMesh::clear_elems();
1007 :
1008 22025438 : for (auto & node : _nodes)
1009 39297533 : delete node;
1010 :
1011 484 : _nodes.clear();
1012 :
1013 : // We're no longer distributed if we were before
1014 511863 : _is_serial = true;
1015 511863 : _is_serial_on_proc_0 = true;
1016 :
1017 : // We deleted a ton of coarse elements, but their nodes got deleted too so
1018 : // all is copacetic.
1019 511863 : _deleted_coarse_elements = false;
1020 :
1021 : // Correct our caches
1022 511863 : _n_nodes = 0;
1023 511863 : _max_node_id = 0;
1024 512347 : _next_free_local_node_id = this->processor_id();
1025 511863 : _next_free_unpartitioned_node_id = this->n_processors();
1026 511863 : }
1027 :
1028 :
1029 :
1030 517751 : void DistributedMesh::clear_elems ()
1031 : {
1032 8866772 : for (auto & elem : _elements)
1033 8349021 : delete elem;
1034 :
1035 484 : _elements.clear();
1036 :
1037 : // Correct our caches
1038 517751 : _n_elem = 0;
1039 517751 : _max_elem_id = 0;
1040 518235 : _next_free_local_elem_id = this->processor_id();
1041 517751 : _next_free_unpartitioned_elem_id = this->n_processors();
1042 517751 : }
1043 :
1044 :
1045 :
1046 499343 : void DistributedMesh::redistribute ()
1047 : {
1048 : // If this is a truly parallel mesh, go through the redistribution/gather/delete remote steps
1049 499343 : if (!this->is_serial())
1050 : {
1051 : // Construct a MeshCommunication object to actually redistribute the nodes
1052 : // and elements according to the partitioner, and then to re-gather the neighbors.
1053 : MeshCommunication mc;
1054 60027 : mc.redistribute(*this);
1055 :
1056 60027 : this->update_parallel_id_counts();
1057 :
1058 : // We ought to still have valid neighbor links; we communicate
1059 : // them for newly-redistributed elements
1060 : // this->find_neighbors();
1061 :
1062 : // Is this necessary? If we are called from prepare_for_use(), this will be called
1063 : // anyway... but users can always call partition directly, in which case we do need
1064 : // to call delete_remote_elements()...
1065 : //
1066 : // Regardless of whether it's necessary, it isn't safe. We
1067 : // haven't communicated new node processor_ids yet, and we can't
1068 : // delete nodes until we do.
1069 : // this->delete_remote_elements();
1070 : }
1071 : else
1072 : // The base class can handle non-distributed things, like
1073 : // notifying any GhostingFunctors of changes
1074 439316 : MeshBase::redistribute();
1075 499343 : }
1076 :
1077 :
1078 :
// Hook called after the partitioner reassigns processor ids.
// Partitioning changes how many objects are unpartitioned, which the
// parallel id bookkeeping depends on, so refresh those caches.
void DistributedMesh::update_post_partitioning ()
{
  // this->recalculate_n_partitions();

  // Partitioning changes our numbers of unpartitioned objects
  this->update_parallel_id_counts();
}
1086 :
1087 :
1088 :
// Debug check that every id slot in \p objects (a Node or Elem
// container) is consistent across all processors: id, processor id,
// and unique id must agree wherever a copy exists, and the owning
// processor must actually hold a copy.  Collective operation.
template <typename T>
void DistributedMesh::libmesh_assert_valid_parallel_object_ids(const dofobject_container<T> & objects) const
{
  // This function must be run on all processors at once
  parallel_object_only();

  // Scan up to the global max id over both nodes and elements so that
  // every processor iterates the same number of times (each loop body
  // below performs collective communication).
  const dof_id_type pmax_node_id = this->parallel_max_node_id();
  const dof_id_type pmax_elem_id = this->parallel_max_elem_id();
  const dof_id_type pmax_id = std::max(pmax_node_id, pmax_elem_id);

  for (dof_id_type i=0; i != pmax_id; ++i)
    {
      T * obj = objects[i]; // Returns nullptr if there's no map entry

      // Local lookups by id should return the requested object
      libmesh_assert(!obj || obj->id() == i);

      // All processors with an object should agree on id
#ifndef NDEBUG
      const dof_id_type dofid = obj && obj->valid_id() ?
        obj->id() : DofObject::invalid_id;
      libmesh_assert(this->comm().semiverify(obj ? &dofid : nullptr));
#endif

      // All processors with an object should agree on processor id
      const dof_id_type procid = obj && obj->valid_processor_id() ?
        obj->processor_id() : DofObject::invalid_processor_id;
      libmesh_assert(this->comm().semiverify(obj ? &procid : nullptr));

      // min over all ranks: processors without a copy contribute
      // invalid_processor_id (the max value), so this recovers the
      // agreed-upon owner id.
      dof_id_type min_procid = procid;
      this->comm().min(min_procid);

      // Either:
      // 1.) I own this elem (min_procid == this->processor_id()) *and* I have a valid pointer to it (obj != nullptr)
      // or
      // 2.) I don't own this elem (min_procid != this->processor_id()). (In this case I may or may not have a valid pointer to it.)

      // Original assert logic
      // libmesh_assert (min_procid != this->processor_id() || obj);

      // More human-understandable logic...
      libmesh_assert (
                      ((min_procid == this->processor_id()) && obj)
                      ||
                      (min_procid != this->processor_id())
                      );

#if defined(LIBMESH_ENABLE_UNIQUE_ID) && !defined(NDEBUG)
      // All processors with an object should agree on unique id
      const unique_id_type uniqueid = obj ? obj->unique_id() : 0;
      libmesh_assert(this->comm().semiverify(obj ? &uniqueid : nullptr));
#endif
    }
}
1143 :
1144 :
1145 :
// Debug check of cross-processor id consistency for both the element
// and node containers.  Collective operation.
void DistributedMesh::libmesh_assert_valid_parallel_ids () const
{
  this->libmesh_assert_valid_parallel_object_ids (this->_elements);
  this->libmesh_assert_valid_parallel_object_ids (this->_nodes);
}
1151 :
1152 :
1153 :
1154 390 : void DistributedMesh::libmesh_assert_valid_parallel_p_levels () const
1155 : {
1156 : #ifndef NDEBUG
1157 : // This function must be run on all processors at once
1158 390 : parallel_object_only();
1159 :
1160 390 : dof_id_type pmax_elem_id = this->parallel_max_elem_id();
1161 :
1162 21356 : for (dof_id_type i=0; i != pmax_elem_id; ++i)
1163 : {
1164 20966 : Elem * el = _elements[i]; // Returns nullptr if there's no map entry
1165 :
1166 20966 : unsigned int p_level = el ? (el->p_level()) : libMesh::invalid_uint;
1167 :
1168 : // All processors with an active element should agree on p level
1169 20966 : libmesh_assert(this->comm().semiverify((el && el->active()) ? &p_level : nullptr));
1170 : }
1171 : #endif
1172 390 : }
1173 :
1174 :
1175 :
1176 :
// Debug check that all processors agree on each element's h and p
// refinement flags.  Collective; a no-op unless AMR is enabled in a
// debug build.
void DistributedMesh::libmesh_assert_valid_parallel_flags () const
{
#if defined(LIBMESH_ENABLE_AMR) && !defined(NDEBUG)
  // This function must be run on all processors at once
  parallel_object_only();

  dof_id_type pmax_elem_id = this->parallel_max_elem_id();

  for (dof_id_type i=0; i != pmax_elem_id; ++i)
    {
      Elem * el = _elements[i]; // Returns nullptr if there's no map entry

      // Processors without a copy contribute nullptr to semiverify()
      // and are excluded from the agreement check.
      unsigned int refinement_flag = el ?
        static_cast<unsigned int> (el->refinement_flag()) : libMesh::invalid_uint;
      unsigned int p_refinement_flag = el ?
        static_cast<unsigned int> (el->p_refinement_flag()) : libMesh::invalid_uint;

      libmesh_assert(this->comm().semiverify(el ? &refinement_flag : nullptr));

      // p refinement flags aren't always kept correct on inactive
      // ghost elements
      libmesh_assert(this->comm().semiverify((el && el->active()) ? &p_refinement_flag : nullptr));
    }
#endif // LIBMESH_ENABLE_AMR
}
1202 :
1203 :
1204 :
// Assign globally contiguous ids to every object (Node or Elem) in
// \p objects: each processor's locally-owned objects receive a
// contiguous block (blocks ordered by processor id), followed by a
// single block, identical on every processor, for unpartitioned
// objects.  Ghost copies learn their owner's new ids via parallel
// pulls.  Returns one past the largest id assigned, i.e. the new
// global object count.  Collective operation.
template <typename T>
dof_id_type
DistributedMesh::renumber_dof_objects(dofobject_container<T> & objects)
{
  // This function must be run on all processors at once
  parallel_object_only();

  typedef typename dofobject_container<T>::veclike_iterator object_iterator;

  // In parallel we may not know what objects other processors have.
  // Start by figuring out how many
  dof_id_type unpartitioned_objects = 0;

  // How many objects we hold, keyed by their owning processor
  // (includes the ones we own ourselves).
  std::unordered_map<processor_id_type, dof_id_type>
    ghost_objects_from_proc;

  object_iterator it = objects.begin();
  object_iterator end = objects.end();

  while (it != end)
    {
      T * obj = *it;

      // Remove any nullptr container entries while we're here.
      if (!obj)
        it = objects.erase(it);
      else
        {
          processor_id_type obj_procid = obj->processor_id();
          if (obj_procid == DofObject::invalid_processor_id)
            unpartitioned_objects++;
          else
            ghost_objects_from_proc[obj_procid]++;

          // Finally, increment the iterator
          ++it;
        }
    }

  // Gather how many objects each processor *owns* (our count for our
  // own pid is exactly the number of local objects).
  std::vector<dof_id_type> objects_on_proc(this->n_processors(), 0);
  auto this_it = ghost_objects_from_proc.find(this->processor_id());
  this->comm().allgather
    ((this_it == ghost_objects_from_proc.end()) ?
     dof_id_type(0) : this_it->second, objects_on_proc);

#ifndef NDEBUG
  // Sanity: everyone agrees on the unpartitioned count, and nobody
  // ghosts more of processor p's objects than p actually owns.
  libmesh_assert(this->comm().verify(unpartitioned_objects));
  for (processor_id_type p=0, np=this->n_processors(); p != np; ++p)
    if (ghost_objects_from_proc.count(p))
      libmesh_assert_less_equal (ghost_objects_from_proc[p], objects_on_proc[p]);
    else
      libmesh_assert_less_equal (0, objects_on_proc[p]);
#endif

  // We'll renumber objects in blocks by processor id
  std::vector<dof_id_type> first_object_on_proc(this->n_processors());
  for (processor_id_type i=1, np=this->n_processors(); i != np; ++i)
    first_object_on_proc[i] = first_object_on_proc[i-1] +
      objects_on_proc[i-1];
  dof_id_type next_id = first_object_on_proc[this->processor_id()];
  // One past the last id that will be assigned (partitioned blocks
  // plus the unpartitioned block); this is our return value.
  dof_id_type first_free_id =
    first_object_on_proc[this->n_processors()-1] +
    objects_on_proc[this->n_processors()-1] +
    unpartitioned_objects;

  // First set new local object ids and build request sets
  // for non-local object ids

  // Request sets to send to each processor
  std::map<processor_id_type, std::vector<dof_id_type>>
    requested_ids;

  // We know how many objects live on each processor, so reserve() space for
  // each.
  auto ghost_end = ghost_objects_from_proc.end();
  for (auto p : make_range(this->n_processors()))
    if (p != this->processor_id())
      {
        if (const auto p_it = ghost_objects_from_proc.find(p);
            p_it != ghost_end)
          requested_ids[p].reserve(p_it->second);
      }

  end = objects.end();
  for (it = objects.begin(); it != end; ++it)
    {
      T * obj = *it;
      if (!obj)
        continue;
      // Local objects get their new ids immediately; ghosts of
      // partitioned objects are queued for lookup from their owners.
      // The *old* id is what's sent, since that's how the owner still
      // files the object.
      if (obj->processor_id() == this->processor_id())
        obj->set_id(next_id++);
      else if (obj->processor_id() != DofObject::invalid_processor_id)
        requested_ids[obj->processor_id()].push_back(obj->id());
    }

  // Next set ghost object ids from other processors

  // Owner side of the pull: look up each requested (old) id and
  // report the freshly-assigned id.
  auto gather_functor =
    [
#ifndef NDEBUG
     this,
     &first_object_on_proc,
     &objects_on_proc,
#endif
     &objects]
    (processor_id_type, const std::vector<dof_id_type> & ids,
     std::vector<dof_id_type> & new_ids)
    {
      std::size_t ids_size = ids.size();
      new_ids.resize(ids_size);

      for (std::size_t i=0; i != ids_size; ++i)
        {
          T * obj = objects[ids[i]];
          libmesh_assert(obj);
          libmesh_assert_equal_to (obj->processor_id(), this->processor_id());
          new_ids[i] = obj->id();

          // The answer must lie inside our own id block.
          libmesh_assert_greater_equal (new_ids[i],
                                        first_object_on_proc[this->processor_id()]);
          libmesh_assert_less (new_ids[i],
                               first_object_on_proc[this->processor_id()] +
                               objects_on_proc[this->processor_id()]);
        }
    };

  // Requester side of the pull: apply the owner's answers to our
  // ghost copies.
  auto action_functor =
    [
#ifndef NDEBUG
     &first_object_on_proc,
     &objects_on_proc,
#endif
     &objects]
    (processor_id_type libmesh_dbg_var(pid),
     const std::vector<dof_id_type> & ids,
     const std::vector<dof_id_type> & data)
    {
      // Copy the id changes we've now been informed of
      for (auto i : index_range(ids))
        {
          T * obj = objects[ids[i]];
          libmesh_assert (obj);
          libmesh_assert_equal_to (obj->processor_id(), pid);
          libmesh_assert_greater_equal (data[i],
                                        first_object_on_proc[pid]);
          libmesh_assert_less (data[i],
                               first_object_on_proc[pid] +
                               objects_on_proc[pid]);
          obj->set_id(data[i]);
        }
    };

  const dof_id_type * ex = nullptr;
  Parallel::pull_parallel_vector_data
    (this->comm(), requested_ids, gather_functor, action_functor, ex);

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  // A second pull, reusing the same request sets, fills in unique ids
  // on ghost copies that lack them.
  auto unique_gather_functor =
    [
#ifndef NDEBUG
     this,
#endif
     &objects]
    (processor_id_type, const std::vector<dof_id_type> & ids,
     std::vector<unique_id_type> & data)
    {
      std::size_t ids_size = ids.size();
      data.resize(ids_size);

      for (std::size_t i=0; i != ids_size; ++i)
        {
          T * obj = objects[ids[i]];
          libmesh_assert(obj);
          libmesh_assert_equal_to (obj->processor_id(), this->processor_id());
          data[i] = obj->valid_unique_id() ? obj->unique_id() : DofObject::invalid_unique_id;
        }
    };

  auto unique_action_functor =
    [&objects]
    (processor_id_type libmesh_dbg_var(pid),
     const std::vector<dof_id_type> & ids,
     const std::vector<unique_id_type> & data)
    {
      for (auto i : index_range(ids))
        {
          T * obj = objects[ids[i]];
          libmesh_assert (obj);
          libmesh_assert_equal_to (obj->processor_id(), pid);
          // Only fill in missing unique ids; never overwrite a valid one.
          if (!obj->valid_unique_id() && data[i] != DofObject::invalid_unique_id)
            obj->set_unique_id(data[i]);
        }
    };

  const unique_id_type * unique_ex = nullptr;
  Parallel::pull_parallel_vector_data
    (this->comm(), requested_ids, unique_gather_functor,
     unique_action_functor, unique_ex);
#endif

  // Next set unpartitioned object ids: these follow all the
  // partitioned blocks and are assigned identically on every
  // processor (unpartitioned objects are replicated everywhere, in
  // the same container order).
  next_id = 0;
  for (auto i : make_range(this->n_processors()))
    next_id += objects_on_proc[i];
  for (it = objects.begin(); it != end; ++it)
    {
      T * obj = *it;
      if (!obj)
        continue;
      if (obj->processor_id() == DofObject::invalid_processor_id)
        obj->set_id(next_id++);
    }

  // Finally shuffle around objects so that container indices
  // match ids
  it = objects.begin();
  end = objects.end();
  while (it != end)
    {
      T * obj = *it;
      if (obj) // don't try shuffling already-nullptr entries
        {
          T * next = objects[obj->id()];
          // If we have to move this object
          if (next != obj)
            {
              // nullptr out its original position for now
              // (our shuffling may put another object there shortly)
              *it = nullptr;

              // There may already be another object with this id that
              // needs to be moved itself
              while (next)
                {
                  // We shouldn't be trying to give two objects the
                  // same id
                  libmesh_assert_not_equal_to (next->id(), obj->id());
                  objects[obj->id()] = obj;
                  obj = next;
                  next = objects[obj->id()];
                }
              objects[obj->id()] = obj;
            }
        }

      // Remove any container entries that were left as nullptr.
      if (!obj)
        it = objects.erase(it);
      else
        ++it;
    }

  return first_free_id;
}
1459 :
1460 :
// Delete nodes no longer used by any element (repartitioning ghosted
// nodes first if deleted coarse elements may have orphaned them on
// their owner), then renumber all elements and nodes into globally
// contiguous id blocks.  Collective operation.
void DistributedMesh::renumber_nodes_and_elements ()
{
  parallel_object_only();

#ifdef DEBUG
  // Make sure our ids and flags are consistent
  this->libmesh_assert_valid_parallel_ids();
  this->libmesh_assert_valid_parallel_flags();
  this->libmesh_assert_valid_parallel_p_levels();
#endif

  LOG_SCOPE("renumber_nodes_and_elements()", "DistributedMesh");

  // Nodes not connected to any elements, and nullptr node entries
  // in our container, should be deleted. But wait! If we've deleted coarse
  // local elements on some processor, other processors might have ghosted
  // nodes from it that are now no longer connected to any elements on it, but
  // that are connected to their own semilocal elements. We'll have to
  // communicate to ascertain if that's the case.
  this->comm().max(_deleted_coarse_elements);

  // What used nodes do we see on our proc?
  std::set<dof_id_type> used_nodes;

  // What used node info should we send from our proc? Could we take ownership
  // of each node if we needed to?
  std::map<processor_id_type, std::map<dof_id_type, bool>>
    used_nodes_on_proc;

  // flag the nodes we need
  for (auto & elem : this->element_ptr_range())
    for (const Node & node : elem->node_ref_range())
      {
        const dof_id_type n = node.id();
        used_nodes.insert(n);
        if (_deleted_coarse_elements)
          {
            const processor_id_type p = node.processor_id();
            if (p != this->processor_id())
              {
                auto & used_nodes_on_p = used_nodes_on_proc[p];
                // true == "we own an element using this node, so we
                // could take ownership of it"; false == "we merely
                // see it".  A true entry must never be demoted.
                if (elem->processor_id() == this->processor_id())
                  used_nodes_on_p[n] = true;
                else
                  if (!used_nodes_on_p.count(n))
                    used_nodes_on_p[n] = false;
              }
          }
      }

  if (_deleted_coarse_elements)
    {
      // "unsigned char" == "bool, but MPI::BOOL is iffy to use"
      typedef unsigned char boolish;
      std::map<processor_id_type, std::vector<std::pair<dof_id_type, boolish>>>
        used_nodes_on_proc_vecs;
      for (auto & [pid, nodemap] : used_nodes_on_proc)
        used_nodes_on_proc_vecs[pid].assign(nodemap.begin(), nodemap.end());

      // For each of our nodes that we no longer use but others do:
      // the new owner pid (or invalid if no queryer can own it yet).
      std::map<dof_id_type,processor_id_type> repartitioned_node_pids;
      std::map<processor_id_type, std::set<dof_id_type>>
        repartitioned_node_sets_to_push;

      auto ids_action_functor =
        [&used_nodes, &repartitioned_node_pids,
         &repartitioned_node_sets_to_push]
        (processor_id_type pid,
         const std::vector<std::pair<dof_id_type, boolish>> & ids_and_bools)
        {
          for (auto [n, sender_could_become_owner] : ids_and_bools)
            {
              // If we don't see a use for our own node, but someone
              // else does, better figure out who should own it next.
              if (!used_nodes.count(n))
                {
                  if (auto it = repartitioned_node_pids.find(n);
                      sender_could_become_owner)
                    {
                      // NOTE(review): the else branch overwrites an
                      // existing entry even when pid > it->second;
                      // if the intent is lowest-pid-wins (as the
                      // comparison suggests), confirm this is the
                      // desired behavior for later-arriving messages.
                      if (it != repartitioned_node_pids.end() &&
                          pid < it->second)
                        it->second = pid;
                      else
                        repartitioned_node_pids[n] = pid;
                    }
                  else
                    if (it == repartitioned_node_pids.end())
                      repartitioned_node_pids[n] =
                        DofObject::invalid_processor_id;

                  repartitioned_node_sets_to_push[pid].insert(n);
                }
            }
        };

      // We need two pushes instead of a pull here because we need to
      // know *all* the queries for a particular node before we can
      // respond to *any* of them.
      Parallel::push_parallel_vector_data
        (this->comm(), used_nodes_on_proc_vecs, ids_action_functor);

      // Repartition (what used to be) our own nodes first
      for (auto & [n, p] : repartitioned_node_pids)
        {
          Node & node = this->node_ref(n);
          libmesh_assert_equal_to(node.processor_id(), this->processor_id());
          libmesh_assert_not_equal_to_msg(p, DofObject::invalid_processor_id, "Node " << n << " is lost?");
          node.processor_id() = p;
        }

      // Then push to repartition others' ghosted copies.

      std::map<processor_id_type, std::vector<std::pair<dof_id_type,processor_id_type>>>
        repartitioned_node_vecs;

      for (auto & [p, nodeset] : repartitioned_node_sets_to_push)
        {
          auto & rn_vec = repartitioned_node_vecs[p];
          for (auto n : nodeset)
            rn_vec.emplace_back(n, repartitioned_node_pids[n]);
        }

      auto repartition_node_functor =
        [this]
        (processor_id_type libmesh_dbg_var(pid),
         const std::vector<std::pair<dof_id_type, processor_id_type>> & ids_and_pids)
        {
          for (auto [n, p] : ids_and_pids)
            {
              libmesh_assert_not_equal_to(p, DofObject::invalid_processor_id);
              Node & node = this->node_ref(n);
              libmesh_assert_equal_to(node.processor_id(), pid);
              node.processor_id() = p;
            }
        };

      Parallel::push_parallel_vector_data
        (this->comm(), repartitioned_node_vecs, repartition_node_functor);
    }

  _deleted_coarse_elements = false;

  // Nodes not connected to any local elements, and nullptr node entries
  // in our container, are deleted
  {
    node_iterator_imp it = _nodes.begin();
    node_iterator_imp end = _nodes.end();

    while (it != end)
      {
        Node * nd = *it;
        if (!nd)
          it = _nodes.erase(it);
        else if (!used_nodes.count(nd->id()))
          {
            // remove any boundary information associated with
            // this node
            this->get_boundary_info().remove (nd);
            _constraint_rows.erase(nd);

            // delete the node
            delete nd;

            it = _nodes.erase(it);
          }
        else
          ++it;
      }
  }

  // The caller may have disabled renumbering; then only the cleanup
  // above and a cache refresh happen.
  if (_skip_renumber_nodes_and_elements)
    {
      this->update_parallel_id_counts();
      return;
    }

  // Finally renumber all the elements
  _n_elem = this->renumber_dof_objects (this->_elements);

  // and all the remaining nodes
  _n_nodes = this->renumber_dof_objects (this->_nodes);

  // And figure out what IDs we should use when adding new nodes and
  // new elements
  this->update_parallel_id_counts();

  // Make sure our caches are up to date and our
  // DofObjects are well packed
#ifdef DEBUG
  libmesh_assert_equal_to (this->n_nodes(), this->parallel_n_nodes());
  libmesh_assert_equal_to (this->n_elem(), this->parallel_n_elem());
  const dof_id_type pmax_node_id = this->parallel_max_node_id();
  const dof_id_type pmax_elem_id = this->parallel_max_elem_id();
  libmesh_assert_equal_to (this->max_node_id(), pmax_node_id);
  libmesh_assert_equal_to (this->max_elem_id(), pmax_elem_id);
  libmesh_assert_equal_to (this->n_nodes(), this->max_node_id());
  libmesh_assert_equal_to (this->n_elem(), this->max_elem_id());

  // Make sure our ids and flags are consistent
  this->libmesh_assert_valid_parallel_ids();
  this->libmesh_assert_valid_parallel_flags();

  // And make sure we've made our numbering monotonic
  MeshTools::libmesh_assert_valid_elem_ids(*this);
#endif
}
1666 :
1667 :
1668 :
1669 17410 : void DistributedMesh::fix_broken_node_and_element_numbering ()
1670 : {
1671 : // We can't use range-for here because we need access to the special
1672 : // iterators' methods, not just to their dereferenced values.
1673 :
1674 : // Nodes first
1675 0 : for (auto pr = this->_nodes.begin(),
1676 5444770 : end = this->_nodes.end(); pr != end; ++pr)
1677 : {
1678 5427360 : Node * n = *pr;
1679 5427360 : if (n != nullptr)
1680 : {
1681 0 : const dof_id_type id = pr.index();
1682 5204550 : n->set_id() = id;
1683 0 : libmesh_assert_equal_to(this->node_ptr(id), n);
1684 : }
1685 : }
1686 :
1687 : // Elements next
1688 0 : for (auto pr = this->_elements.begin(),
1689 5671231 : end = this->_elements.end(); pr != end; ++pr)
1690 : {
1691 5653821 : Elem * e = *pr;
1692 5653821 : if (e != nullptr)
1693 : {
1694 0 : const dof_id_type id = pr.index();
1695 5590229 : e->set_id() = id;
1696 0 : libmesh_assert_equal_to(this->elem_ptr(id), e);
1697 : }
1698 : }
1699 17410 : }
1700 :
1701 :
1702 :
1703 574640 : dof_id_type DistributedMesh::n_active_elem () const
1704 : {
1705 460 : parallel_object_only();
1706 :
1707 : // Get local active elements first
1708 : dof_id_type active_elements =
1709 1148820 : static_cast<dof_id_type>(std::distance (this->active_local_elements_begin(),
1710 1723000 : this->active_local_elements_end()));
1711 574640 : this->comm().sum(active_elements);
1712 :
1713 : // Then add unpartitioned active elements, which should exist on
1714 : // every processor
1715 574640 : active_elements +=
1716 574640 : static_cast<dof_id_type>(std::distance
1717 1148820 : (this->active_pid_elements_begin(DofObject::invalid_processor_id),
1718 575100 : this->active_pid_elements_end(DofObject::invalid_processor_id)));
1719 574640 : return active_elements;
1720 : }
1721 :
1722 :
1723 :
// Make this mesh truly distributed: delete every element (and any
// no-longer-needed node) that this processor doesn't own or ghost,
// keeping the elements registered via add_extra_ghost_elem().
// Collective operation.
void DistributedMesh::delete_remote_elements()
{
#ifdef DEBUG
  // Make sure our neighbor links are all fine
  MeshTools::libmesh_assert_valid_neighbors(*this);

  // And our child/parent links, and our flags
  MeshTools::libmesh_assert_valid_refinement_tree(*this);

  // Make sure our ids and flags are consistent
  this->libmesh_assert_valid_parallel_ids();
  this->libmesh_assert_valid_parallel_flags();

  libmesh_assert_equal_to (this->n_nodes(), this->parallel_n_nodes());
  libmesh_assert_equal_to (this->n_elem(), this->parallel_n_elem());
  const dof_id_type pmax_node_id = this->parallel_max_node_id();
  const dof_id_type pmax_elem_id = this->parallel_max_elem_id();
  libmesh_assert_equal_to (this->max_node_id(), pmax_node_id);
  libmesh_assert_equal_to (this->max_elem_id(), pmax_elem_id);
#endif

  // After this call the mesh is no longer fully replicated anywhere.
  _is_serial = false;
  _is_serial_on_proc_0 = false;

  MeshCommunication().delete_remote_elements(*this, _extra_ghost_elems);

  libmesh_assert_equal_to (this->max_elem_id(), this->parallel_max_elem_id());

  // Now make sure the containers actually shrink - strip
  // any newly-created nullptr voids out of the element array
  dofobject_container<Elem>::veclike_iterator e_it = _elements.begin();
  const dofobject_container<Elem>::veclike_iterator e_end = _elements.end();
  while (e_it != e_end)
    if (!*e_it)
      e_it = _elements.erase(e_it);
    else
      ++e_it;

  // ... and the same for the node array.
  dofobject_container<Node>::veclike_iterator n_it = _nodes.begin();
  const dofobject_container<Node>::veclike_iterator n_end = _nodes.end();
  while (n_it != n_end)
    if (!*n_it)
      n_it = _nodes.erase(n_it);
    else
      ++n_it;

  // We may have deleted no-longer-connected nodes or coarsened-away
  // elements; let's update our caches.
  this->update_parallel_id_counts();

  // We may have deleted nodes or elements that were the only local
  // representatives of some particular boundary id(s); let's update
  // those caches.
  this->get_boundary_info().regenerate_id_sets();

#ifdef DEBUG
  // We might not have well-packed objects if the user didn't allow us
  // to renumber
  // libmesh_assert_equal_to (this->n_nodes(), this->max_node_id());
  // libmesh_assert_equal_to (this->n_elem(), this->max_elem_id());

  // Make sure our neighbor links are all fine
  MeshTools::libmesh_assert_valid_neighbors(*this);

  // And our child/parent links, and our flags
  MeshTools::libmesh_assert_valid_refinement_tree(*this);

  // Make sure our ids and flags are consistent
  this->libmesh_assert_valid_parallel_ids();
  this->libmesh_assert_valid_parallel_flags();
#endif
}
1796 :
1797 :
1798 0 : void DistributedMesh::add_extra_ghost_elem(Elem * e)
1799 : {
1800 : // First add the elem like normal
1801 0 : add_elem(e);
1802 :
1803 : // Now add it to the set that won't be deleted when we call
1804 : // delete_remote_elements()
1805 0 : _extra_ghost_elems.insert(e);
1806 0 : }
1807 :
1808 : void
1809 0 : DistributedMesh::clear_extra_ghost_elems(const std::set<Elem *> & extra_ghost_elems)
1810 : {
1811 0 : std::set<Elem *> tmp;
1812 0 : std::set_difference(_extra_ghost_elems.begin(), _extra_ghost_elems.end(),
1813 : extra_ghost_elems.begin(), extra_ghost_elems.end(),
1814 0 : std::inserter(tmp, tmp.begin()));
1815 0 : _extra_ghost_elems = tmp;
1816 0 : }
1817 :
1818 77752 : void DistributedMesh::allgather()
1819 : {
1820 77752 : if (_is_serial)
1821 0 : return;
1822 77750 : MeshCommunication().allgather(*this);
1823 77750 : _is_serial = true;
1824 77750 : _is_serial_on_proc_0 = true;
1825 :
1826 : // Make sure our caches are up to date and our
1827 : // DofObjects are well packed
1828 : #ifdef DEBUG
1829 46 : libmesh_assert_equal_to (this->n_nodes(), this->parallel_n_nodes());
1830 46 : libmesh_assert_equal_to (this->n_elem(), this->parallel_n_elem());
1831 46 : const dof_id_type pmax_node_id = this->parallel_max_node_id();
1832 46 : const dof_id_type pmax_elem_id = this->parallel_max_elem_id();
1833 46 : libmesh_assert_equal_to (this->max_node_id(), pmax_node_id);
1834 46 : libmesh_assert_equal_to (this->max_elem_id(), pmax_elem_id);
1835 :
1836 : // If we've disabled renumbering we can't be sure we're contiguous
1837 : // libmesh_assert_equal_to (this->n_nodes(), this->max_node_id());
1838 : // libmesh_assert_equal_to (this->n_elem(), this->max_elem_id());
1839 :
1840 : // Make sure our neighbor links are all fine
1841 46 : MeshTools::libmesh_assert_valid_neighbors(*this);
1842 :
1843 : // Make sure our ids and flags are consistent
1844 46 : this->libmesh_assert_valid_parallel_ids();
1845 46 : this->libmesh_assert_valid_parallel_flags();
1846 : #endif
1847 : }
1848 :
1849 18032 : void DistributedMesh::gather_to_zero()
1850 : {
1851 18032 : if (_is_serial_on_proc_0)
1852 4 : return;
1853 :
1854 13794 : _is_serial_on_proc_0 = true;
1855 13794 : MeshCommunication().gather(0, *this);
1856 : }
1857 :
1858 :
1859 : } // namespace libMesh
|