1 : //* This file is part of the MOOSE framework
2 : //* https://mooseframework.inl.gov
3 : //*
4 : //* All rights reserved, see COPYRIGHT for full restrictions
5 : //* https://github.com/idaholab/moose/blob/master/COPYRIGHT
6 : //*
7 : //* Licensed under LGPL 2.1, please see LICENSE for details
8 : //* https://www.gnu.org/licenses/lgpl-2.1.html
9 :
10 : #include "FeatureFloodCount.h"
11 : #include "IndirectSort.h"
12 : #include "MooseMesh.h"
13 : #include "MooseUtils.h"
14 : #include "MooseVariable.h"
15 : #include "SubProblem.h"
16 :
17 : #include "Assembly.h"
18 : #include "FEProblem.h"
19 : #include "NonlinearSystem.h"
20 :
21 : #include "libmesh/dof_map.h"
22 : #include "libmesh/mesh_tools.h"
23 : #include "libmesh/periodic_boundaries.h"
24 : #include "libmesh/point_locator_base.h"
25 : #include "libmesh/remote_elem.h"
26 :
27 : #include <algorithm>
28 : #include <limits>
29 :
30 : template <>
31 : void
32 34399 : dataStore(std::ostream & stream, FeatureFloodCount::FeatureData & feature, void * context)
33 : {
34 : /**
35 : * Note that _local_ids is not stored here. It's not needed for restart, and not needed
36 : * during the parallel merge operation
37 : */
38 34399 : storeHelper(stream, feature._ghosted_ids, context);
39 34399 : storeHelper(stream, feature._halo_ids, context);
40 34399 : storeHelper(stream, feature._disjoint_halo_ids, context);
41 34399 : storeHelper(stream, feature._periodic_nodes, context);
42 34399 : storeHelper(stream, feature._var_index, context);
43 34399 : storeHelper(stream, feature._id, context);
44 34399 : storeHelper(stream, feature._bboxes, context);
45 34399 : storeHelper(stream, feature._orig_ids, context);
46 34399 : storeHelper(stream, feature._min_entity_id, context);
47 34399 : storeHelper(stream, feature._vol_count, context);
48 34399 : storeHelper(stream, feature._centroid, context);
49 34399 : storeHelper(stream, feature._status, context);
50 34399 : storeHelper(stream, feature._boundary_intersection, context);
51 34399 : }
52 :
53 : template <>
54 : void
55 45124 : dataStore(std::ostream & stream, BoundingBox & bbox, void * context)
56 : {
57 : storeHelper(stream, bbox.min(), context);
58 : storeHelper(stream, bbox.max(), context);
59 45124 : }
60 :
61 : template <>
62 : void
63 19014 : dataLoad(std::istream & stream, FeatureFloodCount::FeatureData & feature, void * context)
64 : {
65 : /**
66 : * Note that _local_ids is not loaded here. It's not needed for restart, and not needed
67 : * during the parallel merge operation
68 : */
69 19014 : loadHelper(stream, feature._ghosted_ids, context);
70 19014 : loadHelper(stream, feature._halo_ids, context);
71 19014 : loadHelper(stream, feature._disjoint_halo_ids, context);
72 19014 : loadHelper(stream, feature._periodic_nodes, context);
73 19014 : loadHelper(stream, feature._var_index, context);
74 19014 : loadHelper(stream, feature._id, context);
75 19014 : loadHelper(stream, feature._bboxes, context);
76 19014 : loadHelper(stream, feature._orig_ids, context);
77 19014 : loadHelper(stream, feature._min_entity_id, context);
78 19014 : loadHelper(stream, feature._vol_count, context);
79 19014 : loadHelper(stream, feature._centroid, context);
80 19014 : loadHelper(stream, feature._status, context);
81 19014 : loadHelper(stream, feature._boundary_intersection, context);
82 19014 : }
83 :
84 : template <>
85 : void
86 24694 : dataLoad(std::istream & stream, BoundingBox & bbox, void * context)
87 : {
88 : loadHelper(stream, bbox.min(), context);
89 : loadHelper(stream, bbox.max(), context);
90 24694 : }
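// These dataStore()/dataLoad() specializations let FeatureData objects (and their bounding
// boxes) be packed into a byte stream. They are used below by serialize()/deserialize() for the
// gather_packed_range() communication during the parallel merge, and by the restart system when
// a derived class sets "restartable_required" to true.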
91 :
92 : // Utility routines
93 : void updateBBoxExtremesHelper(BoundingBox & bbox, const Point & node);
94 : void updateBBoxExtremesHelper(BoundingBox & bbox, const Elem & elem);
95 :
96 : registerMooseObject("PhaseFieldApp", FeatureFloodCount);
97 :
98 : InputParameters
99 5460 : FeatureFloodCount::validParams()
100 : {
101 5460 : InputParameters params = GeneralPostprocessor::validParams();
102 5460 : params += BoundaryRestrictable::validParams();
103 :
104 10920 : params.addRequiredCoupledVar(
105 : "variable",
106 : "The variable(s) for which to find connected regions of interest, i.e. \"features\".");
107 10920 : params.addParam<Real>(
108 10920 : "threshold", 0.5, "The threshold value for which a new feature may be started");
109 10920 : params.addParam<Real>(
110 : "connecting_threshold",
111 : "The threshold for which an existing feature may be extended (defaults to \"threshold\")");
112 10920 : params.addParam<bool>("use_single_map",
113 10920 : true,
114 : "Determine whether information is tracked per "
115 : "coupled variable or consolidated into one "
116 : "(default: true)");
117 10920 : params.addParam<bool>(
118 : "condense_map_info",
119 10920 : false,
120 : "Determines whether we condense all the node values when in multimap mode (default: false)");
121 10920 : params.addParam<bool>("use_global_numbering",
122 10920 : true,
123 : "Determine whether or not global numbers are "
124 : "used to label features on multiple maps "
125 : "(default: true)");
126 10920 : params.addParam<bool>("enable_var_coloring",
127 10920 : false,
128 : "Instruct the Postprocessor to populate the variable index map.");
129 10920 : params.addParam<bool>(
130 : "compute_halo_maps",
131 10920 : false,
132 : "Instruct the Postprocessor to communicate proper halo information to all ranks");
133 10920 : params.addParam<bool>("compute_var_to_feature_map",
134 10920 : false,
135 : "Instruct the Postprocessor to compute the active vars to features map");
136 10920 : params.addParam<bool>(
137 : "use_less_than_threshold_comparison",
138 10920 : true,
139 : "Controls whether features are defined to be less than or greater than the threshold value.");
140 :
141 10920 : params.addParam<std::vector<BoundaryName>>(
142 : "primary_percolation_boundaries",
143 : "A list of boundaries used in conjunction with the corresponding "
144 : "\"secondary_percolation_boundaries\" parameter for determining if a feature creates a path "
145 : "connecting any pair of boundaries");
146 10920 : params.addParam<std::vector<BoundaryName>>(
147 : "secondary_percolation_boundaries",
148 : "Paired boundaries with \"primary_percolation_boundaries\" parameter");
149 10920 : params.addParam<std::vector<BoundaryName>>(
150 : "specified_boundaries",
151 : "An optional list of boundaries; if supplied, each feature is checked to determine whether "
152 : "it intersects any of the specified boundaries in this list.");
153 :
154 : /**
155 : * The FeatureFloodCount and derived objects should not operate on the displaced mesh. These
156 : * objects consume variable values from the nonlinear system and use a lot of raw geometric
157 : * element information from the mesh. If you use the displaced system with EBSD information for
158 : * instance, you'll have difficulties reconciling the difference between the coordinates from the
159 : * EBSD data file and the potential displacements applied via boundary conditions.
160 : */
161 5460 : params.set<bool>("use_displaced_mesh") = false;
162 :
163 : // The FeatureFloodCount object does not require that any state (restartable information) is
164 : // maintained. This Boolean is set to false so that we don't ask MOOSE to save a potentially
165 : // large data structure for no reason. It is set to true in at least one derived class
166 : // (GrainTracker).
167 5460 : params.addPrivateParam<bool>("restartable_required", false);
168 :
169 10920 : params.addParamNamesToGroup(
170 : "use_single_map condense_map_info use_global_numbering primary_percolation_boundaries",
171 : "Advanced");
172 :
173 10920 : MooseEnum flood_type("NODAL ELEMENTAL", "ELEMENTAL");
174 10920 : params.addParam<MooseEnum>("flood_entity_type",
175 : flood_type,
176 : "Determines whether the flood algorithm runs on nodes or elements");
177 :
178 10920 : params.addClassDescription("The object is able to find and count \"connected components\" in any "
179 : "solution field or number of solution fields. A primary example would "
180 : "be to count \"bubbles\".");
181 :
182 10920 : params.addRelationshipManager("ElementSideNeighborLayers",
183 : Moose::RelationshipManagerType::GEOMETRIC |
184 : Moose::RelationshipManagerType::ALGEBRAIC);
185 :
186 5460 : return params;
187 5460 : }
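// Illustrative usage sketch (not part of the original source): a minimal input-file block using
// the parameters declared above might look like the following. The block name "flood_count" and
// the variable name "c" are assumptions for the example only.
//
//   [Postprocessors]
//     [flood_count]
//       type = FeatureFloodCount
//       variable = 'c'
//       threshold = 0.5
//       flood_entity_type = ELEMENTAL
//     []
//   []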
188 :
189 1999 : FeatureFloodCount::FeatureFloodCount(const InputParameters & parameters)
190 : : GeneralPostprocessor(parameters),
191 : Coupleable(this, false),
192 : MooseVariableDependencyInterface(this),
193 : BoundaryRestrictable(this, false),
194 1999 : _fe_vars(getCoupledMooseVars()),
195 1999 : _vars(getCoupledStandardMooseVars()),
196 1999 : _dof_map(_vars[0]->dofMap()),
197 3998 : _threshold(getParam<Real>("threshold")),
198 1999 : _connecting_threshold(isParamValid("connecting_threshold")
199 5997 : ? getParam<Real>("connecting_threshold")
200 4409 : : getParam<Real>("threshold")),
201 1999 : _mesh(_subproblem.mesh()),
202 1999 : _var_number(_fe_vars[0]->number()),
203 3998 : _single_map_mode(getParam<bool>("use_single_map")),
204 3998 : _condense_map_info(getParam<bool>("condense_map_info")),
205 3998 : _global_numbering(getParam<bool>("use_global_numbering")),
206 3998 : _var_index_mode(getParam<bool>("enable_var_coloring")),
207 3998 : _compute_halo_maps(getParam<bool>("compute_halo_maps")),
208 3998 : _compute_var_to_feature_map(getParam<bool>("compute_var_to_feature_map")),
209 3998 : _use_less_than_threshold_comparison(getParam<bool>("use_less_than_threshold_comparison")),
210 1999 : _n_vars(_fe_vars.size()),
211 1999 : _maps_size(_single_map_mode ? 1 : _fe_vars.size()),
212 1999 : _n_procs(_app.n_processors()),
213 1999 : _feature_counts_per_map(_maps_size),
214 1999 : _feature_count(0),
215 1999 : _partial_feature_sets(_maps_size),
216 3998 : _feature_sets(getParam<bool>("restartable_required")
217 3461 : ? declareRestartableData<std::vector<FeatureData>>("feature_sets")
218 : : _volatile_feature_sets),
219 1999 : _feature_maps(_maps_size),
220 1999 : _pbs(nullptr),
221 3998 : _element_average_value(parameters.isParamValid("elem_avg_value")
222 1999 : ? getPostprocessorValue("elem_avg_value")
223 : : _real_zero),
224 1999 : _halo_ids(_maps_size),
225 3998 : _is_elemental(getParam<MooseEnum>("flood_entity_type") == "ELEMENTAL"),
226 3998 : _is_primary(processor_id() == 0)
227 : {
228 1999 : if (_var_index_mode)
229 813 : _var_index_maps.resize(_maps_size);
230 :
231 1999 : addMooseVariableDependency(_fe_vars);
232 :
233 1999 : _is_boundary_restricted = boundaryRestricted();
234 :
235 1999 : if (_subproblem.isTransient())
236 : {
237 : // tell MOOSE that we are going to need old and older DoF values
238 10624 : for (auto & var : _vars)
239 : {
240 9109 : var->dofValuesOld();
241 9109 : var->dofValuesOlder();
242 : }
243 : }
244 :
245 3998 : if (parameters.isParamValid("primary_percolation_boundaries"))
246 54 : _primary_perc_bnds = _mesh.getBoundaryIDs(
247 27 : parameters.get<std::vector<BoundaryName>>("primary_percolation_boundaries"));
248 3998 : if (parameters.isParamValid("secondary_percolation_boundaries"))
249 54 : _secondary_perc_bnds = _mesh.getBoundaryIDs(
250 27 : parameters.get<std::vector<BoundaryName>>("secondary_percolation_boundaries"));
251 :
252 1999 : if (_primary_perc_bnds.empty() != _secondary_perc_bnds.empty())
253 0 : paramError("primary_percolation_boundaries",
254 : "primary_percolation_boundaries and secondary_percolation_boundaries must both be "
255 : "supplied when checking for percolation");
256 :
257 3998 : if (parameters.isParamValid("specified_boundaries"))
258 : _specified_bnds =
259 40 : _mesh.getBoundaryIDs(parameters.get<std::vector<BoundaryName>>("specified_boundaries"));
260 1999 : }
261 :
262 : void
263 1847 : FeatureFloodCount::initialSetup()
264 : {
265 : // We need one map per coupled variable for normal runs to support overlapping features
266 1847 : _entities_visited.resize(_vars.size());
267 :
268 : // Get a pointer to the PeriodicBoundaries buried in libMesh
269 1847 : _pbs = _sys.dofMap().get_periodic_boundaries();
270 :
271 1847 : meshChanged();
272 :
273 : /**
274 : * Size the empty var to features vector to the number of coupled variables.
275 : * This empty (but properly sized) vector is returned for elements
276 : * that are queried but are not in the structure (which shouldn't happen).
277 : * The user is warned in this case but this helps avoid extra bounds checking
278 : * in user code and avoids segfaults.
279 : */
280 1847 : _empty_var_to_features.resize(_n_vars, invalid_id);
281 1847 : }
282 :
283 : void
284 3739 : FeatureFloodCount::initialize()
285 : {
286 : // Clear the feature marking maps and region counters and other data structures
287 22763 : for (const auto map_num : make_range(_maps_size))
288 : {
289 : _feature_maps[map_num].clear();
290 : _partial_feature_sets[map_num].clear();
291 :
292 19024 : if (_var_index_mode)
293 : _var_index_maps[map_num].clear();
294 :
295 : _halo_ids[map_num].clear();
296 : }
297 :
298 3739 : _feature_sets.clear();
299 :
300 : // Calculate the thresholds for this iteration
301 3739 : _step_threshold = _element_average_value + _threshold;
302 3739 : _step_connecting_threshold = _element_average_value + _connecting_threshold;
303 :
304 : _ghosted_entity_ids.clear();
305 :
306 : // Reset the feature count and max local size
307 3739 : _feature_count = 0;
308 :
309 : _entity_var_to_features.clear();
310 :
311 25599 : for (auto & map_ref : _entities_visited)
312 : map_ref.clear();
313 3739 : }
314 :
315 : void
316 3454 : FeatureFloodCount::clearDataStructures()
317 : {
318 3454 : }
319 :
320 : void
321 2397 : FeatureFloodCount::meshChanged()
322 : {
323 4794 : _point_locator = _mesh.getMesh().sub_point_locator();
324 :
325 2397 : _mesh.buildPeriodicNodeMap(_periodic_node_map, _var_number, _pbs);
326 :
327 : // Build a new node to element map
328 : _nodes_to_elem_map.clear();
329 2397 : MeshTools::build_nodes_to_elem_map(_mesh.getMesh(), _nodes_to_elem_map);
330 :
331 : /**
332 : * We need to build a set containing all of the boundary entities
333 : * to compare against. This set contains elements for elemental flooding;
334 : * nodal flooding is not supported for this check.
335 : */
336 : _all_boundary_entity_ids.clear();
337 2397 : if (_is_elemental)
338 419650 : for (auto elem_it = _mesh.bndElemsBegin(), elem_end = _mesh.bndElemsEnd(); elem_it != elem_end;
339 415038 : ++elem_it)
340 415038 : _all_boundary_entity_ids.insert((*elem_it)->_elem->id());
341 2397 : }
342 :
343 : void
344 2984 : FeatureFloodCount::execute()
345 : {
346 5968 : TIME_SECTION("execute", 3, "Flooding Features");
347 :
348 : // Iterate only over boundaries if restricted
349 2984 : if (_is_boundary_restricted)
350 : {
351 50 : mooseInfo("Using EXPERIMENTAL boundary restricted FeatureFloodCount object!\n");
352 :
353 : // Set the boundary range pointer for use during flooding
354 50 : _bnd_elem_range = _mesh.getBoundaryElementRange();
355 :
356 : auto rank = processor_id();
357 :
358 4050 : for (const auto & belem : *_bnd_elem_range)
359 : {
360 4000 : const Elem * elem = belem->_elem;
361 4000 : BoundaryID boundary_id = belem->_bnd_id;
362 :
363 4000 : if (elem->processor_id() == rank)
364 : {
365 3040 : if (hasBoundary(boundary_id))
366 3800 : for (const auto var_num : index_range(_vars))
367 1900 : flood(elem, var_num);
368 : }
369 : }
370 : }
371 : else // Normal volumetric operation
372 : {
373 3544446 : for (const auto & current_elem : _mesh.getMesh().active_local_element_ptr_range())
374 : {
375 : // Loop over elements or nodes
376 1769289 : if (_is_elemental)
377 : {
378 5806120 : for (const auto var_num : index_range(_vars))
379 4463426 : flood(current_elem, var_num);
380 : }
381 : else
382 : {
383 426595 : auto n_nodes = current_elem->n_vertices();
384 3240367 : for (const auto i : make_range(n_nodes))
385 : {
386 2813772 : const Node * current_node = current_elem->node_ptr(i);
387 :
388 6166740 : for (const auto var_num : index_range(_vars))
389 3352968 : flood(current_node, var_num);
390 : }
391 : }
392 2934 : }
393 : }
394 2984 : }
395 :
396 : processor_id_type
397 2984 : FeatureFloodCount::numberOfDistributedMergeHelpers() const
398 : {
399 2984 : return _app.n_processors() >= _maps_size ? _maps_size : 1;
400 : }
401 :
402 : void
403 3502 : FeatureFloodCount::communicateAndMerge()
404 : {
405 7004 : TIME_SECTION("communicateAndMerge", 3, "Communicating and Merging");
406 :
407 : // First we need to transform the raw data into a usable data structure
408 3502 : prepareDataForTransfer();
409 :
410 : /**
411 : * The libMesh packed range routines handle the communication of the individual
412 : * string buffers. Here we need to create a container to hold our type
413 : * to serialize. It'll always be size one because we are sending a single
414 : * byte stream of all the data to other processors. The stream need not be
415 : * the same size on all processors.
416 : */
417 3502 : std::vector<std::string> send_buffers(1);
418 :
419 : /**
420 : * Additionally we need to create a different container to hold the received
421 : * byte buffers. The container type need not match the send container type.
422 : * However, we do know the number of incoming buffers (num processors) so we'll
423 : * go ahead and use a vector.
424 : */
425 : std::vector<std::string> recv_buffers, deserialize_buffers;
426 :
427 : /**
428 : * When we distribute merge work, we are reducing computational work by adding more communication.
429 : * Each of the first _n_vars processors will receive one variable worth of information to merge.
430 : * After each of those processors has merged that information, it'll be sent to the primary
431 : * processor where final consolidation will occur.
432 : */
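// Illustrative example: with 4 coupled variables (maps) and at least 4 ranks,
// numberOfDistributedMergeHelpers() returns 4, so ranks 0-3 each gather and merge the partial
// features for one variable, and rank 0 then gathers those merged results for final
// consolidation. With fewer ranks than maps, the serialized path below is taken instead.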
433 3502 : const auto n_merging_procs = numberOfDistributedMergeHelpers();
434 :
435 3502 : if (n_merging_procs > 1)
436 : {
437 : auto rank = processor_id();
438 : bool is_merging_processor = rank < n_merging_procs;
439 :
440 1806 : if (is_merging_processor)
441 1758 : recv_buffers.reserve(_app.n_processors());
442 :
443 13932 : for (const auto i : make_range(n_merging_procs))
444 : {
445 12126 : serialize(send_buffers[0], i);
446 :
447 : /**
448 : * Send the data from all processors to the first 'n_merging_procs' processors to create
449 : * complete global feature maps for each variable.
450 : */
451 12126 : _communicator.gather_packed_range(i,
452 : (void *)(nullptr),
453 : send_buffers.begin(),
454 : send_buffers.end(),
455 : std::back_inserter(recv_buffers));
456 :
457 : /**
458 : * A call to gather_packed_range seems to populate the receiving buffer on all processors, not
459 : * just the receiving buffer on the actual receiving processor. If we plan to call this
460 : * function repeatedly, we must clear the buffers each time on all non-receiving processors.
461 : * On the actual receiving processor, we'll save off the buffer for use later.
462 : */
463 12126 : if (rank == i)
464 : recv_buffers.swap(deserialize_buffers);
465 : else
466 10368 : recv_buffers.clear();
467 : }
468 :
469 : // Setup a new communicator for doing merging communication operations
470 : Parallel::Communicator merge_comm;
471 :
472 1854 : _communicator.split(is_merging_processor ? 0 : MPI_UNDEFINED, rank, merge_comm);
473 :
474 1806 : if (is_merging_processor)
475 : {
476 : /**
477 : * The FeatureFloodCount and derived objects rely on having the original data structures
478 : * intact on all non-zero ranks. This is because local-only information (local entities) is
479 : * never communicated and thus must remain intact. However, the distributed merging will
480 : * destroy that information. The easiest thing to do is to swap out the data structure while
481 : * we perform the distributed merge work.
482 : */
483 1758 : std::vector<std::list<FeatureData>> tmp_data(_partial_feature_sets.size());
484 : tmp_data.swap(_partial_feature_sets);
485 :
486 1758 : deserialize(deserialize_buffers, processor_id());
487 :
488 : send_buffers[0].clear();
489 1758 : recv_buffers.clear();
490 1758 : deserialize_buffers.clear();
491 :
492 : // Merge one variable's worth of data
493 1758 : mergeSets();
494 :
495 : // Now we need to serialize again to send to the primary (only the processors who did work)
496 1758 : serialize(send_buffers[0]);
497 :
498 : // Free up as much memory as possible here before we do global communication
499 1758 : clearDataStructures();
500 :
501 : /**
502 : * Send the data from the merging processors to the root to create a complete global feature
503 : * map.
504 : */
505 1758 : merge_comm.gather_packed_range(0,
506 : (void *)(nullptr),
507 : send_buffers.begin(),
508 : send_buffers.end(),
509 : std::back_inserter(recv_buffers));
510 :
511 1758 : if (_is_primary)
512 : {
513 : // The root process now needs to deserialize all of the data
514 345 : deserialize(recv_buffers);
515 :
516 : send_buffers[0].clear();
517 345 : recv_buffers.clear();
518 :
519 345 : consolidateMergedFeatures(&tmp_data);
520 : }
521 : else
522 : // Restore our original data on non-zero ranks
523 : tmp_data.swap(_partial_feature_sets);
524 1758 : }
525 : }
526 :
527 : // Serialized merging (primary does all the work)
528 : else
529 : {
530 1696 : if (_is_primary)
531 1275 : recv_buffers.reserve(_app.n_processors());
532 :
533 1696 : serialize(send_buffers[0]);
534 :
535 : // Free up as much memory as possible here before we do global communication
536 1696 : clearDataStructures();
537 :
538 : /**
539 : * Send the data from all processors to the root to create a complete
540 : * global feature map.
541 : */
542 1696 : _communicator.gather_packed_range(0,
543 : (void *)(nullptr),
544 : send_buffers.begin(),
545 : send_buffers.end(),
546 : std::back_inserter(recv_buffers));
547 :
548 1696 : if (_is_primary)
549 : {
550 : // The root process now needs to deserialize all of the data
551 1275 : deserialize(recv_buffers);
552 1275 : recv_buffers.clear();
553 :
554 1275 : mergeSets();
555 :
556 1275 : consolidateMergedFeatures();
557 : }
558 : }
559 :
560 3502 : if (!_is_primary)
561 1882 : restoreOriginalDataStructures(_partial_feature_sets);
562 :
563 : // Make sure that feature count is communicated to all ranks
564 3502 : _communicator.broadcast(_feature_count);
565 7004 : }
566 :
567 : void
568 949 : FeatureFloodCount::sortAndLabel()
569 : {
570 : mooseAssert(_is_primary, "sortAndLabel can only be called on the primary");
571 :
572 : /**
573 : * Perform a sort to give a parallel unique sorting to the identified features.
574 : * We use the "min_entity_id" inside each feature to assign its position in the
575 : * sorted vector.
576 : */
577 949 : std::sort(_feature_sets.begin(), _feature_sets.end());
578 :
579 : #ifndef NDEBUG
580 : /**
581 : * Sanity check. Now that we've sorted the flattened vector of features
582 : * we need to make sure that the counts vector still lines up appropriately
583 : * with each feature's _var_index.
584 : */
585 : unsigned int feature_offset = 0;
586 : for (const auto map_num : make_range(_maps_size))
587 : {
588 : // Skip empty map checks
589 : if (_feature_counts_per_map[map_num] == 0)
590 : continue;
591 :
592 : // Check the begin and end of the current range
593 : auto range_front = feature_offset;
594 : auto range_back = feature_offset + _feature_counts_per_map[map_num] - 1;
595 :
596 : mooseAssert(range_front <= range_back && range_back < _feature_count,
597 : "Indexing error in feature sets");
598 :
599 : if (!_single_map_mode && (_feature_sets[range_front]._var_index != map_num ||
600 : _feature_sets[range_back]._var_index != map_num))
601 : mooseError("Error in _feature_sets sorting, map index: ", map_num);
602 :
603 : feature_offset += _feature_counts_per_map[map_num];
604 : }
605 : #endif
606 :
607 : // Label the features with an ID based on the sorting (processor number independent value)
608 9596 : for (const auto i : index_range(_feature_sets))
609 8647 : if (_feature_sets[i]._id == invalid_id)
610 2647 : _feature_sets[i]._id = i;
611 949 : }
612 :
613 : void
614 1741 : FeatureFloodCount::buildLocalToGlobalIndices(std::vector<std::size_t> & local_to_global_all,
615 : std::vector<int> & counts) const
616 : {
617 : mooseAssert(_is_primary, "This method must only be called on the root processor");
618 :
619 1741 : counts.assign(_n_procs, 0);
620 : // Now size the individual counts vectors based on the largest index seen per processor
621 17455 : for (const auto & feature : _feature_sets)
622 44812 : for (const auto & local_index_pair : feature._orig_ids)
623 : {
624 : // local_index_pair.first = ranks, local_index_pair.second = local_index
625 : mooseAssert(local_index_pair.first < _n_procs, "Processor ID is out of range");
626 29098 : if (local_index_pair.second >= static_cast<std::size_t>(counts[local_index_pair.first]))
627 13220 : counts[local_index_pair.first] = local_index_pair.second + 1;
628 : }
629 :
630 : // Build the offsets vector
631 : unsigned int globalsize = 0;
632 1741 : std::vector<int> offsets(_n_procs); // Type is signed for use with the MPI API
633 5480 : for (const auto i : index_range(offsets))
634 : {
635 3739 : offsets[i] = globalsize;
636 3739 : globalsize += counts[i];
637 : }
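// Illustrative example: with counts = {3, 2} the offsets become {0, 3} and globalsize = 5, so a
// feature whose _orig_ids entry is (rank 1, local index 0) lands in local_to_global_all[3].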
638 :
639 : // Finally populate the primary vector
640 1741 : local_to_global_all.resize(globalsize, FeatureFloodCount::invalid_size_t);
641 17455 : for (const auto & feature : _feature_sets)
642 : {
643 : // Get the local indices from the feature and build a map
644 44812 : for (const auto & local_index_pair : feature._orig_ids)
645 : {
646 29098 : auto rank = local_index_pair.first;
647 : mooseAssert(rank < _n_procs, rank << ", " << _n_procs);
648 :
649 29098 : auto local_index = local_index_pair.second;
650 29098 : auto stacked_local_index = offsets[rank] + local_index;
651 :
652 : mooseAssert(stacked_local_index < globalsize,
653 : "Global index: " << stacked_local_index << " is out of range");
654 29098 : local_to_global_all[stacked_local_index] = feature._id;
655 : }
656 : }
657 1741 : }
658 :
659 : void
660 6355 : FeatureFloodCount::buildFeatureIdToLocalIndices(unsigned int max_id)
661 : {
662 6355 : _feature_id_to_local_index.assign(max_id + 1, invalid_size_t);
663 52384 : for (const auto feature_index : index_range(_feature_sets))
664 : {
665 46029 : if (_feature_sets[feature_index]._status != Status::INACTIVE)
666 : {
667 : mooseAssert(_feature_sets[feature_index]._id <= max_id,
668 : "Feature ID out of range(" << _feature_sets[feature_index]._id << ')');
669 45933 : _feature_id_to_local_index[_feature_sets[feature_index]._id] = feature_index;
670 : }
671 : }
672 6355 : }
673 :
674 : void
675 1123 : FeatureFloodCount::finalize()
676 : {
677 2246 : TIME_SECTION("finalize", 3, "Finalizing Feature Identification");
678 :
679 : // Gather all information on processor zero and merge
680 1123 : communicateAndMerge();
681 :
682 : // Sort and label the features
683 1123 : if (_is_primary)
684 708 : sortAndLabel();
685 :
686 : // Send out the local to global mappings
687 1123 : scatterAndUpdateRanks();
688 :
689 : // Populate _feature_maps and _var_index_maps
690 1123 : updateFieldInfo();
691 1123 : }
692 :
693 : const std::vector<unsigned int> &
694 175549224 : FeatureFloodCount::getVarToFeatureVector(dof_id_type elem_id) const
695 : {
696 175549224 : mooseDoOnce(if (!_compute_var_to_feature_map) mooseError(
697 : "Please set \"compute_var_to_feature_map = true\" to use this interface method"));
698 :
699 : const auto pos = _entity_var_to_features.find(elem_id);
700 175549224 : if (pos != _entity_var_to_features.end())
701 : {
702 : mooseAssert(pos->second.size() == _n_vars, "Variable to feature vector not sized properly");
703 174352895 : return pos->second;
704 : }
705 : else
706 1196329 : return _empty_var_to_features;
707 : }
708 :
709 : void
710 3739 : FeatureFloodCount::scatterAndUpdateRanks()
711 : {
712 : // local to global map (one per processor)
713 : std::vector<int> counts;
714 : std::vector<std::size_t> local_to_global_all;
715 3739 : if (_is_primary)
716 1741 : buildLocalToGlobalIndices(local_to_global_all, counts);
717 :
718 : // Scatter local_to_global indices to all processors and store in class member variable
719 3739 : _communicator.scatter(local_to_global_all, counts, _local_to_global_feature_map);
720 :
721 : std::size_t largest_global_index = std::numeric_limits<std::size_t>::lowest();
722 3739 : if (!_is_primary)
723 : {
724 1998 : _feature_sets.resize(_local_to_global_feature_map.size());
725 :
726 : /**
727 : * On non-root processors we can't maintain the full _feature_sets data structure since
728 : * we don't have all of the global information. We'll move the items from the partial
729 : * feature sets into a flat structure maintaining order and update the internal IDs
730 : * with the proper global ID.
731 : */
732 14078 : for (auto & list_ref : _partial_feature_sets)
733 : {
734 24421 : for (auto & feature : list_ref)
735 : {
736 : mooseAssert(feature._orig_ids.size() == 1, "feature._orig_ids length doesn't make sense");
737 :
738 : auto global_index = FeatureFloodCount::invalid_size_t;
739 12341 : auto local_index = feature._orig_ids.begin()->second;
740 :
741 12341 : if (local_index < _local_to_global_feature_map.size())
742 12305 : global_index = _local_to_global_feature_map[local_index];
743 :
744 12305 : if (global_index != FeatureFloodCount::invalid_size_t)
745 : {
746 12257 : if (global_index > largest_global_index)
747 : largest_global_index = global_index;
748 :
749 : // Set the correct global index
750 12257 : feature._id = global_index;
751 :
752 : /**
753 : * Important: Make sure we clear the local status if we received a valid global
754 : * index for this feature. It's possible that we have a status of INACTIVE
755 : * on the local processor because there was never any starting threshold found.
756 : * However, the root processor wouldn't have sent an index if it didn't find
757 : * a starting threshold connected to our local piece.
758 : */
759 : feature._status &= ~Status::INACTIVE;
760 :
761 : // Move the feature into the correct place
762 12257 : _feature_sets[local_index] = std::move(feature);
763 : }
764 : }
765 : }
766 : }
767 : else
768 : {
769 30953 : for (auto global_index : local_to_global_all)
770 29212 : if (global_index != FeatureFloodCount::invalid_size_t && global_index > largest_global_index)
771 : largest_global_index = global_index;
772 : }
773 :
774 : // communicate the boundary intersection state
775 : std::vector<std::pair<unsigned int, int>> intersection_state;
776 31758 : for (auto & feature : _feature_sets)
777 28019 : intersection_state.emplace_back(feature._id, static_cast<int>(feature._boundary_intersection));
778 :
779 : // gather on root
780 3739 : _communicator.gather(0, intersection_state);
781 :
782 : // consolidate
783 : std::map<unsigned int, int> consolidated_intersection_state;
784 3739 : if (_is_primary)
785 29760 : for (const auto & [id, state] : intersection_state)
786 28019 : consolidated_intersection_state[id] |= state;
787 :
788 : // broadcast result
789 3739 : _communicator.broadcast(consolidated_intersection_state, 0);
790 :
791 : // apply broadcast changes
792 31758 : for (auto & feature : _feature_sets)
793 : feature._boundary_intersection |=
794 28019 : static_cast<BoundaryIntersection>(consolidated_intersection_state[feature._id]);
795 :
796 3739 : buildFeatureIdToLocalIndices(largest_global_index);
797 3739 : }
798 :
799 : Real
800 3968 : FeatureFloodCount::getValue() const
801 : {
802 3968 : return static_cast<Real>(_feature_count);
803 : }
804 :
805 : std::size_t
806 184 : FeatureFloodCount::getNumberActiveFeatures() const
807 : {
808 : // Note: This value is parallel consistent, see FeatureFloodCount::communicateAndMerge()
809 184 : return _feature_count;
810 : }
811 :
812 : std::size_t
813 192 : FeatureFloodCount::getTotalFeatureCount() const
814 : {
815 : /**
816 : * Since the FeatureFloodCount object doesn't maintain any information about
817 : * features between invocations, the maximum id in use is simply the number of
818 : * features.
819 : */
820 192 : return _feature_count;
821 : }
822 :
823 : unsigned int
824 1052 : FeatureFloodCount::getFeatureVar(unsigned int feature_id) const
825 : {
826 : // Some processors don't contain the largest feature id, in that case we just return invalid_id
827 1052 : if (feature_id >= _feature_id_to_local_index.size())
828 : return invalid_id;
829 :
830 998 : auto local_index = _feature_id_to_local_index[feature_id];
831 998 : if (local_index != invalid_size_t)
832 : {
833 : mooseAssert(local_index < _feature_sets.size(), "local_index out of bounds");
834 890 : return _feature_sets[local_index]._status != Status::INACTIVE
835 890 : ? _feature_sets[local_index]._var_index
836 890 : : invalid_id;
837 : }
838 :
839 : return invalid_id;
840 : }
841 :
842 : bool
843 576 : FeatureFloodCount::doesFeatureIntersectBoundary(unsigned int feature_id) const
844 : {
845 : // Some processors don't contain the largest feature id; in that case we just return false
846 576 : if (feature_id >= _feature_id_to_local_index.size())
847 : return false;
848 :
849 522 : auto local_index = _feature_id_to_local_index[feature_id];
850 :
851 522 : if (local_index != invalid_size_t)
852 : {
853 : mooseAssert(local_index < _feature_sets.size(), "local_index out of bounds");
854 492 : return _feature_sets[local_index]._status != Status::INACTIVE
855 492 : ? _feature_sets[local_index]._boundary_intersection != BoundaryIntersection::NONE
856 : : false;
857 : }
858 :
859 : return false;
860 : }
861 :
862 : bool
863 10127 : FeatureFloodCount::doesFeatureIntersectSpecifiedBoundary(unsigned int feature_id) const
864 : {
865 : // Some processors don't contain the largest feature id; in that case we just return false
866 10127 : if (feature_id >= _feature_id_to_local_index.size())
867 : return false;
868 :
869 5728 : auto local_index = _feature_id_to_local_index[feature_id];
870 :
871 5728 : if (local_index != invalid_size_t)
872 : {
873 : mooseAssert(local_index < _feature_sets.size(), "local_index out of bounds");
874 5698 : return _feature_sets[local_index]._status != Status::INACTIVE
875 5698 : ? ((_feature_sets[local_index]._boundary_intersection &
876 : BoundaryIntersection::SPECIFIED_BOUNDARY) ==
877 : BoundaryIntersection::SPECIFIED_BOUNDARY)
878 : : false;
879 : }
880 :
881 : return false;
882 : }
883 :
884 : bool
885 627 : FeatureFloodCount::isFeaturePercolated(unsigned int feature_id) const
886 : {
887 : // Some processors don't contain the largest feature id; in that case we just return false
888 627 : if (feature_id >= _feature_id_to_local_index.size())
889 : return false;
890 :
891 522 : auto local_index = _feature_id_to_local_index[feature_id];
892 :
893 522 : if (local_index != invalid_size_t)
894 : {
895 : mooseAssert(local_index < _feature_sets.size(), "local_index out of bounds");
896 492 : bool primary = ((_feature_sets[local_index]._boundary_intersection &
897 : BoundaryIntersection::PRIMARY_PERCOLATION_BOUNDARY) ==
898 492 : BoundaryIntersection::PRIMARY_PERCOLATION_BOUNDARY);
899 : bool secondary = ((_feature_sets[local_index]._boundary_intersection &
900 : BoundaryIntersection::SECONDARY_PERCOLATION_BOUNDARY) ==
901 492 : BoundaryIntersection::SECONDARY_PERCOLATION_BOUNDARY);
902 492 : return _feature_sets[local_index]._status != Status::INACTIVE ? (primary && secondary) : false;
903 : }
904 :
905 : return false;
906 : }
907 :
908 : Point
909 437 : FeatureFloodCount::featureCentroid(unsigned int feature_id) const
910 : {
911 437 : if (feature_id >= _feature_id_to_local_index.size())
912 : return invalid_id;
913 :
914 417 : auto local_index = _feature_id_to_local_index[feature_id];
915 :
916 : Real invalid_coord = std::numeric_limits<Real>::max();
917 : Point p(invalid_coord, invalid_coord, invalid_coord);
918 417 : if (local_index != invalid_size_t)
919 : {
920 : mooseAssert(local_index < _feature_sets.size(), "local_index out of bounds");
921 393 : p = _feature_sets[local_index]._centroid;
922 : }
923 417 : return p;
924 : }
925 :
926 : Real
927 6986331 : FeatureFloodCount::getEntityValue(dof_id_type entity_id,
928 : FieldType field_type,
929 : std::size_t var_index) const
930 : {
931 : auto use_default = false;
932 6986331 : if (var_index == invalid_size_t)
933 : {
934 : use_default = true;
935 : var_index = 0;
936 : }
937 :
938 : mooseAssert(var_index < _maps_size, "Index out of range");
939 :
940 6986331 : switch (field_type)
941 : {
942 2402336 : case FieldType::UNIQUE_REGION:
943 : {
944 : const auto entity_it = _feature_maps[var_index].find(entity_id);
945 :
946 2402336 : if (entity_it != _feature_maps[var_index].end())
947 1796356 : return entity_it->second; // + _region_offsets[var_index];
948 : else
949 : return -1;
950 : }
951 :
952 523858 : case FieldType::VARIABLE_COLORING:
953 : {
954 : mooseAssert(
955 : _var_index_mode,
956 : "\"enable_var_coloring\" must be set to true to pull back the VARIABLE_COLORING field");
957 :
958 : const auto entity_it = _var_index_maps[var_index].find(entity_id);
959 :
960 523858 : if (entity_it != _var_index_maps[var_index].end())
961 499529 : return entity_it->second;
962 : else
963 : return -1;
964 : }
965 :
966 324829 : case FieldType::GHOSTED_ENTITIES:
967 : {
968 : const auto entity_it = _ghosted_entity_ids.find(entity_id);
969 :
970 324829 : if (entity_it != _ghosted_entity_ids.end())
971 38612 : return entity_it->second;
972 : else
973 : return -1;
974 : }
975 :
976 3166941 : case FieldType::HALOS:
977 : {
978 3166941 : if (!use_default)
979 : {
980 : const auto entity_it = _halo_ids[var_index].find(entity_id);
981 2851136 : if (entity_it != _halo_ids[var_index].end())
982 356189 : return entity_it->second;
983 : }
984 : else
985 : {
986 : // Showing halos in reverse order for backwards compatibility
987 315805 : for (auto map_num = _maps_size;
988 1868348 : map_num-- /* don't compare greater than zero for unsigned */;)
989 : {
990 : const auto entity_it = _halo_ids[map_num].find(entity_id);
991 :
992 1753460 : if (entity_it != _halo_ids[map_num].end())
993 200917 : return entity_it->second;
994 : }
995 : }
996 : return -1;
997 : }
998 :
999 558867 : case FieldType::CENTROID:
1000 : {
1001 558867 : if (_periodic_node_map.size())
1002 0 : mooseDoOnce(mooseWarning(
1003 : "Centroids are not correct when using periodic boundaries, contact the MOOSE team"));
1004 :
1005 : // If this element contains the centroid of one of features, return one
1006 558867 : const auto * elem_ptr = _mesh.elemPtr(entity_id);
1007 :
1008 3472259 : for (const auto & feature : _feature_sets)
1009 : {
1010 2918704 : if (feature._status == Status::INACTIVE)
1011 6087 : continue;
1012 :
1013 2912617 : if (elem_ptr->contains_point(feature._centroid))
1014 : return 1;
1015 : }
1016 :
1017 : return 0;
1018 : }
1019 :
1020 9500 : case FieldType::INTERSECTS_SPECIFIED_BOUNDARY:
1021 : {
1022 9500 : auto ids = getVarToFeatureVector(entity_id);
1023 9500 : if (ids.size() != 0)
1024 9500 : return doesFeatureIntersectSpecifiedBoundary(ids[0]);
1025 : return 0;
1026 9500 : }
1027 :
1028 : default:
1029 : return 0;
1030 : }
1031 : }
1032 :
1033 : void
1034 3502 : FeatureFloodCount::prepareDataForTransfer()
1035 : {
1036 7004 : TIME_SECTION("prepareDataForTransfer", 3, "Preparing Data For Transfer");
1037 :
1038 3502 : MeshBase & mesh = _mesh.getMesh();
1039 :
1040 : FeatureData::container_type local_ids_no_ghost, set_difference;
1041 :
1042 20904 : for (auto & list_ref : _partial_feature_sets)
1043 : {
1044 44148 : for (auto & feature : list_ref)
1045 : {
1046 : // See if the feature intersects a boundary or perhaps one of the percolation boundaries.
1047 26746 : updateBoundaryIntersections(feature);
1048 :
1049 : // Periodic node ids
1050 26746 : appendPeriodicNeighborNodes(feature);
1051 :
1052 : /**
1053 : * If using a vector container, we need to sort all of the data structures for later
1054 : * operations such as checking for intersection and merging. The following "sort" function
1055 : * does nothing when invoked on a std::set.
1056 : */
1057 26746 : FeatureFloodCount::sort(feature._ghosted_ids);
1058 26746 : FeatureFloodCount::sort(feature._local_ids);
1059 26746 : FeatureFloodCount::sort(feature._halo_ids);
1060 26746 : FeatureFloodCount::sort(feature._disjoint_halo_ids);
1061 26746 : FeatureFloodCount::sort(feature._periodic_nodes);
1062 :
1063 : // Now extend the bounding box by the halo region
1064 26746 : if (_is_elemental)
1065 25117 : feature.updateBBoxExtremes(mesh);
1066 : else
1067 : {
1068 87779 : for (auto & halo_id : feature._halo_ids)
1069 86150 : updateBBoxExtremesHelper(feature._bboxes[0], mesh.point(halo_id));
1070 : }
1071 :
1072 : mooseAssert(!feature._local_ids.empty(), "local entity ids cannot be empty");
1073 :
1074 : /**
1075 : * Save off the min entity id present in the feature to uniquely
1076 : * identify the feature regardless of n_procs
1077 : */
1078 26746 : feature._min_entity_id = *feature._local_ids.begin();
1079 : }
1080 : }
1081 7004 : }
1082 :
1083 : void
1084 15580 : FeatureFloodCount::serialize(std::string & serialized_buffer, unsigned int var_num)
1085 : {
1086 : // stream for serializing the _partial_feature_sets data structure to a byte stream
1087 15580 : std::ostringstream oss;
1088 :
1089 : mooseAssert(var_num == invalid_id || var_num < _partial_feature_sets.size(),
1090 : "var_num out of range");
1091 :
1092 : // Serialize everything
1093 15580 : if (var_num == invalid_id)
1094 3454 : dataStore(oss, _partial_feature_sets, this);
1095 : else
1096 12126 : dataStore(oss, _partial_feature_sets[var_num], this);
1097 :
1098 : // Populate the passed in string pointer with the string stream's buffer contents
1099 15580 : serialized_buffer.assign(oss.str());
1100 15580 : }
1101 :
1102 : void
1103 3378 : FeatureFloodCount::deserialize(std::vector<std::string> & serialized_buffers, unsigned int var_num)
1104 : {
1105 : // The input string stream used for deserialization
1106 3378 : std::istringstream iss;
1107 :
1108 : auto rank = processor_id();
1109 :
1110 18958 : for (const auto proc_id : index_range(serialized_buffers))
1111 : {
1112 : /**
1113 : * Usually we have the local processor data already in the _partial_feature_sets data structure.
1114 : * However, if we are doing distributed merge work, we also need to preserve all of the original
1115 : * data for use in later stages of the algorithm so it'll have been swapped out with clean
1116 : * buffers. This leaves us a choice, either we just duplicate the Features from the original
1117 : * buffers. This leaves us a choice: either we just duplicate the Features from the original
1118 : * would normally already have. So during distributed merging, that's exactly what we'll do.
1119 : * Later however when the primary is doing the final consolidating, we'll opt to just skip
1120 : * the local unpacking. To tell the difference between these two modes, we just need to
1121 : * see if a var_num was passed in.
1122 : */
1123 15580 : if (var_num == invalid_id && proc_id == rank)
1124 1620 : continue;
1125 :
1126 : iss.str(serialized_buffers[proc_id]); // populate the stream with a new buffer
1127 13960 : iss.clear(); // reset the string stream state
1128 :
1129 : // Load the gathered data into the data structure.
1130 13960 : if (var_num == invalid_id)
1131 1834 : dataLoad(iss, _partial_feature_sets, this);
1132 : else
1133 12126 : dataLoad(iss, _partial_feature_sets[var_num], this);
1134 : }
1135 3378 : }
1136 :
1137 : void
1138 2515 : FeatureFloodCount::mergeSets()
1139 : {
1140 5030 : TIME_SECTION("mergeSets", 3, "Merging Sets");
1141 :
1142 : // When working with _distribute_merge_work all of the maps will be empty except for one
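// Each pair of partial features reported as mergeable by areFeaturesMergeable() is combined; the
// combined feature is spliced onto the back of the list so it is re-checked against every
// remaining feature before the outer iterator advances.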
1143 17788 : for (const auto map_num : make_range(_maps_size))
1144 : {
1145 15273 : for (auto it1 = _partial_feature_sets[map_num].begin();
1146 32380 : it1 != _partial_feature_sets[map_num].end();
1147 : /* No increment on it1 */)
1148 : {
1149 : bool merge_occured = false;
1150 17107 : for (auto it2 = _partial_feature_sets[map_num].begin();
1151 65497 : it2 != _partial_feature_sets[map_num].end();
1152 : ++it2)
1153 : {
1154 55641 : if (it1 != it2 && areFeaturesMergeable(*it1, *it2))
1155 : {
1156 7251 : it2->merge(std::move(*it1));
1157 :
1158 : /**
1159 : * Insert the new entity at the end of the list so that it may be checked against all
1160 : * other partial features again.
1161 : */
1162 : _partial_feature_sets[map_num].emplace_back(std::move(*it2));
1163 :
1164 : /**
1165 : * Now remove both halves of the merged feature: it2 contains the "moved" feature cell just
1166 : * inserted at the back of the list, it1 contains the mostly empty other half. We have to
1167 : * be careful about the order in which these two elements are deleted. We delete it2 first
1168 : * since we don't care where its iterator points after the deletion. We are going to break
1169 : * out of this loop anyway. If we delete it1 first, it may end up pointing at the same
1170 : * location as it2 which after the second deletion would cause both of the iterators to be
1171 : * invalidated.
1172 : */
1173 : _partial_feature_sets[map_num].erase(it2);
1174 : it1 = _partial_feature_sets[map_num].erase(it1); // it1 is incremented here!
1175 :
1176 : // A merge occurred, this is used to determine whether or not we increment the outer
1177 : // iterator
1178 : merge_occured = true;
1179 :
1180 : // We need to start the list comparison over for the new it1 so break here
1181 7251 : break;
1182 : }
1183 : } // it2 loop
1184 :
1185 17107 : if (!merge_occured) // No merges so we need to manually increment the outer iterator
1186 : ++it1;
1187 :
1188 : } // it1 loop
1189 : } // map loop
1190 2515 : }
1191 :
1192 : void
1193 1620 : FeatureFloodCount::consolidateMergedFeatures(std::vector<std::list<FeatureData>> * saved_data)
1194 : {
1195 3240 : TIME_SECTION("consolidateMergedFeatures", 3, "Consolidating Merged Features");
1196 :
1197 : /**
1198 : * Now that the merges are complete we need to adjust the centroids and halos.
1199 : * Additionally, to make several of the sorting and tracking algorithms more straightforward,
1200 : * we will move the features into a flat vector. Finally we can count the final number of
1201 : * features and find the max local index seen on any processor
1202 : *
1203 : * Note: This is all occurring on rank 0 only!
1204 : */
1205 : mooseAssert(_is_primary,
1206 : "consolidateMergedFeatures() may only be called on the primary processor");
1207 : mooseAssert(saved_data == nullptr || saved_data->size() == _partial_feature_sets.size(),
1208 : "Data structure size mismatch");
1209 :
1210 : // Offset where the current set of features with the same variable id starts in the flat vector
1211 : unsigned int feature_offset = 0;
1212 : // Set the member feature count to zero and start counting the actual features
1213 1620 : _feature_count = 0;
1214 7991 : for (const auto map_num : index_range(_partial_feature_sets))
1215 : {
1216 20226 : for (auto & feature : _partial_feature_sets[map_num])
1217 : {
1218 13855 : if (saved_data)
1219 : {
1220 13833 : for (auto it = (*saved_data)[map_num].begin(); it != (*saved_data)[map_num].end();
1221 : /* no increment */)
1222 : {
1223 10520 : if (feature.canConsolidate(*it))
1224 : {
1225 3557 : feature.consolidate(std::move(*it));
1226 : it = (*saved_data)[map_num].erase(it); // increment
1227 : }
1228 : else
1229 : ++it;
1230 : }
1231 : }
1232 :
1233 : // If after merging we still have an inactive feature, discard it
1234 13855 : if (feature._status == Status::CLEAR)
1235 : {
1236 : // First we need to calculate the centroid now that we are done merging all partial
1237 : // features
1238 13713 : if (feature._vol_count != 0)
1239 12445 : feature._centroid /= feature._vol_count;
1240 :
1241 13713 : _feature_sets.emplace_back(std::move(feature));
1242 13713 : ++_feature_count;
1243 : }
1244 : }
1245 :
1246 : // Record the feature numbers just for the current map
1247 6371 : _feature_counts_per_map[map_num] = _feature_count - feature_offset;
1248 :
1249 : // Now update the running feature count so we can calculate the next map's contribution
1250 6371 : feature_offset = _feature_count;
1251 :
1252 : // Clean up the "moved" objects
1253 : _partial_feature_sets[map_num].clear();
1254 6371 : if (saved_data)
1255 : (*saved_data)[map_num].clear();
1256 : }
1257 :
1258 : // We may have resized our data structures for the communicateAndMerge step. We'll restore the
1259 : // original size here just in case we need to loop over the assumed size (i.e. _maps_size)
1260 : // elsewhere in this or derived objects.
1261 1620 : if (_partial_feature_sets.size() != _maps_size)
1262 : {
1263 117 : _partial_feature_sets.resize(_maps_size);
1264 :
1265 117 : _feature_counts_per_map[0] = _feature_count;
1266 117 : _feature_counts_per_map.resize(_maps_size);
1267 : }
1268 :
1269 : /**
1270 : * IMPORTANT: FeatureFloodCount::_feature_count is set on rank 0 at this point but
1271 : * we can't broadcast it here because this routine is not collective.
1272 : */
1273 1620 : }
1274 :
1275 : bool
1276 38534 : FeatureFloodCount::areFeaturesMergeable(const FeatureData & f1, const FeatureData & f2) const
1277 : {
1278 38534 : return f1.mergeable(f2);
1279 : }
1280 :
1281 : void
1282 1123 : FeatureFloodCount::updateFieldInfo()
1283 : {
1284 11132 : for (const auto i : index_range(_feature_sets))
1285 : {
1286 10009 : auto & feature = _feature_sets[i];
1287 :
1288 : // If the developer has requested _condense_map_info we'll make sure we only update the zeroth
1289 : // map
1290 10009 : auto map_index = (_single_map_mode || _condense_map_info) ? decltype(feature._var_index)(0)
1291 : : feature._var_index;
1292 :
1293 : // Loop over the entity ids of this feature and update our local map
1294 925316 : for (auto entity : feature._local_ids)
1295 : {
1296 915307 : _feature_maps[map_index][entity] = static_cast<int>(feature._id);
1297 :
1298 915307 : if (_var_index_mode)
1299 13866 : _var_index_maps[map_index][entity] = feature._var_index;
1300 :
1301 : // Fill in the data structure that keeps track of all features per elem
1302 915307 : if (_compute_var_to_feature_map)
1303 : {
1304 174266 : auto insert_pair = moose_try_emplace(
1305 174266 : _entity_var_to_features, entity, std::vector<unsigned int>(_n_vars, invalid_id));
1306 : auto & vec_ref = insert_pair.first->second;
1307 174266 : vec_ref[feature._var_index] = feature._id;
1308 : }
1309 : }
1310 :
1311 10009 : if (_compute_halo_maps)
1312 : // Loop over the halo ids to update cells with halo information
1313 0 : for (auto entity : feature._halo_ids)
1314 0 : _halo_ids[map_index][entity] = static_cast<int>(feature._id);
1315 :
1316 : // Loop over the ghosted ids to update cells with ghost information
1317 131276 : for (auto entity : feature._ghosted_ids)
1318 121267 : _ghosted_entity_ids[entity] = 1;
1319 :
1320 : // TODO: Fixme
1321 10009 : if (!_global_numbering)
1322 0 : mooseError("Local numbering currently disabled");
1323 : }
1324 1123 : }
1325 :
1326 : bool
1327 8353526 : FeatureFloodCount::flood(const DofObject * dof_object, std::size_t current_index)
1328 :
1329 : {
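  // Iterative flood fill driven by _entity_queue: entities above the connecting threshold are
  // added to the current feature (a new FeatureData is created when none is active), the feature
  // only loses its INACTIVE status once an entity exceeds the starting threshold, and
  // below-threshold neighbors encountered while a feature is being built become halo ids.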
1330 : // if (dof_object == nullptr || dof_object == libMesh::remote_elem)
1331 : // return false;
1332 : mooseAssert(dof_object, "DOF object is nullptr");
1333 : mooseAssert(_entity_queue.empty(), "Entity queue is not empty when starting a feature");
1334 :
1335 : // Kick off the exploration of a new feature
1336 8353526 : _entity_queue.push_front(dof_object);
1337 :
1338 : bool return_value = false;
1339 8353526 : FeatureData * feature = nullptr;
1340 23777312 : while (!_entity_queue.empty())
1341 : {
1342 15423786 : const DofObject * curr_dof_object = _entity_queue.back();
1343 15423786 : const Elem * elem = _is_elemental ? static_cast<const Elem *>(curr_dof_object) : nullptr;
1344 15423786 : _entity_queue.pop_back();
1345 :
1346 : // Retrieve the id of the current entity
1347 15423786 : auto entity_id = curr_dof_object->id();
1348 :
1349 : // Has this entity already been marked? - if so move along
1350 15423786 : if (current_index != invalid_size_t &&
1351 : _entities_visited[current_index].find(entity_id) != _entities_visited[current_index].end())
1352 13570335 : continue;
1353 :
1354 : // Are we outside of the range we should be working in?
1355 9426550 : if (_is_elemental && !_dof_map.is_evaluable(*elem))
1356 0 : continue;
1357 :
1358 : // See if the current entity either starts a new feature or continues an existing feature
1359 9426550 : auto new_id = invalid_id; // Writable reference to hold an optional id;
1360 9426550 : Status status =
1361 : Status::INACTIVE; // Status is inactive until we find an entity above the starting threshold
1362 :
1363 : // Make sure that the Assembly object has the right element and subdomain information set
1364 : // since we are moving through the mesh in a manual fashion.
1365 9426550 : if (_is_elemental)
1366 6264315 : _fe_problem.setCurrentSubdomainID(elem, 0);
1367 :
1368 9426550 : if (!isNewFeatureOrConnectedRegion(curr_dof_object, current_index, feature, status, new_id))
1369 : {
1370 : // If we have an active feature, we just found a halo entity
1371 7573099 : if (feature)
1372 582374 : feature->_halo_ids.insert(feature->_halo_ids.end(), entity_id);
1373 7573099 : continue;
1374 : }
1375 :
1376 : mooseAssert(current_index != invalid_size_t, "current_index is invalid");
1377 :
1378 : /**
1379 : * If we reach this point (i.e. we haven't continued to the next queue entry),
1380 : * we've found a new mesh entity that's part of a feature. We need to mark
1381 : * the entity as visited at this point (and not before!) to avoid infinite
1382 : * recursion. If you mark the node too early you risk not coloring in a whole
1383 : * feature any time a "connecting threshold" is used since we may have
1384 : * already visited this entity earlier but it was in-between two thresholds.
1385 : */
1386 : return_value = true;
1387 1853451 : _entities_visited[current_index].insert(entity_id);
1388 :
1389 1853451 : auto map_num = _single_map_mode ? decltype(current_index)(0) : current_index;
1390 :
1391 : // New Feature (we need to create it and add it to our data structure)
1392 1853451 : if (!feature)
1393 : {
1394 : _partial_feature_sets[map_num].emplace_back(
1395 26746 : current_index, _feature_count++, processor_id(), status);
1396 :
1397 : // Get a handle to the feature we will update (always the last feature in the data structure)
1398 26746 : feature = &_partial_feature_sets[map_num].back();
1399 :
1400 : // If new_id is valid, we'll set it in the feature here.
1401 26746 : if (new_id != invalid_id)
1402 9639 : feature->_id = new_id;
1403 : }
1404 :
1405 : // Insert the current entity into the local ids data structure
1406 1853451 : feature->_local_ids.insert(feature->_local_ids.end(), entity_id);
1407 :
1408 : /**
1409 : * See if this particular entity cell contributes to the centroid calculation. We
1410 : * only deal with elemental floods and only count it if it's owned by the current
1411 : * processor to avoid skewing the result.
1412 : */
1413 1853451 : if (_is_elemental && processor_id() == curr_dof_object->processor_id())
1414 : {
1415 : // Keep track of how many elements participate in the centroid averaging
1416 1700400 : feature->_vol_count++;
1417 :
1418 : // Sum the centroid values for now, we'll average them later
1419 1700400 : feature->_centroid += elem->vertex_average();
1420 :
1421 : // // Does the volume intersect the boundary?
1422 : // if (_all_boundary_entity_ids.find(elem->id()) != _all_boundary_entity_ids.end())
1423 : // feature->_intersects_boundary = true;
1424 : }
1425 :
1426 1853451 : if (_is_elemental)
1427 1801364 : visitElementalNeighbors(elem,
1428 : feature,
1429 : /*expand_halos_only =*/false,
1430 : /*disjoint_only =*/false);
1431 : else
1432 52087 : visitNodalNeighbors(static_cast<const Node *>(curr_dof_object),
1433 : feature,
1434 : /*expand_halos_only =*/false);
1435 : }
1436 :
1437 8353526 : return return_value;
1438 : }
1439 :
1440 3473560 : Real FeatureFloodCount::getThreshold(std::size_t /*current_index*/) const
1441 : {
1442 3473560 : return _step_threshold;
1443 : }
1444 :
1445 7017913 : Real FeatureFloodCount::getConnectingThreshold(std::size_t /*current_index*/) const
1446 : {
1447 7017913 : return _step_connecting_threshold;
1448 : }
1449 :
1450 : bool
1451 15059270 : FeatureFloodCount::compareValueWithThreshold(Real entity_value, Real threshold) const
1452 : {
1453 15059270 : return ((_use_less_than_threshold_comparison && (entity_value >= threshold)) ||
1454 0 : (!_use_less_than_threshold_comparison && (entity_value <= threshold)));
1455 : }
1456 :
1457 : bool
1458 8041357 : FeatureFloodCount::isNewFeatureOrConnectedRegion(const DofObject * dof_object,
1459 : std::size_t & current_index,
1460 : FeatureData *& feature,
1461 : Status & status,
1462 : unsigned int & /*new_id*/)
1463 : {
1464 : // Get the value of the current variable for the current entity
1465 : Real entity_value;
1466 8041357 : if (_is_elemental)
1467 : {
1468 4879122 : const Elem * elem = static_cast<const Elem *>(dof_object);
1469 4879122 : std::vector<Point> centroid(1, elem->vertex_average());
1470 4879122 : _subproblem.reinitElemPhys(elem, centroid, 0);
1471 4879122 : entity_value = _vars[current_index]->sln()[0];
1472 4879122 : }
1473 : else
1474 3162235 : entity_value = _vars[current_index]->getNodalValue(*static_cast<const Node *>(dof_object));
1475 :
 1476 :   // If the value compares favorably against our starting threshold, this is definitely part
 1477 :   // of a feature we'll keep
1478 8041357 : if (compareValueWithThreshold(entity_value, getThreshold(current_index)))
1479 : {
1480 : Status * status_ptr = &status;
1481 :
1482 1023444 : if (feature)
1483 1014588 : status_ptr = &feature->_status;
1484 :
1485 : // Update an existing feature's status or clear the flag on the passed in status
1486 : *status_ptr &= ~Status::INACTIVE;
1487 1023444 : return true;
1488 : }
1489 :
1490 : /**
1491 : * If the value is _only_ above the connecting threshold, it's still part of a feature but
 1492 :    * If the value _only_ passes the connecting threshold, it's still part of a feature, but
 1493 :    * possibly one that we'll discard if the starting threshold is never encountered.
1494 7017913 : return compareValueWithThreshold(entity_value, getConnectingThreshold(current_index));
1495 : }
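
/**
 * Illustrative sketch (not part of the original implementation): the two-threshold acceptance
 * used above can be summarized in isolation. A value that passes the starting threshold seeds
 * or keeps a feature; a value that only passes the connecting threshold may extend a feature,
 * but such a feature is discarded later if no starting value is ever found. The names below are
 * hypothetical and assume the default "greater than or equal" comparison direction.
 *
 * @code
 * enum class Acceptance { Rejected, ConnectingOnly, Starting };
 *
 * Acceptance classifyValue(double value, double start_threshold, double connecting_threshold)
 * {
 *   if (value >= start_threshold)
 *     return Acceptance::Starting;       // definitely part of a feature we keep
 *   if (value >= connecting_threshold)
 *     return Acceptance::ConnectingOnly; // flood through it, but it cannot start a feature
 *   return Acceptance::Rejected;
 * }
 * @endcode
 */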
1496 :
1497 : void
1498 0 : FeatureFloodCount::expandPointHalos()
1499 : {
1500 0 : const auto & node_to_elem_map = _mesh.nodeToActiveSemilocalElemMap();
1501 : FeatureData::container_type expanded_local_ids;
1502 : auto my_processor_id = processor_id();
1503 :
1504 : /**
 1505 :    * To expand the feature element region to the actual flooded region (nodal basis),
 1506 :    * we need to add in all point neighbors of the current local region for each feature.
 1507 :    * This is because an elemental variable's influence spreads outward from the elemental
 1508 :    * data by exactly one element at every mesh point.
1509 : */
1510 0 : for (auto & list_ref : _partial_feature_sets)
1511 : {
1512 0 : for (auto & feature : list_ref)
1513 : {
1514 0 : expanded_local_ids.clear();
1515 :
1516 0 : for (auto entity : feature._local_ids)
1517 : {
1518 0 : const Elem * elem = _mesh.elemPtr(entity);
1519 : mooseAssert(elem, "elem pointer is NULL");
1520 :
1521 : // Get the nodes on a current element so that we can add in point neighbors
1522 0 : auto n_nodes = elem->n_vertices();
1523 0 : for (const auto i : make_range(n_nodes))
1524 : {
1525 : const Node * current_node = elem->node_ptr(i);
1526 :
1527 0 : auto elem_vector_it = node_to_elem_map.find(current_node->id());
1528 0 : if (elem_vector_it == node_to_elem_map.end())
1529 0 : mooseError("Error in node to elem map");
1530 :
1531 : const auto & elem_vector = elem_vector_it->second;
1532 :
1533 0 : std::copy(elem_vector.begin(),
1534 : elem_vector.end(),
1535 : std::insert_iterator<FeatureData::container_type>(expanded_local_ids,
1536 : expanded_local_ids.end()));
1537 :
1538 : // Now see which elements need to go into the ghosted set
1539 0 : for (auto entity : elem_vector)
1540 : {
1541 0 : const Elem * neighbor = _mesh.elemPtr(entity);
1542 : mooseAssert(neighbor, "neighbor pointer is NULL");
1543 :
1544 0 : if (neighbor->processor_id() != my_processor_id)
1545 0 : feature._ghosted_ids.insert(feature._ghosted_ids.end(), elem->id());
1546 : }
1547 : }
1548 : }
1549 :
1550 : // Replace the existing local ids with the expanded local ids
1551 : feature._local_ids.swap(expanded_local_ids);
1552 :
1553 : // Copy the expanded local_ids into the halo_ids container
1554 0 : feature._halo_ids = feature._local_ids;
1555 : }
1556 : }
1557 0 : }
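
/**
 * Illustrative sketch (not part of the original implementation): the point-neighbor expansion
 * above reduces to a set expansion through a node-to-element adjacency map. The helper below is
 * hypothetical and uses plain standard-library containers in place of the framework types.
 *
 * @code
 * #include <map>
 * #include <set>
 * #include <vector>
 *
 * using IdSet = std::set<unsigned long>;
 * using IdMap = std::map<unsigned long, std::vector<unsigned long>>;
 *
 * // Return the union of all elements that share at least one node with an element in 'elem_ids'.
 * IdSet expandToPointNeighbors(const IdSet & elem_ids,
 *                              const IdMap & elem_to_nodes,
 *                              const IdMap & node_to_elems)
 * {
 *   IdSet expanded;
 *   for (auto elem_id : elem_ids)
 *     for (auto node_id : elem_to_nodes.at(elem_id))
 *       for (auto neighbor_id : node_to_elems.at(node_id))
 *         expanded.insert(neighbor_id);
 *   return expanded;
 * }
 * @endcode
 */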
1558 :
1559 : void
1560 2897 : FeatureFloodCount::expandEdgeHalos(unsigned int num_layers_to_expand)
1561 : {
1562 2897 : if (num_layers_to_expand == 0)
1563 : return;
1564 :
1565 5794 : TIME_SECTION("expandEdgeHalos", 3, "Expanding Edge Halos");
1566 :
1567 19657 : for (auto & list_ref : _partial_feature_sets)
1568 : {
1569 40340 : for (auto & feature : list_ref)
1570 : {
1571 47160 : for (unsigned short halo_level = 0; halo_level < num_layers_to_expand; ++halo_level)
1572 : {
1573 : /**
1574 : * Create a copy of the halo set so that as we insert new ids into the
1575 : * set we don't continue to iterate on those new ids.
1576 : */
1577 23580 : FeatureData::container_type orig_halo_ids(feature._halo_ids);
1578 486289 : for (auto entity : orig_halo_ids)
1579 : {
1580 462709 : if (_is_elemental)
1581 456850 : visitElementalNeighbors(_mesh.elemPtr(entity),
1582 : &feature,
1583 : /*expand_halos_only =*/true,
1584 : /*disjoint_only =*/false);
1585 : else
1586 5859 : visitNodalNeighbors(_mesh.nodePtr(entity),
1587 : &feature,
1588 : /*expand_halos_only =*/true);
1589 : }
1590 :
1591 : /**
 1592 :         * We have to handle disjoint halo IDs slightly differently. Once an ID is disjoint, it
 1593 :         * can't go back, so make sure that we keep placing these IDs in the disjoint set.
1594 : */
1595 23580 : FeatureData::container_type disjoint_orig_halo_ids(feature._disjoint_halo_ids);
1596 77813 : for (auto entity : disjoint_orig_halo_ids)
1597 : {
1598 54233 : if (_is_elemental)
1599 54233 : visitElementalNeighbors(_mesh.elemPtr(entity),
1600 :
1601 : &feature,
1602 : /*expand_halos_only =*/true,
1603 : /*disjoint_only =*/true);
1604 : else
1605 0 : visitNodalNeighbors(_mesh.nodePtr(entity),
1606 :
1607 : &feature,
1608 : /*expand_halos_only =*/true);
1609 : }
1610 23580 : }
1611 : }
1612 : }
1613 : }
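
/**
 * Illustrative sketch (not part of the original implementation): the "iterate over a copy of the
 * halo set" pattern used above is the standard way to grow a container by exactly one layer per
 * pass without also visiting the entries inserted during the current pass. The helper below is
 * hypothetical; 'neighbors_of' stands in for any callable returning an iterable set of neighbor
 * ids.
 *
 * @code
 * #include <set>
 *
 * template <typename NeighborsOf>
 * void expandLayers(std::set<int> & frontier, unsigned int layers, NeighborsOf neighbors_of)
 * {
 *   for (unsigned int layer = 0; layer < layers; ++layer)
 *   {
 *     const std::set<int> snapshot(frontier); // iterate the copy, not the growing set
 *     for (int id : snapshot)
 *       for (int n : neighbors_of(id))
 *         frontier.insert(n);
 *   }
 * }
 * @endcode
 */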
1614 :
1615 : void
1616 2312447 : FeatureFloodCount::visitElementalNeighbors(const Elem * elem,
1617 : FeatureData * feature,
1618 : bool expand_halos_only,
1619 : bool disjoint_only)
1620 : {
1621 : mooseAssert(elem, "Elem is NULL");
1622 :
1623 : std::vector<const Elem *> all_active_neighbors;
1624 2312447 : MeshBase & mesh = _mesh.getMesh();
1625 :
1626 : // Loop over all neighbors (at the the same level as the current element)
 1627 :   // Loop over all neighbors (at the same level as the current element)
1628 : {
1629 : const Elem * neighbor_ancestor = nullptr;
1630 : bool topological_neighbor = false;
1631 :
1632 : /**
1633 : * Retrieve only the active neighbors for each side of this element, append them to the list
1634 : * of active neighbors
1635 : */
1636 : neighbor_ancestor = elem->neighbor_ptr(i);
1637 :
1638 9323948 : if (neighbor_ancestor)
1639 : {
1640 : /**
1641 : * In general, {evaluable elements} >= {local elements} U {algebraic ghosting elements}. That
 1642 :        * is, the number of evaluable elements does NOT necessarily equal the number of local and
 1643 :        * algebraic ghosting elements. The neighbors of evaluable elements can be remote even though
1644 : * we have two layers of geometric ghosting elements.
1645 : */
1646 9029388 : if (neighbor_ancestor->is_remote())
1647 0 : continue;
1648 :
1649 9029388 : neighbor_ancestor->active_family_tree_by_neighbor(all_active_neighbors, elem, false);
1650 : }
1651 : else
1652 : {
1653 294560 : neighbor_ancestor = elem->topological_neighbor(i, mesh, *_point_locator, _pbs);
1654 :
1655 : /**
1656 : * If the current element (passed into this method) doesn't have a connected neighbor but
1657 : * does have a topological neighbor, this might be a new disjoint region that we'll
 1658 :        * need to represent with a separate bounding box. To find out for sure, we'll need to
 1659 :        * see if the new neighbors are present in any of the halo or disjoint halo sets. If
1660 : * they are not present, this is a new region.
1661 : */
1662 294560 : if (neighbor_ancestor)
1663 : {
1664 : /**
1665 : * In general, {evaluable elements} >= {local elements} U {algebraic ghosting elements}.
 1666 :          * That is, the number of evaluable elements does NOT necessarily equal the number of
 1667 :          * local and algebraic ghosting elements. The neighbors of evaluable elements can be remote
1668 : * even though we have two layers of geometric ghosting elements.
1669 : */
1670 114980 : if (neighbor_ancestor->is_remote())
1671 0 : continue;
1672 :
1673 114980 : neighbor_ancestor->active_family_tree_by_topological_neighbor(
1674 114980 : all_active_neighbors, elem, mesh, *_point_locator, _pbs, false);
1675 :
1676 : topological_neighbor = true;
1677 : }
1678 : else
1679 : {
1680 : /**
 1681 :        * This neighbor is NULL, which means we need to expand the bounding box here in case this
 1682 :        * grain is up against multiple domain edges, so that we don't end up with a degenerate
 1683 :        * bounding box.
1684 : */
1685 179580 : updateBBoxExtremesHelper(feature->_bboxes[0], *elem);
1686 : }
1687 : }
1688 :
1689 9323948 : visitNeighborsHelper(elem,
1690 : all_active_neighbors,
1691 : feature,
1692 : expand_halos_only,
1693 : topological_neighbor,
1694 : disjoint_only);
1695 :
1696 9323948 : all_active_neighbors.clear();
1697 : }
1698 2312447 : }
1699 :
1700 : void
1701 57946 : FeatureFloodCount::visitNodalNeighbors(const Node * node,
1702 : FeatureData * feature,
1703 : bool expand_halos_only)
1704 : {
1705 : mooseAssert(node, "Node is NULL");
1706 :
1707 : std::vector<const Node *> all_active_neighbors;
1708 57946 : MeshTools::find_nodal_neighbors(_mesh.getMesh(), *node, _nodes_to_elem_map, all_active_neighbors);
1709 :
1710 57946 : visitNeighborsHelper(node, all_active_neighbors, feature, expand_halos_only, false, false);
1711 57946 : }
1712 :
1713 : template <typename T>
1714 : void
1715 9381894 : FeatureFloodCount::visitNeighborsHelper(const T * curr_entity,
1716 : std::vector<const T *> neighbor_entities,
1717 : FeatureData * feature,
1718 : bool expand_halos_only,
1719 : bool topological_neighbor,
1720 : bool disjoint_only)
1721 : {
1722 : // Loop over all active element neighbors
1723 18850548 : for (const auto neighbor : neighbor_entities)
1724 : {
1725 9468654 : if (neighbor && (!_is_boundary_restricted || isBoundaryEntity(neighbor)))
1726 : {
1727 9468416 : if (expand_halos_only)
1728 : {
1729 2058634 : auto entity_id = neighbor->id();
1730 :
1731 2058634 : if (topological_neighbor || disjoint_only)
1732 229203 : feature->_disjoint_halo_ids.insert(feature->_disjoint_halo_ids.end(), entity_id);
1733 1829431 : else if (!FeatureFloodCount::contains(feature->_local_ids, entity_id))
1734 1137162 : feature->_halo_ids.insert(feature->_halo_ids.end(), entity_id);
1735 : }
1736 : else
1737 : {
1738 : auto my_processor_id = processor_id();
1739 :
1740 7409782 : if (!topological_neighbor && neighbor->processor_id() != my_processor_id)
1741 408145 : feature->_ghosted_ids.insert(feature->_ghosted_ids.end(), curr_entity->id());
1742 :
1743 : /**
1744 : * Only recurse where we own this entity and it's a topologically connected entity. We
1745 : * shouldn't even attempt to flood to the periodic boundary because we won't have solution
1746 : * information and if we are using DistributedMesh we probably won't have geometric
1747 : * information either.
1748 : *
 1749 :          * When we only recurse on entities we own, we can never get more than one entity away
 1750 :          * from a local entity, which should be within the ghosted zone.
1751 : */
1752 7409782 : if (curr_entity->processor_id() == my_processor_id ||
1753 : neighbor->processor_id() == my_processor_id)
1754 : {
1755 : /**
1756 : * Premark neighboring entities with a halo mark. These
1757 : * entities may or may not end up being part of the feature.
1758 : * We will not update the _entities_visited data structure
1759 : * here.
1760 : */
1761 7117526 : if (topological_neighbor || disjoint_only)
1762 47266 : feature->_disjoint_halo_ids.insert(feature->_disjoint_halo_ids.end(), neighbor->id());
1763 : else
1764 7070260 : _entity_queue.push_front(neighbor);
1765 : }
1766 : }
1767 : }
1768 : }
1769 9381894 : }
1770 :
1771 : void
1772 26746 : FeatureFloodCount::updateBoundaryIntersections(FeatureData & feature) const
1773 : {
1774 26746 : if (_is_elemental)
1775 : {
1776 1826481 : for (auto entity : feature._local_ids)
1777 : {
1778 : // See if this feature is on a boundary if we haven't already figured that out
1779 1801364 : if ((feature._boundary_intersection & BoundaryIntersection::ANY_BOUNDARY) ==
1780 : BoundaryIntersection::NONE)
1781 : {
1782 856611 : Elem * elem = _mesh.elemPtr(entity);
1783 1713222 : if (elem && elem->on_boundary())
1784 : feature._boundary_intersection |= BoundaryIntersection::ANY_BOUNDARY;
1785 : }
1786 :
1787 : // Now see if the feature touches the primary and/or secondary boundary IDs if we haven't
1788 : // figured that out already
1789 1801364 : if ((feature._boundary_intersection & BoundaryIntersection::PRIMARY_PERCOLATION_BOUNDARY) ==
1790 : BoundaryIntersection::NONE)
1791 : {
1792 1817644 : for (auto primary_id : _primary_perc_bnds)
1793 19760 : if (_mesh.isBoundaryElem(entity, primary_id))
1794 : feature._boundary_intersection |= BoundaryIntersection::PRIMARY_PERCOLATION_BOUNDARY;
1795 : }
1796 :
1797 1801364 : if ((feature._boundary_intersection & BoundaryIntersection::SECONDARY_PERCOLATION_BOUNDARY) ==
1798 : BoundaryIntersection::NONE)
1799 : {
1800 1819516 : for (auto secondary_id : _secondary_perc_bnds)
1801 20696 : if (_mesh.isBoundaryElem(entity, secondary_id))
1802 : feature._boundary_intersection |= BoundaryIntersection::SECONDARY_PERCOLATION_BOUNDARY;
1803 : }
1804 :
1805 : // See if the feature contacts any of the user-specified boundaries if we haven't
1806 : // done so already
1807 1801364 : if ((feature._boundary_intersection & BoundaryIntersection::SPECIFIED_BOUNDARY) ==
1808 : BoundaryIntersection::NONE)
1809 : {
1810 1826608 : for (auto specified_id : _specified_bnds)
1811 35575 : if (_mesh.isBoundaryElem(entity, specified_id))
1812 : feature._boundary_intersection |= BoundaryIntersection::SPECIFIED_BOUNDARY;
1813 : }
1814 : }
1815 : }
1816 26746 : }
1817 :
1818 : void
1819 26746 : FeatureFloodCount::appendPeriodicNeighborNodes(FeatureData & feature) const
1820 : {
1821 26746 : if (_is_elemental)
1822 : {
1823 1826481 : for (auto entity : feature._local_ids)
1824 : {
1825 1801364 : Elem * elem = _mesh.elemPtr(entity);
1826 :
1827 9116068 : for (const auto node_n : make_range(elem->n_nodes()))
1828 : {
1829 7314704 : auto iters = _periodic_node_map.equal_range(elem->node_id(node_n));
1830 :
1831 7414218 : for (auto it = iters.first; it != iters.second; ++it)
1832 : {
1833 99514 : feature._periodic_nodes.insert(feature._periodic_nodes.end(), it->first);
1834 99514 : feature._periodic_nodes.insert(feature._periodic_nodes.end(), it->second);
1835 : }
1836 : }
1837 : }
1838 : }
1839 : else
1840 : {
1841 53716 : for (auto entity : feature._local_ids)
1842 : {
1843 : auto iters = _periodic_node_map.equal_range(entity);
1844 :
1845 53927 : for (auto it = iters.first; it != iters.second; ++it)
1846 : {
1847 1840 : feature._periodic_nodes.insert(feature._periodic_nodes.end(), it->first);
1848 1840 : feature._periodic_nodes.insert(feature._periodic_nodes.end(), it->second);
1849 : }
1850 : }
1851 : }
1852 :
1853 : // TODO: Remove duplicates
1854 26746 : }
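
/**
 * Illustrative sketch (not part of the original implementation): the "Remove duplicates" TODO
 * above could be addressed with the usual sort/unique/erase idiom, assuming the periodic node
 * container is a plain sequence (e.g. a std::vector) rather than an inherently unique set.
 *
 * @code
 * #include <algorithm>
 * #include <vector>
 *
 * void removeDuplicateIds(std::vector<unsigned long> & ids)
 * {
 *   std::sort(ids.begin(), ids.end());
 *   ids.erase(std::unique(ids.begin(), ids.end()), ids.end());
 * }
 * @endcode
 */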
1855 :
1856 : template <typename T>
1857 : bool
1858 740 : FeatureFloodCount::isBoundaryEntity(const T * entity) const
1859 : {
1860 : mooseAssert(_bnd_elem_range, "Boundary Element Range is nullptr");
1861 :
1862 740 : if (entity)
1863 44031 : for (const auto & belem : *_bnd_elem_range)
1864 : // Only works for Elements
1865 43793 : if (belem->_elem->id() == entity->id() && hasBoundary(belem->_bnd_id))
1866 : return true;
1867 :
1868 : return false;
1869 : }
1870 :
1871 : void
1872 25117 : FeatureFloodCount::FeatureData::updateBBoxExtremes(MeshBase & mesh)
1873 : {
1874 : // First update the primary bounding box (all topologically connected)
1875 1658503 : for (auto & halo_id : _halo_ids)
1876 1633386 : updateBBoxExtremesHelper(_bboxes[0], mesh.elem_ref(halo_id));
1877 433262 : for (auto & ghost_id : _ghosted_ids)
1878 408145 : updateBBoxExtremesHelper(_bboxes[0], mesh.elem_ref(ghost_id));
1879 :
1880 : FeatureData::container_type all_primary_region_ids;
1881 25117 : FeatureFloodCount::reserve(all_primary_region_ids, _local_ids.size() + _halo_ids.size());
1882 25117 : std::set_union(_local_ids.begin(),
1883 : _local_ids.end(),
1884 : _halo_ids.begin(),
1885 : _halo_ids.end(),
1886 : std::insert_iterator<FeatureData::container_type>(all_primary_region_ids,
1887 : all_primary_region_ids.begin()));
1888 :
1889 : // Remove all of the IDs that are in the primary region
1890 : std::list<dof_id_type> disjoint_elem_id_list;
1891 25117 : std::set_difference(_disjoint_halo_ids.begin(),
1892 : _disjoint_halo_ids.end(),
1893 : all_primary_region_ids.begin(),
1894 : all_primary_region_ids.end(),
1895 : std::insert_iterator<std::list<dof_id_type>>(disjoint_elem_id_list,
1896 : disjoint_elem_id_list.begin()));
1897 :
1898 : /**
1899 : * Now we need to find how many distinct topologically disconnected sets of elements we have.
 1900 :    * We've already removed elements that are part of the primary region; now we need to iterate
 1901 :    * over the remaining "possible" disjoint elements and partition them into independent
 1902 :    * bounding box regions.
1903 : */
1904 : constexpr auto TOLERANCE = libMesh::TOLERANCE * libMesh::TOLERANCE;
1905 246158 : for (auto elem_id : disjoint_elem_id_list)
1906 : {
1907 221041 : BoundingBox elem_bbox;
1908 221041 : updateBBoxExtremesHelper(elem_bbox, *mesh.elem_ptr(elem_id));
1909 :
1910 : bool found_match = false;
1911 510443 : for (auto & bbox : _bboxes)
1912 : {
1913 500053 : if (bbox.intersects(elem_bbox, TOLERANCE))
1914 : {
1915 210651 : updateBBoxExtremes(bbox, elem_bbox);
1916 : found_match = true;
1917 : break;
1918 : }
1919 : }
1920 :
1921 : if (!found_match)
1922 10390 : _bboxes.push_back(elem_bbox);
1923 : }
1924 :
1925 25117 : mergeBBoxes(_bboxes, false);
1926 :
1927 : /**
1928 : * We no longer need to distinguish among the various halo ids in disjoint bounding
 1929 :    * boxes. We'll just merge them all together for the remaining portions of the algorithm.
1930 : */
1931 : FeatureData::container_type set_union;
1932 25117 : FeatureFloodCount::reserve(set_union, _halo_ids.size() + _disjoint_halo_ids.size());
1933 25117 : std::set_union(_halo_ids.begin(),
1934 : _halo_ids.end(),
1935 : _disjoint_halo_ids.begin(),
1936 : _disjoint_halo_ids.end(),
1937 : std::insert_iterator<FeatureData::container_type>(set_union, set_union.begin()));
1938 : _halo_ids.swap(set_union);
1939 25117 : _disjoint_halo_ids.clear();
1940 50234 : }
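
/**
 * Illustrative sketch (not part of the original implementation): the std::set_union and
 * std::set_difference calls above require both input ranges to be sorted, and write their
 * results through an insert iterator into a fresh container. A minimal standalone version of
 * the same pattern, with hypothetical names:
 *
 * @code
 * #include <algorithm>
 * #include <iterator>
 * #include <vector>
 *
 * std::vector<int> sortedUnion(const std::vector<int> & a, const std::vector<int> & b)
 * {
 *   // Precondition: both inputs are sorted.
 *   std::vector<int> result;
 *   result.reserve(a.size() + b.size());
 *   std::set_union(a.begin(), a.end(), b.begin(), b.end(), std::back_inserter(result));
 *   return result;
 * }
 * @endcode
 */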
1941 :
1942 : void
1943 230834 : FeatureFloodCount::FeatureData::updateBBoxExtremes(BoundingBox & bbox, const BoundingBox & rhs_bbox)
1944 : {
1945 923336 : for (const auto i : make_range(Moose::dim))
1946 : {
1947 714649 : bbox.min()(i) = std::min(bbox.min()(i), rhs_bbox.min()(i));
1948 692502 : bbox.max()(i) = std::max(bbox.max()(i), rhs_bbox.max()(i));
1949 : }
1950 230834 : }
1951 :
1952 : bool
1953 106738 : FeatureFloodCount::FeatureData::boundingBoxesIntersect(const FeatureData & rhs) const
1954 : {
1955 : // See if any of the bounding boxes in either FeatureData object intersect
1956 170476 : for (const auto & bbox_lhs : _bboxes)
1957 199591 : for (const auto & bbox_rhs : rhs._bboxes)
1958 135853 : if (bbox_lhs.intersects(bbox_rhs, libMesh::TOLERANCE * libMesh::TOLERANCE))
1959 : return true;
1960 :
1961 : return false;
1962 : }
1963 :
1964 : bool
1965 28611 : FeatureFloodCount::FeatureData::halosIntersect(const FeatureData & rhs) const
1966 : {
1967 28611 : return MooseUtils::setsIntersect(_halo_ids, rhs._halo_ids);
1968 : }
1969 :
1970 : bool
1971 32438 : FeatureFloodCount::FeatureData::periodicBoundariesIntersect(const FeatureData & rhs) const
1972 : {
1973 32438 : return MooseUtils::setsIntersect(_periodic_nodes, rhs._periodic_nodes);
1974 : }
1975 :
1976 : bool
1977 15459 : FeatureFloodCount::FeatureData::ghostedIntersect(const FeatureData & rhs) const
1978 : {
1979 15459 : return MooseUtils::setsIntersect(_ghosted_ids, rhs._ghosted_ids);
1980 : }
1981 :
1982 : bool
1983 38534 : FeatureFloodCount::FeatureData::mergeable(const FeatureData & rhs) const
1984 : {
1985 76978 : return (_var_index == rhs._var_index && // the sets have matching variable indices and
1986 53903 : ((boundingBoxesIntersect(rhs) && // (if the feature's bboxes intersect and
1987 15459 : ghostedIntersect(rhs)) // the ghosted entities also intersect)
1988 32438 : || // or
1989 32438 : periodicBoundariesIntersect(rhs))); // periodic node sets intersect)
1990 : }
1991 :
1992 : bool
1993 10520 : FeatureFloodCount::FeatureData::canConsolidate(const FeatureData & rhs) const
1994 : {
1995 74621 : for (const auto & orig_id_pair1 : _orig_ids)
1996 131759 : for (const auto & orig_id_pair2 : rhs._orig_ids)
1997 : if (orig_id_pair1 == orig_id_pair2)
1998 : return true;
1999 :
2000 : return false;
2001 : }
2002 :
2003 : void
2004 12891 : FeatureFloodCount::FeatureData::merge(FeatureData && rhs)
2005 : {
2006 : mooseAssert(_var_index == rhs._var_index, "Mismatched variable index in merge");
2007 : mooseAssert(_id == rhs._id, "Mismatched auxiliary id in merge");
2008 :
2009 : FeatureData::container_type set_union;
2010 :
2011 12891 : FeatureFloodCount::reserve(set_union, _local_ids.size() + rhs._local_ids.size());
2012 12891 : std::set_union(_local_ids.begin(),
2013 : _local_ids.end(),
2014 : rhs._local_ids.begin(),
2015 : rhs._local_ids.end(),
2016 : std::insert_iterator<FeatureData::container_type>(set_union, set_union.begin()));
2017 : _local_ids.swap(set_union);
2018 :
2019 12891 : set_union.clear();
2020 12891 : FeatureFloodCount::reserve(set_union, _periodic_nodes.size() + rhs._periodic_nodes.size());
2021 12891 : std::set_union(_periodic_nodes.begin(),
2022 : _periodic_nodes.end(),
2023 : rhs._periodic_nodes.begin(),
2024 : rhs._periodic_nodes.end(),
2025 : std::insert_iterator<FeatureData::container_type>(set_union, set_union.begin()));
2026 : _periodic_nodes.swap(set_union);
2027 :
2028 12891 : set_union.clear();
2029 12891 : FeatureFloodCount::reserve(set_union, _ghosted_ids.size() + rhs._ghosted_ids.size());
2030 12891 : std::set_union(_ghosted_ids.begin(),
2031 : _ghosted_ids.end(),
2032 : rhs._ghosted_ids.begin(),
2033 : rhs._ghosted_ids.end(),
2034 : std::insert_iterator<FeatureData::container_type>(set_union, set_union.begin()));
2035 :
2036 : /**
2037 : * Even though we've determined that these two partial regions need to be merged, we don't
 2038 :    * necessarily know if the _ghosted_ids intersect. We could be in this branch because the periodic
 2039 :    * boundaries intersect, but that doesn't tell us anything about whether or not the ghosted regions
 2040 :    * also intersect. If the _ghosted_ids intersect, that means that we are merging along a periodic
 2041 :    * boundary, not across one. In this case the bounding box(es) need to be expanded.
2042 : */
2043 12891 : bool physical_intersection = (_ghosted_ids.size() + rhs._ghosted_ids.size() > set_union.size());
2044 : _ghosted_ids.swap(set_union);
2045 :
2046 : /**
2047 : * If we had a physical intersection, we need to expand boxes. If we had a virtual (periodic)
2048 : * intersection we need to preserve all of the boxes from each of the regions' sets.
2049 : */
2050 12891 : _bboxes.reserve(_bboxes.size() + rhs._bboxes.size());
2051 : std::copy(rhs._bboxes.begin(), rhs._bboxes.end(), std::back_inserter(_bboxes));
2052 :
2053 12891 : mergeBBoxes(_bboxes, physical_intersection);
2054 :
2055 12891 : set_union.clear();
2056 12891 : FeatureFloodCount::reserve(set_union, _disjoint_halo_ids.size() + rhs._disjoint_halo_ids.size());
2057 12891 : std::set_union(_disjoint_halo_ids.begin(),
2058 : _disjoint_halo_ids.end(),
2059 : rhs._disjoint_halo_ids.begin(),
2060 : rhs._disjoint_halo_ids.end(),
2061 : std::insert_iterator<FeatureData::container_type>(set_union, set_union.begin()));
2062 : _disjoint_halo_ids.swap(set_union);
2063 :
2064 12891 : set_union.clear();
2065 12891 : FeatureFloodCount::reserve(set_union, _halo_ids.size() + rhs._halo_ids.size());
2066 12891 : std::set_union(_halo_ids.begin(),
2067 : _halo_ids.end(),
2068 : rhs._halo_ids.begin(),
2069 : rhs._halo_ids.end(),
2070 : std::insert_iterator<FeatureData::container_type>(set_union, set_union.begin()));
2071 : _halo_ids.swap(set_union);
2072 :
2073 : // Keep track of the original ids so we can notify other processors of the local to global mapping
2074 12891 : _orig_ids.splice(_orig_ids.end(), std::move(rhs._orig_ids));
2075 :
2076 : // Update the min feature id
2077 12891 : _min_entity_id = std::min(_min_entity_id, rhs._min_entity_id);
2078 :
2079 : /**
2080 : * Combine the status flags: Currently we only expect to combine CLEAR and INACTIVE. Any other
2081 : * combination is currently a logic error. In this case of CLEAR and INACTIVE though,
 2082 :    * combination is currently a logic error. In the case of CLEAR and INACTIVE though,
2083 : */
2084 : mooseAssert((_status & Status::MARKED & Status::DIRTY) == Status::CLEAR,
2085 : "Flags in invalid state");
2086 :
2087 : // Logical AND here to combine flags (INACTIVE & INACTIVE == INACTIVE, all other combos are CLEAR)
2088 12891 : _status &= rhs._status;
2089 :
2090 : // Logical OR here to make sure we maintain boundary intersection attribute when joining
2091 12891 : _boundary_intersection |= rhs._boundary_intersection;
2092 :
2093 12891 : _vol_count += rhs._vol_count;
2094 : _centroid += rhs._centroid;
2095 12891 : }
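
/**
 * Illustrative sketch (not part of the original implementation): the status combination above
 * uses bitwise AND so that INACTIVE only survives when both features are INACTIVE (CLEAR, with
 * no bits set, always wins), while the boundary-intersection flags combine with bitwise OR so
 * that any recorded intersection is preserved. With plain unsigned flags the semantics are:
 *
 * @code
 * constexpr unsigned CLEAR = 0x0;
 * constexpr unsigned INACTIVE = 0x1;
 *
 * constexpr unsigned combineStatus(unsigned lhs, unsigned rhs) { return lhs & rhs; }
 *
 * static_assert(combineStatus(INACTIVE, INACTIVE) == INACTIVE, "both inactive stays inactive");
 * static_assert(combineStatus(CLEAR, INACTIVE) == CLEAR, "CLEAR wins over INACTIVE");
 * @endcode
 */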
2096 :
2097 : void
2098 3557 : FeatureFloodCount::FeatureData::consolidate(FeatureData && rhs)
2099 : {
2100 : mooseAssert(_var_index == rhs._var_index, "Mismatched variable index in merge");
2101 : mooseAssert(_id == rhs._id, "Mismatched auxiliary id in merge");
2102 :
2103 : FeatureData::container_type set_union;
2104 3557 : FeatureFloodCount::reserve(_local_ids, _local_ids.size() + rhs._local_ids.size());
2105 3557 : std::set_union(_local_ids.begin(),
2106 : _local_ids.end(),
2107 : rhs._local_ids.begin(),
2108 : rhs._local_ids.end(),
2109 : std::insert_iterator<FeatureData::container_type>(set_union, set_union.begin()));
2110 : _local_ids.swap(set_union);
2111 :
2112 : mooseAssert((_status & Status::MARKED & Status::DIRTY) == Status::CLEAR,
2113 : "Flags in invalid state");
2114 3557 : }
2115 :
2116 : void
2117 0 : FeatureFloodCount::FeatureData::clear()
2118 : {
2119 0 : _local_ids.clear();
2120 0 : _periodic_nodes.clear();
2121 0 : _halo_ids.clear();
2122 0 : _disjoint_halo_ids.clear();
2123 0 : _ghosted_ids.clear();
2124 0 : _bboxes.clear();
2125 : _orig_ids.clear();
2126 0 : }
2127 :
2128 : void
2129 38008 : FeatureFloodCount::FeatureData::mergeBBoxes(std::vector<BoundingBox> & bboxes,
2130 : bool physical_intersection)
2131 : {
2132 : /**
2133 : * It's possible to iterate over these disjoint elements in a way such that bounding boxes in
 2134 :    * a geometrically related region won't overlap on a single pass. Imagine having three elements
2135 : * across a periodic boundary that represent the halo that just bleeds over. If we attempt to
2136 : * merge the first and third bounding boxes, they won't intersect without having the second
2137 : * bounding box there to link them together. We'll have to continue to merge bounding boxes
2138 : * until convergence.
2139 : */
2140 38008 : std::list<BoundingBox> box_list(bboxes.begin(), bboxes.end());
2141 :
2142 : auto box_expanded = false;
2143 117944 : for (auto it1 = box_list.begin(); it1 != box_list.end(); /* No increment on it1 */)
2144 : {
2145 : auto merge_occured = false;
2146 259633 : for (auto it2 = box_list.begin(); it2 != box_list.end(); ++it2)
2147 : {
2148 199880 : if (it1 != it2 && it1->intersects(*it2, TOLERANCE))
2149 : {
2150 20183 : updateBBoxExtremes(*it2, *it1);
2151 : box_list.emplace_back(std::move(*it2));
2152 :
2153 : box_list.erase(it2);
2154 : it1 = box_list.erase(it1); // it1 is incremented here!
2155 :
2156 : box_expanded = true;
2157 : merge_occured = true;
2158 :
2159 20183 : break;
2160 : }
2161 : }
2162 :
2163 79936 : if (!merge_occured)
2164 : ++it1;
2165 : }
2166 :
2167 : /**
2168 : * Now copy the list back into the original vector.
2169 : */
2170 38008 : bboxes.resize(box_list.size());
2171 : std::copy(box_list.begin(), box_list.end(), bboxes.begin());
2172 :
2173 : // Error check
2174 38008 : if (physical_intersection && !box_expanded)
2175 : {
2176 0 : std::ostringstream oss;
2177 0 : oss << "LHS BBoxes:\n";
2178 0 : for (const auto i : index_range(_bboxes))
2179 0 : oss << "Max: " << _bboxes[i].max() << " Min: " << _bboxes[i].min() << '\n';
2180 :
2181 0 : ::mooseError("No Bounding Boxes Expanded - This is a catastrophic error!\n", oss.str());
2182 0 : }
2183 38008 : }
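
/**
 * Illustrative sketch (not part of the original implementation): the loop above repeats the
 * pairwise merge until no further merge occurs, because a single pass can miss transitive
 * overlaps (A overlaps B and B overlaps C, yet A does not overlap C). The same idea in one
 * dimension, with hypothetical interval pairs:
 *
 * @code
 * #include <algorithm>
 * #include <cstddef>
 * #include <utility>
 * #include <vector>
 *
 * using Interval = std::pair<double, double>; // [min, max]
 *
 * void mergeIntervals(std::vector<Interval> & intervals)
 * {
 *   bool merged = true;
 *   while (merged)
 *   {
 *     merged = false;
 *     for (std::size_t i = 0; i < intervals.size() && !merged; ++i)
 *       for (std::size_t j = i + 1; j < intervals.size() && !merged; ++j)
 *         if (intervals[i].first <= intervals[j].second && intervals[j].first <= intervals[i].second)
 *         {
 *           intervals[i] = {std::min(intervals[i].first, intervals[j].first),
 *                           std::max(intervals[i].second, intervals[j].second)};
 *           intervals.erase(intervals.begin() + j);
 *           merged = true;
 *         }
 *   }
 * }
 * @endcode
 */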
2184 :
2185 : std::ostream &
2186 6 : operator<<(std::ostream & out, const FeatureFloodCount::FeatureData & feature)
2187 : {
2188 : static const bool debug = true;
2189 :
2190 6 : out << "Grain ID: ";
2191 6 : if (feature._id != FeatureFloodCount::invalid_id)
2192 : out << feature._id;
2193 : else
2194 0 : out << "invalid";
2195 :
2196 : if (debug)
2197 : {
2198 6 : out << "\nGhosted Entities: ";
2199 216 : for (auto ghosted_id : feature._ghosted_ids)
2200 210 : out << ghosted_id << " ";
2201 :
2202 6 : out << "\nLocal Entities: ";
2203 876 : for (auto local_id : feature._local_ids)
2204 870 : out << local_id << " ";
2205 :
2206 6 : out << "\nHalo Entities: ";
2207 1198 : for (auto halo_id : feature._halo_ids)
2208 1192 : out << halo_id << " ";
2209 :
2210 6 : out << "\nPeriodic Node IDs: ";
2211 6 : for (auto periodic_node : feature._periodic_nodes)
2212 0 : out << periodic_node << " ";
2213 : }
2214 :
2215 6 : out << "\nBBoxes:";
2216 12 : for (const auto & bbox : feature._bboxes)
2217 : {
2218 6 : out << "\nMax: " << bbox.max() << " Min: " << bbox.min();
2219 : }
2220 :
2221 6 : out << "\nStatus: ";
2222 6 : if (feature._status == FeatureFloodCount::Status::CLEAR)
2223 0 : out << "CLEAR";
2224 6 : if (static_cast<bool>(feature._status & FeatureFloodCount::Status::MARKED))
2225 6 : out << " MARKED";
2226 6 : if (static_cast<bool>(feature._status & FeatureFloodCount::Status::DIRTY))
2227 0 : out << " DIRTY";
2228 6 : if (static_cast<bool>(feature._status & FeatureFloodCount::Status::INACTIVE))
2229 0 : out << " INACTIVE";
2230 :
2231 : if (debug)
2232 : {
2233 6 : out << "\nOrig IDs (rank, index): ";
2234 14 : for (const auto & orig_pair : feature._orig_ids)
2235 8 : out << '(' << orig_pair.first << ", " << orig_pair.second << ") ";
2236 6 : out << "\nVar_index: " << feature._var_index;
2237 6 : out << "\nMin Entity ID: " << feature._min_entity_id;
2238 : }
2239 6 : out << "\n" << std::endl;
2240 :
2241 6 : return out;
2242 : }
2243 :
2244 : /*****************************************************************************************
2245 : ******************************* Utility Routines ****************************************
2246 : *****************************************************************************************
2247 : */
2248 : void
2249 10281214 : updateBBoxExtremesHelper(BoundingBox & bbox, const Point & node)
2250 : {
2251 41124856 : for (const auto i : make_range(Moose::dim))
2252 : {
2253 31658229 : bbox.min()(i) = std::min(bbox.min()(i), node(i));
2254 30843642 : bbox.max()(i) = std::max(bbox.max()(i), node(i));
2255 : }
2256 10281214 : }
2257 :
2258 : void
2259 2442152 : updateBBoxExtremesHelper(BoundingBox & bbox, const Elem & elem)
2260 : {
2261 12637216 : for (const auto node_n : make_range(elem.n_nodes()))
2262 10195064 : updateBBoxExtremesHelper(bbox, *(elem.node_ptr(node_n)));
2263 2442152 : }
2264 :
2265 : // Constants
2266 : const std::size_t FeatureFloodCount::invalid_size_t = std::numeric_limits<std::size_t>::max();
2267 : const unsigned int FeatureFloodCount::invalid_id = std::numeric_limits<unsigned int>::max();
2268 : const processor_id_type FeatureFloodCount::invalid_proc_id =
2269 : std::numeric_limits<processor_id_type>::max();