Line data Source code
1 : //* This file is part of the MOOSE framework
2 : //* https://mooseframework.inl.gov
3 : //*
4 : //* All rights reserved, see COPYRIGHT for full restrictions
5 : //* https://github.com/idaholab/moose/blob/master/COPYRIGHT
6 : //*
7 : //* Licensed under LGPL 2.1, please see LICENSE for details
8 : //* https://www.gnu.org/licenses/lgpl-2.1.html
9 :
10 : #include "DistributedRectilinearMeshGenerator.h"
11 : #include "CastUniquePointer.h"
12 : #include "PetscExternalPartitioner.h"
13 :
14 : #include "SerializerGuard.h"
15 :
16 : #include "libmesh/mesh_generation.h"
17 : #include "libmesh/string_to_enum.h"
18 : #include "libmesh/periodic_boundaries.h"
19 : #include "libmesh/periodic_boundary_base.h"
20 : #include "libmesh/unstructured_mesh.h"
21 : #include "libmesh/mesh_communication.h"
22 : #include "libmesh/remote_elem.h"
23 : #include "libmesh/partitioner.h"
24 :
25 : // TIMPI includes
26 : #include "timpi/communicator.h"
27 : #include "timpi/parallel_sync.h"
28 :
29 : // C++ includes
30 : #include <cmath> // provides round, not std::round (see http://www.cplusplus.com/reference/cmath/round/)
31 :
32 : registerMooseObject("MooseApp", DistributedRectilinearMeshGenerator);
33 :
34 : InputParameters
35 30440 : DistributedRectilinearMeshGenerator::validParams()
36 : {
37 30440 : InputParameters params = PetscExternalPartitioner::validParams();
38 30440 : params += MeshGenerator::validParams();
39 :
40 30440 : MooseEnum dims("1=1 2 3");
41 30440 : params.addRequiredParam<MooseEnum>(
42 : "dim", dims, "The dimension of the mesh to be generated"); // Make this parameter required
43 :
44 30440 : params.addParam<dof_id_type>("nx", 1, "Number of elements in the X direction");
45 30440 : params.addParam<dof_id_type>("ny", 1, "Number of elements in the Y direction");
46 30440 : params.addParam<dof_id_type>("nz", 1, "Number of elements in the Z direction");
47 30440 : params.addParam<Real>("xmin", 0.0, "Lower X Coordinate of the generated mesh");
48 30440 : params.addParam<Real>("ymin", 0.0, "Lower Y Coordinate of the generated mesh");
49 30440 : params.addParam<Real>("zmin", 0.0, "Lower Z Coordinate of the generated mesh");
50 30440 : params.addParam<Real>("xmax", 1.0, "Upper X Coordinate of the generated mesh");
51 30440 : params.addParam<Real>("ymax", 1.0, "Upper Y Coordinate of the generated mesh");
52 30440 : params.addParam<Real>("zmax", 1.0, "Upper Z Coordinate of the generated mesh");
53 :
54 91320 : params.addParam<processor_id_type>(
55 60880 : "num_cores_for_partition", 0, "Number of cores for partitioning the graph");
56 :
57 91320 : params.addRangeCheckedParam<unsigned>(
58 : "num_side_layers",
59 60880 : 2,
60 : "num_side_layers>=1 & num_side_layers<5",
61 : "Number of layers of off-processor side neighbors is reserved during mesh generation");
62 :
63 30440 : params.addRelationshipManager("ElementSideNeighborLayers",
64 : Moose::RelationshipManagerType::GEOMETRIC,
65 0 : [](const InputParameters & obj_params, InputParameters & rm_params)
66 : {
67 : // Let this RM safeguard users specified ghosted layers
68 2806 : rm_params.set<unsigned short>("layers") =
69 2806 : obj_params.get<unsigned>("num_side_layers");
70 : // We can not attach geometric early here because some simulation
71 : // related info, such as, periodic BCs is not available yet during
72 : // an early stage. Periodic BCs will be passed into ghosting
73 : // functor during initFunctor of ElementSideNeighborLayers. There
74 : // is no hurt to attach geometric late for general simulations
75 : // that do not have extra requirements on ghosting elements. That
76 : // is especially true distributed generated meshes since there is
77 : // not much redundant info.
78 2806 : rm_params.set<bool>("attach_geometric_early") = false;
79 2806 : });
80 :
81 30440 : MooseEnum partition("graph linear square", "graph", false);
82 30440 : params.addParam<MooseEnum>(
83 : "partition", partition, "Which method (graph linear square) use to partition mesh");
84 :
85 30440 : MooseEnum elem_types(LIST_GEOM_ELEM); // no default
86 30440 : params.addParam<MooseEnum>("elem_type",
87 : elem_types,
88 : "The type of element from libMesh to "
89 : "generate (default: linear element for "
90 : "requested dimension)");
91 :
92 91320 : params.addRangeCheckedParam<Real>(
93 : "bias_x",
94 60880 : 1.,
95 : "bias_x>=0.5 & bias_x<=2",
96 : "The amount by which to grow (or shrink) the cells in the x-direction.");
97 91320 : params.addRangeCheckedParam<Real>(
98 : "bias_y",
99 60880 : 1.,
100 : "bias_y>=0.5 & bias_y<=2",
101 : "The amount by which to grow (or shrink) the cells in the y-direction.");
102 91320 : params.addRangeCheckedParam<Real>(
103 : "bias_z",
104 60880 : 1.,
105 : "bias_z>=0.5 & bias_z<=2",
106 : "The amount by which to grow (or shrink) the cells in the z-direction.");
107 :
108 30440 : params.addClassDescription(
109 : "Create a line, square, or cube mesh with uniformly spaced or biased elements.");
110 :
111 60880 : return params;
112 30440 : }
113 :
114 944 : DistributedRectilinearMeshGenerator::DistributedRectilinearMeshGenerator(
115 944 : const InputParameters & parameters)
116 : : MeshGenerator(parameters),
117 944 : _dim(getParam<MooseEnum>("dim")),
118 944 : _nx(declareMeshProperty("num_elements_x", getParam<dof_id_type>("nx"))),
119 944 : _ny(declareMeshProperty("num_elements_y", getParam<dof_id_type>("ny"))),
120 944 : _nz(declareMeshProperty("num_elements_z", getParam<dof_id_type>("nz"))),
121 944 : _xmin(declareMeshProperty("xmin", getParam<Real>("xmin"))),
122 944 : _xmax(declareMeshProperty("xmax", getParam<Real>("xmax"))),
123 944 : _ymin(declareMeshProperty("ymin", getParam<Real>("ymin"))),
124 944 : _ymax(declareMeshProperty("ymax", getParam<Real>("ymax"))),
125 944 : _zmin(declareMeshProperty("zmin", getParam<Real>("zmin"))),
126 944 : _zmax(declareMeshProperty("zmax", getParam<Real>("zmax"))),
127 944 : _num_cores_for_partition(getParam<processor_id_type>("num_cores_for_partition")),
128 944 : _bias_x(getParam<Real>("bias_x")),
129 944 : _bias_y(getParam<Real>("bias_y")),
130 944 : _bias_z(getParam<Real>("bias_z")),
131 944 : _part_package(getParam<MooseEnum>("part_package")),
132 944 : _num_parts_per_compute_node(getParam<processor_id_type>("num_cores_per_compute_node")),
133 944 : _partition_method(getParam<MooseEnum>("partition")),
134 1888 : _num_side_layers(getParam<unsigned>("num_side_layers"))
135 : {
136 944 : declareMeshProperty("use_distributed_mesh", true);
137 944 : }
138 :
139 : template <>
140 : dof_id_type
141 0 : DistributedRectilinearMeshGenerator::numNeighbors<Edge2>(const dof_id_type nx,
142 : const dof_id_type /*ny*/,
143 : const dof_id_type /*nz*/,
144 : const dof_id_type i,
145 : const dof_id_type /*j*/,
146 : const dof_id_type /*k*/)
147 : {
148 : // The ends only have one neighbor
149 0 : if (i == 0 || i == nx - 1)
150 0 : return 1;
151 :
152 0 : return 2;
153 : }
154 :
155 : template <>
156 : void
157 6654 : DistributedRectilinearMeshGenerator::getNeighbors<Edge2>(const dof_id_type nx,
158 : const dof_id_type /*ny*/,
159 : const dof_id_type /*nz*/,
160 : const dof_id_type i,
161 : const dof_id_type /*j*/,
162 : const dof_id_type /*k*/,
163 : std::vector<dof_id_type> & neighbors,
164 : const bool corner)
165 :
166 : {
167 6654 : if (corner)
168 : {
169 : // The elements on the opposite of the current boundary are required
170 : // for periodic boundary conditions
171 4494 : neighbors[0] = (i - 1 + nx) % nx;
172 4494 : neighbors[1] = (i + 1 + nx) % nx;
173 :
174 4494 : return;
175 : }
176 :
177 2160 : neighbors[0] = i - 1;
178 2160 : neighbors[1] = i + 1;
179 :
180 : // First element doesn't have a left neighbor
181 2160 : if (i == 0)
182 36 : neighbors[0] = Elem::invalid_id;
183 :
184 : // Last element doesn't have a right neighbor
185 2160 : if (i == nx - 1)
186 36 : neighbors[1] = Elem::invalid_id;
187 : }
188 :
189 : template <>
190 : void
191 5028 : DistributedRectilinearMeshGenerator::getIndices<Edge2>(const dof_id_type /*nx*/,
192 : const dof_id_type /*ny*/,
193 : const dof_id_type elem_id,
194 : dof_id_type & i,
195 : dof_id_type & /*j*/,
196 : dof_id_type & /*k*/)
197 : {
198 5028 : i = elem_id;
199 5028 : }
200 :
201 : template <>
202 : void
203 186 : DistributedRectilinearMeshGenerator::getGhostNeighbors<Edge2>(
204 : const dof_id_type nx,
205 : const dof_id_type /*ny*/,
206 : const dof_id_type /*nz*/,
207 : const MeshBase & mesh,
208 : const std::set<dof_id_type> & current_elems,
209 : std::set<dof_id_type> & ghost_elems)
210 : {
211 186 : std::vector<dof_id_type> neighbors(2);
212 :
213 4680 : for (auto elem_id : current_elems)
214 : {
215 4494 : getNeighbors<Edge2>(nx, 0, 0, elem_id, 0, 0, neighbors, true);
216 :
217 13482 : for (auto neighbor : neighbors)
218 8988 : if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
219 522 : ghost_elems.insert(neighbor);
220 : }
221 186 : }
222 :
223 : template <>
224 : dof_id_type
225 0 : DistributedRectilinearMeshGenerator::elemId<Edge2>(const dof_id_type /*nx*/,
226 : const dof_id_type /*ny*/,
227 : const dof_id_type i,
228 : const dof_id_type /*j*/,
229 : const dof_id_type /*k*/)
230 : {
231 0 : return i;
232 : }
233 :
234 : template <>
235 : void
236 2508 : DistributedRectilinearMeshGenerator::addElement<Edge2>(const dof_id_type nx,
237 : const dof_id_type /*ny*/,
238 : const dof_id_type /*nz*/,
239 : const dof_id_type /*i*/,
240 : const dof_id_type /*j*/,
241 : const dof_id_type /*k*/,
242 : const dof_id_type elem_id,
243 : const processor_id_type pid,
244 : const ElemType /*type*/,
245 : MeshBase & mesh)
246 : {
247 2508 : BoundaryInfo & boundary_info = mesh.get_boundary_info();
248 :
249 2508 : auto node_offset = elem_id;
250 :
251 2508 : Node * node0_ptr = mesh.query_node_ptr(node_offset);
252 2508 : if (!node0_ptr)
253 : {
254 : std::unique_ptr<Node> new_node =
255 247 : Node::build(Point(static_cast<Real>(node_offset) / nx, 0, 0), node_offset);
256 :
257 247 : new_node->set_unique_id(nx + node_offset);
258 247 : new_node->processor_id() = pid;
259 :
260 247 : node0_ptr = mesh.add_node(std::move(new_node));
261 247 : }
262 :
263 2508 : Node * node1_ptr = mesh.query_node_ptr(node_offset + 1);
264 2508 : if (!node1_ptr)
265 : {
266 : std::unique_ptr<Node> new_node =
267 2414 : Node::build(Point(static_cast<Real>(node_offset + 1) / nx, 0, 0), node_offset + 1);
268 :
269 2414 : new_node->set_unique_id(nx + node_offset + 1);
270 2414 : new_node->processor_id() = pid;
271 :
272 2414 : node1_ptr = mesh.add_node(std::move(new_node));
273 2414 : }
274 :
275 2508 : Elem * elem = new Edge2;
276 2508 : elem->set_id(elem_id);
277 2508 : elem->processor_id() = pid;
278 2508 : elem->set_unique_id(elem_id);
279 2508 : elem = mesh.add_elem(elem);
280 2508 : elem->set_node(0, node0_ptr);
281 2508 : elem->set_node(1, node1_ptr);
282 :
283 2508 : if (elem_id == 0)
284 66 : boundary_info.add_side(elem, 0, 0);
285 :
286 2508 : if (elem_id == nx - 1)
287 66 : boundary_info.add_side(elem, 1, 1);
288 2508 : }
289 :
290 : template <>
291 : void
292 93 : DistributedRectilinearMeshGenerator::setBoundaryNames<Edge2>(BoundaryInfo & boundary_info)
293 : {
294 93 : boundary_info.sideset_name(0) = "left";
295 93 : boundary_info.sideset_name(1) = "right";
296 93 : }
297 :
298 : template <>
299 : void
300 93 : DistributedRectilinearMeshGenerator::scaleNodalPositions<Edge2>(dof_id_type /*nx*/,
301 : dof_id_type /*ny*/,
302 : dof_id_type /*nz*/,
303 : Real xmin,
304 : Real xmax,
305 : Real /*ymin*/,
306 : Real /*ymax*/,
307 : Real /*zmin*/,
308 : Real /*zmax*/,
309 : MeshBase & mesh)
310 : {
311 2754 : for (auto & node_ptr : mesh.node_ptr_range())
312 2754 : (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
313 93 : }
314 :
315 : template <>
316 : dof_id_type
317 0 : DistributedRectilinearMeshGenerator::numNeighbors<Quad4>(const dof_id_type nx,
318 : const dof_id_type ny,
319 : const dof_id_type /*nz*/,
320 : const dof_id_type i,
321 : const dof_id_type j,
322 : const dof_id_type /*k*/)
323 : {
324 0 : dof_id_type n = 4;
325 :
326 0 : if (i == 0)
327 0 : n--;
328 :
329 0 : if (i == nx - 1)
330 0 : n--;
331 :
332 0 : if (j == 0)
333 0 : n--;
334 :
335 0 : if (j == ny - 1)
336 0 : n--;
337 :
338 0 : return n;
339 : }
340 :
341 : template <>
342 : dof_id_type
343 8160588 : DistributedRectilinearMeshGenerator::elemId<Quad4>(const dof_id_type nx,
344 : const dof_id_type /*nx*/,
345 : const dof_id_type i,
346 : const dof_id_type j,
347 : const dof_id_type /*k*/)
348 : {
349 8160588 : return (j * nx) + i;
350 : }
351 :
352 : template <>
353 : void
354 1114424 : DistributedRectilinearMeshGenerator::getNeighbors<Quad4>(const dof_id_type nx,
355 : const dof_id_type ny,
356 : const dof_id_type /*nz*/,
357 : const dof_id_type i,
358 : const dof_id_type j,
359 : const dof_id_type /*k*/,
360 : std::vector<dof_id_type> & neighbors,
361 : const bool corner)
362 : {
363 1114424 : std::fill(neighbors.begin(), neighbors.end(), Elem::invalid_id);
364 :
365 1114424 : if (corner)
366 : {
367 : // libMesh dof_id_type looks like unsigned int
368 : // We add one layer of point neighbors by default. Besides,
369 : // The elements on the opposite side of the current boundary are included
370 : // for, in case, periodic boundary conditions. The overhead is negligible
371 : // since you could consider every element has the same number of neighbors
372 748472 : unsigned int nnb = 0;
373 2993888 : for (unsigned int ii = 0; ii <= 2; ii++)
374 8981664 : for (unsigned int jj = 0; jj <= 2; jj++)
375 6736248 : neighbors[nnb++] = elemId<Quad4>(nx, 0, (i + ii - 1 + nx) % nx, (j + jj - 1 + ny) % ny, 0);
376 :
377 748472 : return;
378 : }
379 : // Bottom
380 365952 : if (j != 0)
381 356220 : neighbors[0] = elemId<Quad4>(nx, 0, i, j - 1, 0);
382 :
383 : // Right
384 365952 : if (i != nx - 1)
385 355950 : neighbors[1] = elemId<Quad4>(nx, 0, i + 1, j, 0);
386 :
387 : // Top
388 365952 : if (j != ny - 1)
389 356220 : neighbors[2] = elemId<Quad4>(nx, 0, i, j + 1, 0);
390 :
391 : // Left
392 365952 : if (i != 0)
393 355950 : neighbors[3] = elemId<Quad4>(nx, 0, i - 1, j, 0);
394 : }
395 :
396 : template <>
397 : void
398 1708596 : DistributedRectilinearMeshGenerator::getIndices<Quad4>(const dof_id_type nx,
399 : const dof_id_type /*ny*/,
400 : const dof_id_type elem_id,
401 : dof_id_type & i,
402 : dof_id_type & j,
403 : dof_id_type & /*k*/)
404 : {
405 1708596 : i = elem_id % nx;
406 1708596 : j = (elem_id - i) / nx;
407 1708596 : }
408 :
409 : template <>
410 : void
411 1028 : DistributedRectilinearMeshGenerator::getGhostNeighbors<Quad4>(
412 : const dof_id_type nx,
413 : const dof_id_type ny,
414 : const dof_id_type /*nz*/,
415 : const MeshBase & mesh,
416 : const std::set<dof_id_type> & current_elems,
417 : std::set<dof_id_type> & ghost_elems)
418 : {
419 : dof_id_type i, j, k;
420 :
421 1028 : std::vector<dof_id_type> neighbors(9);
422 :
423 749500 : for (auto elem_id : current_elems)
424 : {
425 748472 : getIndices<Quad4>(nx, 0, elem_id, i, j, k);
426 :
427 748472 : getNeighbors<Quad4>(nx, ny, 0, i, j, 0, neighbors, true);
428 :
429 7484720 : for (auto neighbor : neighbors)
430 6736248 : if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
431 197088 : ghost_elems.insert(neighbor);
432 : }
433 1028 : }
434 :
435 : template <>
436 : dof_id_type
437 1599088 : DistributedRectilinearMeshGenerator::nodeId<Quad4>(const ElemType /*type*/,
438 : const dof_id_type nx,
439 : const dof_id_type /*ny*/,
440 : const dof_id_type i,
441 : const dof_id_type j,
442 : const dof_id_type /*k*/)
443 :
444 : {
445 1599088 : return i + j * (nx + 1);
446 : }
447 :
448 : template <>
449 : void
450 399772 : DistributedRectilinearMeshGenerator::addElement<Quad4>(const dof_id_type nx,
451 : const dof_id_type ny,
452 : const dof_id_type /*nz*/,
453 : const dof_id_type i,
454 : const dof_id_type j,
455 : const dof_id_type /*k*/,
456 : const dof_id_type elem_id,
457 : const processor_id_type pid,
458 : const ElemType type,
459 : MeshBase & mesh)
460 : {
461 399772 : BoundaryInfo & boundary_info = mesh.get_boundary_info();
462 :
463 : // Bottom Left
464 399772 : const dof_id_type node0_id = nodeId<Quad4>(type, nx, 0, i, j, 0);
465 399772 : Node * node0_ptr = mesh.query_node_ptr(node0_id);
466 399772 : if (!node0_ptr)
467 : {
468 : std::unique_ptr<Node> new_node =
469 14966 : Node::build(Point(static_cast<Real>(i) / nx, static_cast<Real>(j) / ny, 0), node0_id);
470 :
471 14966 : new_node->set_unique_id(nx * ny + node0_id);
472 14966 : new_node->processor_id() = pid;
473 :
474 14966 : node0_ptr = mesh.add_node(std::move(new_node));
475 14966 : }
476 :
477 : // Bottom Right
478 399772 : const dof_id_type node1_id = nodeId<Quad4>(type, nx, 0, i + 1, j, 0);
479 399772 : Node * node1_ptr = mesh.query_node_ptr(node1_id);
480 399772 : if (!node1_ptr)
481 : {
482 : std::unique_ptr<Node> new_node =
483 22273 : Node::build(Point(static_cast<Real>(i + 1) / nx, static_cast<Real>(j) / ny, 0), node1_id);
484 :
485 22273 : new_node->set_unique_id(nx * ny + node1_id);
486 22273 : new_node->processor_id() = pid;
487 :
488 22273 : node1_ptr = mesh.add_node(std::move(new_node));
489 22273 : }
490 :
491 : // Top Right
492 399772 : const dof_id_type node2_id = nodeId<Quad4>(type, nx, 0, i + 1, j + 1, 0);
493 399772 : Node * node2_ptr = mesh.query_node_ptr(node2_id);
494 399772 : if (!node2_ptr)
495 : {
496 : std::unique_ptr<Node> new_node = Node::build(
497 375758 : Point(static_cast<Real>(i + 1) / nx, static_cast<Real>(j + 1) / ny, 0), node2_id);
498 :
499 375758 : new_node->set_unique_id(nx * ny + node2_id);
500 375758 : new_node->processor_id() = pid;
501 :
502 375758 : node2_ptr = mesh.add_node(std::move(new_node));
503 375758 : }
504 :
505 : // Top Left
506 399772 : const dof_id_type node3_id = nodeId<Quad4>(type, nx, 0, i, j + 1, 0);
507 399772 : Node * node3_ptr = mesh.query_node_ptr(node3_id);
508 399772 : if (!node3_ptr)
509 : {
510 : std::unique_ptr<Node> new_node =
511 21164 : Node::build(Point(static_cast<Real>(i) / nx, static_cast<Real>(j + 1) / ny, 0), node3_id);
512 :
513 21164 : new_node->set_unique_id(nx * ny + node3_id);
514 21164 : new_node->processor_id() = pid;
515 :
516 21164 : node3_ptr = mesh.add_node(std::move(new_node));
517 21164 : }
518 :
519 399772 : Elem * elem = new Quad4;
520 399772 : elem->set_id(elem_id);
521 399772 : elem->processor_id() = pid;
522 399772 : elem->set_unique_id(elem_id);
523 399772 : elem = mesh.add_elem(elem);
524 399772 : elem->set_node(0, node0_ptr);
525 399772 : elem->set_node(1, node1_ptr);
526 399772 : elem->set_node(2, node2_ptr);
527 399772 : elem->set_node(3, node3_ptr);
528 :
529 : // Bottom
530 399772 : if (j == 0)
531 12121 : boundary_info.add_side(elem, 0, 0);
532 :
533 : // Right
534 399772 : if (i == nx - 1)
535 12573 : boundary_info.add_side(elem, 1, 1);
536 :
537 : // Top
538 399772 : if (j == ny - 1)
539 12121 : boundary_info.add_side(elem, 2, 2);
540 :
541 : // Left
542 399772 : if (i == 0)
543 12573 : boundary_info.add_side(elem, 3, 3);
544 399772 : }
545 :
546 : template <>
547 : void
548 514 : DistributedRectilinearMeshGenerator::setBoundaryNames<Quad4>(BoundaryInfo & boundary_info)
549 : {
550 514 : boundary_info.sideset_name(0) = "bottom";
551 514 : boundary_info.sideset_name(1) = "right";
552 514 : boundary_info.sideset_name(2) = "top";
553 514 : boundary_info.sideset_name(3) = "left";
554 514 : }
555 :
556 : template <>
557 : void
558 514 : DistributedRectilinearMeshGenerator::scaleNodalPositions<Quad4>(dof_id_type /*nx*/,
559 : dof_id_type /*ny*/,
560 : dof_id_type /*nz*/,
561 : Real xmin,
562 : Real xmax,
563 : Real ymin,
564 : Real ymax,
565 : Real /*zmin*/,
566 : Real /*zmax*/,
567 : MeshBase & mesh)
568 : {
569 434675 : for (auto & node_ptr : mesh.node_ptr_range())
570 : {
571 434161 : (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
572 434161 : (*node_ptr)(1) = (*node_ptr)(1) * (ymax - ymin) + ymin;
573 514 : }
574 514 : }
575 :
576 : template <>
577 : dof_id_type
578 32661198 : DistributedRectilinearMeshGenerator::elemId<Hex8>(const dof_id_type nx,
579 : const dof_id_type ny,
580 : const dof_id_type i,
581 : const dof_id_type j,
582 : const dof_id_type k)
583 : {
584 32661198 : return i + (j * nx) + (k * nx * ny);
585 : }
586 :
587 : template <>
588 : dof_id_type
589 0 : DistributedRectilinearMeshGenerator::numNeighbors<Hex8>(const dof_id_type nx,
590 : const dof_id_type ny,
591 : const dof_id_type nz,
592 : const dof_id_type i,
593 : const dof_id_type j,
594 : const dof_id_type k)
595 : {
596 0 : dof_id_type n = 6;
597 :
598 0 : if (i == 0)
599 0 : n--;
600 :
601 0 : if (i == nx - 1)
602 0 : n--;
603 :
604 0 : if (j == 0)
605 0 : n--;
606 :
607 0 : if (j == ny - 1)
608 0 : n--;
609 :
610 0 : if (k == 0)
611 0 : n--;
612 :
613 0 : if (k == nz - 1)
614 0 : n--;
615 :
616 0 : return n;
617 : }
618 :
619 : template <>
620 : void
621 1583474 : DistributedRectilinearMeshGenerator::getNeighbors<Hex8>(const dof_id_type nx,
622 : const dof_id_type ny,
623 : const dof_id_type nz,
624 : const dof_id_type i,
625 : const dof_id_type j,
626 : const dof_id_type k,
627 : std::vector<dof_id_type> & neighbors,
628 : const bool corner)
629 : {
630 1583474 : std::fill(neighbors.begin(), neighbors.end(), Elem::invalid_id);
631 :
632 1583474 : if (corner)
633 : {
634 : // We collect one layer of point neighbors
635 : // We add one layer of point neighbors by default. Besides,
636 : // The elements on the opposite side of the current boundary are included
637 : // for, in case, periodic boundary conditions. The overhead is negligible
638 : // since you could consider every element has the same number of neighbors
639 1110474 : unsigned int nnb = 0;
640 4441896 : for (unsigned int ii = 0; ii <= 2; ii++)
641 13325688 : for (unsigned int jj = 0; jj <= 2; jj++)
642 39977064 : for (unsigned int kk = 0; kk <= 2; kk++)
643 29982798 : neighbors[nnb++] = elemId<Hex8>(
644 29982798 : nx, ny, (i + ii - 1 + nx) % nx, (j + jj - 1 + ny) % ny, (k + kk - 1 + nz) % nz);
645 :
646 1110474 : return;
647 : }
648 :
649 : // Back
650 473000 : if (k != 0)
651 440100 : neighbors[0] = elemId<Hex8>(nx, ny, i, j, k - 1);
652 :
653 : // Bottom
654 473000 : if (j != 0)
655 450900 : neighbors[1] = elemId<Hex8>(nx, ny, i, j - 1, k);
656 :
657 : // Right
658 473000 : if (i != nx - 1)
659 448200 : neighbors[2] = elemId<Hex8>(nx, ny, i + 1, j, k);
660 :
661 : // Top
662 473000 : if (j != ny - 1)
663 450900 : neighbors[3] = elemId<Hex8>(nx, ny, i, j + 1, k);
664 :
665 : // Left
666 473000 : if (i != 0)
667 448200 : neighbors[4] = elemId<Hex8>(nx, ny, i - 1, j, k);
668 :
669 : // Front
670 473000 : if (k != nz - 1)
671 440100 : neighbors[5] = elemId<Hex8>(nx, ny, i, j, k + 1);
672 : }
673 :
674 : template <>
675 : dof_id_type
676 6679176 : DistributedRectilinearMeshGenerator::nodeId<Hex8>(const ElemType /*type*/,
677 : const dof_id_type nx,
678 : const dof_id_type ny,
679 : const dof_id_type i,
680 : const dof_id_type j,
681 : const dof_id_type k)
682 : {
683 6679176 : return i + (nx + 1) * (j + k * (ny + 1));
684 : }
685 :
686 : template <>
687 : Node *
688 6679176 : DistributedRectilinearMeshGenerator::addPoint<Hex8>(const dof_id_type nx,
689 : const dof_id_type ny,
690 : const dof_id_type nz,
691 : const dof_id_type i,
692 : const dof_id_type j,
693 : const dof_id_type k,
694 : const ElemType type,
695 : MeshBase & mesh)
696 : {
697 6679176 : auto id = nodeId<Hex8>(type, nx, ny, i, j, k);
698 6679176 : Node * node_ptr = mesh.query_node_ptr(id);
699 6679176 : if (!node_ptr)
700 : {
701 : std::unique_ptr<Node> new_node = Node::build(
702 1143368 : Point(static_cast<Real>(i) / nx, static_cast<Real>(j) / ny, static_cast<Real>(k) / nz), id);
703 :
704 1143368 : new_node->set_unique_id(nx * ny * nz + id);
705 :
706 1143368 : node_ptr = mesh.add_node(std::move(new_node));
707 1143368 : }
708 :
709 6679176 : return node_ptr;
710 : }
711 :
712 : template <>
713 : void
714 834897 : DistributedRectilinearMeshGenerator::addElement<Hex8>(const dof_id_type nx,
715 : const dof_id_type ny,
716 : const dof_id_type nz,
717 : const dof_id_type i,
718 : const dof_id_type j,
719 : const dof_id_type k,
720 : const dof_id_type elem_id,
721 : const processor_id_type pid,
722 : const ElemType type,
723 : MeshBase & mesh)
724 : {
725 834897 : BoundaryInfo & boundary_info = mesh.get_boundary_info();
726 :
727 : // This ordering was picked to match the ordering in mesh_generation.C
728 834897 : auto node0_ptr = addPoint<Hex8>(nx, ny, nz, i, j, k, type, mesh);
729 834897 : node0_ptr->processor_id() = pid;
730 834897 : auto node1_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j, k, type, mesh);
731 834897 : node1_ptr->processor_id() = pid;
732 834897 : auto node2_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j + 1, k, type, mesh);
733 834897 : node2_ptr->processor_id() = pid;
734 834897 : auto node3_ptr = addPoint<Hex8>(nx, ny, nz, i, j + 1, k, type, mesh);
735 834897 : node3_ptr->processor_id() = pid;
736 834897 : auto node4_ptr = addPoint<Hex8>(nx, ny, nz, i, j, k + 1, type, mesh);
737 834897 : node4_ptr->processor_id() = pid;
738 834897 : auto node5_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j, k + 1, type, mesh);
739 834897 : node5_ptr->processor_id() = pid;
740 834897 : auto node6_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j + 1, k + 1, type, mesh);
741 834897 : node6_ptr->processor_id() = pid;
742 834897 : auto node7_ptr = addPoint<Hex8>(nx, ny, nz, i, j + 1, k + 1, type, mesh);
743 834897 : node7_ptr->processor_id() = pid;
744 :
745 834897 : Elem * elem = new Hex8;
746 834897 : elem->set_id(elem_id);
747 834897 : elem->processor_id() = pid;
748 834897 : elem->set_unique_id(elem_id);
749 834897 : elem = mesh.add_elem(elem);
750 834897 : elem->set_node(0, node0_ptr);
751 834897 : elem->set_node(1, node1_ptr);
752 834897 : elem->set_node(2, node2_ptr);
753 834897 : elem->set_node(3, node3_ptr);
754 834897 : elem->set_node(4, node4_ptr);
755 834897 : elem->set_node(5, node5_ptr);
756 834897 : elem->set_node(6, node6_ptr);
757 834897 : elem->set_node(7, node7_ptr);
758 :
759 834897 : if (k == 0)
760 64581 : boundary_info.add_side(elem, 0, 0);
761 :
762 834897 : if (k == (nz - 1))
763 64581 : boundary_info.add_side(elem, 5, 5);
764 :
765 834897 : if (j == 0)
766 48193 : boundary_info.add_side(elem, 1, 1);
767 :
768 834897 : if (j == (ny - 1))
769 48193 : boundary_info.add_side(elem, 3, 3);
770 :
771 834897 : if (i == 0)
772 56652 : boundary_info.add_side(elem, 4, 4);
773 :
774 834897 : if (i == (nx - 1))
775 56652 : boundary_info.add_side(elem, 2, 2);
776 834897 : }
777 :
778 : template <>
779 : void
780 2580371 : DistributedRectilinearMeshGenerator::getIndices<Hex8>(const dof_id_type nx,
781 : const dof_id_type ny,
782 : const dof_id_type elem_id,
783 : dof_id_type & i,
784 : dof_id_type & j,
785 : dof_id_type & k)
786 : {
787 2580371 : i = elem_id % nx;
788 2580371 : j = (((elem_id - i) / nx) % ny);
789 2580371 : k = ((elem_id - i) - (j * nx)) / (nx * ny);
790 2580371 : }
791 :
792 : template <>
793 : void
794 536 : DistributedRectilinearMeshGenerator::getGhostNeighbors<Hex8>(
795 : const dof_id_type nx,
796 : const dof_id_type ny,
797 : const dof_id_type nz,
798 : const MeshBase & mesh,
799 : const std::set<dof_id_type> & current_elems,
800 : std::set<dof_id_type> & ghost_elems)
801 : {
802 : dof_id_type i, j, k;
803 :
804 536 : std::vector<dof_id_type> neighbors(27);
805 :
806 1111010 : for (auto elem_id : current_elems)
807 : {
808 1110474 : getIndices<Hex8>(nx, ny, elem_id, i, j, k);
809 :
810 1110474 : getNeighbors<Hex8>(nx, ny, nz, i, j, k, neighbors, true);
811 :
812 31093272 : for (auto neighbor : neighbors)
813 29982798 : if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
814 5719500 : ghost_elems.insert(neighbor);
815 : }
816 536 : }
817 :
818 : template <>
819 : void
820 268 : DistributedRectilinearMeshGenerator::setBoundaryNames<Hex8>(BoundaryInfo & boundary_info)
821 : {
822 268 : boundary_info.sideset_name(0) = "back";
823 268 : boundary_info.sideset_name(1) = "bottom";
824 268 : boundary_info.sideset_name(2) = "right";
825 268 : boundary_info.sideset_name(3) = "top";
826 268 : boundary_info.sideset_name(4) = "left";
827 268 : boundary_info.sideset_name(5) = "front";
828 268 : }
829 :
830 : template <>
831 : void
832 268 : DistributedRectilinearMeshGenerator::scaleNodalPositions<Hex8>(dof_id_type /*nx*/,
833 : dof_id_type /*ny*/,
834 : dof_id_type /*nz*/,
835 : Real xmin,
836 : Real xmax,
837 : Real ymin,
838 : Real ymax,
839 : Real zmin,
840 : Real zmax,
841 : MeshBase & mesh)
842 : {
843 1143636 : for (auto & node_ptr : mesh.node_ptr_range())
844 : {
845 1143368 : (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
846 1143368 : (*node_ptr)(1) = (*node_ptr)(1) * (ymax - ymin) + ymin;
847 1143368 : (*node_ptr)(2) = (*node_ptr)(2) * (zmax - zmin) + zmin;
848 268 : }
849 268 : }
850 :
851 : template <>
852 : void
853 63 : DistributedRectilinearMeshGenerator::paritionSquarely<Edge2>(const dof_id_type nx,
854 : const dof_id_type /*ny*/,
855 : const dof_id_type /*nz*/,
856 : const processor_id_type num_procs,
857 : std::vector<dof_id_type> & istarts,
858 : std::vector<dof_id_type> & jstarts,
859 : std::vector<dof_id_type> & kstarts)
860 : {
861 : // Starting indices along x direction
862 63 : istarts.resize(num_procs + 1);
863 : // Starting indices along y direction
864 : // There is only one processor along y direction since it is a 1D mesh
865 63 : jstarts.resize(2);
866 63 : jstarts[0] = 0;
867 63 : jstarts[1] = 1;
868 : // Starting indices along z direction
869 : // There is only one processor along z direction since it is a 1D mesh
870 63 : kstarts.resize(2);
871 63 : kstarts[0] = 0;
872 63 : kstarts[1] = 1;
873 :
874 63 : istarts[0] = 0;
875 288 : for (processor_id_type pid = 0; pid < num_procs; pid++)
876 : // Partition mesh evenly. The extra elements are assigned to the front processors
877 225 : istarts[pid + 1] = istarts[pid] + (nx / num_procs + ((nx % num_procs) > pid));
878 63 : }
879 :
880 : template <>
881 : void
882 333 : DistributedRectilinearMeshGenerator::paritionSquarely<Quad4>(const dof_id_type nx,
883 : const dof_id_type ny,
884 : const dof_id_type /*nz*/,
885 : const processor_id_type num_procs,
886 : std::vector<dof_id_type> & istarts,
887 : std::vector<dof_id_type> & jstarts,
888 : std::vector<dof_id_type> & kstarts)
889 : {
890 : // Try for squarish distribution
891 : // The number of processors along x direction
892 : processor_id_type px =
893 333 : (processor_id_type)(0.5 + std::sqrt(((Real)nx) * ((Real)num_procs) / ((Real)ny)));
894 :
895 333 : if (!px)
896 0 : px = 1;
897 :
898 : // The number of processors along y direction
899 333 : processor_id_type py = 1;
900 : // Fact num_procs into px times py
901 333 : while (px > 0)
902 : {
903 333 : py = num_procs / px;
904 333 : if (py * px == num_procs)
905 333 : break;
906 0 : px--;
907 : }
908 : // More processors are needed for denser side
909 333 : if (nx > ny && py < px)
910 0 : std::swap(px, py);
911 :
912 : // Starting indices along x direction
913 333 : istarts.resize(px + 1);
914 : // Starting indices along y direction
915 333 : jstarts.resize(py + 1);
916 : // Starting indices along z direction
917 333 : kstarts.resize(2);
918 : // There is no elements along z direction
919 333 : kstarts[0] = 0;
920 333 : kstarts[1] = 1;
921 :
922 333 : istarts[0] = 0;
923 756 : for (processor_id_type pxid = 0; pxid < px; pxid++)
924 : // Partition elements evenly along x direction
925 423 : istarts[pxid + 1] = istarts[pxid] + nx / px + ((nx % px) > pxid);
926 :
927 333 : jstarts[0] = 0;
928 900 : for (processor_id_type pyid = 0; pyid < py; pyid++)
929 : // Partition elements evenly along y direction
930 567 : jstarts[pyid + 1] = jstarts[pyid] + (ny / py + ((ny % py) > pyid));
931 333 : }
932 :
933 : template <>
934 : void
935 135 : DistributedRectilinearMeshGenerator::paritionSquarely<Hex8>(const dof_id_type nx,
936 : const dof_id_type ny,
937 : const dof_id_type nz,
938 : const processor_id_type num_procs,
939 : std::vector<dof_id_type> & istarts,
940 : std::vector<dof_id_type> & jstarts,
941 : std::vector<dof_id_type> & kstarts)
942 : {
943 : /* Try for squarish distribution */
944 : // The number of processors along y direction
945 : processor_id_type py =
946 135 : (processor_id_type)(0.5 + std::pow(((Real)ny * ny) * ((Real)num_procs) / ((Real)nz * nx),
947 135 : (Real)(1. / 3.)));
948 135 : if (!py)
949 0 : py = 1;
950 : // The number of processors for pxpz plane
951 135 : processor_id_type pxpz = 1;
952 : // Factorize num_procs into py times pxpz
953 270 : while (py > 0)
954 : {
955 270 : pxpz = num_procs / py;
956 270 : if (py * pxpz == num_procs)
957 135 : break;
958 135 : py--;
959 : }
960 :
961 135 : if (!py)
962 0 : py = 1;
963 :
964 : // There number of processors along x direction
965 : processor_id_type px =
966 135 : (processor_id_type)(0.5 + std::sqrt(((Real)nx) * ((Real)num_procs) / ((Real)nz * py)));
967 135 : if (!px)
968 0 : px = 1;
969 : // The number of processors along z direction
970 135 : processor_id_type pz = 1;
971 : // Factorize num_procs into px times py times pz
972 234 : while (px > 0)
973 : {
974 234 : pz = num_procs / (px * py);
975 234 : if (px * py * pz == num_procs)
976 135 : break;
977 99 : px--;
978 : }
979 : // denser mesh takes more processors
980 135 : if (nx > nz && px < pz)
981 27 : std::swap(px, pz);
982 :
983 135 : istarts.resize(px + 1);
984 135 : jstarts.resize(py + 1);
985 135 : kstarts.resize(pz + 1);
986 :
987 135 : istarts[0] = 0;
988 432 : for (processor_id_type pxid = 0; pxid < px; pxid++)
989 : // Partition mesh evenly along x direction
990 297 : istarts[pxid + 1] = istarts[pxid] + nx / px + ((nx % px) > pxid);
991 :
992 135 : jstarts[0] = 0;
993 378 : for (processor_id_type pyid = 0; pyid < py; pyid++)
994 : // Partition mesh evenly along y direction
995 243 : jstarts[pyid + 1] = jstarts[pyid] + (ny / py + ((ny % py) > pyid));
996 :
997 135 : kstarts[0] = 0;
998 342 : for (processor_id_type pzid = 0; pzid < pz; pzid++)
999 : // Partition mesh evenly along z direction
1000 207 : kstarts[pzid + 1] = kstarts[pzid] + (nz / pz + ((nz % pz) > pzid));
1001 135 : }
1002 :
1003 : template <typename T>
1004 : void
1005 879 : DistributedRectilinearMeshGenerator::buildCube(UnstructuredMesh & mesh,
1006 : const unsigned int nx,
1007 : unsigned int ny,
1008 : unsigned int nz,
1009 : const Real xmin,
1010 : const Real xmax,
1011 : const Real ymin,
1012 : const Real ymax,
1013 : const Real zmin,
1014 : const Real zmax,
1015 : const ElemType type)
1016 : {
1017 : /// 1. "Partition" the element linearly (i.e. break them up into n_procs contiguous chunks
1018 : /// 2. Create a (dual) graph of the local elements
1019 : /// 3. Partition the graph using PetscExternalPartitioner
1020 : /// 4. Push elements to new owners
1021 : /// 5. Each processor creates only the elements it owns
1022 : /// 6. Find the ghosts we need (all the elements that connect to at least one local mesh vertex)
1023 : /// 7. Pull the PIDs of the ghosts
1024 : /// 8. Add ghosts with the right PIDs to the mesh
1025 :
1026 879 : auto & comm = mesh.comm();
1027 :
1028 879 : dof_id_type num_elems = nx * ny * nz;
1029 :
1030 879 : const auto num_procs = comm.size();
1031 : // Current processor ID
1032 879 : const auto pid = comm.rank();
1033 :
1034 879 : if (_num_cores_for_partition > num_procs)
1035 4 : mooseError("Number of cores for the graph partitioner is too large ", _num_cores_for_partition);
1036 :
1037 875 : if (!_num_cores_for_partition)
1038 848 : _num_cores_for_partition = num_procs;
1039 :
1040 875 : auto & boundary_info = mesh.get_boundary_info();
1041 :
1042 875 : std::unique_ptr<Elem> canonical_elem = std::make_unique<T>();
1043 :
1044 : // Will get used to find the neighbors of an element
1045 875 : std::vector<dof_id_type> neighbors(canonical_elem->n_neighbors());
1046 : // Number of neighbors
1047 875 : dof_id_type n_neighbors = canonical_elem->n_neighbors();
1048 :
1049 : // "Partition" the elements linearly across the processors
1050 : dof_id_type num_local_elems;
1051 : dof_id_type local_elems_begin;
1052 : dof_id_type local_elems_end;
1053 875 : if (pid < _num_cores_for_partition)
1054 866 : MooseUtils::linearPartitionItems(num_elems,
1055 866 : _num_cores_for_partition,
1056 : pid,
1057 : num_local_elems,
1058 : local_elems_begin,
1059 : local_elems_end);
1060 : else
1061 : {
1062 9 : num_local_elems = 0;
1063 9 : local_elems_begin = 0;
1064 9 : local_elems_end = 0;
1065 : }
1066 :
1067 875 : std::vector<std::vector<dof_id_type>> graph;
1068 :
1069 : // Fill in xadj and adjncy
1070 : // xadj is the offset into adjncy
1071 : // adjncy are the face neighbors of each element on this processor
1072 875 : graph.resize(num_local_elems);
1073 : // Build a distributed graph
1074 875 : num_local_elems = 0;
1075 841987 : for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
1076 : {
1077 841112 : dof_id_type i, j, k = 0;
1078 :
1079 841112 : getIndices<T>(nx, ny, e_id, i, j, k);
1080 :
1081 841112 : getNeighbors<T>(nx, ny, nz, i, j, k, neighbors, false);
1082 :
1083 841112 : std::vector<dof_id_type> & row = graph[num_local_elems++];
1084 841112 : row.reserve(n_neighbors);
1085 :
1086 5147240 : for (auto neighbor : neighbors)
1087 4306128 : if (neighbor != Elem::invalid_id)
1088 4106988 : row.push_back(neighbor);
1089 : }
1090 :
1091 : // Partition the distributed graph
1092 875 : std::vector<dof_id_type> partition_vec;
1093 875 : if (_partition_method == "linear")
1094 : {
1095 59 : mooseWarning(" LinearPartitioner is mainly used for setting up regression tests. For the "
1096 : "production run, please do not use it.");
1097 : // The graph is already partitioned linearly via calling MooseUtils::linearPartitionItems
1098 59 : partition_vec.resize(num_local_elems);
1099 : // We use it as is
1100 59 : std::fill(partition_vec.begin(), partition_vec.end(), pid);
1101 : }
1102 816 : else if (_partition_method == "square")
1103 : {
1104 : // Starting partition indices along x direction
1105 531 : std::vector<dof_id_type> istarts;
1106 : // Starting partition indices along y direction
1107 531 : std::vector<dof_id_type> jstarts;
1108 : // Starting partition indices along z direction
1109 531 : std::vector<dof_id_type> kstarts;
1110 531 : partition_vec.resize(num_local_elems);
1111 :
1112 : // Partition mesh evenly along each direction
1113 531 : paritionSquarely<T>(nx, ny, nz, num_procs, istarts, jstarts, kstarts);
1114 : // At least one processor
1115 : mooseAssert(istarts.size() > 1, "At least there is one processor along x direction");
1116 531 : processor_id_type px = istarts.size() - 1;
1117 : // At least one processor
1118 : mooseAssert(jstarts.size() > 1, "At least there is one processor along y direction");
1119 531 : processor_id_type py = jstarts.size() - 1;
1120 : // At least one processor
1121 : mooseAssert(kstarts.size() > 1, "At least there is one processor along z direction");
1122 : // processor_id_type pz = kstarts.size() -1;
1123 357291 : for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
1124 : {
1125 356760 : dof_id_type i = 0, j = 0, k = 0;
1126 356760 : getIndices<T>(nx, ny, e_id, i, j, k);
1127 356760 : processor_id_type pi = 0, pj = 0, pk = 0;
1128 :
1129 356760 : pi = (std::upper_bound(istarts.begin(), istarts.end(), i) - istarts.begin()) - 1;
1130 356760 : pj = (std::upper_bound(jstarts.begin(), jstarts.end(), j) - jstarts.begin()) - 1;
1131 356760 : pk = (std::upper_bound(kstarts.begin(), kstarts.end(), k) - kstarts.begin()) - 1;
1132 :
1133 356760 : partition_vec[e_id - local_elems_begin] = pk * px * py + pj * px + pi;
1134 :
1135 : mooseAssert((pk * px * py + pj * px + pi) < num_procs, "processor id is too large");
1136 : }
1137 531 : }
1138 285 : else if (_partition_method == "graph")
1139 285 : PetscExternalPartitioner::partitionGraph(
1140 285 : comm, graph, {}, {}, num_procs, _num_parts_per_compute_node, _part_package, partition_vec);
1141 : else
1142 0 : mooseError("Unsupported partition method " + _partition_method);
1143 :
1144 : mooseAssert(partition_vec.size() == num_local_elems, " Invalid partition was generateed ");
1145 :
1146 : // Use current elements to remote processors according to partition
1147 875 : std::map<processor_id_type, std::vector<dof_id_type>> pushed_elements_vecs;
1148 :
1149 841987 : for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
1150 841112 : pushed_elements_vecs[partition_vec[e_id - local_elems_begin]].push_back(e_id);
1151 :
1152 : // Collect new elements I should own
1153 875 : std::vector<dof_id_type> my_new_elems;
1154 :
1155 875 : auto elements_action_functor =
1156 3228 : [&my_new_elems](processor_id_type /*pid*/, const std::vector<dof_id_type> & data)
1157 1614 : { std::copy(data.begin(), data.end(), std::back_inserter(my_new_elems)); };
1158 :
1159 875 : Parallel::push_parallel_vector_data(comm, pushed_elements_vecs, elements_action_functor);
1160 :
1161 : // Add the elements this processor owns
1162 841987 : for (auto e_id : my_new_elems)
1163 : {
1164 841112 : dof_id_type i = 0, j = 0, k = 0;
1165 :
1166 841112 : getIndices<T>(nx, ny, e_id, i, j, k);
1167 :
1168 841112 : addElement<T>(nx, ny, nz, i, j, k, e_id, pid, type, mesh);
1169 : }
1170 :
1171 : // Need to link up the local elements before we can know what's missing
1172 875 : mesh.find_neighbors();
1173 :
1174 : // Get the ghosts (missing face neighbors)
1175 875 : std::set<dof_id_type> ghost_elems;
1176 : // Current local elements
1177 875 : std::set<dof_id_type> current_elems;
1178 :
1179 : // Fill current elems
1180 : // We will grow domain from current elements
1181 842862 : for (auto & elem_ptr : mesh.element_ptr_range())
1182 841112 : current_elems.insert(elem_ptr->id());
1183 :
1184 : // Grow domain layer by layer
1185 2625 : for (unsigned layer = 0; layer < _num_side_layers; layer++)
1186 : {
1187 : // getGhostNeighbors produces one layer of side neighbors
1188 1750 : getGhostNeighbors<T>(nx, ny, nz, mesh, current_elems, ghost_elems);
1189 : // Merge ghost elements into current element list
1190 1750 : current_elems.insert(ghost_elems.begin(), ghost_elems.end());
1191 : }
1192 : // We do not need it anymore
1193 875 : current_elems.clear();
1194 :
1195 : // Elements we're going to request from others
1196 875 : std::map<processor_id_type, std::vector<dof_id_type>> ghost_elems_to_request;
1197 :
1198 396940 : for (auto & ghost_id : ghost_elems)
1199 : {
1200 : // This is the processor ID the ghost_elem was originally assigned to
1201 396065 : auto proc_id = MooseUtils::linearPartitionChunk(num_elems, _num_cores_for_partition, ghost_id);
1202 :
1203 : // Using side-effect insertion on purpose
1204 396065 : ghost_elems_to_request[proc_id].push_back(ghost_id);
1205 : }
1206 :
1207 : // Next set ghost object ids from other processors
1208 875 : auto gather_functor =
1209 400055 : [local_elems_begin, partition_vec](processor_id_type /*pid*/,
1210 : const std::vector<dof_id_type> & coming_ghost_elems,
1211 : std::vector<dof_id_type> & pid_for_ghost_elems)
1212 : {
1213 1995 : auto num_ghost_elems = coming_ghost_elems.size();
1214 1995 : pid_for_ghost_elems.resize(num_ghost_elems);
1215 :
1216 1995 : dof_id_type num_local_elems = 0;
1217 :
1218 398060 : for (auto elem : coming_ghost_elems)
1219 396065 : pid_for_ghost_elems[num_local_elems++] = partition_vec[elem - local_elems_begin];
1220 : };
1221 :
1222 875 : std::unordered_map<dof_id_type, processor_id_type> ghost_elem_to_pid;
1223 :
1224 875 : auto action_functor =
1225 1995 : [&ghost_elem_to_pid](processor_id_type /*pid*/,
1226 : const std::vector<dof_id_type> & my_ghost_elems,
1227 : const std::vector<dof_id_type> & pid_for_my_ghost_elems)
1228 : {
1229 1995 : dof_id_type num_local_elems = 0;
1230 :
1231 398060 : for (auto elem : my_ghost_elems)
1232 396065 : ghost_elem_to_pid[elem] = pid_for_my_ghost_elems[num_local_elems++];
1233 : };
1234 :
1235 875 : const dof_id_type * ex = nullptr;
1236 875 : libMesh::Parallel::pull_parallel_vector_data(
1237 : comm, ghost_elems_to_request, gather_functor, action_functor, ex);
1238 :
1239 : // Add the ghost elements to the mesh
1240 396940 : for (auto gtop : ghost_elem_to_pid)
1241 : {
1242 396065 : auto ghost_id = gtop.first;
1243 396065 : auto proc_id = gtop.second;
1244 :
1245 396065 : dof_id_type i = 0, j = 0, k = 0;
1246 :
1247 396065 : getIndices<T>(nx, ny, ghost_id, i, j, k);
1248 :
1249 396065 : addElement<T>(nx, ny, nz, i, j, k, ghost_id, proc_id, type, mesh);
1250 : }
1251 :
1252 875 : mesh.find_neighbors(true);
1253 :
1254 : // Set RemoteElem neighbors
1255 2476104 : for (auto & elem_ptr : mesh.element_ptr_range())
1256 7850663 : for (unsigned int s = 0; s < elem_ptr->n_sides(); s++)
1257 6613486 : if (!elem_ptr->neighbor_ptr(s) && !boundary_info.n_boundary_ids(elem_ptr, s))
1258 232156 : elem_ptr->set_neighbor(s, const_cast<RemoteElem *>(remote_elem));
1259 :
1260 875 : setBoundaryNames<T>(boundary_info);
1261 :
1262 875 : Partitioner::set_node_processor_ids(mesh);
1263 :
1264 : // Already partitioned!
1265 875 : mesh.skip_partitioning(true);
1266 :
1267 : // Scale the nodal positions
1268 875 : scaleNodalPositions<T>(nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax, mesh);
1269 875 : }
1270 :
// Entry point: builds a distributed rectilinear mesh of dimension _dim, then
// optionally applies the geometric x/y/z biasing to the node positions.
std::unique_ptr<MeshBase>
DistributedRectilinearMeshGenerator::generate()
{
  // We will set up boundaries accordingly. We do not want to call
  // ghostGhostedBoundaries in which allgather_packed_range is unscalable.
  // ghostGhostedBoundaries will gather all boundaries to every single processor
  _mesh->needGhostGhostedBoundaries(false);

  // DistributedRectilinearMeshGenerator always generates a distributed mesh
  auto mesh = buildDistributedMesh();

  MooseEnum elem_type_enum = getParam<MooseEnum>("elem_type");

  // If the user did not pick an element type, select the linear element that
  // matches the mesh dimension
  if (!isParamValid("elem_type"))
  {
    // Switching on MooseEnum
    switch (_dim)
    {
      case 1:
        elem_type_enum = "EDGE2";
        break;
      case 2:
        elem_type_enum = "QUAD4";
        break;
      case 3:
        elem_type_enum = "HEX8";
        break;
    }
  }

  _elem_type = Utility::string_to_enum<ElemType>(elem_type_enum);

  mesh->set_mesh_dimension(_dim);
  mesh->set_spatial_dimension(_dim);
  // Let the mesh know that it's not serialized
  mesh->set_distributed();

  // Switching on MooseEnum
  switch (_dim)
  {
    // The build_XYZ mesh generation functions take an
    // UnstructuredMesh& as the first argument, hence the dynamic_cast.
    // For 1D/2D the unused directions get one element and zero extent.
    case 1:
      buildCube<Edge2>(
          dynamic_cast<UnstructuredMesh &>(*mesh), _nx, 1, 1, _xmin, _xmax, 0, 0, 0, 0, _elem_type);
      break;
    case 2:
      buildCube<Quad4>(dynamic_cast<UnstructuredMesh &>(*mesh),
                       _nx,
                       _ny,
                       1,
                       _xmin,
                       _xmax,
                       _ymin,
                       _ymax,
                       0,
                       0,
                       _elem_type);
      break;
    case 3:
      buildCube<Hex8>(dynamic_cast<UnstructuredMesh &>(*mesh),
                      _nx,
                      _ny,
                      _nz,
                      _xmin,
                      _xmax,
                      _ymin,
                      _ymax,
                      _zmin,
                      _zmax,
                      _elem_type);
      break;
    // NOTE(review): the "dim" MooseEnum only admits 1/2/3, so this default
    // looks unreachable; the message reports elem_type even though the switch
    // is on _dim — confirm before relying on it.
    default:
      mooseError(
          getParam<MooseEnum>("elem_type"),
          " is not a currently supported element type for DistributedRectilinearMeshGenerator");
  }

  // Apply the bias if any exists
  if (_bias_x != 1.0 || _bias_y != 1.0 || _bias_z != 1.0)
  {
    // Biases
    Real bias[3] = {_bias_x, _bias_y, _bias_z};

    // "width" of the mesh in each direction
    Real width[3] = {_xmax - _xmin, _ymax - _ymin, _zmax - _zmin};

    // Min mesh extent in each direction.
    Real mins[3] = {_xmin, _ymin, _zmin};

    // Number of elements in each direction.
    dof_id_type nelem[3] = {_nx, _ny, _nz};

    // We will need the biases raised to integer powers in each
    // direction, so let's pre-compute those...
    std::vector<std::vector<Real>> pows(LIBMESH_DIM);
    for (dof_id_type dir = 0; dir < LIBMESH_DIM; ++dir)
    {
      pows[dir].resize(nelem[dir] + 1);
      for (dof_id_type i = 0; i < pows[dir].size(); ++i)
        pows[dir][i] = std::pow(bias[dir], static_cast<int>(i));
    }

    // Loop over the nodes and move them to the desired location
    for (auto & node_ptr : mesh->node_ptr_range())
    {
      Node & node = *node_ptr;

      for (dof_id_type dir = 0; dir < LIBMESH_DIM; ++dir)
      {
        // Only bias directions that have extent and a non-unit bias
        if (width[dir] != 0. && bias[dir] != 1.)
        {
          // Compute the scaled "index" of the current point. This
          // will either be close to a whole integer or a whole
          // integer+0.5 for quadratic nodes.
          Real float_index = (node(dir) - mins[dir]) * nelem[dir] / width[dir];

          Real integer_part = 0;
          Real fractional_part = std::modf(float_index, &integer_part);

          // Figure out where to move the node...
          if (std::abs(fractional_part) < TOLERANCE || std::abs(fractional_part - 1.0) < TOLERANCE)
          {
            // If the fractional_part ~ 0.0 or 1.0, this is a vertex node, so
            // we don't need an average.
            //
            // Compute the "index" we are at in the current direction. We
            // round to the nearest integral value to get this instead
            // of using "integer_part", since that could be off by a
            // lot (e.g. we want 3.9999 to map to 4.0 instead of 3.0).
            int index = round(float_index);

            // Move node to biased location (geometric-series partial sum over
            // the precomputed bias powers).
            node(dir) =
                mins[dir] + width[dir] * (1. - pows[dir][index]) / (1. - pows[dir][nelem[dir]]);
          }
          else if (std::abs(fractional_part - 0.5) < TOLERANCE)
          {
            // If the fractional_part ~ 0.5, this is a midedge/face
            // (i.e. quadratic) node. We don't move those with the same
            // bias as the vertices, instead we put them midway between
            // their respective vertices.
            //
            // Also, since the fractional part is nearly 0.5, we know that
            // the integer_part will be the index of the vertex to the
            // left, and integer_part+1 will be the index of the
            // vertex to the right.
            //
            // NOTE(review): integer_part is a Real used directly as a vector
            // index here (implicit floating-point-to-integer conversion).
            node(dir) = mins[dir] +
                        width[dir] *
                            (1. - 0.5 * (pows[dir][integer_part] + pows[dir][integer_part + 1])) /
                            (1. - pows[dir][nelem[dir]]);
          }
          else
          {
            // We don't yet handle anything higher order than quadratic...
            mooseError("Unable to bias node at node(", dir, ")=", node(dir));
          }
        }
      }
    }
  }

  // MeshOutput<MT>::write_equation_systems will automatically renumber node and element IDs.
  // So we have to make that consistent at the first place. Otherwise, the moose cached data such as
  // _block_node_list will be inconsistent when we doing MooseMesh::getNodeBlockIds. That being
  // said, moose will pass new ids to getNodeBlockIds while the cached _block_node_list still use
  // the old node IDs. Yes, you could say: go ahead to do a mesh update, but I would say no. I do
  // not change mesh and there is no point to update anything.
  mesh->allow_renumbering(true);
  mesh->prepare_for_use();

  return dynamic_pointer_cast<MeshBase>(mesh);
}
|