Line data Source code
1 : //* This file is part of the MOOSE framework
2 : //* https://mooseframework.inl.gov
3 : //*
4 : //* All rights reserved, see COPYRIGHT for full restrictions
5 : //* https://github.com/idaholab/moose/blob/master/COPYRIGHT
6 : //*
7 : //* Licensed under LGPL 2.1, please see LICENSE for details
8 : //* https://www.gnu.org/licenses/lgpl-2.1.html
9 :
10 : #include "DistributedRectilinearMeshGenerator.h"
11 : #include "CastUniquePointer.h"
12 : #include "PetscExternalPartitioner.h"
13 :
14 : #include "SerializerGuard.h"
15 :
16 : #include "libmesh/mesh_generation.h"
17 : #include "libmesh/string_to_enum.h"
18 : #include "libmesh/periodic_boundaries.h"
19 : #include "libmesh/periodic_boundary_base.h"
20 : #include "libmesh/unstructured_mesh.h"
21 : #include "libmesh/mesh_communication.h"
22 : #include "libmesh/remote_elem.h"
23 : #include "libmesh/partitioner.h"
24 :
25 : // TIMPI includes
26 : #include "timpi/communicator.h"
27 : #include "timpi/parallel_sync.h"
28 :
29 : // C++ includes
30 : #include <cmath> // provides round, not std::round (see http://www.cplusplus.com/reference/cmath/round/)
31 :
32 : registerMooseObject("MooseApp", DistributedRectilinearMeshGenerator);
33 :
// Declare every input parameter this generator accepts. Partitioner-related
// options (e.g. "part_package", "num_cores_per_compute_node") are inherited
// from PetscExternalPartitioner; generic generator options from MeshGenerator.
InputParameters
DistributedRectilinearMeshGenerator::validParams()
{
  InputParameters params = PetscExternalPartitioner::validParams();
  params += MeshGenerator::validParams();

  MooseEnum dims("1=1 2 3");
  params.addRequiredParam<MooseEnum>(
      "dim", dims, "The dimension of the mesh to be generated"); // Make this parameter required

  // Element counts per direction; unused directions keep the default of 1
  params.addParam<dof_id_type>("nx", 1, "Number of elements in the X direction");
  params.addParam<dof_id_type>("ny", 1, "Number of elements in the Y direction");
  params.addParam<dof_id_type>("nz", 1, "Number of elements in the Z direction");
  // Physical extents of the generated box (defaults to the unit box)
  params.addParam<Real>("xmin", 0.0, "Lower X Coordinate of the generated mesh");
  params.addParam<Real>("ymin", 0.0, "Lower Y Coordinate of the generated mesh");
  params.addParam<Real>("zmin", 0.0, "Lower Z Coordinate of the generated mesh");
  params.addParam<Real>("xmax", 1.0, "Upper X Coordinate of the generated mesh");
  params.addParam<Real>("ymax", 1.0, "Upper Y Coordinate of the generated mesh");
  params.addParam<Real>("zmax", 1.0, "Upper Z Coordinate of the generated mesh");

  // NOTE(review): the meaning of the default 0 (presumably "use all ranks")
  // is decided by the partitioning code, which is not visible in this file.
  params.addParam<processor_id_type>(
      "num_cores_for_partition", 0, "Number of cores for partitioning the graph");

  // How many layers of off-processor side neighbors to keep as ghosts
  params.addRangeCheckedParam<unsigned>(
      "num_side_layers",
      2,
      "num_side_layers>=1 & num_side_layers<5",
      "Number of layers of off-processor side neighbors is reserved during mesh generation");

  params.addRelationshipManager("ElementSideNeighborLayers",
                                Moose::RelationshipManagerType::GEOMETRIC,
                                [](const InputParameters & obj_params, InputParameters & rm_params)
                                {
                                  // Let this RM safeguard users specified ghosted layers
                                  rm_params.set<unsigned short>("layers") =
                                      obj_params.get<unsigned>("num_side_layers");
                                  // We can not attach geometric early here because some simulation
                                  // related info, such as, periodic BCs is not available yet during
                                  // an early stage. Periodic BCs will be passed into ghosting
                                  // functor during initFunctor of ElementSideNeighborLayers. There
                                  // is no hurt to attach geometric late for general simulations
                                  // that do not have extra requirements on ghosting elements. That
                                  // is especially true distributed generated meshes since there is
                                  // not much redundant info.
                                  rm_params.set<bool>("attach_geometric_early") = false;
                                });

  MooseEnum partition("graph linear square", "graph", false);
  params.addParam<MooseEnum>(
      "partition", partition, "Which method (graph linear square) use to partition mesh");

  MooseEnum elem_types(LIST_GEOM_ELEM); // no default
  params.addParam<MooseEnum>("elem_type",
                             elem_types,
                             "The type of element from libMesh to "
                             "generate (default: linear element for "
                             "requested dimension)");

  // Geometric grading factors; 1 gives uniformly-sized elements
  params.addRangeCheckedParam<Real>(
      "bias_x",
      1.,
      "bias_x>=0.5 & bias_x<=2",
      "The amount by which to grow (or shrink) the cells in the x-direction.");
  params.addRangeCheckedParam<Real>(
      "bias_y",
      1.,
      "bias_y>=0.5 & bias_y<=2",
      "The amount by which to grow (or shrink) the cells in the y-direction.");
  params.addRangeCheckedParam<Real>(
      "bias_z",
      1.,
      "bias_z>=0.5 & bias_z<=2",
      "The amount by which to grow (or shrink) the cells in the z-direction.");

  params.addClassDescription(
      "Create a line, square, or cube mesh with uniformly spaced or biased elements.");

  return params;
}
113 :
// Cache all user parameters; the element counts and box extents are also
// published as mesh properties so downstream generators can query them.
DistributedRectilinearMeshGenerator::DistributedRectilinearMeshGenerator(
    const InputParameters & parameters)
  : MeshGenerator(parameters),
    _dim(getParam<MooseEnum>("dim")),
    _nx(declareMeshProperty("num_elements_x", getParam<dof_id_type>("nx"))),
    _ny(declareMeshProperty("num_elements_y", getParam<dof_id_type>("ny"))),
    _nz(declareMeshProperty("num_elements_z", getParam<dof_id_type>("nz"))),
    _xmin(declareMeshProperty("xmin", getParam<Real>("xmin"))),
    _xmax(declareMeshProperty("xmax", getParam<Real>("xmax"))),
    _ymin(declareMeshProperty("ymin", getParam<Real>("ymin"))),
    _ymax(declareMeshProperty("ymax", getParam<Real>("ymax"))),
    _zmin(declareMeshProperty("zmin", getParam<Real>("zmin"))),
    _zmax(declareMeshProperty("zmax", getParam<Real>("zmax"))),
    _num_cores_for_partition(getParam<processor_id_type>("num_cores_for_partition")),
    _bias_x(getParam<Real>("bias_x")),
    _bias_y(getParam<Real>("bias_y")),
    _bias_z(getParam<Real>("bias_z")),
    // "part_package" and "num_cores_per_compute_node" come from
    // PetscExternalPartitioner::validParams()
    _part_package(getParam<MooseEnum>("part_package")),
    _num_parts_per_compute_node(getParam<processor_id_type>("num_cores_per_compute_node")),
    _partition_method(getParam<MooseEnum>("partition")),
    _num_side_layers(getParam<unsigned>("num_side_layers"))
{
  // This generator always builds a distributed mesh
  declareMeshProperty("use_distributed_mesh", true);
}
139 : template <>
140 : dof_id_type
141 0 : DistributedRectilinearMeshGenerator::numNeighbors<Edge2>(const dof_id_type nx,
142 : const dof_id_type /*ny*/,
143 : const dof_id_type /*nz*/,
144 : const dof_id_type i,
145 : const dof_id_type /*j*/,
146 : const dof_id_type /*k*/)
147 : {
148 : // The ends only have one neighbor
149 0 : if (i == 0 || i == nx - 1)
150 0 : return 1;
151 :
152 0 : return 2;
153 : }
154 :
155 : template <>
156 : void
157 5916 : DistributedRectilinearMeshGenerator::getNeighbors<Edge2>(const dof_id_type nx,
158 : const dof_id_type /*ny*/,
159 : const dof_id_type /*nz*/,
160 : const dof_id_type i,
161 : const dof_id_type /*j*/,
162 : const dof_id_type /*k*/,
163 : std::vector<dof_id_type> & neighbors,
164 : const bool corner)
165 :
166 : {
167 5916 : if (corner)
168 : {
169 : // The elements on the opposite of the current boundary are required
170 : // for periodic boundary conditions
171 3996 : neighbors[0] = (i - 1 + nx) % nx;
172 3996 : neighbors[1] = (i + 1 + nx) % nx;
173 :
174 3996 : return;
175 : }
176 :
177 1920 : neighbors[0] = i - 1;
178 1920 : neighbors[1] = i + 1;
179 :
180 : // First element doesn't have a left neighbor
181 1920 : if (i == 0)
182 32 : neighbors[0] = Elem::invalid_id;
183 :
184 : // Last element doesn't have a right neighbor
185 1920 : if (i == nx - 1)
186 32 : neighbors[1] = Elem::invalid_id;
187 : }
188 :
189 : template <>
190 : void
191 4472 : DistributedRectilinearMeshGenerator::getIndices<Edge2>(const dof_id_type /*nx*/,
192 : const dof_id_type /*ny*/,
193 : const dof_id_type elem_id,
194 : dof_id_type & i,
195 : dof_id_type & /*j*/,
196 : dof_id_type & /*k*/)
197 : {
198 4472 : i = elem_id;
199 4472 : }
200 :
201 : template <>
202 : void
203 166 : DistributedRectilinearMeshGenerator::getGhostNeighbors<Edge2>(
204 : const dof_id_type nx,
205 : const dof_id_type /*ny*/,
206 : const dof_id_type /*nz*/,
207 : const MeshBase & mesh,
208 : const std::set<dof_id_type> & current_elems,
209 : std::set<dof_id_type> & ghost_elems)
210 : {
211 166 : std::vector<dof_id_type> neighbors(2);
212 :
213 4162 : for (auto elem_id : current_elems)
214 : {
215 3996 : getNeighbors<Edge2>(nx, 0, 0, elem_id, 0, 0, neighbors, true);
216 :
217 11988 : for (auto neighbor : neighbors)
218 7992 : if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
219 468 : ghost_elems.insert(neighbor);
220 : }
221 166 : }
222 :
223 : template <>
224 : dof_id_type
225 0 : DistributedRectilinearMeshGenerator::elemId<Edge2>(const dof_id_type /*nx*/,
226 : const dof_id_type /*ny*/,
227 : const dof_id_type i,
228 : const dof_id_type /*j*/,
229 : const dof_id_type /*k*/)
230 : {
231 0 : return i;
232 : }
233 :
234 : template <>
235 : void
236 2232 : DistributedRectilinearMeshGenerator::addElement<Edge2>(const dof_id_type nx,
237 : const dof_id_type /*ny*/,
238 : const dof_id_type /*nz*/,
239 : const dof_id_type /*i*/,
240 : const dof_id_type /*j*/,
241 : const dof_id_type /*k*/,
242 : const dof_id_type elem_id,
243 : const processor_id_type pid,
244 : const ElemType /*type*/,
245 : MeshBase & mesh)
246 : {
247 2232 : BoundaryInfo & boundary_info = mesh.get_boundary_info();
248 :
249 2232 : auto node_offset = elem_id;
250 :
251 2232 : Node * node0_ptr = mesh.query_node_ptr(node_offset);
252 2232 : if (!node0_ptr)
253 : {
254 : std::unique_ptr<Node> new_node =
255 227 : Node::build(Point(static_cast<Real>(node_offset) / nx, 0, 0), node_offset);
256 :
257 227 : new_node->set_unique_id(nx + node_offset);
258 227 : new_node->processor_id() = pid;
259 :
260 227 : node0_ptr = mesh.add_node(std::move(new_node));
261 227 : }
262 :
263 2232 : Node * node1_ptr = mesh.query_node_ptr(node_offset + 1);
264 2232 : if (!node1_ptr)
265 : {
266 : std::unique_ptr<Node> new_node =
267 2142 : Node::build(Point(static_cast<Real>(node_offset + 1) / nx, 0, 0), node_offset + 1);
268 :
269 2142 : new_node->set_unique_id(nx + node_offset + 1);
270 2142 : new_node->processor_id() = pid;
271 :
272 2142 : node1_ptr = mesh.add_node(std::move(new_node));
273 2142 : }
274 :
275 2232 : Elem * elem = new Edge2;
276 2232 : elem->set_id(elem_id);
277 2232 : elem->processor_id() = pid;
278 2232 : elem->set_unique_id(elem_id);
279 2232 : elem = mesh.add_elem(elem);
280 2232 : elem->set_node(0, node0_ptr);
281 2232 : elem->set_node(1, node1_ptr);
282 :
283 2232 : if (elem_id == 0)
284 59 : boundary_info.add_side(elem, 0, 0);
285 :
286 2232 : if (elem_id == nx - 1)
287 59 : boundary_info.add_side(elem, 1, 1);
288 2232 : }
289 :
290 : template <>
291 : void
292 83 : DistributedRectilinearMeshGenerator::setBoundaryNames<Edge2>(BoundaryInfo & boundary_info)
293 : {
294 83 : boundary_info.sideset_name(0) = "left";
295 83 : boundary_info.sideset_name(1) = "right";
296 83 : }
297 :
298 : template <>
299 : void
300 83 : DistributedRectilinearMeshGenerator::scaleNodalPositions<Edge2>(dof_id_type /*nx*/,
301 : dof_id_type /*ny*/,
302 : dof_id_type /*nz*/,
303 : Real xmin,
304 : Real xmax,
305 : Real /*ymin*/,
306 : Real /*ymax*/,
307 : Real /*zmin*/,
308 : Real /*zmax*/,
309 : MeshBase & mesh)
310 : {
311 2452 : for (auto & node_ptr : mesh.node_ptr_range())
312 2452 : (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
313 83 : }
314 :
315 : template <>
316 : dof_id_type
317 0 : DistributedRectilinearMeshGenerator::numNeighbors<Quad4>(const dof_id_type nx,
318 : const dof_id_type ny,
319 : const dof_id_type /*nz*/,
320 : const dof_id_type i,
321 : const dof_id_type j,
322 : const dof_id_type /*k*/)
323 : {
324 0 : dof_id_type n = 4;
325 :
326 0 : if (i == 0)
327 0 : n--;
328 :
329 0 : if (i == nx - 1)
330 0 : n--;
331 :
332 0 : if (j == 0)
333 0 : n--;
334 :
335 0 : if (j == ny - 1)
336 0 : n--;
337 :
338 0 : return n;
339 : }
340 :
341 : template <>
342 : dof_id_type
343 7416748 : DistributedRectilinearMeshGenerator::elemId<Quad4>(const dof_id_type nx,
344 : const dof_id_type /*nx*/,
345 : const dof_id_type i,
346 : const dof_id_type j,
347 : const dof_id_type /*k*/)
348 : {
349 7416748 : return (j * nx) + i;
350 : }
351 :
352 : template <>
353 : void
354 1012808 : DistributedRectilinearMeshGenerator::getNeighbors<Quad4>(const dof_id_type nx,
355 : const dof_id_type ny,
356 : const dof_id_type /*nz*/,
357 : const dof_id_type i,
358 : const dof_id_type j,
359 : const dof_id_type /*k*/,
360 : std::vector<dof_id_type> & neighbors,
361 : const bool corner)
362 : {
363 1012808 : std::fill(neighbors.begin(), neighbors.end(), Elem::invalid_id);
364 :
365 1012808 : if (corner)
366 : {
367 : // libMesh dof_id_type looks like unsigned int
368 : // We add one layer of point neighbors by default. Besides,
369 : // The elements on the opposite side of the current boundary are included
370 : // for, in case, periodic boundary conditions. The overhead is negligible
371 : // since you could consider every element has the same number of neighbors
372 680292 : unsigned int nnb = 0;
373 2721168 : for (unsigned int ii = 0; ii <= 2; ii++)
374 8163504 : for (unsigned int jj = 0; jj <= 2; jj++)
375 6122628 : neighbors[nnb++] = elemId<Quad4>(nx, 0, (i + ii - 1 + nx) % nx, (j + jj - 1 + ny) % ny, 0);
376 :
377 680292 : return;
378 : }
379 : // Bottom
380 332516 : if (j != 0)
381 323650 : neighbors[0] = elemId<Quad4>(nx, 0, i, j - 1, 0);
382 :
383 : // Right
384 332516 : if (i != nx - 1)
385 323410 : neighbors[1] = elemId<Quad4>(nx, 0, i + 1, j, 0);
386 :
387 : // Top
388 332516 : if (j != ny - 1)
389 323650 : neighbors[2] = elemId<Quad4>(nx, 0, i, j + 1, 0);
390 :
391 : // Left
392 332516 : if (i != 0)
393 323410 : neighbors[3] = elemId<Quad4>(nx, 0, i - 1, j, 0);
394 : }
395 :
396 : template <>
397 : void
398 1552849 : DistributedRectilinearMeshGenerator::getIndices<Quad4>(const dof_id_type nx,
399 : const dof_id_type /*ny*/,
400 : const dof_id_type elem_id,
401 : dof_id_type & i,
402 : dof_id_type & j,
403 : dof_id_type & /*k*/)
404 : {
405 1552849 : i = elem_id % nx;
406 1552849 : j = (elem_id - i) / nx;
407 1552849 : }
408 :
409 : template <>
410 : void
411 936 : DistributedRectilinearMeshGenerator::getGhostNeighbors<Quad4>(
412 : const dof_id_type nx,
413 : const dof_id_type ny,
414 : const dof_id_type /*nz*/,
415 : const MeshBase & mesh,
416 : const std::set<dof_id_type> & current_elems,
417 : std::set<dof_id_type> & ghost_elems)
418 : {
419 : dof_id_type i, j, k;
420 :
421 936 : std::vector<dof_id_type> neighbors(9);
422 :
423 681228 : for (auto elem_id : current_elems)
424 : {
425 680292 : getIndices<Quad4>(nx, 0, elem_id, i, j, k);
426 :
427 680292 : getNeighbors<Quad4>(nx, ny, 0, i, j, 0, neighbors, true);
428 :
429 6802920 : for (auto neighbor : neighbors)
430 6122628 : if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
431 181568 : ghost_elems.insert(neighbor);
432 : }
433 936 : }
434 :
435 : template <>
436 : dof_id_type
437 1454564 : DistributedRectilinearMeshGenerator::nodeId<Quad4>(const ElemType /*type*/,
438 : const dof_id_type nx,
439 : const dof_id_type /*ny*/,
440 : const dof_id_type i,
441 : const dof_id_type j,
442 : const dof_id_type /*k*/)
443 :
444 : {
445 1454564 : return i + j * (nx + 1);
446 : }
447 :
448 : template <>
449 : void
450 363641 : DistributedRectilinearMeshGenerator::addElement<Quad4>(const dof_id_type nx,
451 : const dof_id_type ny,
452 : const dof_id_type /*nz*/,
453 : const dof_id_type i,
454 : const dof_id_type j,
455 : const dof_id_type /*k*/,
456 : const dof_id_type elem_id,
457 : const processor_id_type pid,
458 : const ElemType type,
459 : MeshBase & mesh)
460 : {
461 363641 : BoundaryInfo & boundary_info = mesh.get_boundary_info();
462 :
463 : // Bottom Left
464 363641 : const dof_id_type node0_id = nodeId<Quad4>(type, nx, 0, i, j, 0);
465 363641 : Node * node0_ptr = mesh.query_node_ptr(node0_id);
466 363641 : if (!node0_ptr)
467 : {
468 : std::unique_ptr<Node> new_node =
469 13576 : Node::build(Point(static_cast<Real>(i) / nx, static_cast<Real>(j) / ny, 0), node0_id);
470 :
471 13576 : new_node->set_unique_id(nx * ny + node0_id);
472 13576 : new_node->processor_id() = pid;
473 :
474 13576 : node0_ptr = mesh.add_node(std::move(new_node));
475 13576 : }
476 :
477 : // Bottom Right
478 363641 : const dof_id_type node1_id = nodeId<Quad4>(type, nx, 0, i + 1, j, 0);
479 363641 : Node * node1_ptr = mesh.query_node_ptr(node1_id);
480 363641 : if (!node1_ptr)
481 : {
482 : std::unique_ptr<Node> new_node =
483 20115 : Node::build(Point(static_cast<Real>(i + 1) / nx, static_cast<Real>(j) / ny, 0), node1_id);
484 :
485 20115 : new_node->set_unique_id(nx * ny + node1_id);
486 20115 : new_node->processor_id() = pid;
487 :
488 20115 : node1_ptr = mesh.add_node(std::move(new_node));
489 20115 : }
490 :
491 : // Top Right
492 363641 : const dof_id_type node2_id = nodeId<Quad4>(type, nx, 0, i + 1, j + 1, 0);
493 363641 : Node * node2_ptr = mesh.query_node_ptr(node2_id);
494 363641 : if (!node2_ptr)
495 : {
496 : std::unique_ptr<Node> new_node = Node::build(
497 341387 : Point(static_cast<Real>(i + 1) / nx, static_cast<Real>(j + 1) / ny, 0), node2_id);
498 :
499 341387 : new_node->set_unique_id(nx * ny + node2_id);
500 341387 : new_node->processor_id() = pid;
501 :
502 341387 : node2_ptr = mesh.add_node(std::move(new_node));
503 341387 : }
504 :
505 : // Top Left
506 363641 : const dof_id_type node3_id = nodeId<Quad4>(type, nx, 0, i, j + 1, 0);
507 363641 : Node * node3_ptr = mesh.query_node_ptr(node3_id);
508 363641 : if (!node3_ptr)
509 : {
510 : std::unique_ptr<Node> new_node =
511 20016 : Node::build(Point(static_cast<Real>(i) / nx, static_cast<Real>(j + 1) / ny, 0), node3_id);
512 :
513 20016 : new_node->set_unique_id(nx * ny + node3_id);
514 20016 : new_node->processor_id() = pid;
515 :
516 20016 : node3_ptr = mesh.add_node(std::move(new_node));
517 20016 : }
518 :
519 363641 : Elem * elem = new Quad4;
520 363641 : elem->set_id(elem_id);
521 363641 : elem->processor_id() = pid;
522 363641 : elem->set_unique_id(elem_id);
523 363641 : elem = mesh.add_elem(elem);
524 363641 : elem->set_node(0, node0_ptr);
525 363641 : elem->set_node(1, node1_ptr);
526 363641 : elem->set_node(2, node2_ptr);
527 363641 : elem->set_node(3, node3_ptr);
528 :
529 : // Bottom
530 363641 : if (j == 0)
531 11097 : boundary_info.add_side(elem, 0, 0);
532 :
533 : // Right
534 363641 : if (i == nx - 1)
535 11455 : boundary_info.add_side(elem, 1, 1);
536 :
537 : // Top
538 363641 : if (j == ny - 1)
539 11097 : boundary_info.add_side(elem, 2, 2);
540 :
541 : // Left
542 363641 : if (i == 0)
543 11455 : boundary_info.add_side(elem, 3, 3);
544 363641 : }
545 :
546 : template <>
547 : void
548 468 : DistributedRectilinearMeshGenerator::setBoundaryNames<Quad4>(BoundaryInfo & boundary_info)
549 : {
550 468 : boundary_info.sideset_name(0) = "bottom";
551 468 : boundary_info.sideset_name(1) = "right";
552 468 : boundary_info.sideset_name(2) = "top";
553 468 : boundary_info.sideset_name(3) = "left";
554 468 : }
555 :
556 : template <>
557 : void
558 468 : DistributedRectilinearMeshGenerator::scaleNodalPositions<Quad4>(dof_id_type /*nx*/,
559 : dof_id_type /*ny*/,
560 : dof_id_type /*nz*/,
561 : Real xmin,
562 : Real xmax,
563 : Real ymin,
564 : Real ymax,
565 : Real /*zmin*/,
566 : Real /*zmax*/,
567 : MeshBase & mesh)
568 : {
569 395562 : for (auto & node_ptr : mesh.node_ptr_range())
570 : {
571 395094 : (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
572 395094 : (*node_ptr)(1) = (*node_ptr)(1) * (ymax - ymin) + ymin;
573 468 : }
574 468 : }
575 :
576 : template <>
577 : dof_id_type
578 29170176 : DistributedRectilinearMeshGenerator::elemId<Hex8>(const dof_id_type nx,
579 : const dof_id_type ny,
580 : const dof_id_type i,
581 : const dof_id_type j,
582 : const dof_id_type k)
583 : {
584 29170176 : return i + (j * nx) + (k * nx * ny);
585 : }
586 :
587 : template <>
588 : dof_id_type
589 0 : DistributedRectilinearMeshGenerator::numNeighbors<Hex8>(const dof_id_type nx,
590 : const dof_id_type ny,
591 : const dof_id_type nz,
592 : const dof_id_type i,
593 : const dof_id_type j,
594 : const dof_id_type k)
595 : {
596 0 : dof_id_type n = 6;
597 :
598 0 : if (i == 0)
599 0 : n--;
600 :
601 0 : if (i == nx - 1)
602 0 : n--;
603 :
604 0 : if (j == 0)
605 0 : n--;
606 :
607 0 : if (j == ny - 1)
608 0 : n--;
609 :
610 0 : if (k == 0)
611 0 : n--;
612 :
613 0 : if (k == nz - 1)
614 0 : n--;
615 :
616 0 : return n;
617 : }
618 :
619 : template <>
620 : void
621 1413888 : DistributedRectilinearMeshGenerator::getNeighbors<Hex8>(const dof_id_type nx,
622 : const dof_id_type ny,
623 : const dof_id_type nz,
624 : const dof_id_type i,
625 : const dof_id_type j,
626 : const dof_id_type k,
627 : std::vector<dof_id_type> & neighbors,
628 : const bool corner)
629 : {
630 1413888 : std::fill(neighbors.begin(), neighbors.end(), Elem::invalid_id);
631 :
632 1413888 : if (corner)
633 : {
634 : // We collect one layer of point neighbors
635 : // We add one layer of point neighbors by default. Besides,
636 : // The elements on the opposite side of the current boundary are included
637 : // for, in case, periodic boundary conditions. The overhead is negligible
638 : // since you could consider every element has the same number of neighbors
639 991888 : unsigned int nnb = 0;
640 3967552 : for (unsigned int ii = 0; ii <= 2; ii++)
641 11902656 : for (unsigned int jj = 0; jj <= 2; jj++)
642 35707968 : for (unsigned int kk = 0; kk <= 2; kk++)
643 26780976 : neighbors[nnb++] = elemId<Hex8>(
644 26780976 : nx, ny, (i + ii - 1 + nx) % nx, (j + jj - 1 + ny) % ny, (k + kk - 1 + nz) % nz);
645 :
646 991888 : return;
647 : }
648 :
649 : // Back
650 422000 : if (k != 0)
651 392600 : neighbors[0] = elemId<Hex8>(nx, ny, i, j, k - 1);
652 :
653 : // Bottom
654 422000 : if (j != 0)
655 402200 : neighbors[1] = elemId<Hex8>(nx, ny, i, j - 1, k);
656 :
657 : // Right
658 422000 : if (i != nx - 1)
659 399800 : neighbors[2] = elemId<Hex8>(nx, ny, i + 1, j, k);
660 :
661 : // Top
662 422000 : if (j != ny - 1)
663 402200 : neighbors[3] = elemId<Hex8>(nx, ny, i, j + 1, k);
664 :
665 : // Left
666 422000 : if (i != 0)
667 399800 : neighbors[4] = elemId<Hex8>(nx, ny, i - 1, j, k);
668 :
669 : // Front
670 422000 : if (k != nz - 1)
671 392600 : neighbors[5] = elemId<Hex8>(nx, ny, i, j, k + 1);
672 : }
673 :
674 : template <>
675 : dof_id_type
676 5976512 : DistributedRectilinearMeshGenerator::nodeId<Hex8>(const ElemType /*type*/,
677 : const dof_id_type nx,
678 : const dof_id_type ny,
679 : const dof_id_type i,
680 : const dof_id_type j,
681 : const dof_id_type k)
682 : {
683 5976512 : return i + (nx + 1) * (j + k * (ny + 1));
684 : }
685 :
686 : template <>
687 : Node *
688 5976512 : DistributedRectilinearMeshGenerator::addPoint<Hex8>(const dof_id_type nx,
689 : const dof_id_type ny,
690 : const dof_id_type nz,
691 : const dof_id_type i,
692 : const dof_id_type j,
693 : const dof_id_type k,
694 : const ElemType type,
695 : MeshBase & mesh)
696 : {
697 5976512 : auto id = nodeId<Hex8>(type, nx, ny, i, j, k);
698 5976512 : Node * node_ptr = mesh.query_node_ptr(id);
699 5976512 : if (!node_ptr)
700 : {
701 : std::unique_ptr<Node> new_node = Node::build(
702 1023702 : Point(static_cast<Real>(i) / nx, static_cast<Real>(j) / ny, static_cast<Real>(k) / nz), id);
703 :
704 1023702 : new_node->set_unique_id(nx * ny * nz + id);
705 :
706 1023702 : node_ptr = mesh.add_node(std::move(new_node));
707 1023702 : }
708 :
709 5976512 : return node_ptr;
710 : }
711 :
712 : template <>
713 : void
714 747064 : DistributedRectilinearMeshGenerator::addElement<Hex8>(const dof_id_type nx,
715 : const dof_id_type ny,
716 : const dof_id_type nz,
717 : const dof_id_type i,
718 : const dof_id_type j,
719 : const dof_id_type k,
720 : const dof_id_type elem_id,
721 : const processor_id_type pid,
722 : const ElemType type,
723 : MeshBase & mesh)
724 : {
725 747064 : BoundaryInfo & boundary_info = mesh.get_boundary_info();
726 :
727 : // This ordering was picked to match the ordering in mesh_generation.C
728 747064 : auto node0_ptr = addPoint<Hex8>(nx, ny, nz, i, j, k, type, mesh);
729 747064 : node0_ptr->processor_id() = pid;
730 747064 : auto node1_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j, k, type, mesh);
731 747064 : node1_ptr->processor_id() = pid;
732 747064 : auto node2_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j + 1, k, type, mesh);
733 747064 : node2_ptr->processor_id() = pid;
734 747064 : auto node3_ptr = addPoint<Hex8>(nx, ny, nz, i, j + 1, k, type, mesh);
735 747064 : node3_ptr->processor_id() = pid;
736 747064 : auto node4_ptr = addPoint<Hex8>(nx, ny, nz, i, j, k + 1, type, mesh);
737 747064 : node4_ptr->processor_id() = pid;
738 747064 : auto node5_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j, k + 1, type, mesh);
739 747064 : node5_ptr->processor_id() = pid;
740 747064 : auto node6_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j + 1, k + 1, type, mesh);
741 747064 : node6_ptr->processor_id() = pid;
742 747064 : auto node7_ptr = addPoint<Hex8>(nx, ny, nz, i, j + 1, k + 1, type, mesh);
743 747064 : node7_ptr->processor_id() = pid;
744 :
745 747064 : Elem * elem = new Hex8;
746 747064 : elem->set_id(elem_id);
747 747064 : elem->processor_id() = pid;
748 747064 : elem->set_unique_id(elem_id);
749 747064 : elem = mesh.add_elem(elem);
750 747064 : elem->set_node(0, node0_ptr);
751 747064 : elem->set_node(1, node1_ptr);
752 747064 : elem->set_node(2, node2_ptr);
753 747064 : elem->set_node(3, node3_ptr);
754 747064 : elem->set_node(4, node4_ptr);
755 747064 : elem->set_node(5, node5_ptr);
756 747064 : elem->set_node(6, node6_ptr);
757 747064 : elem->set_node(7, node7_ptr);
758 :
759 747064 : if (k == 0)
760 57872 : boundary_info.add_side(elem, 0, 0);
761 :
762 747064 : if (k == (nz - 1))
763 57872 : boundary_info.add_side(elem, 5, 5);
764 :
765 747064 : if (j == 0)
766 43256 : boundary_info.add_side(elem, 1, 1);
767 :
768 747064 : if (j == (ny - 1))
769 43256 : boundary_info.add_side(elem, 3, 3);
770 :
771 747064 : if (i == 0)
772 50824 : boundary_info.add_side(elem, 4, 4);
773 :
774 747064 : if (i == (nx - 1))
775 50824 : boundary_info.add_side(elem, 2, 2);
776 747064 : }
777 :
778 : template <>
779 : void
780 2304952 : DistributedRectilinearMeshGenerator::getIndices<Hex8>(const dof_id_type nx,
781 : const dof_id_type ny,
782 : const dof_id_type elem_id,
783 : dof_id_type & i,
784 : dof_id_type & j,
785 : dof_id_type & k)
786 : {
787 2304952 : i = elem_id % nx;
788 2304952 : j = (((elem_id - i) / nx) % ny);
789 2304952 : k = ((elem_id - i) - (j * nx)) / (nx * ny);
790 2304952 : }
791 :
792 : template <>
793 : void
794 484 : DistributedRectilinearMeshGenerator::getGhostNeighbors<Hex8>(
795 : const dof_id_type nx,
796 : const dof_id_type ny,
797 : const dof_id_type nz,
798 : const MeshBase & mesh,
799 : const std::set<dof_id_type> & current_elems,
800 : std::set<dof_id_type> & ghost_elems)
801 : {
802 : dof_id_type i, j, k;
803 :
804 484 : std::vector<dof_id_type> neighbors(27);
805 :
806 992372 : for (auto elem_id : current_elems)
807 : {
808 991888 : getIndices<Hex8>(nx, ny, elem_id, i, j, k);
809 :
810 991888 : getNeighbors<Hex8>(nx, ny, nz, i, j, k, neighbors, true);
811 :
812 27772864 : for (auto neighbor : neighbors)
813 26780976 : if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
814 5144800 : ghost_elems.insert(neighbor);
815 : }
816 484 : }
817 :
818 : template <>
819 : void
820 242 : DistributedRectilinearMeshGenerator::setBoundaryNames<Hex8>(BoundaryInfo & boundary_info)
821 : {
822 242 : boundary_info.sideset_name(0) = "back";
823 242 : boundary_info.sideset_name(1) = "bottom";
824 242 : boundary_info.sideset_name(2) = "right";
825 242 : boundary_info.sideset_name(3) = "top";
826 242 : boundary_info.sideset_name(4) = "left";
827 242 : boundary_info.sideset_name(5) = "front";
828 242 : }
829 :
830 : template <>
831 : void
832 242 : DistributedRectilinearMeshGenerator::scaleNodalPositions<Hex8>(dof_id_type /*nx*/,
833 : dof_id_type /*ny*/,
834 : dof_id_type /*nz*/,
835 : Real xmin,
836 : Real xmax,
837 : Real ymin,
838 : Real ymax,
839 : Real zmin,
840 : Real zmax,
841 : MeshBase & mesh)
842 : {
843 1023944 : for (auto & node_ptr : mesh.node_ptr_range())
844 : {
845 1023702 : (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
846 1023702 : (*node_ptr)(1) = (*node_ptr)(1) * (ymax - ymin) + ymin;
847 1023702 : (*node_ptr)(2) = (*node_ptr)(2) * (zmax - zmin) + zmin;
848 242 : }
849 242 : }
850 :
851 : template <>
852 : void
853 56 : DistributedRectilinearMeshGenerator::paritionSquarely<Edge2>(const dof_id_type nx,
854 : const dof_id_type /*ny*/,
855 : const dof_id_type /*nz*/,
856 : const processor_id_type num_procs,
857 : std::vector<dof_id_type> & istarts,
858 : std::vector<dof_id_type> & jstarts,
859 : std::vector<dof_id_type> & kstarts)
860 : {
861 : // Starting indices along x direction
862 56 : istarts.resize(num_procs + 1);
863 : // Starting indices along y direction
864 : // There is only one processor along y direction since it is a 1D mesh
865 56 : jstarts.resize(2);
866 56 : jstarts[0] = 0;
867 56 : jstarts[1] = 1;
868 : // Starting indices along z direction
869 : // There is only one processor along z direction since it is a 1D mesh
870 56 : kstarts.resize(2);
871 56 : kstarts[0] = 0;
872 56 : kstarts[1] = 1;
873 :
874 56 : istarts[0] = 0;
875 256 : for (processor_id_type pid = 0; pid < num_procs; pid++)
876 : // Partition mesh evenly. The extra elements are assigned to the front processors
877 200 : istarts[pid + 1] = istarts[pid] + (nx / num_procs + ((nx % num_procs) > pid));
878 56 : }
879 :
880 : template <>
881 : void
882 302 : DistributedRectilinearMeshGenerator::paritionSquarely<Quad4>(const dof_id_type nx,
883 : const dof_id_type ny,
884 : const dof_id_type /*nz*/,
885 : const processor_id_type num_procs,
886 : std::vector<dof_id_type> & istarts,
887 : std::vector<dof_id_type> & jstarts,
888 : std::vector<dof_id_type> & kstarts)
889 : {
890 : // Try for squarish distribution
891 : // The number of processors along x direction
892 : processor_id_type px =
893 302 : (processor_id_type)(0.5 + std::sqrt(((Real)nx) * ((Real)num_procs) / ((Real)ny)));
894 :
895 302 : if (!px)
896 0 : px = 1;
897 :
898 : // The number of processors along y direction
899 302 : processor_id_type py = 1;
900 : // Fact num_procs into px times py
901 302 : while (px > 0)
902 : {
903 302 : py = num_procs / px;
904 302 : if (py * px == num_procs)
905 302 : break;
906 0 : px--;
907 : }
908 : // More processors are needed for denser side
909 302 : if (nx > ny && py < px)
910 0 : std::swap(px, py);
911 :
912 : // Starting indices along x direction
913 302 : istarts.resize(px + 1);
914 : // Starting indices along y direction
915 302 : jstarts.resize(py + 1);
916 : // Starting indices along z direction
917 302 : kstarts.resize(2);
918 : // There is no elements along z direction
919 302 : kstarts[0] = 0;
920 302 : kstarts[1] = 1;
921 :
922 302 : istarts[0] = 0;
923 684 : for (processor_id_type pxid = 0; pxid < px; pxid++)
924 : // Partition elements evenly along x direction
925 382 : istarts[pxid + 1] = istarts[pxid] + nx / px + ((nx % px) > pxid);
926 :
927 302 : jstarts[0] = 0;
928 816 : for (processor_id_type pyid = 0; pyid < py; pyid++)
929 : // Partition elements evenly along y direction
930 514 : jstarts[pyid + 1] = jstarts[pyid] + (ny / py + ((ny % py) > pyid));
931 302 : }
932 :
933 : template <>
934 : void
935 120 : DistributedRectilinearMeshGenerator::paritionSquarely<Hex8>(const dof_id_type nx,
936 : const dof_id_type ny,
937 : const dof_id_type nz,
938 : const processor_id_type num_procs,
939 : std::vector<dof_id_type> & istarts,
940 : std::vector<dof_id_type> & jstarts,
941 : std::vector<dof_id_type> & kstarts)
942 : {
943 : /* Try for squarish distribution */
944 : // The number of processors along y direction
945 : processor_id_type py =
946 120 : (processor_id_type)(0.5 + std::pow(((Real)ny * ny) * ((Real)num_procs) / ((Real)nz * nx),
947 120 : (Real)(1. / 3.)));
948 120 : if (!py)
949 0 : py = 1;
950 : // The number of processors for pxpz plane
951 120 : processor_id_type pxpz = 1;
952 : // Factorize num_procs into py times pxpz
953 240 : while (py > 0)
954 : {
955 240 : pxpz = num_procs / py;
956 240 : if (py * pxpz == num_procs)
957 120 : break;
958 120 : py--;
959 : }
960 :
961 120 : if (!py)
962 0 : py = 1;
963 :
964 : // There number of processors along x direction
965 : processor_id_type px =
966 120 : (processor_id_type)(0.5 + std::sqrt(((Real)nx) * ((Real)num_procs) / ((Real)nz * py)));
967 120 : if (!px)
968 0 : px = 1;
969 : // The number of processors along z direction
970 120 : processor_id_type pz = 1;
971 : // Factorize num_procs into px times py times pz
972 208 : while (px > 0)
973 : {
974 208 : pz = num_procs / (px * py);
975 208 : if (px * py * pz == num_procs)
976 120 : break;
977 88 : px--;
978 : }
979 : // denser mesh takes more processors
980 120 : if (nx > nz && px < pz)
981 24 : std::swap(px, pz);
982 :
983 120 : istarts.resize(px + 1);
984 120 : jstarts.resize(py + 1);
985 120 : kstarts.resize(pz + 1);
986 :
987 120 : istarts[0] = 0;
988 384 : for (processor_id_type pxid = 0; pxid < px; pxid++)
989 : // Partition mesh evenly along x direction
990 264 : istarts[pxid + 1] = istarts[pxid] + nx / px + ((nx % px) > pxid);
991 :
992 120 : jstarts[0] = 0;
993 336 : for (processor_id_type pyid = 0; pyid < py; pyid++)
994 : // Partition mesh evenly along y direction
995 216 : jstarts[pyid + 1] = jstarts[pyid] + (ny / py + ((ny % py) > pyid));
996 :
997 120 : kstarts[0] = 0;
998 304 : for (processor_id_type pzid = 0; pzid < pz; pzid++)
999 : // Partition mesh evenly along z direction
1000 184 : kstarts[pzid + 1] = kstarts[pzid] + (nz / pz + ((nz % pz) > pzid));
1001 120 : }
1002 :
1003 : template <typename T>
1004 : void
1005 797 : DistributedRectilinearMeshGenerator::buildCube(UnstructuredMesh & mesh,
1006 : const unsigned int nx,
1007 : unsigned int ny,
1008 : unsigned int nz,
1009 : const Real xmin,
1010 : const Real xmax,
1011 : const Real ymin,
1012 : const Real ymax,
1013 : const Real zmin,
1014 : const Real zmax,
1015 : const ElemType type)
1016 : {
1017 : /// 1. "Partition" the element linearly (i.e. break them up into n_procs contiguous chunks
1018 : /// 2. Create a (dual) graph of the local elements
1019 : /// 3. Partition the graph using PetscExternalPartitioner
1020 : /// 4. Push elements to new owners
1021 : /// 5. Each processor creates only the elements it owns
1022 : /// 6. Find the ghosts we need (all the elements that connect to at least one local mesh vertex)
1023 : /// 7. Pull the PIDs of the ghosts
1024 : /// 8. Add ghosts with the right PIDs to the mesh
1025 :
1026 797 : auto & comm = mesh.comm();
1027 :
1028 797 : dof_id_type num_elems = nx * ny * nz;
1029 :
1030 797 : const auto num_procs = comm.size();
1031 : // Current processor ID
1032 797 : const auto pid = comm.rank();
1033 :
1034 797 : if (_num_cores_for_partition > num_procs)
1035 4 : mooseError("Number of cores for the graph partitioner is too large ", _num_cores_for_partition);
1036 :
1037 793 : if (!_num_cores_for_partition)
1038 769 : _num_cores_for_partition = num_procs;
1039 :
1040 793 : auto & boundary_info = mesh.get_boundary_info();
1041 :
1042 793 : std::unique_ptr<Elem> canonical_elem = std::make_unique<T>();
1043 :
1044 : // Will get used to find the neighbors of an element
1045 793 : std::vector<dof_id_type> neighbors(canonical_elem->n_neighbors());
1046 : // Number of neighbors
1047 793 : dof_id_type n_neighbors = canonical_elem->n_neighbors();
1048 :
1049 : // "Partition" the elements linearly across the processors
1050 : dof_id_type num_local_elems;
1051 : dof_id_type local_elems_begin;
1052 : dof_id_type local_elems_end;
1053 793 : if (pid < _num_cores_for_partition)
1054 785 : MooseUtils::linearPartitionItems(num_elems,
1055 785 : _num_cores_for_partition,
1056 : pid,
1057 : num_local_elems,
1058 : local_elems_begin,
1059 : local_elems_end);
1060 : else
1061 : {
1062 8 : num_local_elems = 0;
1063 8 : local_elems_begin = 0;
1064 8 : local_elems_end = 0;
1065 : }
1066 :
1067 793 : std::vector<std::vector<dof_id_type>> graph;
1068 :
1069 : // Fill in xadj and adjncy
1070 : // xadj is the offset into adjncy
1071 : // adjncy are the face neighbors of each element on this processor
1072 793 : graph.resize(num_local_elems);
1073 : // Build a distributed graph
1074 793 : num_local_elems = 0;
1075 757229 : for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
1076 : {
1077 756436 : dof_id_type i, j, k = 0;
1078 :
1079 756436 : getIndices<T>(nx, ny, e_id, i, j, k);
1080 :
1081 756436 : getNeighbors<T>(nx, ny, nz, i, j, k, neighbors, false);
1082 :
1083 756436 : std::vector<dof_id_type> & row = graph[num_local_elems++];
1084 756436 : row.reserve(n_neighbors);
1085 :
1086 4622340 : for (auto neighbor : neighbors)
1087 3865904 : if (neighbor != Elem::invalid_id)
1088 3687096 : row.push_back(neighbor);
1089 : }
1090 :
1091 : // Partition the distributed graph
1092 793 : std::vector<dof_id_type> partition_vec;
1093 793 : if (_partition_method == "linear")
1094 : {
1095 52 : mooseWarning(" LinearPartitioner is mainly used for setting up regression tests. For the "
1096 : "production run, please do not use it.");
1097 : // The graph is already partitioned linearly via calling MooseUtils::linearPartitionItems
1098 52 : partition_vec.resize(num_local_elems);
1099 : // We use it as is
1100 52 : std::fill(partition_vec.begin(), partition_vec.end(), pid);
1101 : }
1102 741 : else if (_partition_method == "square")
1103 : {
1104 : // Starting partition indices along x direction
1105 478 : std::vector<dof_id_type> istarts;
1106 : // Starting partition indices along y direction
1107 478 : std::vector<dof_id_type> jstarts;
1108 : // Starting partition indices along z direction
1109 478 : std::vector<dof_id_type> kstarts;
1110 478 : partition_vec.resize(num_local_elems);
1111 :
1112 : // Partition mesh evenly along each direction
1113 478 : paritionSquarely<T>(nx, ny, nz, num_procs, istarts, jstarts, kstarts);
1114 : // At least one processor
1115 : mooseAssert(istarts.size() > 1, "At least there is one processor along x direction");
1116 478 : processor_id_type px = istarts.size() - 1;
1117 : // At least one processor
1118 : mooseAssert(jstarts.size() > 1, "At least there is one processor along y direction");
1119 478 : processor_id_type py = jstarts.size() - 1;
1120 : // At least one processor
1121 : mooseAssert(kstarts.size() > 1, "At least there is one processor along z direction");
1122 : // processor_id_type pz = kstarts.size() -1;
1123 321198 : for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
1124 : {
1125 320720 : dof_id_type i = 0, j = 0, k = 0;
1126 320720 : getIndices<T>(nx, ny, e_id, i, j, k);
1127 320720 : processor_id_type pi = 0, pj = 0, pk = 0;
1128 :
1129 320720 : pi = (std::upper_bound(istarts.begin(), istarts.end(), i) - istarts.begin()) - 1;
1130 320720 : pj = (std::upper_bound(jstarts.begin(), jstarts.end(), j) - jstarts.begin()) - 1;
1131 320720 : pk = (std::upper_bound(kstarts.begin(), kstarts.end(), k) - kstarts.begin()) - 1;
1132 :
1133 320720 : partition_vec[e_id - local_elems_begin] = pk * px * py + pj * px + pi;
1134 :
1135 : mooseAssert((pk * px * py + pj * px + pi) < num_procs, "processor id is too large");
1136 : }
1137 478 : }
1138 263 : else if (_partition_method == "graph")
1139 263 : PetscExternalPartitioner::partitionGraph(
1140 263 : comm, graph, {}, {}, num_procs, _num_parts_per_compute_node, _part_package, partition_vec);
1141 : else
1142 0 : mooseError("Unsupported partition method " + _partition_method);
1143 :
1144 : mooseAssert(partition_vec.size() == num_local_elems, " Invalid partition was generateed ");
1145 :
1146 : // Use current elements to remote processors according to partition
1147 793 : std::map<processor_id_type, std::vector<dof_id_type>> pushed_elements_vecs;
1148 :
1149 757229 : for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
1150 756436 : pushed_elements_vecs[partition_vec[e_id - local_elems_begin]].push_back(e_id);
1151 :
1152 : // Collect new elements I should own
1153 793 : std::vector<dof_id_type> my_new_elems;
1154 :
1155 793 : auto elements_action_functor =
1156 2918 : [&my_new_elems](processor_id_type /*pid*/, const std::vector<dof_id_type> & data)
1157 1459 : { std::copy(data.begin(), data.end(), std::back_inserter(my_new_elems)); };
1158 :
1159 793 : Parallel::push_parallel_vector_data(comm, pushed_elements_vecs, elements_action_functor);
1160 :
1161 : // Add the elements this processor owns
1162 757229 : for (auto e_id : my_new_elems)
1163 : {
1164 756436 : dof_id_type i = 0, j = 0, k = 0;
1165 :
1166 756436 : getIndices<T>(nx, ny, e_id, i, j, k);
1167 :
1168 756436 : addElement<T>(nx, ny, nz, i, j, k, e_id, pid, type, mesh);
1169 : }
1170 :
1171 : // Need to link up the local elements before we can know what's missing
1172 793 : mesh.find_neighbors();
1173 :
1174 : // Get the ghosts (missing face neighbors)
1175 793 : std::set<dof_id_type> ghost_elems;
1176 : // Current local elements
1177 793 : std::set<dof_id_type> current_elems;
1178 :
1179 : // Fill current elems
1180 : // We will grow domain from current elements
1181 758022 : for (auto & elem_ptr : mesh.element_ptr_range())
1182 756436 : current_elems.insert(elem_ptr->id());
1183 :
1184 : // Grow domain layer by layer
1185 2379 : for (unsigned layer = 0; layer < _num_side_layers; layer++)
1186 : {
1187 : // getGhostNeighbors produces one layer of side neighbors
1188 1586 : getGhostNeighbors<T>(nx, ny, nz, mesh, current_elems, ghost_elems);
1189 : // Merge ghost elements into current element list
1190 1586 : current_elems.insert(ghost_elems.begin(), ghost_elems.end());
1191 : }
1192 : // We do not need it anymore
1193 793 : current_elems.clear();
1194 :
1195 : // Elements we're going to request from others
1196 793 : std::map<processor_id_type, std::vector<dof_id_type>> ghost_elems_to_request;
1197 :
1198 357294 : for (auto & ghost_id : ghost_elems)
1199 : {
1200 : // This is the processor ID the ghost_elem was originally assigned to
1201 356501 : auto proc_id = MooseUtils::linearPartitionChunk(num_elems, _num_cores_for_partition, ghost_id);
1202 :
1203 : // Using side-effect insertion on purpose
1204 356501 : ghost_elems_to_request[proc_id].push_back(ghost_id);
1205 : }
1206 :
1207 : // Next set ghost object ids from other processors
1208 793 : auto gather_functor =
1209 360093 : [local_elems_begin, partition_vec](processor_id_type /*pid*/,
1210 : const std::vector<dof_id_type> & coming_ghost_elems,
1211 : std::vector<dof_id_type> & pid_for_ghost_elems)
1212 : {
1213 1796 : auto num_ghost_elems = coming_ghost_elems.size();
1214 1796 : pid_for_ghost_elems.resize(num_ghost_elems);
1215 :
1216 1796 : dof_id_type num_local_elems = 0;
1217 :
1218 358297 : for (auto elem : coming_ghost_elems)
1219 356501 : pid_for_ghost_elems[num_local_elems++] = partition_vec[elem - local_elems_begin];
1220 : };
1221 :
1222 793 : std::unordered_map<dof_id_type, processor_id_type> ghost_elem_to_pid;
1223 :
1224 793 : auto action_functor =
1225 1796 : [&ghost_elem_to_pid](processor_id_type /*pid*/,
1226 : const std::vector<dof_id_type> & my_ghost_elems,
1227 : const std::vector<dof_id_type> & pid_for_my_ghost_elems)
1228 : {
1229 1796 : dof_id_type num_local_elems = 0;
1230 :
1231 358297 : for (auto elem : my_ghost_elems)
1232 356501 : ghost_elem_to_pid[elem] = pid_for_my_ghost_elems[num_local_elems++];
1233 : };
1234 :
1235 793 : const dof_id_type * ex = nullptr;
1236 793 : libMesh::Parallel::pull_parallel_vector_data(
1237 : comm, ghost_elems_to_request, gather_functor, action_functor, ex);
1238 :
1239 : // Add the ghost elements to the mesh
1240 357294 : for (auto gtop : ghost_elem_to_pid)
1241 : {
1242 356501 : auto ghost_id = gtop.first;
1243 356501 : auto proc_id = gtop.second;
1244 :
1245 356501 : dof_id_type i = 0, j = 0, k = 0;
1246 :
1247 356501 : getIndices<T>(nx, ny, ghost_id, i, j, k);
1248 :
1249 356501 : addElement<T>(nx, ny, nz, i, j, k, ghost_id, proc_id, type, mesh);
1250 : }
1251 :
1252 793 : mesh.find_neighbors(true);
1253 :
1254 : // Set RemoteElem neighbors
1255 2227460 : for (auto & elem_ptr : mesh.element_ptr_range())
1256 7054349 : for (unsigned int s = 0; s < elem_ptr->n_sides(); s++)
1257 5941412 : if (!elem_ptr->neighbor_ptr(s) && !boundary_info.n_boundary_ids(elem_ptr, s))
1258 208578 : elem_ptr->set_neighbor(s, const_cast<RemoteElem *>(remote_elem));
1259 :
1260 793 : setBoundaryNames<T>(boundary_info);
1261 :
1262 793 : Partitioner::set_node_processor_ids(mesh);
1263 :
1264 : // Already partitioned!
1265 793 : mesh.skip_partitioning(true);
1266 :
1267 : // Scale the nodal positions
1268 793 : scaleNodalPositions<T>(nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax, mesh);
1269 793 : }
1270 :
std::unique_ptr<MeshBase>
DistributedRectilinearMeshGenerator::generate()
{
  // We will set up boundaries accordingly. We do not want to call
  // ghostGhostedBoundaries in which allgather_packed_range is unscalable.
  // ghostGhostedBoundaries will gather all boundaries to every single processor
  _mesh->needGhostGhostedBoundaries(false);

  // DistributedRectilinearMeshGenerator always generates a distributed mesh
  auto mesh = buildDistributedMesh();

  MooseEnum elem_type_enum = getParam<MooseEnum>("elem_type");

  // If the user did not request a specific element type, pick the canonical
  // linear element for the mesh dimension
  if (!isParamValid("elem_type"))
  {
    // Switching on MooseEnum
    switch (_dim)
    {
      case 1:
        elem_type_enum = "EDGE2";
        break;
      case 2:
        elem_type_enum = "QUAD4";
        break;
      case 3:
        elem_type_enum = "HEX8";
        break;
    }
  }

  _elem_type = Utility::string_to_enum<ElemType>(elem_type_enum);

  mesh->set_mesh_dimension(_dim);
  mesh->set_spatial_dimension(_dim);
  // Let the mesh know that it's not serialized
  mesh->set_distributed();

  // Switching on MooseEnum
  switch (_dim)
  {
    // The build_XYZ mesh generation functions take an
    // UnstructuredMesh& as the first argument, hence the dynamic_cast.
    // For dim < 3 the unused directions get a single element and zero extent.
    case 1:
      buildCube<Edge2>(
          dynamic_cast<UnstructuredMesh &>(*mesh), _nx, 1, 1, _xmin, _xmax, 0, 0, 0, 0, _elem_type);
      break;
    case 2:
      buildCube<Quad4>(dynamic_cast<UnstructuredMesh &>(*mesh),
                       _nx,
                       _ny,
                       1,
                       _xmin,
                       _xmax,
                       _ymin,
                       _ymax,
                       0,
                       0,
                       _elem_type);
      break;
    case 3:
      buildCube<Hex8>(dynamic_cast<UnstructuredMesh &>(*mesh),
                      _nx,
                      _ny,
                      _nz,
                      _xmin,
                      _xmax,
                      _ymin,
                      _ymax,
                      _zmin,
                      _zmax,
                      _elem_type);
      break;
    // NOTE(review): effectively unreachable since "dim" is a MooseEnum
    // restricted to 1/2/3 in validParams; the message also refers to
    // elem_type although the switch is on _dim — confirm before relying on it
    default:
      mooseError(
          getParam<MooseEnum>("elem_type"),
          " is not a currently supported element type for DistributedRectilinearMeshGenerator");
  }

  // Apply the bias if any exists
  if (_bias_x != 1.0 || _bias_y != 1.0 || _bias_z != 1.0)
  {
    // Biases
    Real bias[3] = {_bias_x, _bias_y, _bias_z};

    // "width" of the mesh in each direction
    Real width[3] = {_xmax - _xmin, _ymax - _ymin, _zmax - _zmin};

    // Min mesh extent in each direction.
    Real mins[3] = {_xmin, _ymin, _zmin};

    // Number of elements in each direction.
    dof_id_type nelem[3] = {_nx, _ny, _nz};

    // We will need the biases raised to integer powers in each
    // direction, so let's pre-compute those...
    std::vector<std::vector<Real>> pows(LIBMESH_DIM);
    for (dof_id_type dir = 0; dir < LIBMESH_DIM; ++dir)
    {
      pows[dir].resize(nelem[dir] + 1);
      for (dof_id_type i = 0; i < pows[dir].size(); ++i)
        pows[dir][i] = std::pow(bias[dir], static_cast<int>(i));
    }

    // Loop over the nodes and move them to the desired location
    for (auto & node_ptr : mesh->node_ptr_range())
    {
      Node & node = *node_ptr;

      for (dof_id_type dir = 0; dir < LIBMESH_DIM; ++dir)
      {
        // Only bias directions with nonzero extent and a nontrivial bias
        if (width[dir] != 0. && bias[dir] != 1.)
        {
          // Compute the scaled "index" of the current point. This
          // will either be close to a whole integer or a whole
          // integer+0.5 for quadratic nodes.
          Real float_index = (node(dir) - mins[dir]) * nelem[dir] / width[dir];

          Real integer_part = 0;
          Real fractional_part = std::modf(float_index, &integer_part);

          // Figure out where to move the node...
          if (std::abs(fractional_part) < TOLERANCE || std::abs(fractional_part - 1.0) < TOLERANCE)
          {
            // If the fractional_part ~ 0.0 or 1.0, this is a vertex node, so
            // we don't need an average.
            //
            // Compute the "index" we are at in the current direction. We
            // round to the nearest integral value to get this instead
            // of using "integer_part", since that could be off by a
            // lot (e.g. we want 3.9999 to map to 4.0 instead of 3.0).
            int index = round(float_index);

            // Move node to biased location.
            node(dir) =
                mins[dir] + width[dir] * (1. - pows[dir][index]) / (1. - pows[dir][nelem[dir]]);
          }
          else if (std::abs(fractional_part - 0.5) < TOLERANCE)
          {
            // If the fractional_part ~ 0.5, this is a midedge/face
            // (i.e. quadratic) node. We don't move those with the same
            // bias as the vertices, instead we put them midway between
            // their respective vertices.
            //
            // Also, since the fractional part is nearly 0.5, we know that
            // the integer_part will be the index of the vertex to the
            // left, and integer_part+1 will be the index of the
            // vertex to the right.
            //
            // NOTE: integer_part is a Real holding a whole number (produced by
            // std::modf); it narrows implicitly when used as an index here.
            node(dir) = mins[dir] +
                        width[dir] *
                            (1. - 0.5 * (pows[dir][integer_part] + pows[dir][integer_part + 1])) /
                            (1. - pows[dir][nelem[dir]]);
          }
          else
          {
            // We don't yet handle anything higher order than quadratic...
            mooseError("Unable to bias node at node(", dir, ")=", node(dir));
          }
        }
      }
    }
  }

  // MeshOutput<MT>::write_equation_systems will automatically renumber node and element IDs.
  // So we have to make that consistent at the first place. Otherwise, the moose cached data such as
  // _block_node_list will be inconsistent when we doing MooseMesh::getNodeBlockIds. That being
  // said, moose will pass new ids to getNodeBlockIds while the cached _block_node_list still use
  // the old node IDs. Yes, you could say: go ahead to do a mesh update, but I would say no. I do
  // not change mesh and there is no point to update anything.
  mesh->allow_renumbering(true);
  mesh->prepare_for_use();

  return dynamic_pointer_cast<MeshBase>(mesh);
}
|