// DistributedRectilinearMeshGenerator.C
//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "DistributedRectilinearMeshGenerator.h"
#include "CastUniquePointer.h"
#include "PetscExternalPartitioner.h"

#include "SerializerGuard.h"

#include "libmesh/mesh_generation.h"
#include "libmesh/string_to_enum.h"
#include "libmesh/periodic_boundaries.h"
#include "libmesh/periodic_boundary_base.h"
#include "libmesh/unstructured_mesh.h"
#include "libmesh/mesh_communication.h"
#include "libmesh/remote_elem.h"
#include "libmesh/partitioner.h"

// TIMPI includes
#include "timpi/communicator.h"
#include "timpi/parallel_sync.h"

// C++ includes
#include <cmath> // provides round, not std::round (see http://www.cplusplus.com/reference/cmath/round/)

registerMooseObject("MooseApp", DistributedRectilinearMeshGenerator);

InputParameters
DistributedRectilinearMeshGenerator::validParams()
{
  InputParameters params = PetscExternalPartitioner::validParams();
  params += MeshGenerator::validParams();

  MooseEnum dims("1=1 2 3");
  params.addRequiredParam<MooseEnum>(
      "dim", dims, "The dimension of the mesh to be generated"); // Make this parameter required

  params.addParam<dof_id_type>("nx", 1, "Number of elements in the X direction");
  params.addParam<dof_id_type>("ny", 1, "Number of elements in the Y direction");
  params.addParam<dof_id_type>("nz", 1, "Number of elements in the Z direction");
  params.addParam<Real>("xmin", 0.0, "Lower X Coordinate of the generated mesh");
  params.addParam<Real>("ymin", 0.0, "Lower Y Coordinate of the generated mesh");
  params.addParam<Real>("zmin", 0.0, "Lower Z Coordinate of the generated mesh");
  params.addParam<Real>("xmax", 1.0, "Upper X Coordinate of the generated mesh");
  params.addParam<Real>("ymax", 1.0, "Upper Y Coordinate of the generated mesh");
  params.addParam<Real>("zmax", 1.0, "Upper Z Coordinate of the generated mesh");

  params.addParam<processor_id_type>(
      "num_cores_for_partition", 0, "Number of cores for partitioning the graph");

  params.addRangeCheckedParam<unsigned>(
      "num_side_layers",
      2,
      "num_side_layers>=1 & num_side_layers<5",
      "Number of layers of off-processor side neighbors reserved during mesh generation");

  params.addRelationshipManager("ElementSideNeighborLayers",
                                Moose::RelationshipManagerType::GEOMETRIC,
                                [](const InputParameters & obj_params, InputParameters & rm_params)
                                {
                                  // Let this RM safeguard the ghosted layers specified by the user
                                  rm_params.set<unsigned short>("layers") =
                                      obj_params.get<unsigned>("num_side_layers");
                                  // We cannot attach the geometric functor early here because some
                                  // simulation-related info, such as periodic BCs, is not available
                                  // yet at this early stage. Periodic BCs are passed into the
                                  // ghosting functor during initFunctor of
                                  // ElementSideNeighborLayers. There is no harm in attaching the
                                  // geometric functor late for general simulations that have no
                                  // extra requirements on ghosting elements. That is especially
                                  // true for distributed generated meshes, since there is not much
                                  // redundant info.
                                  rm_params.set<bool>("attach_geometric_early") = false;
                                });

  MooseEnum partition("graph linear square", "graph", false);
  params.addParam<MooseEnum>(
      "partition", partition, "Which method (graph, linear, square) to use to partition the mesh");

  MooseEnum elem_types(LIST_GEOM_ELEM); // no default
  params.addParam<MooseEnum>("elem_type",
                             elem_types,
                             "The type of element from libMesh to "
                             "generate (default: linear element for "
                             "requested dimension)");

  params.addRangeCheckedParam<Real>(
      "bias_x",
      1.,
      "bias_x>=0.5 & bias_x<=2",
      "The amount by which to grow (or shrink) the cells in the x-direction.");
  params.addRangeCheckedParam<Real>(
      "bias_y",
      1.,
      "bias_y>=0.5 & bias_y<=2",
      "The amount by which to grow (or shrink) the cells in the y-direction.");
  params.addRangeCheckedParam<Real>(
      "bias_z",
      1.,
      "bias_z>=0.5 & bias_z<=2",
      "The amount by which to grow (or shrink) the cells in the z-direction.");

  params.addClassDescription(
      "Create a line, square, or cube mesh with uniformly spaced or biased elements.");

  return params;
}

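// Example usage (a minimal input-file sketch, not part of this source file;
// the block name "dmg" is arbitrary):
//
//   [Mesh]
//     [dmg]
//       type = DistributedRectilinearMeshGenerator
//       dim = 2
//       nx = 10
//       ny = 10
//       partition = graph
//     []
//   []
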
DistributedRectilinearMeshGenerator::DistributedRectilinearMeshGenerator(
    const InputParameters & parameters)
  : MeshGenerator(parameters),
    _dim(getParam<MooseEnum>("dim")),
    _nx(declareMeshProperty("num_elements_x", getParam<dof_id_type>("nx"))),
    _ny(declareMeshProperty("num_elements_y", getParam<dof_id_type>("ny"))),
    _nz(declareMeshProperty("num_elements_z", getParam<dof_id_type>("nz"))),
    _xmin(declareMeshProperty("xmin", getParam<Real>("xmin"))),
    _xmax(declareMeshProperty("xmax", getParam<Real>("xmax"))),
    _ymin(declareMeshProperty("ymin", getParam<Real>("ymin"))),
    _ymax(declareMeshProperty("ymax", getParam<Real>("ymax"))),
    _zmin(declareMeshProperty("zmin", getParam<Real>("zmin"))),
    _zmax(declareMeshProperty("zmax", getParam<Real>("zmax"))),
    _num_cores_for_partition(getParam<processor_id_type>("num_cores_for_partition")),
    _bias_x(getParam<Real>("bias_x")),
    _bias_y(getParam<Real>("bias_y")),
    _bias_z(getParam<Real>("bias_z")),
    _part_package(getParam<MooseEnum>("part_package")),
    _num_parts_per_compute_node(getParam<processor_id_type>("num_cores_per_compute_node")),
    _partition_method(getParam<MooseEnum>("partition")),
    _num_side_layers(getParam<unsigned>("num_side_layers"))
{
  declareMeshProperty("use_distributed_mesh", true);
}

template <>
dof_id_type
DistributedRectilinearMeshGenerator::numNeighbors<Edge2>(const dof_id_type nx,
                                                         const dof_id_type /*ny*/,
                                                         const dof_id_type /*nz*/,
                                                         const dof_id_type i,
                                                         const dof_id_type /*j*/,
                                                         const dof_id_type /*k*/)
{
  // The ends only have one neighbor
  if (i == 0 || i == nx - 1)
    return 1;

  return 2;
}

template <>
void
DistributedRectilinearMeshGenerator::getNeighbors<Edge2>(const dof_id_type nx,
                                                         const dof_id_type /*ny*/,
                                                         const dof_id_type /*nz*/,
                                                         const dof_id_type i,
                                                         const dof_id_type /*j*/,
                                                         const dof_id_type /*k*/,
                                                         std::vector<dof_id_type> & neighbors,
                                                         const bool corner)
{
  if (corner)
  {
    // The elements on the opposite side of the current boundary are required
    // for periodic boundary conditions
    neighbors[0] = (i - 1 + nx) % nx;
    neighbors[1] = (i + 1 + nx) % nx;

    return;
  }

  neighbors[0] = i - 1;
  neighbors[1] = i + 1;

  // The first element doesn't have a left neighbor
  if (i == 0)
    neighbors[0] = Elem::invalid_id;

  // The last element doesn't have a right neighbor
  if (i == nx - 1)
    neighbors[1] = Elem::invalid_id;
}

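// A worked example (illustrative comment only): for nx = 4 and i = 0,
//   getNeighbors<Edge2>(4, 0, 0, 0, 0, 0, neighbors, false) -> {invalid_id, 1}
//   getNeighbors<Edge2>(4, 0, 0, 0, 0, 0, neighbors, true)  -> {3, 1}
// With corner = true the index wraps around periodically, so element 0 reports
// the last element (3) as its "left" neighbor.
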
template <>
void
DistributedRectilinearMeshGenerator::getIndices<Edge2>(const dof_id_type /*nx*/,
                                                       const dof_id_type /*ny*/,
                                                       const dof_id_type elem_id,
                                                       dof_id_type & i,
                                                       dof_id_type & /*j*/,
                                                       dof_id_type & /*k*/)
{
  i = elem_id;
}

template <>
void
DistributedRectilinearMeshGenerator::getGhostNeighbors<Edge2>(
    const dof_id_type nx,
    const dof_id_type /*ny*/,
    const dof_id_type /*nz*/,
    const MeshBase & mesh,
    const std::set<dof_id_type> & current_elems,
    std::set<dof_id_type> & ghost_elems)
{
  std::vector<dof_id_type> neighbors(2);

  for (auto elem_id : current_elems)
  {
    getNeighbors<Edge2>(nx, 0, 0, elem_id, 0, 0, neighbors, true);

    for (auto neighbor : neighbors)
      if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
        ghost_elems.insert(neighbor);
  }
}

template <>
dof_id_type
DistributedRectilinearMeshGenerator::elemId<Edge2>(const dof_id_type /*nx*/,
                                                   const dof_id_type /*ny*/,
                                                   const dof_id_type i,
                                                   const dof_id_type /*j*/,
                                                   const dof_id_type /*k*/)
{
  return i;
}

template <>
void
DistributedRectilinearMeshGenerator::addElement<Edge2>(const dof_id_type nx,
                                                       const dof_id_type /*ny*/,
                                                       const dof_id_type /*nz*/,
                                                       const dof_id_type /*i*/,
                                                       const dof_id_type /*j*/,
                                                       const dof_id_type /*k*/,
                                                       const dof_id_type elem_id,
                                                       const processor_id_type pid,
                                                       const ElemType /*type*/,
                                                       MeshBase & mesh)
{
  BoundaryInfo & boundary_info = mesh.get_boundary_info();

  auto node_offset = elem_id;

  Node * node0_ptr = mesh.query_node_ptr(node_offset);
  if (!node0_ptr)
  {
    std::unique_ptr<Node> new_node =
        Node::build(Point(static_cast<Real>(node_offset) / nx, 0, 0), node_offset);

    new_node->set_unique_id(nx + node_offset);
    new_node->processor_id() = pid;

    node0_ptr = mesh.add_node(std::move(new_node));
  }

  Node * node1_ptr = mesh.query_node_ptr(node_offset + 1);
  if (!node1_ptr)
  {
    std::unique_ptr<Node> new_node =
        Node::build(Point(static_cast<Real>(node_offset + 1) / nx, 0, 0), node_offset + 1);

    new_node->set_unique_id(nx + node_offset + 1);
    new_node->processor_id() = pid;

    node1_ptr = mesh.add_node(std::move(new_node));
  }

  Elem * elem = new Edge2;
  elem->set_id(elem_id);
  elem->processor_id() = pid;
  elem->set_unique_id(elem_id);
  elem = mesh.add_elem(elem);
  elem->set_node(0, node0_ptr);
  elem->set_node(1, node1_ptr);

  if (elem_id == 0)
    boundary_info.add_side(elem, 0, 0);

  if (elem_id == nx - 1)
    boundary_info.add_side(elem, 1, 1);
}

template <>
void
DistributedRectilinearMeshGenerator::setBoundaryNames<Edge2>(BoundaryInfo & boundary_info)
{
  boundary_info.sideset_name(0) = "left";
  boundary_info.sideset_name(1) = "right";
}

template <>
void
DistributedRectilinearMeshGenerator::scaleNodalPositions<Edge2>(dof_id_type /*nx*/,
                                                                dof_id_type /*ny*/,
                                                                dof_id_type /*nz*/,
                                                                Real xmin,
                                                                Real xmax,
                                                                Real /*ymin*/,
                                                                Real /*ymax*/,
                                                                Real /*zmin*/,
                                                                Real /*zmax*/,
                                                                MeshBase & mesh)
{
  for (auto & node_ptr : mesh.node_ptr_range())
    (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
}

template <>
dof_id_type
DistributedRectilinearMeshGenerator::numNeighbors<Quad4>(const dof_id_type nx,
                                                         const dof_id_type ny,
                                                         const dof_id_type /*nz*/,
                                                         const dof_id_type i,
                                                         const dof_id_type j,
                                                         const dof_id_type /*k*/)
{
  dof_id_type n = 4;

  if (i == 0)
    n--;

  if (i == nx - 1)
    n--;

  if (j == 0)
    n--;

  if (j == ny - 1)
    n--;

  return n;
}

template <>
dof_id_type
DistributedRectilinearMeshGenerator::elemId<Quad4>(const dof_id_type nx,
                                                   const dof_id_type /*ny*/,
                                                   const dof_id_type i,
                                                   const dof_id_type j,
                                                   const dof_id_type /*k*/)
{
  return (j * nx) + i;
}

template <>
void
DistributedRectilinearMeshGenerator::getNeighbors<Quad4>(const dof_id_type nx,
                                                         const dof_id_type ny,
                                                         const dof_id_type /*nz*/,
                                                         const dof_id_type i,
                                                         const dof_id_type j,
                                                         const dof_id_type /*k*/,
                                                         std::vector<dof_id_type> & neighbors,
                                                         const bool corner)
{
  std::fill(neighbors.begin(), neighbors.end(), Elem::invalid_id);

  if (corner)
  {
    // We add one layer of point neighbors by default. In addition, the
    // elements on the opposite side of the current boundary are included in
    // case periodic boundary conditions are used. The overhead is negligible
    // since every element then has the same number of neighbors.
    // Note: dof_id_type is unsigned, hence the "+ nx" / "+ ny" before the
    // subtraction to avoid underflow.
    unsigned int nnb = 0;
    for (unsigned int ii = 0; ii <= 2; ii++)
      for (unsigned int jj = 0; jj <= 2; jj++)
        neighbors[nnb++] = elemId<Quad4>(nx, 0, (i + ii - 1 + nx) % nx, (j + jj - 1 + ny) % ny, 0);

    return;
  }

  // Bottom
  if (j != 0)
    neighbors[0] = elemId<Quad4>(nx, 0, i, j - 1, 0);

  // Right
  if (i != nx - 1)
    neighbors[1] = elemId<Quad4>(nx, 0, i + 1, j, 0);

  // Top
  if (j != ny - 1)
    neighbors[2] = elemId<Quad4>(nx, 0, i, j + 1, 0);

  // Left
  if (i != 0)
    neighbors[3] = elemId<Quad4>(nx, 0, i - 1, j, 0);
}

template <>
void
DistributedRectilinearMeshGenerator::getIndices<Quad4>(const dof_id_type nx,
                                                       const dof_id_type /*ny*/,
                                                       const dof_id_type elem_id,
                                                       dof_id_type & i,
                                                       dof_id_type & j,
                                                       dof_id_type & /*k*/)
{
  i = elem_id % nx;
  j = (elem_id - i) / nx;
}

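// Round-trip example (illustrative comment only): with nx = 4 the element ids
// are laid out row-major, so elemId<Quad4>(4, 0, 2, 1, 0) == 6 and
// getIndices<Quad4>(4, 0, 6, i, j, k) recovers i == 2, j == 1:
//
//   j=1:  4 5 6 7
//   j=0:  0 1 2 3
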
template <>
void
DistributedRectilinearMeshGenerator::getGhostNeighbors<Quad4>(
    const dof_id_type nx,
    const dof_id_type ny,
    const dof_id_type /*nz*/,
    const MeshBase & mesh,
    const std::set<dof_id_type> & current_elems,
    std::set<dof_id_type> & ghost_elems)
{
  dof_id_type i, j, k;

  std::vector<dof_id_type> neighbors(9);

  for (auto elem_id : current_elems)
  {
    getIndices<Quad4>(nx, 0, elem_id, i, j, k);

    getNeighbors<Quad4>(nx, ny, 0, i, j, 0, neighbors, true);

    for (auto neighbor : neighbors)
      if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
        ghost_elems.insert(neighbor);
  }
}

template <>
dof_id_type
DistributedRectilinearMeshGenerator::nodeId<Quad4>(const ElemType /*type*/,
                                                   const dof_id_type nx,
                                                   const dof_id_type /*ny*/,
                                                   const dof_id_type i,
                                                   const dof_id_type j,
                                                   const dof_id_type /*k*/)
{
  return i + j * (nx + 1);
}

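// The node lattice has (nx + 1) nodes per row, so for nx = 2 (a 2x2 element
// grid) the node ids run (illustrative comment only):
//
//   j=2:  6 7 8
//   j=1:  3 4 5
//   j=0:  0 1 2
//
// e.g. nodeId<Quad4>(type, 2, 0, 1, 2, 0) == 1 + 2 * (2 + 1) == 7.
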
template <>
void
DistributedRectilinearMeshGenerator::addElement<Quad4>(const dof_id_type nx,
                                                       const dof_id_type ny,
                                                       const dof_id_type /*nz*/,
                                                       const dof_id_type i,
                                                       const dof_id_type j,
                                                       const dof_id_type /*k*/,
                                                       const dof_id_type elem_id,
                                                       const processor_id_type pid,
                                                       const ElemType type,
                                                       MeshBase & mesh)
{
  BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // Bottom Left
  const dof_id_type node0_id = nodeId<Quad4>(type, nx, 0, i, j, 0);
  Node * node0_ptr = mesh.query_node_ptr(node0_id);
  if (!node0_ptr)
  {
    std::unique_ptr<Node> new_node =
        Node::build(Point(static_cast<Real>(i) / nx, static_cast<Real>(j) / ny, 0), node0_id);

    new_node->set_unique_id(nx * ny + node0_id);
    new_node->processor_id() = pid;

    node0_ptr = mesh.add_node(std::move(new_node));
  }

  // Bottom Right
  const dof_id_type node1_id = nodeId<Quad4>(type, nx, 0, i + 1, j, 0);
  Node * node1_ptr = mesh.query_node_ptr(node1_id);
  if (!node1_ptr)
  {
    std::unique_ptr<Node> new_node =
        Node::build(Point(static_cast<Real>(i + 1) / nx, static_cast<Real>(j) / ny, 0), node1_id);

    new_node->set_unique_id(nx * ny + node1_id);
    new_node->processor_id() = pid;

    node1_ptr = mesh.add_node(std::move(new_node));
  }

  // Top Right
  const dof_id_type node2_id = nodeId<Quad4>(type, nx, 0, i + 1, j + 1, 0);
  Node * node2_ptr = mesh.query_node_ptr(node2_id);
  if (!node2_ptr)
  {
    std::unique_ptr<Node> new_node = Node::build(
        Point(static_cast<Real>(i + 1) / nx, static_cast<Real>(j + 1) / ny, 0), node2_id);

    new_node->set_unique_id(nx * ny + node2_id);
    new_node->processor_id() = pid;

    node2_ptr = mesh.add_node(std::move(new_node));
  }

  // Top Left
  const dof_id_type node3_id = nodeId<Quad4>(type, nx, 0, i, j + 1, 0);
  Node * node3_ptr = mesh.query_node_ptr(node3_id);
  if (!node3_ptr)
  {
    std::unique_ptr<Node> new_node =
        Node::build(Point(static_cast<Real>(i) / nx, static_cast<Real>(j + 1) / ny, 0), node3_id);

    new_node->set_unique_id(nx * ny + node3_id);
    new_node->processor_id() = pid;

    node3_ptr = mesh.add_node(std::move(new_node));
  }

  Elem * elem = new Quad4;
  elem->set_id(elem_id);
  elem->processor_id() = pid;
  elem->set_unique_id(elem_id);
  elem = mesh.add_elem(elem);
  elem->set_node(0, node0_ptr);
  elem->set_node(1, node1_ptr);
  elem->set_node(2, node2_ptr);
  elem->set_node(3, node3_ptr);

  // Bottom
  if (j == 0)
    boundary_info.add_side(elem, 0, 0);

  // Right
  if (i == nx - 1)
    boundary_info.add_side(elem, 1, 1);

  // Top
  if (j == ny - 1)
    boundary_info.add_side(elem, 2, 2);

  // Left
  if (i == 0)
    boundary_info.add_side(elem, 3, 3);
}

template <>
void
DistributedRectilinearMeshGenerator::setBoundaryNames<Quad4>(BoundaryInfo & boundary_info)
{
  boundary_info.sideset_name(0) = "bottom";
  boundary_info.sideset_name(1) = "right";
  boundary_info.sideset_name(2) = "top";
  boundary_info.sideset_name(3) = "left";
}

template <>
void
DistributedRectilinearMeshGenerator::scaleNodalPositions<Quad4>(dof_id_type /*nx*/,
                                                                dof_id_type /*ny*/,
                                                                dof_id_type /*nz*/,
                                                                Real xmin,
                                                                Real xmax,
                                                                Real ymin,
                                                                Real ymax,
                                                                Real /*zmin*/,
                                                                Real /*zmax*/,
                                                                MeshBase & mesh)
{
  for (auto & node_ptr : mesh.node_ptr_range())
  {
    (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
    (*node_ptr)(1) = (*node_ptr)(1) * (ymax - ymin) + ymin;
  }
}

template <>
dof_id_type
DistributedRectilinearMeshGenerator::elemId<Hex8>(const dof_id_type nx,
                                                  const dof_id_type ny,
                                                  const dof_id_type i,
                                                  const dof_id_type j,
                                                  const dof_id_type k)
{
  return i + (j * nx) + (k * nx * ny);
}

template <>
dof_id_type
DistributedRectilinearMeshGenerator::numNeighbors<Hex8>(const dof_id_type nx,
                                                        const dof_id_type ny,
                                                        const dof_id_type nz,
                                                        const dof_id_type i,
                                                        const dof_id_type j,
                                                        const dof_id_type k)
{
  dof_id_type n = 6;

  if (i == 0)
    n--;

  if (i == nx - 1)
    n--;

  if (j == 0)
    n--;

  if (j == ny - 1)
    n--;

  if (k == 0)
    n--;

  if (k == nz - 1)
    n--;

  return n;
}

template <>
void
DistributedRectilinearMeshGenerator::getNeighbors<Hex8>(const dof_id_type nx,
                                                        const dof_id_type ny,
                                                        const dof_id_type nz,
                                                        const dof_id_type i,
                                                        const dof_id_type j,
                                                        const dof_id_type k,
                                                        std::vector<dof_id_type> & neighbors,
                                                        const bool corner)
{
  std::fill(neighbors.begin(), neighbors.end(), Elem::invalid_id);

  if (corner)
  {
    // We collect one layer of point neighbors by default. In addition, the
    // elements on the opposite side of the current boundary are included in
    // case periodic boundary conditions are used. The overhead is negligible
    // since every element then has the same number of neighbors.
    unsigned int nnb = 0;
    for (unsigned int ii = 0; ii <= 2; ii++)
      for (unsigned int jj = 0; jj <= 2; jj++)
        for (unsigned int kk = 0; kk <= 2; kk++)
          neighbors[nnb++] = elemId<Hex8>(
              nx, ny, (i + ii - 1 + nx) % nx, (j + jj - 1 + ny) % ny, (k + kk - 1 + nz) % nz);

    return;
  }

  // Back
  if (k != 0)
    neighbors[0] = elemId<Hex8>(nx, ny, i, j, k - 1);

  // Bottom
  if (j != 0)
    neighbors[1] = elemId<Hex8>(nx, ny, i, j - 1, k);

  // Right
  if (i != nx - 1)
    neighbors[2] = elemId<Hex8>(nx, ny, i + 1, j, k);

  // Top
  if (j != ny - 1)
    neighbors[3] = elemId<Hex8>(nx, ny, i, j + 1, k);

  // Left
  if (i != 0)
    neighbors[4] = elemId<Hex8>(nx, ny, i - 1, j, k);

  // Front
  if (k != nz - 1)
    neighbors[5] = elemId<Hex8>(nx, ny, i, j, k + 1);
}

template <>
dof_id_type
DistributedRectilinearMeshGenerator::nodeId<Hex8>(const ElemType /*type*/,
                                                  const dof_id_type nx,
                                                  const dof_id_type ny,
                                                  const dof_id_type i,
                                                  const dof_id_type j,
                                                  const dof_id_type k)
{
  return i + (nx + 1) * (j + k * (ny + 1));
}

template <>
Node *
DistributedRectilinearMeshGenerator::addPoint<Hex8>(const dof_id_type nx,
                                                    const dof_id_type ny,
                                                    const dof_id_type nz,
                                                    const dof_id_type i,
                                                    const dof_id_type j,
                                                    const dof_id_type k,
                                                    const ElemType type,
                                                    MeshBase & mesh)
{
  auto id = nodeId<Hex8>(type, nx, ny, i, j, k);
  Node * node_ptr = mesh.query_node_ptr(id);
  if (!node_ptr)
  {
    std::unique_ptr<Node> new_node = Node::build(
        Point(static_cast<Real>(i) / nx, static_cast<Real>(j) / ny, static_cast<Real>(k) / nz), id);

    new_node->set_unique_id(nx * ny * nz + id);

    node_ptr = mesh.add_node(std::move(new_node));
  }

  return node_ptr;
}

template <>
void
DistributedRectilinearMeshGenerator::addElement<Hex8>(const dof_id_type nx,
                                                      const dof_id_type ny,
                                                      const dof_id_type nz,
                                                      const dof_id_type i,
                                                      const dof_id_type j,
                                                      const dof_id_type k,
                                                      const dof_id_type elem_id,
                                                      const processor_id_type pid,
                                                      const ElemType type,
                                                      MeshBase & mesh)
{
  BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // This ordering was picked to match the ordering in mesh_generation.C
  auto node0_ptr = addPoint<Hex8>(nx, ny, nz, i, j, k, type, mesh);
  node0_ptr->processor_id() = pid;
  auto node1_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j, k, type, mesh);
  node1_ptr->processor_id() = pid;
  auto node2_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j + 1, k, type, mesh);
  node2_ptr->processor_id() = pid;
  auto node3_ptr = addPoint<Hex8>(nx, ny, nz, i, j + 1, k, type, mesh);
  node3_ptr->processor_id() = pid;
  auto node4_ptr = addPoint<Hex8>(nx, ny, nz, i, j, k + 1, type, mesh);
  node4_ptr->processor_id() = pid;
  auto node5_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j, k + 1, type, mesh);
  node5_ptr->processor_id() = pid;
  auto node6_ptr = addPoint<Hex8>(nx, ny, nz, i + 1, j + 1, k + 1, type, mesh);
  node6_ptr->processor_id() = pid;
  auto node7_ptr = addPoint<Hex8>(nx, ny, nz, i, j + 1, k + 1, type, mesh);
  node7_ptr->processor_id() = pid;

  Elem * elem = new Hex8;
  elem->set_id(elem_id);
  elem->processor_id() = pid;
  elem->set_unique_id(elem_id);
  elem = mesh.add_elem(elem);
  elem->set_node(0, node0_ptr);
  elem->set_node(1, node1_ptr);
  elem->set_node(2, node2_ptr);
  elem->set_node(3, node3_ptr);
  elem->set_node(4, node4_ptr);
  elem->set_node(5, node5_ptr);
  elem->set_node(6, node6_ptr);
  elem->set_node(7, node7_ptr);

  if (k == 0)
    boundary_info.add_side(elem, 0, 0);

  if (k == (nz - 1))
    boundary_info.add_side(elem, 5, 5);

  if (j == 0)
    boundary_info.add_side(elem, 1, 1);

  if (j == (ny - 1))
    boundary_info.add_side(elem, 3, 3);

  if (i == 0)
    boundary_info.add_side(elem, 4, 4);

  if (i == (nx - 1))
    boundary_info.add_side(elem, 2, 2);
}

template <>
void
DistributedRectilinearMeshGenerator::getIndices<Hex8>(const dof_id_type nx,
                                                      const dof_id_type ny,
                                                      const dof_id_type elem_id,
                                                      dof_id_type & i,
                                                      dof_id_type & j,
                                                      dof_id_type & k)
{
  i = elem_id % nx;
  j = ((elem_id - i) / nx) % ny;
  k = ((elem_id - i) - (j * nx)) / (nx * ny);
}

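// Round-trip example (illustrative comment only): with nx = 4, ny = 3,
// elemId<Hex8>(4, 3, 1, 2, 2) == 1 + 2 * 4 + 2 * 4 * 3 == 33, and
// getIndices<Hex8>(4, 3, 33, i, j, k) recovers i == 1, j == 2, k == 2:
//   i = 33 % 4 = 1;  j = ((33 - 1) / 4) % 3 = 8 % 3 = 2;
//   k = ((33 - 1) - 2 * 4) / (4 * 3) = 24 / 12 = 2.
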
template <>
void
DistributedRectilinearMeshGenerator::getGhostNeighbors<Hex8>(
    const dof_id_type nx,
    const dof_id_type ny,
    const dof_id_type nz,
    const MeshBase & mesh,
    const std::set<dof_id_type> & current_elems,
    std::set<dof_id_type> & ghost_elems)
{
  dof_id_type i, j, k;

  std::vector<dof_id_type> neighbors(27);

  for (auto elem_id : current_elems)
  {
    getIndices<Hex8>(nx, ny, elem_id, i, j, k);

    getNeighbors<Hex8>(nx, ny, nz, i, j, k, neighbors, true);

    for (auto neighbor : neighbors)
      if (neighbor != Elem::invalid_id && !mesh.query_elem_ptr(neighbor))
        ghost_elems.insert(neighbor);
  }
}

template <>
void
DistributedRectilinearMeshGenerator::setBoundaryNames<Hex8>(BoundaryInfo & boundary_info)
{
  boundary_info.sideset_name(0) = "back";
  boundary_info.sideset_name(1) = "bottom";
  boundary_info.sideset_name(2) = "right";
  boundary_info.sideset_name(3) = "top";
  boundary_info.sideset_name(4) = "left";
  boundary_info.sideset_name(5) = "front";
}

template <>
void
DistributedRectilinearMeshGenerator::scaleNodalPositions<Hex8>(dof_id_type /*nx*/,
                                                               dof_id_type /*ny*/,
                                                               dof_id_type /*nz*/,
                                                               Real xmin,
                                                               Real xmax,
                                                               Real ymin,
                                                               Real ymax,
                                                               Real zmin,
                                                               Real zmax,
                                                               MeshBase & mesh)
{
  for (auto & node_ptr : mesh.node_ptr_range())
  {
    (*node_ptr)(0) = (*node_ptr)(0) * (xmax - xmin) + xmin;
    (*node_ptr)(1) = (*node_ptr)(1) * (ymax - ymin) + ymin;
    (*node_ptr)(2) = (*node_ptr)(2) * (zmax - zmin) + zmin;
  }
}

template <>
void
DistributedRectilinearMeshGenerator::paritionSquarely<Edge2>(const dof_id_type nx,
                                                             const dof_id_type /*ny*/,
                                                             const dof_id_type /*nz*/,
                                                             const processor_id_type num_procs,
                                                             std::vector<dof_id_type> & istarts,
                                                             std::vector<dof_id_type> & jstarts,
                                                             std::vector<dof_id_type> & kstarts)
{
  // Starting indices along the x direction
  istarts.resize(num_procs + 1);
  // Starting indices along the y direction.
  // There is only one processor along the y direction since this is a 1D mesh
  jstarts.resize(2);
  jstarts[0] = 0;
  jstarts[1] = 1;
  // Starting indices along the z direction.
  // There is only one processor along the z direction since this is a 1D mesh
  kstarts.resize(2);
  kstarts[0] = 0;
  kstarts[1] = 1;

  istarts[0] = 0;
  for (processor_id_type pid = 0; pid < num_procs; pid++)
    // Partition the mesh evenly; the extra elements are assigned to the front processors
    istarts[pid + 1] = istarts[pid] + (nx / num_procs + ((nx % num_procs) > pid));
}

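// Worked example (illustrative comment only): nx = 10 elements on
// num_procs = 4 gives chunk sizes 3, 3, 2, 2 (the nx % num_procs = 2 extra
// elements go to the front processors), i.e. istarts = {0, 3, 6, 8, 10}.
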
template <>
void
DistributedRectilinearMeshGenerator::paritionSquarely<Quad4>(const dof_id_type nx,
                                                             const dof_id_type ny,
                                                             const dof_id_type /*nz*/,
                                                             const processor_id_type num_procs,
                                                             std::vector<dof_id_type> & istarts,
                                                             std::vector<dof_id_type> & jstarts,
                                                             std::vector<dof_id_type> & kstarts)
{
  // Try for a squarish distribution.
  // The number of processors along the x direction
  processor_id_type px =
      (processor_id_type)(0.5 + std::sqrt(((Real)nx) * ((Real)num_procs) / ((Real)ny)));

  if (!px)
    px = 1;

  // The number of processors along the y direction
  processor_id_type py = 1;
  // Factor num_procs into px times py
  while (px > 0)
  {
    py = num_procs / px;
    if (py * px == num_procs)
      break;
    px--;
  }
  // More processors are needed for the denser side
  if (nx > ny && py < px)
    std::swap(px, py);

  // Starting indices along the x direction
  istarts.resize(px + 1);
  // Starting indices along the y direction
  jstarts.resize(py + 1);
  // Starting indices along the z direction
  kstarts.resize(2);
  // There are no elements along the z direction
  kstarts[0] = 0;
  kstarts[1] = 1;

  istarts[0] = 0;
  for (processor_id_type pxid = 0; pxid < px; pxid++)
    // Partition the elements evenly along the x direction
    istarts[pxid + 1] = istarts[pxid] + nx / px + ((nx % px) > pxid);

  jstarts[0] = 0;
  for (processor_id_type pyid = 0; pyid < py; pyid++)
    // Partition the elements evenly along the y direction
    jstarts[pyid + 1] = jstarts[pyid] + (ny / py + ((ny % py) > pyid));
}

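// Worked example (illustrative comment only): nx = 10, ny = 40, num_procs = 8.
// The initial guess is px = (processor_id_type)(0.5 + sqrt(10 * 8 / 40.)) = 1,
// and the factorization loop keeps px = 1, py = 8. Since nx < ny no swap
// happens, so the grid is split into 1 column and 8 rows:
//   istarts = {0, 10},  jstarts = {0, 5, 10, 15, 20, 25, 30, 35, 40}.
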
template <>
void
DistributedRectilinearMeshGenerator::paritionSquarely<Hex8>(const dof_id_type nx,
                                                            const dof_id_type ny,
                                                            const dof_id_type nz,
                                                            const processor_id_type num_procs,
                                                            std::vector<dof_id_type> & istarts,
                                                            std::vector<dof_id_type> & jstarts,
                                                            std::vector<dof_id_type> & kstarts)
{
  // Try for a squarish distribution.
  // The number of processors along the y direction
  processor_id_type py =
      (processor_id_type)(0.5 + std::pow(((Real)ny * ny) * ((Real)num_procs) / ((Real)nz * nx),
                                         (Real)(1. / 3.)));
  if (!py)
    py = 1;
  // The number of processors for the px-pz plane
  processor_id_type pxpz = 1;
  // Factor num_procs into py times pxpz
  while (py > 0)
  {
    pxpz = num_procs / py;
    if (py * pxpz == num_procs)
      break;
    py--;
  }

  if (!py)
    py = 1;

  // The number of processors along the x direction
  processor_id_type px =
      (processor_id_type)(0.5 + std::sqrt(((Real)nx) * ((Real)num_procs) / ((Real)nz * py)));
  if (!px)
    px = 1;
  // The number of processors along the z direction
  processor_id_type pz = 1;
  // Factor num_procs into px times py times pz
  while (px > 0)
  {
    pz = num_procs / (px * py);
    if (px * py * pz == num_procs)
      break;
    px--;
  }
  // The denser direction takes more processors
  if (nx > nz && px < pz)
    std::swap(px, pz);

  istarts.resize(px + 1);
  jstarts.resize(py + 1);
  kstarts.resize(pz + 1);

  istarts[0] = 0;
  for (processor_id_type pxid = 0; pxid < px; pxid++)
    // Partition the mesh evenly along the x direction
    istarts[pxid + 1] = istarts[pxid] + nx / px + ((nx % px) > pxid);

  jstarts[0] = 0;
  for (processor_id_type pyid = 0; pyid < py; pyid++)
    // Partition the mesh evenly along the y direction
    jstarts[pyid + 1] = jstarts[pyid] + (ny / py + ((ny % py) > pyid));

  kstarts[0] = 0;
  for (processor_id_type pzid = 0; pzid < pz; pzid++)
    // Partition the mesh evenly along the z direction
    kstarts[pzid + 1] = kstarts[pzid] + (nz / pz + ((nz % pz) > pzid));
}

template <typename T>
void
DistributedRectilinearMeshGenerator::buildCube(UnstructuredMesh & mesh,
                                               const unsigned int nx,
                                               unsigned int ny,
                                               unsigned int nz,
                                               const Real xmin,
                                               const Real xmax,
                                               const Real ymin,
                                               const Real ymax,
                                               const Real zmin,
                                               const Real zmax,
                                               const ElemType type)
{
  auto & comm = mesh.comm();

  dof_id_type num_elems = nx * ny * nz;

  const auto num_procs = comm.size();
  // Current processor ID
  const auto pid = comm.rank();

  if (_num_cores_for_partition > num_procs)
    mooseError("Number of cores for the graph partitioner is too large: ", _num_cores_for_partition);

  if (!_num_cores_for_partition)
    _num_cores_for_partition = num_procs;

  auto & boundary_info = mesh.get_boundary_info();

  std::unique_ptr<Elem> canonical_elem = std::make_unique<T>();

  // Will get used to find the neighbors of an element
  std::vector<dof_id_type> neighbors(canonical_elem->n_neighbors());
  // Number of neighbors
  dof_id_type n_neighbors = canonical_elem->n_neighbors();

  // "Partition" the elements linearly across the processors
  dof_id_type num_local_elems;
  dof_id_type local_elems_begin;
  dof_id_type local_elems_end;
  if (pid < _num_cores_for_partition)
    MooseUtils::linearPartitionItems(num_elems,
                                     _num_cores_for_partition,
                                     pid,
                                     num_local_elems,
                                     local_elems_begin,
                                     local_elems_end);
  else
  {
    num_local_elems = 0;
    local_elems_begin = 0;
    local_elems_end = 0;
  }

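  // For example (illustrative comment only), num_elems = 10 split across
  // _num_cores_for_partition = 4 ranks yields the front-loaded local ranges
  //   pid 0: [0, 3),  pid 1: [3, 6),  pid 2: [6, 8),  pid 3: [8, 10)
  // so every element belongs to exactly one rank of the initial linear layout.
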
  std::vector<std::vector<dof_id_type>> graph;

  // Fill in xadj and adjncy:
  // xadj is the offset into adjncy, and
  // adjncy holds the face neighbors of each element on this processor
  graph.resize(num_local_elems);
  // Build a distributed graph
  num_local_elems = 0;
  for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
  {
    dof_id_type i = 0, j = 0, k = 0;

    getIndices<T>(nx, ny, e_id, i, j, k);

    getNeighbors<T>(nx, ny, nz, i, j, k, neighbors, false);

    std::vector<dof_id_type> & row = graph[num_local_elems++];
    row.reserve(n_neighbors);

    for (auto neighbor : neighbors)
      if (neighbor != Elem::invalid_id)
        row.push_back(neighbor);
  }

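  // For a 2x2 Quad4 mesh (illustrative comment only) the distributed dual
  // graph built above contains the face neighbors of each local element:
  //   element 0 -> {1, 2}   element 1 -> {0, 3}
  //   element 2 -> {0, 3}   element 3 -> {1, 2}
  // This is the adjacency (xadj/adjncy style) handed to the partitioner below.
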
  // Partition the distributed graph
  std::vector<dof_id_type> partition_vec;
  if (_partition_method == "linear")
  {
    mooseWarning("LinearPartitioner is mainly used for setting up regression tests. Please do "
                 "not use it for production runs.");
    // The graph is already partitioned linearly by the earlier call to
    // MooseUtils::linearPartitionItems, so we use that partition as is
    partition_vec.resize(num_local_elems);
    std::fill(partition_vec.begin(), partition_vec.end(), pid);
  }
  else if (_partition_method == "square")
  {
    // Starting partition indices along the x direction
    std::vector<dof_id_type> istarts;
    // Starting partition indices along the y direction
    std::vector<dof_id_type> jstarts;
    // Starting partition indices along the z direction
    std::vector<dof_id_type> kstarts;
    partition_vec.resize(num_local_elems);

    // Partition the mesh evenly along each direction
    paritionSquarely<T>(nx, ny, nz, num_procs, istarts, jstarts, kstarts);
    // At least one processor
    mooseAssert(istarts.size() > 1, "There is at least one processor along the x direction");
    processor_id_type px = istarts.size() - 1;
    // At least one processor
    mooseAssert(jstarts.size() > 1, "There is at least one processor along the y direction");
    processor_id_type py = jstarts.size() - 1;
    // At least one processor
    mooseAssert(kstarts.size() > 1, "There is at least one processor along the z direction");
    // processor_id_type pz = kstarts.size() - 1;
    for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
    {
      dof_id_type i = 0, j = 0, k = 0;
      getIndices<T>(nx, ny, e_id, i, j, k);
      processor_id_type pi = 0, pj = 0, pk = 0;

      pi = (std::upper_bound(istarts.begin(), istarts.end(), i) - istarts.begin()) - 1;
      pj = (std::upper_bound(jstarts.begin(), jstarts.end(), j) - jstarts.begin()) - 1;
      pk = (std::upper_bound(kstarts.begin(), kstarts.end(), k) - kstarts.begin()) - 1;

      partition_vec[e_id - local_elems_begin] = pk * px * py + pj * px + pi;

      mooseAssert((pk * px * py + pj * px + pi) < num_procs, "processor id is too large");
    }
  }
  else if (_partition_method == "graph")
    PetscExternalPartitioner::partitionGraph(
        comm, graph, {}, {}, num_procs, _num_parts_per_compute_node, _part_package, partition_vec);
  else
    mooseError("Unsupported partition method " + _partition_method);

  mooseAssert(partition_vec.size() == num_local_elems, "Invalid partition was generated");

  // Push the current elements to remote processors according to the partition
  std::map<processor_id_type, std::vector<dof_id_type>> pushed_elements_vecs;

  for (dof_id_type e_id = local_elems_begin; e_id < local_elems_end; e_id++)
    pushed_elements_vecs[partition_vec[e_id - local_elems_begin]].push_back(e_id);

  // Collect the new elements this processor should own
  std::vector<dof_id_type> my_new_elems;

  auto elements_action_functor =
      [&my_new_elems](processor_id_type /*pid*/, const std::vector<dof_id_type> & data)
  { std::copy(data.begin(), data.end(), std::back_inserter(my_new_elems)); };

  Parallel::push_parallel_vector_data(comm, pushed_elements_vecs, elements_action_functor);

  // Add the elements this processor owns
  for (auto e_id : my_new_elems)
  {
    dof_id_type i = 0, j = 0, k = 0;

    getIndices<T>(nx, ny, e_id, i, j, k);

    addElement<T>(nx, ny, nz, i, j, k, e_id, pid, type, mesh);
  }

  // Need to link up the local elements before we can know what's missing
  mesh.find_neighbors();

  // Get the ghosts (missing face neighbors)
  std::set<dof_id_type> ghost_elems;
  // Current local elements
  std::set<dof_id_type> current_elems;

  // Fill current_elems; we will grow the domain from the current elements
  for (auto & elem_ptr : mesh.element_ptr_range())
    current_elems.insert(elem_ptr->id());

  // Grow the domain layer by layer
  for (unsigned layer = 0; layer < _num_side_layers; layer++)
  {
    // getGhostNeighbors produces one layer of side neighbors
    getGhostNeighbors<T>(nx, ny, nz, mesh, current_elems, ghost_elems);
    // Merge the ghost elements into the current element list
    current_elems.insert(ghost_elems.begin(), ghost_elems.end());
  }
  // We do not need it anymore
  current_elems.clear();

  // Elements we're going to request from others
  std::map<processor_id_type, std::vector<dof_id_type>> ghost_elems_to_request;

  for (auto & ghost_id : ghost_elems)
  {
    // This is the processor ID the ghost elem was originally assigned to
    auto proc_id = MooseUtils::linearPartitionChunk(num_elems, _num_cores_for_partition, ghost_id);

    // Using side-effect insertion on purpose
    ghost_elems_to_request[proc_id].push_back(ghost_id);
  }

  // Next, fetch the owning processor ids of the ghost elements from the ranks
  // that hold them in the initial linear partition
  auto gather_functor =
      [local_elems_begin, partition_vec](processor_id_type /*pid*/,
                                         const std::vector<dof_id_type> & coming_ghost_elems,
                                         std::vector<dof_id_type> & pid_for_ghost_elems)
  {
    auto num_ghost_elems = coming_ghost_elems.size();
    pid_for_ghost_elems.resize(num_ghost_elems);

    dof_id_type num_local_elems = 0;

    for (auto elem : coming_ghost_elems)
      pid_for_ghost_elems[num_local_elems++] = partition_vec[elem - local_elems_begin];
  };

  std::unordered_map<dof_id_type, processor_id_type> ghost_elem_to_pid;

  auto action_functor =
      [&ghost_elem_to_pid](processor_id_type /*pid*/,
                           const std::vector<dof_id_type> & my_ghost_elems,
                           const std::vector<dof_id_type> & pid_for_my_ghost_elems)
  {
    dof_id_type num_local_elems = 0;

    for (auto elem : my_ghost_elems)
      ghost_elem_to_pid[elem] = pid_for_my_ghost_elems[num_local_elems++];
  };

  const dof_id_type * ex = nullptr;
  libMesh::Parallel::pull_parallel_vector_data(
      comm, ghost_elems_to_request, gather_functor, action_functor, ex);

  // Add the ghost elements to the mesh
  for (auto gtop : ghost_elem_to_pid)
  {
    auto ghost_id = gtop.first;
    auto proc_id = gtop.second;

    dof_id_type i = 0, j = 0, k = 0;

    getIndices<T>(nx, ny, ghost_id, i, j, k);

    addElement<T>(nx, ny, nz, i, j, k, ghost_id, proc_id, type, mesh);
  }

  mesh.find_neighbors(true);

  // Set RemoteElem neighbors
  for (auto & elem_ptr : mesh.element_ptr_range())
    for (unsigned int s = 0; s < elem_ptr->n_sides(); s++)
      if (!elem_ptr->neighbor_ptr(s) && !boundary_info.n_boundary_ids(elem_ptr, s))
        elem_ptr->set_neighbor(s, const_cast<RemoteElem *>(remote_elem));

  setBoundaryNames<T>(boundary_info);

  Partitioner::set_node_processor_ids(mesh);

  // Already partitioned!
  mesh.skip_partitioning(true);

  // Scale the nodal positions
  scaleNodalPositions<T>(nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax, mesh);
}

std::unique_ptr<MeshBase>
DistributedRectilinearMeshGenerator::generate()
{
  // We will set up boundaries accordingly. We do not want to call
  // ghostGhostedBoundaries, in which allgather_packed_range is unscalable:
  // ghostGhostedBoundaries would gather all boundaries to every single processor
  _mesh->needGhostGhostedBoundaries(false);

  // DistributedRectilinearMeshGenerator always generates a distributed mesh
  auto mesh = buildDistributedMesh();

  MooseEnum elem_type_enum = getParam<MooseEnum>("elem_type");

  if (!isParamValid("elem_type"))
  {
    // Switching on MooseEnum
    switch (_dim)
    {
      case 1:
        elem_type_enum = "EDGE2";
        break;
      case 2:
        elem_type_enum = "QUAD4";
        break;
      case 3:
        elem_type_enum = "HEX8";
        break;
    }
  }

  _elem_type = Utility::string_to_enum<ElemType>(elem_type_enum);

  mesh->set_mesh_dimension(_dim);
  mesh->set_spatial_dimension(_dim);
  // Let the mesh know that it's not serialized
  mesh->set_distributed();

  // Switching on MooseEnum
  switch (_dim)
  {
    // The build_XYZ mesh generation functions take an
    // UnstructuredMesh& as the first argument, hence the dynamic_cast.
    case 1:
      buildCube<Edge2>(
          dynamic_cast<UnstructuredMesh &>(*mesh), _nx, 1, 1, _xmin, _xmax, 0, 0, 0, 0, _elem_type);
      break;
    case 2:
      buildCube<Quad4>(dynamic_cast<UnstructuredMesh &>(*mesh),
                       _nx,
                       _ny,
                       1,
                       _xmin,
                       _xmax,
                       _ymin,
                       _ymax,
                       0,
                       0,
                       _elem_type);
      break;
    case 3:
      buildCube<Hex8>(dynamic_cast<UnstructuredMesh &>(*mesh),
                      _nx,
                      _ny,
                      _nz,
                      _xmin,
                      _xmax,
                      _ymin,
                      _ymax,
                      _zmin,
                      _zmax,
                      _elem_type);
      break;
    default:
      mooseError(
          getParam<MooseEnum>("elem_type"),
          " is not a currently supported element type for DistributedRectilinearMeshGenerator");
  }

  // Apply the bias if any exists
  if (_bias_x != 1.0 || _bias_y != 1.0 || _bias_z != 1.0)
  {
    // Biases
    Real bias[3] = {_bias_x, _bias_y, _bias_z};

    // "Width" of the mesh in each direction
    Real width[3] = {_xmax - _xmin, _ymax - _ymin, _zmax - _zmin};

    // Min mesh extent in each direction
    Real mins[3] = {_xmin, _ymin, _zmin};

    // Number of elements in each direction
    dof_id_type nelem[3] = {_nx, _ny, _nz};

    // We will need the biases raised to integer powers in each
    // direction, so let's pre-compute those...
    std::vector<std::vector<Real>> pows(LIBMESH_DIM);
    for (dof_id_type dir = 0; dir < LIBMESH_DIM; ++dir)
    {
      pows[dir].resize(nelem[dir] + 1);
      for (dof_id_type i = 0; i < pows[dir].size(); ++i)
        pows[dir][i] = std::pow(bias[dir], static_cast<int>(i));
    }

    // Loop over the nodes and move them to the desired location
    for (auto & node_ptr : mesh->node_ptr_range())
    {
      Node & node = *node_ptr;

      for (dof_id_type dir = 0; dir < LIBMESH_DIM; ++dir)
      {
        if (width[dir] != 0. && bias[dir] != 1.)
        {
          // Compute the scaled "index" of the current point. This
          // will either be close to a whole integer or a whole
          // integer+0.5 for quadratic nodes.
          Real float_index = (node(dir) - mins[dir]) * nelem[dir] / width[dir];

          Real integer_part = 0;
          Real fractional_part = std::modf(float_index, &integer_part);

          // Figure out where to move the node...
          if (std::abs(fractional_part) < TOLERANCE || std::abs(fractional_part - 1.0) < TOLERANCE)
          {
            // If the fractional_part ~ 0.0 or 1.0, this is a vertex node, so
            // we don't need an average.
            //
            // Compute the "index" we are at in the current direction. We
            // round to the nearest integral value to get this instead
            // of using "integer_part", since that could be off by a
            // lot (e.g. we want 3.9999 to map to 4.0 instead of 3.0).
            int index = round(float_index);

            // Move the node to the biased location.
            node(dir) =
                mins[dir] + width[dir] * (1. - pows[dir][index]) / (1. - pows[dir][nelem[dir]]);
          }
          else if (std::abs(fractional_part - 0.5) < TOLERANCE)
          {
            // If the fractional_part ~ 0.5, this is a midedge/face
            // (i.e. quadratic) node. We don't move those with the same
            // bias as the vertices, instead we put them midway between
            // their respective vertices.
            //
            // Also, since the fractional part is nearly 0.5, we know that
            // integer_part will be the index of the vertex to the
            // left, and integer_part+1 will be the index of the
            // vertex to the right.
            node(dir) = mins[dir] +
                        width[dir] *
                            (1. - 0.5 * (pows[dir][integer_part] + pows[dir][integer_part + 1])) /
                            (1. - pows[dir][nelem[dir]]);
          }
          else
          {
            // We don't yet handle anything higher order than quadratic...
            mooseError("Unable to bias node at node(", dir, ")=", node(dir));
          }
        }
      }
    }
  }

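  // Worked example of the geometric bias (illustrative comment only): for
  // nx = 3 elements with bias_x = 2 on [0, 1], pows = {1, 2, 4, 8} and vertex
  // i moves to (1 - pows[i]) / (1 - pows[3]) = (1 - 2^i) / (1 - 8), giving
  // nodes at 0, 1/7, 3/7, 1. Each element is therefore twice as wide as its
  // left neighbor: widths 1/7, 2/7, 4/7.
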
  // MeshOutput<MT>::write_equation_systems will automatically renumber node and element IDs,
  // so we have to make them consistent in the first place. Otherwise, MOOSE cached data such as
  // _block_node_list would become inconsistent when calling MooseMesh::getNodeBlockIds: MOOSE
  // would pass the new IDs to getNodeBlockIds while the cached _block_node_list still used the
  // old node IDs. One could instead do a mesh update here, but the mesh does not change, so
  // there is no point in updating anything.
  mesh->allow_renumbering(true);
  mesh->prepare_for_use();

  return dynamic_pointer_cast<MeshBase>(mesh);
}