PetscExternalPartitioner.C
//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "PetscExternalPartitioner.h"

#include "GeneratedMesh.h"
#include "MooseApp.h"

#include "libmesh/mesh_tools.h"
#include "libmesh/linear_partitioner.h"
#include "libmesh/elem.h"
#include "libmesh/mesh_base.h"
#include "libmesh/petsc_solver_exception.h"

using namespace libMesh;

registerMooseObject("MooseApp", PetscExternalPartitioner);

#include <memory>

InputParameters
PetscExternalPartitioner::validParams()
{
  InputParameters params = MoosePartitioner::validParams();

  MooseEnum partPackage("parmetis ptscotch chaco party hierarch", "parmetis", false);

  params.addParam<MooseEnum>("part_package",
                             partPackage,
                             "The external package used for partitioning the mesh via PETSc");

  params.addParam<processor_id_type>(
      "num_cores_per_compute_node",
      1,
      "Number of cores per compute node for hierarchical partitioning");

  params.addParam<bool>("apply_element_weight",
                        false,
                        "Indicate if we are going to apply element weights to partitioners");

  params.addParam<bool>(
      "apply_side_weight", false, "Indicate if we are going to apply side weights to partitioners");

  params.addClassDescription(
      "Partition mesh using external packages via PETSc MatPartitioning interface");

  return params;
}

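For context, a minimal sketch of how these parameters might be set from a MOOSE input file; the [Mesh] settings here are illustrative, not taken from this source:

[Mesh]
  type = GeneratedMesh
  dim = 2
  nx = 100
  ny = 100
  [Partitioner]
    type = PetscExternalPartitioner
    part_package = ptscotch
    apply_element_weight = true
  []
[]
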
PetscExternalPartitioner::PetscExternalPartitioner(const InputParameters & params)
  : MoosePartitioner(params),
    _part_package(params.get<MooseEnum>("part_package")),
    _apply_element_weight(params.get<bool>("apply_element_weight")),
    _apply_side_weight(params.get<bool>("apply_side_weight")),
    _num_parts_per_compute_node(params.get<processor_id_type>("num_cores_per_compute_node"))
{
  if ((_apply_element_weight || _apply_side_weight) &&
      (_part_package == "chaco" || _part_package == "party"))
    mooseError(_part_package, " does not support weighted graphs");
}

std::unique_ptr<Partitioner>
PetscExternalPartitioner::clone() const
{
  return _app.getFactory().clone(*this);
}

void
PetscExternalPartitioner::preLinearPartition(MeshBase & mesh)
{
  // Temporarily cache the old partitioner
  auto old_partitioner = std::move(mesh.partitioner());
  // Create a linear partitioner
  mesh.partitioner() = std::make_unique<LinearPartitioner>();
  // Partition the mesh
  mesh.partition(n_processors());
  // Restore the old partitioner
  mesh.partitioner() = std::move(old_partitioner);
}

void
PetscExternalPartitioner::partition(MeshBase & mesh, const unsigned int n_parts)
{
  // We want to use a parallel partitioner that requires a distributed graph.
  // Simply calling a linear partitioner provides us the distributed graph.
  // We should not do anything when using a distributed mesh since the mesh itself
  // is already distributed.
  // When n_parts = 1, we do not need to run any partitioner; instead, let libMesh
  // handle this.
  if (mesh.is_replicated() && n_parts > 1)
    preLinearPartition(mesh);

  if (!isParamSetByUser("part_package") && (mesh.n_elem() / n_parts < 28) &&
      _part_package == "parmetis")
  {
    Moose::out
        << "Average number of elements per partition (" << mesh.n_elem() / n_parts
        << ") is less than 28. We are switching from ParMETIS to PTScotch for the partitioning."
        << std::endl;
    _part_package = "ptscotch";
  }

  Partitioner::partition(mesh, n_parts);
}

void
PetscExternalPartitioner::_do_partition(MeshBase & mesh, const unsigned int n_parts)
{
  initialize(mesh);

  dof_id_type num_edges, num_local_elems, local_elem_id, nj, side;
  std::vector<dof_id_type> side_weights;
  std::vector<dof_id_type> elem_weights;

  // Call libMesh to build the dual graph of the mesh
  build_graph(mesh);
  num_local_elems = _dual_graph.size();

  elem_weights.clear();
  if (_apply_element_weight)
    elem_weights.resize(num_local_elems);

  num_edges = 0;
  // Compute element weights
  for (dof_id_type k = 0; k < num_local_elems; k++)
  {
    num_edges += _dual_graph[k].size();
    if (_apply_element_weight)
    {
      // Get the original element
      mooseAssert(k < static_cast<dof_id_type>(_local_id_to_elem.size()),
                  "Local element id " << k << " is not smaller than " << _local_id_to_elem.size());
      auto elem = _local_id_to_elem[k];

      elem_weights[k] = computeElementWeight(*elem);
    }
  }

  side_weights.clear();
  // Edge weights represent the communication cost
  if (_apply_side_weight)
    side_weights.resize(num_edges);

  local_elem_id = 0;
  nj = 0;
  for (auto & row : _dual_graph)
  {
    mooseAssert(local_elem_id < static_cast<dof_id_type>(_local_id_to_elem.size()),
                "Local element id " << local_elem_id << " is not smaller than "
                                    << _local_id_to_elem.size());
    auto elem = _local_id_to_elem[local_elem_id];
    unsigned int n_neighbors = 0;

    side = 0;
    for (auto neighbor : elem->neighbor_ptr_range())
    {
      // Skip boundary sides since they do not connect to anything
      if (neighbor != nullptr && neighbor->active())
      {
        if (_apply_side_weight)
          side_weights[nj] = computeSideWeight(*elem, side);

        nj++;
        n_neighbors++;
      }

      side++;
    }
    if (n_neighbors != row.size())
      mooseError(
          "Cannot construct dual graph correctly since the number of neighbors is inconsistent");

    local_elem_id++;
  }

  std::vector<dof_id_type> partition;
  // Partition the graph
  partitionGraph(comm(),
                 _dual_graph,
                 elem_weights,
                 side_weights,
                 n_parts,
                 _num_parts_per_compute_node,
                 _part_package,
                 partition);
  // Assign the partition to the mesh
  assign_partitioning(mesh, partition);
}

void
PetscExternalPartitioner::partitionGraph(const Parallel::Communicator & comm,
                                         const std::vector<std::vector<dof_id_type>> & graph,
                                         const std::vector<dof_id_type> & elem_weights,
                                         const std::vector<dof_id_type> & side_weights,
                                         const dof_id_type num_parts,
                                         const dof_id_type num_parts_per_compute_node,
                                         const std::string & part_package,
                                         std::vector<dof_id_type> & partition)
{
  Mat dual;
  PetscInt num_local_elems, num_elems, *xadj = nullptr, *adjncy = nullptr, i, *values = nullptr,
      *petsc_elem_weights = nullptr;
  const PetscInt * parts;
  MatPartitioning part;
  IS is;

  // Number of local elements
  num_elems = num_local_elems = graph.size();
  // Figure out the total number of elements
  comm.sum(num_elems);

  LibmeshPetscCallA(comm.get(), PetscCalloc1(num_local_elems + 1, &xadj));

  num_local_elems = 0;
  xadj[0] = 0;
  for (auto & row : graph)
  {
    num_local_elems++;
    xadj[num_local_elems] = xadj[num_local_elems - 1] + row.size();
  }

  LibmeshPetscCallA(comm.get(), PetscCalloc1(xadj[num_local_elems], &adjncy));

  // Fill up the adjacency list
  i = 0;
  for (auto & row : graph)
    for (auto elem : row)
      adjncy[i++] = elem;

  // If there are no neighbors at all, no side weights should be provided
  if (!i)
  {
    mooseAssert(!side_weights.size(),
                "No side weights should be provided since there are no neighbors at all");
  }

  // Copy over weights
  if (side_weights.size())
  {
    mooseAssert((PetscInt)side_weights.size() == i,
                "Side weight size " << side_weights.size()
                                    << " does not match with adjacency matrix size " << i);
    LibmeshPetscCallA(comm.get(), PetscCalloc1(side_weights.size(), &values));
    i = 0;
    for (auto weight : side_weights)
      values[i++] = weight;
  }

  LibmeshPetscCallA(
      comm.get(),
      MatCreateMPIAdj(comm.get(), num_local_elems, num_elems, xadj, adjncy, values, &dual));

  LibmeshPetscCallA(comm.get(), MatPartitioningCreate(comm.get(), &part));
#if !PETSC_VERSION_LESS_THAN(3, 12, 3)
  LibmeshPetscCallA(comm.get(), MatPartitioningSetUseEdgeWeights(part, PETSC_TRUE));
#endif
  LibmeshPetscCallA(comm.get(), MatPartitioningSetAdjacency(part, dual));

  if (!num_local_elems)
  {
    mooseAssert(!elem_weights.size(),
                "No element weights should be provided since there are no elements at all");
  }

  // Handle element weights
  if (elem_weights.size())
  {
    mooseAssert((PetscInt)elem_weights.size() == num_local_elems,
                "Element weight size " << elem_weights.size()
                                       << " does not match with the number of local elements "
                                       << num_local_elems);

    LibmeshPetscCallA(comm.get(), PetscCalloc1(elem_weights.size(), &petsc_elem_weights));
    i = 0;
    for (auto weight : elem_weights)
      petsc_elem_weights[i++] = weight;

    LibmeshPetscCallA(comm.get(), MatPartitioningSetVertexWeights(part, petsc_elem_weights));
  }

  LibmeshPetscCallA(comm.get(), MatPartitioningSetNParts(part, num_parts));
#if PETSC_VERSION_LESS_THAN(3, 9, 2)
  mooseAssert(part_package != "party", "PETSc-3.9.2 or higher is required for using party");
#endif
#if PETSC_VERSION_LESS_THAN(3, 9, 0)
  mooseAssert(part_package != "chaco", "PETSc-3.9.0 or higher is required for using chaco");
#endif
  LibmeshPetscCallA(comm.get(), MatPartitioningSetType(part, part_package.c_str()));
  if (part_package == "hierarch")
    LibmeshPetscCallA(comm.get(),
                      MatPartitioningHierarchicalSetNfineparts(part, num_parts_per_compute_node));

  LibmeshPetscCallA(comm.get(), MatPartitioningSetFromOptions(part));
  LibmeshPetscCallA(comm.get(), MatPartitioningApply(part, &is));

  LibmeshPetscCallA(comm.get(), ISGetIndices(is, &parts));

  partition.resize(num_local_elems);
  for (i = 0; i < num_local_elems; i++)
    partition[i] = parts[i];

  LibmeshPetscCallA(comm.get(), ISRestoreIndices(is, &parts));
  LibmeshPetscCallA(comm.get(), MatPartitioningDestroy(&part));
  LibmeshPetscCallA(comm.get(), MatDestroy(&dual));
  LibmeshPetscCallA(comm.get(), ISDestroy(&is));
}

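As a usage illustration, a sketch of driving partitionGraph() directly on a hand-built dual graph. It assumes an initialized MPI/PETSc environment (e.g. inside a running MooseApp); the helper name exampleDirectPartition is hypothetical, not part of MOOSE:

#include "PetscExternalPartitioner.h"

#include <vector>

void
exampleDirectPartition(const libMesh::Parallel::Communicator & comm)
{
  // A 4-element chain: element i is adjacent to elements i-1 and i+1
  std::vector<std::vector<dof_id_type>> graph = {{1}, {0, 2}, {1, 3}, {2}};
  std::vector<dof_id_type> elem_weights; // empty: unweighted vertices
  std::vector<dof_id_type> side_weights; // empty: unweighted edges
  std::vector<dof_id_type> partition;

  // Split the chain into 2 parts with ParMETIS, 1 core per compute node
  PetscExternalPartitioner::partitionGraph(
      comm, graph, elem_weights, side_weights, 2, 1, "parmetis", partition);

  // partition[i] now holds the destination part id for local element i
}
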
dof_id_type
PetscExternalPartitioner::computeElementWeight(Elem & /*elem*/)
{
  return 1;
}

dof_id_type
PetscExternalPartitioner::computeSideWeight(Elem & /*elem*/, unsigned int /*side*/)
{
  return 1;
}
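
Both weight hooks default to 1, so weighting is a no-op unless a subclass overrides them and the apply_element_weight / apply_side_weight parameters are set to true. A hypothetical sketch of such a subclass (the name WeightedPatchPartitioner is illustrative; its registerMooseObject / validParams boilerplate is omitted):

class WeightedPatchPartitioner : public PetscExternalPartitioner
{
public:
  WeightedPatchPartitioner(const InputParameters & params) : PetscExternalPartitioner(params) {}

  // Weight each element by its node count so higher-order elements
  // contribute more to the load balance
  virtual dof_id_type computeElementWeight(Elem & elem) override { return elem.n_nodes(); }

  // Keep the communication (edge) weights uniform
  virtual dof_id_type computeSideWeight(Elem & /*elem*/, unsigned int /*side*/) override
  {
    return 1;
  }
};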