libMesh
petsc_dm_wrapper.C
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2025 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 #include "libmesh/libmesh_common.h"
19 #include "libmesh/petsc_macro.h"
20 
21 #ifdef LIBMESH_HAVE_PETSC
22 #if !PETSC_VERSION_LESS_THAN(3,7,3)
23 #if defined(LIBMESH_ENABLE_AMR) && defined(LIBMESH_HAVE_METAPHYSICL)
24 
25 #include "libmesh/ignore_warnings.h"
26 #include <petscsf.h>
27 #if PETSC_VERSION_LESS_THAN(3,12,0)
28 #include <petsc/private/dmimpl.h>
29 #endif
30 #include <petscdmshell.h>
31 #include "libmesh/restore_warnings.h"
32 
33 #include "libmesh/petsc_dm_wrapper.h"
34 #include "libmesh/libmesh_logging.h"
35 #include "libmesh/system.h"
36 #include "libmesh/mesh.h"
37 #include "libmesh/mesh_base.h"
38 #include "libmesh/mesh_refinement.h"
39 #include "libmesh/mesh_tools.h"
40 #include "libmesh/partitioner.h"
41 #include "libmesh/dof_map.h"
42 #include "libmesh/elem.h"
43 #include "libmesh/petsc_matrix.h"
44 
45 namespace libMesh
46 {
47 
48  //--------------------------------------------------------------------
49  // Functions with C linkage to pass to PETSc. PETSc will call these
50  // methods as needed.
51  //
52  // Since they must have C linkage they have no knowledge of a namespace.
53  // We give them an obscure name to avoid namespace pollution.
54  //--------------------------------------------------------------------
55  extern "C"
56  {
57 
59 #if PETSC_VERSION_LESS_THAN(3,9,0)
60  PetscErrorCode libmesh_petsc_DMCreateSubDM(DM dm, PetscInt numFields, PetscInt fields[], IS *is, DM *subdm)
61 #else
62  PetscErrorCode libmesh_petsc_DMCreateSubDM(DM dm, PetscInt numFields, const PetscInt fields[], IS *is, DM *subdm)
63 #endif
64  {
65  PetscFunctionBegin;
66 
67  // Basically, we copy the PETSc ShellCreateSubDM implementation,
 68  // but we also need to set the embedding dimension and propagate
 69  // the relevant function pointers to the subDM for GMG purposes.
 70  // Since this is called by PETSc we have to pull some of this info
 71  // from the context stored in the DM.
72 
73  // First, retrieve our context
74  void * ctx = nullptr;
 75  LibmeshPetscCallQ(DMShellGetContext(dm, & ctx));
 76  libmesh_assert(ctx);
77  PetscDMContext * p_ctx = static_cast<PetscDMContext * >(ctx);
78 
79  if (subdm)
80  {
81  LibmeshPetscCallQ(DMShellCreate(PetscObjectComm((PetscObject) dm), subdm));
82 
83  // Set the DM embedding dimension to help PetscDS (Discrete System)
84  LibmeshPetscCallQ(DMSetCoordinateDim(*subdm, p_ctx->mesh_dim));
85 
86  // Now set the function pointers for the subDM
87  // Some DMShellGet* functions only exist with PETSc >= 3.12.0.
88 
89  // Set Coarsen function pointer
90 #if PETSC_VERSION_LESS_THAN(3,12,0)
91  if (dm->ops->coarsen)
92  LibmeshPetscCallQ(DMShellSetCoarsen(*subdm, dm->ops->coarsen));
93 #else
94  PetscErrorCode (*coarsen)(DM,MPI_Comm,DM*) = nullptr;
95  LibmeshPetscCallQ(DMShellGetCoarsen(dm, &coarsen));
96  if (coarsen)
97  LibmeshPetscCallQ(DMShellSetCoarsen(*subdm, coarsen));
98 #endif
99 
100  // Set Refine function pointer
101 #if PETSC_VERSION_LESS_THAN(3,12,0)
102  if (dm->ops->refine)
103  LibmeshPetscCallQ(DMShellSetRefine(*subdm, dm->ops->refine));
104 #else
105  PetscErrorCode (*refine)(DM,MPI_Comm,DM*) = nullptr;
106  LibmeshPetscCallQ(DMShellGetRefine(dm, &refine));
107 
108  if (refine)
109  LibmeshPetscCallQ(DMShellSetRefine(*subdm, refine));
110 #endif
111 
112  // Set Interpolation function pointer
113 #if PETSC_VERSION_LESS_THAN(3,12,0)
114  if (dm->ops->createinterpolation)
115  LibmeshPetscCallQ(DMShellSetCreateInterpolation(*subdm, dm->ops->createinterpolation));
116 #else
117  PetscErrorCode (*interp)(DM,DM,Mat*,Vec*) = nullptr;
118  LibmeshPetscCallQ(DMShellGetCreateInterpolation(dm, &interp));
119  if (interp)
120  LibmeshPetscCallQ(DMShellSetCreateInterpolation(*subdm, interp));
121 #endif
122 
123  // Set Restriction function pointer
124 #if PETSC_VERSION_LESS_THAN(3,12,0)
125  if (dm->ops->createrestriction)
126  LibmeshPetscCallQ(DMShellSetCreateRestriction(*subdm, dm->ops->createrestriction));
127 #else
128  PetscErrorCode (*createrestriction)(DM,DM,Mat*) = nullptr;
129  LibmeshPetscCallQ(DMShellGetCreateRestriction(dm, &createrestriction));
130  if (createrestriction)
131  LibmeshPetscCallQ(DMShellSetCreateRestriction(*subdm, createrestriction));
132 #endif
133 
134  // Set CreateSubDM function pointer
135 #if PETSC_VERSION_LESS_THAN(3,12,0)
136  if (dm->ops->createsubdm)
137  LibmeshPetscCallQ(DMShellSetCreateSubDM(*subdm, dm->ops->createsubdm));
138 #else
139  PetscErrorCode (*createsubdm)(DM,PetscInt,const PetscInt[],IS*,DM*) = nullptr;
140  LibmeshPetscCallQ(DMShellGetCreateSubDM(dm, &createsubdm));
141  if (createsubdm)
142  LibmeshPetscCallQ(DMShellSetCreateSubDM(*subdm, createsubdm));
143 #endif
144  // Set Context pointer
145  if (ctx)
146  LibmeshPetscCallQ(DMShellSetContext(*subdm, ctx));
147 #if PETSC_VERSION_LESS_THAN(3,11,0)
148  // Lastly, Compute the subsection for the subDM
149  LibmeshPetscCallQ(DMCreateSubDM_Section_Private(dm, numFields, fields, is, subdm));
150 #elif PETSC_RELEASE_LESS_THAN(3, 21, 0)
151  LibmeshPetscCallQ(DMCreateSectionSubDM(dm, numFields, fields, is, subdm));
152 #else
153  LibmeshPetscCallQ(DMCreateSectionSubDM(dm, numFields, fields, NULL, NULL, is, subdm));
154 #endif
155  }
156 
157  PetscFunctionReturn(LIBMESH_PETSC_SUCCESS);
158  }
159 
161  PetscErrorCode libmesh_petsc_DMRefine(DM dmc, MPI_Comm /*comm*/, DM * dmf)
162  {
163  PetscFunctionBegin;
164 
165  libmesh_assert(dmc);
166  libmesh_assert(dmf);
167 
168  // extract our context from the incoming dmc
169  void * ctx_c = nullptr;
170  LibmeshPetscCallQ(DMShellGetContext(dmc, & ctx_c));
171  libmesh_assert(ctx_c);
172  PetscDMContext * p_ctx = static_cast<PetscDMContext * >(ctx_c);
173 
174  // check / set the finer DM
 175  libmesh_assert(p_ctx->finer_dm);
 176  libmesh_assert(*(p_ctx->finer_dm));
 177  *(dmf) = *(p_ctx->finer_dm);
178 
179  PetscFunctionReturn(LIBMESH_PETSC_SUCCESS);
180  }
181 
183  PetscErrorCode libmesh_petsc_DMCoarsen(DM dmf, MPI_Comm /*comm*/, DM * dmc)
184  {
185  PetscFunctionBegin;
186 
187  libmesh_assert(dmc);
188  libmesh_assert(dmf);
189 
190  // Extract our context from the incoming dmf
191  void * ctx_f = nullptr;
192  LibmeshPetscCallQ(DMShellGetContext(dmf, &ctx_f));
193  libmesh_assert(ctx_f);
194  PetscDMContext * p_ctx_f = static_cast<PetscDMContext*>(ctx_f);
195 
196  // First, ensure that there exists a coarse DM that we want to
197  // set. There ought to be as we created it while walking the
198  // hierarchy.
199  libmesh_assert(p_ctx_f->coarser_dm);
200  libmesh_assert(*(p_ctx_f->coarser_dm));
201 
202  // In situations using fieldsplit we need to provide a coarser
203  // DM which only has the relevant subfields in it. Since we
204  // create global DMs for each mesh level, we need to also create
205  // the subDMs. We do this by checking the number of fields. When
 206  // fewer than all of the fields are used, we need to create the
207  // proper subDMs. We get the number of fields and their names
208  // from the incoming fine DM and the global reference DM
209  PetscInt nfieldsf, nfieldsg;
210  char ** fieldnamesf;
211  char ** fieldnamesg;
212 
213  libmesh_assert(p_ctx_f->global_dm);
214  DM * globaldm = p_ctx_f->global_dm;
215  LibmeshPetscCallQ(DMCreateFieldIS(dmf, &nfieldsf, &fieldnamesf, nullptr));
216  LibmeshPetscCallQ(DMCreateFieldIS(*globaldm, &nfieldsg, &fieldnamesg, nullptr));
217 
218  // If the probed number of fields is less than the number of
219  // global fields, this amounts to PETSc 'indicating' to us we
220  // are doing FS. So, we must create subDMs for the coarser
221  // DMs.
222  if ( nfieldsf < nfieldsg )
223  {
224  p_ctx_f->subfields.clear();
225  p_ctx_f->subfields.resize(nfieldsf);
226 
227  // To select the subDM fields we match fine grid DM field
228  // names to their global DM counterparts. Since PETSc can
229  // internally reassign field numbering under a fieldsplit,
230  // we must extract subsections via the field names. This is
231  // admittedly gross, but c'est la vie.
232  for (int i = 0; i < nfieldsf ; i++)
233  {
234  for (int j = 0; j < nfieldsg ;j++)
235  if ( strcmp( fieldnamesg[j], fieldnamesf[i] ) == 0 )
236  p_ctx_f->subfields[i] = j;
237  }
238 
239  // Next, for the found fields we create a subDM
240  DM subdm;
 241  LibmeshPetscCallQ(libmesh_petsc_DMCreateSubDM(dmf, nfieldsf,
 242  p_ctx_f->subfields.data(), nullptr, &subdm));
243 
244  // Extract our coarse context from the created subDM so we
245  // can set its subfields for use in createInterp.
246  void * ctx_c = nullptr;
247  LibmeshPetscCallQ(DMShellGetContext(subdm, &ctx_c));
248  libmesh_assert(ctx_c);
249  PetscDMContext * p_ctx_c = static_cast<PetscDMContext*>(ctx_c);
250 
251  // propagate subfield info to subDM
252  p_ctx_c->subfields = p_ctx_f->subfields;
253 
254  // return created subDM to PETSc
255  *(dmc) = subdm;
256  }
257  else {
258  // No fieldsplit was requested so set the coarser DM to the
259  // global coarser DM.
260  *(dmc) = *(p_ctx_f->coarser_dm);
261  }
262 
263  PetscFunctionReturn(LIBMESH_PETSC_SUCCESS);
264  }
265 
267  PetscErrorCode
268  libmesh_petsc_DMCreateInterpolation (DM dmc /*coarse*/, DM dmf /*fine*/,
269  Mat * mat ,Vec * vec)
270  {
271  PetscFunctionBegin;
272 
273  libmesh_assert(dmc);
274  libmesh_assert(dmf);
275  libmesh_assert(mat);
276  libmesh_assert(vec); // Optional scaling (not needed for mg)
277 
278  // Get a communicator from incoming DM
279  MPI_Comm comm;
280  LibmeshPetscCallQ(PetscObjectGetComm((PetscObject)dmc, &comm));
281 
282  // Extract our coarse context from the incoming DM
283  void * ctx_c = nullptr;
284  LibmeshPetscCallQ(DMShellGetContext(dmc, &ctx_c));
285  libmesh_assert(ctx_c);
286  PetscDMContext * p_ctx_c = static_cast<PetscDMContext*>(ctx_c);
287 
288  // Extract our fine context from the incoming DM
289  void * ctx_f = nullptr;
290  LibmeshPetscCallQ(DMShellGetContext(dmf, &ctx_f));
291  libmesh_assert(ctx_f);
292  PetscDMContext * p_ctx_f = static_cast<PetscDMContext*>(ctx_f);
293 
294  // Check for existing global projection matrix
295  libmesh_assert(p_ctx_c->K_interp_ptr);
296 
 297  // If we're doing fieldsplit we need to construct sub projection
 298  // matrices. We compare the number of fields on the passed-in DM to
 299  // the global DM in order to determine if a subprojection is needed.
300  PetscInt nfieldsf, nfieldsg;
301 
302  libmesh_assert(p_ctx_c->global_dm);
303  DM * globaldm = p_ctx_c->global_dm;
304 
305  LibmeshPetscCallQ(DMCreateFieldIS(dmf, &nfieldsf, nullptr, nullptr));
306  LibmeshPetscCallQ(DMCreateFieldIS(*globaldm, &nfieldsg, nullptr, nullptr));
307 
308  // If the probed number of fields is less than the number of
309  // global fields, this amounts to PETSc 'indicating' to us we
310  // are doing FS.
311  if ( nfieldsf < nfieldsg)
312  {
313  // Loop over the fields and merge their index sets.
314  std::vector<std::vector<numeric_index_type>> allrows,allcols;
315  std::vector<numeric_index_type> rows,cols;
316  allrows = p_ctx_f->dof_vec;
317  allcols = p_ctx_c->dof_vec;
318 
 319  // For the internal libMesh submatrix extraction we need to merge all
320  // field dofs and then sort the vectors so that they match
321  // the Projection Matrix ordering
322  const int n_subfields = p_ctx_f->subfields.size();
323  if ( n_subfields >= 1 )
324  {
325  for (int i : p_ctx_f->subfields)
326  {
327  rows.insert(rows.end(), allrows[i].begin(), allrows[i].end());
328  cols.insert(cols.end(), allcols[i].begin(), allcols[i].end());
329  }
330  std::sort(rows.begin(),rows.end());
331  std::sort(cols.begin(),cols.end());
332  }
333 
334  // Now that we have merged the fine and coarse index sets
 335  // we're ready to make the submatrix and pass it off to PETSc
336  p_ctx_c->K_interp_ptr->create_submatrix (*p_ctx_c->K_sub_interp_ptr, rows, cols);
337 
338  // return to PETSc the created submatrix
339  *(mat) = p_ctx_c->K_sub_interp_ptr->mat();
340 
341  } // endif less incoming DM fields than global DM fields
342  else
343  {
344  // We are not doing fieldsplit, so return global projection
345  *(mat) = p_ctx_c->K_interp_ptr->mat();
346  }
347 
 348  // Vec scaling isn't needed, so we're done.
349  *(vec) = LIBMESH_PETSC_NULLPTR;
350 
351  PetscFunctionReturn(LIBMESH_PETSC_SUCCESS);
352  } // end libmesh_petsc_DMCreateInterpolation
353 
355  PetscErrorCode
356  libmesh_petsc_DMCreateRestriction (DM dmc /*coarse*/, DM dmf/*fine*/, Mat * mat)
357  {
358  PetscFunctionBegin;
359 
360  libmesh_assert(dmc);
361  libmesh_assert(dmf);
362  libmesh_assert(mat);
363 
364  // get a communicator from incoming DM
365  MPI_Comm comm;
366  LibmeshPetscCallQ(PetscObjectGetComm((PetscObject)dmc, &comm));
367 
368  // extract our fine context from the incoming DM
369  void * ctx_f = nullptr;
370  LibmeshPetscCallQ(DMShellGetContext(dmf, &ctx_f));
371  libmesh_assert(ctx_f);
372  PetscDMContext * p_ctx_f = static_cast<PetscDMContext*>(ctx_f);
373 
374  // check / give PETSc its matrix
375  libmesh_assert(p_ctx_f->K_restrict_ptr);
376  *(mat) = p_ctx_f->K_restrict_ptr->mat();
377 
378  PetscFunctionReturn(LIBMESH_PETSC_SUCCESS);
379  }
380 
381  } // end extern C functions
382 
383 
385 
 386  void PetscDMWrapper::clear()
 387  {
388  // Calls custom deleters
389  _dms.clear();
390  _sections.clear();
391 
392  // We don't manage the lifetime of the PetscSF objects
393  _star_forests.clear();
394 
395  // The relevant C++ destructors are called for these objects
396  // automatically.
397  _pmtx_vec.clear();
398  _subpmtx_vec.clear();
399  _vec_vec.clear();
400 
401  // These members are trivially clear()able.
402  _ctx_vec.clear();
403  _mesh_dof_sizes.clear();
404  _mesh_dof_loc_sizes.clear();
405  }
406 
407  unsigned int PetscDMWrapper::init_petscdm(System & system)
408  {
409  LOG_SCOPE ("init_and_attach_petscdm()", "PetscDMWrapper");
410 
411  MeshBase & mesh = system.get_mesh(); // Convenience
412  MeshRefinement mesh_refinement(mesh); // Used for swapping between grids
413 
414  // There's no need for these code paths while traversing the hierarchy
415  mesh.allow_renumbering(false);
416  mesh.allow_remote_element_removal(false);
417  mesh.partitioner() = nullptr;
418 
 419  // First walk over the active local elements and see the maximum number of MG levels we can construct
420 /*
421  unsigned int n_levels = 0;
422  for ( auto & elem : mesh.active_local_element_ptr_range() )
423  {
424  if ( elem->level() > n_levels )
425  n_levels = elem->level();
426  }
427  // On coarse grids some processors may have no active local elements,
428  // these processors shouldn't make projections
429  if (n_levels >= 1)
430  n_levels += 1;
431 */
432 
433  unsigned int n_levels = MeshTools::n_levels(mesh);
434 
435  // How many MG levels did the user request?
436  unsigned int usr_requested_mg_lvls = 0;
437  usr_requested_mg_lvls = command_line_next("-pc_mg_levels", usr_requested_mg_lvls);
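  // (command_line_next() returns the value following "-pc_mg_levels" on the
  // command line, or the default of 0 if the option is absent, so a value of
  // 0 below means no multigrid hierarchy was requested.)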
438 
 439  // Only construct as many levels as were requested, if anything was actually requested
440  if ( usr_requested_mg_lvls != 0 )
441  {
 442  // Don't request more than the available number of levels on the mesh; require at least 2 levels
443  libmesh_assert_less_equal( usr_requested_mg_lvls, n_levels );
444  libmesh_assert( usr_requested_mg_lvls > 1 );
445 
446  n_levels = usr_requested_mg_lvls;
447  }
448  else
449  {
450  // if -pc_mg_levels is not specified we just construct fieldsplit related
451  // structures on the finest mesh.
452  n_levels = 1;
453  }
454 
455 
456  // Init data structures: data[0] ~ coarse grid, data[n_levels-1] ~ fine grid
457  this->init_dm_data(n_levels, system.comm());
458 
459  // Step 1. contract : all active elements have no children
460  mesh.contract();
461 
 462  // Start on finest grid. Construct DM data and stash some info for
463  // later projection_matrix and vec sizing
464  for(unsigned int level = n_levels; level >= 1; level--)
465  {
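  // Note: we start at the finest level (n_levels) and the mesh is uniformly
  // coarsened at the bottom of each iteration, so index level-1 always refers
  // to the grid the mesh currently holds.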
466  // Save the n_fine_dofs before coarsening for later projection matrix sizing
467  _mesh_dof_sizes[level-1] = system.get_dof_map().n_dofs();
468  _mesh_dof_loc_sizes[level-1] = system.get_dof_map().n_local_dofs();
469 
470  // Get refs to things we will fill
471  DM & dm = this->get_dm(level-1);
472  PetscSection & section = this->get_section(level-1);
473  PetscSF & star_forest = this->get_star_forest(level-1);
474 
475  // The shell will contain other DM info
476  LibmeshPetscCall2(system.comm(), DMShellCreate(system.comm().get(), &dm));
477 
478  // Set the DM embedding dimension to help PetscDS (Discrete System)
479  LibmeshPetscCall2(system.comm(), DMSetCoordinateDim(dm, mesh.mesh_dimension()));
480 
481  // Build the PetscSection and attach it to the DM
482  this->build_section(system, section);
483 #if PETSC_VERSION_LESS_THAN(3,10,0)
484  LibmeshPetscCall2(system.comm(), DMSetDefaultSection(dm, section));
485 #elif PETSC_VERSION_LESS_THAN(3,23,0)
486  LibmeshPetscCall2(system.comm(), DMSetSection(dm, section));
487 #else
488  LibmeshPetscCall2(system.comm(), DMSetLocalSection(dm, section));
489 #endif
490 
491  // We only need to build the star forest if we're in a parallel environment
492  if (system.n_processors() > 1)
493  {
494  // Build the PetscSF and attach it to the DM
495  this->build_sf(system, star_forest);
496 #if PETSC_VERSION_LESS_THAN(3,12,0)
497  LibmeshPetscCall2(system.comm(), DMSetDefaultSF(dm, star_forest));
498 #else
499  LibmeshPetscCall2(system.comm(), DMSetSectionSF(dm, star_forest));
500 #endif
501  }
502 
 503  // Set PETSc's Restriction, Interpolation, Coarsen and Refine functions for the current DM
504  LibmeshPetscCall2(system.comm(), DMShellSetCreateInterpolation ( dm, libmesh_petsc_DMCreateInterpolation ));
505 
 506  // Not implemented. For now we rely on Galerkin-style restrictions
507  bool supply_restriction = false;
508  if (supply_restriction)
509  LibmeshPetscCall2(system.comm(), DMShellSetCreateRestriction ( dm, libmesh_petsc_DMCreateRestriction ));
510 
511  LibmeshPetscCall2(system.comm(), DMShellSetCoarsen ( dm, libmesh_petsc_DMCoarsen ));
512 
513  LibmeshPetscCall2(system.comm(), DMShellSetRefine ( dm, libmesh_petsc_DMRefine ));
514 
515  LibmeshPetscCall2(system.comm(), DMShellSetCreateSubDM(dm, libmesh_petsc_DMCreateSubDM));
516 
517  // Uniformly coarsen if not the coarsest grid and distribute dof info.
518  if ( level != 1 )
519  {
520  LOG_CALL("PDM_coarsen", "PetscDMWrapper", mesh_refinement.uniformly_coarsen(1));
521  LOG_CALL("PDM_dist_dof", "PetscDMWrapper", system.get_dof_map().distribute_dofs(mesh));
522  LOG_CALL("PDM_prep_send", "PetscDMWrapper", system.get_dof_map().prepare_send_list());
523  }
524  } // End PETSc data structure creation
525 
526  // Now fill the corresponding internal PetscDMContext for each created DM
527  for( unsigned int i=1; i <= n_levels; i++ )
528  {
529  // Set context dimension
530  _ctx_vec[i-1].mesh_dim = mesh.mesh_dimension();
531 
532  // Create and attach a sized vector to the current ctx
533  _vec_vec[i-1]->init( _mesh_dof_sizes[i-1] );
534  _ctx_vec[i-1].current_vec = _vec_vec[i-1].get();
535 
536  // Set a global DM to be used as reference when using fieldsplit
537  _ctx_vec[i-1].global_dm = &(this->get_dm(n_levels-1));
538 
539  if (n_levels > 1 )
540  {
541  // Set pointers to surrounding dm levels to help PETSc refine/coarsen
 542  if ( i == 1 ) // we're at the coarsest mesh
543  {
544  _ctx_vec[i-1].coarser_dm = nullptr;
545  _ctx_vec[i-1].finer_dm = _dms[1].get();
546  }
 547  else if( i == n_levels ) // we're at the finest mesh
548  {
549  _ctx_vec[i-1].coarser_dm = _dms[_dms.size() - 2].get();
550  _ctx_vec[i-1].finer_dm = nullptr;
551  }
 552  else // we're in the middle of the hierarchy
553  {
554  _ctx_vec[i-1].coarser_dm = _dms[i-2].get();
555  _ctx_vec[i-1].finer_dm = _dms[i].get();
556  }
557  }
558  } // End context creation
559 
560  // Attach a vector and context to each DM
561  if ( n_levels >= 1 )
562  {
563  for ( unsigned int i = 1; i <= n_levels ; ++i)
564  {
565  DM & dm = this->get_dm(i-1);
566 
567  LibmeshPetscCall2(system.comm(), DMShellSetGlobalVector( dm, _ctx_vec[i-1].current_vec->vec() ));
568 
569  LibmeshPetscCall2(system.comm(), DMShellSetContext( dm, &_ctx_vec[i-1] ));
570  }
571  }
572 
 573  // DM structures created; now we need projection matrices if GMG is requested.
 574  // To prepare for projection creation, go to the second coarsest mesh so we can
 575  // utilize old_dof_indices information in the projection creation.
576  if (n_levels > 1 )
577  {
578  // First, stash the coarse dof indices for FS purposes
579  unsigned int n_vars = system.n_vars();
580  _ctx_vec[0].dof_vec.resize(n_vars);
581 
582  for( unsigned int v = 0; v < n_vars; v++ )
583  {
584  std::vector<numeric_index_type> di;
585  system.get_dof_map().local_variable_indices(di, system.get_mesh(), v);
586  _ctx_vec[0].dof_vec[v] = di;
587  }
588 
589  LOG_CALL ("PDM_refine", "PetscDMWrapper", mesh_refinement.uniformly_refine(1));
590  LOG_CALL ("PDM_dist_dof", "PetscDMWrapper", system.get_dof_map().distribute_dofs(mesh));
591  LOG_CALL ("PDM_cnstrnts", "PetscDMWrapper", system.reinit_constraints());
592  LOG_CALL ("PDM_prep_send", "PetscDMWrapper", system.get_dof_map().prepare_send_list());
593  }
594 
595  // Create the Interpolation Matrices between adjacent mesh levels
596  for ( unsigned int i = 1 ; i < n_levels ; ++i )
597  {
598  if ( i != n_levels )
599  {
600  // Stash the rest of the dof indices
601  unsigned int n_vars = system.n_vars();
602  _ctx_vec[i].dof_vec.resize(n_vars);
603 
604  for( unsigned int v = 0; v < n_vars; v++ )
605  {
606  std::vector<numeric_index_type> di;
607  system.get_dof_map().local_variable_indices(di, system.get_mesh(), v);
608  _ctx_vec[i].dof_vec[v] = di;
609  }
610 
611  unsigned int ndofs_c = _mesh_dof_sizes[i-1];
612  unsigned int ndofs_f = _mesh_dof_sizes[i];
613 
614  // Create the Interpolation matrix and set its pointer
615  _ctx_vec[i-1].K_interp_ptr = _pmtx_vec[i-1].get();
616  _ctx_vec[i-1].K_sub_interp_ptr = _subpmtx_vec[i-1].get();
617 
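  // Local row/column sizes for the interpolation operator: rows follow the
  // current (fine) dof distribution, while columns follow the "old" dof range
  // this processor owned before the most recent distribute_dofs().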
618  unsigned int ndofs_local = system.get_dof_map().n_local_dofs();
619  unsigned int ndofs_old_first = system.get_dof_map().first_old_dof();
620  unsigned int ndofs_old_end = system.get_dof_map().end_old_dof();
621  unsigned int ndofs_old_size = ndofs_old_end - ndofs_old_first;
622 
623  // Init and zero the matrix
624  _ctx_vec[i-1].K_interp_ptr->init(ndofs_f, ndofs_c, ndofs_local, ndofs_old_size, 30 , 20);
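  // (The trailing 30/20 arguments are the per-row preallocation hints for
  // on- and off-processor nonzeros in SparseMatrix::init(); they are rough
  // estimates, since the true sparsity of the projection is not known here.)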
625 
626  // Disable Mat destruction since PETSc destroys these for us
627  _ctx_vec[i-1].K_interp_ptr->set_destroy_mat_on_exit(false);
628  _ctx_vec[i-1].K_sub_interp_ptr->set_destroy_mat_on_exit(false);
629 
630  // TODO: Projection matrix sparsity pattern?
631  //MatSetOption(_ctx_vec[i-1].K_interp_ptr->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
632 
633  // Compute the interpolation matrix and set K_interp_ptr
634  LOG_CALL ("PDM_proj_mat", "PetscDMWrapper", system.projection_matrix(*_ctx_vec[i-1].K_interp_ptr));
635 
636  // Always close matrix that contains altered data
637  _ctx_vec[i-1].K_interp_ptr->close();
638  }
639 
640  // Move to next grid to make next projection
641  if ( i != n_levels - 1 )
642  {
643  LOG_CALL ("PDM_refine", "PetscDMWrapper", mesh_refinement.uniformly_refine(1));
644  LOG_CALL ("PDM_dist_dof", "PetscDMWrapper", system.get_dof_map().distribute_dofs(mesh));
645  LOG_CALL ("PDM_cnstrnts", "PetscDMWrapper", system.reinit_constraints());
646  LOG_CALL ("PDM_prep_send", "PetscDMWrapper", system.get_dof_map().prepare_send_list());
647  }
648  } // End create transfer operators. System back at the finest grid
649 
650  return n_levels;
651  }
652 
 653  void PetscDMWrapper::init_and_attach_petscdm(System & system, SNES snes)
 654  {
655  const auto n_levels = this->init_petscdm(system);
656 
657  // Lastly, give SNES the finest level DM
658  DM & dm = this->get_dm(n_levels-1);
659  LibmeshPetscCall2(system.comm(), SNESSetDM(snes, dm));
660  }
661 
 662  void PetscDMWrapper::init_and_attach_petscdm(System & system, KSP ksp)
 663  {
664  const auto n_levels = this->init_petscdm(system);
665 
666  // Lastly, give KSP the finest level DM
667  DM & dm = this->get_dm(n_levels-1);
668  LibmeshPetscCall2(system.comm(), KSPSetDM(ksp, dm));
669  }
670 
671  void PetscDMWrapper::build_section( const System & system, PetscSection & section )
672  {
673  LOG_SCOPE ("build_section()", "PetscDMWrapper");
674 
675  LibmeshPetscCall2(system.comm(), PetscSectionCreate(system.comm().get(),&section));
676 
677  // Tell the PetscSection about all of our System variables
678  LibmeshPetscCall2(system.comm(), PetscSectionSetNumFields(section,system.n_vars()));
679 
680  // Set the actual names of all the field variables
681  for (auto v : make_range(system.n_vars()))
682  LibmeshPetscCall2(system.comm(), PetscSectionSetFieldName( section, v, system.variable_name(v).c_str() ));
683 
 684  // For building the section, we need to create a local-to-global map
685  // of local "point" ids to the libMesh global id of that point.
686  // A "point" in PETSc nomenclature is a geometric object that can have
687  // dofs associated with it, e.g. Node, Edge, Face, Elem.
688  // The numbering PETSc expects is continuous for the local numbering.
 689  // Since we're only using this interface for solvers, we can just
 690  // assign any local id to each of the global ids. And because the
 691  // numbering is local, we don't need to worry about processor
 692  // numbering for the local point ids.
693  std::unordered_map<dof_id_type,dof_id_type> node_map;
694  std::unordered_map<dof_id_type,dof_id_type> elem_map;
695  std::map<dof_id_type,unsigned int> scalar_map;
696 
697  // First we tell the PetscSection about all of our points that have
698  // dofs associated with them.
699  this->set_point_range_in_section(system, section, node_map, elem_map, scalar_map);
700 
701  // Now we can build up the dofs per "point" in the PetscSection
702  this->add_dofs_to_section(system, section, node_map, elem_map, scalar_map);
703 
704  // Final setup of PetscSection
705  // Until Matt Knepley finishes implementing the commented out function
706  // below, the PetscSection will be assuming node-major ordering
707  // so let's throw an error if the user tries to use this without
708  // node-major order
709  libmesh_error_msg_if(!libMesh::on_command_line("--node-major-dofs"),
710  "ERROR: Must use --node-major-dofs with PetscSection!");
711 
712  //else if (!system.identify_variable_groups())
713  // ierr = PetscSectionSetUseFieldOffsets(section,PETSC_TRUE);LIBMESH_CHKERR(ierr);
714  //else
715  // {
716  // std::string msg = "ERROR: Only node-major or var-major ordering supported for PetscSection!\n";
717  // msg += " var-group-major ordering not supported!\n";
718  // msg += " Must use --node-major-dofs or set System::identify_variable_groups() = false!\n";
719  // libmesh_error_msg(msg);
720  // }
721 
722  LibmeshPetscCall2(system.comm(), PetscSectionSetUp(section));
723 
 724  // Sanity check that at least the local n_dofs match
725  libmesh_assert_equal_to(system.n_local_dofs(),this->check_section_n_dofs(section));
726  }
727 
728  void PetscDMWrapper::build_sf( const System & system, PetscSF & star_forest )
729  {
730  LOG_SCOPE ("build_sf()", "PetscDMWrapper");
731 
732  const DofMap & dof_map = system.get_dof_map();
733 
734  const std::vector<dof_id_type> & send_list = dof_map.get_send_list();
735 
736  // Number of ghost dofs that send information to this processor
737  const PetscInt n_leaves = cast_int<PetscInt>(send_list.size());
738 
 739  // Number of local dofs, including ghost dofs
740  const PetscInt n_roots = dof_map.n_local_dofs() + n_leaves;
741 
742  // This is the vector of dof indices coming from other processors
743  // We need to give this to the PetscSF
744  // We'll be extra paranoid about this ugly double cast
745  static_assert(sizeof(PetscInt) == sizeof(dof_id_type),"PetscInt is not a dof_id_type!");
746  PetscInt * local_dofs = reinterpret_cast<PetscInt *>(const_cast<dof_id_type *>(send_list.data()));
747 
748 #ifdef DEBUG
749  // PETSc 3.18 and above don't want duplicate entries here ... and
750  // frankly we shouldn't have duplicates in the first place!
751  {
752  std::set<dof_id_type> send_set(send_list.begin(), send_list.end());
753  libmesh_assert_equal_to(send_list.size(), send_set.size());
754  }
755 #endif
756 
757  // This is the vector of PetscSFNode's for the local_dofs.
 758  // For each entry in local_dofs, we have to supply the rank from which
759  // that dof stems and its local index on that rank.
760  // PETSc documentation here:
761  // http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/PetscSF/PetscSFNode.html
762  std::vector<PetscSFNode> sf_nodes(send_list.size());
763 
764  for (auto i : index_range(send_list))
765  {
766  dof_id_type incoming_dof = send_list[i];
767 
768  const processor_id_type rank = dof_map.dof_owner(incoming_dof);
769 
 770  // Dofs are sorted and contiguous on the processor, so the local index
771  // is counted up from the first dof on the processor.
772  PetscInt index = incoming_dof - dof_map.first_dof(rank);
773 
774  sf_nodes[i].rank = rank; /* Rank of owner */
775  sf_nodes[i].index = index;/* Index of dof on rank */
776  }
777 
778  PetscSFNode * remote_dofs = sf_nodes.data();
779 
780  LibmeshPetscCall2(system.comm(), PetscSFCreate(system.comm().get(), &star_forest));
781 
782  // TODO: We should create pointers to arrays so we don't have to copy
783  // and then can use PETSC_OWN_POINTER where PETSc will take ownership
784  // and delete the memory for us. But then we'd have to use PetscMalloc.
785  LibmeshPetscCall2(system.comm(), PetscSFSetGraph(star_forest,
786  n_roots,
787  n_leaves,
788  local_dofs,
789  PETSC_COPY_VALUES,
790  remote_dofs,
791  PETSC_COPY_VALUES));
792  }
793 
 794  void PetscDMWrapper::set_point_range_in_section(const System & system,
 795  PetscSection & section,
796  std::unordered_map<dof_id_type,dof_id_type> & node_map,
797  std::unordered_map<dof_id_type,dof_id_type> & elem_map,
798  std::map<dof_id_type,unsigned int> & scalar_map)
799  {
800  // We're expecting this to be empty coming in
801  libmesh_assert(node_map.empty());
802 
803  // We need to count up the number of active "points" on this processor.
804  // Nominally, a "point" in PETSc parlance is a geometric object that can
 805  // hold DoFs, i.e. node, edge, face, elem. Since we handle the mesh and are only
 806  // interested in solvers, the only thing PETSc needs is a unique *local* number
807  // for each "point" that has active DoFs; note however this local numbering
808  // we construct must be continuous.
809  //
810  // In libMesh, for most finite elements, we just associate those DoFs with the
 811  // geometric nodes. So we can loop over the nodes on this processor and check
 812  // if any of the fields have active DoFs on that node.
813  // If so, then we tell PETSc about that "point". At this stage, we just need
814  // to count up how many active "points" we have and cache the local number to global id
815  // mapping.
816 
817 
818  // These will be our local counters. pstart should always be zero.
819  // pend will track our local "point" count.
820  // If we're on a processor who coarsened the mesh to have no local elements,
821  // we should make an empty PetscSection. An empty PetscSection is specified
822  // by passing [0,0) to the PetscSectionSetChart call at the end. So, if we
823  // have nothing on this processor, these are the correct values to pass to
824  // PETSc.
825  dof_id_type pstart = 0;
826  dof_id_type pend = 0;
827 
828  const MeshBase & mesh = system.get_mesh();
829 
830  const DofMap & dof_map = system.get_dof_map();
831 
832  // If we don't have any local dofs, then there's nothing to tell to the PetscSection
833  if (dof_map.n_local_dofs() > 0)
834  {
835  // Conservative estimate of space needed so we don't thrash
836  node_map.reserve(mesh.n_local_nodes());
837  elem_map.reserve(mesh.n_active_local_elem());
838 
839  // We loop over active elements and then cache the global/local node mapping to make sure
840  // we only count active nodes. For example, if we're calling this function and we're
841  // not the finest level in the Mesh tree, we don't want to include nodes of child elements
842  // that aren't active on this level.
843  for (const auto & elem : mesh.active_local_element_ptr_range())
844  {
845  for (const Node & node : elem->node_ref_range())
846  {
 847  // Get the global id of this node
848 
849  // Only register nodes with the PetscSection if they have dofs that belong to
 850  // this processor. Even though we loop over active local elements, the dofs associated
851  // with the node may belong to a different processor. The processor who owns
852  // those dofs will register that node with the PetscSection on that processor.
853  std::vector<dof_id_type> node_dof_indices;
854  dof_map.dof_indices( &node, node_dof_indices );
855  if( !node_dof_indices.empty() && dof_map.local_index(node_dof_indices[0]) )
856  {
857 #ifndef NDEBUG
858  // We're assuming that if the first dof for this node belongs to this processor,
859  // then all of them do.
860  for( auto dof : node_dof_indices )
861  libmesh_assert(dof_map.local_index(dof));
862 #endif
863  // Cache the global/local mapping if we haven't already
864  // Then increment our local count
865  dof_id_type node_id = node.id();
866  if( node_map.count(node_id) == 0 )
867  {
868  node_map.emplace(node_id, pend);
869  pend++;
870  }
871  }
872  }
873 
874  // Some finite elements, e.g. Hierarchic, associate element interior DoFs with the element
 875  // rather than the node (since we ought to be able to use Hierarchic elements on a QUAD4,
876  // which has no interior node). Thus, we also need to check element interiors for DoFs
877  // as well and, if the finite element has them, we also need to count the Elem in our
878  // "point" accounting.
879  if( elem->n_dofs(system.number()) > 0 )
880  {
881  dof_id_type elem_id = elem->id();
882  elem_map.emplace(elem_id, pend);
883  pend++;
884  }
885  }
886 
887  // SCALAR dofs live on the "last" processor, so only work there if there are any
888  if( dof_map.n_SCALAR_dofs() > 0 && (system.processor_id() == (system.n_processors()-1)) )
889  {
890  // Loop through all the variables and cache the scalar ones. We cache the
891  // SCALAR variable index along with the local point to make it easier when
892  // we have to register dofs with the PetscSection
893  for (auto v : make_range(system.n_vars()))
894  {
895  if( system.variable(v).type().family == SCALAR )
896  {
897  scalar_map.emplace(pend, v);
898  pend++;
899  }
900  }
901  }
902 
903  }
904 
905  LibmeshPetscCall2(system.comm(), PetscSectionSetChart(section, pstart, pend));
906  }
907 
 908  void PetscDMWrapper::add_dofs_to_section(const System & system,
 909  PetscSection & section,
910  const std::unordered_map<dof_id_type,dof_id_type> & node_map,
911  const std::unordered_map<dof_id_type,dof_id_type> & elem_map,
912  const std::map<dof_id_type,unsigned int> & scalar_map)
913  {
914  const MeshBase & mesh = system.get_mesh();
915 
916  // Now we go through and add dof information for each "point".
917  //
918  // In libMesh, for most finite elements, we just associate those DoFs with the
 919  // geometric nodes. So we can loop over the nodes we cached in the node_map and
 920  // add the DoFs for each field for that node. We need to give PETSc the local id
921  // we built up in the node map.
922  for (const auto [global_node_id, local_node_id] : node_map )
923  {
924  libmesh_assert( mesh.query_node_ptr(global_node_id) );
925 
926  const Node & node = mesh.node_ref(global_node_id);
927 
928  this->add_dofs_helper(system,node,local_node_id,section);
929  }
930 
 931  // Some finite element types associate dofs with the element. So now we go through
 932  // any of those, with the Elem as the "point" we add to the PetscSection along with
 933  // its accompanying dofs
934  for (const auto [global_elem_id, local_elem_id] : elem_map )
935  {
936  libmesh_assert( mesh.query_elem_ptr(global_elem_id) );
937 
938  const Elem & elem = mesh.elem_ref(global_elem_id);
939 
940  this->add_dofs_helper(system,elem,local_elem_id,section);
941  }
942 
943  // Now add any SCALAR dofs to the PetscSection
944  // SCALAR dofs live on the "last" processor, so only work there if there are any
945  if (system.processor_id() == (system.n_processors()-1))
946  {
947  for (const auto [local_id, scalar_var] : scalar_map )
948  {
949  // The number of SCALAR dofs comes from the variable order
950  const int n_dofs = system.variable(scalar_var).type().order.get_order();
951 
952  LibmeshPetscCall2(system.comm(), PetscSectionSetFieldDof( section, local_id, scalar_var, n_dofs ));
953 
 954  // In the SCALAR case, there are no other variables associated with the "point",
 955  // so the total number of dofs on the point is the same as that for the field
956  LibmeshPetscCall2(system.comm(), PetscSectionSetDof( section, local_id, n_dofs ));
957  }
958  }
959 
960  }
961 
 962  void PetscDMWrapper::add_dofs_helper(const System & system,
 963  const DofObject & dof_object,
964  dof_id_type local_id,
965  PetscSection & section)
966  {
967  unsigned int total_n_dofs_at_dofobject = 0;
968 
969  // We are assuming variables are also numbered 0 to n_vars()-1
970  for (auto v : make_range(system.n_vars()))
971  {
972  unsigned int n_dofs_at_dofobject = dof_object.n_dofs(system.number(), v);
973 
974  if( n_dofs_at_dofobject > 0 )
975  {
976  LibmeshPetscCall2(system.comm(), PetscSectionSetFieldDof( section,
977  local_id,
978  v,
979  n_dofs_at_dofobject ));
980 
981  total_n_dofs_at_dofobject += n_dofs_at_dofobject;
982  }
983  }
984 
985  libmesh_assert_equal_to(total_n_dofs_at_dofobject, dof_object.n_dofs(system.number()));
986 
987  LibmeshPetscCall2(system.comm(), PetscSectionSetDof( section, local_id, total_n_dofs_at_dofobject ));
988  }
989 
990 
 991  dof_id_type PetscDMWrapper::check_section_n_dofs(PetscSection & section)
 992  {
993  PetscInt n_local_dofs = 0;
994 
 995  // Grab the starting and ending points from the section
996  PetscInt pstart, pend;
997  PetscErrorCode ierr = PetscSectionGetChart(section, &pstart, &pend);
998  if (ierr)
999  libmesh_error();
1000 
1001  // Count up the n_dofs for each point from the section
1002  for( PetscInt p = pstart; p < pend; p++ )
1003  {
1004  PetscInt n_dofs;
1005  ierr = PetscSectionGetDof(section,p,&n_dofs);
1006  if (ierr)
1007  libmesh_error();
1008  n_local_dofs += n_dofs;
1009  }
1010 
1011  static_assert(sizeof(PetscInt) == sizeof(dof_id_type),"PetscInt is not a dof_id_type!");
1012  return n_local_dofs;
1013  }
1014 
 1015  void PetscDMWrapper::init_dm_data(unsigned int n_levels, const Parallel::Communicator & comm)
 1016  {
1017  _dms.resize(n_levels);
1018  _sections.resize(n_levels);
1019  _star_forests.resize(n_levels);
1020  _ctx_vec.resize(n_levels);
1021  _pmtx_vec.resize(n_levels);
1022  _subpmtx_vec.resize(n_levels);
1023  _vec_vec.resize(n_levels);
1024  _mesh_dof_sizes.resize(n_levels);
1025  _mesh_dof_loc_sizes.resize(n_levels);
1026 
1027  for( unsigned int i = 0; i < n_levels; i++ )
1028  {
1029  // Call C++ object constructors
1030  _pmtx_vec[i] = std::make_unique<PetscMatrix<Number>>(comm);
1031  _subpmtx_vec[i] = std::make_unique<PetscMatrix<Number>>(comm);
1032  _vec_vec[i] = std::make_unique<PetscVector<Number>>(comm);
1033  }
1034  }
1035 
1036 } // end namespace libMesh
1037 
1038 #endif // #if LIBMESH_ENABLE_AMR && LIBMESH_HAVE_METAPHYSICL
1039 #endif // PETSC_VERSION
1040 #endif // LIBMESH_HAVE_PETSC