libMesh
petsc_dm_wrapper.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2019 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 #include "libmesh/libmesh_common.h"
19 #include "libmesh/petsc_macro.h"
20 
21 #ifdef LIBMESH_HAVE_PETSC
22 #if !PETSC_VERSION_LESS_THAN(3,7,3)
23 #if defined(LIBMESH_ENABLE_AMR) && defined(LIBMESH_HAVE_METAPHYSICL)
24 
25 #include "libmesh/ignore_warnings.h"
26 #include <petscsf.h>
27 #if PETSC_VERSION_LESS_THAN(3,12,0)
28 #include <petsc/private/dmimpl.h>
29 #endif
30 #include <petscdmshell.h>
31 #include "libmesh/restore_warnings.h"
32 
33 #include "libmesh/petsc_dm_wrapper.h"
34 #include "libmesh/libmesh_logging.h"
35 #include "libmesh/system.h"
36 #include "libmesh/mesh.h"
37 #include "libmesh/mesh_base.h"
38 #include "libmesh/mesh_refinement.h"
39 #include "libmesh/partitioner.h"
40 #include "libmesh/dof_map.h"
41 #include "libmesh/elem.h"
42 
43 namespace libMesh
44 {
45 
46  //--------------------------------------------------------------------
47  // Functions with C linkage to pass to PETSc. PETSc will call these
48  // methods as needed.
49  //
50  // Since they must have C linkage they have no knowledge of a namespace.
51  // We give them an obscure name to avoid namespace pollution.
52  //--------------------------------------------------------------------
53  extern "C"
54  {
55 
57 #if PETSC_VERSION_LESS_THAN(3,9,0)
58  PetscErrorCode libmesh_petsc_DMCreateSubDM(DM dm, PetscInt numFields, PetscInt fields[], IS *is, DM *subdm)
59 #else
60  PetscErrorCode libmesh_petsc_DMCreateSubDM(DM dm, PetscInt numFields, const PetscInt fields[], IS *is, DM *subdm)
61 #endif
62  {
63  PetscErrorCode ierr;
64  // Basically, we copy the PETSc ShellCreateSubDM implementation,
65  // but also need to set the embedding dim and also propagate
66  // the relevant function pointers to the subDM for GMG purposes.
67  // Since this is called by PETSc, we have to pull some of this info
68  // from the context in the DM.
69 
70  // First, retrieve our context
71  void * ctx = nullptr;
72  ierr = DMShellGetContext(dm, & ctx);
73  CHKERRQ(ierr);
74  libmesh_assert(ctx);
75  PetscDMContext * p_ctx = static_cast<PetscDMContext * >(ctx);
76 
77  if (subdm)
78  {
79  ierr = DMShellCreate(PetscObjectComm((PetscObject) dm), subdm);
80  CHKERRQ(ierr);
81 
82  // Set the DM embedding dimension to help PetscDS (Discrete System)
83  ierr = DMSetCoordinateDim(*subdm, p_ctx->mesh_dim);
84  CHKERRQ(ierr);
85 
86  // Now set the function pointers for the subDM
87  // Some DMShellGet* functions only exist with PETSc >= 3.12.0.
88 
89  // Set Coarsen function pointer
90 #if PETSC_VERSION_LESS_THAN(3,12,0)
91  if (dm->ops->coarsen)
92  {
93  ierr = DMShellSetCoarsen(*subdm, dm->ops->coarsen);
94  CHKERRQ(ierr);
95  }
96 #else
97  PetscErrorCode (*coarsen)(DM,MPI_Comm,DM*) = nullptr;
98  ierr = DMShellGetCoarsen(dm, &coarsen);
99  CHKERRQ(ierr);
100  if (coarsen)
101  {
102  ierr = DMShellSetCoarsen(*subdm, coarsen);
103  CHKERRQ(ierr);
104  }
105 #endif
106 
107  // Set Refine function pointer
108 #if PETSC_VERSION_LESS_THAN(3,12,0)
109  if (dm->ops->refine)
110  {
111  ierr = DMShellSetRefine(*subdm, dm->ops->refine);
112  CHKERRQ(ierr);
113  }
114 #else
115  PetscErrorCode (*refine)(DM,MPI_Comm,DM*) = nullptr;
116  ierr = DMShellGetRefine(dm, &refine);
117  if (refine)
118  {
119  ierr = DMShellSetRefine(*subdm, refine);
120  CHKERRQ(ierr);
121  }
122 #endif
123 
124  // Set Interpolation function pointer
125 #if PETSC_VERSION_LESS_THAN(3,12,0)
126  if (dm->ops->createinterpolation)
127  {
128  ierr = DMShellSetCreateInterpolation(*subdm, dm->ops->createinterpolation);
129  CHKERRQ(ierr);
130  }
131 #else
132  PetscErrorCode (*interp)(DM,DM,Mat*,Vec*) = nullptr;
133  ierr = DMShellGetCreateInterpolation(dm, &interp);
134  CHKERRQ(ierr);
135  if (interp)
136  {
137  ierr = DMShellSetCreateInterpolation(*subdm, interp);
138  CHKERRQ(ierr);
139  }
140 #endif
141 
142  // Set Restriction function pointer
143 #if PETSC_VERSION_LESS_THAN(3,12,0)
144  if (dm->ops->createrestriction)
145  {
146  ierr = DMShellSetCreateRestriction(*subdm, dm->ops->createrestriction);
147  CHKERRQ(ierr);
148  }
149 #else
150  PetscErrorCode (*createrestriction)(DM,DM,Mat*) = nullptr;
151  ierr = DMShellGetCreateRestriction(dm, &createrestriction);
152  CHKERRQ(ierr);
153  if (createrestriction)
154  {
155  ierr = DMShellSetCreateRestriction(*subdm, createrestriction);
156  CHKERRQ(ierr);
157  }
158 #endif
159 
160  // Set CreateSubDM function pointer
161 #if PETSC_VERSION_LESS_THAN(3,12,0)
162  if (dm->ops->createsubdm)
163  {
164  ierr = DMShellSetCreateSubDM(*subdm, dm->ops->createsubdm);
165  CHKERRQ(ierr);
166  }
167 #else
168  PetscErrorCode (*createsubdm)(DM,PetscInt,const PetscInt[],IS*,DM*) = nullptr;
169  ierr = DMShellGetCreateSubDM(dm, &createsubdm);
170  CHKERRQ(ierr);
171  if (createsubdm)
172  {
173  ierr = DMShellSetCreateSubDM(*subdm, createsubdm);
174  CHKERRQ(ierr);
175  }
176 #endif
177  // Set Context pointer
178  if (ctx)
179  {
180  ierr = DMShellSetContext(*subdm, ctx);
181  CHKERRQ(ierr);
182  }
183 #if PETSC_VERSION_LESS_THAN(3,11,0)
184  // Lastly, Compute the subsection for the subDM
185  ierr = DMCreateSubDM_Section_Private(dm, numFields, fields, is, subdm);
186  CHKERRQ(ierr);
187 #else
188  ierr = DMCreateSectionSubDM(dm, numFields, fields, is, subdm);
189  CHKERRQ(ierr);
190 #endif
191  }
192 
193  return 0;
194  }
195 
197  PetscErrorCode libmesh_petsc_DMRefine(DM dmc, MPI_Comm /*comm*/, DM * dmf)
198  {
199  libmesh_assert(dmc);
200  libmesh_assert(dmf);
201 
202  PetscErrorCode ierr;
203 
204  // extract our context from the incoming dmc
205  void * ctx_c = nullptr;
206  ierr = DMShellGetContext(dmc, & ctx_c);CHKERRQ(ierr);
207  libmesh_assert(ctx_c);
208  PetscDMContext * p_ctx = static_cast<PetscDMContext * >(ctx_c);
209 
210  // check / set the finer DM
211  libmesh_assert(p_ctx->finer_dm);
212  libmesh_assert(*(p_ctx->finer_dm));
213  *(dmf) = *(p_ctx->finer_dm);
214 
215  return 0;
216  }
217 
219  PetscErrorCode libmesh_petsc_DMCoarsen(DM dmf, MPI_Comm /*comm*/, DM * dmc)
220  {
221  libmesh_assert(dmc);
222  libmesh_assert(dmf);
223 
224  PetscErrorCode ierr;
225 
226  // Extract our context from the incoming dmf
227  void * ctx_f = nullptr;
228  ierr = DMShellGetContext(dmf, &ctx_f);CHKERRQ(ierr);
229  libmesh_assert(ctx_f);
230  PetscDMContext * p_ctx_f = static_cast<PetscDMContext*>(ctx_f);
231 
232  // First, ensure that there exists a coarse DM that we want to
233  // set. There ought to be as we created it while walking the
234  // hierarchy.
235  libmesh_assert(p_ctx_f->coarser_dm);
236  libmesh_assert(*(p_ctx_f->coarser_dm));
237 
238  // In situations using fieldsplit we need to provide a coarser
239  // DM which only has the relevant subfields in it. Since we
240  // create global DMs for each mesh level, we need to also create
241  // the subDMs. We do this by checking the number of fields. When
242  // less than all the fields are used, we need to create the
243  // proper subDMs. We get the number of fields and their names
244  // from the incoming fine DM and the global reference DM
245  PetscInt nfieldsf, nfieldsg;
246  char ** fieldnamesf;
247  char ** fieldnamesg;
248 
249  libmesh_assert(p_ctx_f->global_dm);
250  DM * globaldm = p_ctx_f->global_dm;
251  ierr = DMCreateFieldIS(dmf, &nfieldsf, &fieldnamesf, nullptr);
252  CHKERRQ(ierr);
253  ierr = DMCreateFieldIS(*globaldm, &nfieldsg, &fieldnamesg, nullptr);
254  CHKERRQ(ierr);
255 
256  // If the probed number of fields is less than the number of
257  // global fields, this amounts to PETSc 'indicating' to us we
258  // are doing FS. So, we must create subDMs for the coarser
259  // DMs.
260  if ( nfieldsf < nfieldsg )
261  {
262  p_ctx_f->subfields.clear();
263  p_ctx_f->subfields.resize(nfieldsf);
264 
265  // To select the subDM fields we match fine grid DM field
266  // names to their global DM counterparts. Since PETSc can
267  // internally reassign field numbering under a fieldsplit,
268  // we must extract subsections via the field names. This is
269  // admittedly gross, but c'est la vie.
270  for (int i = 0; i < nfieldsf ; i++)
271  {
272  for (int j = 0; j < nfieldsg ;j++)
273  if ( strcmp( fieldnamesg[j], fieldnamesf[i] ) == 0 )
274  p_ctx_f->subfields[i] = j;
275  }
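 // A small worked example of the name matching above (illustrative only,
 // not from the original source): if the global DM carries the fields
 // {"u", "v", "p"} numbered 0, 1, 2 and PETSc hands us a fine split DM
 // containing only {"p", "u"}, then subfields ends up as {2, 0}, i.e. each
 // fine-DM field index i stores the matching global field number j.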
276 
277  // Next, for the found fields we create a subDM
278  DM subdm;
279  libmesh_petsc_DMCreateSubDM(*(p_ctx_f->coarser_dm), nfieldsf,
280  p_ctx_f->subfields.data(), nullptr, &subdm);
281 
282  // Extract our coarse context from the created subDM so we
283  // can set its subfields for use in createInterp.
284  void * ctx_c = nullptr;
285  ierr = DMShellGetContext(subdm, &ctx_c);
286  CHKERRQ(ierr);
287  libmesh_assert(ctx_c);
288  PetscDMContext * p_ctx_c = static_cast<PetscDMContext*>(ctx_c);
289 
290  // propagate subfield info to subDM
291  p_ctx_c->subfields = p_ctx_f->subfields;
292 
293  // return created subDM to PETSc
294  *(dmc) = subdm;
295  }
296  else {
297  // No fieldsplit was requested so set the coarser DM to the
298  // global coarser DM.
299  *(dmc) = *(p_ctx_f->coarser_dm);
300  }
301 
302  return 0;
303  }
304 
306  PetscErrorCode
307  libmesh_petsc_DMCreateInterpolation (DM dmc /*coarse*/, DM dmf /*fine*/,
308  Mat * mat ,Vec * vec)
309  {
310  libmesh_assert(dmc);
311  libmesh_assert(dmf);
312  libmesh_assert(mat);
313  libmesh_assert(vec); // Optional scaling (not needed for mg)
314 
315  // Get a communicator from incoming DM
316  PetscErrorCode ierr;
317  MPI_Comm comm;
318  PetscObjectGetComm((PetscObject)dmc, &comm);
319 
320  // Extract our coarse context from the incoming DM
321  void * ctx_c = nullptr;
322  ierr = DMShellGetContext(dmc, &ctx_c);
323  CHKERRQ(ierr);
324  libmesh_assert(ctx_c);
325  PetscDMContext * p_ctx_c = static_cast<PetscDMContext*>(ctx_c);
326 
327  // Extract our fine context from the incoming DM
328  void * ctx_f = nullptr;
329  ierr = DMShellGetContext(dmf, &ctx_f);CHKERRQ(ierr);
330  libmesh_assert(ctx_f);
331  PetscDMContext * p_ctx_f = static_cast<PetscDMContext*>(ctx_f);
332 
333  // Check for existing global projection matrix
334  libmesh_assert(p_ctx_c->K_interp_ptr);
335 
336  // If we're doing fieldsplit we need to construct sub projection
337  // matrices. We compare the incoming DM's number of fields to the
338  // global DM's in order to determine if a subprojection is needed.
339  PetscInt nfieldsf, nfieldsg;
340 
341  libmesh_assert(p_ctx_c->global_dm);
342  DM * globaldm = p_ctx_c->global_dm;
343 
344  ierr = DMCreateFieldIS(dmf, &nfieldsf, nullptr, nullptr);
345  CHKERRQ(ierr);
346  ierr = DMCreateFieldIS(*globaldm, &nfieldsg, nullptr, nullptr);
347  CHKERRQ(ierr);
348 
349  // If the probed number of fields is less than the number of
350  // global fields, this amounts to PETSc 'indicating' to us we
351  // are doing FS.
352  if ( nfieldsf < nfieldsg)
353  {
354  // Loop over the fields and merge their index sets.
355  std::vector<std::vector<numeric_index_type>> allrows,allcols;
356  std::vector<numeric_index_type> rows,cols;
357  allrows = p_ctx_f->dof_vec;
358  allcols = p_ctx_c->dof_vec;
359 
360  // For the internal libMesh submatrix extraction we need to merge all
361  // field dofs and then sort the vectors so that they match
362  // the projection matrix ordering.
363  const int n_subfields = p_ctx_f->subfields.size();
364  if ( n_subfields >= 1 )
365  {
366  for (int i : p_ctx_f->subfields)
367  {
368  rows.insert(rows.end(), allrows[i].begin(), allrows[i].end());
369  cols.insert(cols.end(), allcols[i].begin(), allcols[i].end());
370  }
371  std::sort(rows.begin(),rows.end());
372  std::sort(cols.begin(),cols.end());
373  }
374 
375  // Now that we have merged the fine and coarse index sets
376  // we're ready to make the submatrix and pass it off to PETSc.
377  p_ctx_c->K_interp_ptr->create_submatrix (*p_ctx_c->K_sub_interp_ptr, rows, cols);
378 
379  // return to PETSc the created submatrix
380  *(mat) = p_ctx_c->K_sub_interp_ptr->mat();
381 
382  } // endif less incoming DM fields than global DM fields
383  else
384  {
385  // We are not doing fieldsplit, so return global projection
386  *(mat) = p_ctx_c->K_interp_ptr->mat();
387  }
388 
389  // Vec scaling isn't needed, so we're done.
390  *(vec) = PETSC_NULL;
391 
392  return 0;
393  } // end libmesh_petsc_DMCreateInterpolation
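 // Illustrative sketch (not part of the original source): with subfields
 // {0, 2} from the example above, `rows` collects the fine-level dofs of
 // fields 0 and 2 and `cols` the corresponding coarse-level dofs, both
 // sorted; create_submatrix() then pulls exactly that block out of the
 // full projection matrix, giving the interpolation operator for the
 // split currently being solved.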
394 
396  PetscErrorCode
397  libmesh_petsc_DMCreateRestriction (DM dmc /*coarse*/, DM dmf/*fine*/, Mat * mat)
398  {
399  libmesh_assert(dmc);
400  libmesh_assert(dmf);
401  libmesh_assert(mat);
402 
403  PetscErrorCode ierr;
404 
405  // get a communicator from incoming DM
406  MPI_Comm comm;
407  PetscObjectGetComm((PetscObject)dmc, &comm);
408 
409  // extract our fine context from the incoming DM
410  void * ctx_f = nullptr;
411  ierr = DMShellGetContext(dmf, &ctx_f);CHKERRQ(ierr);
412  libmesh_assert(ctx_f);
413  PetscDMContext * p_ctx_f = static_cast<PetscDMContext*>(ctx_f);
414 
415  // check / give PETSc its matrix
416  libmesh_assert(p_ctx_f->K_restrict_ptr);
417  *(mat) = p_ctx_f->K_restrict_ptr->mat();
418 
419  return 0;
420  }
421 
422  } // end extern C functions
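 // Informal summary (not from the original source): when -pc_type mg is
 // active, PETSc calls libmesh_petsc_DMCoarsen / libmesh_petsc_DMRefine to
 // walk the DM hierarchy built below, and libmesh_petsc_DMCreateInterpolation
 // to obtain the interpolation operator between each pair of levels;
 // libmesh_petsc_DMCreateSubDM is invoked when a fieldsplit preconditioner
 // asks for per-field sub-DMs.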
423 
424 
425  PetscDMWrapper::~PetscDMWrapper()
426  {
427  this->clear();
428  }
429 
430  void PetscDMWrapper::clear()
431  {
432  // PETSc will destroy the attached PetscSection and PetscSF, as well as
433  // other related objects such as the projections, so we just tidy up the
434  // containers here.
435 
436  _dms.clear();
437  _sections.clear();
438  _star_forests.clear();
439  _pmtx_vec.clear();
440  _vec_vec.clear();
441  _ctx_vec.clear();
442 
443  }
444 
445  void PetscDMWrapper::init_and_attach_petscdm(System & system, SNES & snes)
446  {
447  START_LOG ("init_and_attach_petscdm()", "PetscDMWrapper");
448 
449  PetscErrorCode ierr;
450 
451  MeshBase & mesh = system.get_mesh(); // Convenience
452  MeshRefinement mesh_refinement(mesh); // Used for swapping between grids
453 
454  // There's no need for these code paths while traversing the hierarchy
455  mesh.allow_renumbering(false);
456  mesh.allow_remote_element_removal(false);
457  mesh.partitioner() = nullptr;
458 
459  // First, walk over the active local elements to determine the maximum number of MG levels we can construct
460  unsigned int n_levels = 0;
461  for ( auto & elem : mesh.active_local_element_ptr_range() )
462  {
463  if ( elem->level() > n_levels )
464  n_levels = elem->level();
465  }
466  // On coarse grids some processors may have no active local elements;
467  // these processors shouldn't make projections.
468  if (n_levels >= 1)
469  n_levels += 1;
470 
471  // How many MG levels did the user request?
472  unsigned int usr_requested_mg_lvls = 0;
473  usr_requested_mg_lvls = command_line_next("-pc_mg_levels", usr_requested_mg_lvls);
474 
475  // Only construct however many levels were requested if something was actually requested
476  if ( usr_requested_mg_lvls != 0 )
477  {
478  // Don't request more than the available number of levels on the mesh, and require at least 2 levels
479  libmesh_assert_less_equal( usr_requested_mg_lvls, n_levels );
480  libmesh_assert( usr_requested_mg_lvls > 1 );
481 
482  n_levels = usr_requested_mg_lvls;
483  }
484  else
485  {
486  // If -pc_mg_levels is not specified we just construct fieldsplit-related
487  // structures on the finest mesh.
488  n_levels = 1;
489  }
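 // Worked example of the level bookkeeping above (illustrative only): on a
 // mesh that has been uniformly refined twice, the active local elements sit
 // at level 2, so the loop above finds n_levels = 2 and the increment makes
 // it 3. A user may then request -pc_mg_levels 2 or 3; with no -pc_mg_levels
 // option only the finest-grid (fieldsplit) structures are built.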
490 
491 
492  // Init data structures: data[0] ~ coarse grid, data[n_levels-1] ~ fine grid
493  this->init_dm_data(n_levels, system.comm());
494 
495  // Step 1. contract : all active elements have no children
496  mesh.contract();
497 
498  // Start on the finest grid. Construct the DM data and stash some info for
499  // later projection_matrix and vector sizing
500  for(unsigned int level = n_levels; level >= 1; level--)
501  {
502  // Save the n_fine_dofs before coarsening for later projection matrix sizing
503  _mesh_dof_sizes[level-1] = system.get_dof_map().n_dofs();
504  _mesh_dof_loc_sizes[level-1] = system.get_dof_map().n_local_dofs();
505 
506  // Get refs to things we will fill
507  DM & dm = this->get_dm(level-1);
508  PetscSection & section = this->get_section(level-1);
509  PetscSF & star_forest = this->get_star_forest(level-1);
510 
511  // The shell will contain other DM info
512  ierr = DMShellCreate(system.comm().get(), &dm);
513  CHKERRABORT(system.comm().get(),ierr);
514 
515  // Set the DM embedding dimension to help PetscDS (Discrete System)
516  ierr = DMSetCoordinateDim(dm, mesh.mesh_dimension());
517  CHKERRABORT(system.comm().get(),ierr);
518 
519  // Build the PetscSection and attach it to the DM
520  this->build_section(system, section);
521 #if PETSC_VERSION_LESS_THAN(3,12,0)
522  ierr = DMSetDefaultSection(dm, section);
523 #else
524  ierr = DMSetSection(dm, section);
525 #endif
526  CHKERRABORT(system.comm().get(),ierr);
527 
528  // We only need to build the star forest if we're in a parallel environment
529  if (system.n_processors() > 1)
530  {
531  // Build the PetscSF and attach it to the DM
532  this->build_sf(system, star_forest);
533 #if PETSC_VERSION_LESS_THAN(3,12,0)
534  ierr = DMSetDefaultSF(dm, star_forest);
535 #else
536  ierr = DMSetSectionSF(dm, star_forest);
537 #endif
538  CHKERRABORT(system.comm().get(),ierr);
539  }
540 
541  // Set PETSc's Restriction, Interpolation, Coarsen and Refine functions for the current DM
542  ierr = DMShellSetCreateInterpolation ( dm, libmesh_petsc_DMCreateInterpolation );
543  CHKERRABORT(system.comm().get(),ierr);
544 
545  // Not implemented. For now we rely on Galerkin-style restrictions
546  bool supply_restriction = false;
547  if (supply_restriction)
548  {
549  ierr = DMShellSetCreateRestriction ( dm, libmesh_petsc_DMCreateRestriction );
550  CHKERRABORT(system.comm().get(),ierr);
551  }
552 
553  ierr = DMShellSetCoarsen ( dm, libmesh_petsc_DMCoarsen );
554  CHKERRABORT(system.comm().get(),ierr);
555 
556  ierr = DMShellSetRefine ( dm, libmesh_petsc_DMRefine );
557  CHKERRABORT(system.comm().get(),ierr);
558 
559  ierr= DMShellSetCreateSubDM(dm, libmesh_petsc_DMCreateSubDM);
560  CHKERRABORT(system.comm().get(), ierr);
561 
562  // Uniformly coarsen if not the coarsest grid and distribute dof info.
563  if ( level != 1 )
564  {
565  START_LOG ("PDM_coarsen", "PetscDMWrapper");
566  mesh_refinement.uniformly_coarsen(1);
567  STOP_LOG ("PDM_coarsen", "PetscDMWrapper");
568 
569  START_LOG ("PDM_dist_dof", "PetscDMWrapper");
570  system.get_dof_map().distribute_dofs(mesh);
571  STOP_LOG ("PDM_dist_dof", "PetscDMWrapper");
572  }
573  } // End PETSc data structure creation
574 
575  // Now fill the corresponding internal PetscDMContext for each created DM
576  for( unsigned int i=1; i <= n_levels; i++ )
577  {
578  // Set context dimension
579  (*_ctx_vec[i-1]).mesh_dim = mesh.mesh_dimension();
580 
581  // Create and attach a sized vector to the current ctx
582  _vec_vec[i-1]->init( _mesh_dof_sizes[i-1] );
583  _ctx_vec[i-1]->current_vec = _vec_vec[i-1].get();
584 
585  // Set a global DM to be used as reference when using fieldsplit
586  _ctx_vec[i-1]->global_dm = &(this->get_dm(n_levels-1));
587 
588  if (n_levels > 1 )
589  {
590  // Set pointers to surrounding dm levels to help PETSc refine/coarsen
591  if ( i == 1 ) // we're at the coarsest mesh
592  {
593  (*_ctx_vec[i-1]).coarser_dm = nullptr;
594  (*_ctx_vec[i-1]).finer_dm = _dms[1].get();
595  }
596  else if( i == n_levels ) // we're at the finest mesh
597  {
598  (*_ctx_vec[i-1]).coarser_dm = _dms[_dms.size() - 2].get();
599  (*_ctx_vec[i-1]).finer_dm = nullptr;
600  }
601  else // we're in the middle of the hierarchy
602  {
603  (*_ctx_vec[i-1]).coarser_dm = _dms[i-2].get();
604  (*_ctx_vec[i-1]).finer_dm = _dms[i].get();
605  }
606  }
607 
608  } // End context creation
609 
610  // Attach a vector and context to each DM
611  if ( n_levels >= 1 )
612  {
613 
614  for ( unsigned int i = 1; i <= n_levels ; ++i)
615  {
616  DM & dm = this->get_dm(i-1);
617 
618  ierr = DMShellSetGlobalVector( dm, (*_ctx_vec[ i-1 ]).current_vec->vec() );
619  CHKERRABORT(system.comm().get(),ierr);
620 
621  ierr = DMShellSetContext( dm, _ctx_vec[ i-1 ].get() );
622  CHKERRABORT(system.comm().get(),ierr);
623  }
624  }
625 
626  // DM structures created; now we need projection matrices if GMG is requested.
627  // To prepare for projection creation, go to the second coarsest mesh so we can utilize
628  // old_dof_indices information in the projection creation.
629  if (n_levels > 1 )
630  {
631 
632  // First, stash the coarse dof indices for FS purposes
633  unsigned int n_vars = system.n_vars();
634  _ctx_vec[0]->dof_vec.resize(n_vars);
635 
636  for( unsigned int v = 0; v < n_vars; v++ )
637  {
638  std::vector<numeric_index_type> di;
639  system.get_dof_map().local_variable_indices(di, system.get_mesh(), v);
640  _ctx_vec[0]->dof_vec[v] = di;
641  }
642 
643  START_LOG ("PDM_refine", "PetscDMWrapper");
644  mesh_refinement.uniformly_refine(1);
645  STOP_LOG ("PDM_refine", "PetscDMWrapper");
646 
647  START_LOG ("PDM_dist_dof", "PetscDMWrapper");
648  system.get_dof_map().distribute_dofs(mesh);
649  STOP_LOG ("PDM_dist_dof", "PetscDMWrapper");
650 
651  START_LOG ("PDM_cnstrnts", "PetscDMWrapper");
652  system.reinit_constraints();
653  STOP_LOG ("PDM_cnstrnts", "PetscDMWrapper");
654  }
655 
656  // Create the Interpolation Matrices between adjacent mesh levels
657  for ( unsigned int i = 1 ; i < n_levels ; ++i )
658  {
659  if ( i != n_levels )
660  {
661  // Stash the rest of the dof indices
662  unsigned int n_vars = system.n_vars();
663  _ctx_vec[i]->dof_vec.resize(n_vars);
664 
665  for( unsigned int v = 0; v < n_vars; v++ )
666  {
667  std::vector<numeric_index_type> di;
668  system.get_dof_map().local_variable_indices(di, system.get_mesh(), v);
669  _ctx_vec[i]->dof_vec[v] = di;
670  }
671 
672  unsigned int ndofs_c = _mesh_dof_sizes[i-1];
673  unsigned int ndofs_f = _mesh_dof_sizes[i];
674 
675  // Create the Interpolation matrix and set its pointer
676  _ctx_vec[i-1]->K_interp_ptr = _pmtx_vec[i-1].get();
677  _ctx_vec[i-1]->K_sub_interp_ptr = _subpmtx_vec[i-1].get();
678 
679  unsigned int ndofs_local = system.get_dof_map().n_dofs_on_processor(system.processor_id());
680  unsigned int ndofs_old_first = system.get_dof_map().first_old_dof(system.processor_id());
681  unsigned int ndofs_old_end = system.get_dof_map().end_old_dof(system.processor_id());
682  unsigned int ndofs_old_size = ndofs_old_end - ndofs_old_first;
683 
684  // Init and zero the matrix
685  _ctx_vec[i-1]->K_interp_ptr->init(ndofs_f, ndofs_c, ndofs_local, ndofs_old_size, 30 , 20);
686 
687  // Disable Mat destruction since PETSc destroys these for us
688  _ctx_vec[i-1]->K_interp_ptr->set_destroy_mat_on_exit(false);
689  _ctx_vec[i-1]->K_sub_interp_ptr->set_destroy_mat_on_exit(false);
690 
691  // TODO: Projection matrix sparsity pattern?
692  //MatSetOption(_ctx_vec[i-1]->K_interp_ptr->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
693 
694  // Compute the interpolation matrix and set K_interp_ptr
695  START_LOG ("PDM_proj_mat", "PetscDMWrapper");
696  system.projection_matrix(*_ctx_vec[i-1]->K_interp_ptr);
697  STOP_LOG ("PDM_proj_mat", "PetscDMWrapper");
698 
699  // Always close matrix that contains altered data
700  _ctx_vec[i-1]->K_interp_ptr->close();
701  }
702 
703  // Move to next grid to make next projection
704  if ( i != n_levels - 1 )
705  {
706  START_LOG ("PDM_refine", "PetscDMWrapper");
707  mesh_refinement.uniformly_refine(1);
708  STOP_LOG ("PDM_refine", "PetscDMWrapper");
709 
710  START_LOG ("PDM_dist_dof", "PetscDMWrapper");
711  system.get_dof_map().distribute_dofs(mesh);
712  STOP_LOG ("PDM_dist_dof", "PetscDMWrapper");
713 
714  START_LOG ("PDM_cnstrnts", "PetscDMWrapper");
715  system.reinit_constraints();
716  STOP_LOG ("PDM_cnstrnts", "PetscDMWrapper");
717 
718  }
719  } // End create transfer operators. System back at the finest grid
720 
721  // Lastly, give SNES the finest level DM
722  DM & dm = this->get_dm(n_levels-1);
723  ierr = SNESSetDM(snes, dm);
724  CHKERRABORT(system.comm().get(),ierr);
725 
726  STOP_LOG ("init_and_attach_petscdm()", "PetscDMWrapper");
727  }
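 // A minimal usage sketch (hypothetical; libMesh's PETSc nonlinear solver
 // typically calls this for you when PETSc DM support is requested):
 //
 //   PetscDMWrapper dm_wrapper;
 //   dm_wrapper.init_and_attach_petscdm(system, snes);
 //
 // and then run with options along the lines of
 //
 //   ./app --node-major-dofs -snes_monitor -pc_type mg -pc_mg_levels 3
 //
 // where --node-major-dofs is required by build_section() below and
 // -pc_mg_levels must not exceed the number of levels available in the
 // mesh hierarchy.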
728 
729  void PetscDMWrapper::build_section( const System & system, PetscSection & section )
730  {
731  START_LOG ("build_section()", "PetscDMWrapper");
732 
733  PetscErrorCode ierr;
734  ierr = PetscSectionCreate(system.comm().get(),&section);
735  CHKERRABORT(system.comm().get(),ierr);
736 
737  // Tell the PetscSection about all of our System variables
738  ierr = PetscSectionSetNumFields(section,system.n_vars());
739  CHKERRABORT(system.comm().get(),ierr);
740 
741  // Set the actual names of all the field variables
742  for (auto v : IntRange<unsigned int>(0, system.n_vars()))
743  {
744  ierr = PetscSectionSetFieldName( section, v, system.variable_name(v).c_str() );
745  CHKERRABORT(system.comm().get(),ierr);
746  }
747 
748  // For building the section, we need to create local-to-global map
749  // of local "point" ids to the libMesh global id of that point.
750  // A "point" in PETSc nomenclature is a geometric object that can have
751  // dofs associated with it, e.g. Node, Edge, Face, Elem.
752  // The local numbering PETSc expects is contiguous.
753  // Since we're only using this interface for solvers, we can just
754  // assign any local id to any of the global ids. And since it is local,
755  // we don't need to worry about processor numbering for the local
756  // point ids.
757  std::unordered_map<dof_id_type,dof_id_type> node_map;
758  std::unordered_map<dof_id_type,dof_id_type> elem_map;
759  std::map<dof_id_type,unsigned int> scalar_map;
760 
761  // First we tell the PetscSection about all of our points that have
762  // dofs associated with them.
763  this->set_point_range_in_section(system, section, node_map, elem_map, scalar_map);
764 
765  // Now we can build up the dofs per "point" in the PetscSection
766  this->add_dofs_to_section(system, section, node_map, elem_map, scalar_map);
767 
768  // Final setup of PetscSection
769  // Until Matt Knepley finishes implementing the commented out function
770  // below, the PetscSection will be assuming node-major ordering
771  // so let's throw an error if the user tries to use this without
772  // node-major order
773  if (!libMesh::on_command_line("--node-major-dofs"))
774  libmesh_error_msg("ERROR: Must use --node-major-dofs with PetscSection!");
775 
776  //else if (!system.identify_variable_groups())
777  // ierr = PetscSectionSetUseFieldOffsets(section,PETSC_TRUE);LIBMESH_CHKERR(ierr);
778  //else
779  // {
780  // std::string msg = "ERROR: Only node-major or var-major ordering supported for PetscSection!\n";
781  // msg += " var-group-major ordering not supported!\n";
782  // msg += " Must use --node-major-dofs or set System::identify_variable_groups() = false!\n";
783  // libmesh_error_msg(msg);
784  // }
785 
786  ierr = PetscSectionSetUp(section);
787  CHKERRABORT(system.comm().get(),ierr);
788 
789  // Sanity checking at least that local_n_dofs match
790  libmesh_assert_equal_to(system.n_local_dofs(),this->check_section_n_dofs(section));
791 
792  STOP_LOG ("build_section()", "PetscDMWrapper");
793  }
794 
795  void PetscDMWrapper::build_sf( const System & system, PetscSF & star_forest )
796  {
797  START_LOG ("build_sf()", "PetscDMWrapper");
798 
799  const DofMap & dof_map = system.get_dof_map();
800 
801  const std::vector<dof_id_type> & send_list = dof_map.get_send_list();
802 
803  // Number of ghost dofs that send information to this processor
804  const PetscInt n_leaves = cast_int<PetscInt>(send_list.size());
805 
806  // Number of local dofs, including ghost dofs
807  const PetscInt n_roots = dof_map.n_local_dofs() + n_leaves;
808 
809  // This is the vector of dof indices coming from other processors
810  // We need to give this to the PetscSF
811  // We'll be extra paranoid about this ugly double cast
812  static_assert(sizeof(PetscInt) == sizeof(dof_id_type),"PetscInt is not a dof_id_type!");
813  PetscInt * local_dofs = reinterpret_cast<PetscInt *>(const_cast<dof_id_type *>(send_list.data()));
814 
815  // This is the vector of PetscSFNode's for the local_dofs.
816  // For each entry in local_dof, we have to supply the rank from which
817  // that dof stems and its local index on that rank.
818  // PETSc documentation here:
819  // http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/PetscSF/PetscSFNode.html
820  std::vector<PetscSFNode> sf_nodes(send_list.size());
821 
822  for (auto i : index_range(send_list))
823  {
824  dof_id_type incoming_dof = send_list[i];
825 
826  const processor_id_type rank = dof_map.dof_owner(incoming_dof);
827 
828  // Dofs are sorted and contiguous on the processor, so the local index
829  // is counted up from the first dof on the processor.
830  PetscInt index = incoming_dof - dof_map.first_dof(rank);
831 
832  sf_nodes[i].rank = rank; /* Rank of owner */
833  sf_nodes[i].index = index;/* Index of dof on rank */
834  }
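 // Illustrative example of the mapping above (not from the original source):
 // if ghost dof 42 in the send_list is owned by rank 1 and rank 1's first
 // dof is 40, the corresponding PetscSFNode is { .rank = 1, .index = 2 },
 // i.e. the third local dof on rank 1.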
835 
836  PetscSFNode * remote_dofs = sf_nodes.data();
837 
838  PetscErrorCode ierr;
839  ierr = PetscSFCreate(system.comm().get(), &star_forest);
840  CHKERRABORT(system.comm().get(),ierr);
841 
842  // TODO: We should create pointers to arrays so we don't have to copy
843  // and then can use PETSC_OWN_POINTER where PETSc will take ownership
844  // and delete the memory for us. But then we'd have to use PetscMalloc.
845  ierr = PetscSFSetGraph(star_forest,
846  n_roots,
847  n_leaves,
848  local_dofs,
849  PETSC_COPY_VALUES,
850  remote_dofs,
851  PETSC_COPY_VALUES);
852  CHKERRABORT(system.comm().get(),ierr);
853 
854  STOP_LOG ("build_sf()", "PetscDMWrapper");
855  }
856 
857  void PetscDMWrapper::set_point_range_in_section(const System & system,
858  PetscSection & section,
859  std::unordered_map<dof_id_type,dof_id_type> & node_map,
860  std::unordered_map<dof_id_type,dof_id_type> & elem_map,
861  std::map<dof_id_type,unsigned int> & scalar_map)
862  {
863  // We're expecting this to be empty coming in
864  libmesh_assert(node_map.empty());
865 
866  // We need to count up the number of active "points" on this processor.
867  // Nominally, a "point" in PETSc parlance is a geometric object that can
868  // hold DoFs, i.e. node, edge, face, elem. Since we handle the mesh and are only
869  // interested in solvers, the only thing PETSc needs is a unique *local* number
870  // for each "point" that has active DoFs; note however this local numbering
871  // we construct must be continuous.
872  //
873  // In libMesh, for most finite elements, we just associate those DoFs with the
874  // geometric nodes. So we can loop over the nodes on this processor and check
875  // whether any of the fields have active DoFs on that node.
876  // If so, then we tell PETSc about that "point". At this stage, we just need
877  // to count up how many active "points" we have and cache the local number to global id
878  // mapping.
879 
880 
881  // These will be our local counters. pstart should always be zero.
882  // pend will track our local "point" count.
883  // If we're on a processor who coarsened the mesh to have no local elements,
884  // we should make an empty PetscSection. An empty PetscSection is specified
885  // by passing [0,0) to the PetscSectionSetChart call at the end. So, if we
886  // have nothing on this processor, these are the correct values to pass to
887  // PETSc.
888  dof_id_type pstart = 0;
889  dof_id_type pend = 0;
890 
891  const MeshBase & mesh = system.get_mesh();
892 
893  const DofMap & dof_map = system.get_dof_map();
894 
895  // If we don't have any local dofs, then there's nothing to tell to the PetscSection
896  if (dof_map.n_local_dofs() > 0)
897  {
898  // Conservative estimate of space needed so we don't thrash
899  node_map.reserve(mesh.n_local_nodes());
900  elem_map.reserve(mesh.n_active_local_elem());
901 
902  // We loop over active elements and then cache the global/local node mapping to make sure
903  // we only count active nodes. For example, if we're calling this function and we're
904  // not on the finest level in the mesh tree, we don't want to include nodes of child elements
905  // that aren't active on this level.
906  for (const auto & elem : mesh.active_local_element_ptr_range())
907  {
908  for (const Node & node : elem->node_ref_range())
909  {
910  // get the global id number of local node n
911 
912  // Only register nodes with the PetscSection if they have dofs that belong to
913  // this processor. Even though we're looping over active local elements, the dofs associated
914  // with the node may belong to a different processor. The processor who owns
915  // those dofs will register that node with the PetscSection on that processor.
916  std::vector<dof_id_type> node_dof_indices;
917  dof_map.dof_indices( &node, node_dof_indices );
918  if( !node_dof_indices.empty() && dof_map.local_index(node_dof_indices[0]) )
919  {
920 #ifndef NDEBUG
921  // We're assuming that if the first dof for this node belongs to this processor,
922  // then all of them do.
923  for( auto dof : node_dof_indices )
924  libmesh_assert(dof_map.local_index(dof));
925 #endif
926  // Cache the global/local mapping if we haven't already
927  // Then increment our local count
928  dof_id_type node_id = node.id();
929  if( node_map.count(node_id) == 0 )
930  {
931  node_map.insert(std::make_pair(node_id,pend));
932  pend++;
933  }
934  }
935  }
936 
937  // Some finite elements, e.g. Hierarchic, associate element interior DoFs with the element
938  // rather than the node (since we ought to be able to use Hierarchic elements on a QUAD4,
939  // which has no interior node). Thus, we need to check element interiors for DoFs
940  // as well and, if the finite element has them, count the Elem in our
941  // "point" accounting.
942  if( elem->n_dofs(system.number()) > 0 )
943  {
944  dof_id_type elem_id = elem->id();
945  elem_map.insert(std::make_pair(elem_id,pend));
946  pend++;
947  }
948  }
949 
950  // SCALAR dofs live on the "last" processor, so only work there if there are any
951  if( dof_map.n_SCALAR_dofs() > 0 && (system.processor_id() == (system.n_processors()-1)) )
952  {
953  // Loop through all the variables and cache the scalar ones. We cache the
954  // SCALAR variable index along with the local point to make it easier when
955  // we have to register dofs with the PetscSection
956  for (auto v : IntRange<unsigned int>(0, system.n_vars()))
957  {
958  if( system.variable(v).type().family == SCALAR )
959  {
960  scalar_map.insert(std::make_pair(pend,v));
961  pend++;
962  }
963  }
964  }
965 
966  }
967 
968  PetscErrorCode ierr = PetscSectionSetChart(section, pstart, pend);
969  CHKERRABORT(system.comm().get(),ierr);
970  }
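 // Illustrative example (not from the original source): a processor that
 // owns dofs on 5 active nodes and one element with interior dofs ends up
 // with node_map entries 0..4, an elem_map entry 5, and a chart of [0,6);
 // a processor left with no local dofs after coarsening gets the empty
 // chart [0,0).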
971 
972  void PetscDMWrapper::add_dofs_to_section(const System & system,
973  PetscSection & section,
974  const std::unordered_map<dof_id_type,dof_id_type> & node_map,
975  const std::unordered_map<dof_id_type,dof_id_type> & elem_map,
976  const std::map<dof_id_type,unsigned int> & scalar_map)
977  {
978  const MeshBase & mesh = system.get_mesh();
979 
980  PetscErrorCode ierr;
981 
982  // Now we go through and add dof information for each "point".
983  //
984  // In libMesh, for most finite elements, we just associate those DoFs with the
985  // geometric nodes. So we can loop over the nodes we cached in the node_map and
986  // add the DoFs for each field at that node. We need to give PETSc the local id
987  // we built up in the node map.
988  for (const auto & nmap : node_map )
989  {
990  const dof_id_type global_node_id = nmap.first;
991  const dof_id_type local_node_id = nmap.second;
992 
993  libmesh_assert( mesh.query_node_ptr(global_node_id) );
994 
995  const Node & node = mesh.node_ref(global_node_id);
996 
997  this->add_dofs_helper(system,node,local_node_id,section);
998  }
999 
1000  // Some finite element types associate dofs with the element. So now we go through
1001  // any of those, using the Elem as the "point" we add to the PetscSection along with its
1002  // accompanying dofs.
1003  for (const auto & emap : elem_map )
1004  {
1005  const dof_id_type global_elem_id = emap.first;
1006  const dof_id_type local_elem_id = emap.second;
1007 
1008  libmesh_assert( mesh.query_elem_ptr(global_elem_id) );
1009 
1010  const Elem & elem = mesh.elem_ref(global_elem_id);
1011 
1012  this->add_dofs_helper(system,elem,local_elem_id,section);
1013  }
1014 
1015  // Now add any SCALAR dofs to the PetscSection
1016  // SCALAR dofs live on the "last" processor, so only work there if there are any
1017  if (system.processor_id() == (system.n_processors()-1))
1018  {
1019  for (const auto & smap : scalar_map )
1020  {
1021  const dof_id_type local_id = smap.first;
1022  const unsigned int scalar_var = smap.second;
1023 
1024  // The number of SCALAR dofs comes from the variable order
1025  const int n_dofs = system.variable(scalar_var).type().order.get_order();
1026 
1027  ierr = PetscSectionSetFieldDof( section, local_id, scalar_var, n_dofs );
1028  CHKERRABORT(system.comm().get(),ierr);
1029 
1030  // In the SCALAR case, there are no other variables associated with the "point",
1031  // so the total number of dofs on the point is the same as that for the field.
1032  ierr = PetscSectionSetDof( section, local_id, n_dofs );
1033  CHKERRABORT(system.comm().get(),ierr);
1034  }
1035  }
1036 
1037  }
1038 
1039  void PetscDMWrapper::add_dofs_helper(const System & system,
1040  const DofObject & dof_object,
1041  dof_id_type local_id,
1042  PetscSection & section)
1043  {
1044  unsigned int total_n_dofs_at_dofobject = 0;
1045 
1046  // We are assuming variables are also numbered 0 to n_vars()-1
1047  for (auto v : IntRange<unsigned int>(0, system.n_vars()))
1048  {
1049  unsigned int n_dofs_at_dofobject = dof_object.n_dofs(system.number(), v);
1050 
1051  if( n_dofs_at_dofobject > 0 )
1052  {
1053  PetscErrorCode ierr = PetscSectionSetFieldDof( section,
1054  local_id,
1055  v,
1056  n_dofs_at_dofobject );
1057 
1058  CHKERRABORT(system.comm().get(),ierr);
1059 
1060  total_n_dofs_at_dofobject += n_dofs_at_dofobject;
1061  }
1062  }
1063 
1064  libmesh_assert_equal_to(total_n_dofs_at_dofobject, dof_object.n_dofs(system.number()));
1065 
1066  PetscErrorCode ierr =
1067  PetscSectionSetDof( section, local_id, total_n_dofs_at_dofobject );
1068  CHKERRABORT(system.comm().get(),ierr);
1069  }
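 // Illustrative example (not from the original source): for a node carrying
 // one dof for each of two variables, the helper issues
 //   PetscSectionSetFieldDof(section, local_id, 0, 1);
 //   PetscSectionSetFieldDof(section, local_id, 1, 1);
 //   PetscSectionSetDof(section, local_id, 2);
 // so the per-field counts always sum to the total dof count on the point.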
1070 
1071 
1072  dof_id_type PetscDMWrapper::check_section_n_dofs(PetscSection & section)
1073  {
1074  PetscInt n_local_dofs = 0;
1075 
1076  // Grab the starting and ending points from the section
1077  PetscInt pstart, pend;
1078  PetscErrorCode ierr = PetscSectionGetChart(section, &pstart, &pend);
1079  if (ierr)
1080  libmesh_error();
1081 
1082  // Count up the n_dofs for each point from the section
1083  for( PetscInt p = pstart; p < pend; p++ )
1084  {
1085  PetscInt n_dofs;
1086  ierr = PetscSectionGetDof(section,p,&n_dofs);
1087  if (ierr)
1088  libmesh_error();
1089  n_local_dofs += n_dofs;
1090  }
1091 
1092  static_assert(sizeof(PetscInt) == sizeof(dof_id_type),"PetscInt is not a dof_id_type!");
1093  return n_local_dofs;
1094  }
1095 
1096  void PetscDMWrapper::init_dm_data(unsigned int n_levels, const Parallel::Communicator & comm)
1097  {
1098  _dms.resize(n_levels);
1099  _sections.resize(n_levels);
1100  _star_forests.resize(n_levels);
1101  _ctx_vec.resize(n_levels);
1102  _pmtx_vec.resize(n_levels);
1103  _subpmtx_vec.resize(n_levels);
1104  _vec_vec.resize(n_levels);
1105  _mesh_dof_sizes.resize(n_levels);
1106  _mesh_dof_loc_sizes.resize(n_levels);
1107 
1108  for( unsigned int i = 0; i < n_levels; i++ )
1109  {
1110  _dms[i] = libmesh_make_unique<DM>();
1111  _sections[i] = libmesh_make_unique<PetscSection>();
1112  _star_forests[i] = libmesh_make_unique<PetscSF>();
1113  _ctx_vec[i] = libmesh_make_unique<PetscDMContext>();
1114  _pmtx_vec[i]= libmesh_make_unique<PetscMatrix<Number>>(comm);
1115  _subpmtx_vec[i]= libmesh_make_unique<PetscMatrix<Number>>(comm);
1116  _vec_vec[i] = libmesh_make_unique<PetscVector<Number>>(comm);
1117  }
1118  }
1119 
1120 } // end namespace libMesh
1121 
1122 #endif // #if LIBMESH_ENABLE_AMR && LIBMESH_HAVE_METAPHYSICL
1123 #endif // PETSC_VERSION
1124 #endif // LIBMESH_HAVE_PETSC