NonlinearSystemBase.C
//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "NonlinearSystemBase.h"
#include "AuxiliarySystem.h"
#include "Problem.h"
#include "FEProblem.h"
#include "MooseVariableFE.h"
#include "MooseVariableScalar.h"
#include "PetscSupport.h"
#include "Factory.h"
#include "ParallelUniqueId.h"
#include "ThreadedElementLoop.h"
#include "MaterialData.h"
#include "ComputeResidualThread.h"
#include "ComputeResidualAndJacobianThread.h"
#include "ComputeFVFluxThread.h"
#include "ComputeJacobianThread.h"
#include "ComputeNodalKernelsThread.h"
#include "ComputeNodalKernelBcsThread.h"
#include "ComputeDiracThread.h"
#include "ComputeMortarFunctor.h"
#include "TimeKernel.h"
#include "BoundaryCondition.h"
#include "DirichletBCBase.h"
#include "NodalBCBase.h"
#include "IntegratedBCBase.h"
#include "DGKernel.h"
#include "InterfaceKernelBase.h"
#include "ElementDamper.h"
#include "NodalDamper.h"
#include "GeneralDamper.h"
#include "DisplacedProblem.h"
#include "NearestNodeLocator.h"
#include "PenetrationLocator.h"
#include "NodalConstraint.h"
#include "NodeFaceConstraint.h"
#include "NodeElemConstraint.h"
#include "MortarConstraint.h"
#include "ElemElemConstraint.h"
#include "ScalarKernelBase.h"
#include "Parser.h"
#include "Split.h"
#include "MooseMesh.h"
#include "MooseUtils.h"
#include "MooseApp.h"
#include "NodalKernelBase.h"
#include "DiracKernelBase.h"
#include "TimeIntegrator.h"
#include "Predictor.h"
#include "Assembly.h"
#include "ElementPairLocator.h"
#include "ODETimeKernel.h"
#include "ADKernel.h"
#include "ADDirichletBCBase.h"
#include "Moose.h"
#include "ConsoleStream.h"
#include "MooseError.h"
#include "FVElementalKernel.h"
#include "FVFluxKernel.h"
#include "FVFluxBC.h"
#include "FVInterfaceKernel.h"
#include "UserObject.h"
#include "HDGKernel.h"
#include "HDGIntegratedBC.h"

// libMesh
#include "libmesh/nonlinear_solver.h"
#include "libmesh/quadrature_gauss.h"
#include "libmesh/dense_vector.h"
#include "libmesh/boundary_info.h"
#include "libmesh/petsc_matrix.h"
#include "libmesh/petsc_vector.h"
#include "libmesh/petsc_nonlinear_solver.h"
#include "libmesh/numeric_vector.h"
#include "libmesh/mesh.h"
#include "libmesh/dense_subvector.h"
#include "libmesh/dense_submatrix.h"
#include "libmesh/dof_map.h"
#include "libmesh/sparse_matrix.h"
#include "libmesh/default_coupling.h"
#include "libmesh/diagonal_matrix.h"
#include "libmesh/fe_interface.h"
#include "libmesh/petsc_solver_exception.h"

#include <ios>

#include "petscsnes.h"
#include <PetscDMMoose.h>
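// DMCreate_Moose is the C-linkage constructor for MOOSE's custom PETSc DM type
// (implemented in PetscDMMoose); PETSc resolves this symbol when a "moose" DM is
// requested, which is how the field-split decompositions configured below reach
// the solver.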
EXTERN_C_BEGIN
extern PetscErrorCode DMCreate_Moose(DM);
EXTERN_C_END

using namespace libMesh;

NonlinearSystemBase::NonlinearSystemBase(FEProblemBase & fe_problem,
                                         System & sys,
                                         const std::string & name)
  : SolverSystem(fe_problem, fe_problem, name, Moose::VAR_SOLVER),
    PerfGraphInterface(fe_problem.getMooseApp().perfGraph(), "NonlinearSystemBase"),
    _sys(sys),
    _last_nl_rnorm(0.),
    _current_nl_its(0),
    _residual_ghosted(NULL),
    _Re_time_tag(-1),
    _Re_time(NULL),
    _Re_non_time_tag(-1),
    _Re_non_time(NULL),
    _scalar_kernels(/*threaded=*/false),
    _nodal_bcs(/*threaded=*/false),
    _preset_nodal_bcs(/*threaded=*/false),
    _ad_preset_nodal_bcs(/*threaded=*/false),
    _splits(/*threaded=*/false),
    _increment_vec(NULL),
    _use_finite_differenced_preconditioner(false),
    _fdcoloring(nullptr),
    _have_decomposition(false),
    _use_field_split_preconditioner(false),
    _add_implicit_geometric_coupling_entries_to_jacobian(false),
    _assemble_constraints_separately(false),
    _need_residual_ghosted(false),
    _debugging_residuals(false),
    _doing_dg(false),
    _n_iters(0),
    _n_linear_iters(0),
    _n_residual_evaluations(0),
    _final_residual(0.),
    _computing_pre_smo_residual(false),
    _pre_smo_residual(0),
    _initial_residual(0),
    _use_pre_smo_residual(false),
    _print_all_var_norms(false),
    _has_save_in(false),
    _has_diag_save_in(false),
    _has_nodalbc_save_in(false),
    _has_nodalbc_diag_save_in(false),
    _computed_scaling(false),
    _compute_scaling_once(true),
    _resid_vs_jac_scaling_param(0),
    _off_diagonals_in_auto_scaling(false),
    _auto_scaling_initd(false)
{
  // Don't need to add the matrix - it already exists (for now)
  _fe_problem.addMatrixTag("SYSTEM");

  // The time matrix tag is not normally used - but must be added to the system
  // in case it is so that objects can have 'time' in their matrix tags by default
  _fe_problem.addMatrixTag("TIME");

  _Re_tag = _fe_problem.addVectorTag("RESIDUAL");

  if (!_fe_problem.defaultGhosting())
  {
    auto & dof_map = _sys.get_dof_map();
    dof_map.remove_algebraic_ghosting_functor(dof_map.default_algebraic_ghosting());
    dof_map.set_implicit_neighbor_dofs(false);
  }
}

NonlinearSystemBase::~NonlinearSystemBase() = default;

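// preInit runs after variables and dofs exist but before the system vectors are used:
// dampers get their work vector set up, and an optional serialized copy of the residual
// is sized to hold every dof on every rank.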
void
NonlinearSystemBase::preInit()
{
  SolverSystem::preInit();

  if (_fe_problem.hasDampers())
    setupDampers();

  if (_residual_copy.get())
    _residual_copy->init(_sys.n_dofs(), false, SERIAL);
}

void
NonlinearSystemBase::turnOffJacobian()
{
  nonlinearSolver()->jacobian = NULL;
}

void
NonlinearSystemBase::initialSetup()
{
  TIME_SECTION("nlInitialSetup", 2, "Setting Up Nonlinear System");

  SolverSystem::initialSetup();

  {
    TIME_SECTION("kernelsInitialSetup", 2, "Setting Up Kernels/BCs/Constraints");

    for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
    {
      _kernels.initialSetup(tid);
      _nodal_kernels.initialSetup(tid);
      _dirac_kernels.initialSetup(tid);
      if (_doing_dg)
        _dg_kernels.initialSetup(tid);
      _interface_kernels.initialSetup(tid);
      _element_dampers.initialSetup(tid);
      _nodal_dampers.initialSetup(tid);
      _integrated_bcs.initialSetup(tid);

      if (_fe_problem.haveFV())
      {
        std::vector<FVElementalKernel *> fv_elemental_kernels;
        _fe_problem.theWarehouse()
            .query()
            .template condition<AttribSystem>("FVElementalKernel")
            .template condition<AttribThread>(tid)
            .queryInto(fv_elemental_kernels);

        for (auto * fv_kernel : fv_elemental_kernels)
          fv_kernel->initialSetup();

        std::vector<FVFluxKernel *> fv_flux_kernels;
        _fe_problem.theWarehouse()
            .query()
            .template condition<AttribSystem>("FVFluxKernel")
            .template condition<AttribThread>(tid)
            .queryInto(fv_flux_kernels);

        for (auto * fv_kernel : fv_flux_kernels)
          fv_kernel->initialSetup();
      }
    }

    _scalar_kernels.initialSetup();
    _constraints.initialSetup();
    _general_dampers.initialSetup();
    _nodal_bcs.initialSetup();
  }

  {
    TIME_SECTION("mortarSetup", 2, "Initializing Mortar Interfaces");

    auto create_mortar_functors = [this](const bool displaced)
    {
      // go over mortar interfaces and construct functors
      const auto & mortar_interfaces = _fe_problem.getMortarInterfaces(displaced);
      for (const auto & mortar_interface : mortar_interfaces)
      {
        const auto primary_secondary_boundary_pair = mortar_interface.first;
        if (!_constraints.hasActiveMortarConstraints(primary_secondary_boundary_pair, displaced))
          continue;

        const auto & mortar_generation_object = mortar_interface.second;

        auto & mortar_constraints =
            _constraints.getActiveMortarConstraints(primary_secondary_boundary_pair, displaced);

        auto & subproblem = displaced
                                ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                                : static_cast<SubProblem &>(_fe_problem);

        auto & mortar_functors =
            displaced ? _displaced_mortar_functors : _undisplaced_mortar_functors;

        mortar_functors.emplace(primary_secondary_boundary_pair,
                                ComputeMortarFunctor(mortar_constraints,
                                                     mortar_generation_object,
                                                     subproblem,
                                                     _fe_problem,
                                                     displaced,
                                                     subproblem.assembly(0, number())));
      }
    };

    create_mortar_functors(false);
    create_mortar_functors(true);
  }

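  // Automatic scaling equilibrates the variables by measuring the magnitude of their
  // residual and/or Jacobian contributions; the matrix allocated below is where those
  // per-dof measurements are accumulated.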
  if (_automatic_scaling)
  {
    if (_off_diagonals_in_auto_scaling)
      _scaling_matrix = std::make_unique<OffDiagonalScalingMatrix<Number>>(_communicator);
    else
      _scaling_matrix = std::make_unique<DiagonalMatrix<Number>>(_communicator);
  }
}

void
NonlinearSystemBase::timestepSetup()
{
  SolverSystem::timestepSetup();

  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    _kernels.timestepSetup(tid);
    _nodal_kernels.timestepSetup(tid);
    _dirac_kernels.timestepSetup(tid);
    if (_doing_dg)
      _dg_kernels.timestepSetup(tid);
    _interface_kernels.timestepSetup(tid);
    _element_dampers.timestepSetup(tid);
    _nodal_dampers.timestepSetup(tid);
    _integrated_bcs.timestepSetup(tid);

    if (_fe_problem.haveFV())
    {
      std::vector<FVFluxBC *> bcs;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVFluxBC")
          .template condition<AttribThread>(tid)
          .queryInto(bcs);

      std::vector<FVInterfaceKernel *> iks;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVInterfaceKernel")
          .template condition<AttribThread>(tid)
          .queryInto(iks);

      std::vector<FVFluxKernel *> kernels;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVFluxKernel")
          .template condition<AttribThread>(tid)
          .queryInto(kernels);

      for (auto * bc : bcs)
        bc->timestepSetup();
      for (auto * ik : iks)
        ik->timestepSetup();
      for (auto * kernel : kernels)
        kernel->timestepSetup();
    }
  }
  _scalar_kernels.timestepSetup();
  _constraints.timestepSetup();
  _general_dampers.timestepSetup();
  _nodal_bcs.timestepSetup();
}

void
NonlinearSystemBase::customSetup(const ExecFlagType & exec_type)
{
  SolverSystem::customSetup(exec_type);

  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    _kernels.customSetup(exec_type, tid);
    _nodal_kernels.customSetup(exec_type, tid);
    _dirac_kernels.customSetup(exec_type, tid);
    if (_doing_dg)
      _dg_kernels.customSetup(exec_type, tid);
    _interface_kernels.customSetup(exec_type, tid);
    _element_dampers.customSetup(exec_type, tid);
    _nodal_dampers.customSetup(exec_type, tid);
    _integrated_bcs.customSetup(exec_type, tid);

    if (_fe_problem.haveFV())
    {
      std::vector<FVFluxBC *> bcs;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVFluxBC")
          .template condition<AttribThread>(tid)
          .queryInto(bcs);

      std::vector<FVInterfaceKernel *> iks;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVInterfaceKernel")
          .template condition<AttribThread>(tid)
          .queryInto(iks);

      std::vector<FVFluxKernel *> kernels;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVFluxKernel")
          .template condition<AttribThread>(tid)
          .queryInto(kernels);

      for (auto * bc : bcs)
        bc->customSetup(exec_type);
      for (auto * ik : iks)
        ik->customSetup(exec_type);
      for (auto * kernel : kernels)
        kernel->customSetup(exec_type);
    }
  }
  _scalar_kernels.customSetup(exec_type);
  _constraints.customSetup(exec_type);
  _general_dampers.customSetup(exec_type);
  _nodal_bcs.customSetup(exec_type);
}

void
NonlinearSystemBase::setDecomposition(const std::vector<std::string> & splits)
{
  if (splits.size() && splits.size() != 1)
    mooseError("Only a single top-level split is allowed in a Problem's decomposition.");

  if (splits.size())
  {
    _decomposition_split = splits[0];
    _have_decomposition = true;
  }
  else
    _have_decomposition = false;
}

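// The single top-level Split named in setDecomposition is the root of the decomposition
// tree; its setup() call below translates the tree into the corresponding PETSc
// field-split configuration. A typical input-file sketch (block and split names are
// illustrative):
//
//   [Preconditioning]
//     [fsp]
//       type = FSP
//       topsplit = 'uv'
//       [uv]
//         splitting = 'u v'
//         splitting_type = additive
//       []
//     []
//   []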
void
NonlinearSystemBase::setupFieldDecomposition()
{
  if (!_have_decomposition)
    return;

  std::shared_ptr<Split> top_split = getSplit(_decomposition_split);
  top_split->setup(*this);
}

void
NonlinearSystemBase::addKernel(const std::string & kernel_name,
                               const std::string & name,
                               InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    // Create the kernel object via the factory and add to warehouse
    std::shared_ptr<KernelBase> kernel =
        _factory.create<KernelBase>(kernel_name, name, parameters, tid);
    _kernels.addObject(kernel, tid);
    postAddResidualObject(*kernel);
  }

  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
    _has_save_in = true;
  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
    _has_diag_save_in = true;
}

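// 'save_in' copies an object's residual contributions into auxiliary variables and
// 'diag_save_in' does the same for the Jacobian diagonal; the flags cached by the add*
// methods tell the assembly loops that the auxiliary system must be closed and updated
// after residual/Jacobian evaluations.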
void
NonlinearSystemBase::addHDGKernel(const std::string & kernel_name,
                                  const std::string & name,
                                  InputParameters & parameters)
{
  // The hybridized objects require that the residual and Jacobian be computed together
  _fe_problem.setResidualAndJacobianTogether();

  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    parameters.set<MooseObjectWarehouse<HDGIntegratedBC> *>("hibc_warehouse") = &_hybridized_ibcs;
    // Create the kernel object via the factory and add to warehouse
    auto kernel = _factory.create<HDGKernel>(kernel_name, name, parameters, tid);
    _kernels.addObject(kernel, tid);
    _hybridized_kernels.addObject(kernel, tid);
    postAddResidualObject(*kernel);
  }
}

void
NonlinearSystemBase::addHDGIntegratedBC(const std::string & bc_name,
                                        const std::string & name,
                                        InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    // Create the bc object via the factory and add to warehouse
    auto bc = _factory.create<HDGIntegratedBC>(bc_name, name, parameters, tid);
    _hybridized_ibcs.addObject(bc, tid);
  }
}

void
NonlinearSystemBase::addNodalKernel(const std::string & kernel_name,
                                    const std::string & name,
                                    InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    // Create the kernel object via the factory and add to the warehouse
    std::shared_ptr<NodalKernelBase> kernel =
        _factory.create<NodalKernelBase>(kernel_name, name, parameters, tid);
    _nodal_kernels.addObject(kernel, tid);
    postAddResidualObject(*kernel);
  }

  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
    _has_save_in = true;
  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
    _has_diag_save_in = true;
}

void
NonlinearSystemBase::addScalarKernel(const std::string & kernel_name,
                                     const std::string & name,
                                     InputParameters & parameters)
{
  std::shared_ptr<ScalarKernelBase> kernel =
      _factory.create<ScalarKernelBase>(kernel_name, name, parameters);
  postAddResidualObject(*kernel);
  _scalar_kernels.addObject(kernel);
}

void
NonlinearSystemBase::addBoundaryCondition(const std::string & bc_name,
                                          const std::string & name,
                                          InputParameters & parameters)
{
  // ThreadID
  THREAD_ID tid = 0;

  // Create the object
  std::shared_ptr<BoundaryCondition> bc =
      _factory.create<BoundaryCondition>(bc_name, name, parameters, tid);
  postAddResidualObject(*bc);

  // Active BoundaryIDs for the object
  const std::set<BoundaryID> & boundary_ids = bc->boundaryIDs();
  auto bc_var = dynamic_cast<const MooseVariableFieldBase *>(&bc->variable());
  _vars[tid].addBoundaryVar(boundary_ids, bc_var);

  // Cast to the various types of BCs
  std::shared_ptr<NodalBCBase> nbc = std::dynamic_pointer_cast<NodalBCBase>(bc);
  std::shared_ptr<IntegratedBCBase> ibc = std::dynamic_pointer_cast<IntegratedBCBase>(bc);

  // NodalBCBase
  if (nbc)
  {
    if (nbc->checkNodalVar() && !nbc->variable().isNodal())
      mooseError("Trying to use nodal boundary condition '",
                 nbc->name(),
                 "' on a non-nodal variable '",
                 nbc->variable().name(),
                 "'.");

    _nodal_bcs.addObject(nbc);
    _vars[tid].addBoundaryVars(boundary_ids, nbc->getCoupledVars());

    if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
      _has_nodalbc_save_in = true;
    if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
      _has_nodalbc_diag_save_in = true;

    // DirichletBCs that are preset
    std::shared_ptr<DirichletBCBase> dbc = std::dynamic_pointer_cast<DirichletBCBase>(bc);
    if (dbc && dbc->preset())
      _preset_nodal_bcs.addObject(dbc);

    std::shared_ptr<ADDirichletBCBase> addbc = std::dynamic_pointer_cast<ADDirichletBCBase>(bc);
    if (addbc && addbc->preset())
      _ad_preset_nodal_bcs.addObject(addbc);
  }

  // IntegratedBCBase
  else if (ibc)
  {
    _integrated_bcs.addObject(ibc, tid);
    _vars[tid].addBoundaryVars(boundary_ids, ibc->getCoupledVars());

    if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
      _has_save_in = true;
    if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
      _has_diag_save_in = true;

    for (tid = 1; tid < libMesh::n_threads(); tid++)
    {
      // Create the object
      bc = _factory.create<BoundaryCondition>(bc_name, name, parameters, tid);

      // Give users opportunity to set some parameters
      postAddResidualObject(*bc);

      // Active BoundaryIDs for the object
      const std::set<BoundaryID> & boundary_ids = bc->boundaryIDs();
      _vars[tid].addBoundaryVar(boundary_ids, bc_var);

      ibc = std::static_pointer_cast<IntegratedBCBase>(bc);

      _integrated_bcs.addObject(ibc, tid);
      _vars[tid].addBoundaryVars(boundary_ids, ibc->getCoupledVars());
    }
  }

  else
    mooseError("Unknown BoundaryCondition type for object named ", bc->name());
}

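// NodalBCs live in a non-threaded warehouse because nodal loops partition work by node,
// whereas IntegratedBCs are replicated per thread for the threaded element loops; that is
// why only the integrated branch above re-creates the object for each additional thread.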
void
NonlinearSystemBase::addConstraint(const std::string & c_name,
                                   const std::string & name,
                                   InputParameters & parameters)
{
  std::shared_ptr<Constraint> constraint = _factory.create<Constraint>(c_name, name, parameters);
  _constraints.addObject(constraint);
  postAddResidualObject(*constraint);

  if (constraint && constraint->addCouplingEntriesToJacobian())
    addImplicitGeometricCouplingEntriesToJacobian(true);
}

void
NonlinearSystemBase::addDiracKernel(const std::string & kernel_name,
                                    const std::string & name,
                                    InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    std::shared_ptr<DiracKernelBase> kernel =
        _factory.create<DiracKernelBase>(kernel_name, name, parameters, tid);
    postAddResidualObject(*kernel);
    _dirac_kernels.addObject(kernel, tid);
  }
}

void
NonlinearSystemBase::addDGKernel(std::string dg_kernel_name,
                                 const std::string & name,
                                 InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
  {
    auto dg_kernel = _factory.create<DGKernelBase>(dg_kernel_name, name, parameters, tid);
    _dg_kernels.addObject(dg_kernel, tid);
    postAddResidualObject(*dg_kernel);
  }

  _doing_dg = true;

  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
    _has_save_in = true;
  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
    _has_diag_save_in = true;
}

void
NonlinearSystemBase::addInterfaceKernel(std::string interface_kernel_name,
                                        const std::string & name,
                                        InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
  {
    std::shared_ptr<InterfaceKernelBase> interface_kernel =
        _factory.create<InterfaceKernelBase>(interface_kernel_name, name, parameters, tid);
    postAddResidualObject(*interface_kernel);

    const std::set<BoundaryID> & boundary_ids = interface_kernel->boundaryIDs();
    auto ik_var = dynamic_cast<const MooseVariableFieldBase *>(&interface_kernel->variable());
    _vars[tid].addBoundaryVar(boundary_ids, ik_var);

    _interface_kernels.addObject(interface_kernel, tid);
    _vars[tid].addBoundaryVars(boundary_ids, interface_kernel->getCoupledVars());
  }
}

void
NonlinearSystemBase::addDamper(const std::string & damper_name,
                               const std::string & name,
                               InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
  {
    std::shared_ptr<Damper> damper = _factory.create<Damper>(damper_name, name, parameters, tid);

    // Attempt to cast to the damper types
    std::shared_ptr<ElementDamper> ed = std::dynamic_pointer_cast<ElementDamper>(damper);
    std::shared_ptr<NodalDamper> nd = std::dynamic_pointer_cast<NodalDamper>(damper);
    std::shared_ptr<GeneralDamper> gd = std::dynamic_pointer_cast<GeneralDamper>(damper);

    if (gd)
    {
      _general_dampers.addObject(gd);
      break; // not threaded
    }
    else if (ed)
      _element_dampers.addObject(ed, tid);
    else if (nd)
      _nodal_dampers.addObject(nd, tid);
    else
      mooseError("Invalid damper type");
  }
}

void
NonlinearSystemBase::addSplit(const std::string & split_name,
                              const std::string & name,
                              InputParameters & parameters)
{
  std::shared_ptr<Split> split = _factory.create<Split>(split_name, name, parameters);
  _splits.addObject(split);
}

std::shared_ptr<Split>
NonlinearSystemBase::getSplit(const std::string & name)
{
  return _splits.getActiveObject(name);
}

bool
NonlinearSystemBase::shouldEvaluatePreSMOResidual() const
{
  if (_fe_problem.solverParams(number())._type == Moose::ST_LINEAR)
    return false;

  // The legacy behavior (#10464) _always_ performs the pre-SMO residual evaluation
  // regardless of whether it is needed.
  //
  // This is not ideal and has been fixed by #23472. This legacy option ensures a smooth transition
  // to the new behavior. Modules and Apps that want to migrate to the new behavior should set this
  // parameter to false.
  if (_app.parameters().get<bool>("use_legacy_initial_residual_evaluation_behavior"))
    return true;

  return _use_pre_smo_residual;
}

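// The pre-SMO residual is taken before any Solution-Modifying Objects (predictors,
// constraint enforcement, preset BCs, ...) alter the solution at the start of a step;
// referencing it makes relative convergence checks insensitive to those modifications.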
Real
NonlinearSystemBase::referenceResidual() const
{
  return usePreSMOResidual() ? preSMOResidual() : initialResidual();
}

Real
NonlinearSystemBase::preSMOResidual() const
{
  if (!shouldEvaluatePreSMOResidual())
    mooseError("pre-SMO residual is requested but not evaluated.");

  return _pre_smo_residual;
}

Real
NonlinearSystemBase::initialResidual() const
{
  return _initial_residual;
}

void
NonlinearSystemBase::setInitialResidual(Real r)
{
  _initial_residual = r;
}

void
NonlinearSystemBase::zeroVectorForResidual(const std::string & vector_name)
{
  for (unsigned int i = 0; i < _vecs_to_zero_for_residual.size(); ++i)
    if (vector_name == _vecs_to_zero_for_residual[i])
      return;

  _vecs_to_zero_for_residual.push_back(vector_name);
}

void
NonlinearSystemBase::computeResidualTag(NumericVector<Number> & residual, TagID tag_id)
{
  _nl_vector_tags.clear();
  _nl_vector_tags.insert(tag_id);
  associateVectorToTag(residual, tag_id);

  computeResidualTags(_nl_vector_tags);

  disassociateVectorFromTag(residual, tag_id);
}

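// Tag-based assembly in a nutshell: the caller-owned vector is temporarily associated
// with the requested tag, every object contributing to that tag accumulates into it via
// computeResidualTags, and the association is released before returning.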
void
NonlinearSystemBase::computeResidual(NumericVector<Number> & residual, TagID tag_id)
{
  mooseDeprecated(" Please use computeResidualTag");

  computeResidualTag(residual, tag_id);
}

void
NonlinearSystemBase::computeResidualTags(const std::set<TagID> & tags)
{
  parallel_object_only();

  TIME_SECTION("nl::computeResidualTags", 5);

  bool required_residual = tags.find(residualVectorTag()) == tags.end() ? false : true;

  // not supposed to do anything on the matrix
  deactiveAllMatrixTags();

  for (const auto & numeric_vec : _vecs_to_zero_for_residual)
    if (hasVector(numeric_vec))
    {
      NumericVector<Number> & vec = getVector(numeric_vec);
      vec.close();
      vec.zero();
    }

  try
  {
    zeroTaggedVectors(tags);
    computeResidualInternal(tags);
    closeTaggedVectors(tags);

    if (required_residual)
    {
      auto & residual = getVector(residualVectorTag());
      if (!_time_integrators.empty())
      {
        for (auto & ti : _time_integrators)
          ti->postResidual(residual);
      }
      else
        residual += *_Re_non_time;
      residual.close();
    }
    if (_fe_problem.computingScalingResidual())
      // We don't want to do nodal bcs or anything else
      return;

    computeNodalBCs(tags);
    closeTaggedVectors(tags);

    // If we are debugging residuals we need one more assignment to have the ghosted copy up to
    // date
    if (_need_residual_ghosted && _debugging_residuals && required_residual)
    {
      auto & residual = getVector(residualVectorTag());

      *_residual_ghosted = residual;
      _residual_ghosted->close();
    }
    // Need to close and update the aux system in case residuals were saved to it.
    _fe_problem.getAuxiliarySystem().solution().close();
    if (hasSaveIn())
      _fe_problem.getAuxiliarySystem().update();
  }
  catch (MooseException & e)
  {
    // The buck stops here, we have already handled the exception by
    // calling stopSolve(), it is now up to PETSc to return a
    // "diverged" reason during the next solve.
  }

  // not supposed to do anything on the matrix
  activeAllMatrixTags();
}

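// Matrix tags are switched off for the duration of a pure residual evaluation (and back
// on afterwards) so objects that normally contribute to both vectors and matrices cannot
// accidentally accumulate Jacobian entries here.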
void
NonlinearSystemBase::computeResidualAndJacobianTags(const std::set<TagID> & vector_tags,
                                                    const std::set<TagID> & matrix_tags)
{
  const bool required_residual =
      vector_tags.find(residualVectorTag()) == vector_tags.end() ? false : true;

  try
  {
    zeroTaggedVectors(vector_tags);
    computeResidualAndJacobianInternal(vector_tags, matrix_tags);
    closeTaggedVectors(vector_tags);
    closeTaggedMatrices(matrix_tags);

    if (required_residual)
    {
      auto & residual = getVector(residualVectorTag());
      if (!_time_integrators.empty())
      {
        for (auto & ti : _time_integrators)
          ti->postResidual(residual);
      }
      else
        residual += *_Re_non_time;
      residual.close();
    }

    computeNodalBCsResidualAndJacobian();
    closeTaggedVectors(vector_tags);
    closeTaggedMatrices(matrix_tags);
  }
  catch (MooseException & e)
  {
    // The buck stops here, we have already handled the exception by
    // calling stopSolve(), it is now up to PETSc to return a
    // "diverged" reason during the next solve.
  }
}

void
NonlinearSystemBase::onTimestepBegin()
{
  for (auto & ti : _time_integrators)
    ti->preSolve();
  if (_predictor.get())
    _predictor->timestepSetup();
}

void
NonlinearSystemBase::setInitialSolution()
{
  NumericVector<Number> & initial_solution(solution());
  if (_predictor.get())
  {
    if (_predictor->shouldApply())
    {
      TIME_SECTION("applyPredictor", 2, "Applying Predictor");

      _predictor->apply(initial_solution);
      _fe_problem.predictorCleanup(initial_solution);
    }
    else
      _console << " Skipping predictor this step" << std::endl;
  }

  // do nodal BC
  {
    TIME_SECTION("initialBCs", 2, "Applying BCs To Initial Condition");

    ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
    for (const auto & bnode : bnd_nodes)
    {
      BoundaryID boundary_id = bnode->_bnd_id;
      Node * node = bnode->_node;

      if (node->processor_id() == processor_id())
      {
        // reinit variables in nodes
        _fe_problem.reinitNodeFace(node, boundary_id, 0);

        if (_preset_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
        {
          const auto & preset_bcs = _preset_nodal_bcs.getActiveBoundaryObjects(boundary_id);
          for (const auto & preset_bc : preset_bcs)
            preset_bc->computeValue(initial_solution);
        }
        if (_ad_preset_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
        {
          const auto & preset_bcs_res = _ad_preset_nodal_bcs.getActiveBoundaryObjects(boundary_id);
          for (const auto & preset_bc : preset_bcs_res)
            preset_bc->computeValue(initial_solution);
        }
      }
    }
  }

  _sys.solution->close();
  update();

  // Set constraint secondary values
  setConstraintSecondaryValues(initial_solution, false);

  if (_fe_problem.getDisplacedProblem())
    setConstraintSecondaryValues(initial_solution, true);
}

void
NonlinearSystemBase::setPredictor(std::shared_ptr<Predictor> predictor)
{
  _predictor = predictor;
}

void
NonlinearSystemBase::subdomainSetup(SubdomainID subdomain, THREAD_ID tid)
{
  SolverSystem::subdomainSetup();

  _kernels.subdomainSetup(subdomain, tid);
  _nodal_kernels.subdomainSetup(subdomain, tid);
  _element_dampers.subdomainSetup(subdomain, tid);
  _nodal_dampers.subdomainSetup(subdomain, tid);
}

NumericVector<Number> &
NonlinearSystemBase::getResidualTimeVector()
{
  if (!_Re_time)
  {
    _Re_time_tag = _fe_problem.addVectorTag("TIME");

    // Most applications don't need the expense of ghosting
    ParallelType ptype = _need_residual_ghosted ? GHOSTED : PARALLEL;
    _Re_time = &addVector(_Re_time_tag, false, ptype);
  }
  else if (_need_residual_ghosted && _Re_time->type() == PARALLEL)
  {
    const auto vector_name = _subproblem.vectorTagName(_Re_time_tag);

    // If an application changes its mind, the libMesh API lets us
    // change the vector.
    _Re_time = &system().add_vector(vector_name, false, GHOSTED);
  }

  return *_Re_time;
}

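// Ghosting policy for the residual work vectors: they are allocated PARALLEL by default
// and only upgraded to GHOSTED when a consumer (constraints, residual debugging, ...)
// needs off-processor entries; libMesh's add_vector can re-type an existing vector.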
NumericVector<Number> &
NonlinearSystemBase::getResidualNonTimeVector()
{
  if (!_Re_non_time)
  {
    _Re_non_time_tag = _fe_problem.addVectorTag("NONTIME");

    // Most applications don't need the expense of ghosting
    ParallelType ptype = _need_residual_ghosted ? GHOSTED : PARALLEL;
    _Re_non_time = &addVector(_Re_non_time_tag, false, ptype);
  }
  else if (_need_residual_ghosted && _Re_non_time->type() == PARALLEL)
  {
    const auto vector_name = _subproblem.vectorTagName(_Re_non_time_tag);

    // If an application changes its mind, the libMesh API lets us
    // change the vector.
    _Re_non_time = &system().add_vector(vector_name, false, GHOSTED);
  }

  return *_Re_non_time;
}

NumericVector<Number> &
NonlinearSystemBase::residualVector(TagID tag)
{
  mooseDeprecated("Please use getVector()");
  switch (tag)
  {
    case 0:
      return getResidualNonTimeVector();

    case 1:
      return getResidualTimeVector();

    default:
      mooseError("The required residual vector is not available");
  }
}

void
NonlinearSystemBase::enforceNodalConstraintsResidual(NumericVector<Number> & residual)
{
  THREAD_ID tid = 0; // constraints are going to be done single-threaded
  residual.close();
  if (_constraints.hasActiveNodalConstraints())
  {
    const auto & ncs = _constraints.getActiveNodalConstraints();
    for (const auto & nc : ncs)
    {
      std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
      std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();

      if ((secondary_node_ids.size() > 0) && (primary_node_ids.size() > 0))
      {
        _fe_problem.reinitNodes(primary_node_ids, tid);
        _fe_problem.reinitNodesNeighbor(secondary_node_ids, tid);
        nc->computeResidual(residual);
      }
    }
    _fe_problem.addCachedResidualDirectly(residual, tid);
    residual.close();
  }
}

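// Nodal constraints (e.g. tied nodes) run single-threaded: primary and secondary nodes
// are re-initialized together so one constraint object can contribute to both sides of
// the tie before the cached values are pushed into the residual/Jacobian.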
void
NonlinearSystemBase::enforceNodalConstraintsJacobian()
{
  if (!hasMatrix(systemMatrixTag()))
    mooseError(" A system matrix is required");

  auto & jacobian = getMatrix(systemMatrixTag());
  THREAD_ID tid = 0; // constraints are going to be done single-threaded

  if (_constraints.hasActiveNodalConstraints())
  {
    const auto & ncs = _constraints.getActiveNodalConstraints();
    for (const auto & nc : ncs)
    {
      std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
      std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();

      if ((secondary_node_ids.size() > 0) && (primary_node_ids.size() > 0))
      {
        _fe_problem.reinitNodes(primary_node_ids, tid);
        _fe_problem.reinitNodesNeighbor(secondary_node_ids, tid);
        nc->computeJacobian(jacobian);
      }
    }
    jacobian.close();
  }
}

void
NonlinearSystemBase::reinitNodeFace(const Node & secondary_node,
                                    const BoundaryID secondary_boundary,
                                    const PenetrationInfo & info,
                                    const bool displaced)
{
  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                                : static_cast<SubProblem &>(_fe_problem);

  const Elem * primary_elem = info._elem;
  unsigned int primary_side = info._side_num;
  std::vector<Point> points;
  points.push_back(info._closest_point);

  // *These next steps MUST be done in this order!*
  // ADL: This is a Chesterton's fence situation. I don't know which calls exactly the above comment
  // is referring to. If I had to guess I would guess just the reinitNodeFace and prepareAssembly
  // calls since the former will size the variable's dof indices and then the latter will resize the
  // residual/Jacobian based off the variable's cached dof indices size

  // This reinits the variables that exist on the secondary node
  _fe_problem.reinitNodeFace(&secondary_node, secondary_boundary, 0);

  // This will set aside residual and jacobian space for the variables that have dofs on
  // the secondary node
  _fe_problem.prepareAssembly(0);

  _fe_problem.setNeighborSubdomainID(primary_elem, 0);

  //
  // Reinit material on undisplaced mesh
  //

  const Elem * const undisplaced_primary_elem =
      displaced ? _mesh.elemPtr(primary_elem->id()) : primary_elem;
  const Point undisplaced_primary_physical_point =
      [&points, displaced, primary_elem, undisplaced_primary_elem]()
  {
    if (displaced)
    {
      const Point reference_point =
          FEMap::inverse_map(primary_elem->dim(), primary_elem, points[0]);
      return FEMap::map(primary_elem->dim(), undisplaced_primary_elem, reference_point);
    }
    else
      // If our penetration locator is on the reference mesh, then our undisplaced
      // physical point is simply the point coming from the penetration locator
      return points[0];
  }();

  _fe_problem.reinitNeighborPhys(
      undisplaced_primary_elem, primary_side, {undisplaced_primary_physical_point}, 0);
  // Stateful material properties are only initialized for neighbor material data for internal faces
  // for discontinuous Galerkin methods or for conforming interfaces for interface kernels. We don't
  // have either of those use cases here where we likely have disconnected meshes
  _fe_problem.reinitMaterialsNeighbor(primary_elem->subdomain_id(), 0, /*swap_stateful=*/false);

  // Reinit points for constraint enforcement
  if (displaced)
    subproblem.reinitNeighborPhys(primary_elem, primary_side, points, 0);
}

void
NonlinearSystemBase::setConstraintSecondaryValues(NumericVector<Number> & solution, bool displaced)
{
  if (displaced)
    mooseAssert(_fe_problem.getDisplacedProblem(),
                "If we're calling this method with displaced = true, then we better well have a "
                "displaced problem");
  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                                : static_cast<SubProblem &>(_fe_problem);
  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;

  bool constraints_applied = false;

  for (const auto & it : penetration_locators)
  {
    PenetrationLocator & pen_loc = *(it.second);

    std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;

    BoundaryID secondary_boundary = pen_loc._secondary_boundary;
    BoundaryID primary_boundary = pen_loc._primary_boundary;

    if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
    {
      const auto & constraints =
          _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);
      std::unordered_set<unsigned int> needed_mat_props;
      for (const auto & constraint : constraints)
      {
        const auto & mp_deps = constraint->getMatPropDependencies();
        needed_mat_props.insert(mp_deps.begin(), mp_deps.end());
      }
      _fe_problem.setActiveMaterialProperties(needed_mat_props, /*tid=*/0);

      for (unsigned int i = 0; i < secondary_nodes.size(); i++)
      {
        dof_id_type secondary_node_num = secondary_nodes[i];
        Node & secondary_node = _mesh.nodeRef(secondary_node_num);

        if (secondary_node.processor_id() == processor_id())
        {
          if (pen_loc._penetration_info[secondary_node_num])
          {
            PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];

            reinitNodeFace(secondary_node, secondary_boundary, info, displaced);

            for (const auto & nfc : constraints)
            {
              if (nfc->isExplicitConstraint())
                continue;
              // Skip if this constraint does not correspond to the primary-secondary pair
              // prepared by the outer loops.
              // This continue statement is required when, e.g. one secondary surface constrains
              // more than one primary surface.
              if (nfc->secondaryBoundary() != secondary_boundary ||
                  nfc->primaryBoundary() != primary_boundary)
                continue;

              if (nfc->shouldApply())
              {
                constraints_applied = true;
                nfc->computeSecondaryValue(solution);
              }

              if (nfc->hasWritableCoupledVariables())
              {
                Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
                for (auto * var : nfc->getWritableCoupledVariables())
                {
                  if (var->isNodalDefined())
                    var->insert(_fe_problem.getAuxiliarySystem().solution());
                }
              }
            }
          }
        }
      }
    }
  }

  // go over NodeElemConstraints
  std::set<dof_id_type> unique_secondary_node_ids;

  for (const auto & secondary_id : _mesh.meshSubdomains())
  {
    for (const auto & primary_id : _mesh.meshSubdomains())
    {
      if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
      {
        const auto & constraints =
            _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);

        // get unique set of ids of all nodes on current block
        unique_secondary_node_ids.clear();
        const MeshBase & meshhelper = _mesh.getMesh();
        for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
                                          meshhelper.active_subdomain_elements_end(secondary_id)))
        {
          for (auto & n : elem->node_ref_range())
            unique_secondary_node_ids.insert(n.id());
        }

        for (auto secondary_node_id : unique_secondary_node_ids)
        {
          Node & secondary_node = _mesh.nodeRef(secondary_node_id);

          // check if secondary node is on current processor
          if (secondary_node.processor_id() == processor_id())
          {
            // This reinits the variables that exist on the secondary node
            _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);

            // This will set aside residual and jacobian space for the variables that have dofs
            // on the secondary node
            _fe_problem.prepareAssembly(0);

            for (const auto & nec : constraints)
            {
              if (nec->shouldApply())
              {
                constraints_applied = true;
                nec->computeSecondaryValue(solution);
              }
            }
          }
        }
      }
    }
  }

  // See if constraints were applied anywhere
  _communicator.max(constraints_applied);

  if (constraints_applied)
  {
    solution.close();
    update();
  }
}

void
NonlinearSystemBase::constraintResiduals(NumericVector<Number> & residual, bool displaced)
{
  // Make sure the residual is in a good state
  residual.close();

  if (displaced)
    mooseAssert(_fe_problem.getDisplacedProblem(),
                "If we're calling this method with displaced = true, then we better well have a "
                "displaced problem");
  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                                : static_cast<SubProblem &>(_fe_problem);
  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;

  bool constraints_applied;
  bool residual_has_inserted_values = false;
  if (!_assemble_constraints_separately)
    constraints_applied = false;
  for (const auto & it : penetration_locators)
  {
    if (_assemble_constraints_separately)
    {
      // Reset the constraint_applied flag before each new constraint, as they need to be
      // assembled separately
      constraints_applied = false;
    }
    PenetrationLocator & pen_loc = *(it.second);

    std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;

    BoundaryID secondary_boundary = pen_loc._secondary_boundary;
    BoundaryID primary_boundary = pen_loc._primary_boundary;

    bool has_writable_variables(false);

    if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
    {
      const auto & constraints =
          _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);

      for (unsigned int i = 0; i < secondary_nodes.size(); i++)
      {
        dof_id_type secondary_node_num = secondary_nodes[i];
        Node & secondary_node = _mesh.nodeRef(secondary_node_num);

        if (secondary_node.processor_id() == processor_id())
        {
          if (pen_loc._penetration_info[secondary_node_num])
          {
            PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];

            reinitNodeFace(secondary_node, secondary_boundary, info, displaced);

            for (const auto & nfc : constraints)
            {
              // Skip if this constraint does not correspond to the primary-secondary pair
              // prepared by the outer loops.
              // This continue statement is required when, e.g. one secondary surface constrains
              // more than one primary surface.
              if (nfc->secondaryBoundary() != secondary_boundary ||
                  nfc->primaryBoundary() != primary_boundary)
                continue;

              if (nfc->shouldApply())
              {
                constraints_applied = true;
                nfc->computeResidual();

                if (nfc->overwriteSecondaryResidual())
                {
                  // The below will actually overwrite the residual for every single dof that
                  // lives on the node. We definitely don't want to do that!
                  // _fe_problem.setResidual(residual, 0);

                  const auto & secondary_var = nfc->variable();
                  const auto & secondary_dofs = secondary_var.dofIndices();
                  mooseAssert(secondary_dofs.size() == secondary_var.count(),
                              "We are on a node so there should only be one dof per variable (for "
                              "an ArrayVariable we should have a number of dofs equal to the "
                              "number of components");

                  // Assume that if the user is overwriting the secondary residual, then they are
                  // supplying residuals that do not correspond to their other physics
                  // (e.g. Kernels), hence we should not apply a scalingFactor that is normally
                  // based on the order of their other physics (e.g. Kernels)
                  std::vector<Number> values = {nfc->secondaryResidual()};
                  residual.insert(values, secondary_dofs);
                  residual_has_inserted_values = true;
                }
                else
                  _fe_problem.cacheResidual(0);
              }
              if (nfc->hasWritableCoupledVariables())
              {
                Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
                has_writable_variables = true;
                for (auto * var : nfc->getWritableCoupledVariables())
                {
                  if (var->isNodalDefined())
                    var->insert(_fe_problem.getAuxiliarySystem().solution());
                }
              }
            }
          }
        }
      }
    }
    _communicator.max(has_writable_variables);

    if (has_writable_variables)
    {
      // Explicit contact dynamic constraints write to auxiliary variables and update the old
      // displacement solution on the constraint boundaries. Close solutions and update system
      // accordingly.
      _fe_problem.getAuxiliarySystem().solution().close();
      solutionOld().close();
    }

    if (_assemble_constraints_separately)
    {
      // Make sure that secondary contribution to primary are assembled, and ghosts have been
      // exchanged, as current primaries might become secondaries on next iteration and will need to
      // contribute their former secondaries' contributions to the future primaries. See if
      // constraints were applied anywhere
      _communicator.max(constraints_applied);

      if (constraints_applied)
      {
        // If any of the above constraints inserted values in the residual, it needs to be
        // assembled before adding the cached residuals below.
        _communicator.max(residual_has_inserted_values);
        if (residual_has_inserted_values)
        {
          residual.close();
          residual_has_inserted_values = false;
        }
        _fe_problem.addCachedResidualDirectly(residual, 0);
        residual.close();

        if (_need_residual_ghosted)
          *_residual_ghosted = residual;
      }
    }
  }
  if (!_assemble_constraints_separately)
  {
    _communicator.max(constraints_applied);

    if (constraints_applied)
    {
      // If any of the above constraints inserted values in the residual, it needs to be assembled
      // before adding the cached residuals below.
      _communicator.max(residual_has_inserted_values);
      if (residual_has_inserted_values)
        residual.close();

      _fe_problem.addCachedResidualDirectly(residual, 0);
      residual.close();

      if (_need_residual_ghosted)
        *_residual_ghosted = residual;
    }
  }

  // go over element-element constraint interface
  THREAD_ID tid = 0;
  const auto & element_pair_locators = subproblem.geomSearchData()._element_pair_locators;
  for (const auto & it : element_pair_locators)
  {
    ElementPairLocator & elem_pair_loc = *(it.second);

    if (_constraints.hasActiveElemElemConstraints(it.first, displaced))
    {
      // ElemElemConstraint objects
      const auto & _element_constraints =
          _constraints.getActiveElemElemConstraints(it.first, displaced);

      // go over pair elements
      const std::list<std::pair<const Elem *, const Elem *>> & elem_pairs =
          elem_pair_loc.getElemPairs();
      for (const auto & pr : elem_pairs)
      {
        const Elem * elem1 = pr.first;
        const Elem * elem2 = pr.second;

        if (elem1->processor_id() != processor_id())
          continue;

        const ElementPairInfo & info = elem_pair_loc.getElemPairInfo(pr);

        // for each element pair, process its constraints
        for (const auto & ec : _element_constraints)
        {
          _fe_problem.setCurrentSubdomainID(elem1, tid);
          subproblem.reinitElemPhys(elem1, info._elem1_constraint_q_point, tid);
          _fe_problem.setNeighborSubdomainID(elem2, tid);
          subproblem.reinitNeighborPhys(elem2, info._elem2_constraint_q_point, tid);

          ec->prepareShapes(ec->variable().number());
          ec->prepareNeighborShapes(ec->variable().number());

          ec->reinit(info);
          ec->computeResidual();
          _fe_problem.cacheResidual(tid);
          _fe_problem.cacheResidualNeighbor(tid);
        }
        _fe_problem.addCachedResidual(tid);
      }
    }
  }

  // go over NodeElemConstraints
  std::set<dof_id_type> unique_secondary_node_ids;

  constraints_applied = false;
  residual_has_inserted_values = false;
  for (const auto & secondary_id : _mesh.meshSubdomains())
  {
    for (const auto & primary_id : _mesh.meshSubdomains())
    {
      if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
      {
        const auto & constraints =
            _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);

        // get unique set of ids of all nodes on current block
        unique_secondary_node_ids.clear();
        const MeshBase & meshhelper = _mesh.getMesh();
        for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
                                          meshhelper.active_subdomain_elements_end(secondary_id)))
        {
          for (auto & n : elem->node_ref_range())
            unique_secondary_node_ids.insert(n.id());
        }

        for (auto secondary_node_id : unique_secondary_node_ids)
        {
          Node & secondary_node = _mesh.nodeRef(secondary_node_id);
          // check if secondary node is on current processor
          if (secondary_node.processor_id() == processor_id())
          {
            // This reinits the variables that exist on the secondary node
            _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);

            // This will set aside residual and jacobian space for the variables that have dofs
            // on the secondary node
            _fe_problem.prepareAssembly(0);

            for (const auto & nec : constraints)
            {
              if (nec->shouldApply())
              {
                constraints_applied = true;
                nec->computeResidual();

                if (nec->overwriteSecondaryResidual())
                {
                  _fe_problem.setResidual(residual, 0);
                  residual_has_inserted_values = true;
                }
                else
                  _fe_problem.cacheResidual(0);
              }
            }
          }
        }
      }
    }
  }
  _communicator.max(constraints_applied);

  if (constraints_applied)
  {
    // If any of the above constraints inserted values in the residual, it needs to be assembled
    // before adding the cached residuals below.
    _communicator.max(residual_has_inserted_values);
    if (residual_has_inserted_values)
      residual.close();

    _fe_problem.addCachedResidualDirectly(residual, 0);
    residual.close();

    if (_need_residual_ghosted)
      *_residual_ghosted = residual;
  }

  // We may have additional tagged vectors that also need to be accumulated
  _fe_problem.addCachedResidual(0);
}

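// When constraints are assembled separately, each penetration locator's contributions are
// reduced, closed, and mirrored into the ghosted residual before the next locator runs;
// otherwise a single reduction happens after the loop. Keeping the ghosted copy current
// matters because today's primary surface may be tomorrow's secondary.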
void
NonlinearSystemBase::overwriteNodeFace(NumericVector<Number> & soln)
{
  // Overwrite results from integrator in case we have explicit dynamics contact constraints
  auto & subproblem = _fe_problem.getDisplacedProblem()
                          ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                          : static_cast<SubProblem &>(_fe_problem);
  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;

  for (const auto & it : penetration_locators)
  {
    PenetrationLocator & pen_loc = *(it.second);

    const auto & secondary_nodes = pen_loc._nearest_node._secondary_nodes;
    const BoundaryID secondary_boundary = pen_loc._secondary_boundary;
    const BoundaryID primary_boundary = pen_loc._primary_boundary;

    if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, true))
    {
      const auto & constraints =
          _constraints.getActiveNodeFaceConstraints(secondary_boundary, true);
      for (const auto i : index_range(secondary_nodes))
      {
        const auto secondary_node_num = secondary_nodes[i];
        const Node & secondary_node = _mesh.nodeRef(secondary_node_num);

        if (secondary_node.processor_id() == processor_id())
          if (pen_loc._penetration_info[secondary_node_num])
            for (const auto & nfc : constraints)
            {
              if (!nfc->isExplicitConstraint())
                continue;

              // Skip if this constraint does not correspond to the primary-secondary pair
              // prepared by the outer loops.
              // This continue statement is required when, e.g. one secondary surface constrains
              // more than one primary surface.
              if (nfc->secondaryBoundary() != secondary_boundary ||
                  nfc->primaryBoundary() != primary_boundary)
                continue;

              nfc->overwriteBoundaryVariables(soln, secondary_node);
            }
      }
    }
  }
  soln.close();
}

void
NonlinearSystemBase::residualSetup()
{
  TIME_SECTION("residualSetup", 3);

  SolverSystem::residualSetup();

  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    _kernels.residualSetup(tid);
    _nodal_kernels.residualSetup(tid);
    _dirac_kernels.residualSetup(tid);
    if (_doing_dg)
      _dg_kernels.residualSetup(tid);
    _interface_kernels.residualSetup(tid);
    _element_dampers.residualSetup(tid);
    _nodal_dampers.residualSetup(tid);
    _integrated_bcs.residualSetup(tid);
  }
  _scalar_kernels.residualSetup();
  _constraints.residualSetup();
  _general_dampers.residualSetup();
  _nodal_bcs.residualSetup();

  // Avoid recursion
  if (this == &_fe_problem.currentNonlinearSystem())
    _fe_problem.residualSetup();
}

void
NonlinearSystemBase::computeResidualInternal(const std::set<TagID> & tags)
{
  parallel_object_only();

  TIME_SECTION("computeResidualInternal", 3);

  residualSetup();

  const auto vector_tag_data = _fe_problem.getVectorTags(tags);

  // Residual contributions from UOs - for now this is used for ray tracing
  // and ray kernels that contribute to the residual (think line sources)
  std::vector<UserObject *> uos;
  _fe_problem.theWarehouse()
      .query()
      .condition<AttribSystem>("UserObject")
      .condition<AttribExecOns>(EXEC_PRE_KERNELS)
      .queryInto(uos);
  for (auto & uo : uos)
    uo->residualSetup();
  for (auto & uo : uos)
  {
    uo->initialize();
    uo->execute();
    uo->finalize();
  }

  // reinit scalar variables
  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
    _fe_problem.reinitScalars(tid);

  // residual contributions from the domain
  PARALLEL_TRY
  {
    TIME_SECTION("Kernels", 3 /*, "Computing Kernels"*/);

    ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();

    ComputeResidualThread cr(_fe_problem, tags);
    Threads::parallel_reduce(elem_range, cr);

    // We pass face information directly to FV residual objects for their evaluation. Consequently
    // we must make sure to do separate threaded loops for 1) undisplaced face information objects
    // and undisplaced residual objects and 2) displaced face information objects and displaced
    // residual objects
    using FVRange = StoredRange<MooseMesh::const_face_info_iterator, const FaceInfo *>;
    if (_fe_problem.haveFV())
    {
      ComputeFVFluxResidualThread<FVRange> fvr(
          _fe_problem, this->number(), tags, /*on_displaced=*/false);
      FVRange faces(_fe_problem.mesh().ownedFaceInfoBegin(), _fe_problem.mesh().ownedFaceInfoEnd());
      Threads::parallel_reduce(faces, fvr);
    }
    if (auto displaced_problem = _fe_problem.getDisplacedProblem();
        displaced_problem && displaced_problem->haveFV())
    {
      ComputeFVFluxResidualThread<FVRange> fvr(
          _fe_problem, this->number(), tags, /*on_displaced=*/true);
      FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
                    displaced_problem->mesh().ownedFaceInfoEnd());
      Threads::parallel_reduce(faces, fvr);
    }

    unsigned int n_threads = libMesh::n_threads();
    for (unsigned int i = 0; i < n_threads;
         i++) // Add any cached residuals that might be hanging around
      _fe_problem.addCachedResidual(i);
  }
  PARALLEL_CATCH;

  // residual contributions from the scalar kernels
  PARALLEL_TRY
  {
    // do scalar kernels (not sure how to thread this)
    if (_scalar_kernels.hasActiveObjects())
    {
      TIME_SECTION("ScalarKernels", 3 /*, "Computing ScalarKernels"*/);

      MooseObjectWarehouse<ScalarKernelBase> * scalar_kernel_warehouse;
      // This code should be refactored once we can do tags for scalar
      // kernels
      // Should redo this based on Warehouse
      if (!tags.size() || tags.size() == _fe_problem.numVectorTags(Moose::VECTOR_TAG_RESIDUAL))
        scalar_kernel_warehouse = &_scalar_kernels;
      else if (tags.size() == 1)
        scalar_kernel_warehouse =
            &(_scalar_kernels.getVectorTagObjectWarehouse(*(tags.begin()), 0));
      else
        // scalar_kernels is not threading
        scalar_kernel_warehouse = &(_scalar_kernels.getVectorTagsObjectWarehouse(tags, 0));

      bool have_scalar_contributions = false;
      const auto & scalars = scalar_kernel_warehouse->getActiveObjects();
      for (const auto & scalar_kernel : scalars)
      {
        scalar_kernel->reinit();
        const std::vector<dof_id_type> & dof_indices = scalar_kernel->variable().dofIndices();
        const DofMap & dof_map = scalar_kernel->variable().dofMap();
        const dof_id_type first_dof = dof_map.first_dof();
        const dof_id_type end_dof = dof_map.end_dof();
        for (dof_id_type dof : dof_indices)
        {
          if (dof >= first_dof && dof < end_dof)
          {
            scalar_kernel->computeResidual();
            have_scalar_contributions = true;
            break;
          }
        }
      }
      if (have_scalar_contributions)
        _fe_problem.addResidualScalar();
    }
  }
  PARALLEL_CATCH;

  // residual contributions from Block NodalKernels
  PARALLEL_TRY
  {
    if (_nodal_kernels.hasActiveBlockObjects())
    {
      TIME_SECTION("NodalKernels", 3 /*, "Computing NodalKernels"*/);

      ComputeNodalKernelsThread cnk(_fe_problem, _nodal_kernels, tags);

      ConstNodeRange & range = *_mesh.getLocalNodeRange();

      if (range.begin() != range.end())
      {
        _fe_problem.reinitNode(*range.begin(), 0);

        Threads::parallel_reduce(range, cnk);

        unsigned int n_threads = libMesh::n_threads();
        for (unsigned int i = 0; i < n_threads;
             i++) // Add any cached residuals that might be hanging around
          _fe_problem.addCachedResidual(i);
      }
    }
  }
  PARALLEL_CATCH;

  if (_fe_problem.computingScalingResidual())
    // We computed the volumetric objects. We can return now before we get into
    // any strongly enforced constraint conditions or penalty-type objects
    // (DGKernels, IntegratedBCs, InterfaceKernels, Constraints)
    return;

  // residual contributions from boundary NodalKernels
  PARALLEL_TRY
  {
    if (_nodal_kernels.hasActiveBoundaryObjects())
    {
      TIME_SECTION("NodalKernelBCs", 3 /*, "Computing NodalKernelBCs"*/);

      ComputeNodalKernelBcsThread cnk(_fe_problem, _nodal_kernels, tags);

      ConstBndNodeRange & bnd_node_range = *_mesh.getBoundaryNodeRange();

      Threads::parallel_reduce(bnd_node_range, cnk);

      unsigned int n_threads = libMesh::n_threads();
      for (unsigned int i = 0; i < n_threads;
           i++) // Add any cached residuals that might be hanging around
        _fe_problem.addCachedResidual(i);
    }
  }
  PARALLEL_CATCH;

  mortarConstraints(Moose::ComputeType::Residual, tags, /*matrix_tags=*/{});

  if (_residual_copy.get())
  {
    _Re_non_time->close();
    _Re_non_time->localize(*_residual_copy);
  }

  if (_need_residual_ghosted)
  {
    _Re_non_time->close();
    *_residual_ghosted = *_Re_non_time;
    _residual_ghosted->close();
  }

  PARALLEL_TRY { computeDiracContributions(tags, false); }
  PARALLEL_CATCH;

  if (_constraints.hasActiveNodalConstraints())
  {
    PARALLEL_TRY { enforceNodalConstraintsResidual(*_Re_non_time); }
    PARALLEL_CATCH;
    _Re_non_time->close();
  }

  // Add in Residual contributions from other Constraints
  if (_constraints.hasActiveObjects())
  {
    PARALLEL_TRY
    {
      // Undisplaced Constraints
      constraintResiduals(*_Re_non_time, false);

      // Displaced Constraints
      if (_fe_problem.getDisplacedProblem())
        constraintResiduals(*_Re_non_time, true);

      if (_fe_problem.computingNonlinearResid())
        _constraints.residualEnd();
    }
    PARALLEL_CATCH;
    _Re_non_time->close();
  }

  // Accumulate the occurrence of solution invalid warnings for the current iteration cumulative
  // counters
  _app.solutionInvalidity().solutionInvalidAccumulation();
}

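// Assembly order in computeResidualInternal: user objects flagged pre-kernel, volumetric
// kernels (FE and FV), scalar kernels, block and boundary nodal kernels, Dirac kernels,
// then constraints; strongly-enforced nodal BCs are deliberately left to
// computeResidualTags, which applies them after everything else has accumulated.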
void
NonlinearSystemBase::computeResidualAndJacobianInternal(const std::set<TagID> & vector_tags,
                                                        const std::set<TagID> & matrix_tags)
{
  TIME_SECTION("computeResidualAndJacobianInternal", 3);

  // Make matrix ready to use
  activeAllMatrixTags();

  for (auto tag : matrix_tags)
  {
    if (!hasMatrix(tag))
      continue;

    auto & jacobian = getMatrix(tag);
    // Necessary for speed
    if (auto petsc_matrix = dynamic_cast<PetscMatrix<Number> *>(&jacobian))
    {
      LibmeshPetscCall(MatSetOption(petsc_matrix->mat(),
                                    MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
                                    PETSC_TRUE));
      if (!_fe_problem.errorOnJacobianNonzeroReallocation())
        LibmeshPetscCall(
            MatSetOption(petsc_matrix->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
    }
  }

  residualSetup();

  // Residual contributions from UOs - for now this is used for ray tracing
  // and ray kernels that contribute to the residual (think line sources)
  std::vector<UserObject *> uos;
  _fe_problem.theWarehouse()
      .query()
      .condition<AttribSystem>("UserObject")
      .condition<AttribExecOns>(EXEC_PRE_KERNELS)
      .queryInto(uos);
  for (auto & uo : uos)
    uo->residualSetup();
  for (auto & uo : uos)
  {
    uo->initialize();
    uo->execute();
    uo->finalize();
  }

  // reinit scalar variables
  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
    _fe_problem.reinitScalars(tid);

  // residual contributions from the domain
  PARALLEL_TRY
  {
    TIME_SECTION("Kernels", 3 /*, "Computing Kernels"*/);

    ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();

    ComputeResidualAndJacobianThread crj(_fe_problem, vector_tags, matrix_tags);
    Threads::parallel_reduce(elem_range, crj);

    using FVRange = StoredRange<MooseMesh::const_face_info_iterator, const FaceInfo *>;
    if (_fe_problem.haveFV())
    {
      ComputeFVFluxRJThread<FVRange> fvrj(
          _fe_problem, this->number(), vector_tags, matrix_tags, /*on_displaced=*/false);
      FVRange faces(_fe_problem.mesh().ownedFaceInfoBegin(), _fe_problem.mesh().ownedFaceInfoEnd());
      Threads::parallel_reduce(faces, fvrj);
    }
    if (auto displaced_problem = _fe_problem.getDisplacedProblem();
        displaced_problem && displaced_problem->haveFV())
    {
      ComputeFVFluxRJThread<FVRange> fvr(
          _fe_problem, this->number(), vector_tags, matrix_tags, /*on_displaced=*/true);
      FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
                    displaced_problem->mesh().ownedFaceInfoEnd());
      Threads::parallel_reduce(faces, fvr);
    }

    unsigned int n_threads = libMesh::n_threads();
    for (unsigned int i = 0; i < n_threads;
         i++) // Add any cached residuals that might be hanging around
    {
      _fe_problem.addCachedResidual(i);
      _fe_problem.addCachedJacobian(i);
    }
  }
  PARALLEL_CATCH;
}

2008 void
2010 {
2011  _nl_vector_tags.clear();
2012 
2013  const auto & residual_vector_tags = _fe_problem.getVectorTags(Moose::VECTOR_TAG_RESIDUAL);
2014  for (const auto & residual_vector_tag : residual_vector_tags)
2015  _nl_vector_tags.insert(residual_vector_tag._id);
2016 
2017  associateVectorToTag(residual, residualVectorTag());
2018  computeNodalBCs(residual, _nl_vector_tags);
2019  disassociateVectorFromTag(residual, residualVectorTag());
2020 }
2021 
2022 void
2023 NonlinearSystemBase::computeNodalBCs(NumericVector<Number> & residual, const std::set<TagID> & tags)
2024 {
2025  associateVectorToTag(residual, residualVectorTag());
2026 
2027  computeNodalBCs(tags);
2028 
2029  disassociateVectorFromTag(residual, residualVectorTag());
2030 }
2031 
2032 void
2033 NonlinearSystemBase::computeNodalBCs(const std::set<TagID> & tags)
2034 {
2035  // We need to close the diag_save_in variables on the aux system before NodalBCBases clear the
2036  // dofs on boundary nodes
2037  if (_has_save_in)
2038  _fe_problem.getAuxiliarySystem().solution().close();
2039 
2040  // Select the nodal BCs to apply
2041  MooseObjectWarehouse<NodalBCBase> * nbc_warehouse;
2042 
2043  if (tags.size() == _fe_problem.numVectorTags(Moose::VECTOR_TAG_RESIDUAL) || !tags.size())
2044  nbc_warehouse = &_nodal_bcs;
2045  else if (tags.size() == 1)
2046  nbc_warehouse = &(_nodal_bcs.getVectorTagObjectWarehouse(*(tags.begin()), 0));
2047  else
2048  nbc_warehouse = &(_nodal_bcs.getVectorTagsObjectWarehouse(tags, 0));
2049 
2050  // Return early if there are no active nodal BCs
2051  if (!nbc_warehouse->size())
2052  return;
2053 
2054  PARALLEL_TRY
2055  {
2056  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
2057 
2058  if (!bnd_nodes.empty())
2059  {
2060  TIME_SECTION("NodalBCs", 3 /*, "Computing NodalBCs"*/);
2061 
2062  for (const auto & bnode : bnd_nodes)
2063  {
2064  BoundaryID boundary_id = bnode->_bnd_id;
2065  Node * node = bnode->_node;
2066 
2067  if (node->processor_id() == processor_id() &&
2068  nbc_warehouse->hasActiveBoundaryObjects(boundary_id))
2069  {
2070  // reinit variables in nodes
2071  _fe_problem.reinitNodeFace(node, boundary_id, 0);
2072 
2073  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(boundary_id);
2074  for (const auto & nbc : bcs)
2075  if (nbc->shouldApply())
2076  nbc->computeResidual();
2077  }
2078  }
2079  }
2080  }
2081  PARALLEL_CATCH;
2082 
2083  if (_Re_time)
2084  _Re_time->close();
2085  _Re_non_time->close();
2086 }
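
// --------------------------------------------------------------------------
// [Editor's example] A self-contained model (hypothetical types, not the
// MOOSE API) of the dispatch pattern above: walk the boundary nodes, skip
// nodes another rank owns, and apply only the boundary conditions registered
// for that node's boundary ID.
// --------------------------------------------------------------------------
// #include <functional>
// #include <iostream>
// #include <map>
// #include <vector>
//
// struct BndNode
// {
//   int boundary_id;
//   int owner_rank;
//   int node_id;
// };
//
// int main()
// {
//   const int my_rank = 0;
//   // boundary id -> list of BC callbacks (a toy "warehouse")
//   std::map<int, std::vector<std::function<void(int)>>> bcs;
//   bcs[1].push_back([](int node) { std::cout << "apply u=0 at node " << node << "\n"; });
//
//   const std::vector<BndNode> bnd_nodes = {{1, 0, 7}, {1, 1, 8}, {2, 0, 9}};
//   for (const auto & bnode : bnd_nodes)
//   {
//     if (bnode.owner_rank != my_rank)
//       continue; // another processor owns this node
//     auto it = bcs.find(bnode.boundary_id);
//     if (it == bcs.end())
//       continue; // no active BCs on this boundary
//     for (const auto & bc : it->second)
//       bc(bnode.node_id);
//   }
// }
// --------------------------------------------------------------------------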
2087 
2088 void
2089 NonlinearSystemBase::computeNodalBCsResidualAndJacobian()
2090 {
2091  PARALLEL_TRY
2092  {
2093  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
2094 
2095  if (!bnd_nodes.empty())
2096  {
2097  TIME_SECTION("NodalBCs", 3 /*, "Computing NodalBCs"*/);
2098 
2099  for (const auto & bnode : bnd_nodes)
2100  {
2101  BoundaryID boundary_id = bnode->_bnd_id;
2102  Node * node = bnode->_node;
2103 
2104  if (node->processor_id() == processor_id())
2105  {
2106  // reinit variables in nodes
2107  _fe_problem.reinitNodeFace(node, boundary_id, 0);
2108  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
2109  {
2110  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
2111  for (const auto & nbc : bcs)
2112  if (nbc->shouldApply())
2113  nbc->computeResidualAndJacobian();
2114  }
2115  }
2116  }
2117  }
2118  }
2119  PARALLEL_CATCH;
2120 
2121  // Set the cached NodalBCBase values in the Jacobian matrix
2122  _fe_problem.assembly(0, number()).setCachedJacobian(getMatrix(systemMatrixTag()));
2123 }
2124 
2125 void
2126 NonlinearSystemBase::getNodeDofs(dof_id_type node_id, std::vector<dof_id_type> & dofs)
2127 {
2128  const Node & node = _mesh.nodeRef(node_id);
2129  unsigned int s = number();
2130  if (node.has_dofs(s))
2131  {
2132  for (unsigned int v = 0; v < nVariables(); v++)
2133  for (unsigned int c = 0; c < node.n_comp(s, v); c++)
2134  dofs.push_back(node.dof_number(s, v, c));
2135  }
2136 }
2137 
2138 void
2139 NonlinearSystemBase::findImplicitGeometricCouplingEntries(
2140  GeometricSearchData & geom_search_data,
2141  std::unordered_map<dof_id_type, std::vector<dof_id_type>> & graph)
2142 {
2143  const auto & node_to_elem_map = _mesh.nodeToElemMap();
2144  const auto & nearest_node_locators = geom_search_data._nearest_node_locators;
2145  for (const auto & it : nearest_node_locators)
2146  {
2147  std::vector<dof_id_type> & secondary_nodes = it.second->_secondary_nodes;
2148 
2149  for (const auto & secondary_node : secondary_nodes)
2150  {
2151  std::set<dof_id_type> unique_secondary_indices;
2152  std::set<dof_id_type> unique_primary_indices;
2153 
2154  auto node_to_elem_pair = node_to_elem_map.find(secondary_node);
2155  if (node_to_elem_pair != node_to_elem_map.end())
2156  {
2157  const std::vector<dof_id_type> & elems = node_to_elem_pair->second;
2158 
2159  // Get the dof indices from each elem connected to the node
2160  for (const auto & cur_elem : elems)
2161  {
2162  std::vector<dof_id_type> dof_indices;
2163  dofMap().dof_indices(_mesh.elemPtr(cur_elem), dof_indices);
2164 
2165  for (const auto & dof : dof_indices)
2166  unique_secondary_indices.insert(dof);
2167  }
2168  }
2169 
2170  std::vector<dof_id_type> primary_nodes = it.second->_neighbor_nodes[secondary_node];
2171 
2172  for (const auto & primary_node : primary_nodes)
2173  {
2174  auto primary_node_to_elem_pair = node_to_elem_map.find(primary_node);
2175  mooseAssert(primary_node_to_elem_pair != node_to_elem_map.end(),
2176  "Missing entry in node to elem map");
2177  const std::vector<dof_id_type> & primary_node_elems = primary_node_to_elem_pair->second;
2178 
2179  // Get the dof indices from each elem connected to the node
2180  for (const auto & cur_elem : primary_node_elems)
2181  {
2182  std::vector<dof_id_type> dof_indices;
2183  dofMap().dof_indices(_mesh.elemPtr(cur_elem), dof_indices);
2184 
2185  for (const auto & dof : dof_indices)
2186  unique_primary_indices.insert(dof);
2187  }
2188  }
2189 
2190  for (const auto & secondary_id : unique_secondary_indices)
2191  for (const auto & primary_id : unique_primary_indices)
2192  {
2193  graph[secondary_id].push_back(primary_id);
2194  graph[primary_id].push_back(secondary_id);
2195  }
2196  }
2197  }
2198 
2199  // handle node-to-node constraints
2200  const auto & ncs = _constraints.getActiveNodalConstraints();
2201  for (const auto & nc : ncs)
2202  {
2203  std::vector<dof_id_type> primary_dofs;
2204  std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();
2205  for (const auto & node_id : primary_node_ids)
2206  {
2207  Node * node = _mesh.queryNodePtr(node_id);
2208  if (node && node->processor_id() == this->processor_id())
2209  {
2210  getNodeDofs(node_id, primary_dofs);
2211  }
2212  }
2213 
2214  _communicator.allgather(primary_dofs);
2215 
2216  std::vector<dof_id_type> secondary_dofs;
2217  std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
2218  for (const auto & node_id : secondary_node_ids)
2219  {
2220  Node * node = _mesh.queryNodePtr(node_id);
2221  if (node && node->processor_id() == this->processor_id())
2222  {
2223  getNodeDofs(node_id, secondary_dofs);
2224  }
2225  }
2226 
2227  _communicator.allgather(secondary_dofs);
2228 
2229  for (const auto & primary_id : primary_dofs)
2230  for (const auto & secondary_id : secondary_dofs)
2231  {
2232  graph[primary_id].push_back(secondary_id);
2233  graph[secondary_id].push_back(primary_id);
2234  }
2235  }
2236 
2237  // Make every entry sorted and unique
2238  for (auto & it : graph)
2239  {
2240  std::vector<dof_id_type> & row = it.second;
2241  std::sort(row.begin(), row.end());
2242  std::vector<dof_id_type>::iterator uit = std::unique(row.begin(), row.end());
2243  row.resize(uit - row.begin());
2244  }
2245 }
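
// --------------------------------------------------------------------------
// [Editor's example] Standalone sketch of the symmetric coupling graph built
// above: every secondary dof couples to every primary dof in both directions,
// then each row is sorted and deduplicated exactly as in the final loop.
// Plain C++, no MOOSE types.
// --------------------------------------------------------------------------
// #include <algorithm>
// #include <unordered_map>
// #include <vector>
//
// using dof_id = unsigned long;
//
// int main()
// {
//   std::unordered_map<dof_id, std::vector<dof_id>> graph;
//   const std::vector<dof_id> secondary = {10, 11};
//   const std::vector<dof_id> primary = {20, 21, 20}; // duplicates happen
//
//   for (dof_id s : secondary)
//     for (dof_id p : primary)
//     {
//       graph[s].push_back(p); // secondary row sees primary dof
//       graph[p].push_back(s); // and vice versa (symmetric coupling)
//     }
//
//   // Make every entry sorted and unique, as in the code above
//   for (auto & [dof, row] : graph)
//   {
//     std::sort(row.begin(), row.end());
//     row.erase(std::unique(row.begin(), row.end()), row.end());
//   }
// }
// --------------------------------------------------------------------------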
2246 
2247 void
2248 NonlinearSystemBase::addImplicitGeometricCouplingEntries(GeometricSearchData & geom_search_data)
2249 {
2250  if (!hasMatrix(systemMatrixTag()))
2251  mooseError("Need a system matrix");
2252 
2253  // At this point, we have no idea how to make
2254  // this work with the tag system
2255  auto & jacobian = getMatrix(systemMatrixTag());
2256 
2257  std::unordered_map<dof_id_type, std::vector<dof_id_type>> graph;
2258 
2259  findImplicitGeometricCouplingEntries(geom_search_data, graph);
2260 
2261  for (const auto & it : graph)
2262  {
2263  dof_id_type dof = it.first;
2264  const auto & row = it.second;
2265 
2266  for (const auto & coupled_dof : row)
2267  jacobian.add(dof, coupled_dof, 0);
2268  }
2269 }
2270 
2271 void
2272 NonlinearSystemBase::constraintJacobians(bool displaced)
2273 {
2274  if (!hasMatrix(systemMatrixTag()))
2275  mooseError("A system matrix is required");
2276 
2277  auto & jacobian = getMatrix(systemMatrixTag());
2278 
2279  if (!_fe_problem.errorOnJacobianNonzeroReallocation())
2280  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2281  MAT_NEW_NONZERO_ALLOCATION_ERR,
2282  PETSC_FALSE));
2283 
2284  if (_fe_problem.ignoreZerosInJacobian())
2285  LibmeshPetscCall(MatSetOption(
2286  static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2287 
2288  std::vector<numeric_index_type> zero_rows;
2289 
2290  if (displaced)
2291  mooseAssert(_fe_problem.getDisplacedProblem(),
2292  "If we're calling this method with displaced = true, then we better well have a "
2293  "displaced problem");
2294  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
2295  : static_cast<SubProblem &>(_fe_problem);
2296  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;
2297 
2298  bool constraints_applied;
2299  if (!_assemble_constraints_separately)
2300  constraints_applied = false;
2301  for (const auto & it : penetration_locators)
2302  {
2303  if (_assemble_constraints_separately)
2304  {
2305  // Reset the constraint_applied flag before each new constraint, as they need to be
2306  // assembled separately
2307  constraints_applied = false;
2308  }
2309  PenetrationLocator & pen_loc = *(it.second);
2310 
2311  std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;
2312 
2313  BoundaryID secondary_boundary = pen_loc._secondary_boundary;
2314  BoundaryID primary_boundary = pen_loc._primary_boundary;
2315 
2316  zero_rows.clear();
2317  if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
2318  {
2319  const auto & constraints =
2320  _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);
2321 
2322  for (const auto & secondary_node_num : secondary_nodes)
2323  {
2324  Node & secondary_node = _mesh.nodeRef(secondary_node_num);
2325 
2326  if (secondary_node.processor_id() == processor_id())
2327  {
2328  if (pen_loc._penetration_info[secondary_node_num])
2329  {
2330  PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];
2331 
2332  reinitNodeFace(secondary_node, secondary_boundary, info, displaced);
2333  _fe_problem.prepareAssembly(0);
2334 
2335  for (const auto & nfc : constraints)
2336  {
2337  if (nfc->isExplicitConstraint())
2338  continue;
2339  // Skip this constraint if it does not correspond to the primary-secondary pair
2340  // prepared by the outer loops.
2341  // This continue statement is required when, e.g., one secondary surface constrains
2342  // more than one primary surface.
2343  if (nfc->secondaryBoundary() != secondary_boundary ||
2344  nfc->primaryBoundary() != primary_boundary)
2345  continue;
2346 
2347  nfc->_jacobian = &jacobian;
2348 
2349  if (nfc->shouldApply())
2350  {
2351  constraints_applied = true;
2352 
2353  nfc->prepareShapes(nfc->variable().number());
2354  nfc->prepareNeighborShapes(nfc->variable().number());
2355 
2356  nfc->computeJacobian();
2357 
2358  if (nfc->overwriteSecondaryJacobian())
2359  {
2360  // Add this variable's dof's row to be zeroed
2361  zero_rows.push_back(nfc->variable().nodalDofIndex());
2362  }
2363 
2364  std::vector<dof_id_type> secondary_dofs(1, nfc->variable().nodalDofIndex());
2365 
2366  // Assume that if the user is overwriting the secondary Jacobian, then they are
2367  // supplying Jacobians that do not correspond to their other physics
2368  // (e.g. Kernels), hence we should not apply a scalingFactor that is normally
2369  // based on the order of their other physics (e.g. Kernels)
2370  Real scaling_factor =
2371  nfc->overwriteSecondaryJacobian() ? 1. : nfc->variable().scalingFactor();
2372 
2373  // Cache the jacobian block for the secondary side
2374  nfc->addJacobian(_fe_problem.assembly(0, number()),
2375  nfc->_Kee,
2376  secondary_dofs,
2377  nfc->_connected_dof_indices,
2378  scaling_factor);
2379 
2380  // Cache Ken, Kne, Knn
2381  if (nfc->addCouplingEntriesToJacobian())
2382  {
2383  // Make sure we use a proper scaling factor (e.g. don't use an interior scaling
2384  // factor when we're overwriting secondary stuff)
2385  nfc->addJacobian(_fe_problem.assembly(0, number()),
2386  nfc->_Ken,
2387  secondary_dofs,
2388  nfc->primaryVariable().dofIndicesNeighbor(),
2389  scaling_factor);
2390 
2391  // Use _connected_dof_indices to get all the correct columns
2392  nfc->addJacobian(_fe_problem.assembly(0, number()),
2393  nfc->_Kne,
2394  nfc->primaryVariable().dofIndicesNeighbor(),
2395  nfc->_connected_dof_indices,
2396  nfc->variable().scalingFactor());
2397 
2398  // We've handled Ken and Kne, finally handle Knn
2400  }
2401 
2402  // Do the off-diagonals next
2403  const std::vector<MooseVariableFEBase *> coupled_vars = nfc->getCoupledMooseVars();
2404  for (const auto & jvar : coupled_vars)
2405  {
2406  // Only compute jacobians for nonlinear variables
2407  if (jvar->kind() != Moose::VAR_SOLVER)
2408  continue;
2409 
2410  // Only compute Jacobian entries if this coupling is being used by the
2411  // preconditioner
2412  if (nfc->variable().number() == jvar->number() ||
2414  nfc->variable().number(), jvar->number(), this->number()))
2415  continue;
2416 
2417  // Need to zero out the matrices first
2419 
2420  nfc->prepareShapes(nfc->variable().number());
2421  nfc->prepareNeighborShapes(jvar->number());
2422 
2423  nfc->computeOffDiagJacobian(jvar->number());
2424 
2425  // Cache the jacobian block for the secondary side
2426  nfc->addJacobian(_fe_problem.assembly(0, number()),
2427  nfc->_Kee,
2428  secondary_dofs,
2429  nfc->_connected_dof_indices,
2430  scaling_factor);
2431 
2432  // Cache Ken, Kne, Knn
2433  if (nfc->addCouplingEntriesToJacobian())
2434  {
2435  // Make sure we use a proper scaling factor (e.g. don't use an interior scaling
2436  // factor when we're overwriting secondary stuff)
2437  nfc->addJacobian(_fe_problem.assembly(0, number()),
2438  nfc->_Ken,
2439  secondary_dofs,
2440  jvar->dofIndicesNeighbor(),
2441  scaling_factor);
2442 
2443  // Use _connected_dof_indices to get all the correct columns
2444  nfc->addJacobian(_fe_problem.assembly(0, number()),
2445  nfc->_Kne,
2446  nfc->variable().dofIndicesNeighbor(),
2447  nfc->_connected_dof_indices,
2448  nfc->variable().scalingFactor());
2449 
2450  // We've handled Ken and Kne, finally handle Knn
2452  }
2453  }
2454  }
2455  }
2456  }
2457  }
2458  }
2459  }
2460  if (_assemble_constraints_separately)
2461  {
2462  // See if constraints were applied anywhere
2463  _communicator.max(constraints_applied);
2464 
2465  if (constraints_applied)
2466  {
2467  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2468  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2469  PETSC_TRUE));
2470 
2471  jacobian.close();
2472  jacobian.zero_rows(zero_rows, 0.0);
2473  jacobian.close();
2474  _fe_problem.addCachedJacobian(0);
2475  jacobian.close();
2476  }
2477  }
2478  }
2478  }
2479  if (!_assemble_constraints_separately)
2480  {
2481  // See if constraints were applied anywhere
2482  _communicator.max(constraints_applied);
2483 
2484  if (constraints_applied)
2485  {
2486  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2487  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2488  PETSC_TRUE));
2489 
2490  jacobian.close();
2491  jacobian.zero_rows(zero_rows, 0.0);
2492  jacobian.close();
2493  _fe_problem.addCachedJacobian(0);
2494  jacobian.close();
2495  }
2496  }
2497 
2498  THREAD_ID tid = 0;
2499  // go over element-element constraint interface
2500  const auto & element_pair_locators = subproblem.geomSearchData()._element_pair_locators;
2501  for (const auto & it : element_pair_locators)
2502  {
2503  ElementPairLocator & elem_pair_loc = *(it.second);
2504 
2505  if (_constraints.hasActiveElemElemConstraints(it.first, displaced))
2506  {
2507  // ElemElemConstraint objects
2508  const auto & _element_constraints =
2509  _constraints.getActiveElemElemConstraints(it.first, displaced);
2510 
2511  // go over pair elements
2512  const std::list<std::pair<const Elem *, const Elem *>> & elem_pairs =
2513  elem_pair_loc.getElemPairs();
2514  for (const auto & pr : elem_pairs)
2515  {
2516  const Elem * elem1 = pr.first;
2517  const Elem * elem2 = pr.second;
2518 
2519  if (elem1->processor_id() != processor_id())
2520  continue;
2521 
2522  const ElementPairInfo & info = elem_pair_loc.getElemPairInfo(pr);
2523 
2524  // for each element pair, process the active constraints
2525  for (const auto & ec : _element_constraints)
2526  {
2527  _fe_problem.setCurrentSubdomainID(elem1, tid);
2528  subproblem.reinitElemPhys(elem1, info._elem1_constraint_q_point, tid);
2529  _fe_problem.setNeighborSubdomainID(elem2, tid);
2530  subproblem.reinitNeighborPhys(elem2, info._elem2_constraint_q_point, tid);
2531 
2532  ec->prepareShapes(ec->variable().number());
2533  ec->prepareNeighborShapes(ec->variable().number());
2534 
2535  ec->reinit(info);
2536  ec->computeJacobian();
2539  }
2541  }
2542  }
2543  }
2544 
2545  // go over NodeElemConstraints
2546  std::set<dof_id_type> unique_secondary_node_ids;
2547  constraints_applied = false;
2548  for (const auto & secondary_id : _mesh.meshSubdomains())
2549  {
2550  for (const auto & primary_id : _mesh.meshSubdomains())
2551  {
2552  if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
2553  {
2554  const auto & constraints =
2555  _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);
2556 
2557  // get unique set of ids of all nodes on current block
2558  unique_secondary_node_ids.clear();
2559  const MeshBase & meshhelper = _mesh.getMesh();
2560  for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
2561  meshhelper.active_subdomain_elements_end(secondary_id)))
2562  {
2563  for (auto & n : elem->node_ref_range())
2564  unique_secondary_node_ids.insert(n.id());
2565  }
2566 
2567  for (auto secondary_node_id : unique_secondary_node_ids)
2568  {
2569  const Node & secondary_node = _mesh.nodeRef(secondary_node_id);
2570  // check if secondary node is on current processor
2571  if (secondary_node.processor_id() == processor_id())
2572  {
2573  // This reinits the variables that exist on the secondary node
2574  _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);
2575 
2576  // This will set aside residual and jacobian space for the variables that have dofs
2577  // on the secondary node
2578  _fe_problem.prepareAssembly(0);
2579  _fe_problem.reinitOffDiagScalars(0);
2580 
2581  for (const auto & nec : constraints)
2582  {
2583  if (nec->shouldApply())
2584  {
2585  constraints_applied = true;
2586 
2587  nec->_jacobian = &jacobian;
2588  nec->prepareShapes(nec->variable().number());
2589  nec->prepareNeighborShapes(nec->variable().number());
2590 
2591  nec->computeJacobian();
2592 
2593  if (nec->overwriteSecondaryJacobian())
2594  {
2595  // Add this variable's dof's row to be zeroed
2596  zero_rows.push_back(nec->variable().nodalDofIndex());
2597  }
2598 
2599  std::vector<dof_id_type> secondary_dofs(1, nec->variable().nodalDofIndex());
2600 
2601  // Cache the jacobian block for the secondary side
2602  nec->addJacobian(_fe_problem.assembly(0, number()),
2603  nec->_Kee,
2604  secondary_dofs,
2605  nec->_connected_dof_indices,
2606  nec->variable().scalingFactor());
2607 
2608  // Cache the jacobian block for the primary side
2609  nec->addJacobian(_fe_problem.assembly(0, number()),
2610  nec->_Kne,
2611  nec->primaryVariable().dofIndicesNeighbor(),
2612  nec->_connected_dof_indices,
2613  nec->variable().scalingFactor());
2614 
2617 
2618  // Do the off-diagonals next
2619  const std::vector<MooseVariableFEBase *> coupled_vars = nec->getCoupledMooseVars();
2620  for (const auto & jvar : coupled_vars)
2621  {
2622  // Only compute jacobians for nonlinear variables
2623  if (jvar->kind() != Moose::VAR_SOLVER)
2624  continue;
2625 
2626  // Only compute Jacobian entries if this coupling is being used by the
2627  // preconditioner
2628  if (nec->variable().number() == jvar->number() ||
2630  nec->variable().number(), jvar->number(), this->number()))
2631  continue;
2632 
2633  // Need to zero out the matrices first
2635 
2636  nec->prepareShapes(nec->variable().number());
2637  nec->prepareNeighborShapes(jvar->number());
2638 
2639  nec->computeOffDiagJacobian(jvar->number());
2640 
2641  // Cache the jacobian block for the secondary side
2642  nec->addJacobian(_fe_problem.assembly(0, number()),
2643  nec->_Kee,
2644  secondary_dofs,
2645  nec->_connected_dof_indices,
2646  nec->variable().scalingFactor());
2647 
2648  // Cache the jacobian block for the primary side
2649  nec->addJacobian(_fe_problem.assembly(0, number()),
2650  nec->_Kne,
2651  nec->variable().dofIndicesNeighbor(),
2652  nec->_connected_dof_indices,
2653  nec->variable().scalingFactor());
2654 
2657  }
2658  }
2659  }
2660  }
2661  }
2662  }
2663  }
2664  }
2665  // See if constraints were applied anywhere
2666  _communicator.max(constraints_applied);
2667 
2668  if (constraints_applied)
2669  {
2670  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2671  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2672  PETSC_TRUE));
2673 
2674  jacobian.close();
2675  jacobian.zero_rows(zero_rows, 0.0);
2676  jacobian.close();
2677  _fe_problem.addCachedJacobian(0);
2678  jacobian.close();
2679  }
2680 }
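
// --------------------------------------------------------------------------
// [Editor's example] _communicator.max(constraints_applied) above is a
// reduction acting as a parallel logical OR: if any rank applied a
// constraint, every rank sees "true" and takes the same close()/zero_rows()
// path. Minimal raw-MPI sketch of the same idea; MOOSE actually goes through
// libMesh's Parallel::Communicator wrapper rather than MPI directly.
// --------------------------------------------------------------------------
// #include <mpi.h>
// #include <cstdio>
//
// int main(int argc, char ** argv)
// {
//   MPI_Init(&argc, &argv);
//   int rank;
//   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
//
//   int applied_local = (rank == 0) ? 1 : 0; // only rank 0 applied a constraint
//   int applied_any = 0;
//   MPI_Allreduce(&applied_local, &applied_any, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
//
//   if (applied_any)
//     std::printf("rank %d: some rank applied constraints\n", rank);
//
//   MPI_Finalize();
//   return 0;
// }
// --------------------------------------------------------------------------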
2681 
2682 void
2683 NonlinearSystemBase::computeScalarKernelsJacobians(const std::set<TagID> & tags)
2684 {
2685  MooseObjectWarehouse<ScalarKernelBase> * scalar_kernel_warehouse;
2686 
2687  if (!tags.size() || tags.size() == _fe_problem.numMatrixTags())
2688  scalar_kernel_warehouse = &_scalar_kernels;
2689  else if (tags.size() == 1)
2690  scalar_kernel_warehouse = &(_scalar_kernels.getMatrixTagObjectWarehouse(*(tags.begin()), 0));
2691  else
2692  scalar_kernel_warehouse = &(_scalar_kernels.getMatrixTagsObjectWarehouse(tags, 0));
2693 
2694  // Compute the diagonal block for scalar variables
2695  if (scalar_kernel_warehouse->hasActiveObjects())
2696  {
2697  const auto & scalars = scalar_kernel_warehouse->getActiveObjects();
2698 
2699  _fe_problem.reinitScalars(/*tid=*/0);
2700 
2701  _fe_problem.reinitOffDiagScalars(/*_tid*/ 0);
2702 
2703  bool have_scalar_contributions = false;
2704  for (const auto & kernel : scalars)
2705  {
2706  kernel->reinit();
2707  const std::vector<dof_id_type> & dof_indices = kernel->variable().dofIndices();
2708  const DofMap & dof_map = kernel->variable().dofMap();
2709  const dof_id_type first_dof = dof_map.first_dof();
2710  const dof_id_type end_dof = dof_map.end_dof();
2711  for (dof_id_type dof : dof_indices)
2712  {
2713  if (dof >= first_dof && dof < end_dof)
2714  {
2715  kernel->computeJacobian();
2716  _fe_problem.addJacobianOffDiagScalar(kernel->variable().number());
2717  have_scalar_contributions = true;
2718  break;
2719  }
2720  }
2721  }
2722 
2723  if (have_scalar_contributions)
2724  _fe_problem.addJacobianScalar();
2725  }
2726 }
2727 
2728 void
2729 NonlinearSystemBase::jacobianSetup()
2730 {
2731  SolverSystem::jacobianSetup();
2732 
2733  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
2734  {
2735  _kernels.jacobianSetup(tid);
2736  _nodal_kernels.jacobianSetup(tid);
2737  _dirac_kernels.jacobianSetup(tid);
2738  if (_doing_dg)
2739  _dg_kernels.jacobianSetup(tid);
2740  _interface_kernels.jacobianSetup(tid);
2741  _element_dampers.jacobianSetup(tid);
2742  _nodal_dampers.jacobianSetup(tid);
2743  _integrated_bcs.jacobianSetup(tid);
2744  }
2745  _scalar_kernels.jacobianSetup();
2746  _constraints.jacobianSetup();
2747  _general_dampers.jacobianSetup();
2748  _nodal_bcs.jacobianSetup();
2749 
2750  // Avoid recursion
2751  if (this == &_fe_problem.currentNonlinearSystem())
2752  _fe_problem.jacobianSetup();
2754 }
2755 
2756 void
2757 NonlinearSystemBase::computeJacobianInternal(const std::set<TagID> & tags)
2758 {
2759  TIME_SECTION("computeJacobianInternal", 3);
2760 
2762 
2763  // Make matrix ready to use
2764  activeAllMatrixTags();
2765 
2766  for (auto tag : tags)
2767  {
2768  if (!hasMatrix(tag))
2769  continue;
2770 
2771  auto & jacobian = getMatrix(tag);
2772  // Necessary for speed
2773  if (auto petsc_matrix = dynamic_cast<PetscMatrix<Number> *>(&jacobian))
2774  {
2775  LibmeshPetscCall(MatSetOption(petsc_matrix->mat(),
2776  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2777  PETSC_TRUE));
2778  if (!_fe_problem.errorOnJacobianNonzeroReallocation())
2779  LibmeshPetscCall(
2780  MatSetOption(petsc_matrix->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
2781  }
2782  }
2783 
2784  jacobianSetup();
2785 
2786  // Jacobian contributions from UOs - for now this is used for ray tracing
2787  // and ray kernels that contribute to the Jacobian (think line sources)
2788  std::vector<UserObject *> uos;
2789  _fe_problem.theWarehouse()
2790  .query()
2791  .condition<AttribSystem>("UserObject")
2792  .condition<AttribExecOns>(EXEC_PRE_KERNELS)
2793  .queryInto(uos);
2794  for (auto & uo : uos)
2795  uo->jacobianSetup();
2796  for (auto & uo : uos)
2797  {
2798  uo->initialize();
2799  uo->execute();
2800  uo->finalize();
2801  }
2802 
2803  // reinit scalar variables
2804  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
2805  _fe_problem.reinitScalars(tid);
2806 
2807  PARALLEL_TRY
2808  {
2809  // We would like to compute ScalarKernels, block NodalKernels, FVFluxKernels, and mortar objects
2810  // up front because we want these included whether we are computing an ordinary Jacobian or a
2811  // Jacobian for determining variable scaling factors
2812  computeScalarKernelsJacobians(tags);
2813 
2814  // Block restricted Nodal Kernels
2815  if (_nodal_kernels.hasActiveBlockObjects())
2816  {
2817  ComputeNodalKernelJacobiansThread cnkjt(_fe_problem, *this, _nodal_kernels, tags);
2818  ConstNodeRange & range = *_mesh.getLocalNodeRange();
2819  Threads::parallel_reduce(range, cnkjt);
2820 
2821  unsigned int n_threads = libMesh::n_threads();
2822  for (unsigned int i = 0; i < n_threads;
2823  i++) // Add any cached jacobians that might be hanging around
2824  _fe_problem.addCachedJacobian(i);
2825  }
2826 
2828  if (_fe_problem.haveFV())
2829  {
2830  // the same loop works for both residual and jacobians because it keys
2831  // off of FEProblem's _currently_computing_jacobian parameter
2832  ComputeFVFluxJacobianThread<FVRange> fvj(
2833  _fe_problem, this->number(), tags, /*on_displaced=*/false);
2834  FVRange faces(_fe_problem.mesh().ownedFaceInfoBegin(), _fe_problem.mesh().ownedFaceInfoEnd());
2835  Threads::parallel_reduce(faces, fvj);
2836  }
2837  if (auto displaced_problem = _fe_problem.getDisplacedProblem();
2838  displaced_problem && displaced_problem->haveFV())
2839  {
2840  ComputeFVFluxJacobianThread<FVRange> fvr(
2841  _fe_problem, this->number(), tags, /*on_displaced=*/true);
2842  FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
2843  displaced_problem->mesh().ownedFaceInfoEnd());
2844  Threads::parallel_reduce(faces, fvr);
2845  }
2846 
2847  mortarConstraints(Moose::ComputeType::Jacobian, {}, tags);
2848 
2849  // Get our element range for looping over
2850  const auto & elem_range = _fe_problem.getCurrentAlgebraicElementRange();
2851 
2852  if (_fe_problem.computingScalingJacobian())
2853  {
2854  // Only compute Jacobians corresponding to the diagonals of volumetric compute objects
2855  // because this typically gives us a good representation of the physics. NodalBCs and
2856  // Constraints can introduce dramatically different scales (often order unity).
2857  // IntegratedBCs and/or InterfaceKernels may use penalty factors. DGKernels may be ok, but
2858  // they are almost always used in conjunction with Kernels
2859  ComputeJacobianForScalingThread cj(_fe_problem, tags);
2860  Threads::parallel_reduce(elem_range, cj);
2861  unsigned int n_threads = libMesh::n_threads();
2862  for (unsigned int i = 0; i < n_threads;
2863  i++) // Add any Jacobian contributions still hanging around
2864  _fe_problem.addCachedJacobian(i);
2865 
2866  // Check whether any exceptions were thrown and propagate this information for parallel
2867  // consistency before
2868  // 1) we do parallel communication when closing tagged matrices
2869  // 2) early returning before reaching our PARALLEL_CATCH below
2870  _fe_problem.checkExceptionAndStopSolve(/*print_message=*/false);
2871 
2872  closeTaggedMatrices(tags);
2873 
2874  return;
2875  }
2876 
2877  switch (_fe_problem.coupling())
2878  {
2879  case Moose::COUPLING_DIAG:
2880  {
2881  ComputeJacobianThread cj(_fe_problem, tags);
2882  Threads::parallel_reduce(elem_range, cj);
2883 
2884  unsigned int n_threads = libMesh::n_threads();
2885  for (unsigned int i = 0; i < n_threads;
2886  i++) // Add any Jacobian contributions still hanging around
2887  _fe_problem.addCachedJacobian(i);
2888 
2889  // Boundary restricted Nodal Kernels
2890  if (_nodal_kernels.hasActiveBoundaryObjects())
2891  {
2892  ComputeNodalKernelBCJacobiansThread cnkjt(_fe_problem, *this, _nodal_kernels, tags);
2893  ConstBndNodeRange & bnd_range = *_mesh.getBoundaryNodeRange();
2894 
2895  Threads::parallel_reduce(bnd_range, cnkjt);
2896  unsigned int n_threads = libMesh::n_threads();
2897  for (unsigned int i = 0; i < n_threads;
2898  i++) // Add any cached jacobians that might be hanging around
2899  _fe_problem.addCachedJacobian(i);
2900  }
2901  }
2902  break;
2903 
2904  default:
2906  {
2907  ComputeFullJacobianThread cj(_fe_problem, tags);
2908  Threads::parallel_reduce(elem_range, cj);
2909  unsigned int n_threads = libMesh::n_threads();
2910 
2911  for (unsigned int i = 0; i < n_threads; i++)
2912  _fe_problem.addCachedJacobian(i);
2913 
2914  // Boundary restricted Nodal Kernels
2915  if (_nodal_kernels.hasActiveBoundaryObjects())
2916  {
2917  ComputeNodalKernelBCJacobiansThread cnkjt(_fe_problem, *this, _nodal_kernels, tags);
2918  ConstBndNodeRange & bnd_range = *_mesh.getBoundaryNodeRange();
2919 
2920  Threads::parallel_reduce(bnd_range, cnkjt);
2921  unsigned int n_threads = libMesh::n_threads();
2922  for (unsigned int i = 0; i < n_threads;
2923  i++) // Add any cached jacobians that might be hanging around
2924  _fe_problem.addCachedJacobian(i);
2925  }
2926  }
2927  break;
2928  }
2929 
2930  computeDiracContributions(tags, true);
2931 
2932  static bool first = true;
2933 
2934  // This adds zeroes into geometric coupling entries to ensure they stay in the matrix
2935  if (_add_implicit_geometric_coupling_entries_to_jacobian && first)
2936  {
2937  first = false;
2938  addImplicitGeometricCouplingEntries(_fe_problem.geomSearchData());
2939 
2940  if (_fe_problem.getDisplacedProblem())
2941  addImplicitGeometricCouplingEntries(_fe_problem.getDisplacedProblem()->geomSearchData());
2942  }
2943  }
2944  PARALLEL_CATCH;
2945 
2946  // We have no idea how to make constraints work
2947  // with the tag system
2948  PARALLEL_TRY
2949  {
2950  // Add in Jacobian contributions from other Constraints
2951  if (_fe_problem._has_constraints && tags.count(systemMatrixTag()))
2952  {
2953  // Some constraints need values from the Jacobian
2954  closeTaggedMatrices(tags);
2955 
2956  // Nodal Constraints
2957  enforceNodalConstraintsJacobian();
2958 
2959  // Undisplaced Constraints
2960  constraintJacobians(false);
2961 
2962  // Displaced Constraints
2963  if (_fe_problem.getDisplacedProblem())
2964  constraintJacobians(true);
2965  }
2966  }
2967  PARALLEL_CATCH;
2968 
2969  // We need to close the save_in variables on the aux system before NodalBCBases clear the dofs
2970  // on boundary nodes
2971  if (_has_diag_save_in)
2972  _fe_problem.getAuxiliarySystem().solution().close();
2973 
2974  PARALLEL_TRY
2975  {
2976  MooseObjectWarehouse<NodalBCBase> * nbc_warehouse;
2977  // Select the nodal BCs to apply
2978  if (tags.size() == _fe_problem.numMatrixTags() || !tags.size())
2979  nbc_warehouse = &_nodal_bcs;
2980  else if (tags.size() == 1)
2981  nbc_warehouse = &(_nodal_bcs.getMatrixTagObjectWarehouse(*(tags.begin()), 0));
2982  else
2983  nbc_warehouse = &(_nodal_bcs.getMatrixTagsObjectWarehouse(tags, 0));
2984 
2985  if (nbc_warehouse->hasActiveObjects())
2986  {
2987  // We may be switching from add to set. Moreover, we rely on a call to MatZeroRows to enforce
2988  // the nodal boundary condition constraints which requires that the matrix be truly assembled
2989  // as opposed to just flushed. Consequently we can't do the following despite any desire to
2990  // keep our initial sparsity pattern honored (see https://gitlab.com/petsc/petsc/-/issues/852)
2991  //
2992  // flushTaggedMatrices(tags);
2993  closeTaggedMatrices(tags);
2994 
2995  // Cache the information about which BCs are coupled to which
2996  // variables, so we don't have to figure it out for each node.
2997  std::map<std::string, std::set<unsigned int>> bc_involved_vars;
2998  const std::set<BoundaryID> & all_boundary_ids = _mesh.getBoundaryIDs();
2999  for (const auto & bid : all_boundary_ids)
3000  {
3001  // Get reference to all the NodalBCs for this ID. This is only
3002  // safe if there are NodalBCBases there to be gotten...
3003  if (nbc_warehouse->hasActiveBoundaryObjects(bid))
3004  {
3005  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(bid);
3006  for (const auto & bc : bcs)
3007  {
3008  const std::vector<MooseVariableFEBase *> & coupled_moose_vars =
3009  bc->getCoupledMooseVars();
3010 
3011  // Create the set of "involved" MOOSE nonlinear vars, which includes all coupled vars
3012  // and the BC's own variable
3013  std::set<unsigned int> & var_set = bc_involved_vars[bc->name()];
3014  for (const auto & coupled_var : coupled_moose_vars)
3015  if (coupled_var->kind() == Moose::VAR_SOLVER)
3016  var_set.insert(coupled_var->number());
3017 
3018  var_set.insert(bc->variable().number());
3019  }
3020  }
3021  }
3022 
3023  // reinit scalar variables again. This reinit does not re-fill any of the scalar variable
3024  // solution arrays because that was done above. It only will reorder the derivative
3025  // information for AD calculations to be suitable for NodalBC calculations
3026  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
3027  _fe_problem.reinitScalars(tid, true);
3028 
3029  // Get variable coupling list. We do all the NodalBCBase stuff on
3030  // thread 0... The couplingEntries() data structure determines
3031  // which variables are "coupled" as far as the preconditioner is
3032  // concerned, not what variables a boundary condition specifically
3033  // depends on.
3034  auto & coupling_entries = _fe_problem.couplingEntries(/*_tid=*/0, this->number());
3035 
3036  // Compute Jacobians for NodalBCBases
3037  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
3038  for (const auto & bnode : bnd_nodes)
3039  {
3040  BoundaryID boundary_id = bnode->_bnd_id;
3041  Node * node = bnode->_node;
3042 
3043  if (nbc_warehouse->hasActiveBoundaryObjects(boundary_id) &&
3044  node->processor_id() == processor_id())
3045  {
3046  _fe_problem.reinitNodeFace(node, boundary_id, 0);
3047 
3048  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(boundary_id);
3049  for (const auto & bc : bcs)
3050  {
3051  // Get the set of involved MOOSE vars for this BC
3052  std::set<unsigned int> & var_set = bc_involved_vars[bc->name()];
3053 
3054  // Loop over all the variables whose Jacobian blocks are
3055  // actually being computed, call computeOffDiagJacobian()
3056  // for each one which is actually coupled (otherwise the
3057  // value is zero.)
3058  for (const auto & it : coupling_entries)
3059  {
3060  unsigned int ivar = it.first->number(), jvar = it.second->number();
3061 
3062  // We are only going to call computeOffDiagJacobian() if:
3063  // 1.) the BC's variable is ivar
3064  // 2.) jvar is "involved" with the BC (including jvar==ivar), and
3065  // 3.) the BC should apply.
3066  if ((bc->variable().number() == ivar) && var_set.count(jvar) && bc->shouldApply())
3067  bc->computeOffDiagJacobian(jvar);
3068  }
3069 
3070  const auto & coupled_scalar_vars = bc->getCoupledMooseScalarVars();
3071  for (const auto & jvariable : coupled_scalar_vars)
3072  if (hasScalarVariable(jvariable->name()))
3073  bc->computeOffDiagJacobianScalar(jvariable->number());
3074  }
3075  }
3076  } // end loop over boundary nodes
3077 
3078  // Set the cached NodalBCBase values in the Jacobian matrix
3079  _fe_problem.assembly(0, number()).setCachedJacobian(getMatrix(systemMatrixTag()));
3080  }
3081  }
3082  PARALLEL_CATCH;
3083 
3084  closeTaggedMatrices(tags);
3085 
3086  // We need to close the save_in variables on the aux system before NodalBCBases clear the dofs
3087  // on boundary nodes
3088  if (_has_save_in)
3089  _fe_problem.getAuxiliarySystem().solution().close();
3090 
3091  if (hasDiagSaveIn())
3092  _fe_problem.getAuxiliarySystem().solution().close();
3093 
3094  // Accumulate the occurrence of solution invalid warnings for the current iteration cumulative
3095  // counters
3096  _app.solutionInvalidity().solutionInvalidAccumulation();
3098 }
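
// --------------------------------------------------------------------------
// [Editor's example] Sketch of the initialize/execute/finalize life cycle
// that the EXEC_PRE_KERNELS user-object loops above run: a setup pass over
// every object first, then the full cycle per object. The interface below is
// hypothetical, modeled on the calls visible in the listing, not MOOSE's
// actual UserObject class.
// --------------------------------------------------------------------------
// #include <iostream>
// #include <memory>
// #include <vector>
//
// struct UserObjectLike
// {
//   virtual ~UserObjectLike() = default;
//   virtual void jacobianSetup() {}
//   virtual void initialize() = 0;
//   virtual void execute() = 0;
//   virtual void finalize() = 0;
// };
//
// struct LineSource : UserObjectLike
// {
//   void initialize() override { std::cout << "zero accumulators\n"; }
//   void execute() override { std::cout << "trace rays / add contributions\n"; }
//   void finalize() override { std::cout << "parallel communication\n"; }
// };
//
// int main()
// {
//   std::vector<std::unique_ptr<UserObjectLike>> uos;
//   uos.push_back(std::make_unique<LineSource>());
//
//   for (auto & uo : uos) // setup pass first, mirroring the code above
//     uo->jacobianSetup();
//   for (auto & uo : uos)
//   {
//     uo->initialize();
//     uo->execute();
//     uo->finalize();
//   }
// }
// --------------------------------------------------------------------------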
3099 
3100 void
3101 NonlinearSystemBase::computeJacobian(SparseMatrix<Number> & jacobian)
3102 {
3103  _nl_matrix_tags.clear();
3104 
3105  auto & tags = _fe_problem.getMatrixTags();
3106 
3107  for (auto & tag : tags)
3108  _nl_matrix_tags.insert(tag.second);
3109 
3110  computeJacobian(jacobian, _nl_matrix_tags);
3111 }
3112 
3113 void
3114 NonlinearSystemBase::computeJacobian(SparseMatrix<Number> & jacobian, const std::set<TagID> & tags)
3115 {
3116  associateMatrixToTag(jacobian, systemMatrixTag());
3117 
3118  computeJacobianTags(tags);
3119 
3120  disassociateMatrixFromTag(jacobian, systemMatrixTag());
3121 }
3122 
3123 void
3124 NonlinearSystemBase::computeJacobianTags(const std::set<TagID> & tags)
3125 {
3126  TIME_SECTION("computeJacobianTags", 5);
3127 
3128  FloatingPointExceptionGuard fpe_guard(_app);
3129 
3130  try
3131  {
3132  computeJacobianInternal(tags);
3133  }
3134  catch (MooseException & e)
3135  {
3136  // The buck stops here, we have already handled the exception by
3137  // calling stopSolve(), it is now up to PETSc to return a
3138  // "diverged" reason during the next solve.
3139  }
3140 }
3141 
3142 void
3143 NonlinearSystemBase::computeJacobianBlocks(std::vector<JacobianBlock *> & blocks)
3144 {
3145  _nl_matrix_tags.clear();
3146 
3147  auto & tags = _fe_problem.getMatrixTags();
3148  for (auto & tag : tags)
3149  _nl_matrix_tags.insert(tag.second);
3150 
3151  computeJacobianBlocks(blocks, _nl_matrix_tags);
3152 }
3153 
3154 void
3155 NonlinearSystemBase::computeJacobianBlocks(std::vector<JacobianBlock *> & blocks,
3156  const std::set<TagID> & tags)
3157 {
3158  TIME_SECTION("computeJacobianBlocks", 3);
3159  FloatingPointExceptionGuard fpe_guard(_app);
3160 
3161  for (unsigned int i = 0; i < blocks.size(); i++)
3162  {
3163  SparseMatrix<Number> & jacobian = blocks[i]->_jacobian;
3164 
3165  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
3166  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
3167  PETSC_TRUE));
3168  if (_fe_problem.errorOnJacobianNonzeroReallocation())
3169  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
3170  MAT_NEW_NONZERO_ALLOCATION_ERR,
3171  PETSC_TRUE));
3172 
3173  jacobian.zero();
3174  }
3175 
3176  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
3177  _fe_problem.reinitScalars(tid);
3178 
3179  PARALLEL_TRY
3180  {
3181  const auto & elem_range = _fe_problem.getCurrentAlgebraicElementRange();
3182  ComputeJacobianBlocksThread cjb(_fe_problem, blocks, tags);
3183  Threads::parallel_reduce(elem_range, cjb);
3184  }
3185  PARALLEL_CATCH;
3186 
3187  for (unsigned int i = 0; i < blocks.size(); i++)
3188  blocks[i]->_jacobian.close();
3189 
3190  for (unsigned int i = 0; i < blocks.size(); i++)
3191  {
3192  libMesh::System & precond_system = blocks[i]->_precond_system;
3193  SparseMatrix<Number> & jacobian = blocks[i]->_jacobian;
3194 
3195  unsigned int ivar = blocks[i]->_ivar;
3196  unsigned int jvar = blocks[i]->_jvar;
3197 
3198  // Dirichlet BCs
3199  std::vector<numeric_index_type> zero_rows;
3200  PARALLEL_TRY
3201  {
3203  for (const auto & bnode : bnd_nodes)
3204  {
3205  BoundaryID boundary_id = bnode->_bnd_id;
3206  Node * node = bnode->_node;
3207 
3208  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
3209  {
3210  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
3211 
3212  if (node->processor_id() == processor_id())
3213  {
3214  _fe_problem.reinitNodeFace(node, boundary_id, 0);
3215 
3216  for (const auto & bc : bcs)
3217  if (bc->variable().number() == ivar && bc->shouldApply())
3218  {
3219  // The first zero is for the variable number... there is only one variable in
3220  // each mini-system. The second zero only works with Lagrange elements!
3221  zero_rows.push_back(node->dof_number(precond_system.number(), 0, 0));
3222  }
3223  }
3224  }
3225  }
3226  }
3227  PARALLEL_CATCH;
3228 
3229  jacobian.close();
3230 
3231  // This zeroes the rows corresponding to Dirichlet BCs and puts a 1.0 on the diagonal
3232  if (ivar == jvar)
3233  jacobian.zero_rows(zero_rows, 1.0);
3234  else
3235  jacobian.zero_rows(zero_rows, 0.0);
3236 
3237  jacobian.close();
3238  }
3239 }
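
// --------------------------------------------------------------------------
// [Editor's example] Dense stand-in (not libMesh's SparseMatrix) for the
// zero_rows() calls above: rows constrained by Dirichlet BCs are cleared, and
// the diagonal gets 1.0 only for the on-diagonal (ivar == jvar) block, so the
// constrained equation reduces to u_i = g_i.
// --------------------------------------------------------------------------
// #include <vector>
//
// using Matrix = std::vector<std::vector<double>>;
//
// void zero_rows(Matrix & m, const std::vector<std::size_t> & rows, double diag)
// {
//   for (auto r : rows)
//   {
//     for (auto & entry : m[r]) // wipe the whole row
//       entry = 0.0;
//     m[r][r] = diag; // 1.0 on-diagonal block, 0.0 off-diagonal block
//   }
// }
//
// int main()
// {
//   Matrix diag_block(3, std::vector<double>(3, 2.0));
//   Matrix offdiag_block(3, std::vector<double>(3, 2.0));
//   const std::vector<std::size_t> bc_rows = {0, 2};
//
//   zero_rows(diag_block, bc_rows, 1.0);    // ivar == jvar
//   zero_rows(offdiag_block, bc_rows, 0.0); // ivar != jvar
// }
// --------------------------------------------------------------------------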
3240 
3241 void
3243 {
3250  _kernels.updateActive(tid);
3252  if (tid == 0)
3253  {
3260  }
3261 }
3262 
3263 Real
3264 NonlinearSystemBase::computeDamping(const NumericVector<Number> & solution,
3265  const NumericVector<Number> & update)
3266 {
3267  // Default to no damping
3268  Real damping = 1.0;
3269  bool has_active_dampers = false;
3270 
3271  try
3272  {
3273  if (_element_dampers.hasActiveObjects())
3274  {
3275  PARALLEL_TRY
3276  {
3277  TIME_SECTION("computeDampers", 3, "Computing Dampers");
3278  has_active_dampers = true;
3279  ComputeElemDampingThread cid(_fe_problem, *this);
3281  Threads::parallel_reduce(_fe_problem.getCurrentAlgebraicElementRange(), cid);
3282  damping = std::min(cid.damping(), damping);
3283  }
3284  PARALLEL_CATCH;
3285  }
3286 
3287  if (_nodal_dampers.hasActiveObjects())
3288  {
3289  PARALLEL_TRY
3290  {
3291  TIME_SECTION("computeDamping::element", 3, "Computing Element Damping");
3292 
3293  has_active_dampers = true;
3294  ComputeNodalDampingThread cndt(_fe_problem, *this);
3296  Threads::parallel_reduce(_fe_problem.getCurrentAlgebraicNodeRange(), cndt);
3297  damping = std::min(cndt.damping(), damping);
3298  }
3299  PARALLEL_CATCH;
3300  }
3301 
3302  if (_general_dampers.hasActiveObjects())
3303  {
3304  PARALLEL_TRY
3305  {
3306  TIME_SECTION("computeDamping::general", 3, "Computing General Damping");
3307 
3308  has_active_dampers = true;
3309  const auto & gdampers = _general_dampers.getActiveObjects();
3310  for (const auto & damper : gdampers)
3311  {
3312  Real gd_damping = damper->computeDamping(solution, update);
3313  try
3314  {
3315  damper->checkMinDamping(gd_damping);
3316  }
3317  catch (MooseException & e)
3318  {
3319  _fe_problem.setException(e.what());
3320  }
3321  damping = std::min(gd_damping, damping);
3322  }
3323  }
3324  PARALLEL_CATCH;
3325  }
3326  }
3327  catch (MooseException & e)
3328  {
3329  // The buck stops here, we have already handled the exception by
3330  // calling stopSolve(), it is now up to PETSc to return a
3331  // "diverged" reason during the next solve.
3332  }
3333 
3334  _communicator.min(damping);
3335 
3336  if (has_active_dampers && damping < 1.0)
3337  _console << " Damping factor: " << damping << std::endl;
3338 
3339  return damping;
3340 }
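
// --------------------------------------------------------------------------
// [Editor's example] The damping logic above reduces every damper's proposal
// with std::min and then takes a global minimum across ranks, so the most
// conservative damper wins. Standalone sketch; the 1.0 default means "take
// the full Newton update".
// --------------------------------------------------------------------------
// #include <algorithm>
// #include <cassert>
// #include <vector>
//
// int main()
// {
//   double damping = 1.0; // default: undamped update
//   const std::vector<double> damper_proposals = {0.9, 0.4, 1.0};
//   for (double d : damper_proposals)
//     damping = std::min(d, damping);
//   // _communicator.min(damping) would follow in a parallel run
//   assert(damping == 0.4);
// }
// --------------------------------------------------------------------------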
3341 
3342 void
3343 NonlinearSystemBase::computeDiracContributions(const std::set<TagID> & tags, bool is_jacobian)
3344 {
3345  _fe_problem.clearDiracInfo();
3346 
3347  std::set<const Elem *> dirac_elements;
3348 
3349  if (_dirac_kernels.hasActiveObjects())
3350  {
3351  TIME_SECTION("computeDirac", 3, "Computing DiracKernels");
3352 
3353  // TODO: Need a threading fix... but it's complicated!
3354  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
3355  {
3356  const auto & dkernels = _dirac_kernels.getActiveObjects(tid);
3357  for (const auto & dkernel : dkernels)
3358  {
3359  dkernel->clearPoints();
3360  dkernel->addPoints();
3361  }
3362  }
3363 
3364  ComputeDiracThread cd(_fe_problem, tags, is_jacobian);
3365 
3366  _fe_problem.getDiracElements(dirac_elements);
3367 
3368  DistElemRange range(dirac_elements.begin(), dirac_elements.end(), 1);
3369  // TODO: Make Dirac work thread!
3370  // Threads::parallel_reduce(range, cd);
3371 
3372  cd(range);
3373  }
3374 }
3375 
3376 NumericVector<Number> &
3377 NonlinearSystemBase::residualCopy()
3378 {
3379  if (!_residual_copy.get())
3380  _residual_copy = NumericVector<Number>::build(_communicator);
3381 
3382  return *_residual_copy;
3383 }
3384 
3385 NumericVector<Number> &
3386 NonlinearSystemBase::residualGhosted()
3387 {
3388  _need_residual_ghosted = true;
3389  if (!_residual_ghosted)
3390  {
3391  // The first time we realize we need a ghosted residual vector,
3392  // we add it.
3393  _residual_ghosted = &addVector("residual_ghosted", false, GHOSTED);
3394 
3395  // If we've already realized we need time and/or non-time
3396  // residual vectors, but we haven't yet realized they need to be
3397  // ghosted, fix that now.
3398  //
3399  // If an application changes its mind, the libMesh API lets us
3400  // change the vector.
3401  if (_Re_time)
3402  {
3403  const auto vector_name = _subproblem.vectorTagName(_Re_time_tag);
3404  _Re_time = &system().add_vector(vector_name, false, GHOSTED);
3405  }
3406  if (_Re_non_time)
3407  {
3408  const auto vector_name = _subproblem.vectorTagName(_Re_non_time_tag);
3409  _Re_non_time = &system().add_vector(vector_name, false, GHOSTED);
3410  }
3411  }
3412  return *_residual_ghosted;
3413 }
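
// --------------------------------------------------------------------------
// [Editor's example] residualGhosted() lazily allocates its vector on first
// use and records that it is needed. A minimal model of that create-on-demand
// accessor pattern (hypothetical types, no libMesh), returning a stable
// reference to the same storage on every call.
// --------------------------------------------------------------------------
// #include <memory>
// #include <vector>
//
// class SystemLike
// {
// public:
//   std::vector<double> & residualGhosted()
//   {
//     _need_residual_ghosted = true;
//     if (!_residual_ghosted) // first call: allocate
//       _residual_ghosted = std::make_unique<std::vector<double>>(100, 0.0);
//     return *_residual_ghosted;
//   }
//
// private:
//   bool _need_residual_ghosted = false;
//   std::unique_ptr<std::vector<double>> _residual_ghosted;
// };
//
// int main()
// {
//   SystemLike sys;
//   auto & r1 = sys.residualGhosted(); // allocates
//   auto & r2 = sys.residualGhosted(); // reuses the same storage
//   return &r1 == &r2 ? 0 : 1;
// }
// --------------------------------------------------------------------------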
3414 
3415 void
3416 NonlinearSystemBase::augmentSparsity(SparsityPattern::Graph & sparsity,
3417  std::vector<dof_id_type> & n_nz,
3418  std::vector<dof_id_type> & n_oz)
3419 {
3420  if (_add_implicit_geometric_coupling_entries_to_jacobian)
3421  {
3422  _fe_problem.updateGeomSearch(GeometricSearchData::NEAREST_NODE);
3423 
3424  std::unordered_map<dof_id_type, std::vector<dof_id_type>> graph;
3425 
3426  findImplicitGeometricCouplingEntries(_fe_problem.geomSearchData(), graph);
3427 
3428  if (_fe_problem.getDisplacedProblem())
3429  findImplicitGeometricCouplingEntries(_fe_problem.getDisplacedProblem()->geomSearchData(),
3430  graph);
3431 
3432  const dof_id_type first_dof_on_proc = dofMap().first_dof(processor_id());
3433  const dof_id_type end_dof_on_proc = dofMap().end_dof(processor_id());
3434 
3435  // The total number of dofs on and off processor
3436  const dof_id_type n_dofs_on_proc = dofMap().n_local_dofs();
3437  const dof_id_type n_dofs_not_on_proc = dofMap().n_dofs() - dofMap().n_local_dofs();
3438 
3439  for (const auto & git : graph)
3440  {
3441  dof_id_type dof = git.first;
3442  dof_id_type local_dof = dof - first_dof_on_proc;
3443 
3444  if (dof < first_dof_on_proc || dof >= end_dof_on_proc)
3445  continue;
3446 
3447  const auto & row = git.second;
3448 
3449  SparsityPattern::Row & sparsity_row = sparsity[local_dof];
3450 
3451  unsigned int original_row_length = sparsity_row.size();
3452 
3453  sparsity_row.insert(sparsity_row.end(), row.begin(), row.end());
3454 
3455  SparsityPattern::sort_row(
3456  sparsity_row.begin(), sparsity_row.begin() + original_row_length, sparsity_row.end());
3457 
3458  // Fix up nonzero arrays
3459  for (const auto & coupled_dof : row)
3460  {
3461  if (coupled_dof < first_dof_on_proc || coupled_dof >= end_dof_on_proc)
3462  {
3463  if (n_oz[local_dof] < n_dofs_not_on_proc)
3464  n_oz[local_dof]++;
3465  }
3466  else
3467  {
3468  if (n_nz[local_dof] < n_dofs_on_proc)
3469  n_nz[local_dof]++;
3470  }
3471  }
3472  }
3473  }
3474 }
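
// --------------------------------------------------------------------------
// [Editor's example] Standalone version of the nonzero bookkeeping above:
// for each extra coupling in a locally owned row, bump the on-processor
// (n_nz) or off-processor (n_oz) count depending on where the column lives
// relative to the rank's [first_dof, end_dof) ownership range.
// --------------------------------------------------------------------------
// #include <cassert>
// #include <vector>
//
// int main()
// {
//   const std::size_t first_dof = 4, end_dof = 8; // this rank owns rows 4..7
//   std::vector<std::size_t> n_nz(end_dof - first_dof, 0);
//   std::vector<std::size_t> n_oz(end_dof - first_dof, 0);
//
//   const std::size_t row = 5;                       // locally owned row
//   const std::vector<std::size_t> cols = {2, 5, 9}; // extra couplings
//
//   for (auto col : cols)
//     if (col >= first_dof && col < end_dof)
//       ++n_nz[row - first_dof]; // on-diagonal (local) block
//     else
//       ++n_oz[row - first_dof]; // off-diagonal (remote) block
//
//   assert(n_nz[1] == 1 && n_oz[1] == 2);
// }
// --------------------------------------------------------------------------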
3475 
3476 void
3477 NonlinearSystemBase::setSolutionUDot(const NumericVector<Number> & u_dot)
3478 {
3479  *_u_dot = u_dot;
3480 }
3481 
3482 void
3483 NonlinearSystemBase::setSolutionUDotDot(const NumericVector<Number> & u_dotdot)
3484 {
3485  *_u_dotdot = u_dotdot;
3486 }
3487 
3488 void
3489 NonlinearSystemBase::setSolutionUDotOld(const NumericVector<Number> & u_dot_old)
3490 {
3491  *_u_dot_old = u_dot_old;
3492 }
3493 
3494 void
3495 NonlinearSystemBase::setSolutionUDotDotOld(const NumericVector<Number> & u_dotdot_old)
3496 {
3497  *_u_dotdot_old = u_dotdot_old;
3498 }
3499 
3500 void
3501 NonlinearSystemBase::setPreconditioner(std::shared_ptr<MoosePreconditioner> pc)
3502 {
3503  if (_preconditioner.get() != nullptr)
3504  mooseError("More than one active Preconditioner detected");
3505 
3506  _preconditioner = pc;
3507 }
3508 
3509 MoosePreconditioner const *
3510 NonlinearSystemBase::getPreconditioner() const
3511 {
3512  return _preconditioner.get();
3513 }
3514 
3515 void
3516 NonlinearSystemBase::setupDampers()
3517 {
3518  _increment_vec = &_sys.add_vector("u_increment", true, GHOSTED);
3519 }
3520 
3521 void
3522 NonlinearSystemBase::reinitIncrementAtQpsForDampers(THREAD_ID tid,
3523  const std::set<MooseVariable *> & damped_vars)
3524 {
3525  for (const auto & var : damped_vars)
3526  var->computeIncrementAtQps(*_increment_vec);
3527 }
3528 
3529 void
3530 NonlinearSystemBase::reinitIncrementAtNodeForDampers(THREAD_ID tid,
3531  const std::set<MooseVariable *> & damped_vars)
3532 {
3533  for (const auto & var : damped_vars)
3534  var->computeIncrementAtNode(*_increment_vec);
3535 }
3536 
3537 void
3538 NonlinearSystemBase::checkKernelCoverage(const std::set<SubdomainID> & mesh_subdomains) const
3539 {
3540  // Obtain all blocks and variables covered by all kernels
3541  std::set<SubdomainID> input_subdomains;
3542  std::set<std::string> kernel_variables;
3543 
3544  bool global_kernels_exist = false;
3545  global_kernels_exist |= _scalar_kernels.hasActiveObjects();
3546  global_kernels_exist |= _nodal_kernels.hasActiveObjects();
3547 
3548  _kernels.subdomainsCovered(input_subdomains, kernel_variables);
3549  _dg_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3550  _nodal_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3551  _scalar_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3552  _constraints.subdomainsCovered(input_subdomains, kernel_variables);
3553 
3554  if (_fe_problem.haveFV())
3555  {
3556  std::vector<FVElementalKernel *> fv_elemental_kernels;
3558  .query()
3559  .template condition<AttribSystem>("FVElementalKernel")
3560  .queryInto(fv_elemental_kernels);
3561 
3562  for (auto fv_kernel : fv_elemental_kernels)
3563  {
3564  if (fv_kernel->blockRestricted())
3565  for (auto block_id : fv_kernel->blockIDs())
3566  input_subdomains.insert(block_id);
3567  else
3568  global_kernels_exist = true;
3569  kernel_variables.insert(fv_kernel->variable().name());
3570 
3571  // Check for lagrange multiplier
3572  if (dynamic_cast<FVScalarLagrangeMultiplierConstraint *>(fv_kernel))
3573  kernel_variables.insert(dynamic_cast<FVScalarLagrangeMultiplierConstraint *>(fv_kernel)
3574  ->lambdaVariable()
3575  .name());
3576  }
3577 
3578  std::vector<FVFluxKernel *> fv_flux_kernels;
3580  .query()
3581  .template condition<AttribSystem>("FVFluxKernel")
3582  .queryInto(fv_flux_kernels);
3583 
3584  for (auto fv_kernel : fv_flux_kernels)
3585  {
3586  if (fv_kernel->blockRestricted())
3587  for (auto block_id : fv_kernel->blockIDs())
3588  input_subdomains.insert(block_id);
3589  else
3590  global_kernels_exist = true;
3591  kernel_variables.insert(fv_kernel->variable().name());
3592  }
3593 
3594  std::vector<FVInterfaceKernel *> fv_interface_kernels;
3596  .query()
3597  .template condition<AttribSystem>("FVInterfaceKernel")
3598  .queryInto(fv_interface_kernels);
3599 
3600  for (auto fvik : fv_interface_kernels)
3601  if (auto scalar_fvik = dynamic_cast<FVScalarLagrangeMultiplierInterface *>(fvik))
3602  kernel_variables.insert(scalar_fvik->lambdaVariable().name());
3603 
3604  std::vector<FVFluxBC *> fv_flux_bcs;
3606  .query()
3607  .template condition<AttribSystem>("FVFluxBC")
3608  .queryInto(fv_flux_bcs);
3609 
3610  for (auto fvbc : fv_flux_bcs)
3611  if (auto scalar_fvbc = dynamic_cast<FVBoundaryScalarLagrangeMultiplierConstraint *>(fvbc))
3612  kernel_variables.insert(scalar_fvbc->lambdaVariable().name());
3613  }
3614 
3615  // Check kernel coverage of subdomains (blocks) in your mesh
3616  if (!global_kernels_exist)
3617  {
3618  std::set<SubdomainID> difference;
3619  std::set_difference(mesh_subdomains.begin(),
3620  mesh_subdomains.end(),
3621  input_subdomains.begin(),
3622  input_subdomains.end(),
3623  std::inserter(difference, difference.end()));
3624 
3625  // there are supposed to be no kernels on these lower-dimensional subdomains
3626  for (const auto & id : _mesh.interiorLowerDBlocks())
3627  difference.erase(id);
3628  for (const auto & id : _mesh.boundaryLowerDBlocks())
3629  difference.erase(id);
3630 
3631  if (!difference.empty())
3632  {
3633  std::vector<SubdomainID> difference_vec =
3634  std::vector<SubdomainID>(difference.begin(), difference.end());
3635  std::vector<SubdomainName> difference_names = _mesh.getSubdomainNames(difference_vec);
3636  std::stringstream missing_block_names;
3637  std::copy(difference_names.begin(),
3638  difference_names.end(),
3639  std::ostream_iterator<std::string>(missing_block_names, " "));
3640  std::stringstream missing_block_ids;
3641  std::copy(difference.begin(),
3642  difference.end(),
3643  std::ostream_iterator<unsigned int>(missing_block_ids, " "));
3644 
3645  mooseError("Each subdomain must contain at least one Kernel.\nThe following block(s) lack an "
3646  "active kernel: " +
3647  missing_block_names.str(),
3648  " (ids: ",
3649  missing_block_ids.str(),
3650  ")");
3651  }
3652  }
3653 
3654  // Check kernel use of variables
3655  std::set<VariableName> variables(getVariableNames().begin(), getVariableNames().end());
3656 
3657  std::set<VariableName> difference;
3658  std::set_difference(variables.begin(),
3659  variables.end(),
3660  kernel_variables.begin(),
3661  kernel_variables.end(),
3662  std::inserter(difference, difference.end()));
3663 
3664  // skip checks for variables defined on lower-dimensional subdomains
3665  std::set<VariableName> vars(difference);
3666  for (auto & var_name : vars)
3667  {
3668  auto blks = getSubdomainsForVar(var_name);
3669  for (const auto & id : blks)
3670  if (_mesh.interiorLowerDBlocks().count(id) > 0 || _mesh.boundaryLowerDBlocks().count(id) > 0)
3671  difference.erase(var_name);
3672  }
3673 
3674  if (!difference.empty())
3675  {
3676  std::stringstream missing_kernel_vars;
3677  std::copy(difference.begin(),
3678  difference.end(),
3679  std::ostream_iterator<std::string>(missing_kernel_vars, " "));
3680  mooseError("Each variable must be referenced by at least one active Kernel.\nThe following "
3681  "variable(s) lack an active kernel: " +
3682  missing_kernel_vars.str());
3683  }
3684 }
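
// --------------------------------------------------------------------------
// [Editor's example] Both coverage checks above are plain std::set_difference
// computations: "mesh blocks minus blocks any kernel covers" and "variables
// minus variables any kernel references". Minimal standalone version of the
// first check.
// --------------------------------------------------------------------------
// #include <algorithm>
// #include <iostream>
// #include <iterator>
// #include <set>
//
// int main()
// {
//   const std::set<int> mesh_subdomains = {1, 2, 3};
//   const std::set<int> covered = {1, 3};
//
//   std::set<int> missing;
//   std::set_difference(mesh_subdomains.begin(),
//                       mesh_subdomains.end(),
//                       covered.begin(),
//                       covered.end(),
//                       std::inserter(missing, missing.end()));
//
//   for (int id : missing)
//     std::cout << "block " << id << " has no active kernel\n"; // prints block 2
// }
// --------------------------------------------------------------------------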
3685 
3686 bool
3687 NonlinearSystemBase::containsTimeKernel()
3688 {
3689  auto & time_kernels = _kernels.getVectorTagObjectWarehouse(timeVectorTag(), 0);
3690 
3691  return time_kernels.hasActiveObjects();
3692 }
3693 
3694 std::vector<std::string>
3695 NonlinearSystemBase::timeKernelVariableNames()
3696 {
3697  std::vector<std::string> variable_names;
3698  const auto & time_kernels = _kernels.getVectorTagObjectWarehouse(timeVectorTag(), 0);
3699  if (time_kernels.hasActiveObjects())
3700  for (const auto & kernel : time_kernels.getObjects())
3701  variable_names.push_back(kernel->variable().name());
3702 
3703  return variable_names;
3704 }
3705 
3706 bool
3707 NonlinearSystemBase::needBoundaryMaterialOnSide(BoundaryID bnd_id, THREAD_ID tid)
3708 {
3709  return _integrated_bcs.hasActiveBoundaryObjects(bnd_id, tid);
3710 }
3711 
3712 bool
3713 NonlinearSystemBase::needInterfaceMaterialOnSide(BoundaryID bnd_id, THREAD_ID tid)
3714 {
3715  return _interface_kernels.hasActiveBoundaryObjects(bnd_id, tid);
3716 }
3717 
3718 bool
3719 NonlinearSystemBase::needSubdomainMaterialOnSide(SubdomainID /*subdomain_id*/,
3720  THREAD_ID /*tid*/) const
3721 {
3722  return _doing_dg;
3723 }
3724 
3725 bool
3726 NonlinearSystemBase::doingDG() const
3727 {
3728  return _doing_dg;
3729 }
3730 
3731 void
3733 {
3736 }
3737 
3738 void
3739 NonlinearSystemBase::mortarConstraints(Moose::ComputeType compute_type,
3740  const std::set<TagID> & vector_tags,
3741  const std::set<TagID> & matrix_tags)
3742 {
3743  parallel_object_only();
3744 
3745  try
3746  {
3747  for (auto & map_pr : _undisplaced_mortar_functors)
3748  map_pr.second(compute_type, vector_tags, matrix_tags);
3749 
3750  for (auto & map_pr : _displaced_mortar_functors)
3751  map_pr.second(compute_type, vector_tags, matrix_tags);
3752  }
3753  catch (MetaPhysicL::LogicError &)
3754  {
3755  mooseError(
3756  "We caught a MetaPhysicL error in NonlinearSystemBase::mortarConstraints. This is very "
3757  "likely due to AD not having a sufficiently large derivative container size. Please run "
3758  "MOOSE configure with the '--with-derivative-size=<n>' option");
3759  }
3760 }
3761 
3762 void
3763 NonlinearSystemBase::setupScalingData()
3764 {
3765  if (_auto_scaling_initd)
3766  return;
3767 
3768  // Want the libMesh count of variables, not MOOSE, e.g. I don't care about array variable counts
3769  const auto n_vars = system().n_vars();
3770 
3771  if (_scaling_group_variables.empty())
3772  {
3773  _var_to_group_var.reserve(n_vars);
3774  _num_scaling_groups = n_vars;
3775 
3776  for (const auto var_number : make_range(n_vars))
3777  _var_to_group_var.emplace(var_number, var_number);
3778  }
3779  else
3780  {
3781  std::set<unsigned int> var_numbers, var_numbers_covered, var_numbers_not_covered;
3782  for (const auto var_number : make_range(n_vars))
3783  var_numbers.insert(var_number);
3784 
3785  _var_to_group_var.reserve(n_vars);
3786 
3787  for (const auto group_index : index_range(_scaling_group_variables))
3788  for (const auto & var_name : _scaling_group_variables[group_index])
3789  {
3790  if (!hasVariable(var_name) && !hasScalarVariable(var_name))
3791  mooseError("'",
3792  var_name,
3793  "', provided to the 'scaling_group_variables' parameter, does not exist in "
3794  "the nonlinear system.");
3795 
3796  const MooseVariableBase & var =
3797  hasVariable(var_name)
3798  ? static_cast<MooseVariableBase &>(getVariable(0, var_name))
3799  : static_cast<MooseVariableBase &>(getScalarVariable(0, var_name));
3800  auto map_pair = _var_to_group_var.emplace(var.number(), group_index);
3801  if (!map_pair.second)
3802  mooseError("Variable ", var_name, " is contained in multiple scaling grouplings");
3803  var_numbers_covered.insert(var.number());
3804  }
3805 
3806  std::set_difference(var_numbers.begin(),
3807  var_numbers.end(),
3808  var_numbers_covered.begin(),
3809  var_numbers_covered.end(),
3810  std::inserter(var_numbers_not_covered, var_numbers_not_covered.begin()));
3811 
3812  _num_scaling_groups = _scaling_group_variables.size() + var_numbers_not_covered.size();
3813 
3814  auto index = static_cast<unsigned int>(_scaling_group_variables.size());
3815  for (auto var_number : var_numbers_not_covered)
3816  _var_to_group_var.emplace(var_number, index++);
3817  }
3818 
3819  _variable_autoscaled.resize(n_vars, true);
3820  const auto & number_to_var_map = _vars[0].numberToVariableMap();
3821 
3822  if (!_ignore_variables_for_autoscaling.empty())
3823  for (const auto i : index_range(_variable_autoscaled))
3824  if (std::find(_ignore_variables_for_autoscaling.begin(),
3825  _ignore_variables_for_autoscaling.end(),
3826  libmesh_map_find(number_to_var_map, i)->name()) !=
3827  _ignore_variables_for_autoscaling.end())
3828  _variable_autoscaled[i] = false;
3829 
3830  _auto_scaling_initd = true;
3831 }
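
// --------------------------------------------------------------------------
// [Editor's example] setupScalingData() uses map emplace to detect a variable
// listed in two scaling groups: emplace returns {iterator, false} when the
// key already exists. Standalone sketch of that duplicate check.
// --------------------------------------------------------------------------
// #include <iostream>
// #include <unordered_map>
// #include <utility>
//
// int main()
// {
//   std::unordered_map<unsigned int, unsigned int> var_to_group;
//   // (variable number, group index) pairs as they would be parsed
//   const std::pair<unsigned int, unsigned int> entries[] = {{0, 0}, {1, 0}, {1, 1}};
//
//   for (const auto & [var, group] : entries)
//     if (!var_to_group.emplace(var, group).second) // key already present
//       std::cout << "variable " << var << " is in multiple scaling groups\n";
// }
// --------------------------------------------------------------------------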
3832 
3833 bool
3834 NonlinearSystemBase::computeScaling()
3835 {
3836  if (_computed_scaling)
3837  return true;
3838 
3839  _console << "\nPerforming automatic scaling calculation\n" << std::endl;
3840 
3841  TIME_SECTION("computeScaling", 3, "Computing Automatic Scaling");
3842 
3843  // It's funny but we need to assemble our vector of scaling factors here otherwise we will be
3844  // applying scaling factors of 0 during Assembly of our scaling Jacobian
3845  assembleScalingVector();
3846 
3847  // container for repeated access of element global dof indices
3848  std::vector<dof_id_type> dof_indices;
3849 
3850  if (!_auto_scaling_initd)
3851  setupScalingData();
3852 
3853  std::vector<Real> inverse_scaling_factors(_num_scaling_groups, 0);
3854  std::vector<Real> resid_inverse_scaling_factors(_num_scaling_groups, 0);
3855  std::vector<Real> jac_inverse_scaling_factors(_num_scaling_groups, 0);
3856  auto & dof_map = dofMap();
3857 
3858  // what types of scaling do we want?
3859  bool jac_scaling = _resid_vs_jac_scaling_param < 1. - TOLERANCE;
3860  bool resid_scaling = _resid_vs_jac_scaling_param > TOLERANCE;
3861 
3862  const NumericVector<Number> & scaling_residual = RHS();
3863 
3864  if (jac_scaling)
3865  {
3866  // if (!_auto_scaling_initd)
3867  // We need to reinit this when the number of dofs changes
3868  // but there is no good way to track that
3869  // In theory, it is the job of libmesh system to track this,
3870  // but this special matrix is not owned by libMesh system
3871  // Let us reinit every time since it is not expensive
3872  {
3873  auto init_vector = NumericVector<Number>::build(this->comm());
3874  init_vector->init(system().n_dofs(), system().n_local_dofs(), /*fast=*/false, PARALLEL);
3875 
3876  _scaling_matrix->clear();
3877  _scaling_matrix->init(*init_vector);
3878  }
3879 
3880  _fe_problem.computingScalingJacobian(true);
3881  // Dispatch to derived classes to ensure that we use the correct matrix tag
3882  computeScalingJacobian();
3883  _fe_problem.computingScalingJacobian(false);
3884  }
3885 
3886  if (resid_scaling)
3887  {
3888  _fe_problem.computingScalingResidual(true);
3889  _fe_problem.computingNonlinearResid(true);
3890  // Dispatch to derived classes to ensure that we use the correct vector tag
3891  computeScalingResidual();
3892  _fe_problem.computingNonlinearResid(false);
3893  _fe_problem.computingScalingResidual(false);
3894  }
3895 
3896  // Did something bad happen during residual/Jacobian scaling computation?
3897  if (_fe_problem.getFailNextNonlinearConvergenceCheck())
3898  return false;
3899 
3900  auto examine_dof_indices = [this,
3901  jac_scaling,
3902  resid_scaling,
3903  &dof_map,
3904  &jac_inverse_scaling_factors,
3905  &resid_inverse_scaling_factors,
3906  &scaling_residual](const auto & dof_indices, const auto var_number)
3907  {
3908  for (auto dof_index : dof_indices)
3909  if (dof_map.local_index(dof_index))
3910  {
3911  if (jac_scaling)
3912  {
3913  // For now we will use the diagonal for determining scaling
3914  auto mat_value = (*_scaling_matrix)(dof_index, dof_index);
3915  auto & factor = jac_inverse_scaling_factors[_var_to_group_var[var_number]];
3916  factor = std::max(factor, std::abs(mat_value));
3917  }
3918  if (resid_scaling)
3919  {
3920  auto vec_value = scaling_residual(dof_index);
3921  auto & factor = resid_inverse_scaling_factors[_var_to_group_var[var_number]];
3922  factor = std::max(factor, std::abs(vec_value));
3923  }
3924  }
3925  };
3926 
3927  // Compute our scaling factors for the spatial field variables
3928  for (const auto & elem : _fe_problem.getCurrentAlgebraicElementRange())
3929  for (const auto i : make_range(system().n_vars()))
3930  if (_variable_autoscaled[i] && system().variable_type(i).family != SCALAR)
3931  {
3932  dof_map.dof_indices(elem, dof_indices, i);
3933  examine_dof_indices(dof_indices, i);
3934  }
3935 
3936  for (const auto i : make_range(system().n_vars()))
3937  if (_variable_autoscaled[i] && system().variable_type(i).family == SCALAR)
3938  {
3939  dof_map.SCALAR_dof_indices(dof_indices, i);
3940  examine_dof_indices(dof_indices, i);
3941  }
3942 
3943  if (resid_scaling)
3944  _communicator.max(resid_inverse_scaling_factors);
3945  if (jac_scaling)
3946  _communicator.max(jac_inverse_scaling_factors);
3947 
3948  if (jac_scaling && resid_scaling)
3949  for (MooseIndex(inverse_scaling_factors) i = 0; i < inverse_scaling_factors.size(); ++i)
3950  {
3951  // Be careful not to take log(0)
3952  if (!resid_inverse_scaling_factors[i])
3953  {
3954  if (!jac_inverse_scaling_factors[i])
3955  inverse_scaling_factors[i] = 1;
3956  else
3957  inverse_scaling_factors[i] = jac_inverse_scaling_factors[i];
3958  }
3959  else if (!jac_inverse_scaling_factors[i])
3960  // We know the resid is not zero
3961  inverse_scaling_factors[i] = resid_inverse_scaling_factors[i];
3962  else
3963  inverse_scaling_factors[i] =
3964  std::exp(_resid_vs_jac_scaling_param * std::log(resid_inverse_scaling_factors[i]) +
3965  (1 - _resid_vs_jac_scaling_param) * std::log(jac_inverse_scaling_factors[i]));
3966  }
3967  else if (jac_scaling)
3968  inverse_scaling_factors = jac_inverse_scaling_factors;
3969  else if (resid_scaling)
3970  inverse_scaling_factors = resid_inverse_scaling_factors;
3971  else
3972  mooseError("We shouldn't be calling this routine if we're not performing any scaling");
3973 
3974  // We have to make sure that our scaling values are not zero
3975  for (auto & scaling_factor : inverse_scaling_factors)
3976  if (scaling_factor == 0)
3977  scaling_factor = 1;
3978 
3979  // Now flatten the group scaling factors to the individual variable scaling factors
3980  std::vector<Real> flattened_inverse_scaling_factors(system().n_vars());
3981  for (const auto i : index_range(flattened_inverse_scaling_factors))
3982  flattened_inverse_scaling_factors[i] = inverse_scaling_factors[_var_to_group_var[i]];
3983 
3984  // Now set the scaling factors for the variables
3985  applyScalingFactors(flattened_inverse_scaling_factors);
3986  if (auto displaced_problem = _fe_problem.getDisplacedProblem().get())
3987  displaced_problem->systemBaseNonlinear(number()).applyScalingFactors(
3988  flattened_inverse_scaling_factors);
3989 
3990  _computed_scaling = true;
3991  return true;
3992 }
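For orientation, the loops and reductions above amount to the following: for each scaling group \(g\), the element and scalar dof loops compute per-group infinity norms of the scaling Jacobian diagonal \(J\) and the scaling residual \(R\) over locally owned dofs,

\[
j_g = \max_{d \in g} \lvert J_{dd} \rvert, \qquad r_g = \max_{d \in g} \lvert R_d \rvert,
\]

which the subsequent _communicator.max() calls promote to global maxima. When both scaling types are requested, the exp/log expression is simply the weighted geometric mean

\[
s_g^{-1} = \exp\!\left(\alpha \ln r_g + (1 - \alpha) \ln j_g\right) = r_g^{\alpha}\, j_g^{1 - \alpha},
\qquad \alpha = \texttt{\_resid\_vs\_jac\_scaling\_param},
\]

so \(\alpha = 1\) yields purely residual-based factors and \(\alpha = 0\) purely Jacobian-based factors.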
3993 
3994 void
3995 NonlinearSystemBase::assembleScalingVector()
3996 {
3997  if (!hasVector("scaling_factors"))
3998  // No variables have indicated they need scaling
3999  return;
4000 
4001  auto & scaling_vector = getVector("scaling_factors");
4002 
4003  const auto & lm_mesh = _mesh.getMesh();
4004  const auto & dof_map = dofMap();
4005 
4006  const auto & field_variables = _vars[0].fieldVariables();
4007  const auto & scalar_variables = _vars[0].scalars();
4008 
4009  std::vector<dof_id_type> dof_indices;
4010 
4011  for (const Elem * const elem :
4012  as_range(lm_mesh.active_local_elements_begin(), lm_mesh.active_local_elements_end()))
4013  for (const auto * const field_var : field_variables)
4014  {
4015  const auto & factors = field_var->arrayScalingFactor();
4016  for (const auto i : make_range(field_var->count()))
4017  {
4018  dof_map.dof_indices(elem, dof_indices, field_var->number() + i);
4019  for (const auto dof : dof_indices)
4020  scaling_vector.set(dof, factors[i]);
4021  }
4022  }
4023 
4024  for (const auto * const scalar_var : scalar_variables)
4025  {
4026  mooseAssert(scalar_var->count() == 1,
4027  "Scalar variables should always have only one component.");
4028  dof_map.SCALAR_dof_indices(dof_indices, scalar_var->number());
4029  for (const auto dof : dof_indices)
4030  scaling_vector.set(dof, scalar_var->scalingFactor());
4031  }
4032 
4033  // Parallel assemble
4034  scaling_vector.close();
4035 
4036  if (auto * displaced_problem = _fe_problem.getDisplacedProblem().get())
4037  // copy into the corresponding displaced system vector because they should be the exact same
4038  displaced_problem->systemBaseNonlinear(number()).getVector("scaling_factors") = scaling_vector;
4039 }
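For readers less familiar with the libMesh NumericVector API used in assembleScalingVector() above, the set-then-close idiom is the standard way to populate a parallel vector: entries are staged with set() and only become globally consistent after close(). A minimal standalone sketch of that idiom (the global size of 12 and the uniform value 1.0 are hypothetical; this is not MOOSE code):

#include "libmesh/libmesh.h"
#include "libmesh/numeric_vector.h"

using namespace libMesh;

int main(int argc, char ** argv)
{
  LibMeshInit init(argc, argv);

  // Hypothetical size: 12 global entries partitioned across the communicator
  auto vec = NumericVector<Number>::build(init.comm());
  vec->init(12, /*fast=*/false, PARALLEL);

  // Analogous to scaling_vector.set(dof, factor) above
  for (numeric_index_type i = vec->first_local_index(); i < vec->last_local_index(); ++i)
    vec->set(i, 1.0);

  // Parallel assembly; required before the vector can be read consistently
  vec->close();

  return 0;
}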
4040 
4041 bool
4042 NonlinearSystemBase::preSolve()
4043 {
4044  // Clear the iteration counters
4045  _current_l_its.clear();
4046  _current_nl_its = 0;
4047 
4048  // Initialize the solution vector using a predictor and known values from nodal bcs
4049  setInitialSolution();
4050 
4051  // Now that the initial solution has been set, potentially perform a residual/Jacobian evaluation
4052  // to determine variable scaling factors
4053  if (_automatic_scaling)
4054  {
4055  const bool scaling_succeeded = computeScaling();
4056  if (!scaling_succeeded)
4057  return false;
4058  }
4059 
4060  // We do not know a priori what variable a global degree of freedom corresponds to, so we need a
4061  // map from global dof to scaling factor. We just use a ghosted NumericVector for that mapping
4062  assembleScalingVector();
4063 
4064  return true;
4065 }
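For context on the boolean return value: callers are expected to gate the actual nonlinear solve on preSolve(), bailing out early when automatic scaling fails. A minimal sketch of that pattern in a hypothetical derived system (the real derived classes hand off to their nonlinear solver where indicated):

void MyNonlinearSystem::solve()
{
  // Clear counters, apply the predictor, and optionally compute scaling factors
  const bool presolve_succeeded = preSolve();
  if (!presolve_succeeded)
    return;

  // ... dispatch to the nonlinear solver ...
}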
4066 
4067 void
4068 NonlinearSystemBase::destroyColoring()
4069 {
4070  if (matrixFromColoring())
4071  LibmeshPetscCall(MatFDColoringDestroy(&_fdcoloring));
4072 }
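Finally, the MatFDColoring handle destroyed in destroyColoring() pairs with a MatFDColoringCreate() performed when the system matrix is built from a coloring. A minimal sketch of that PETSc create/destroy lifecycle (the helper name and the assumption of an already-assembled matrix mat are hypothetical; the SetFunction/SetUp steps are elided):

#include <petscmat.h>

// Hypothetical helper: build a coloring for an assembled matrix with a valid
// nonzero pattern, then release it the same way destroyColoring() does.
PetscErrorCode exerciseColoring(Mat mat)
{
  PetscFunctionBeginUser;
  MatColoring mc;
  ISColoring iscoloring;
  MatFDColoring fdcoloring;

  PetscCall(MatColoringCreate(mat, &mc));
  PetscCall(MatColoringSetType(mc, MATCOLORINGSL));
  PetscCall(MatColoringApply(mc, &iscoloring));
  PetscCall(MatColoringDestroy(&mc));

  PetscCall(MatFDColoringCreate(mat, iscoloring, &fdcoloring));
  PetscCall(ISColoringDestroy(&iscoloring));

  // ... MatFDColoringSetFunction()/MatFDColoringSetUp() and solver use elided ...

  PetscCall(MatFDColoringDestroy(&fdcoloring));
  PetscFunctionReturn(PETSC_SUCCESS);
}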