NonlinearSystemBase.C
//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "NonlinearSystemBase.h"
#include "AuxiliarySystem.h"
#include "Problem.h"
#include "FEProblem.h"
#include "MooseVariableFE.h"
#include "MooseVariableScalar.h"
#include "PetscSupport.h"
#include "Factory.h"
#include "ParallelUniqueId.h"
#include "ThreadedElementLoop.h"
#include "MaterialData.h"
#include "ComputeResidualThread.h"
#include "ComputeResidualAndJacobianThread.h"
#include "ComputeFVFluxThread.h"
#include "ComputeJacobianThread.h"
#include "ComputeNodalKernelsThread.h"
#include "ComputeNodalKernelBcsThread.h"
#include "ComputeDiracThread.h"
#include "ComputeMortarFunctor.h"
#include "TimeKernel.h"
#include "BoundaryCondition.h"
#include "DirichletBCBase.h"
#include "NodalBCBase.h"
#include "IntegratedBCBase.h"
#include "DGKernel.h"
#include "InterfaceKernelBase.h"
#include "ElementDamper.h"
#include "NodalDamper.h"
#include "GeneralDamper.h"
#include "DisplacedProblem.h"
#include "NearestNodeLocator.h"
#include "PenetrationLocator.h"
#include "NodalConstraint.h"
#include "NodeFaceConstraint.h"
#include "NodeElemConstraintBase.h"
#include "MortarConstraint.h"
#include "ElemElemConstraint.h"
#include "ScalarKernelBase.h"
#include "Parser.h"
#include "Split.h"
#include "MooseMesh.h"
#include "MooseUtils.h"
#include "MooseApp.h"
#include "NodalKernelBase.h"
#include "DiracKernelBase.h"
#include "TimeIntegrator.h"
#include "Predictor.h"
#include "Assembly.h"
#include "ElementPairLocator.h"
#include "ODETimeKernel.h"
#include "ADKernel.h"
#include "ADDirichletBCBase.h"
#include "Moose.h"
#include "ConsoleStream.h"
#include "MooseError.h"
#include "FVElementalKernel.h"
#include "FVFluxBC.h"
#include "FVFluxKernel.h"
#include "FVInterfaceKernel.h"
#include "UserObject.h"
#include "HDGKernel.h"

// libMesh
#include "libmesh/nonlinear_solver.h"
#include "libmesh/quadrature_gauss.h"
#include "libmesh/dense_vector.h"
#include "libmesh/boundary_info.h"
#include "libmesh/petsc_matrix.h"
#include "libmesh/petsc_vector.h"
#include "libmesh/petsc_nonlinear_solver.h"
#include "libmesh/numeric_vector.h"
#include "libmesh/mesh.h"
#include "libmesh/dense_subvector.h"
#include "libmesh/dense_submatrix.h"
#include "libmesh/dof_map.h"
#include "libmesh/sparse_matrix.h"
#include "libmesh/default_coupling.h"
#include "libmesh/diagonal_matrix.h"
#include "libmesh/fe_interface.h"
#include "libmesh/petsc_solver_exception.h"

#include <ios>

#include "petscsnes.h"
#include <PetscDMMoose.h>
EXTERN_C_BEGIN
extern PetscErrorCode DMCreate_Moose(DM);
EXTERN_C_END

using namespace libMesh;

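// NonlinearSystemBase holds the nonlinear system of equations to be solved: the residual
// vectors (time and non-time), the system matrix, and the warehouses of residual objects
// (kernels, boundary conditions, constraints, dampers, ...) that contribute to them.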
NonlinearSystemBase::NonlinearSystemBase(FEProblemBase & fe_problem,
                                         System & sys,
                                         const std::string & name)
  : SolverSystem(fe_problem, fe_problem, name, Moose::VAR_SOLVER),
    PerfGraphInterface(fe_problem.getMooseApp().perfGraph(), "NonlinearSystemBase"),
    _sys(sys),
    _last_nl_rnorm(0.),
    _current_nl_its(0),
    _residual_ghosted(NULL),
    _Re_time_tag(-1),
    _Re_time(NULL),
    _Re_non_time_tag(-1),
    _Re_non_time(NULL),
    _scalar_kernels(/*threaded=*/false),
    _nodal_bcs(/*threaded=*/false),
    _preset_nodal_bcs(/*threaded=*/false),
    _ad_preset_nodal_bcs(/*threaded=*/false),
    _splits(/*threaded=*/false),
    _increment_vec(NULL),
    _use_finite_differenced_preconditioner(false),
    _fdcoloring(nullptr),
    _have_decomposition(false),
    _use_field_split_preconditioner(false),
    _add_implicit_geometric_coupling_entries_to_jacobian(false),
    _assemble_constraints_separately(false),
    _need_residual_ghosted(false),
    _debugging_residuals(false),
    _doing_dg(false),
    _n_iters(0),
    _n_linear_iters(0),
    _n_residual_evaluations(0),
    _final_residual(0.),
    _computing_pre_smo_residual(false),
    _pre_smo_residual(0),
    _initial_residual(0),
    _use_pre_smo_residual(false),
    _print_all_var_norms(false),
    _has_save_in(false),
    _has_diag_save_in(false),
    _has_nodalbc_save_in(false),
    _has_nodalbc_diag_save_in(false),
    _computed_scaling(false),
    _compute_scaling_once(true),
    _resid_vs_jac_scaling_param(0),
    _off_diagonals_in_auto_scaling(false),
    _auto_scaling_initd(false)
{
  getResidualNonTimeVector();
  // Don't need to add the matrix - it already exists (for now)
  _Ke_system_tag = _fe_problem.addMatrixTag("SYSTEM");

  // The time matrix tag is not normally used - but must be added to the system
  // in case it is so that objects can have 'time' in their matrix tags by default
  _fe_problem.addMatrixTag("TIME");

  _Re_tag = _fe_problem.addVectorTag("RESIDUAL");

  if (!_fe_problem.defaultGhosting())
  {
    auto & dof_map = _sys.get_dof_map();
    dof_map.remove_algebraic_ghosting_functor(dof_map.default_algebraic_ghosting());
    dof_map.set_implicit_neighbor_dofs(false);
  }
}

NonlinearSystemBase::~NonlinearSystemBase() = default;

void
NonlinearSystemBase::preInit()
{
  SolverSystem::preInit();

  if (_fe_problem.hasDampers())
    setupDampers();

  if (_residual_copy.get())
    _residual_copy->init(_sys.n_dofs(), false, SERIAL);
}

void
NonlinearSystemBase::turnOffJacobian()
{
  nonlinearSolver()->jacobian = NULL;
}

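// initialSetup is called once, before the first solve. It forwards initialSetup to every
// residual object on every thread, builds the mortar functors for any mortar interfaces,
// and allocates the scaling matrix when automatic variable scaling is enabled.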
void
NonlinearSystemBase::initialSetup()
{
  TIME_SECTION("nlInitialSetup", 2, "Setting Up Nonlinear System");

  SolverSystem::initialSetup();

  {
    TIME_SECTION("kernelsInitialSetup", 2, "Setting Up Kernels/BCs/Constraints");

    for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
    {
      _kernels.initialSetup(tid);
      _nodal_kernels.initialSetup(tid);
      _dirac_kernels.initialSetup(tid);
      if (_doing_dg)
        _dg_kernels.initialSetup(tid);
      _interface_kernels.initialSetup(tid);
      _element_dampers.initialSetup(tid);
      _nodal_dampers.initialSetup(tid);
      _integrated_bcs.initialSetup(tid);

      if (_fe_problem.haveFV())
      {
        std::vector<FVElementalKernel *> fv_elemental_kernels;
        _fe_problem.theWarehouse()
            .query()
            .template condition<AttribSystem>("FVElementalKernel")
            .template condition<AttribThread>(tid)
            .queryInto(fv_elemental_kernels);

        for (auto * fv_kernel : fv_elemental_kernels)
          fv_kernel->initialSetup();

        std::vector<FVFluxKernel *> fv_flux_kernels;
        _fe_problem.theWarehouse()
            .query()
            .template condition<AttribSystem>("FVFluxKernel")
            .template condition<AttribThread>(tid)
            .queryInto(fv_flux_kernels);

        for (auto * fv_kernel : fv_flux_kernels)
          fv_kernel->initialSetup();
      }
    }

    _scalar_kernels.initialSetup();
    _constraints.initialSetup();
    _general_dampers.initialSetup();
    _nodal_bcs.initialSetup();
  }

  {
    TIME_SECTION("mortarSetup", 2, "Initializing Mortar Interfaces");

    auto create_mortar_functors = [this](const bool displaced)
    {
      // go over mortar interfaces and construct functors
      const auto & mortar_interfaces = _fe_problem.getMortarInterfaces(displaced);
      for (const auto & mortar_interface : mortar_interfaces)
      {
        const auto primary_secondary_boundary_pair = mortar_interface.first;
        if (!_constraints.hasActiveMortarConstraints(primary_secondary_boundary_pair, displaced))
          continue;

        const auto & mortar_generation_object = mortar_interface.second;

        auto & mortar_constraints =
            _constraints.getActiveMortarConstraints(primary_secondary_boundary_pair, displaced);

        auto & subproblem = displaced
                                ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                                : static_cast<SubProblem &>(_fe_problem);

        auto & mortar_functors =
            displaced ? _displaced_mortar_functors : _undisplaced_mortar_functors;

        mortar_functors.emplace(primary_secondary_boundary_pair,
                                ComputeMortarFunctor(mortar_constraints,
                                                     mortar_generation_object,
                                                     subproblem,
                                                     _fe_problem,
                                                     displaced,
                                                     subproblem.assembly(0, number())));
      }
    };

    create_mortar_functors(false);
    create_mortar_functors(true);
  }

  if (_automatic_scaling)
  {
    if (_off_diagonals_in_auto_scaling)
      _scaling_matrix = std::make_unique<OffDiagonalScalingMatrix<Number>>(_communicator);
    else
      _scaling_matrix = std::make_unique<DiagonalMatrix<Number>>(_communicator);
  }

  if (_preconditioner)
    _preconditioner->initialSetup();
}

void
NonlinearSystemBase::timestepSetup()
{
  SolverSystem::timestepSetup();

  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    _kernels.timestepSetup(tid);
    _nodal_kernels.timestepSetup(tid);
    _dirac_kernels.timestepSetup(tid);
    if (_doing_dg)
      _dg_kernels.timestepSetup(tid);
    _interface_kernels.timestepSetup(tid);
    _element_dampers.timestepSetup(tid);
    _nodal_dampers.timestepSetup(tid);
    _integrated_bcs.timestepSetup(tid);

    if (_fe_problem.haveFV())
    {
      std::vector<FVFluxBC *> bcs;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVFluxBC")
          .template condition<AttribThread>(tid)
          .queryInto(bcs);

      std::vector<FVInterfaceKernel *> iks;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVInterfaceKernel")
          .template condition<AttribThread>(tid)
          .queryInto(iks);

      std::vector<FVFluxKernel *> kernels;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVFluxKernel")
          .template condition<AttribThread>(tid)
          .queryInto(kernels);

      for (auto * bc : bcs)
        bc->timestepSetup();
      for (auto * ik : iks)
        ik->timestepSetup();
      for (auto * kernel : kernels)
        kernel->timestepSetup();
    }
  }
  _scalar_kernels.timestepSetup();
  _constraints.timestepSetup();
  _general_dampers.timestepSetup();
  _nodal_bcs.timestepSetup();
}

void
NonlinearSystemBase::customSetup(const ExecFlagType & exec_type)
{
  SolverSystem::customSetup(exec_type);

  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    _kernels.customSetup(exec_type, tid);
    _nodal_kernels.customSetup(exec_type, tid);
    _dirac_kernels.customSetup(exec_type, tid);
    if (_doing_dg)
      _dg_kernels.customSetup(exec_type, tid);
    _interface_kernels.customSetup(exec_type, tid);
    _element_dampers.customSetup(exec_type, tid);
    _nodal_dampers.customSetup(exec_type, tid);
    _integrated_bcs.customSetup(exec_type, tid);

    if (_fe_problem.haveFV())
    {
      std::vector<FVFluxBC *> bcs;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVFluxBC")
          .template condition<AttribThread>(tid)
          .queryInto(bcs);

      std::vector<FVInterfaceKernel *> iks;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVInterfaceKernel")
          .template condition<AttribThread>(tid)
          .queryInto(iks);

      std::vector<FVFluxKernel *> kernels;
      _fe_problem.theWarehouse()
          .query()
          .template condition<AttribSystem>("FVFluxKernel")
          .template condition<AttribThread>(tid)
          .queryInto(kernels);

      for (auto * bc : bcs)
        bc->customSetup(exec_type);
      for (auto * ik : iks)
        ik->customSetup(exec_type);
      for (auto * kernel : kernels)
        kernel->customSetup(exec_type);
    }
  }
  _scalar_kernels.customSetup(exec_type);
  _constraints.customSetup(exec_type);
  _general_dampers.customSetup(exec_type);
  _nodal_bcs.customSetup(exec_type);
}

void
NonlinearSystemBase::setupDampers()
{
  _increment_vec = &_sys.add_vector("u_increment", true, GHOSTED);
}

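// setDecomposition/setupFieldDecomposition implement PETSc fieldsplit preconditioning: a
// single top-level Split is stored here and later translated into a PCFieldSplit
// configuration. A typical input file block looks roughly like this (a sketch; the exact
// split names and PETSc options depend on the application):
//
//   [Preconditioning]
//     [FSP]
//       type = FSP
//       topsplit = 'uv'
//       [uv]
//         splitting = 'u v'
//         splitting_type = additive
//       []
//       [u]
//         vars = 'u'
//         petsc_options_iname = '-pc_type'
//         petsc_options_value = 'hypre'
//       []
//       [v]
//         vars = 'v'
//         petsc_options_iname = '-pc_type'
//         petsc_options_value = 'jacobi'
//       []
//     []
//   []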
void
NonlinearSystemBase::setDecomposition(const std::vector<std::string> & splits)
{
  if (splits.size() && splits.size() != 1)
    mooseError("Only a single top-level split is allowed in a Problem's decomposition.");

  if (splits.size())
  {
    _decomposition_split = splits[0];
    _have_decomposition = true;
  }
  else
    _have_decomposition = false;
}

void
NonlinearSystemBase::setupFieldDecomposition()
{
  if (!_have_decomposition)
    return;

  std::shared_ptr<Split> top_split = getSplit(_decomposition_split);
  top_split->setup(*this);
}

void
NonlinearSystemBase::addKernel(const std::string & kernel_name,
                               const std::string & name,
                               InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    // Create the kernel object via the factory and add to warehouse
    std::shared_ptr<KernelBase> kernel =
        _factory.create<KernelBase>(kernel_name, name, parameters, tid);
    _kernels.addObject(kernel, tid);
    postAddResidualObject(*kernel);
    // Add to theWarehouse, a centralized storage for all moose objects
    _fe_problem.theWarehouse().add(kernel);
  }

  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
    _has_save_in = true;
  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
    _has_diag_save_in = true;
}

void
NonlinearSystemBase::addHDGKernel(const std::string & kernel_name,
                                  const std::string & name,
                                  InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    // Create the kernel object via the factory and add to warehouse
    auto kernel = _factory.create<HDGKernel>(kernel_name, name, parameters, tid);
    _kernels.addObject(kernel, tid);
    _hybridized_kernels.addObject(kernel, tid);
    // Add to theWarehouse, a centralized storage for all moose objects
    _fe_problem.theWarehouse().add(kernel);
    postAddResidualObject(*kernel);
  }
}

void
NonlinearSystemBase::addNodalKernel(const std::string & kernel_name,
                                    const std::string & name,
                                    InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    // Create the kernel object via the factory and add to the warehouse
    std::shared_ptr<NodalKernelBase> kernel =
        _factory.create<NodalKernelBase>(kernel_name, name, parameters, tid);
    _nodal_kernels.addObject(kernel, tid);
    // Add to theWarehouse, a centralized storage for all moose objects
    _fe_problem.theWarehouse().add(kernel);
    postAddResidualObject(*kernel);
  }

  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
    _has_save_in = true;
  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
    _has_diag_save_in = true;
}

void
NonlinearSystemBase::addScalarKernel(const std::string & kernel_name,
                                     const std::string & name,
                                     InputParameters & parameters)
{
  std::shared_ptr<ScalarKernelBase> kernel =
      _factory.create<ScalarKernelBase>(kernel_name, name, parameters);
  postAddResidualObject(*kernel);
  // Add to theWarehouse, a centralized storage for all moose objects
  _fe_problem.theWarehouse().add(kernel);
  _scalar_kernels.addObject(kernel);
}

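// addBoundaryCondition dispatches on the concrete BC type: NodalBCBases are stored
// unthreaded (nodal BCs are applied serially), while IntegratedBCBases get one clone per
// thread so threaded element loops can use them. Preset Dirichlet BCs are additionally
// collected so setInitialSolution() can stamp their values into the initial solution.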
void
NonlinearSystemBase::addBoundaryCondition(const std::string & bc_name,
                                          const std::string & name,
                                          InputParameters & parameters)
{
  // ThreadID
  THREAD_ID tid = 0;

  // Create the object
  std::shared_ptr<BoundaryCondition> bc =
      _factory.create<BoundaryCondition>(bc_name, name, parameters, tid);
  postAddResidualObject(*bc);

  // Active BoundaryIDs for the object
  const std::set<BoundaryID> & boundary_ids = bc->boundaryIDs();
  auto bc_var = dynamic_cast<const MooseVariableFieldBase *>(&bc->variable());
  _vars[tid].addBoundaryVar(boundary_ids, bc_var);

  // Cast to the various types of BCs
  std::shared_ptr<NodalBCBase> nbc = std::dynamic_pointer_cast<NodalBCBase>(bc);
  std::shared_ptr<IntegratedBCBase> ibc = std::dynamic_pointer_cast<IntegratedBCBase>(bc);

  // NodalBCBase
  if (nbc)
  {
    if (nbc->checkNodalVar() && !nbc->variable().isNodal())
      mooseError("Trying to use nodal boundary condition '",
                 nbc->name(),
                 "' on a non-nodal variable '",
                 nbc->variable().name(),
                 "'.");

    _nodal_bcs.addObject(nbc);
    // Add to theWarehouse, a centralized storage for all moose objects
    _fe_problem.theWarehouse().add(nbc);
    _vars[tid].addBoundaryVars(boundary_ids, nbc->getCoupledVars());

    if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
      _has_nodalbc_save_in = true;
    if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
      _has_nodalbc_diag_save_in = true;

    // DirichletBCs that are preset
    std::shared_ptr<DirichletBCBase> dbc = std::dynamic_pointer_cast<DirichletBCBase>(bc);
    if (dbc && dbc->preset())
      _preset_nodal_bcs.addObject(dbc);

    std::shared_ptr<ADDirichletBCBase> addbc = std::dynamic_pointer_cast<ADDirichletBCBase>(bc);
    if (addbc && addbc->preset())
      _ad_preset_nodal_bcs.addObject(addbc);
  }

  // IntegratedBCBase
  else if (ibc)
  {
    _integrated_bcs.addObject(ibc, tid);
    // Add to theWarehouse, a centralized storage for all moose objects
    _fe_problem.theWarehouse().add(ibc);
    _vars[tid].addBoundaryVars(boundary_ids, ibc->getCoupledVars());

    if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
      _has_save_in = true;
    if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
      _has_diag_save_in = true;

    for (tid = 1; tid < libMesh::n_threads(); tid++)
    {
      // Create the object
      bc = _factory.create<BoundaryCondition>(bc_name, name, parameters, tid);

      // Give users opportunity to set some parameters
      postAddResidualObject(*bc);

      // Active BoundaryIDs for the object
      const std::set<BoundaryID> & boundary_ids = bc->boundaryIDs();
      _vars[tid].addBoundaryVar(boundary_ids, bc_var);

      ibc = std::static_pointer_cast<IntegratedBCBase>(bc);

      _integrated_bcs.addObject(ibc, tid);
      _vars[tid].addBoundaryVars(boundary_ids, ibc->getCoupledVars());
    }
  }

  else
    mooseError("Unknown BoundaryCondition type for object named ", bc->name());
}

void
NonlinearSystemBase::addConstraint(const std::string & c_name,
                                   const std::string & name,
                                   InputParameters & parameters)
{
  std::shared_ptr<Constraint> constraint = _factory.create<Constraint>(c_name, name, parameters);
  _constraints.addObject(constraint);
  postAddResidualObject(*constraint);

  if (constraint && constraint->addCouplingEntriesToJacobian())
    addImplicitGeometricCouplingEntriesToJacobian(true);
}

void
NonlinearSystemBase::addDiracKernel(const std::string & kernel_name,
                                    const std::string & name,
                                    InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    std::shared_ptr<DiracKernelBase> kernel =
        _factory.create<DiracKernelBase>(kernel_name, name, parameters, tid);
    postAddResidualObject(*kernel);
    _dirac_kernels.addObject(kernel, tid);
    // Add to theWarehouse, a centralized storage for all moose objects
    _fe_problem.theWarehouse().add(kernel);
  }
}

void
NonlinearSystemBase::addDGKernel(std::string dg_kernel_name,
                                 const std::string & name,
                                 InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
  {
    auto dg_kernel = _factory.create<DGKernelBase>(dg_kernel_name, name, parameters, tid);
    _dg_kernels.addObject(dg_kernel, tid);
    // Add to theWarehouse, a centralized storage for all moose objects
    _fe_problem.theWarehouse().add(dg_kernel);
    postAddResidualObject(*dg_kernel);
  }

  _doing_dg = true;

  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
    _has_save_in = true;
  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
    _has_diag_save_in = true;
}

void
NonlinearSystemBase::addInterfaceKernel(std::string interface_kernel_name,
                                        const std::string & name,
                                        InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
  {
    std::shared_ptr<InterfaceKernelBase> interface_kernel =
        _factory.create<InterfaceKernelBase>(interface_kernel_name, name, parameters, tid);
    postAddResidualObject(*interface_kernel);

    const std::set<BoundaryID> & boundary_ids = interface_kernel->boundaryIDs();
    auto ik_var = dynamic_cast<const MooseVariableFieldBase *>(&interface_kernel->variable());
    _vars[tid].addBoundaryVar(boundary_ids, ik_var);

    _interface_kernels.addObject(interface_kernel, tid);
    // Add to theWarehouse, a centralized storage for all moose objects
    _fe_problem.theWarehouse().add(interface_kernel);
    _vars[tid].addBoundaryVars(boundary_ids, interface_kernel->getCoupledVars());
  }
}

void
NonlinearSystemBase::addDamper(const std::string & damper_name,
                               const std::string & name,
                               InputParameters & parameters)
{
  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
  {
    std::shared_ptr<Damper> damper = _factory.create<Damper>(damper_name, name, parameters, tid);

    // Attempt to cast to the damper types
    std::shared_ptr<ElementDamper> ed = std::dynamic_pointer_cast<ElementDamper>(damper);
    std::shared_ptr<NodalDamper> nd = std::dynamic_pointer_cast<NodalDamper>(damper);
    std::shared_ptr<GeneralDamper> gd = std::dynamic_pointer_cast<GeneralDamper>(damper);

    if (gd)
    {
      _general_dampers.addObject(gd);
      break; // not threaded
    }
    else if (ed)
      _element_dampers.addObject(ed, tid);
    else if (nd)
      _nodal_dampers.addObject(nd, tid);
    else
      mooseError("Invalid damper type");
  }
}

void
NonlinearSystemBase::addSplit(const std::string & split_name,
                              const std::string & name,
                              InputParameters & parameters)
{
  std::shared_ptr<Split> split = _factory.create<Split>(split_name, name, parameters);
  _splits.addObject(split);
  // Add to theWarehouse, a centralized storage for all moose objects
  _fe_problem.theWarehouse().add(split);
}

std::shared_ptr<Split>
NonlinearSystemBase::getSplit(const std::string & name)
{
  return _splits.getActiveObject(name);
}

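// The "pre-SMO" residual is the residual evaluated before any Solution Modifying Objects
// (predictors, correctors, constraints, XFEM updates, ...) have acted on the solution.
// When enabled it replaces the initial residual as the reference value for relative
// convergence checks, which makes the convergence history independent of those objects.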
bool
NonlinearSystemBase::shouldEvaluatePreSMOResidual() const
{
  if (_fe_problem.solverParams(number())._type == Moose::ST_LINEAR)
    return false;

  // The legacy behavior (#10464) _always_ performs the pre-SMO residual evaluation
  // regardless of whether it is needed.
  //
  // This is not ideal and has been fixed by #23472. This legacy option ensures a smooth transition
  // to the new behavior. Modules and Apps that want to migrate to the new behavior should set this
  // parameter to false.
  if (_app.parameters().get<bool>("use_legacy_initial_residual_evaluation_behavior"))
    return true;

  return _use_pre_smo_residual;
}

Real
NonlinearSystemBase::referenceResidual() const
{
  return usePreSMOResidual() ? preSMOResidual() : initialResidual();
}

Real
NonlinearSystemBase::preSMOResidual() const
{
  if (!shouldEvaluatePreSMOResidual())
    mooseError("pre-SMO residual is requested but not evaluated.");

  return _pre_smo_residual;
}

Real
NonlinearSystemBase::initialResidual() const
{
  return _initial_residual;
}

void
NonlinearSystemBase::setInitialResidual(Real r)
{
  _initial_residual = r;
}

void
NonlinearSystemBase::zeroVectorForResidual(const std::string & vector_name)
{
  for (unsigned int i = 0; i < _vecs_to_zero_for_residual.size(); ++i)
    if (vector_name == _vecs_to_zero_for_residual[i])
      return;

  _vecs_to_zero_for_residual.push_back(vector_name);
}

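// computeResidualTag evaluates the residual contributions associated with a single vector
// tag. A minimal usage sketch (assuming a FEProblemBase `fe_problem` and a registered tag
// name; both names here are hypothetical):
//
//   auto tag_id = fe_problem.getVectorTagID("RESIDUAL");
//   auto & nl = fe_problem.getNonlinearSystemBase(/*sys_num=*/0);
//   nl.computeResidualTag(nl.RHS(), tag_id);
//
// The vector is temporarily associated with the tag, computeResidualTags() runs the full
// assembly loop for just that tag, and the association is removed afterwards.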
void
NonlinearSystemBase::computeResidualTag(NumericVector<Number> & residual, TagID tag_id)
{
  _nl_vector_tags.clear();
  _nl_vector_tags.insert(tag_id);
  associateVectorToTag(residual, tag_id);

  computeResidualTags(_nl_vector_tags);

  disassociateVectorFromTag(residual, tag_id);
}

void
NonlinearSystemBase::computeResidual(NumericVector<Number> & residual, TagID tag_id)
{
  mooseDeprecated(" Please use computeResidualTag");

  computeResidualTag(residual, tag_id);
}

void
NonlinearSystemBase::computeResidualTags(const std::set<TagID> & tags)
{
  parallel_object_only();

  TIME_SECTION("nl::computeResidualTags", 5);

  bool required_residual = tags.find(residualVectorTag()) == tags.end() ? false : true;

  // not supposed to do anything on the matrix
  deactiveAllMatrixTags();

  for (const auto & numeric_vec : _vecs_to_zero_for_residual)
    if (hasVector(numeric_vec))
    {
      NumericVector<Number> & vec = getVector(numeric_vec);
      vec.close();
      vec.zero();
    }

  try
  {
    zeroTaggedVectors(tags);
    computeResidualInternal(tags);
    closeTaggedVectors(tags);

    if (required_residual)
    {
      auto & residual = getVector(residualVectorTag());
      if (!_time_integrators.empty())
      {
        for (auto & ti : _time_integrators)
          ti->postResidual(residual);
      }
      else
        residual += *_Re_non_time;
      residual.close();
    }
    if (_fe_problem.computingScalingResidual())
      // We don't want to do nodal bcs or anything else
      return;

    computeNodalBCs(tags);
    closeTaggedVectors(tags);

    // If we are debugging residuals we need one more assignment to have the ghosted copy up to
    // date
    if (_need_residual_ghosted && _debugging_residuals && required_residual)
    {
      auto & residual = getVector(residualVectorTag());

      *_residual_ghosted = residual;
      _residual_ghosted->close();
    }
    // Need to close and update the aux system in case residuals were saved to it.
    if (hasSaveIn())
      _fe_problem.getAuxiliarySystem().update();
  }
  catch (MooseException & e)
  {
    // The buck stops here, we have already handled the exception by
    // calling stopSolve(), it is now up to PETSc to return a
    // "diverged" reason during the next solve.
  }

  // not supposed to do anything on matrix
  activeAllMatrixTags();
}

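// Combined residual-plus-Jacobian assembly: both the tagged vectors and the tagged
// matrices are filled in a single sweep over the mesh, which saves one full element loop
// compared to running the residual and Jacobian paths separately.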
void
NonlinearSystemBase::computeResidualAndJacobianTags(const std::set<TagID> & vector_tags,
                                                    const std::set<TagID> & matrix_tags)
{
  const bool required_residual =
      vector_tags.find(residualVectorTag()) == vector_tags.end() ? false : true;

  try
  {
    zeroTaggedVectors(vector_tags);
    computeResidualAndJacobianInternal(vector_tags, matrix_tags);
    closeTaggedVectors(vector_tags);
    closeTaggedMatrices(matrix_tags);

    if (required_residual)
    {
      auto & residual = getVector(residualVectorTag());
      if (!_time_integrators.empty())
      {
        for (auto & ti : _time_integrators)
          ti->postResidual(residual);
      }
      else
        residual += *_Re_non_time;
      residual.close();
    }

    computeNodalBCsResidualAndJacobian();
    closeTaggedVectors(vector_tags);
    closeTaggedMatrices(matrix_tags);
  }
  catch (MooseException & e)
  {
    // The buck stops here, we have already handled the exception by
    // calling stopSolve(), it is now up to PETSc to return a
    // "diverged" reason during the next solve.
  }
}

void
NonlinearSystemBase::onTimestepBegin()
{
  for (auto & ti : _time_integrators)
    ti->preSolve();
  if (_predictor.get())
    _predictor->timestepSetup();
}

void
NonlinearSystemBase::setInitialSolution()
{
  NumericVector<Number> & initial_solution(solution());
  if (_predictor.get())
  {
    if (_predictor->shouldApply())
    {
      TIME_SECTION("applyPredictor", 2, "Applying Predictor");

      _predictor->apply(initial_solution);
      _fe_problem.predictorCleanup(initial_solution);
    }
    else
      _console << " Skipping predictor this step" << std::endl;
  }

  // do nodal BC
  {
    TIME_SECTION("initialBCs", 2, "Applying BCs To Initial Condition");

    ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
    for (const auto & bnode : bnd_nodes)
    {
      BoundaryID boundary_id = bnode->_bnd_id;
      Node * node = bnode->_node;

      if (node->processor_id() == processor_id())
      {
        // reinit variables in nodes
        _fe_problem.reinitNodeFace(node, boundary_id, 0);

        if (_preset_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
        {
          const auto & preset_bcs = _preset_nodal_bcs.getActiveBoundaryObjects(boundary_id);
          for (const auto & preset_bc : preset_bcs)
            preset_bc->computeValue(initial_solution);
        }
        if (_ad_preset_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
        {
          const auto & preset_bcs_res = _ad_preset_nodal_bcs.getActiveBoundaryObjects(boundary_id);
          for (const auto & preset_bc : preset_bcs_res)
            preset_bc->computeValue(initial_solution);
        }
      }
    }
  }

  _sys.solution->close();
  update();

  // Set constraint secondary values
  setConstraintSecondaryValues(initial_solution, false);

  if (_fe_problem.getDisplacedProblem())
    setConstraintSecondaryValues(initial_solution, true);
}

void
NonlinearSystemBase::setPredictor(std::shared_ptr<Predictor> predictor)
{
  _predictor = predictor;
}

void
NonlinearSystemBase::subdomainSetup(SubdomainID subdomain, THREAD_ID tid)
{
  SolverSystem::subdomainSetup(subdomain, tid);

  _kernels.subdomainSetup(subdomain, tid);
  _nodal_kernels.subdomainSetup(subdomain, tid);
  _element_dampers.subdomainSetup(subdomain, tid);
  _nodal_dampers.subdomainSetup(subdomain, tid);
}

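// The total residual is split into a time part and a non-time part, each with its own
// tagged vector: R(u) = R_time(u) + R_nontime(u). Time integrators combine the two in
// postResidual(), so time kernels accumulate into _Re_time while everything else goes
// into _Re_non_time.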
NumericVector<Number> &
NonlinearSystemBase::getResidualTimeVector()
{
  if (!_Re_time)
  {
    _Re_time_tag = _fe_problem.addVectorTag("TIME");

    // Most applications don't need the expense of ghosting
    const ParallelType ptype = _need_residual_ghosted ? GHOSTED : PARALLEL;
    _Re_time = &addVector(_Re_time_tag, false, ptype);
  }
  else if (_need_residual_ghosted && _Re_time->type() == PARALLEL)
  {
    const auto vector_name = _subproblem.vectorTagName(_Re_time_tag);

    // If an application changes its mind, the libMesh API lets us
    // change the vector.
    _Re_time = &system().add_vector(vector_name, false, GHOSTED);
  }

  return *_Re_time;
}

NumericVector<Number> &
NonlinearSystemBase::getResidualNonTimeVector()
{
  if (!_Re_non_time)
  {
    _Re_non_time_tag = _fe_problem.addVectorTag("NONTIME");

    // Most applications don't need the expense of ghosting
    const ParallelType ptype = _need_residual_ghosted ? GHOSTED : PARALLEL;
    _Re_non_time = &addVector(_Re_non_time_tag, false, ptype);
  }
  else if (_need_residual_ghosted && _Re_non_time->type() == PARALLEL)
  {
    const auto vector_name = _subproblem.vectorTagName(_Re_non_time_tag);

    // If an application changes its mind, the libMesh API lets us
    // change the vector.
    _Re_non_time = &system().add_vector(vector_name, false, GHOSTED);
  }

  return *_Re_non_time;
}

NumericVector<Number> &
NonlinearSystemBase::residualVector(TagID tag)
{
  mooseDeprecated("Please use getVector()");
  switch (tag)
  {
    case 0:
      return getResidualNonTimeVector();

    case 1:
      return getResidualTimeVector();

    default:
      mooseError("The required residual vector is not available");
  }
}

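// Nodal constraints (e.g. tying a secondary node's dofs to primary nodes) are enforced
// single-threaded: each constraint reinits its primary/secondary node sets and adds its
// contribution directly to the residual or to the system Jacobian.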
void
NonlinearSystemBase::enforceNodalConstraintsResidual(NumericVector<Number> & residual)
{
  THREAD_ID tid = 0; // constraints are going to be done single-threaded
  residual.close();
  if (_constraints.hasActiveNodalConstraints())
  {
    const auto & ncs = _constraints.getActiveNodalConstraints();
    for (const auto & nc : ncs)
    {
      std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
      std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();

      if ((secondary_node_ids.size() > 0) && (primary_node_ids.size() > 0))
      {
        _fe_problem.reinitNodes(primary_node_ids, tid);
        _fe_problem.reinitNodesNeighbor(secondary_node_ids, tid);
        nc->computeResidual(residual);
      }
    }
    _fe_problem.addCachedResidualDirectly(residual, tid);
    residual.close();
  }
}

void
NonlinearSystemBase::enforceNodalConstraintsJacobian()
{
  if (!hasMatrix(systemMatrixTag()))
    mooseError(" A system matrix is required");

  auto & jacobian = getMatrix(systemMatrixTag());
  THREAD_ID tid = 0; // constraints are going to be done single-threaded

  if (_constraints.hasActiveNodalConstraints())
  {
    const auto & ncs = _constraints.getActiveNodalConstraints();
    for (const auto & nc : ncs)
    {
      std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
      std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();

      if ((secondary_node_ids.size() > 0) && (primary_node_ids.size() > 0))
      {
        _fe_problem.reinitNodes(primary_node_ids, tid);
        _fe_problem.reinitNodesNeighbor(secondary_node_ids, tid);
        nc->computeJacobian(jacobian);
      }
    }
  }
}

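// Helper for node/face (gap contact) constraints: given a secondary node and the
// PenetrationInfo pairing it with a primary element face, prepare variables, assembly
// space, and neighbor materials so a NodeFaceConstraint can be evaluated at that pair.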
void
NonlinearSystemBase::reinitNodeFace(const Node & secondary_node,
                                    const BoundaryID secondary_boundary,
                                    const PenetrationInfo & info,
                                    const bool displaced)
{
  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                                : static_cast<SubProblem &>(_fe_problem);

  const Elem * primary_elem = info._elem;
  unsigned int primary_side = info._side_num;
  std::vector<Point> points;
  points.push_back(info._closest_point);

  // *These next steps MUST be done in this order!*
  // ADL: This is a Chesterton's fence situation. I don't know which calls exactly the above comment
  // is referring to. If I had to guess I would guess just the reinitNodeFace and prepareAssembly
  // calls since the former will size the variable's dof indices and then the latter will resize the
  // residual/Jacobian based off the variable's cached dof indices size

  // This reinits the variables that exist on the secondary node
  _fe_problem.reinitNodeFace(&secondary_node, secondary_boundary, 0);

  // This will set aside residual and jacobian space for the variables that have dofs on
  // the secondary node
  _fe_problem.prepareAssembly(0);

  _fe_problem.setNeighborSubdomainID(primary_elem, 0);

  //
  // Reinit material on undisplaced mesh
  //

  const Elem * const undisplaced_primary_elem =
      displaced ? _mesh.elemPtr(primary_elem->id()) : primary_elem;
  const Point undisplaced_primary_physical_point =
      [&points, displaced, primary_elem, undisplaced_primary_elem]()
  {
    if (displaced)
    {
      const Point reference_point =
          FEMap::inverse_map(primary_elem->dim(), primary_elem, points[0]);
      return FEMap::map(primary_elem->dim(), undisplaced_primary_elem, reference_point);
    }
    else
      // If our penetration locator is on the reference mesh, then our undisplaced
      // physical point is simply the point coming from the penetration locator
      return points[0];
  }();

  _fe_problem.reinitNeighborPhys(
      undisplaced_primary_elem, primary_side, {undisplaced_primary_physical_point}, 0);
  // Stateful material properties are only initialized for neighbor material data for internal faces
  // for discontinuous Galerkin methods or for conforming interfaces for interface kernels. We don't
  // have either of those use cases here where we likely have disconnected meshes
  _fe_problem.reinitMaterialsNeighbor(primary_elem->subdomain_id(), 0, /*swap_stateful=*/false);

  // Reinit points for constraint enforcement
  if (displaced)
    subproblem.reinitNeighborPhys(primary_elem, primary_side, points, 0);
}

void
NonlinearSystemBase::setConstraintSecondaryValues(NumericVector<Number> & solution, bool displaced)
{
  if (displaced)
    mooseAssert(_fe_problem.getDisplacedProblem(),
                "If we're calling this method with displaced = true, then we better well have a "
                "displaced problem");
  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                                : static_cast<SubProblem &>(_fe_problem);
  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;

  bool constraints_applied = false;

  for (const auto & it : penetration_locators)
  {
    PenetrationLocator & pen_loc = *(it.second);

    std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;

    BoundaryID secondary_boundary = pen_loc._secondary_boundary;
    BoundaryID primary_boundary = pen_loc._primary_boundary;

    if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
    {
      const auto & constraints =
          _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);
      std::unordered_set<unsigned int> needed_mat_props;
      for (const auto & constraint : constraints)
      {
        const auto & mp_deps = constraint->getMatPropDependencies();
        needed_mat_props.insert(mp_deps.begin(), mp_deps.end());
      }
      _fe_problem.setActiveMaterialProperties(needed_mat_props, /*tid=*/0);

      for (unsigned int i = 0; i < secondary_nodes.size(); i++)
      {
        dof_id_type secondary_node_num = secondary_nodes[i];
        Node & secondary_node = _mesh.nodeRef(secondary_node_num);

        if (secondary_node.processor_id() == processor_id())
        {
          if (pen_loc._penetration_info[secondary_node_num])
          {
            PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];

            reinitNodeFace(secondary_node, secondary_boundary, info, displaced);

            for (const auto & nfc : constraints)
            {
              if (nfc->isExplicitConstraint())
                continue;
              // Return if this constraint does not correspond to the primary-secondary pair
              // prepared by the outer loops.
              // This continue statement is required when, e.g. one secondary surface constrains
              // more than one primary surface.
              if (nfc->secondaryBoundary() != secondary_boundary ||
                  nfc->primaryBoundary() != primary_boundary)
                continue;

              if (nfc->shouldApply())
              {
                constraints_applied = true;
                nfc->computeSecondaryValue(solution);
              }

              if (nfc->hasWritableCoupledVariables())
              {
                Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
                for (auto * var : nfc->getWritableCoupledVariables())
                {
                  if (var->isNodalDefined())
                    var->insert(_fe_problem.getAuxiliarySystem().solution());
                }
              }
            }
          }
        }
      }
    }
  }

  // go over NodeElemConstraints
  std::set<dof_id_type> unique_secondary_node_ids;

  for (const auto & secondary_id : _mesh.meshSubdomains())
  {
    for (const auto & primary_id : _mesh.meshSubdomains())
    {
      if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
      {
        const auto & constraints =
            _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);

        // get unique set of ids of all nodes on current block
        unique_secondary_node_ids.clear();
        const MeshBase & meshhelper = _mesh.getMesh();
        for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
                                          meshhelper.active_subdomain_elements_end(secondary_id)))
        {
          for (auto & n : elem->node_ref_range())
            unique_secondary_node_ids.insert(n.id());
        }

        for (auto secondary_node_id : unique_secondary_node_ids)
        {
          Node & secondary_node = _mesh.nodeRef(secondary_node_id);

          // check if secondary node is on current processor
          if (secondary_node.processor_id() == processor_id())
          {
            // This reinits the variables that exist on the secondary node
            _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);

            // This will set aside residual and jacobian space for the variables that have dofs
            // on the secondary node
            _fe_problem.prepareAssembly(0);

            for (const auto & nec : constraints)
            {
              if (nec->shouldApply())
              {
                constraints_applied = true;
                nec->computeSecondaryValue(solution);
              }
            }
          }
        }
      }
    }
  }

  // See if constraints were applied anywhere
  _communicator.max(constraints_applied);

  if (constraints_applied)
  {
    solution.close();
    update();
  }
}

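// constraintResiduals accumulates the residual contributions of every constraint class in
// turn: node/face (contact) constraints, element/element constraints from element pair
// locators, and node/element constraints across subdomain pairs. Depending on
// _assemble_constraints_separately the parallel assembly is flushed per penetration
// locator or once at the end.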
void
NonlinearSystemBase::constraintResiduals(NumericVector<Number> & residual, bool displaced)
{
  // Make sure the residual is in a good state
  residual.close();

  if (displaced)
    mooseAssert(_fe_problem.getDisplacedProblem(),
                "If we're calling this method with displaced = true, then we better well have a "
                "displaced problem");
  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                                : static_cast<SubProblem &>(_fe_problem);
  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;

  bool constraints_applied;
  bool residual_has_inserted_values = false;
  if (!_assemble_constraints_separately)
    constraints_applied = false;
  for (const auto & it : penetration_locators)
  {
    if (_assemble_constraints_separately)
    {
      // Reset the constraint_applied flag before each new constraint, as they need to be
      // assembled separately
      constraints_applied = false;
    }
    PenetrationLocator & pen_loc = *(it.second);

    std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;

    BoundaryID secondary_boundary = pen_loc._secondary_boundary;
    BoundaryID primary_boundary = pen_loc._primary_boundary;

    bool has_writable_variables(false);

    if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
    {
      const auto & constraints =
          _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);

      for (unsigned int i = 0; i < secondary_nodes.size(); i++)
      {
        dof_id_type secondary_node_num = secondary_nodes[i];
        Node & secondary_node = _mesh.nodeRef(secondary_node_num);

        if (secondary_node.processor_id() == processor_id())
        {
          if (pen_loc._penetration_info[secondary_node_num])
          {
            PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];

            reinitNodeFace(secondary_node, secondary_boundary, info, displaced);

            for (const auto & nfc : constraints)
            {
              // Return if this constraint does not correspond to the primary-secondary pair
              // prepared by the outer loops.
              // This continue statement is required when, e.g. one secondary surface constrains
              // more than one primary surface.
              if (nfc->secondaryBoundary() != secondary_boundary ||
                  nfc->primaryBoundary() != primary_boundary)
                continue;

              if (nfc->shouldApply())
              {
                constraints_applied = true;
                nfc->computeResidual();

                if (nfc->overwriteSecondaryResidual())
                {
                  // The below will actually overwrite the residual for every single dof that
                  // lives on the node. We definitely don't want to do that!
                  // _fe_problem.setResidual(residual, 0);

                  const auto & secondary_var = nfc->variable();
                  const auto & secondary_dofs = secondary_var.dofIndices();
                  mooseAssert(secondary_dofs.size() == secondary_var.count(),
                              "We are on a node so there should only be one dof per variable (for "
                              "an ArrayVariable we should have a number of dofs equal to the "
                              "number of components");

                  // Assume that if the user is overwriting the secondary residual, then they are
                  // supplying residuals that do not correspond to their other physics
                  // (e.g. Kernels), hence we should not apply a scalingFactor that is normally
                  // based on the order of their other physics (e.g. Kernels)
                  std::vector<Number> values = {nfc->secondaryResidual()};
                  residual.insert(values, secondary_dofs);
                  residual_has_inserted_values = true;
                }
                else
                  _fe_problem.cacheResidual(0);
              }
              if (nfc->hasWritableCoupledVariables())
              {
                Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
                has_writable_variables = true;
                for (auto * var : nfc->getWritableCoupledVariables())
                {
                  if (var->isNodalDefined())
                    var->insert(_fe_problem.getAuxiliarySystem().solution());
                }
              }
            }
          }
        }
      }
    }
    _communicator.max(has_writable_variables);

    if (has_writable_variables)
    {
      // Explicit contact dynamic constraints write to auxiliary variables and update the old
      // displacement solution on the constraint boundaries. Close solutions and update system
      // accordingly.
      _fe_problem.getAuxiliarySystem().solution().close();
      _fe_problem.getAuxiliarySystem().update();
      solutionOld().close();
    }

    if (_assemble_constraints_separately)
    {
      // Make sure that secondary contribution to primary are assembled, and ghosts have been
      // exchanged, as current primaries might become secondaries on next iteration and will need to
      // contribute their former secondaries' contributions to the future primaries. See if
      // constraints were applied anywhere
      _communicator.max(constraints_applied);

      if (constraints_applied)
      {
        // If any of the above constraints inserted values in the residual, it needs to be
        // assembled before adding the cached residuals below.
        _communicator.max(residual_has_inserted_values);
        if (residual_has_inserted_values)
        {
          residual.close();
          residual_has_inserted_values = false;
        }
        _fe_problem.addCachedResidualDirectly(residual, 0);
        residual.close();

        if (_need_residual_ghosted)
          *_residual_ghosted = residual;
      }
    }
  }
  if (!_assemble_constraints_separately)
  {
    _communicator.max(constraints_applied);

    if (constraints_applied)
    {
      // If any of the above constraints inserted values in the residual, it needs to be assembled
      // before adding the cached residuals below.
      _communicator.max(residual_has_inserted_values);
      if (residual_has_inserted_values)
        residual.close();

      _fe_problem.addCachedResidualDirectly(residual, 0);
      residual.close();

      if (_need_residual_ghosted)
        *_residual_ghosted = residual;
    }
  }

  // go over element-element constraint interface
  THREAD_ID tid = 0;
  const auto & element_pair_locators = subproblem.geomSearchData()._element_pair_locators;
  for (const auto & it : element_pair_locators)
  {
    ElementPairLocator & elem_pair_loc = *(it.second);

    if (_constraints.hasActiveElemElemConstraints(it.first, displaced))
    {
      // ElemElemConstraint objects
      const auto & _element_constraints =
          _constraints.getActiveElemElemConstraints(it.first, displaced);

      // go over pair elements
      const std::list<std::pair<const Elem *, const Elem *>> & elem_pairs =
          elem_pair_loc.getElemPairs();
      for (const auto & pr : elem_pairs)
      {
        const Elem * elem1 = pr.first;
        const Elem * elem2 = pr.second;

        if (elem1->processor_id() != processor_id())
          continue;

        const ElementPairInfo & info = elem_pair_loc.getElemPairInfo(pr);

        // for each element pair, process all constraints that act on it
        for (const auto & ec : _element_constraints)
        {
          _fe_problem.setCurrentSubdomainID(elem1, tid);
          subproblem.reinitElemPhys(elem1, info._elem1_constraint_q_point, tid);
          _fe_problem.setNeighborSubdomainID(elem2, tid);
          subproblem.reinitNeighborPhys(elem2, info._elem2_constraint_q_point, tid);

          ec->prepareShapes(ec->variable().number());
          ec->prepareNeighborShapes(ec->variable().number());

          ec->reinit(info);
          ec->computeResidual();
          _fe_problem.cacheResidual(tid);
          _fe_problem.cacheResidualNeighbor(tid);
        }
        _fe_problem.addCachedResidual(tid);
      }
    }
  }

  // go over NodeElemConstraints
  std::set<dof_id_type> unique_secondary_node_ids;

  constraints_applied = false;
  residual_has_inserted_values = false;
  bool has_writable_variables = false;
  for (const auto & secondary_id : _mesh.meshSubdomains())
  {
    for (const auto & primary_id : _mesh.meshSubdomains())
    {
      if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
      {
        const auto & constraints =
            _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);

        // get unique set of ids of all nodes on current block
        unique_secondary_node_ids.clear();
        const MeshBase & meshhelper = _mesh.getMesh();
        for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
                                          meshhelper.active_subdomain_elements_end(secondary_id)))
        {
          for (auto & n : elem->node_ref_range())
            unique_secondary_node_ids.insert(n.id());
        }

        for (auto secondary_node_id : unique_secondary_node_ids)
        {
          Node & secondary_node = _mesh.nodeRef(secondary_node_id);
          // check if secondary node is on current processor
          if (secondary_node.processor_id() == processor_id())
          {
            // This reinits the variables that exist on the secondary node
            _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);

            // This will set aside residual and jacobian space for the variables that have dofs
            // on the secondary node
            _fe_problem.prepareAssembly(0);

            for (const auto & nec : constraints)
            {
              if (nec->shouldApply())
              {
                constraints_applied = true;
                nec->computeResidual();

                if (nec->overwriteSecondaryResidual())
                {
                  _fe_problem.setResidual(residual, 0);
                  residual_has_inserted_values = true;
                }
                else
                  _fe_problem.cacheResidual(0);
              }
              if (nec->hasWritableCoupledVariables())
              {
                Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
                has_writable_variables = true;
                for (auto * var : nec->getWritableCoupledVariables())
                {
                  if (var->isNodalDefined())
                    var->insert(_fe_problem.getAuxiliarySystem().solution());
                }
              }
            }
          }
        }
      }
    }
  }
  _communicator.max(constraints_applied);

  if (constraints_applied)
  {
    // If any of the above constraints inserted values in the residual, it needs to be assembled
    // before adding the cached residuals below.
    _communicator.max(residual_has_inserted_values);
    if (residual_has_inserted_values)
      residual.close();

    _fe_problem.addCachedResidualDirectly(residual, 0);
    residual.close();

    if (_need_residual_ghosted)
      *_residual_ghosted = residual;
  }
  _communicator.max(has_writable_variables);

  if (has_writable_variables)
  {
    // Explicit contact dynamic constraints write to auxiliary variables and update the old
    // displacement solution on the constraint boundaries. Close solutions and update system
    // accordingly.
    _fe_problem.getAuxiliarySystem().solution().close();
    _fe_problem.getAuxiliarySystem().update();
    solutionOld().close();
  }

  // We may have additional tagged vectors that also need to be accumulated
}

void
NonlinearSystemBase::overwriteNodeFace(NumericVector<Number> & soln)
{
  // Overwrite results from integrator in case we have explicit dynamics contact constraints
  auto & subproblem = _fe_problem.getDisplacedProblem()
                          ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
                          : static_cast<SubProblem &>(_fe_problem);
  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;

  for (const auto & it : penetration_locators)
  {
    PenetrationLocator & pen_loc = *(it.second);

    const auto & secondary_nodes = pen_loc._nearest_node._secondary_nodes;
    const BoundaryID secondary_boundary = pen_loc._secondary_boundary;
    const BoundaryID primary_boundary = pen_loc._primary_boundary;

    if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, true))
    {
      const auto & constraints =
          _constraints.getActiveNodeFaceConstraints(secondary_boundary, true);
      for (const auto i : index_range(secondary_nodes))
      {
        const auto secondary_node_num = secondary_nodes[i];
        const Node & secondary_node = _mesh.nodeRef(secondary_node_num);

        if (secondary_node.processor_id() == processor_id())
          if (pen_loc._penetration_info[secondary_node_num])
            for (const auto & nfc : constraints)
            {
              if (!nfc->isExplicitConstraint())
                continue;

              // Return if this constraint does not correspond to the primary-secondary pair
              // prepared by the outer loops.
              // This continue statement is required when, e.g. one secondary surface constrains
              // more than one primary surface.
              if (nfc->secondaryBoundary() != secondary_boundary ||
                  nfc->primaryBoundary() != primary_boundary)
                continue;

              nfc->overwriteBoundaryVariables(soln, secondary_node);
            }
      }
    }
  }
  soln.close();
}

void
NonlinearSystemBase::residualSetup()
{
  TIME_SECTION("residualSetup", 3);

  SolverSystem::residualSetup();

  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
  {
    _kernels.residualSetup(tid);
    _nodal_kernels.residualSetup(tid);
    _dirac_kernels.residualSetup(tid);
    if (_doing_dg)
      _dg_kernels.residualSetup(tid);
    _interface_kernels.residualSetup(tid);
    _element_dampers.residualSetup(tid);
    _nodal_dampers.residualSetup(tid);
    _integrated_bcs.residualSetup(tid);
  }
  _scalar_kernels.residualSetup();
  _constraints.residualSetup();
  _general_dampers.residualSetup();
  _nodal_bcs.residualSetup();

  // Avoid recursion
  if (this == &_fe_problem.currentNonlinearSystem())
    _fe_problem.residualSetup();
}

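// computeResidualInternal is the main assembly driver. The order of contributions is:
// pre-kernel user objects, volumetric kernels (FE and FV), scalar kernels, block and
// boundary nodal kernels, Dirac kernels, and finally constraints; strongly enforced
// nodal BCs are applied afterwards by computeNodalBCs().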
void
NonlinearSystemBase::computeResidualInternal(const std::set<TagID> & tags)
{
  parallel_object_only();

  TIME_SECTION("computeResidualInternal", 3);

  residualSetup();

  const auto vector_tag_data = _fe_problem.getVectorTags(tags);

  // Residual contributions from UOs - for now this is used for ray tracing
  // and ray kernels that contribute to the residual (think line sources)
  std::vector<UserObject *> uos;
  _fe_problem.theWarehouse()
      .query()
      .condition<AttribSystem>("UserObject")
      .condition<AttribExecOns>(EXEC_PRE_KERNELS)
      .queryInto(uos);
  for (auto & uo : uos)
    uo->residualSetup();
  for (auto & uo : uos)
  {
    uo->initialize();
    uo->execute();
    uo->finalize();
  }

  // reinit scalar variables
  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
    _fe_problem.reinitScalars(tid);

  // residual contributions from the domain
  PARALLEL_TRY
  {
    TIME_SECTION("Kernels", 3 /*, "Computing Kernels"*/);

    ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();

    ComputeResidualThread cr(_fe_problem, tags);
    Threads::parallel_reduce(elem_range, cr);

    // We pass face information directly to FV residual objects for their evaluation. Consequently
    // we must make sure to do separate threaded loops for 1) undisplaced face information objects
    // and undisplaced residual objects and 2) displaced face information objects and displaced
    // residual objects
    using FVRange = StoredRange<MooseMesh::const_face_info_iterator, const FaceInfo *>;
    if (_fe_problem.haveFV())
    {
      ComputeFVFluxResidualThread<FVRange> fvr(
          _fe_problem, this->number(), tags, /*on_displaced=*/false);
      FVRange faces(_fe_problem.mesh().ownedFaceInfoBegin(), _fe_problem.mesh().ownedFaceInfoEnd());
      Threads::parallel_reduce(faces, fvr);
    }
    if (auto displaced_problem = _fe_problem.getDisplacedProblem();
        displaced_problem && displaced_problem->haveFV())
    {
      ComputeFVFluxResidualThread<FVRange> fvr(
          _fe_problem, this->number(), tags, /*on_displaced=*/true);
      FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
                    displaced_problem->mesh().ownedFaceInfoEnd());
      Threads::parallel_reduce(faces, fvr);
    }

    unsigned int n_threads = libMesh::n_threads();
    for (unsigned int i = 0; i < n_threads;
         i++) // Add any cached residuals that might be hanging around
      _fe_problem.addCachedResidual(i);
  }
  PARALLEL_CATCH;

  // residual contributions from the scalar kernels
  PARALLEL_TRY
  {
    // do scalar kernels (not sure how to thread this)
    if (_scalar_kernels.hasActiveObjects())
    {
      TIME_SECTION("ScalarKernels", 3 /*, "Computing ScalarKernels"*/);

      MooseObjectWarehouse<ScalarKernelBase> * scalar_kernel_warehouse;
      // This code should be refactored once we can do tags for scalar
      // kernels
      // Should redo this based on Warehouse
      if (!tags.size() || tags.size() == _fe_problem.numVectorTags(Moose::VECTOR_TAG_RESIDUAL))
        scalar_kernel_warehouse = &_scalar_kernels;
      else if (tags.size() == 1)
        scalar_kernel_warehouse =
            &(_scalar_kernels.getVectorTagObjectWarehouse(*(tags.begin()), 0));
      else
        // scalar_kernels is not threading
        scalar_kernel_warehouse = &(_scalar_kernels.getVectorTagsObjectWarehouse(tags, 0));

      bool have_scalar_contributions = false;
      const auto & scalars = scalar_kernel_warehouse->getActiveObjects();
      for (const auto & scalar_kernel : scalars)
      {
        scalar_kernel->reinit();
        const std::vector<dof_id_type> & dof_indices = scalar_kernel->variable().dofIndices();
        const DofMap & dof_map = scalar_kernel->variable().dofMap();
        const dof_id_type first_dof = dof_map.first_dof();
        const dof_id_type end_dof = dof_map.end_dof();
        for (dof_id_type dof : dof_indices)
        {
          if (dof >= first_dof && dof < end_dof)
          {
            scalar_kernel->computeResidual();
            have_scalar_contributions = true;
            break;
          }
        }
      }
      if (have_scalar_contributions)
        _fe_problem.addResidualScalar();
    }
  }
  PARALLEL_CATCH;

  // residual contributions from Block NodalKernels
  PARALLEL_TRY
  {
    if (_nodal_kernels.hasActiveBlockObjects())
    {
      TIME_SECTION("NodalKernels", 3 /*, "Computing NodalKernels"*/);

      ComputeNodalKernelsThread cnk(_fe_problem, _nodal_kernels, tags);

      ConstNodeRange & range = *_mesh.getLocalNodeRange();

      if (range.begin() != range.end())
      {
        _fe_problem.reinitNode(*range.begin(), 0);

        Threads::parallel_reduce(range, cnk);

        unsigned int n_threads = libMesh::n_threads();
        for (unsigned int i = 0; i < n_threads;
             i++) // Add any cached residuals that might be hanging around
          _fe_problem.addCachedResidual(i);
      }
    }
  }
  PARALLEL_CATCH;

  if (_fe_problem.computingScalingResidual())
    // We computed the volumetric objects. We can return now before we get into
    // any strongly enforced constraint conditions or penalty-type objects
    // (DGKernels, IntegratedBCs, InterfaceKernels, Constraints)
    return;

  // residual contributions from boundary NodalKernels
  PARALLEL_TRY
  {
    if (_nodal_kernels.hasActiveBoundaryObjects())
    {
      TIME_SECTION("NodalKernelBCs", 3 /*, "Computing NodalKernelBCs"*/);

      ComputeNodalKernelBcsThread cnk(_fe_problem, _nodal_kernels, tags);

      ConstBndNodeRange & bnd_node_range = *_mesh.getBoundaryNodeRange();

      Threads::parallel_reduce(bnd_node_range, cnk);

      unsigned int n_threads = libMesh::n_threads();
      for (unsigned int i = 0; i < n_threads;
           i++) // Add any cached residuals that might be hanging around
        _fe_problem.addCachedResidual(i);
    }
  }
  PARALLEL_CATCH;

  if (_residual_copy.get())
  {
    _Re_non_time->close();
    _Re_non_time->localize(*_residual_copy);
  }

  if (_need_residual_ghosted)
  {
    _Re_non_time->close();
    *_residual_ghosted = *_Re_non_time;
    _residual_ghosted->close();
  }

  PARALLEL_TRY { computeDiracContributions(tags, false); }
  PARALLEL_CATCH;

  if (_fe_problem._has_constraints)
  {
    PARALLEL_TRY { enforceNodalConstraintsResidual(*_Re_non_time); }
    PARALLEL_CATCH;
    _Re_non_time->close();
  }

  // Add in Residual contributions from other Constraints
  if (_fe_problem._has_constraints)
  {
    PARALLEL_TRY
    {
      // Undisplaced Constraints
      constraintResiduals(*_Re_non_time, false);

      // Displaced Constraints
      if (_fe_problem.getDisplacedProblem())
        constraintResiduals(*_Re_non_time, true);

      if (_fe_problem.computingNonlinearResid())
        _constraints.residualEnd();
    }
    PARALLEL_CATCH;
    _Re_non_time->close();
  }

  // Accumulate the occurrence of solution invalid warnings for the current iteration cumulative
  // counters
  _app.solutionInvalidity().solutionInvalidAccumulation();
}

1943 void
1945  const std::set<TagID> & matrix_tags)
1946 {
1947  TIME_SECTION("computeResidualAndJacobianInternal", 3);
1948 
1949  // Make matrix ready to use
1951 
1952  for (auto tag : matrix_tags)
1953  {
1954  if (!hasMatrix(tag))
1955  continue;
1956 
1957  auto & jacobian = getMatrix(tag);
1958  // Necessary for speed
1959  if (auto petsc_matrix = dynamic_cast<PetscMatrix<Number> *>(&jacobian))
1960  {
1961  LibmeshPetscCall(MatSetOption(petsc_matrix->mat(),
1962  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
1963  PETSC_TRUE));
1965  LibmeshPetscCall(
1966  MatSetOption(petsc_matrix->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
1968  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
1969  MAT_IGNORE_ZERO_ENTRIES,
1970  PETSC_TRUE));
1971  }
1972  }
1973 
1974  residualSetup();
1975 
1976  // Residual contributions from UOs - for now this is used for ray tracing
1977  // and ray kernels that contribute to the residual (think line sources)
1978  std::vector<UserObject *> uos;
1980  .query()
1981  .condition<AttribSystem>("UserObject")
1982  .condition<AttribExecOns>(EXEC_PRE_KERNELS)
1983  .queryInto(uos);
1984  for (auto & uo : uos)
1985  uo->residualSetup();
1986  for (auto & uo : uos)
1987  {
1988  uo->initialize();
1989  uo->execute();
1990  uo->finalize();
1991  }
1992 
1993  // reinit scalar variables
1994  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
1996 
1997  // residual contributions from the domain
1998  PARALLEL_TRY
1999  {
2000  TIME_SECTION("Kernels", 3 /*, "Computing Kernels"*/);
2001 
2002  ConstElemRange & elem_range = _fe_problem.getCurrentAlgebraicElementRange();
2003 
2004  ComputeResidualAndJacobianThread crj(_fe_problem, vector_tags, matrix_tags);
2005  Threads::parallel_reduce(elem_range, crj);
2006 
2008  if (_fe_problem.haveFV())
2009  {
2010  ComputeFVFluxRJThread<FVRange> fvrj(
2011  _fe_problem, this->number(), vector_tags, matrix_tags, /*on_displaced=*/false);
2012  FVRange faces(_fe_problem.mesh().ownedFaceInfoBegin(), _fe_problem.mesh().ownedFaceInfoEnd());
2013  Threads::parallel_reduce(faces, fvrj);
2014  }
2015  if (auto displaced_problem = _fe_problem.getDisplacedProblem();
2016  displaced_problem && displaced_problem->haveFV())
2017  {
2018  ComputeFVFluxRJThread<FVRange> fvr(
2019  _fe_problem, this->number(), vector_tags, matrix_tags, /*on_displaced=*/true);
2020  FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
2021  displaced_problem->mesh().ownedFaceInfoEnd());
2022  Threads::parallel_reduce(faces, fvr);
2023  }
2024 
2025  mortarConstraints(Moose::ComputeType::ResidualAndJacobian, vector_tags, matrix_tags);
2026 
2027  unsigned int n_threads = libMesh::n_threads();
2028  for (unsigned int i = 0; i < n_threads;
2029  i++) // Add any cached residuals that might be hanging around
2030  {
2031  _fe_problem.addCachedResidual(i);
2032  _fe_problem.addCachedJacobian(i);
2033  }
2034  }
2035  PARALLEL_CATCH;
2036 }
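// The TheWarehouse query in the function above is the general MOOSE idiom for
// gathering objects by attribute. A hedged sketch of the same idiom; `problem` is
// an assumed FEProblemBase reference and the attribute names are the ones already
// used above:
//
//   std::vector<UserObject *> objs;
//   problem.theWarehouse()
//       .query()
//       .condition<AttribSystem>("UserObject")
//       .condition<AttribExecOns>(EXEC_PRE_KERNELS)
//       .queryInto(objs);
//
// queryInto() fills the vector with every object matching all chained
// condition<>() clauses; further conditions can be chained the same way.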
2037 
2038 void
2039 NonlinearSystemBase::computeNodalBCs(NumericVector<Number> & residual)
2040 {
2041  _nl_vector_tags.clear();
2042 
2043  const auto & residual_vector_tags = _fe_problem.getVectorTags(Moose::VECTOR_TAG_RESIDUAL);
2044  for (const auto & residual_vector_tag : residual_vector_tags)
2045  _nl_vector_tags.insert(residual_vector_tag._id);
2046 
2047  associateVectorToTag(residual, residualVectorTag());
2048  computeNodalBCs(residual, _nl_vector_tags);
2049  disassociateVectorFromTag(residual, residualVectorTag());
2050 }
2051 
2052 void
2053 NonlinearSystemBase::computeNodalBCs(NumericVector<Number> & residual, const std::set<TagID> & tags)
2054 {
2055  associateVectorToTag(residual, residualVectorTag());
2056 
2057  computeNodalBCs(tags);
2058 
2059  disassociateVectorFromTag(residual, residualVectorTag());
2060 }
2061 
2062 void
2063 NonlinearSystemBase::computeNodalBCs(const std::set<TagID> & tags)
2064 {
2065  // We need to close the save_in variables on the aux system before NodalBCBases clear the
2066  // dofs on boundary nodes
2067  if (_has_save_in)
2068  _fe_problem.getAuxiliarySystem().solution().close();
2069 
2070  // Select the nodal BCs that match the requested vector tags
2071  MooseObjectWarehouse<NodalBCBase> * nbc_warehouse;
2072 
2073  if (tags.size() == _fe_problem.numVectorTags(Moose::VECTOR_TAG_RESIDUAL) || !tags.size())
2074  nbc_warehouse = &_nodal_bcs;
2075  else if (tags.size() == 1)
2076  nbc_warehouse = &(_nodal_bcs.getVectorTagObjectWarehouse(*(tags.begin()), 0));
2077  else
2078  nbc_warehouse = &(_nodal_bcs.getVectorTagsObjectWarehouse(tags, 0));
2079 
2080  // Return early if there are no active nodal BCs
2081  if (!nbc_warehouse->size())
2082  return;
2083 
2084  PARALLEL_TRY
2085  {
2086  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
2087 
2088  if (!bnd_nodes.empty())
2089  {
2090  TIME_SECTION("NodalBCs", 3 /*, "Computing NodalBCs"*/);
2091 
2092  for (const auto & bnode : bnd_nodes)
2093  {
2094  BoundaryID boundary_id = bnode->_bnd_id;
2095  Node * node = bnode->_node;
2096 
2097  if (node->processor_id() == processor_id() &&
2098  nbc_warehouse->hasActiveBoundaryObjects(boundary_id))
2099  {
2100  // reinit variables in nodes
2101  _fe_problem.reinitNodeFace(node, boundary_id, 0);
2102 
2103  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(boundary_id);
2104  for (const auto & nbc : bcs)
2105  if (nbc->shouldApply())
2106  nbc->computeResidual();
2107  }
2108  }
2109  }
2110  }
2111  PARALLEL_CATCH;
2112 
2113  if (_Re_time)
2114  _Re_time->close();
2115  _Re_non_time->close();
2116 }
2117 
2118 void
2119 NonlinearSystemBase::computeNodalBCsResidualAndJacobian()
2120 {
2121  PARALLEL_TRY
2122  {
2123  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
2124 
2125  if (!bnd_nodes.empty())
2126  {
2127  TIME_SECTION("NodalBCs", 3 /*, "Computing NodalBCs"*/);
2128 
2129  for (const auto & bnode : bnd_nodes)
2130  {
2131  BoundaryID boundary_id = bnode->_bnd_id;
2132  Node * node = bnode->_node;
2133 
2134  if (node->processor_id() == processor_id())
2135  {
2136  // reinit variables in nodes
2137  _fe_problem.reinitNodeFace(node, boundary_id, 0);
2138  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
2139  {
2140  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
2141  for (const auto & nbc : bcs)
2142  if (nbc->shouldApply())
2143  nbc->computeResidualAndJacobian();
2144  }
2145  }
2146  }
2147  }
2148  }
2149  PARALLEL_CATCH;
2150 
2151  // Set the cached NodalBCBase values in the Jacobian matrix
2153 }
2154 
2155 void
2156 NonlinearSystemBase::getNodeDofs(dof_id_type node_id, std::vector<dof_id_type> & dofs)
2157 {
2158  const Node & node = _mesh.nodeRef(node_id);
2159  unsigned int s = number();
2160  if (node.has_dofs(s))
2161  {
2162  for (unsigned int v = 0; v < nVariables(); v++)
2163  for (unsigned int c = 0; c < node.n_comp(s, v); c++)
2164  dofs.push_back(node.dof_number(s, v, c));
2165  }
2166 }
2167 
2168 void
2169 NonlinearSystemBase::findImplicitGeometricCouplingEntries(
2170  GeometricSearchData & geom_search_data,
2171  std::unordered_map<dof_id_type, std::vector<dof_id_type>> & graph)
2172 {
2173  const auto & node_to_elem_map = _mesh.nodeToElemMap();
2174  const auto & nearest_node_locators = geom_search_data._nearest_node_locators;
2175  for (const auto & it : nearest_node_locators)
2176  {
2177  std::vector<dof_id_type> & secondary_nodes = it.second->_secondary_nodes;
2178 
2179  for (const auto & secondary_node : secondary_nodes)
2180  {
2181  std::set<dof_id_type> unique_secondary_indices;
2182  std::set<dof_id_type> unique_primary_indices;
2183 
2184  auto node_to_elem_pair = node_to_elem_map.find(secondary_node);
2185  if (node_to_elem_pair != node_to_elem_map.end())
2186  {
2187  const std::vector<dof_id_type> & elems = node_to_elem_pair->second;
2188 
2189  // Get the dof indices from each elem connected to the node
2190  for (const auto & cur_elem : elems)
2191  {
2192  std::vector<dof_id_type> dof_indices;
2193  dofMap().dof_indices(_mesh.elemPtr(cur_elem), dof_indices);
2194 
2195  for (const auto & dof : dof_indices)
2196  unique_secondary_indices.insert(dof);
2197  }
2198  }
2199 
2200  std::vector<dof_id_type> primary_nodes = it.second->_neighbor_nodes[secondary_node];
2201 
2202  for (const auto & primary_node : primary_nodes)
2203  {
2204  auto primary_node_to_elem_pair = node_to_elem_map.find(primary_node);
2205  mooseAssert(primary_node_to_elem_pair != node_to_elem_map.end(),
2206  "Missing entry in node to elem map");
2207  const std::vector<dof_id_type> & primary_node_elems = primary_node_to_elem_pair->second;
2208 
2209  // Get the dof indices from each elem connected to the node
2210  for (const auto & cur_elem : primary_node_elems)
2211  {
2212  std::vector<dof_id_type> dof_indices;
2213  dofMap().dof_indices(_mesh.elemPtr(cur_elem), dof_indices);
2214 
2215  for (const auto & dof : dof_indices)
2216  unique_primary_indices.insert(dof);
2217  }
2218  }
2219 
2220  for (const auto & secondary_id : unique_secondary_indices)
2221  for (const auto & primary_id : unique_primary_indices)
2222  {
2223  graph[secondary_id].push_back(primary_id);
2224  graph[primary_id].push_back(secondary_id);
2225  }
2226  }
2227  }
2228 
2229  // handle node-to-node constraints
2230  const auto & ncs = _constraints.getActiveNodalConstraints();
2231  for (const auto & nc : ncs)
2232  {
2233  std::vector<dof_id_type> primary_dofs;
2234  std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();
2235  for (const auto & node_id : primary_node_ids)
2236  {
2237  Node * node = _mesh.queryNodePtr(node_id);
2238  if (node && node->processor_id() == this->processor_id())
2239  {
2240  getNodeDofs(node_id, primary_dofs);
2241  }
2242  }
2243 
2244  _communicator.allgather(primary_dofs);
2245 
2246  std::vector<dof_id_type> secondary_dofs;
2247  std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
2248  for (const auto & node_id : secondary_node_ids)
2249  {
2250  Node * node = _mesh.queryNodePtr(node_id);
2251  if (node && node->processor_id() == this->processor_id())
2252  {
2253  getNodeDofs(node_id, secondary_dofs);
2254  }
2255  }
2256 
2257  _communicator.allgather(secondary_dofs);
2258 
2259  for (const auto & primary_id : primary_dofs)
2260  for (const auto & secondary_id : secondary_dofs)
2261  {
2262  graph[primary_id].push_back(secondary_id);
2263  graph[secondary_id].push_back(primary_id);
2264  }
2265  }
2266 
2267  // Make every entry sorted and unique
2268  for (auto & it : graph)
2269  {
2270  std::vector<dof_id_type> & row = it.second;
2271  std::sort(row.begin(), row.end());
2272  std::vector<dof_id_type>::iterator uit = std::unique(row.begin(), row.end());
2273  row.resize(uit - row.begin());
2274  }
2275 }
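// A self-contained sketch (not from this file) of the sort-then-unique dedup that
// the loop above applies to every row of the coupling graph.
#include <algorithm> // assumed available transitively; shown for completeness
inline void
example_dedup_row(std::vector<dof_id_type> & row)
{
  std::sort(row.begin(), row.end());
  // std::unique moves duplicates to the tail and returns the new logical end
  row.erase(std::unique(row.begin(), row.end()), row.end());
}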
2276 
2277 void
2278 NonlinearSystemBase::addImplicitGeometricCouplingEntries(GeometricSearchData & geom_search_data)
2279 {
2280  if (!hasMatrix(systemMatrixTag()))
2281  mooseError("Need a system matrix ");
2282 
2283  // At this point, have no idea how to make
2284  // this work with tag system
2285  auto & jacobian = getMatrix(systemMatrixTag());
2286 
2287  std::unordered_map<dof_id_type, std::vector<dof_id_type>> graph;
2288 
2289  findImplicitGeometricCouplingEntries(geom_search_data, graph);
2290 
2291  for (const auto & it : graph)
2292  {
2293  dof_id_type dof = it.first;
2294  const auto & row = it.second;
2295 
2296  for (const auto & coupled_dof : row)
2297  jacobian.add(dof, coupled_dof, 0);
2298  }
2299 }
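// Note on the zeros added above: SparseMatrix::add(i, j, 0) forces entry (i, j)
// into the assembled nonzero pattern without changing any values, so later
// contact/constraint contributions at that location do not trigger a fresh (and
// expensive) PETSc allocation. A hedged one-liner, assuming `jacobian` is the
// system matrix:
//
//   jacobian.add(secondary_dof, primary_dof, 0.0); // reserve the slot only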
2300 
2301 void
2302 NonlinearSystemBase::constraintJacobians(const SparseMatrix<Number> & jacobian_to_view,
2303  bool displaced)
2304 {
2305  if (!hasMatrix(systemMatrixTag()))
2306  mooseError("A system matrix is required");
2307 
2308  auto & jacobian = getMatrix(systemMatrixTag());
2309 
2310  if (!_fe_problem.errorOnJacobianNonzeroReallocation())
2311  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2312  MAT_NEW_NONZERO_ALLOCATION_ERR,
2313  PETSC_FALSE));
2314  if (_fe_problem.ignoreZerosInJacobian())
2315  LibmeshPetscCall(MatSetOption(
2316  static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2317 
2318  std::vector<numeric_index_type> zero_rows;
2319 
2320  if (displaced)
2321  mooseAssert(_fe_problem.getDisplacedProblem(),
2322  "If we're calling this method with displaced = true, then we better well have a "
2323  "displaced problem");
2324  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
2325  : static_cast<SubProblem &>(_fe_problem);
2326  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;
2327 
2328  bool constraints_applied;
2329  if (!_assemble_constraints_separately)
2330  constraints_applied = false;
2331  for (const auto & it : penetration_locators)
2332  {
2333  if (_assemble_constraints_separately)
2334  {
2335  // Reset the constraint_applied flag before each new constraint, as they need to be
2336  // assembled separately
2337  constraints_applied = false;
2338  }
2339  PenetrationLocator & pen_loc = *(it.second);
2340 
2341  std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;
2342 
2343  BoundaryID secondary_boundary = pen_loc._secondary_boundary;
2344  BoundaryID primary_boundary = pen_loc._primary_boundary;
2345 
2346  zero_rows.clear();
2347  if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
2348  {
2349  const auto & constraints =
2350  _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);
2351 
2352  for (const auto & secondary_node_num : secondary_nodes)
2353  {
2354  Node & secondary_node = _mesh.nodeRef(secondary_node_num);
2355 
2356  if (secondary_node.processor_id() == processor_id())
2357  {
2358  if (pen_loc._penetration_info[secondary_node_num])
2359  {
2360  PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];
2361 
2362  reinitNodeFace(secondary_node, secondary_boundary, info, displaced);
2364 
2365  for (const auto & nfc : constraints)
2366  {
2367  if (nfc->isExplicitConstraint())
2368  continue;
2369  // Skip if this constraint does not correspond to the primary-secondary pair
2370  // prepared by the outer loops.
2371  // This continue statement is required when, e.g. one secondary surface constrains
2372  // more than one primary surface.
2373  if (nfc->secondaryBoundary() != secondary_boundary ||
2374  nfc->primaryBoundary() != primary_boundary)
2375  continue;
2376 
2377  nfc->_jacobian = &jacobian_to_view;
2378 
2379  if (nfc->shouldApply())
2380  {
2381  constraints_applied = true;
2382 
2383  nfc->prepareShapes(nfc->variable().number());
2384  nfc->prepareNeighborShapes(nfc->variable().number());
2385 
2386  nfc->computeJacobian();
2387 
2388  if (nfc->overwriteSecondaryJacobian())
2389  {
2390  // Add this variable's dof's row to be zeroed
2391  zero_rows.push_back(nfc->variable().nodalDofIndex());
2392  }
2393 
2394  std::vector<dof_id_type> secondary_dofs(1, nfc->variable().nodalDofIndex());
2395 
2396  // Assume that if the user is overwriting the secondary Jacobian, then they are
2397  // supplying Jacobians that do not correspond to their other physics
2398  // (e.g. Kernels), hence we should not apply a scalingFactor that is normally
2399  // based on the order of their other physics (e.g. Kernels)
2400  Real scaling_factor =
2401  nfc->overwriteSecondaryJacobian() ? 1. : nfc->variable().scalingFactor();
2402 
2403  // Cache the jacobian block for the secondary side
2404  nfc->addJacobian(_fe_problem.assembly(0, number()),
2405  nfc->_Kee,
2406  secondary_dofs,
2407  nfc->_connected_dof_indices,
2408  scaling_factor);
2409 
2410  // Cache Ken, Kne, Knn
2411  if (nfc->addCouplingEntriesToJacobian())
2412  {
2413  // Make sure we use a proper scaling factor (e.g. don't use an interior scaling
2414  // factor when we're overwriting secondary stuff)
2415  nfc->addJacobian(_fe_problem.assembly(0, number()),
2416  nfc->_Ken,
2417  secondary_dofs,
2418  nfc->primaryVariable().dofIndicesNeighbor(),
2419  scaling_factor);
2420 
2421  // Use _connected_dof_indices to get all the correct columns
2422  nfc->addJacobian(_fe_problem.assembly(0, number()),
2423  nfc->_Kne,
2424  nfc->primaryVariable().dofIndicesNeighbor(),
2425  nfc->_connected_dof_indices,
2426  nfc->primaryVariable().scalingFactor());
2427 
2428  // We've handled Ken and Kne, finally handle Knn
2430  }
2431 
2432  // Do the off-diagonals next
2433  const std::vector<MooseVariableFEBase *> coupled_vars = nfc->getCoupledMooseVars();
2434  for (const auto & jvar : coupled_vars)
2435  {
2436  // Only compute jacobians for nonlinear variables
2437  if (jvar->kind() != Moose::VAR_SOLVER)
2438  continue;
2439 
2440  // Only compute Jacobian entries if this coupling is being used by the
2441  // preconditioner
2442  if (nfc->variable().number() == jvar->number() ||
2443  !_fe_problem.areCoupled(
2444  nfc->variable().number(), jvar->number(), this->number()))
2445  continue;
2446 
2447  // Need to zero out the matrices first
2449 
2450  nfc->prepareShapes(nfc->variable().number());
2451  nfc->prepareNeighborShapes(jvar->number());
2452 
2453  nfc->computeOffDiagJacobian(jvar->number());
2454 
2455  // Cache the jacobian block for the secondary side
2456  nfc->addJacobian(_fe_problem.assembly(0, number()),
2457  nfc->_Kee,
2458  secondary_dofs,
2459  nfc->_connected_dof_indices,
2460  scaling_factor);
2461 
2462  // Cache Ken, Kne, Knn
2463  if (nfc->addCouplingEntriesToJacobian())
2464  {
2465  // Make sure we use a proper scaling factor (e.g. don't use an interior scaling
2466  // factor when we're overwriting secondary stuff)
2467  nfc->addJacobian(_fe_problem.assembly(0, number()),
2468  nfc->_Ken,
2469  secondary_dofs,
2470  jvar->dofIndicesNeighbor(),
2471  scaling_factor);
2472 
2473  // Use _connected_dof_indices to get all the correct columns
2474  nfc->addJacobian(_fe_problem.assembly(0, number()),
2475  nfc->_Kne,
2476  nfc->variable().dofIndicesNeighbor(),
2477  nfc->_connected_dof_indices,
2478  nfc->variable().scalingFactor());
2479 
2480  // We've handled Ken and Kne, finally handle Knn
2482  }
2483  }
2484  }
2485  }
2486  }
2487  }
2488  }
2489  }
2490  if (_assemble_constraints_separately)
2491  {
2492  // See if constraints were applied anywhere
2493  _communicator.max(constraints_applied);
2494 
2495  if (constraints_applied)
2496  {
2497  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2498  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2499  PETSC_TRUE));
2500 
2501  jacobian.close();
2502  jacobian.zero_rows(zero_rows, 0.0);
2503  jacobian.close();
2504  _fe_problem.addCachedJacobian(0);
2505  jacobian.close();
2506  }
2507  }
2508  }
2509  if (!_assemble_constraints_separately)
2510  {
2511  // See if constraints were applied anywhere
2512  _communicator.max(constraints_applied);
2513 
2514  if (constraints_applied)
2515  {
2516  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2517  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2518  PETSC_TRUE));
2519 
2520  jacobian.close();
2521  jacobian.zero_rows(zero_rows, 0.0);
2522  jacobian.close();
2523  _fe_problem.addCachedJacobian(0);
2524  jacobian.close();
2525  }
2526  }
2527 
2528  THREAD_ID tid = 0;
2529  // go over element-element constraint interface
2530  const auto & element_pair_locators = subproblem.geomSearchData()._element_pair_locators;
2531  for (const auto & it : element_pair_locators)
2532  {
2533  ElementPairLocator & elem_pair_loc = *(it.second);
2534 
2535  if (_constraints.hasActiveElemElemConstraints(it.first, displaced))
2536  {
2537  // ElemElemConstraint objects
2538  const auto & _element_constraints =
2539  _constraints.getActiveElemElemConstraints(it.first, displaced);
2540 
2541  // go over pair elements
2542  const std::list<std::pair<const Elem *, const Elem *>> & elem_pairs =
2543  elem_pair_loc.getElemPairs();
2544  for (const auto & pr : elem_pairs)
2545  {
2546  const Elem * elem1 = pr.first;
2547  const Elem * elem2 = pr.second;
2548 
2549  if (elem1->processor_id() != processor_id())
2550  continue;
2551 
2552  const ElementPairInfo & info = elem_pair_loc.getElemPairInfo(pr);
2553 
2554  // for each element pair, process the constraints defined on it
2555  for (const auto & ec : _element_constraints)
2556  {
2557  _fe_problem.setCurrentSubdomainID(elem1, tid);
2558  subproblem.reinitElemPhys(elem1, info._elem1_constraint_q_point, tid);
2559  _fe_problem.setNeighborSubdomainID(elem2, tid);
2560  subproblem.reinitNeighborPhys(elem2, info._elem2_constraint_q_point, tid);
2561 
2562  ec->prepareShapes(ec->variable().number());
2563  ec->prepareNeighborShapes(ec->variable().number());
2564 
2565  ec->reinit(info);
2566  ec->computeJacobian();
2569  }
2571  }
2572  }
2573  }
2574 
2575  // go over NodeElemConstraints
2576  std::set<dof_id_type> unique_secondary_node_ids;
2577  constraints_applied = false;
2578  for (const auto & secondary_id : _mesh.meshSubdomains())
2579  {
2580  for (const auto & primary_id : _mesh.meshSubdomains())
2581  {
2582  if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
2583  {
2584  const auto & constraints =
2585  _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);
2586 
2587  // get unique set of ids of all nodes on current block
2588  unique_secondary_node_ids.clear();
2589  const MeshBase & meshhelper = _mesh.getMesh();
2590  for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
2591  meshhelper.active_subdomain_elements_end(secondary_id)))
2592  {
2593  for (auto & n : elem->node_ref_range())
2594  unique_secondary_node_ids.insert(n.id());
2595  }
2596 
2597  for (auto secondary_node_id : unique_secondary_node_ids)
2598  {
2599  const Node & secondary_node = _mesh.nodeRef(secondary_node_id);
2600  // check if secondary node is on current processor
2601  if (secondary_node.processor_id() == processor_id())
2602  {
2603  // This reinits the variables that exist on the secondary node
2604  _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);
2605 
2606  // This will set aside residual and jacobian space for the variables that have dofs
2607  // on the secondary node
2610 
2611  for (const auto & nec : constraints)
2612  {
2613  if (nec->shouldApply())
2614  {
2615  constraints_applied = true;
2616 
2617  nec->_jacobian = &jacobian_to_view;
2618  nec->prepareShapes(nec->variable().number());
2619  nec->prepareNeighborShapes(nec->variable().number());
2620 
2621  nec->computeJacobian();
2622 
2623  if (nec->overwriteSecondaryJacobian())
2624  {
2625  // Add this variable's dof's row to be zeroed
2626  zero_rows.push_back(nec->variable().nodalDofIndex());
2627  }
2628 
2629  std::vector<dof_id_type> secondary_dofs(1, nec->variable().nodalDofIndex());
2630 
2631  // Cache the jacobian block for the secondary side
2632  nec->addJacobian(_fe_problem.assembly(0, number()),
2633  nec->_Kee,
2634  secondary_dofs,
2635  nec->_connected_dof_indices,
2636  nec->variable().scalingFactor());
2637 
2638  // Cache the jacobian block for the primary side
2639  nec->addJacobian(_fe_problem.assembly(0, number()),
2640  nec->_Kne,
2641  nec->primaryVariable().dofIndicesNeighbor(),
2642  nec->_connected_dof_indices,
2643  nec->primaryVariable().scalingFactor());
2644 
2647 
2648  // Do the off-diagonals next
2649  const std::vector<MooseVariableFEBase *> coupled_vars = nec->getCoupledMooseVars();
2650  for (const auto & jvar : coupled_vars)
2651  {
2652  // Only compute jacobians for nonlinear variables
2653  if (jvar->kind() != Moose::VAR_SOLVER)
2654  continue;
2655 
2656  // Only compute Jacobian entries if this coupling is being used by the
2657  // preconditioner
2658  if (nec->variable().number() == jvar->number() ||
2659  !_fe_problem.areCoupled(
2660  nec->variable().number(), jvar->number(), this->number()))
2661  continue;
2662 
2663  // Need to zero out the matrices first
2665 
2666  nec->prepareShapes(nec->variable().number());
2667  nec->prepareNeighborShapes(jvar->number());
2668 
2669  nec->computeOffDiagJacobian(jvar->number());
2670 
2671  // Cache the jacobian block for the secondary side
2672  nec->addJacobian(_fe_problem.assembly(0, number()),
2673  nec->_Kee,
2674  secondary_dofs,
2675  nec->_connected_dof_indices,
2676  nec->variable().scalingFactor());
2677 
2678  // Cache the jacobian block for the primary side
2679  nec->addJacobian(_fe_problem.assembly(0, number()),
2680  nec->_Kne,
2681  nec->variable().dofIndicesNeighbor(),
2682  nec->_connected_dof_indices,
2683  nec->variable().scalingFactor());
2684 
2687  }
2688  }
2689  }
2690  }
2691  }
2692  }
2693  }
2694  }
2695  // See if constraints were applied anywhere
2696  _communicator.max(constraints_applied);
2697 
2698  if (constraints_applied)
2699  {
2700  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2701  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2702  PETSC_TRUE));
2703 
2704  jacobian.close();
2705  jacobian.zero_rows(zero_rows, 0.0);
2706  jacobian.close();
2707  _fe_problem.addCachedJacobian(0);
2708  jacobian.close();
2709  }
2710 }
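// A sketch (illustrative only) of the parallel-consistency idiom used repeatedly
// above: the `constraints_applied` bool is max-reduced so that if any rank applied
// a constraint, every rank takes the collective close()/zero_rows() path together;
// skipping the reduction could deadlock, because close() is collective.
//
//   bool applied_locally = /* did this rank touch the matrix? */ false;
//   _communicator.max(applied_locally); // true everywhere if true anywhere
//   if (applied_locally)
//     jacobian.close();                 // safe: all ranks participate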
2711 
2712 void
2713 NonlinearSystemBase::computeScalarKernelsJacobians(const std::set<TagID> & tags)
2714 {
2715  MooseObjectWarehouse<ScalarKernelBase> * scalar_kernel_warehouse;
2716 
2717  if (!tags.size() || tags.size() == _fe_problem.numMatrixTags())
2718  scalar_kernel_warehouse = &_scalar_kernels;
2719  else if (tags.size() == 1)
2720  scalar_kernel_warehouse = &(_scalar_kernels.getMatrixTagObjectWarehouse(*(tags.begin()), 0));
2721  else
2722  scalar_kernel_warehouse = &(_scalar_kernels.getMatrixTagsObjectWarehouse(tags, 0));
2723 
2724  // Compute the diagonal block for scalar variables
2725  if (scalar_kernel_warehouse->hasActiveObjects())
2726  {
2727  const auto & scalars = scalar_kernel_warehouse->getActiveObjects();
2728 
2729  _fe_problem.reinitScalars(/*tid=*/0);
2730 
2731  _fe_problem.reinitOffDiagScalars(/*_tid*/ 0);
2732 
2733  bool have_scalar_contributions = false;
2734  for (const auto & kernel : scalars)
2735  {
2736  kernel->reinit();
2737  const std::vector<dof_id_type> & dof_indices = kernel->variable().dofIndices();
2738  const DofMap & dof_map = kernel->variable().dofMap();
2739  const dof_id_type first_dof = dof_map.first_dof();
2740  const dof_id_type end_dof = dof_map.end_dof();
2741  for (dof_id_type dof : dof_indices)
2742  {
2743  if (dof >= first_dof && dof < end_dof)
2744  {
2745  kernel->computeJacobian();
2746  _fe_problem.addJacobianOffDiagScalar(kernel->variable().number());
2747  have_scalar_contributions = true;
2748  break;
2749  }
2750  }
2751  }
2752 
2753  if (have_scalar_contributions)
2754  _fe_problem.addJacobianScalar();
2755  }
2756 }
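// A self-contained sketch (not MOOSE API) of the ownership test used above: a dof
// is locally owned iff it falls in the half-open range [first_dof, end_dof)
// assigned to this processor by the DofMap.
inline bool
example_owns_dof(dof_id_type dof, dof_id_type first_dof, dof_id_type end_dof)
{
  return dof >= first_dof && dof < end_dof;
}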
2757 
2758 void
2759 NonlinearSystemBase::jacobianSetup()
2760 {
2762 
2763  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
2764  {
2765  _kernels.jacobianSetup(tid);
2768  if (_doing_dg)
2774  }
2779 
2780  // Avoid recursion
2781  if (this == &_fe_problem.currentNonlinearSystem())
2782  _fe_problem.jacobianSetup();
2784 }
2785 
2786 void
2787 NonlinearSystemBase::computeJacobianInternal(const std::set<TagID> & tags)
2788 {
2789  TIME_SECTION("computeJacobianInternal", 3);
2790 
2792 
2793  // Make matrix ready to use
2794  activeAllMatrixTags();
2795 
2796  for (auto tag : tags)
2797  {
2798  if (!hasMatrix(tag))
2799  continue;
2800 
2801  auto & jacobian = getMatrix(tag);
2802  // Necessary for speed
2803  if (auto petsc_matrix = dynamic_cast<PetscMatrix<Number> *>(&jacobian))
2804  {
2805  LibmeshPetscCall(MatSetOption(petsc_matrix->mat(),
2806  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2807  PETSC_TRUE));
2808  if (!_fe_problem.errorOnJacobianNonzeroReallocation())
2809  LibmeshPetscCall(
2810  MatSetOption(petsc_matrix->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
2811  if (_fe_problem.ignoreZerosInJacobian())
2812  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2813  MAT_IGNORE_ZERO_ENTRIES,
2814  PETSC_TRUE));
2815  }
2816  }
2817 
2818  jacobianSetup();
2819 
2820  // Jacobian contributions from UOs - for now this is used for ray tracing
2821  // and ray kernels that contribute to the Jacobian (think line sources)
2822  std::vector<UserObject *> uos;
2823  _fe_problem.theWarehouse()
2824  .query()
2825  .condition<AttribSystem>("UserObject")
2826  .condition<AttribExecOns>(EXEC_PRE_KERNELS)
2827  .queryInto(uos);
2828  for (auto & uo : uos)
2829  uo->jacobianSetup();
2830  for (auto & uo : uos)
2831  {
2832  uo->initialize();
2833  uo->execute();
2834  uo->finalize();
2835  }
2836 
2837  // reinit scalar variables
2838  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
2839  _fe_problem.reinitScalars(tid);
2840 
2841  PARALLEL_TRY
2842  {
2843  // We would like to compute ScalarKernels, block NodalKernels, FVFluxKernels, and mortar objects
2844  // up front because we want these included whether we are computing an ordinary Jacobian or a
2845  // Jacobian for determining variable scaling factors
2847 
2848  // Block restricted Nodal Kernels
2849  if (_nodal_kernels.hasActiveBlockObjects())
2850  {
2853  Threads::parallel_reduce(range, cnkjt);
2854 
2855  unsigned int n_threads = libMesh::n_threads();
2856  for (unsigned int i = 0; i < n_threads;
2857  i++) // Add any cached jacobians that might be hanging around
2858  _fe_problem.addCachedJacobian(i);
2859  }
2860 
2862  if (_fe_problem.haveFV())
2863  {
2864  // the same loop works for both residual and jacobians because it keys
2865  // off of FEProblem's _currently_computing_jacobian parameter
2866  ComputeFVFluxJacobianThread<FVRange> fvj(
2867  _fe_problem, this->number(), tags, /*on_displaced=*/false);
2868  FVRange faces(_fe_problem.mesh().ownedFaceInfoBegin(), _fe_problem.mesh().ownedFaceInfoEnd());
2869  Threads::parallel_reduce(faces, fvj);
2870  }
2871  if (auto displaced_problem = _fe_problem.getDisplacedProblem();
2872  displaced_problem && displaced_problem->haveFV())
2873  {
2874  ComputeFVFluxJacobianThread<FVRange> fvr(
2875  _fe_problem, this->number(), tags, /*on_displaced=*/true);
2876  FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
2877  displaced_problem->mesh().ownedFaceInfoEnd());
2878  Threads::parallel_reduce(faces, fvr);
2879  }
2880 
2881  mortarConstraints(Moose::ComputeType::Jacobian, {}, tags);
2882 
2883  // Get our element range for looping over
2884  ConstElemRange & elem_range = _fe_problem.getCurrentAlgebraicElementRange();
2885 
2886  if (_fe_problem.computingScalingJacobian())
2887  {
2888  // Only compute Jacobians corresponding to the diagonals of volumetric compute objects
2889  // because this typically gives us a good representation of the physics. NodalBCs and
2890  // Constraints can introduce dramatically different scales (often order unity).
2891  // IntegratedBCs and/or InterfaceKernels may use penalty factors. DGKernels may be ok, but
2892  // they are almost always used in conjunction with Kernels
2893  ComputeJacobianForScalingThread cj(_fe_problem, tags);
2894  Threads::parallel_reduce(elem_range, cj);
2895  unsigned int n_threads = libMesh::n_threads();
2896  for (unsigned int i = 0; i < n_threads;
2897  i++) // Add any Jacobian contributions still hanging around
2898  _fe_problem.addCachedJacobian(i);
2899 
2900  // Check whether any exceptions were thrown and propagate this information for parallel
2901  // consistency before
2902  // 1) we do parallel communication when closing tagged matrices
2903  // 2) early returning before reaching our PARALLEL_CATCH below
2904  _fe_problem.checkExceptionAndStopSolve(/*print_message=*/false);
2905 
2906  closeTaggedMatrices(tags);
2907 
2908  return;
2909  }
2910 
2911  switch (_fe_problem.coupling())
2912  {
2913  case Moose::COUPLING_DIAG:
2914  {
2915  ComputeJacobianThread cj(_fe_problem, tags);
2916  Threads::parallel_reduce(elem_range, cj);
2917 
2918  unsigned int n_threads = libMesh::n_threads();
2919  for (unsigned int i = 0; i < n_threads;
2920  i++) // Add any Jacobian contributions still hanging around
2921  _fe_problem.addCachedJacobian(i);
2922 
2923  // Boundary restricted Nodal Kernels
2924  if (_nodal_kernels.hasActiveBoundaryObjects())
2925  {
2928 
2929  Threads::parallel_reduce(bnd_range, cnkjt);
2930  unsigned int n_threads = libMesh::n_threads();
2931  for (unsigned int i = 0; i < n_threads;
2932  i++) // Add any cached jacobians that might be hanging around
2933  _fe_problem.addCachedJacobian(i);
2934  }
2935  }
2936  break;
2937 
2938  default:
2939  case Moose::COUPLING_FULL:
2940  {
2941  ComputeFullJacobianThread cj(_fe_problem, tags);
2942  Threads::parallel_reduce(elem_range, cj);
2943  unsigned int n_threads = libMesh::n_threads();
2944 
2945  for (unsigned int i = 0; i < n_threads; i++)
2946  _fe_problem.addCachedJacobian(i);
2947 
2948  // Boundary restricted Nodal Kernels
2949  if (_nodal_kernels.hasActiveBoundaryObjects())
2950  {
2953 
2954  Threads::parallel_reduce(bnd_range, cnkjt);
2955  unsigned int n_threads = libMesh::n_threads();
2956  for (unsigned int i = 0; i < n_threads;
2957  i++) // Add any cached jacobians that might be hanging around
2958  _fe_problem.addCachedJacobian(i);
2959  }
2960  }
2961  break;
2962  }
2963 
2964  computeDiracContributions(tags, true);
2965 
2966  static bool first = true;
2967 
2968  // This adds zeroes into geometric coupling entries to ensure they stay in the matrix
2969  if ((_fe_problem.restoreOriginalNonzeroPattern() || first) &&
2970  _add_implicit_geometric_coupling_entries_to_jacobian)
2971  {
2972  first = false;
2973  addImplicitGeometricCouplingEntries(_fe_problem.geomSearchData());
2974 
2975  if (_fe_problem.getDisplacedProblem())
2976  addImplicitGeometricCouplingEntries(_fe_problem.getDisplacedProblem()->geomSearchData());
2977  }
2978  }
2979  PARALLEL_CATCH;
2980 
2981  // We have not yet worked out how to make constraints work
2982  // with the tag system
2983  PARALLEL_TRY
2984  {
2985  // Add in Jacobian contributions from other Constraints
2986  if (_fe_problem._has_constraints && tags.count(systemMatrixTag()))
2987  {
2988  // Some constraints need to be able to read values from the Jacobian, which requires that it
2989  // be closed/assembled
2990  auto & system_matrix = getMatrix(systemMatrixTag());
2991 #if PETSC_RELEASE_GREATER_EQUALS(3, 23, 0)
2992  SparseMatrix<Number> * view_jac_ptr;
2993  std::unique_ptr<SparseMatrix<Number>> hash_copy;
2994  if (system_matrix.use_hash_table())
2995  {
2996  hash_copy = libMesh::cast_ref<PetscMatrix<Number> &>(system_matrix).copy_from_hash();
2997  view_jac_ptr = hash_copy.get();
2998  }
2999  else
3000  view_jac_ptr = &system_matrix;
3001  auto & jacobian_to_view = *view_jac_ptr;
3002 #else
3003  auto & jacobian_to_view = system_matrix;
3004 #endif
3005  if (&jacobian_to_view == &system_matrix)
3006  system_matrix.close();
3007 
3008  // Nodal Constraints
3009  enforceNodalConstraintsJacobian();
3010 
3011  // Undisplaced Constraints
3012  constraintJacobians(jacobian_to_view, false);
3013 
3014  // Displaced Constraints
3015  if (_fe_problem.getDisplacedProblem())
3016  constraintJacobians(jacobian_to_view, true);
3017  }
3018  }
3019  PARALLEL_CATCH;
3020 
3021  // We need to close the save_in variables on the aux system before NodalBCBases clear the dofs
3022  // on boundary nodes
3023  if (_has_diag_save_in)
3024  _fe_problem.getAuxiliarySystem().solution().close();
3025 
3026  PARALLEL_TRY
3027  {
3028  MooseObjectWarehouse<NodalBCBase> * nbc_warehouse;
3029  // Select the nodal BCs that match the requested matrix tags
3030  if (tags.size() == _fe_problem.numMatrixTags() || !tags.size())
3031  nbc_warehouse = &_nodal_bcs;
3032  else if (tags.size() == 1)
3033  nbc_warehouse = &(_nodal_bcs.getMatrixTagObjectWarehouse(*(tags.begin()), 0));
3034  else
3035  nbc_warehouse = &(_nodal_bcs.getMatrixTagsObjectWarehouse(tags, 0));
3036 
3037  if (nbc_warehouse->hasActiveObjects())
3038  {
3039  // We may be switching from add to set. Moreover, we rely on a call to MatZeroRows to enforce
3040  // the nodal boundary condition constraints which requires that the matrix be truly assembled
3041  // as opposed to just flushed. Consequently we can't do the following despite any desire to
3042  // keep our initial sparsity pattern honored (see https://gitlab.com/petsc/petsc/-/issues/852)
3043  //
3044  // flushTaggedMatrices(tags);
3045  closeTaggedMatrices(tags);
3046 
3047  // Cache the information about which BCs are coupled to which
3048  // variables, so we don't have to figure it out for each node.
3049  std::map<std::string, std::set<unsigned int>> bc_involved_vars;
3050  const std::set<BoundaryID> & all_boundary_ids = _mesh.getBoundaryIDs();
3051  for (const auto & bid : all_boundary_ids)
3052  {
3053  // Get reference to all the NodalBCs for this ID. This is only
3054  // safe if there are NodalBCBases there to be gotten...
3055  if (nbc_warehouse->hasActiveBoundaryObjects(bid))
3056  {
3057  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(bid);
3058  for (const auto & bc : bcs)
3059  {
3060  const std::vector<MooseVariableFEBase *> & coupled_moose_vars =
3061  bc->getCoupledMooseVars();
3062 
3063  // Create the set of "involved" MOOSE nonlinear vars, which includes all coupled vars
3064  // and the BC's own variable
3065  std::set<unsigned int> & var_set = bc_involved_vars[bc->name()];
3066  for (const auto & coupled_var : coupled_moose_vars)
3067  if (coupled_var->kind() == Moose::VAR_SOLVER)
3068  var_set.insert(coupled_var->number());
3069 
3070  var_set.insert(bc->variable().number());
3071  }
3072  }
3073  }
3074 
3075  // reinit scalar variables again. This reinit does not re-fill any of the scalar variable
3076  // solution arrays because that was done above. It only will reorder the derivative
3077  // information for AD calculations to be suitable for NodalBC calculations
3078  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
3079  _fe_problem.reinitScalars(tid, true);
3080 
3081  // Get variable coupling list. We do all the NodalBCBase stuff on
3082  // thread 0... The couplingEntries() data structure determines
3083  // which variables are "coupled" as far as the preconditioner is
3084  // concerned, not what variables a boundary condition specifically
3085  // depends on.
3086  auto & coupling_entries = _fe_problem.couplingEntries(/*_tid=*/0, this->number());
3087 
3088  // Compute Jacobians for NodalBCBases
3089  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
3090  for (const auto & bnode : bnd_nodes)
3091  {
3092  BoundaryID boundary_id = bnode->_bnd_id;
3093  Node * node = bnode->_node;
3094 
3095  if (nbc_warehouse->hasActiveBoundaryObjects(boundary_id) &&
3096  node->processor_id() == processor_id())
3097  {
3098  _fe_problem.reinitNodeFace(node, boundary_id, 0);
3099 
3100  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(boundary_id);
3101  for (const auto & bc : bcs)
3102  {
3103  // Get the set of involved MOOSE vars for this BC
3104  std::set<unsigned int> & var_set = bc_involved_vars[bc->name()];
3105 
3106  // Loop over all the variables whose Jacobian blocks are
3107  // actually being computed, call computeOffDiagJacobian()
3108  // for each one which is actually coupled (otherwise the
3109  // value is zero.)
3110  for (const auto & it : coupling_entries)
3111  {
3112  unsigned int ivar = it.first->number(), jvar = it.second->number();
3113 
3114  // We are only going to call computeOffDiagJacobian() if:
3115  // 1.) the BC's variable is ivar
3116  // 2.) jvar is "involved" with the BC (including jvar==ivar), and
3117  // 3.) the BC should apply.
3118  if ((bc->variable().number() == ivar) && var_set.count(jvar) && bc->shouldApply())
3119  bc->computeOffDiagJacobian(jvar);
3120  }
3121 
3122  const auto & coupled_scalar_vars = bc->getCoupledMooseScalarVars();
3123  for (const auto & jvariable : coupled_scalar_vars)
3124  if (hasScalarVariable(jvariable->name()))
3125  bc->computeOffDiagJacobianScalar(jvariable->number());
3126  }
3127  }
3128  } // end loop over boundary nodes
3129 
3130  // Set the cached NodalBCBase values in the Jacobian matrix
3132  }
3133  }
3134  PARALLEL_CATCH;
3135 
3136  closeTaggedMatrices(tags);
3137 
3138  // We need to close the save_in variables on the aux system before NodalBCBases clear the dofs
3139  // on boundary nodes
3142 
3143  if (hasDiagSaveIn())
3144  _fe_problem.getAuxiliarySystem().update();
3145 
3146  // Accumulate the occurrences of solution invalid warnings into the current
3147  // iteration's cumulative counters
3148  _app.solutionInvalidity().solutionInvalidAccumulation();
3150 }
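// Note on the PETSC_RELEASE_GREATER_EQUALS guard used in the function above: it
// gates code on the linked PETSc version at compile time. A hedged sketch of the
// idiom:
//
//   #if PETSC_RELEASE_GREATER_EQUALS(3, 23, 0)
//     // newer PETSc: hash-table matrices may need copying before they can be read
//   #else
//     // older PETSc: the assembled matrix can be viewed directly
//   #endif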
3151 
3152 void
3153 NonlinearSystemBase::computeJacobian(SparseMatrix<Number> & jacobian)
3154 {
3155  _nl_matrix_tags.clear();
3156 
3157  auto & tags = _fe_problem.getMatrixTags();
3158 
3159  for (auto & tag : tags)
3160  _nl_matrix_tags.insert(tag.second);
3161 
3162  computeJacobian(jacobian, _nl_matrix_tags);
3163 }
3164 
3165 void
3166 NonlinearSystemBase::computeJacobian(SparseMatrix<Number> & jacobian, const std::set<TagID> & tags)
3167 {
3168  associateMatrixToTag(jacobian, systemMatrixTag());
3169 
3170  computeJacobianTags(tags);
3171 
3172  disassociateMatrixFromTag(jacobian, systemMatrixTag());
3173 }
3174 
3175 void
3176 NonlinearSystemBase::computeJacobianTags(const std::set<TagID> & tags)
3177 {
3178  TIME_SECTION("computeJacobianTags", 5);
3179 
3180  FloatingPointExceptionGuard fpe_guard(_app);
3181 
3182  try
3183  {
3184  computeJacobianInternal(tags);
3185  }
3186  catch (MooseException & e)
3187  {
3188  // The buck stops here, we have already handled the exception by
3189  // calling stopSolve(), it is now up to PETSc to return a
3190  // "diverged" reason during the next solve.
3191  }
3192 }
3193 
3194 void
3195 NonlinearSystemBase::computeJacobianBlocks(std::vector<JacobianBlock *> & blocks)
3196 {
3197  _nl_matrix_tags.clear();
3198 
3199  auto & tags = _fe_problem.getMatrixTags();
3200  for (auto & tag : tags)
3201  _nl_matrix_tags.insert(tag.second);
3202 
3203  computeJacobianBlocks(blocks, _nl_matrix_tags);
3204 }
3205 
3206 void
3207 NonlinearSystemBase::computeJacobianBlocks(std::vector<JacobianBlock *> & blocks,
3208  const std::set<TagID> & tags)
3209 {
3210  TIME_SECTION("computeJacobianBlocks", 3);
3211  FloatingPointExceptionGuard fpe_guard(_app);
3212 
3213  for (unsigned int i = 0; i < blocks.size(); i++)
3214  {
3215  SparseMatrix<Number> & jacobian = blocks[i]->_jacobian;
3216 
3217  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
3218  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
3219  PETSC_TRUE));
3220  if (_fe_problem.errorOnJacobianNonzeroReallocation())
3221  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
3222  MAT_NEW_NONZERO_ALLOCATION_ERR,
3223  PETSC_TRUE));
3224 
3225  jacobian.zero();
3226  }
3227 
3228  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
3229  _fe_problem.reinitScalars(tid);
3230 
3231  PARALLEL_TRY
3232  {
3233  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
3234  ComputeJacobianBlocksThread cjb(_fe_problem, blocks, tags);
3235  Threads::parallel_reduce(elem_range, cjb);
3236  }
3237  PARALLEL_CATCH;
3238 
3239  for (unsigned int i = 0; i < blocks.size(); i++)
3240  blocks[i]->_jacobian.close();
3241 
3242  for (unsigned int i = 0; i < blocks.size(); i++)
3243  {
3244  libMesh::System & precond_system = blocks[i]->_precond_system;
3245  SparseMatrix<Number> & jacobian = blocks[i]->_jacobian;
3246 
3247  unsigned int ivar = blocks[i]->_ivar;
3248  unsigned int jvar = blocks[i]->_jvar;
3249 
3250  // Dirichlet BCs
3251  std::vector<numeric_index_type> zero_rows;
3252  PARALLEL_TRY
3253  {
3254  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
3255  for (const auto & bnode : bnd_nodes)
3256  {
3257  BoundaryID boundary_id = bnode->_bnd_id;
3258  Node * node = bnode->_node;
3259 
3260  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
3261  {
3262  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
3263 
3264  if (node->processor_id() == processor_id())
3265  {
3266  _fe_problem.reinitNodeFace(node, boundary_id, 0);
3267 
3268  for (const auto & bc : bcs)
3269  if (bc->variable().number() == ivar && bc->shouldApply())
3270  {
3271  // The first zero is for the variable number... there is only one variable in
3272  // each mini-system The second zero only works with Lagrange elements!
3273  zero_rows.push_back(node->dof_number(precond_system.number(), 0, 0));
3274  }
3275  }
3276  }
3277  }
3278  }
3279  PARALLEL_CATCH;
3280 
3281  jacobian.close();
3282 
3283  // This zeroes the rows corresponding to Dirichlet BCs and puts a 1.0 on the diagonal
3284  if (ivar == jvar)
3285  jacobian.zero_rows(zero_rows, 1.0);
3286  else
3287  jacobian.zero_rows(zero_rows, 0.0);
3288 
3289  jacobian.close();
3290  }
3291 }
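// Note on zero_rows(rows, v) as used above: it zeroes every listed row and writes
// v on the diagonal. For a diagonal block (ivar == jvar), v = 1.0 leaves an
// identity row for each Dirichlet-constrained dof, keeping the preconditioner
// block nonsingular; off-diagonal blocks get v = 0.0 since their diagonal belongs
// to another variable pairing. Illustrative call, assuming `jacobian` is closed:
//
//   jacobian.zero_rows(dirichlet_rows, ivar == jvar ? 1.0 : 0.0);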
3292 
3293 void
3294 NonlinearSystemBase::updateActive(THREAD_ID tid)
3295 {
3302  _kernels.updateActive(tid);
3304  if (tid == 0)
3305  {
3312  }
3313 }
3314 
3315 Real
3316 NonlinearSystemBase::computeDamping(const NumericVector<Number> & solution,
3317  const NumericVector<Number> & update)
3318 {
3319  // Default to no damping
3320  Real damping = 1.0;
3321  bool has_active_dampers = false;
3322 
3323  try
3324  {
3325  if (_element_dampers.hasActiveObjects())
3326  {
3327  PARALLEL_TRY
3328  {
3329  TIME_SECTION("computeDampers", 3, "Computing Dampers");
3330  has_active_dampers = true;
3331  ComputeElemDampingThread cid(_fe_problem, *this);
3332 
3333  Threads::parallel_reduce(_fe_problem.getCurrentAlgebraicElementRange(), cid);
3334  damping = std::min(cid.damping(), damping);
3335  }
3336  PARALLEL_CATCH;
3337  }
3338 
3339  if (_nodal_dampers.hasActiveObjects())
3340  {
3341  PARALLEL_TRY
3342  {
3343  TIME_SECTION("computeDamping::element", 3, "Computing Element Damping");
3344 
3345  has_active_dampers = true;
3346  ComputeNodalDampingThread cndt(_fe_problem, *this);
3347 
3348  Threads::parallel_reduce(_fe_problem.getCurrentAlgebraicNodeRange(), cndt);
3349  damping = std::min(cndt.damping(), damping);
3350  }
3351  PARALLEL_CATCH;
3352  }
3353 
3354  if (_general_dampers.hasActiveObjects())
3355 
3356  PARALLEL_TRY
3357  {
3358  TIME_SECTION("computeDamping::general", 3, "Computing General Damping");
3359 
3360  has_active_dampers = true;
3361  const auto & gdampers = _general_dampers.getActiveObjects();
3362  for (const auto & damper : gdampers)
3363  {
3364  Real gd_damping = damper->computeDamping(solution, update);
3365  try
3366  {
3367  damper->checkMinDamping(gd_damping);
3368  }
3369  catch (MooseException & e)
3370  {
3371  _fe_problem.setException(e.what());
3372  }
3373  damping = std::min(gd_damping, damping);
3374  }
3375  }
3376  PARALLEL_CATCH;
3377  }
3378  }
3379  catch (MooseException & e)
3380  {
3381  // The buck stops here, we have already handled the exception by
3382  // calling stopSolve(), it is now up to PETSc to return a
3383  // "diverged" reason during the next solve.
3384  }
3385 
3386  _communicator.min(damping);
3387 
3388  if (has_active_dampers && damping < 1.0)
3389  _console << " Damping factor: " << damping << std::endl;
3390 
3391  return damping;
3392 }
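// A self-contained sketch (not from this file) of how the final damping factor is
// formed: every active damper proposes a factor and the most conservative
// (smallest) one wins; 1.0 means "take the full Newton update".
inline Real
example_combine_damping(const std::vector<Real> & proposed)
{
  Real damping = 1.0; // default: undamped
  for (const Real d : proposed)
    damping = std::min(damping, d);
  return damping; // a parallel run still needs the communicator min-reduction
}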
3393 
3394 void
3395 NonlinearSystemBase::computeDiracContributions(const std::set<TagID> & tags, bool is_jacobian)
3396 {
3397  _fe_problem.clearDiracInfo();
3398 
3399  std::set<const Elem *> dirac_elements;
3400 
3401  if (_dirac_kernels.hasActiveObjects())
3402  {
3403  TIME_SECTION("computeDirac", 3, "Computing DiracKernels");
3404 
3405  // TODO: Need a threading fix... but it's complicated!
3406  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
3407  {
3408  const auto & dkernels = _dirac_kernels.getActiveObjects(tid);
3409  for (const auto & dkernel : dkernels)
3410  {
3411  dkernel->clearPoints();
3412  dkernel->addPoints();
3413  }
3414  }
3415 
3416  ComputeDiracThread cd(_fe_problem, tags, is_jacobian);
3417 
3418  _fe_problem.getDiracElements(dirac_elements);
3419 
3420  DistElemRange range(dirac_elements.begin(), dirac_elements.end(), 1);
3421  // TODO: Make the Dirac kernel loop threaded:
3422  // Threads::parallel_reduce(range, cd);
3423 
3424  cd(range);
3425  }
3426 }
3427 
3428 NumericVector<Number> &
3429 NonlinearSystemBase::residualCopy()
3430 {
3431  if (!_residual_copy.get())
3432  _residual_copy = NumericVector<Number>::build(_communicator);
3433 
3434  return *_residual_copy;
3435 }
3436 
3437 NumericVector<Number> &
3438 NonlinearSystemBase::residualGhosted()
3439 {
3440  _need_residual_ghosted = true;
3441  if (!_residual_ghosted)
3442  {
3443  // The first time we realize we need a ghosted residual vector,
3444  // we add it.
3445  _residual_ghosted = &addVector("residual_ghosted", false, GHOSTED);
3446 
3447  // If we've already realized we need time and/or non-time
3448  // residual vectors, but we haven't yet realized they need to be
3449  // ghosted, fix that now.
3450  //
3451  // If an application changes its mind, the libMesh API lets us
3452  // change the vector.
3453  if (_Re_time)
3454  {
3455  const auto vector_name = _subproblem.vectorTagName(_Re_time_tag);
3456  _Re_time = &system().add_vector(vector_name, false, GHOSTED);
3457  }
3458  if (_Re_non_time)
3459  {
3460  const auto vector_name = _subproblem.vectorTagName(_Re_non_time_tag);
3461  _Re_non_time = &system().add_vector(vector_name, false, GHOSTED);
3462  }
3463  }
3464  return *_residual_ghosted;
3465 }
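// Note on the lazy re-add above: GHOSTED libMesh vectors store every locally
// relevant entry (owned plus ghosts), so off-processor residual values can be
// read without extra communication. The pattern relies on System::add_vector()
// returning the vector of that name, reinitialized with the new GHOSTED layout.
// Hedged sketch:
//
//   auto & vec = system().add_vector("residual_ghosted", /*projections=*/false, GHOSTED);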
3466 
3467 void
3468 NonlinearSystemBase::augmentSparsity(SparsityPattern::Graph & sparsity,
3469  std::vector<dof_id_type> & n_nz,
3470  std::vector<dof_id_type> & n_oz)
3471 {
3472  if (_fe_problem._has_constraints)
3473  {
3475 
3476  std::unordered_map<dof_id_type, std::vector<dof_id_type>> graph;
3477 
3478  findImplicitGeometricCouplingEntries(_fe_problem.geomSearchData(), graph);
3479 
3480  if (_fe_problem.getDisplacedProblem())
3481  findImplicitGeometricCouplingEntries(_fe_problem.getDisplacedProblem()->geomSearchData(),
3482  graph);
3483 
3484  const dof_id_type first_dof_on_proc = dofMap().first_dof(processor_id());
3485  const dof_id_type end_dof_on_proc = dofMap().end_dof(processor_id());
3486 
3487  // The total number of dofs on and off processor
3488  const dof_id_type n_dofs_on_proc = dofMap().n_local_dofs();
3489  const dof_id_type n_dofs_not_on_proc = dofMap().n_dofs() - dofMap().n_local_dofs();
3490 
3491  for (const auto & git : graph)
3492  {
3493  dof_id_type dof = git.first;
3494  dof_id_type local_dof = dof - first_dof_on_proc;
3495 
3496  if (dof < first_dof_on_proc || dof >= end_dof_on_proc)
3497  continue;
3498 
3499  const auto & row = git.second;
3500 
3501  SparsityPattern::Row & sparsity_row = sparsity[local_dof];
3502 
3503  unsigned int original_row_length = sparsity_row.size();
3504 
3505  sparsity_row.insert(sparsity_row.end(), row.begin(), row.end());
3506 
3507  SparsityPattern::sort_row(
3508  sparsity_row.begin(), sparsity_row.begin() + original_row_length, sparsity_row.end());
3509 
3510  // Fix up nonzero arrays
3511  for (const auto & coupled_dof : row)
3512  {
3513  if (coupled_dof < first_dof_on_proc || coupled_dof >= end_dof_on_proc)
3514  {
3515  if (n_oz[local_dof] < n_dofs_not_on_proc)
3516  n_oz[local_dof]++;
3517  }
3518  else
3519  {
3520  if (n_nz[local_dof] < n_dofs_on_proc)
3521  n_nz[local_dof]++;
3522  }
3523  }
3524  }
3525  }
3526 }
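// A self-contained sketch (illustrative only) of the n_nz/n_oz bookkeeping above:
// for a locally owned row, each coupled column counts as on-processor (n_nz) or
// off-processor (n_oz) depending on whether it lies in this rank's ownership
// range [first_dof, end_dof).
inline void
example_count_nonzeros(const std::vector<dof_id_type> & row_cols,
                       dof_id_type first_dof,
                       dof_id_type end_dof,
                       dof_id_type & n_nz,
                       dof_id_type & n_oz)
{
  for (const auto col : row_cols)
    if (col >= first_dof && col < end_dof)
      ++n_nz; // owned column: diagonal block of the parallel matrix
    else
      ++n_oz; // remote column: off-diagonal block
}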
3527 
3528 void
3529 NonlinearSystemBase::setSolutionUDot(const NumericVector<Number> & u_dot)
3530 {
3531  *_u_dot = u_dot;
3532 }
3533 
3534 void
3535 NonlinearSystemBase::setSolutionUDotDot(const NumericVector<Number> & u_dotdot)
3536 {
3537  *_u_dotdot = u_dotdot;
3538 }
3539 
3540 void
3541 NonlinearSystemBase::setSolutionUDotOld(const NumericVector<Number> & u_dot_old)
3542 {
3543  *_u_dot_old = u_dot_old;
3544 }
3545 
3546 void
3547 NonlinearSystemBase::setSolutionUDotDotOld(const NumericVector<Number> & u_dotdot_old)
3548 {
3549  *_u_dotdot_old = u_dotdot_old;
3550 }
3551 
3552 void
3553 NonlinearSystemBase::setPreconditioner(std::shared_ptr<MoosePreconditioner> pc)
3554 {
3555  if (_preconditioner.get() != nullptr)
3556  mooseError("More than one active Preconditioner detected");
3557 
3558  _preconditioner = pc;
3559 }
3560 
3561 MoosePreconditioner const *
3562 NonlinearSystemBase::getPreconditioner() const
3563 {
3564  return _preconditioner.get();
3565 }
3566 
3567 void
3568 NonlinearSystemBase::setupDampers()
3569 {
3570  _increment_vec = &_sys.add_vector("u_increment", true, GHOSTED);
3571 }
3572 
3573 void
3574 NonlinearSystemBase::reinitIncrementAtQpsForDampers(THREAD_ID tid,
3575  const std::set<MooseVariable *> & damped_vars)
3576 {
3577  for (const auto & var : damped_vars)
3578  var->computeIncrementAtQps(*_increment_vec);
3579 }
3580 
3581 void
3582 NonlinearSystemBase::reinitIncrementAtNodeForDampers(THREAD_ID tid,
3583  const std::set<MooseVariable *> & damped_vars)
3584 {
3585  for (const auto & var : damped_vars)
3586  var->computeIncrementAtNode(*_increment_vec);
3587 }
3588 
3589 void
3590 NonlinearSystemBase::checkKernelCoverage(const std::set<SubdomainID> & mesh_subdomains) const
3591 {
3592  // Obtain all blocks and variables covered by all kernels
3593  std::set<SubdomainID> input_subdomains;
3594  std::set<std::string> kernel_variables;
3595 
3596  bool global_kernels_exist = false;
3597  global_kernels_exist |= _scalar_kernels.hasActiveObjects();
3598  global_kernels_exist |= _nodal_kernels.hasActiveObjects();
3599 
3600  _kernels.subdomainsCovered(input_subdomains, kernel_variables);
3601  _dg_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3602  _nodal_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3603  _scalar_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3604  _constraints.subdomainsCovered(input_subdomains, kernel_variables);
3605 
3606  if (_fe_problem.haveFV())
3607  {
3608  std::vector<FVElementalKernel *> fv_elemental_kernels;
3610  .query()
3611  .template condition<AttribSystem>("FVElementalKernel")
3612  .queryInto(fv_elemental_kernels);
3613 
3614  for (auto fv_kernel : fv_elemental_kernels)
3615  {
3616  if (fv_kernel->blockRestricted())
3617  for (auto block_id : fv_kernel->blockIDs())
3618  input_subdomains.insert(block_id);
3619  else
3620  global_kernels_exist = true;
3621  kernel_variables.insert(fv_kernel->variable().name());
3622 
3623  // Check for lagrange multiplier
3624  if (dynamic_cast<FVScalarLagrangeMultiplierConstraint *>(fv_kernel))
3625  kernel_variables.insert(dynamic_cast<FVScalarLagrangeMultiplierConstraint *>(fv_kernel)
3626  ->lambdaVariable()
3627  .name());
3628  }
3629 
3630  std::vector<FVFluxKernel *> fv_flux_kernels;
3632  .query()
3633  .template condition<AttribSystem>("FVFluxKernel")
3634  .queryInto(fv_flux_kernels);
3635 
3636  for (auto fv_kernel : fv_flux_kernels)
3637  {
3638  if (fv_kernel->blockRestricted())
3639  for (auto block_id : fv_kernel->blockIDs())
3640  input_subdomains.insert(block_id);
3641  else
3642  global_kernels_exist = true;
3643  kernel_variables.insert(fv_kernel->variable().name());
3644  }
3645 
3646  std::vector<FVInterfaceKernel *> fv_interface_kernels;
3648  .query()
3649  .template condition<AttribSystem>("FVInterfaceKernel")
3650  .queryInto(fv_interface_kernels);
3651 
3652  for (auto fvik : fv_interface_kernels)
3653  if (auto scalar_fvik = dynamic_cast<FVScalarLagrangeMultiplierInterface *>(fvik))
3654  kernel_variables.insert(scalar_fvik->lambdaVariable().name());
3655 
3656  std::vector<FVFluxBC *> fv_flux_bcs;
3658  .query()
3659  .template condition<AttribSystem>("FVFluxBC")
3660  .queryInto(fv_flux_bcs);
3661 
3662  for (auto fvbc : fv_flux_bcs)
3663  if (auto scalar_fvbc = dynamic_cast<FVBoundaryScalarLagrangeMultiplierConstraint *>(fvbc))
3664  kernel_variables.insert(scalar_fvbc->lambdaVariable().name());
3665  }
3666 
3667  // Check kernel coverage of subdomains (blocks) in your mesh
3668  if (!global_kernels_exist)
3669  {
3670  std::set<SubdomainID> difference;
3671  std::set_difference(mesh_subdomains.begin(),
3672  mesh_subdomains.end(),
3673  input_subdomains.begin(),
3674  input_subdomains.end(),
3675  std::inserter(difference, difference.end()));
3676 
3677  // there are supposed to be no kernels on these lower-dimensional subdomains
3678  for (const auto & id : _mesh.interiorLowerDBlocks())
3679  difference.erase(id);
3680  for (const auto & id : _mesh.boundaryLowerDBlocks())
3681  difference.erase(id);
3682 
3683  if (!difference.empty())
3684  {
3685  std::vector<SubdomainID> difference_vec =
3686  std::vector<SubdomainID>(difference.begin(), difference.end());
3687  std::vector<SubdomainName> difference_names = _mesh.getSubdomainNames(difference_vec);
3688  std::stringstream missing_block_names;
3689  std::copy(difference_names.begin(),
3690  difference_names.end(),
3691  std::ostream_iterator<std::string>(missing_block_names, " "));
3692  std::stringstream missing_block_ids;
3693  std::copy(difference.begin(),
3694  difference.end(),
3695  std::ostream_iterator<unsigned int>(missing_block_ids, " "));
3696 
3697  mooseError("Each subdomain must contain at least one Kernel.\nThe following block(s) lack an "
3698  "active kernel: " +
3699  missing_block_names.str(),
3700  " (ids: ",
3701  missing_block_ids.str(),
3702  ")");
3703  }
3704  }
3705 
3706  // Check kernel use of variables
3707  std::set<VariableName> variables(getVariableNames().begin(), getVariableNames().end());
3708 
3709  std::set<VariableName> difference;
3710  std::set_difference(variables.begin(),
3711  variables.end(),
3712  kernel_variables.begin(),
3713  kernel_variables.end(),
3714  std::inserter(difference, difference.end()));
3715 
3716  // skip checks for variables defined on lower-dimensional subdomains
3717  std::set<VariableName> vars(difference);
3718  for (auto & var_name : vars)
3719  {
3720  auto blks = getSubdomainsForVar(var_name);
3721  for (const auto & id : blks)
3722  if (_mesh.interiorLowerDBlocks().count(id) > 0 || _mesh.boundaryLowerDBlocks().count(id) > 0)
3723  difference.erase(var_name);
3724  }
3725 
3726  if (!difference.empty())
3727  {
3728  std::stringstream missing_kernel_vars;
3729  std::copy(difference.begin(),
3730  difference.end(),
3731  std::ostream_iterator<std::string>(missing_kernel_vars, " "));
3732  mooseError("Each variable must be referenced by at least one active Kernel.\nThe following "
3733  "variable(s) lack an active kernel: " +
3734  missing_kernel_vars.str());
3735  }
3736 }
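// A self-contained sketch of the std::set_difference idiom used twice above to
// report uncovered blocks/variables; inputs must be sorted, which std::set
// already guarantees.
inline std::set<std::string>
example_uncovered(const std::set<std::string> & all, const std::set<std::string> & covered)
{
  std::set<std::string> missing;
  std::set_difference(all.begin(),
                      all.end(),
                      covered.begin(),
                      covered.end(),
                      std::inserter(missing, missing.end()));
  return missing; // elements of `all` never seen in `covered`
}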
3737 
3738 bool
3739 NonlinearSystemBase::containsTimeKernel()
3740 {
3741  auto & time_kernels = _kernels.getVectorTagObjectWarehouse(timeVectorTag(), 0);
3742 
3743  return time_kernels.hasActiveObjects();
3744 }
3745 
3746 std::vector<std::string>
3747 NonlinearSystemBase::timeKernelVariableNames()
3748 {
3749  std::vector<std::string> variable_names;
3750  const auto & time_kernels = _kernels.getVectorTagObjectWarehouse(timeVectorTag(), 0);
3751  if (time_kernels.hasActiveObjects())
3752  for (const auto & kernel : time_kernels.getObjects())
3753  variable_names.push_back(kernel->variable().name());
3754 
3755  return variable_names;
3756 }
3757 
3758 bool
3759 NonlinearSystemBase::needBoundaryMaterialOnSide(BoundaryID bnd_id, THREAD_ID tid)
3760 {
3761  return _integrated_bcs.hasActiveBoundaryObjects(bnd_id, tid);
3762 }
3763 
3764 bool
3765 NonlinearSystemBase::needInterfaceMaterialOnSide(BoundaryID bnd_id, THREAD_ID tid)
3766 {
3767  return _interface_kernels.hasActiveBoundaryObjects(bnd_id, tid);
3768 }
3769 
3770 bool
3771 NonlinearSystemBase::needSubdomainMaterialOnSide(SubdomainID /*subdomain_id*/,
3772  THREAD_ID /*tid*/) const
3773 {
3774  return _doing_dg;
3775 }
3776 
3777 bool
3778 NonlinearSystemBase::doingDG() const
3779 {
3780  return _doing_dg;
3781 }
3782 
3783 void
3785 {
3788 }
3789 
3790 void
3791 NonlinearSystemBase::mortarConstraints(Moose::ComputeType compute_type,
3792  const std::set<TagID> & vector_tags,
3793  const std::set<TagID> & matrix_tags)
3794 {
3795  parallel_object_only();
3796 
3797  try
3798  {
3799  for (auto & map_pr : _undisplaced_mortar_functors)
3800  map_pr.second(compute_type, vector_tags, matrix_tags);
3801 
3802  for (auto & map_pr : _displaced_mortar_functors)
3803  map_pr.second(compute_type, vector_tags, matrix_tags);
3804  }
3805  catch (MetaPhysicL::LogicError &)
3806  {
3807  mooseError(
3808  "We caught a MetaPhysicL error in NonlinearSystemBase::mortarConstraints. This is very "
3809  "likely due to AD not having a sufficiently large derivative container size. Please run "
3810  "MOOSE configure with the '--with-derivative-size=<n>' option");
3811  }
3812 }
3813 
3814 void
3815 NonlinearSystemBase::setupScalingData()
3816 {
3817  if (_auto_scaling_initd)
3818  return;
3819 
3820  // Want the libMesh count of variables, not MOOSE, e.g. I don't care about array variable counts
3821  const auto n_vars = system().n_vars();
3822 
3823  if (_scaling_group_variables.empty())
3824  {
3825  _var_to_group_var.reserve(n_vars);
3826  _num_scaling_groups = n_vars;
3827 
3828  for (const auto var_number : make_range(n_vars))
3829  _var_to_group_var.emplace(var_number, var_number);
3830  }
3831  else
3832  {
3833  std::set<unsigned int> var_numbers, var_numbers_covered, var_numbers_not_covered;
3834  for (const auto var_number : make_range(n_vars))
3835  var_numbers.insert(var_number);
3836 
3837  _var_to_group_var.reserve(n_vars);
3838 
3839  for (const auto group_index : index_range(_scaling_group_variables))
3840  for (const auto & var_name : _scaling_group_variables[group_index])
3841  {
3842  if (!hasVariable(var_name) && !hasScalarVariable(var_name))
3843  mooseError("'",
3844  var_name,
3845  "', provided to the 'scaling_group_variables' parameter, does not exist in "
3846  "the nonlinear system.");
3847 
3848  const MooseVariableBase & var =
3849  hasVariable(var_name)
3850  ? static_cast<MooseVariableBase &>(getVariable(0, var_name))
3851  : static_cast<MooseVariableBase &>(getScalarVariable(0, var_name));
3852  auto map_pair = _var_to_group_var.emplace(var.number(), group_index);
3853  if (!map_pair.second)
3854  mooseError("Variable ", var_name, " is contained in multiple scaling grouplings");
3855  var_numbers_covered.insert(var.number());
3856  }
3857 
3858  std::set_difference(var_numbers.begin(),
3859  var_numbers.end(),
3860  var_numbers_covered.begin(),
3861  var_numbers_covered.end(),
3862  std::inserter(var_numbers_not_covered, var_numbers_not_covered.begin()));
3863 
3864  _num_scaling_groups = _scaling_group_variables.size() + var_numbers_not_covered.size();
3865 
3866  auto index = static_cast<unsigned int>(_scaling_group_variables.size());
3867  for (auto var_number : var_numbers_not_covered)
3868  _var_to_group_var.emplace(var_number, index++);
3869  }
3870 
3871  _variable_autoscaled.resize(n_vars, true);
3872  const auto & number_to_var_map = _vars[0].numberToVariableMap();
3873 
3875  for (const auto i : index_range(_variable_autoscaled))
3876  if (std::find(_ignore_variables_for_autoscaling.begin(),
3877  _ignore_variables_for_autoscaling.end(),
3878  libmesh_map_find(number_to_var_map, i)->name()) !=
3879  _ignore_variables_for_autoscaling.end())
3880  _variable_autoscaled[i] = false;
3881 
3882  _auto_scaling_initd = true;
3883 }
3884 
3885 bool
3886 NonlinearSystemBase::computeScaling()
3887 {
3888  if (_computed_scaling)
3889  return true;
3890 
3891  _console << "\nPerforming automatic scaling calculation\n" << std::endl;
3892 
3893  TIME_SECTION("computeScaling", 3, "Computing Automatic Scaling");
3894 
3895  // It's funny but we need to assemble our vector of scaling factors here otherwise we will be
3896  // applying scaling factors of 0 during Assembly of our scaling Jacobian
3897  assembleScalingVector();
3898 
3899  // container for repeated access of element global dof indices
3900  std::vector<dof_id_type> dof_indices;
3901 
3902  if (!_auto_scaling_initd)
3903  setupScalingData();
3904 
3905  std::vector<Real> inverse_scaling_factors(_num_scaling_groups, 0);
3906  std::vector<Real> resid_inverse_scaling_factors(_num_scaling_groups, 0);
3907  std::vector<Real> jac_inverse_scaling_factors(_num_scaling_groups, 0);
3908  auto & dof_map = dofMap();
3909 
3910  // what types of scaling do we want?
3911  bool jac_scaling = _resid_vs_jac_scaling_param < 1. - TOLERANCE;
3912  bool resid_scaling = _resid_vs_jac_scaling_param > TOLERANCE;
3913 
3914  const NumericVector<Number> & scaling_residual = RHS();
3915 
3916  if (jac_scaling)
3917  {
3918  // if (!_auto_scaling_initd)
3919  // We need to reinit this when the number of dofs changes
3920  // but there is no good way to track that
3921  // In theory, it is the job of libmesh system to track this,
3922  // but this special matrix is not owned by libMesh system
3923  // Let us reinit it every time since it is not expensive
3924  {
3925  auto init_vector = NumericVector<Number>::build(this->comm());
3926  init_vector->init(system().n_dofs(), system().n_local_dofs(), /*fast=*/false, PARALLEL);
3927 
3928  _scaling_matrix->clear();
3929  _scaling_matrix->init(*init_vector);
3930  }
3931 
3932  _fe_problem.computingScalingJacobian(true);
3933  // Dispatch to derived classes to ensure that we use the correct matrix tag
3934  computeScalingJacobian();
3935  _fe_problem.computingScalingJacobian(false);
3936  }
3937 
3938  if (resid_scaling)
3939  {
3940  _fe_problem.computingScalingResidual(true);
3941 
3942  // Dispatch to derived classes to ensure that we use the correct vector tag
3943  computeScalingResidual();
3944  _fe_problem.computingScalingResidual(false);
3946  }
3947 
3948  // Did something bad happen during residual/Jacobian scaling computation?
3949  if (_fe_problem.getFailNextNonlinearConvergenceCheck())
3950  return false;
3951 
3952  auto examine_dof_indices = [this,
3953  jac_scaling,
3954  resid_scaling,
3955  &dof_map,
3956  &jac_inverse_scaling_factors,
3957  &resid_inverse_scaling_factors,
3958  &scaling_residual](const auto & dof_indices, const auto var_number)
3959  {
3960  for (auto dof_index : dof_indices)
3961  if (dof_map.local_index(dof_index))
3962  {
3963  if (jac_scaling)
3964  {
3965  // For now we will use the diagonal for determining scaling
3966  auto mat_value = (*_scaling_matrix)(dof_index, dof_index);
3967  auto & factor = jac_inverse_scaling_factors[_var_to_group_var[var_number]];
3968  factor = std::max(factor, std::abs(mat_value));
3969  }
3970  if (resid_scaling)
3971  {
3972  auto vec_value = scaling_residual(dof_index);
3973  auto & factor = resid_inverse_scaling_factors[_var_to_group_var[var_number]];
3974  factor = std::max(factor, std::abs(vec_value));
3975  }
3976  }
3977  };
3978 
3979  // Compute our scaling factors for the spatial field variables
3980  for (const auto & elem : _fe_problem.getCurrentAlgebraicElementRange())
3981  for (const auto i : make_range(system().n_vars()))
3981  if (_variable_autoscaled[i] &&
3982  system().variable_type(i).family != SCALAR)
3983  {
3984  dof_map.dof_indices(elem, dof_indices, i);
3985  examine_dof_indices(dof_indices, i);
3986  }
3987 
3988  for (const auto i : make_range(system().n_vars()))
3989  if (_variable_autoscaled[i] && system().variable_type(i).family == SCALAR)
3990  {
3991  dof_map.SCALAR_dof_indices(dof_indices, i);
3992  examine_dof_indices(dof_indices, i);
3993  }
3994 
3995  if (resid_scaling)
3996  _communicator.max(resid_inverse_scaling_factors);
3997  if (jac_scaling)
3998  _communicator.max(jac_inverse_scaling_factors);
3999 
4000  if (jac_scaling && resid_scaling)
4001  for (MooseIndex(inverse_scaling_factors) i = 0; i < inverse_scaling_factors.size(); ++i)
4002  {
4003  // Be careful not to take log(0)
4004  if (!resid_inverse_scaling_factors[i])
4005  {
4006  if (!jac_inverse_scaling_factors[i])
4007  inverse_scaling_factors[i] = 1;
4008  else
4009  inverse_scaling_factors[i] = jac_inverse_scaling_factors[i];
4010  }
4011  else if (!jac_inverse_scaling_factors[i])
4012  // We know the resid is not zero
4013  inverse_scaling_factors[i] = resid_inverse_scaling_factors[i];
4014  else
4015  inverse_scaling_factors[i] =
4016  std::exp(_resid_vs_jac_scaling_param * std::log(resid_inverse_scaling_factors[i]) +
4017  (1 - _resid_vs_jac_scaling_param) * std::log(jac_inverse_scaling_factors[i]));
4018  }
4019  else if (jac_scaling)
4020  inverse_scaling_factors = jac_inverse_scaling_factors;
4021  else if (resid_scaling)
4022  inverse_scaling_factors = resid_inverse_scaling_factors;
4023  else
4024  mooseError("We shouldn't be calling this routine if we're not performing any scaling");
4025 
4026  // We have to make sure that our scaling values are not zero
4027  for (auto & scaling_factor : inverse_scaling_factors)
4028  if (scaling_factor == 0)
4029  scaling_factor = 1;
4030 
4031  // Now flatten the group scaling factors to the individual variable scaling factors
4032  std::vector<Real> flattened_inverse_scaling_factors(system().n_vars());
4033  for (const auto i : index_range(flattened_inverse_scaling_factors))
4034  flattened_inverse_scaling_factors[i] = inverse_scaling_factors[_var_to_group_var[i]];
4035 
4036  // Now set the scaling factors for the variables
4037  applyScalingFactors(flattened_inverse_scaling_factors);
4038  if (auto displaced_problem = _fe_problem.getDisplacedProblem())
4039  displaced_problem->systemBaseNonlinear(number()).applyScalingFactors(
4040  flattened_inverse_scaling_factors);
4041 
4042  _computed_scaling = true;
4043  return true;
4044 }
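The exp/log combination near the end of computeScaling() is a weighted geometric mean: with alpha = _resid_vs_jac_scaling_param, the blended inverse factor is r^alpha * j^(1 - alpha), which reduces to pure residual scaling at alpha = 1 and pure Jacobian scaling at alpha = 0. A minimal sketch of that arithmetic, assuming both inputs are positive (the zero cases are handled by the branches above):

  #include <cassert>
  #include <cmath>

  // Blend a residual-based inverse factor r with a Jacobian-based one j.
  double blendInverseScalingFactor(double r, double j, double alpha)
  {
    assert(r > 0. && j > 0.); // log(0) is guarded against in the caller
    return std::exp(alpha * std::log(r) + (1. - alpha) * std::log(j));
    // Equivalent closed form: std::pow(r, alpha) * std::pow(j, 1. - alpha)
  }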
4045 
4046 void
4047 NonlinearSystemBase::assembleScalingVector()
4048 {
4049  if (!hasVector("scaling_factors"))
4050  // No variables have indicated they need scaling
4051  return;
4052 
4053  auto & scaling_vector = getVector("scaling_factors");
4054 
4055  const auto & lm_mesh = _mesh.getMesh();
4056  const auto & dof_map = dofMap();
4057 
4058  const auto & field_variables = _vars[0].fieldVariables();
4059  const auto & scalar_variables = _vars[0].scalars();
4060 
4061  std::vector<dof_id_type> dof_indices;
4062 
4063  for (const Elem * const elem :
4064  as_range(lm_mesh.active_local_elements_begin(), lm_mesh.active_local_elements_end()))
4065  for (const auto * const field_var : field_variables)
4066  {
4067  const auto & factors = field_var->arrayScalingFactor();
4068  for (const auto i : make_range(field_var->count()))
4069  {
4070  dof_map.dof_indices(elem, dof_indices, field_var->number() + i);
4071  for (const auto dof : dof_indices)
4072  scaling_vector.set(dof, factors[i]);
4073  }
4074  }
4075 
4076  for (const auto * const scalar_var : scalar_variables)
4077  {
4078  mooseAssert(scalar_var->count() == 1,
4079  "Scalar variables should always have only one component.");
4080  dof_map.SCALAR_dof_indices(dof_indices, scalar_var->number());
4081  for (const auto dof : dof_indices)
4082  scaling_vector.set(dof, scalar_var->scalingFactor());
4083  }
4084 
4085  // Parallel assemble
4086  scaling_vector.close();
4087 
4088  if (auto * displaced_problem = _fe_problem.getDisplacedProblem().get())
4089  // copy into the corresponding displaced system vector because they should be the exact same
4090  displaced_problem->systemBaseNonlinear(number()).getVector("scaling_factors") = scaling_vector;
4091 }
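The point of this routine is to turn per-variable (and, for array variables, per-component) scaling factors into a lookup keyed by global dof number, since during assembly only the dof index is cheaply available. A rough standalone sketch with plain containers; the container names here are hypothetical, and the real code writes into a parallel NumericVector and close()s it:

  #include <cstddef>
  #include <vector>

  // Expand one factor per component into a dof-indexed array.
  // component_dofs[c] holds the global dofs owned by component c.
  std::vector<double>
  flattenFactorsToDofs(const std::vector<double> & component_factors,
                       const std::vector<std::vector<std::size_t>> & component_dofs,
                       std::size_t n_dofs)
  {
    std::vector<double> dof_factors(n_dofs, 1.0);
    for (std::size_t c = 0; c < component_factors.size(); ++c)
      for (const std::size_t dof : component_dofs[c])
        dof_factors[dof] = component_factors[c];
    return dof_factors;
  }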
4092 
4093 bool
4094 NonlinearSystemBase::preSolve()
4095 {
4096  // Clear the iteration counters
4097  _current_l_its.clear();
4098  _current_nl_its = 0;
4099 
4100  // Initialize the solution vector using a predictor and known values from nodal bcs
4101  setInitialSolution();
4102 
4103  // Now that the initial solution has been set, potentially perform a residual/Jacobian evaluation
4104  // to determine variable scaling factors
4105  if (_automatic_scaling)
4106  {
4107  const bool scaling_succeeded = computeScaling();
4108  if (!scaling_succeeded)
4109  return false;
4110  }
4111 
4112  // We do not know a priori what variable a global degree of freedom corresponds to, so we need a
4113  // map from global dof to scaling factor. We just use a ghosted NumericVector for that mapping
4114  assembleScalingVector();
4115 
4116  return true;
4117 }
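Note the ordering in preSolve(): the scaling pass runs only after the initial solution is in place, so the scaling residual/Jacobian are evaluated at a realistic state, and assembleScalingVector() is called once more at the end because computeScaling(), via applyScalingFactors(), may have just changed the per-variable factors that the dof-indexed vector caches. A condensed sketch of that control flow; every name below is a hypothetical stand-in, not MOOSE API:

  bool setInitialSolutionSketch();   // predictor + preset nodal BCs
  bool automaticScalingRequested();
  bool computeScalingSketch();       // may fail if an exception occurs during evaluation
  void rebuildDofToFactorVector();   // factors may have just changed

  bool preSolveSketch()
  {
    setInitialSolutionSketch();
    if (automaticScalingRequested() && !computeScalingSketch())
      return false;                  // a failed scaling evaluation aborts the solve
    rebuildDofToFactorVector();
    return true;
  }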
4118 
4119 void
4120 NonlinearSystemBase::destroyColoring()
4121 {
4122  if (matrixFromColoring())
4123  LibmeshPetscCall(MatFDColoringDestroy(&_fdcoloring));
4124 }
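destroyColoring() tears the coloring down only when matrixFromColoring() reports that a finite-difference coloring was actually created for this system. The same own-before-destroy idea, sketched with std::optional instead of a PETSc MatFDColoring handle; all names here are stand-ins:

  #include <optional>

  struct Coloring { /* native FD-coloring resources elided */ };

  struct FDJacobianContextSketch
  {
    std::optional<Coloring> coloring; // engaged only when the matrix comes from coloring

    void destroyColoring()
    {
      if (coloring)       // plays the role of matrixFromColoring()
        coloring.reset(); // plays the role of MatFDColoringDestroy(&_fdcoloring)
    }
  };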