NonlinearSystemBase.C
1 //* This file is part of the MOOSE framework
2 //* https://mooseframework.inl.gov
3 //*
4 //* All rights reserved, see COPYRIGHT for full restrictions
5 //* https://github.com/idaholab/moose/blob/master/COPYRIGHT
6 //*
7 //* Licensed under LGPL 2.1, please see LICENSE for details
8 //* https://www.gnu.org/licenses/lgpl-2.1.html
9 
10 #include "NonlinearSystemBase.h"
11 #include "AuxiliarySystem.h"
12 #include "Problem.h"
13 #include "FEProblem.h"
14 #include "MooseVariableFE.h"
15 #include "MooseVariableScalar.h"
16 #include "PetscSupport.h"
17 #include "Factory.h"
18 #include "ParallelUniqueId.h"
19 #include "ThreadedElementLoop.h"
20 #include "MaterialData.h"
21 #include "ComputeResidualThread.h"
23 #include "ComputeFVFluxThread.h"
24 #include "ComputeJacobianThread.h"
28 #include "ComputeDiracThread.h"
35 #include "TimeKernel.h"
36 #include "BoundaryCondition.h"
37 #include "DirichletBCBase.h"
38 #include "NodalBCBase.h"
39 #include "IntegratedBCBase.h"
40 #include "DGKernel.h"
41 #include "InterfaceKernelBase.h"
42 #include "ElementDamper.h"
43 #include "NodalDamper.h"
44 #include "GeneralDamper.h"
45 #include "DisplacedProblem.h"
46 #include "NearestNodeLocator.h"
47 #include "PenetrationLocator.h"
48 #include "NodalConstraint.h"
49 #include "NodeFaceConstraint.h"
50 #include "NodeElemConstraintBase.h"
51 #include "MortarConstraint.h"
52 #include "ElemElemConstraint.h"
53 #include "ScalarKernelBase.h"
54 #include "Parser.h"
55 #include "Split.h"
57 #include "MooseMesh.h"
58 #include "MooseUtils.h"
59 #include "MooseApp.h"
60 #include "NodalKernelBase.h"
61 #include "DiracKernelBase.h"
62 #include "TimeIntegrator.h"
63 #include "Predictor.h"
64 #include "Assembly.h"
65 #include "ElementPairLocator.h"
66 #include "ODETimeKernel.h"
69 #include "ADKernel.h"
70 #include "ADDirichletBCBase.h"
71 #include "Moose.h"
72 #include "ConsoleStream.h"
73 #include "MooseError.h"
74 #include "FVElementalKernel.h"
77 #include "FVFluxKernel.h"
79 #include "UserObject.h"
81 #include "HDGKernel.h"
83 
84 // libMesh
85 #include "libmesh/nonlinear_solver.h"
86 #include "libmesh/quadrature_gauss.h"
87 #include "libmesh/dense_vector.h"
88 #include "libmesh/boundary_info.h"
89 #include "libmesh/petsc_matrix.h"
90 #include "libmesh/petsc_vector.h"
91 #include "libmesh/petsc_nonlinear_solver.h"
92 #include "libmesh/numeric_vector.h"
93 #include "libmesh/mesh.h"
94 #include "libmesh/dense_subvector.h"
95 #include "libmesh/dense_submatrix.h"
96 #include "libmesh/dof_map.h"
97 #include "libmesh/sparse_matrix.h"
98 #include "libmesh/petsc_matrix.h"
99 #include "libmesh/default_coupling.h"
100 #include "libmesh/diagonal_matrix.h"
101 #include "libmesh/fe_interface.h"
102 #include "libmesh/petsc_solver_exception.h"
103 
104 #include <ios>
105 
106 #include "petscsnes.h"
107 #include <PetscDMMoose.h>
108 EXTERN_C_BEGIN
109 extern PetscErrorCode DMCreate_Moose(DM);
110 EXTERN_C_END
111 
112 using namespace libMesh;
113 
114 NonlinearSystemBase::NonlinearSystemBase(FEProblemBase & fe_problem,
115  System & sys,
116  const std::string & name)
117  : SolverSystem(fe_problem, fe_problem, name, Moose::VAR_SOLVER),
118  PerfGraphInterface(fe_problem.getMooseApp().perfGraph(), "NonlinearSystemBase"),
119  _sys(sys),
120  _last_nl_rnorm(0.),
121  _current_nl_its(0),
122  _residual_ghosted(NULL),
123  _Re_time_tag(-1),
124  _Re_time(NULL),
125  _Re_non_time_tag(-1),
126  _Re_non_time(NULL),
127  _scalar_kernels(/*threaded=*/false),
128  _nodal_bcs(/*threaded=*/false),
129  _preset_nodal_bcs(/*threaded=*/false),
130  _ad_preset_nodal_bcs(/*threaded=*/false),
131 #ifdef MOOSE_KOKKOS_ENABLED
132  _kokkos_kernels(/*threaded=*/false),
133  _kokkos_integrated_bcs(/*threaded=*/false),
134  _kokkos_nodal_bcs(/*threaded=*/false),
135  _kokkos_preset_nodal_bcs(/*threaded=*/false),
136  _kokkos_nodal_kernels(/*threaded=*/false),
137 #endif
138  _general_dampers(/*threaded=*/false),
139  _splits(/*threaded=*/false),
140  _increment_vec(NULL),
141  _use_finite_differenced_preconditioner(false),
142  _fdcoloring(nullptr),
143  _fsp(nullptr),
144  _add_implicit_geometric_coupling_entries_to_jacobian(false),
145  _assemble_constraints_separately(false),
146  _need_residual_ghosted(false),
147  _debugging_residuals(false),
148  _doing_dg(false),
149  _n_iters(0),
150  _n_linear_iters(0),
151  _n_residual_evaluations(0),
152  _final_residual(0.),
153  _computing_pre_smo_residual(false),
154  _pre_smo_residual(0),
155  _initial_residual(0),
156  _use_pre_smo_residual(false),
157  _print_all_var_norms(false),
158  _has_save_in(false),
159  _has_diag_save_in(false),
160  _has_nodalbc_save_in(false),
161  _has_nodalbc_diag_save_in(false),
162  _computed_scaling(false),
163  _compute_scaling_once(true),
164  _resid_vs_jac_scaling_param(0),
165  _off_diagonals_in_auto_scaling(false),
166  _auto_scaling_initd(false)
167 {
168  getResidualNonTimeVector();
169  // Don't need to add the matrix - it already exists (for now)
170  _fe_problem.addMatrixTag("SYSTEM");
171 
172  // The time matrix tag is not normally used - but must be added to the system
173  // in case it is so that objects can have 'time' in their matrix tags by default
174  _fe_problem.addMatrixTag("TIME");
175 
176  _Re_tag = _fe_problem.addVectorTag("RESIDUAL");
177 
179 
180  if (!_fe_problem.defaultGhosting())
181  {
182  auto & dof_map = _sys.get_dof_map();
183  dof_map.remove_algebraic_ghosting_functor(dof_map.default_algebraic_ghosting());
184  dof_map.set_implicit_neighbor_dofs(false);
185  }
186 }
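// Note: the "TIME" matrix tag and "RESIDUAL" vector tag registered above are looked up by
// name elsewhere in the framework. A minimal caller-side sketch (illustrative only, assuming
// the standard SubProblem tag API):
//
//   TagID time_matrix_tag = _fe_problem.getMatrixTagID("TIME");
//   TagID residual_vector_tag = _fe_problem.getVectorTagID("RESIDUAL");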
187 
188 NonlinearSystemBase::~NonlinearSystemBase() = default;
189 
190 void
191 NonlinearSystemBase::preInit()
192 {
193  SolverSystem::preInit();
194 
195  if (_fe_problem.hasDampers())
196  setupDampers();
197 
198  if (_residual_copy.get())
199  _residual_copy->init(_sys.n_dofs(), false, SERIAL);
200 
201 #ifdef MOOSE_KOKKOS_ENABLED
204 #endif
205 }
206 
207 void
208 NonlinearSystemBase::reinit()
209 {
210  // reinit is called on meshChanged() in FEProblemBase. We could implement meshChanged() instead.
211  // Subdomains might have changed
212  for (auto & functor : _displaced_mortar_functors)
213  functor.second.setupMortarMaterials();
214  for (auto & functor : _undisplaced_mortar_functors)
215  functor.second.setupMortarMaterials();
216 }
217 
218 void
219 NonlinearSystemBase::turnOffJacobian()
220 {
222  nonlinearSolver()->jacobian = NULL;
223 }
224 
225 void
226 NonlinearSystemBase::initialSetup()
227 {
228  TIME_SECTION("nlInitialSetup", 2, "Setting Up Nonlinear System");
229 
230  SolverSystem::initialSetup();
231 
232  {
233  TIME_SECTION("kernelsInitialSetup", 2, "Setting Up Kernels/BCs/Constraints");
234 
235  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
236  {
237  _kernels.initialSetup(tid);
238  _nodal_kernels.initialSetup(tid);
239  _dirac_kernels.initialSetup(tid);
240  if (_doing_dg)
241  _dg_kernels.initialSetup(tid);
242  _interface_kernels.initialSetup(tid);
243 
244  _element_dampers.initialSetup(tid);
245  _nodal_dampers.initialSetup(tid);
246  _integrated_bcs.initialSetup(tid);
247 
248  if (_fe_problem.haveFV())
249  {
250  std::vector<FVElementalKernel *> fv_elemental_kernels;
251  _fe_problem.theWarehouse()
252  .query()
253  .template condition<AttribSystem>("FVElementalKernel")
254  .template condition<AttribThread>(tid)
255  .queryInto(fv_elemental_kernels);
256 
257  for (auto * fv_kernel : fv_elemental_kernels)
258  fv_kernel->initialSetup();
259 
260  std::vector<FVFluxKernel *> fv_flux_kernels;
261  _fe_problem.theWarehouse()
262  .query()
263  .template condition<AttribSystem>("FVFluxKernel")
264  .template condition<AttribThread>(tid)
265  .queryInto(fv_flux_kernels);
266 
267  for (auto * fv_kernel : fv_flux_kernels)
268  fv_kernel->initialSetup();
269  }
270  }
271 
278 
279 #ifdef MOOSE_KOKKOS_ENABLED
284 #endif
285  }
286 
287  {
288  TIME_SECTION("mortarSetup", 2, "Initializing Mortar Interfaces");
289 
290  auto create_mortar_functors = [this](const bool displaced)
291  {
292  // go over mortar interfaces and construct functors
293  const auto & mortar_interfaces = _fe_problem.getMortarInterfaces(displaced);
294  for (const auto & [primary_secondary_boundary_pair, mortar_generation_ptr] :
295  mortar_interfaces)
296  {
297  if (!_constraints.hasActiveMortarConstraints(primary_secondary_boundary_pair, displaced))
298  continue;
299 
300  auto & mortar_constraints =
301  _constraints.getActiveMortarConstraints(primary_secondary_boundary_pair, displaced);
302 
303  auto & subproblem = displaced
304  ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
305  : static_cast<SubProblem &>(_fe_problem);
306 
307  auto & mortar_functors =
308  displaced ? _displaced_mortar_functors : _undisplaced_mortar_functors;
309 
310  mortar_functors.emplace(primary_secondary_boundary_pair,
311  ComputeMortarFunctor(mortar_constraints,
312  *mortar_generation_ptr,
313  subproblem,
314  _fe_problem,
315  displaced,
316  subproblem.assembly(0, number())));
317  }
318  };
319 
320  create_mortar_functors(false);
321  create_mortar_functors(true);
322  }
323 
324  if (_automatic_scaling)
325  {
326  if (_off_diagonals_in_auto_scaling)
327  _scaling_matrix = std::make_unique<OffDiagonalScalingMatrix<Number>>(_communicator);
328  else
329  _scaling_matrix = std::make_unique<DiagonalMatrix<Number>>(_communicator);
330  }
331 
332  if (_preconditioner)
333  _preconditioner->initialSetup();
334 }
335 
336 void
337 NonlinearSystemBase::timestepSetup()
338 {
339  SolverSystem::timestepSetup();
340 
341  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
342  {
343  _kernels.timestepSetup(tid);
344  _nodal_kernels.timestepSetup(tid);
345  _dirac_kernels.timestepSetup(tid);
346  if (_doing_dg)
347  _dg_kernels.timestepSetup(tid);
348  _interface_kernels.timestepSetup(tid);
349  _element_dampers.timestepSetup(tid);
350  _nodal_dampers.timestepSetup(tid);
351  _integrated_bcs.timestepSetup(tid);
352 
353  if (_fe_problem.haveFV())
354  {
355  std::vector<FVFluxBC *> bcs;
356  _fe_problem.theWarehouse()
357  .query()
358  .template condition<AttribSystem>("FVFluxBC")
359  .template condition<AttribThread>(tid)
360  .queryInto(bcs);
361 
362  std::vector<FVInterfaceKernel *> iks;
363  _fe_problem.theWarehouse()
364  .query()
365  .template condition<AttribSystem>("FVInterfaceKernel")
366  .template condition<AttribThread>(tid)
367  .queryInto(iks);
368 
369  std::vector<FVFluxKernel *> kernels;
370  _fe_problem.theWarehouse()
371  .query()
372  .template condition<AttribSystem>("FVFluxKernel")
373  .template condition<AttribThread>(tid)
374  .queryInto(kernels);
375 
376  for (auto * bc : bcs)
377  bc->timestepSetup();
378  for (auto * ik : iks)
379  ik->timestepSetup();
380  for (auto * kernel : kernels)
381  kernel->timestepSetup();
382  }
383  }
384  _scalar_kernels.timestepSetup();
385  _constraints.timestepSetup();
386  _general_dampers.timestepSetup();
387  _nodal_bcs.timestepSetup();
388  _preset_nodal_bcs.timestepSetup();
389  _ad_preset_nodal_bcs.timestepSetup();
390 
391 #ifdef MOOSE_KOKKOS_ENABLED
396 #endif
397 }
398 
399 void
400 NonlinearSystemBase::customSetup(const ExecFlagType & exec_type)
401 {
402  SolverSystem::customSetup(exec_type);
403 
404  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
405  {
406  _kernels.customSetup(exec_type, tid);
407  _nodal_kernels.customSetup(exec_type, tid);
408  _dirac_kernels.customSetup(exec_type, tid);
409  if (_doing_dg)
410  _dg_kernels.customSetup(exec_type, tid);
411  _interface_kernels.customSetup(exec_type, tid);
412  _element_dampers.customSetup(exec_type, tid);
413  _nodal_dampers.customSetup(exec_type, tid);
414  _integrated_bcs.customSetup(exec_type, tid);
415 
416  if (_fe_problem.haveFV())
417  {
418  std::vector<FVFluxBC *> bcs;
419  _fe_problem.theWarehouse()
420  .query()
421  .template condition<AttribSystem>("FVFluxBC")
422  .template condition<AttribThread>(tid)
423  .queryInto(bcs);
424 
425  std::vector<FVInterfaceKernel *> iks;
426  _fe_problem.theWarehouse()
427  .query()
428  .template condition<AttribSystem>("FVInterfaceKernel")
429  .template condition<AttribThread>(tid)
430  .queryInto(iks);
431 
432  std::vector<FVFluxKernel *> kernels;
433  _fe_problem.theWarehouse()
434  .query()
435  .template condition<AttribSystem>("FVFluxKernel")
436  .template condition<AttribThread>(tid)
437  .queryInto(kernels);
438 
439  for (auto * bc : bcs)
440  bc->customSetup(exec_type);
441  for (auto * ik : iks)
442  ik->customSetup(exec_type);
443  for (auto * kernel : kernels)
444  kernel->customSetup(exec_type);
445  }
446  }
447  _scalar_kernels.customSetup(exec_type);
448  _constraints.customSetup(exec_type);
449  _general_dampers.customSetup(exec_type);
450  _nodal_bcs.customSetup(exec_type);
451  _preset_nodal_bcs.customSetup(exec_type);
452  _ad_preset_nodal_bcs.customSetup(exec_type);
453 
454 #ifdef MOOSE_KOKKOS_ENABLED
455  _kokkos_kernels.customSetup(exec_type);
458  _kokkos_nodal_bcs.customSetup(exec_type);
459 #endif
460 }
461 
462 void
464 {
465  if (_fsp)
466  _fsp->setupDM();
467 }
468 
469 void
470 NonlinearSystemBase::addKernel(const std::string & kernel_name,
471  const std::string & name,
472  InputParameters & parameters)
473 {
474  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
475  {
476  // Create the kernel object via the factory and add to warehouse
477  std::shared_ptr<KernelBase> kernel =
478  _factory.create<KernelBase>(kernel_name, name, parameters, tid);
479  _kernels.addObject(kernel, tid);
480  postAddResidualObject(*kernel);
481  // Add to theWarehouse, a centralized storage for all moose objects
482  _fe_problem.theWarehouse().add(kernel);
483  }
484 
485  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
486  _has_save_in = true;
487  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
488  _has_diag_save_in = true;
489 }
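// For context, addKernel() is what a [Kernels] block in an input file ultimately routes to.
// A minimal programmatic sketch (illustrative only; "Diffusion" and the variable name "u"
// are assumptions, not taken from this file):
//
//   InputParameters params = _factory.getValidParams("Diffusion");
//   params.set<NonlinearVariableName>("variable") = "u";
//   addKernel("Diffusion", "diff", params);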
490 
491 void
492 NonlinearSystemBase::addHDGKernel(const std::string & kernel_name,
493  const std::string & name,
494  InputParameters & parameters)
495 {
496  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
497  {
498  // Create the kernel object via the factory and add to warehouse
499  auto kernel = _factory.create<HDGKernel>(kernel_name, name, parameters, tid);
500  _kernels.addObject(kernel, tid);
501  _hybridized_kernels.addObject(kernel, tid);
502  // Add to theWarehouse, a centralized storage for all moose objects
503  _fe_problem.theWarehouse().add(kernel);
504  postAddResidualObject(*kernel);
505  }
506 }
507 
508 void
509 NonlinearSystemBase::addNodalKernel(const std::string & kernel_name,
510  const std::string & name,
511  InputParameters & parameters)
512 {
513  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
514  {
515  // Create the kernel object via the factory and add to the warehouse
516  std::shared_ptr<NodalKernelBase> kernel =
517  _factory.create<NodalKernelBase>(kernel_name, name, parameters, tid);
518  _nodal_kernels.addObject(kernel, tid);
519  // Add to theWarehouse, a centralized storage for all moose objects
520  _fe_problem.theWarehouse().add(kernel);
521  postAddResidualObject(*kernel);
522  }
523 
524  if (parameters.have_parameter<std::vector<AuxVariableName>>("save_in") &&
525  parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
526  _has_save_in = true;
527  if (parameters.have_parameter<std::vector<AuxVariableName>>("diag_save_in") &&
528  parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
529  _has_diag_save_in = true;
530 }
531 
532 void
533 NonlinearSystemBase::addScalarKernel(const std::string & kernel_name,
534  const std::string & name,
535  InputParameters & parameters)
536 {
537  std::shared_ptr<ScalarKernelBase> kernel =
538  _factory.create<ScalarKernelBase>(kernel_name, name, parameters);
539  postAddResidualObject(*kernel);
540  // Add to theWarehouse, a centralized storage for all moose objects
541  _fe_problem.theWarehouse().add(kernel);
542  _scalar_kernels.addObject(kernel);
543 }
544 
545 void
546 NonlinearSystemBase::addBoundaryCondition(const std::string & bc_name,
547  const std::string & name,
548  InputParameters & parameters)
549 {
550  // ThreadID
551  THREAD_ID tid = 0;
552 
553  // Create the object
554  std::shared_ptr<BoundaryCondition> bc =
555  _factory.create<BoundaryCondition>(bc_name, name, parameters, tid);
556  postAddResidualObject(*bc);
557 
558  // Active BoundaryIDs for the object
559  const std::set<BoundaryID> & boundary_ids = bc->boundaryIDs();
560  auto bc_var = dynamic_cast<const MooseVariableFieldBase *>(&bc->variable());
561  _vars[tid].addBoundaryVar(boundary_ids, bc_var);
562 
563  // Cast to the various types of BCs
564  std::shared_ptr<NodalBCBase> nbc = std::dynamic_pointer_cast<NodalBCBase>(bc);
565  std::shared_ptr<IntegratedBCBase> ibc = std::dynamic_pointer_cast<IntegratedBCBase>(bc);
566 
567  // NodalBCBase
568  if (nbc)
569  {
570  if (nbc->checkNodalVar() && !nbc->variable().isNodal())
571  mooseError("Trying to use nodal boundary condition '",
572  nbc->name(),
573  "' on a non-nodal variable '",
574  nbc->variable().name(),
575  "'.");
576 
577  _nodal_bcs.addObject(nbc);
578  // Add to theWarehouse, a centralized storage for all moose objects
579  _fe_problem.theWarehouse().add(nbc);
580  _vars[tid].addBoundaryVars(boundary_ids, nbc->getCoupledVars());
581 
582  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
583  _has_nodalbc_save_in = true;
584  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
585  _has_nodalbc_diag_save_in = true;
586 
587  // DirichletBCs that are preset
588  std::shared_ptr<DirichletBCBase> dbc = std::dynamic_pointer_cast<DirichletBCBase>(bc);
589  if (dbc && dbc->preset())
590  _preset_nodal_bcs.addObject(dbc);
591 
592  std::shared_ptr<ADDirichletBCBase> addbc = std::dynamic_pointer_cast<ADDirichletBCBase>(bc);
593  if (addbc && addbc->preset())
594  _ad_preset_nodal_bcs.addObject(addbc);
595  }
596 
597  // IntegratedBCBase
598  else if (ibc)
599  {
600  _integrated_bcs.addObject(ibc, tid);
601  // Add to theWarehouse, a centralized storage for all moose objects
602  _fe_problem.theWarehouse().add(ibc);
603  _vars[tid].addBoundaryVars(boundary_ids, ibc->getCoupledVars());
604 
605  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
606  _has_save_in = true;
607  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
608  _has_diag_save_in = true;
609 
610  for (tid = 1; tid < libMesh::n_threads(); tid++)
611  {
612  // Create the object
613  bc = _factory.create<BoundaryCondition>(bc_name, name, parameters, tid);
614 
615  // Give users opportunity to set some parameters
616  postAddResidualObject(*bc);
617 
618  // Active BoundaryIDs for the object
619  const std::set<BoundaryID> & boundary_ids = bc->boundaryIDs();
620  _vars[tid].addBoundaryVar(boundary_ids, bc_var);
621 
622  ibc = std::static_pointer_cast<IntegratedBCBase>(bc);
623 
624  _integrated_bcs.addObject(ibc, tid);
625  _vars[tid].addBoundaryVars(boundary_ids, ibc->getCoupledVars());
626  }
627  }
628 
629  else
630  mooseError("Unknown BoundaryCondition type for object named ", bc->name());
631 }
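// Design note: NodalBCBase objects are stored unthreaded (a single tid-0 copy) because nodal
// BCs are applied in a single-threaded sweep over boundary nodes, while IntegratedBCBase
// objects are replicated per thread (the tid loop above) so that threaded element loops each
// own a copy.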
632 
633 void
634 NonlinearSystemBase::addConstraint(const std::string & c_name,
635  const std::string & name,
636  InputParameters & parameters)
637 {
638  std::shared_ptr<Constraint> constraint = _factory.create<Constraint>(c_name, name, parameters);
639  _constraints.addObject(constraint);
640  postAddResidualObject(*constraint);
641 
642  if (constraint && constraint->addCouplingEntriesToJacobian())
643  addImplicitGeometricCouplingEntriesToJacobian(true);
644 }
645 
646 void
647 NonlinearSystemBase::addDiracKernel(const std::string & kernel_name,
648  const std::string & name,
649  InputParameters & parameters)
650 {
651  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
652  {
653  std::shared_ptr<DiracKernelBase> kernel =
654  _factory.create<DiracKernelBase>(kernel_name, name, parameters, tid);
655  postAddResidualObject(*kernel);
656  _dirac_kernels.addObject(kernel, tid);
657  // Add to theWarehouse, a centralized storage for all moose objects
658  _fe_problem.theWarehouse().add(kernel);
659  }
660 }
661 
662 void
663 NonlinearSystemBase::addDGKernel(std::string dg_kernel_name,
664  const std::string & name,
665  InputParameters & parameters)
666 {
667  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
668  {
669  auto dg_kernel = _factory.create<DGKernelBase>(dg_kernel_name, name, parameters, tid);
670  _dg_kernels.addObject(dg_kernel, tid);
671  // Add to theWarehouse, a centralized storage for all moose objects
672  _fe_problem.theWarehouse().add(dg_kernel);
673  postAddResidualObject(*dg_kernel);
674  }
675 
676  _doing_dg = true;
677 
678  if (parameters.get<std::vector<AuxVariableName>>("save_in").size() > 0)
679  _has_save_in = true;
680  if (parameters.get<std::vector<AuxVariableName>>("diag_save_in").size() > 0)
681  _has_diag_save_in = true;
682 }
683 
684 void
685 NonlinearSystemBase::addInterfaceKernel(std::string interface_kernel_name,
686  const std::string & name,
687  InputParameters & parameters)
688 {
689  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
690  {
691  std::shared_ptr<InterfaceKernelBase> interface_kernel =
692  _factory.create<InterfaceKernelBase>(interface_kernel_name, name, parameters, tid);
693  postAddResidualObject(*interface_kernel);
694 
695  const std::set<BoundaryID> & boundary_ids = interface_kernel->boundaryIDs();
696  auto ik_var = dynamic_cast<const MooseVariableFieldBase *>(&interface_kernel->variable());
697  _vars[tid].addBoundaryVar(boundary_ids, ik_var);
698 
699  _interface_kernels.addObject(interface_kernel, tid);
700  // Add to theWarehouse, a centralized storage for all moose objects
701  _fe_problem.theWarehouse().add(interface_kernel);
702  _vars[tid].addBoundaryVars(boundary_ids, interface_kernel->getCoupledVars());
703  }
704 }
705 
706 void
707 NonlinearSystemBase::addDamper(const std::string & damper_name,
708  const std::string & name,
709  InputParameters & parameters)
710 {
711  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
712  {
713  std::shared_ptr<Damper> damper = _factory.create<Damper>(damper_name, name, parameters, tid);
714 
715  // Attempt to cast to the damper types
716  std::shared_ptr<ElementDamper> ed = std::dynamic_pointer_cast<ElementDamper>(damper);
717  std::shared_ptr<NodalDamper> nd = std::dynamic_pointer_cast<NodalDamper>(damper);
718  std::shared_ptr<GeneralDamper> gd = std::dynamic_pointer_cast<GeneralDamper>(damper);
719 
720  if (gd)
721  {
722  _general_dampers.addObject(gd);
723  break; // not threaded
724  }
725  else if (ed)
726  _element_dampers.addObject(ed, tid);
727  else if (nd)
728  _nodal_dampers.addObject(nd, tid);
729  else
730  mooseError("Invalid damper type");
731  }
732 }
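// Design note: a GeneralDamper operates on the whole solution vector rather than on
// per-element or per-nodal data, so only the tid-0 copy is kept and the thread loop exits
// early (the "not threaded" break above); element and nodal dampers are replicated per thread.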
733 
734 void
735 NonlinearSystemBase::addSplit(const std::string & split_name,
736  const std::string & name,
737  InputParameters & parameters)
738 {
739  std::shared_ptr<Split> split = _factory.create<Split>(split_name, name, parameters);
740  _splits.addObject(split);
741  // Add to theWarehouse, a centralized storage for all moose objects
742  _fe_problem.theWarehouse().add(split);
743 }
744 
745 std::shared_ptr<Split>
746 NonlinearSystemBase::getSplit(const std::string & name)
747 {
748  return _splits.getActiveObject(name);
749 }
750 
751 bool
752 NonlinearSystemBase::shouldEvaluatePreSMOResidual() const
753 {
755  return false;
756 
757  // The legacy behavior (#10464) _always_ performs the pre-SMO residual evaluation
758  // regardless of whether it is needed.
759  //
760  // This is not ideal and has been fixed by #23472. This legacy option ensures a smooth transition
761  // to the new behavior. Modules and Apps that want to migrate to the new behavior should set this
762  // parameter to false.
763  if (_app.parameters().get<bool>("use_legacy_initial_residual_evaluation_behavior"))
764  return true;
765 
766  return _use_pre_smo_residual;
767 }
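// The residual selected through this flag feeds relative convergence checks of the form
//   ||r_k|| / ||r_ref|| < rtol,
// where r_ref is the pre-SMO residual (evaluated before solution-modifying objects such as
// predictors run) when enabled, and the plain initial residual otherwise. A caller-side
// sketch (hypothetical names, for illustration):
//
//   const Real rel_norm = current_norm / referenceResidual();
//   const bool rel_converged = rel_norm < rtol;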
768 
769 Real
770 NonlinearSystemBase::referenceResidual() const
771 {
772  return usePreSMOResidual() ? preSMOResidual() : initialResidual();
773 }
774 
775 Real
776 NonlinearSystemBase::preSMOResidual() const
777 {
778  if (!shouldEvaluatePreSMOResidual())
779  mooseError("pre-SMO residual is requested but not evaluated.");
780 
781  return _pre_smo_residual;
782 }
783 
784 Real
785 NonlinearSystemBase::initialResidual() const
786 {
787  return _initial_residual;
788 }
789 
790 void
791 NonlinearSystemBase::setInitialResidual(Real r)
792 {
793  _initial_residual = r;
794 }
795 
796 void
797 NonlinearSystemBase::zeroVectorForResidual(const std::string & vector_name)
798 {
799  for (unsigned int i = 0; i < _vecs_to_zero_for_residual.size(); ++i)
800  if (vector_name == _vecs_to_zero_for_residual[i])
801  return;
802 
803  _vecs_to_zero_for_residual.push_back(vector_name);
804 }
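// The linear scan above is intentional: _vecs_to_zero_for_residual holds only a handful of
// named vectors (zeroed at the top of computeResidualTags()), so deduplicating with a vector
// is cheaper than maintaining a set.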
805 
806 void
807 NonlinearSystemBase::computeResidualTag(NumericVector<Number> & residual, TagID tag_id)
808 {
809  _nl_vector_tags.clear();
810  _nl_vector_tags.insert(tag_id);
811  associateVectorToTag(residual, tag_id);
812 
813  computeResidualTags(_nl_vector_tags);
814 
815  disassociateVectorFromTag(residual, tag_id);
816 
818 }
819 
820 void
821 NonlinearSystemBase::computeResidual(NumericVector<Number> & residual, TagID tag_id)
822 {
823  mooseDeprecated("Please use computeResidualTag");
824 
825  computeResidualTag(residual, tag_id);
826 }
827 
828 void
829 NonlinearSystemBase::computeResidualTags(const std::set<TagID> & tags)
830 {
831  parallel_object_only();
832 
833  TIME_SECTION("nl::computeResidualTags", 5);
834 
837 
838  const bool required_residual = tags.find(residualVectorTag()) != tags.end();
839 
841 
842  // not supposed to do anything on the matrix
843  deactiveAllMatrixTags();
844 
846 
847  for (const auto & numeric_vec : _vecs_to_zero_for_residual)
848  if (hasVector(numeric_vec))
849  {
850  NumericVector<Number> & vec = getVector(numeric_vec);
851  vec.close();
852  vec.zero();
853  }
854 
855  try
856  {
857  zeroTaggedVectors(tags);
858  computeResidualInternal(tags);
859  closeTaggedVectors(tags);
860 
861  if (required_residual)
862  {
863  auto & residual = getVector(residualVectorTag());
864  if (!_time_integrators.empty())
865  {
866  for (auto & ti : _time_integrators)
867  ti->postResidual(residual);
868  }
869  else
870  residual += *_Re_non_time;
871  residual.close();
872  }
873  if (_fe_problem.computingScalingResidual())
874  // We don't want to do nodal bcs or anything else
875  return;
876 
877  computeNodalBCs(tags);
878  closeTaggedVectors(tags);
879 
880  // If we are debugging residuals we need one more assignment to have the ghosted copy up to
881  // date
882  if (_need_residual_ghosted && _debugging_residuals && required_residual)
883  {
884  auto & residual = getVector(residualVectorTag());
885 
886  *_residual_ghosted = residual;
887  _residual_ghosted->close();
888  }
889  // Need to close and update the aux system in case residuals were saved to it.
892  if (hasSaveIn())
894  }
895  catch (MooseException & e)
896  {
897  // The buck stops here, we have already handled the exception by
898  // calling stopSolve(), it is now up to PETSc to return a
899  // "diverged" reason during the next solve.
900  }
901 
902  // not supposed to do anything on the matrix
903  activeAllMatrixTags();
904 
906 }
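// A minimal caller-side sketch of the single-tag entry point (illustrative only; `res` is a
// hypothetical vector already sized for this system):
//
//   TagID tag = _fe_problem.getVectorTagID("RESIDUAL");
//   computeResidualTag(res, tag); // assembles only this tag's contributions into res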
907 
908 void
909 NonlinearSystemBase::computeResidualAndJacobianTags(const std::set<TagID> & vector_tags,
910  const std::set<TagID> & matrix_tags)
911 {
912  const bool required_residual =
913  vector_tags.find(residualVectorTag()) != vector_tags.end();
914 
915  try
916  {
917  zeroTaggedVectors(vector_tags);
918  computeResidualAndJacobianInternal(vector_tags, matrix_tags);
919  closeTaggedVectors(vector_tags);
920  closeTaggedMatrices(matrix_tags);
921 
922  if (required_residual)
923  {
924  auto & residual = getVector(residualVectorTag());
925  if (!_time_integrators.empty())
926  {
927  for (auto & ti : _time_integrators)
928  ti->postResidual(residual);
929  }
930  else
931  residual += *_Re_non_time;
932  residual.close();
933  }
934 
935  computeNodalBCsResidualAndJacobian();
936  closeTaggedVectors(vector_tags);
937  closeTaggedMatrices(matrix_tags);
938  }
939  catch (MooseException & e)
940  {
941  // The buck stops here, we have already handled the exception by
942  // calling stopSolve(), it is now up to PETSc to return a
943  // "diverged" reason during the next solve.
944  }
945 }
946 
947 void
948 NonlinearSystemBase::onTimestepBegin()
949 {
950  for (auto & ti : _time_integrators)
951  ti->preSolve();
952  if (_predictor.get())
953  _predictor->timestepSetup();
954 }
955 
956 void
957 NonlinearSystemBase::setInitialSolution()
958 {
960 
961  NumericVector<Number> & initial_solution(solution());
962  if (_predictor.get())
963  {
964  if (_predictor->shouldApply())
965  {
966  TIME_SECTION("applyPredictor", 2, "Applying Predictor");
967 
968  _predictor->apply(initial_solution);
969  _fe_problem.predictorCleanup(initial_solution);
970  }
971  else
972  _console << " Skipping predictor this step" << std::endl;
973  }
974 
975  // do nodal BC
976  {
977  TIME_SECTION("initialBCs", 2, "Applying BCs To Initial Condition");
978 
979  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
980  for (const auto & bnode : bnd_nodes)
981  {
982  BoundaryID boundary_id = bnode->_bnd_id;
983  Node * node = bnode->_node;
984 
985  if (node->processor_id() == processor_id())
986  {
987  bool has_preset_nodal_bcs = _preset_nodal_bcs.hasActiveBoundaryObjects(boundary_id);
988  bool has_ad_preset_nodal_bcs = _ad_preset_nodal_bcs.hasActiveBoundaryObjects(boundary_id);
989 
990  // reinit variables in nodes
991  if (has_preset_nodal_bcs || has_ad_preset_nodal_bcs)
992  _fe_problem.reinitNodeFace(node, boundary_id, 0);
993 
994  if (has_preset_nodal_bcs)
995  {
996  const auto & preset_bcs = _preset_nodal_bcs.getActiveBoundaryObjects(boundary_id);
997  for (const auto & preset_bc : preset_bcs)
998  preset_bc->computeValue(initial_solution);
999  }
1000  if (has_ad_preset_nodal_bcs)
1001  {
1002  const auto & preset_bcs_res = _ad_preset_nodal_bcs.getActiveBoundaryObjects(boundary_id);
1003  for (const auto & preset_bc : preset_bcs_res)
1004  preset_bc->computeValue(initial_solution);
1005  }
1006  }
1007  }
1008  }
1009 
1010 #ifdef MOOSE_KOKKOS_ENABLED
1013 #endif
1014 
1015  _sys.solution->close();
1016  update();
1017 
1018  // Set constraint secondary values
1019  setConstraintSecondaryValues(initial_solution, false);
1020 
1021  if (_fe_problem.getDisplacedProblem())
1022  setConstraintSecondaryValues(initial_solution, true);
1023 }
1024 
1025 void
1026 NonlinearSystemBase::setPredictor(std::shared_ptr<Predictor> predictor)
1027 {
1028  _predictor = predictor;
1029 }
1030 
1031 void
1032 NonlinearSystemBase::subdomainSetup(SubdomainID subdomain, THREAD_ID tid)
1033 {
1035 
1036  _kernels.subdomainSetup(subdomain, tid);
1037  _nodal_kernels.subdomainSetup(subdomain, tid);
1038  _element_dampers.subdomainSetup(subdomain, tid);
1039  _nodal_dampers.subdomainSetup(subdomain, tid);
1040 }
1041 
1042 NumericVector<Number> &
1043 NonlinearSystemBase::getResidualTimeVector()
1044 {
1045  if (!_Re_time)
1046  {
1047  _Re_time_tag = _fe_problem.addVectorTag("TIME");
1048 
1049  // Most applications don't need the expense of ghosting
1050  ParallelType ptype = _need_residual_ghosted ? GHOSTED : PARALLEL;
1051  _Re_time = &addVector(_Re_time_tag, false, ptype);
1052  }
1053  else if (_need_residual_ghosted && _Re_time->type() == PARALLEL)
1054  {
1055  const auto vector_name = _subproblem.vectorTagName(_Re_time_tag);
1056 
1057  // If an application changes its mind, the libMesh API lets us
1058  // change the vector.
1059  _Re_time = &system().add_vector(vector_name, false, GHOSTED);
1060  }
1061 
1062  return *_Re_time;
1063 }
1064 
1065 NumericVector<Number> &
1066 NonlinearSystemBase::getResidualNonTimeVector()
1067 {
1068  if (!_Re_non_time)
1069  {
1070  _Re_non_time_tag = _fe_problem.addVectorTag("NONTIME");
1071 
1072  // Most applications don't need the expense of ghosting
1073  ParallelType ptype = _need_residual_ghosted ? GHOSTED : PARALLEL;
1074  _Re_non_time = &addVector(_Re_non_time_tag, false, ptype);
1075  }
1076  else if (_need_residual_ghosted && _Re_non_time->type() == PARALLEL)
1077  {
1078  const auto vector_name = _subproblem.vectorTagName(_Re_non_time_tag);
1079 
1080  // If an application changes its mind, the libMesh API lets us
1081  // change the vector.
1082  _Re_non_time = &system().add_vector(vector_name, false, GHOSTED);
1083  }
1084 
1085  return *_Re_non_time;
1086 }
1087 
1088 NumericVector<Number> &
1089 NonlinearSystemBase::residualVector(TagID tag)
1090 {
1091  mooseDeprecated("Please use getVector()");
1092  switch (tag)
1093  {
1094  case 0:
1095  return getResidualNonTimeVector();
1096 
1097  case 1:
1098  return getResidualTimeVector();
1099 
1100  default:
1101  mooseError("The required residual vector is not available");
1102  }
1103 }
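// Deprecated mapping kept for backwards compatibility: tag 0 is the non-time residual and
// tag 1 the time residual. New code should look vectors up by TagID through getVector().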
1104 
1105 void
1106 NonlinearSystemBase::enforceNodalConstraintsResidual(NumericVector<Number> & residual)
1107 {
1108  THREAD_ID tid = 0; // constraints are going to be done single-threaded
1109  residual.close();
1110  if (_constraints.hasActiveNodalConstraints())
1111  {
1112  const auto & ncs = _constraints.getActiveNodalConstraints();
1113  for (const auto & nc : ncs)
1114  {
1115  std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
1116  std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();
1117 
1118  if ((secondary_node_ids.size() > 0) && (primary_node_ids.size() > 0))
1119  {
1120  _fe_problem.reinitNodes(primary_node_ids, tid);
1121  _fe_problem.reinitNodesNeighbor(secondary_node_ids, tid);
1122  nc->computeResidual(residual);
1123  }
1124  }
1125  _fe_problem.addCachedResidualDirectly(residual, tid);
1126  residual.close();
1127  }
1128 }
1129 
1130 bool
1131 NonlinearSystemBase::enforceNodalConstraintsJacobian(SparseMatrix<Number> & jacobian_to_view)
1132 {
1133  if (!hasMatrix(systemMatrixTag()))
1134  mooseError(" A system matrix is required");
1135 
1136  THREAD_ID tid = 0; // constraints are going to be done single-threaded
1137 
1138  if (_constraints.hasActiveNodalConstraints())
1139  {
1140  const auto & ncs = _constraints.getActiveNodalConstraints();
1141  for (const auto & nc : ncs)
1142  {
1143  std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
1144  std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();
1145 
1146  if ((secondary_node_ids.size() > 0) && (primary_node_ids.size() > 0))
1147  {
1148  _fe_problem.reinitNodes(primary_node_ids, tid);
1149  _fe_problem.reinitNodesNeighbor(secondary_node_ids, tid);
1150  nc->computeJacobian(jacobian_to_view);
1151  }
1152  }
1154 
1155  return true;
1156  }
1157  else
1158  return false;
1159 }
1160 
1161 void
1162 NonlinearSystemBase::reinitNodeFace(const Node & secondary_node,
1163  const BoundaryID secondary_boundary,
1164  const PenetrationInfo & info,
1165  const bool displaced)
1166 {
1167  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
1168  : static_cast<SubProblem &>(_fe_problem);
1169 
1170  const Elem * primary_elem = info._elem;
1171  unsigned int primary_side = info._side_num;
1172  std::vector<Point> points;
1173  points.push_back(info._closest_point);
1174 
1175  // *These next steps MUST be done in this order!*
1176  // ADL: This is a Chesterton's fence situation. I don't know which calls exactly the above comment
1177  // is referring to. If I had to guess I would guess just the reinitNodeFace and prepareAssembly
1178  // calls since the former will size the variable's dof indices and then the latter will resize the
1179  // residual/Jacobian based off the variable's cached dof indices size
1180 
1181  // This reinits the variables that exist on the secondary node
1182  _fe_problem.reinitNodeFace(&secondary_node, secondary_boundary, 0);
1183 
1184  // This will set aside residual and jacobian space for the variables that have dofs on
1185  // the secondary node
1186  _fe_problem.prepareAssembly(0);
1187 
1188  _fe_problem.setNeighborSubdomainID(primary_elem, 0);
1189 
1190  //
1191  // Reinit material on undisplaced mesh
1192  //
1193 
1194  const Elem * const undisplaced_primary_elem =
1195  displaced ? _mesh.elemPtr(primary_elem->id()) : primary_elem;
1196  const Point undisplaced_primary_physical_point =
1197  [&points, displaced, primary_elem, undisplaced_primary_elem]()
1198  {
1199  if (displaced)
1200  {
1201  const Point reference_point =
1202  FEMap::inverse_map(primary_elem->dim(), primary_elem, points[0]);
1203  return FEMap::map(primary_elem->dim(), undisplaced_primary_elem, reference_point);
1204  }
1205  else
1206  // If our penetration locator is on the reference mesh, then our undisplaced
1207  // physical point is simply the point coming from the penetration locator
1208  return points[0];
1209  }();
1210 
1211  _fe_problem.reinitNeighborPhys(
1212  undisplaced_primary_elem, primary_side, {undisplaced_primary_physical_point}, 0);
1213  // Stateful material properties are only initialized for neighbor material data for internal faces
1214  // for discontinuous Galerkin methods or for conforming interfaces for interface kernels. We don't
1215  // have either of those use cases here where we likely have disconnected meshes
1216  _fe_problem.reinitMaterialsNeighbor(primary_elem->subdomain_id(), 0, /*swap_stateful=*/false);
1217 
1218  // Reinit points for constraint enforcement
1219  if (displaced)
1220  subproblem.reinitNeighborPhys(primary_elem, primary_side, points, 0);
1221 }
1222 
1223 void
1224 NonlinearSystemBase::setConstraintSecondaryValues(NumericVector<Number> & solution, bool displaced)
1225 {
1226 
1227  if (displaced)
1228  mooseAssert(_fe_problem.getDisplacedProblem(),
1229  "If we're calling this method with displaced = true, then we better well have a "
1230  "displaced problem");
1231  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
1232  : static_cast<SubProblem &>(_fe_problem);
1233  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;
1234 
1235  bool constraints_applied = false;
1236 
1237  for (const auto & it : penetration_locators)
1238  {
1239  PenetrationLocator & pen_loc = *(it.second);
1240 
1241  std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;
1242 
1243  BoundaryID secondary_boundary = pen_loc._secondary_boundary;
1244  BoundaryID primary_boundary = pen_loc._primary_boundary;
1245 
1246  if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
1247  {
1248  const auto & constraints =
1249  _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);
1250  std::unordered_set<unsigned int> needed_mat_props;
1251  for (const auto & constraint : constraints)
1252  {
1253  const auto & mp_deps = constraint->getMatPropDependencies();
1254  needed_mat_props.insert(mp_deps.begin(), mp_deps.end());
1255  }
1256  _fe_problem.setActiveMaterialProperties(needed_mat_props, /*tid=*/0);
1257 
1258  for (unsigned int i = 0; i < secondary_nodes.size(); i++)
1259  {
1260  dof_id_type secondary_node_num = secondary_nodes[i];
1261  Node & secondary_node = _mesh.nodeRef(secondary_node_num);
1262 
1263  if (secondary_node.processor_id() == processor_id())
1264  {
1265  if (pen_loc._penetration_info[secondary_node_num])
1266  {
1267  PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];
1268 
1269  reinitNodeFace(secondary_node, secondary_boundary, info, displaced);
1270 
1271  for (const auto & nfc : constraints)
1272  {
1273  if (nfc->isExplicitConstraint())
1274  continue;
1275  // Return if this constraint does not correspond to the primary-secondary pair
1276  // prepared by the outer loops.
1277  // This continue statement is required when, e.g. one secondary surface constrains
1278  // more than one primary surface.
1279  if (nfc->secondaryBoundary() != secondary_boundary ||
1280  nfc->primaryBoundary() != primary_boundary)
1281  continue;
1282 
1283  if (nfc->shouldApply())
1284  {
1285  constraints_applied = true;
1286  nfc->computeSecondaryValue(solution);
1287  }
1288 
1289  if (nfc->hasWritableCoupledVariables())
1290  {
1291  Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
1292  for (auto * var : nfc->getWritableCoupledVariables())
1293  {
1294  if (var->isNodalDefined())
1295  var->insert(_fe_problem.getAuxiliarySystem().solution());
1296  }
1297  }
1298  }
1299  }
1300  }
1301  }
1302  }
1303  }
1304 
1305  // go over NodeElemConstraints
1306  std::set<dof_id_type> unique_secondary_node_ids;
1307 
1308  for (const auto & secondary_id : _mesh.meshSubdomains())
1309  {
1310  for (const auto & primary_id : _mesh.meshSubdomains())
1311  {
1312  if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
1313  {
1314  const auto & constraints =
1315  _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);
1316 
1317  // get unique set of ids of all nodes on current block
1318  unique_secondary_node_ids.clear();
1319  const MeshBase & meshhelper = _mesh.getMesh();
1320  for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
1321  meshhelper.active_subdomain_elements_end(secondary_id)))
1322  {
1323  for (auto & n : elem->node_ref_range())
1324  unique_secondary_node_ids.insert(n.id());
1325  }
1326 
1327  for (auto secondary_node_id : unique_secondary_node_ids)
1328  {
1329  Node & secondary_node = _mesh.nodeRef(secondary_node_id);
1330 
1331  // check if secondary node is on current processor
1332  if (secondary_node.processor_id() == processor_id())
1333  {
1334  // This reinits the variables that exist on the secondary node
1335  _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);
1336 
1337  // This will set aside residual and jacobian space for the variables that have dofs
1338  // on the secondary node
1339  _fe_problem.prepareAssembly(0);
1340 
1341  for (const auto & nec : constraints)
1342  {
1343  if (nec->shouldApply())
1344  {
1345  constraints_applied = true;
1346  nec->computeSecondaryValue(solution);
1347  }
1348  }
1349  }
1350  }
1351  }
1352  }
1353  }
1354 
1355  // See if constraints were applied anywhere
1356  _communicator.max(constraints_applied);
1357 
1358  if (constraints_applied)
1359  {
1360  solution.close();
1361  update();
1362  }
1363 }
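// Note the parallel pattern used above: each rank records locally whether any constraint
// fired, then _communicator.max(constraints_applied) reduces the flag (a logical OR for
// bools) so that all ranks agree on whether the collective solution close()/update() calls
// must run.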
1364 
1365 void
1366 NonlinearSystemBase::constraintResiduals(NumericVector<Number> & residual, bool displaced)
1367 {
1368  // Make sure the residual is in a good state
1369  residual.close();
1370 
1371  if (displaced)
1372  mooseAssert(_fe_problem.getDisplacedProblem(),
1373  "If we're calling this method with displaced = true, then we better well have a "
1374  "displaced problem");
1375  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
1376  : static_cast<SubProblem &>(_fe_problem);
1377  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;
1378 
1379  bool constraints_applied;
1380  bool residual_has_inserted_values = false;
1381  if (!_assemble_constraints_separately)
1382  constraints_applied = false;
1383  for (const auto & it : penetration_locators)
1384  {
1385  if (_assemble_constraints_separately)
1386  {
1387  // Reset the constraint_applied flag before each new constraint, as they need to be
1388  // assembled separately
1389  constraints_applied = false;
1390  }
1391  PenetrationLocator & pen_loc = *(it.second);
1392 
1393  std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;
1394 
1395  BoundaryID secondary_boundary = pen_loc._secondary_boundary;
1396  BoundaryID primary_boundary = pen_loc._primary_boundary;
1397 
1398  bool has_writable_variables(false);
1399 
1400  if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
1401  {
1402  const auto & constraints =
1403  _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);
1404 
1405  for (unsigned int i = 0; i < secondary_nodes.size(); i++)
1406  {
1407  dof_id_type secondary_node_num = secondary_nodes[i];
1408  Node & secondary_node = _mesh.nodeRef(secondary_node_num);
1409 
1410  if (secondary_node.processor_id() == processor_id())
1411  {
1412  if (pen_loc._penetration_info[secondary_node_num])
1413  {
1414  PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];
1415 
1416  reinitNodeFace(secondary_node, secondary_boundary, info, displaced);
1417 
1418  for (const auto & nfc : constraints)
1419  {
1420  // Return if this constraint does not correspond to the primary-secondary pair
1421  // prepared by the outer loops.
1422  // This continue statement is required when, e.g. one secondary surface constrains
1423  // more than one primary surface.
1424  if (nfc->secondaryBoundary() != secondary_boundary ||
1425  nfc->primaryBoundary() != primary_boundary)
1426  continue;
1427 
1428  if (nfc->shouldApply())
1429  {
1430  constraints_applied = true;
1431  nfc->computeResidual();
1432 
1433  if (nfc->overwriteSecondaryResidual())
1434  {
1435  // The below will actually overwrite the residual for every single dof that
1436  // lives on the node. We definitely don't want to do that!
1437  // _fe_problem.setResidual(residual, 0);
1438 
1439  const auto & secondary_var = nfc->variable();
1440  const auto & secondary_dofs = secondary_var.dofIndices();
1441  mooseAssert(secondary_dofs.size() == secondary_var.count(),
1442  "We are on a node so there should only be one dof per variable (for "
1443  "an ArrayVariable we should have a number of dofs equal to the "
1444  "number of components");
1445 
1446  // Assume that if the user is overwriting the secondary residual, then they are
1447  // supplying residuals that do not correspond to their other physics
1448  // (e.g. Kernels), hence we should not apply a scalingFactor that is normally
1449  // based on the order of their other physics (e.g. Kernels)
1450  std::vector<Number> values = {nfc->secondaryResidual()};
1451  residual.insert(values, secondary_dofs);
1452  residual_has_inserted_values = true;
1453  }
1454  else
1457  }
1458  if (nfc->hasWritableCoupledVariables())
1459  {
1460  Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
1461  has_writable_variables = true;
1462  for (auto * var : nfc->getWritableCoupledVariables())
1463  {
1464  if (var->isNodalDefined())
1465  var->insert(_fe_problem.getAuxiliarySystem().solution());
1466  }
1467  }
1468  }
1469  }
1470  }
1471  }
1472  }
1473  _communicator.max(has_writable_variables);
1474 
1475  if (has_writable_variables)
1476  {
1477  // Explicit contact dynamic constraints write to auxiliary variables and update the old
1478  // displacement solution on the constraint boundaries. Close solutions and update system
1479  // accordingly.
1482  solutionOld().close();
1483  }
1484 
1485  if (_assemble_constraints_separately)
1486  {
1487  // Make sure that secondary contribution to primary are assembled, and ghosts have been
1488  // exchanged, as current primaries might become secondaries on next iteration and will need to
1489  // contribute their former secondaries' contributions to the future primaries. See if
1490  // constraints were applied anywhere
1491  _communicator.max(constraints_applied);
1492 
1493  if (constraints_applied)
1494  {
1495  // If any of the above constraints inserted values in the residual, it needs to be
1496  // assembled before adding the cached residuals below.
1497  _communicator.max(residual_has_inserted_values);
1498  if (residual_has_inserted_values)
1499  {
1500  residual.close();
1501  residual_has_inserted_values = false;
1502  }
1503  _fe_problem.addCachedResidualDirectly(residual, 0);
1504  residual.close();
1505 
1506  if (_need_residual_ghosted)
1507  *_residual_ghosted = residual;
1508  }
1509  }
1510  }
1511  if (!_assemble_constraints_separately)
1512  {
1513  _communicator.max(constraints_applied);
1514 
1515  if (constraints_applied)
1516  {
1517  // If any of the above constraints inserted values in the residual, it needs to be assembled
1518  // before adding the cached residuals below.
1519  _communicator.max(residual_has_inserted_values);
1520  if (residual_has_inserted_values)
1521  residual.close();
1522 
1523  _fe_problem.addCachedResidualDirectly(residual, 0);
1524  residual.close();
1525 
1526  if (_need_residual_ghosted)
1527  *_residual_ghosted = residual;
1528  }
1529  }
1530 
1531  // go over element-element constraint interface
1532  THREAD_ID tid = 0;
1533  const auto & element_pair_locators = subproblem.geomSearchData()._element_pair_locators;
1534  for (const auto & it : element_pair_locators)
1535  {
1536  ElementPairLocator & elem_pair_loc = *(it.second);
1537 
1538  if (_constraints.hasActiveElemElemConstraints(it.first, displaced))
1539  {
1540  // ElemElemConstraint objects
1541  const auto & element_constraints =
1542  _constraints.getActiveElemElemConstraints(it.first, displaced);
1543 
1544  // go over pair elements
1545  const std::list<std::pair<const Elem *, const Elem *>> & elem_pairs =
1546  elem_pair_loc.getElemPairs();
1547  for (const auto & pr : elem_pairs)
1548  {
1549  const Elem * elem1 = pr.first;
1550  const Elem * elem2 = pr.second;
1551 
1552  if (elem1->processor_id() != processor_id())
1553  continue;
1554 
1555  const ElementPairInfo & info = elem_pair_loc.getElemPairInfo(pr);
1556 
1557  // for each element process constraints on the
1558  for (const auto & ec : element_constraints)
1559  {
1560  _fe_problem.setCurrentSubdomainID(elem1, tid);
1561  subproblem.reinitElemPhys(elem1, info._elem1_constraint_q_point, tid);
1562  _fe_problem.setNeighborSubdomainID(elem2, tid);
1563  subproblem.reinitNeighborPhys(elem2, info._elem2_constraint_q_point, tid);
1564 
1565  ec->prepareShapes(ec->variable().number());
1566  ec->prepareNeighborShapes(ec->variable().number());
1567 
1568  ec->reinit(info);
1569  ec->computeResidual();
1570  _fe_problem.cacheResidual(tid);
1571  _fe_problem.cacheResidualNeighbor(tid);
1572  }
1573  _fe_problem.addCachedResidual(tid);
1574  }
1575  }
1576  }
1577 
1578  // go over NodeElemConstraints
1579  std::set<dof_id_type> unique_secondary_node_ids;
1580 
1581  constraints_applied = false;
1582  residual_has_inserted_values = false;
1583  bool has_writable_variables = false;
1584  for (const auto & secondary_id : _mesh.meshSubdomains())
1585  {
1586  for (const auto & primary_id : _mesh.meshSubdomains())
1587  {
1588  if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
1589  {
1590  const auto & constraints =
1591  _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);
1592 
1593  // get unique set of ids of all nodes on current block
1594  unique_secondary_node_ids.clear();
1595  const MeshBase & meshhelper = _mesh.getMesh();
1596  for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
1597  meshhelper.active_subdomain_elements_end(secondary_id)))
1598  {
1599  for (auto & n : elem->node_ref_range())
1600  unique_secondary_node_ids.insert(n.id());
1601  }
1602 
1603  for (auto secondary_node_id : unique_secondary_node_ids)
1604  {
1605  Node & secondary_node = _mesh.nodeRef(secondary_node_id);
1606  // check if secondary node is on current processor
1607  if (secondary_node.processor_id() == processor_id())
1608  {
1609  // This reinits the variables that exist on the secondary node
1610  _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);
1611 
1612  // This will set aside residual and jacobian space for the variables that have dofs
1613  // on the secondary node
1614  _fe_problem.prepareAssembly(0);
1615 
1616  for (const auto & nec : constraints)
1617  {
1618  if (nec->shouldApply())
1619  {
1620  constraints_applied = true;
1621  nec->computeResidual();
1622 
1623  if (nec->overwriteSecondaryResidual())
1624  {
1625  _fe_problem.setResidual(residual, 0);
1626  residual_has_inserted_values = true;
1627  }
1628  else
1631  }
1632  if (nec->hasWritableCoupledVariables())
1633  {
1634  Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
1635  has_writable_variables = true;
1636  for (auto * var : nec->getWritableCoupledVariables())
1637  {
1638  if (var->isNodalDefined())
1639  var->insert(_fe_problem.getAuxiliarySystem().solution());
1640  }
1641  }
1642  }
1644  }
1645  }
1646  }
1647  }
1648  }
1649  _communicator.max(constraints_applied);
1650 
1651  if (constraints_applied)
1652  {
1653  // If any of the above constraints inserted values in the residual, it needs to be assembled
1654  // before adding the cached residuals below.
1655  _communicator.max(residual_has_inserted_values);
1656  if (residual_has_inserted_values)
1657  residual.close();
1658 
1659  _fe_problem.addCachedResidualDirectly(residual, 0);
1660  residual.close();
1661 
1662  if (_need_residual_ghosted)
1663  *_residual_ghosted = residual;
1664  }
1665  _communicator.max(has_writable_variables);
1666 
1667  if (has_writable_variables)
1668  {
1669  // Explicit contact dynamic constraints write to auxiliary variables and update the old
1670  // displacement solution on the constraint boundaries. Close solutions and update system
1671  // accordingly.
1674  solutionOld().close();
1675  }
1676 
1677  // We may have additional tagged vectors that also need to be accumulated
1679 }
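// Summary of the phases above: (1) node-face (contact-style) constraints, optionally
// overwriting the secondary residual; (2) element-element constraints from the element pair
// locators; (3) node-element constraints per subdomain pair. Each phase reduces its local
// "applied" flag across ranks before the collective residual close().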
1680 
1681 void
1682 NonlinearSystemBase::overwriteNodeFace(NumericVector<Number> & soln)
1683 {
1684  // Overwrite results from integrator in case we have explicit dynamics contact constraints
1685  auto & subproblem = _fe_problem.getDisplacedProblem()
1686  ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
1687  : static_cast<SubProblem &>(_fe_problem);
1688  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;
1689 
1690  for (const auto & it : penetration_locators)
1691  {
1692  PenetrationLocator & pen_loc = *(it.second);
1693 
1694  const auto & secondary_nodes = pen_loc._nearest_node._secondary_nodes;
1695  const BoundaryID secondary_boundary = pen_loc._secondary_boundary;
1696  const BoundaryID primary_boundary = pen_loc._primary_boundary;
1697 
1698  if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, true))
1699  {
1700  const auto & constraints =
1701  _constraints.getActiveNodeFaceConstraints(secondary_boundary, true);
1702  for (const auto i : index_range(secondary_nodes))
1703  {
1704  const auto secondary_node_num = secondary_nodes[i];
1705  const Node & secondary_node = _mesh.nodeRef(secondary_node_num);
1706 
1707  if (secondary_node.processor_id() == processor_id())
1708  if (pen_loc._penetration_info[secondary_node_num])
1709  for (const auto & nfc : constraints)
1710  {
1711  if (!nfc->isExplicitConstraint())
1712  continue;
1713 
1714  // Return if this constraint does not correspond to the primary-secondary pair
1715  // prepared by the outer loops.
1716  // This continue statement is required when, e.g. one secondary surface constrains
1717  // more than one primary surface.
1718  if (nfc->secondaryBoundary() != secondary_boundary ||
1719  nfc->primaryBoundary() != primary_boundary)
1720  continue;
1721 
1722  nfc->overwriteBoundaryVariables(soln, secondary_node);
1723  }
1724  }
1725  }
1726  }
1727  soln.close();
1728 }
1729 
1730 void
1731 NonlinearSystemBase::residualSetup()
1732 {
1733  TIME_SECTION("residualSetup", 3);
1734 
1735  SolverSystem::residualSetup();
1736 
1737  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
1738  {
1739  _kernels.residualSetup(tid);
1740  _nodal_kernels.residualSetup(tid);
1741  _dirac_kernels.residualSetup(tid);
1742  if (_doing_dg)
1743  _dg_kernels.residualSetup(tid);
1744  _interface_kernels.residualSetup(tid);
1745  _element_dampers.residualSetup(tid);
1746  _nodal_dampers.residualSetup(tid);
1747  _integrated_bcs.residualSetup(tid);
1748  }
1755 
1756 #ifdef MOOSE_KOKKOS_ENABLED
1761 #endif
1762 
1763  // Avoid recursion
1764  if (this == &_fe_problem.currentNonlinearSystem())
1767 }
1768 
1769 void
1770 NonlinearSystemBase::computeResidualInternal(const std::set<TagID> & tags)
1771 {
1772  parallel_object_only();
1773 
1774  TIME_SECTION("computeResidualInternal", 3);
1775 
1776  residualSetup();
1777 
1778 #ifdef MOOSE_KOKKOS_ENABLED
1780  computeKokkosResidual(tags);
1781 #endif
1782 
1783  const auto vector_tag_data = _fe_problem.getVectorTags(tags);
1784 
1785  // Residual contributions from UOs - for now this is used for ray tracing
1786  // and ray kernels that contribute to the residual (think line sources)
1787  std::vector<UserObject *> uos;
1788  _fe_problem.theWarehouse()
1789  .query()
1790  .condition<AttribSystem>("UserObject")
1791  .condition<AttribExecOns>(EXEC_PRE_KERNELS)
1792  .queryInto(uos);
1793  for (auto & uo : uos)
1794  uo->residualSetup();
1795  for (auto & uo : uos)
1796  {
1797  uo->initialize();
1798  uo->execute();
1799  uo->finalize();
1800  }
1801 
1802  // reinit scalar variables
1803  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
1804  _fe_problem.reinitScalars(tid);
1805 
1806  // residual contributions from the domain
1807  PARALLEL_TRY
1808  {
1809  TIME_SECTION("Kernels", 3 /*, "Computing Kernels"*/);
1810 
1811  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
1812 
1813  ComputeResidualThread cr(_fe_problem, tags);
1814  Threads::parallel_reduce(elem_range, cr);
1815 
1816  // We pass face information directly to FV residual objects for their evaluation. Consequently
1817  // we must make sure to do separate threaded loops for 1) undisplaced face information objects
1818  // and undisplaced residual objects and 2) displaced face information objects and displaced
1819  // residual objects
1821  if (_fe_problem.haveFV())
1822  {
1823  ComputeFVFluxResidualThread<FVRange> fvr(
1824  _fe_problem, this->number(), tags, /*on_displaced=*/false);
1825  FVRange faces(_fe_problem.mesh().ownedFaceInfoBegin(), _fe_problem.mesh().ownedFaceInfoEnd());
1826  Threads::parallel_reduce(faces, fvr);
1827  }
1828  if (auto displaced_problem = _fe_problem.getDisplacedProblem();
1829  displaced_problem && displaced_problem->haveFV())
1830  {
1831  ComputeFVFluxResidualThread<FVRange> fvr(
1832  _fe_problem, this->number(), tags, /*on_displaced=*/true);
1833  FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
1834  displaced_problem->mesh().ownedFaceInfoEnd());
1835  Threads::parallel_reduce(faces, fvr);
1836  }
1837 
1838  unsigned int n_threads = libMesh::n_threads();
1839  for (unsigned int i = 0; i < n_threads;
1840  i++) // Add any cached residuals that might be hanging around
1841  _fe_problem.addCachedResidual(i);
1842  }
1843  PARALLEL_CATCH;
1844 
1845  // residual contributions from the scalar kernels
1846  PARALLEL_TRY
1847  {
1848  // do scalar kernels (not sure how to thread this)
1849  if (_scalar_kernels.hasActiveObjects())
1850  {
1851  TIME_SECTION("ScalarKernels", 3 /*, "Computing ScalarKernels"*/);
1852 
1853  MooseObjectWarehouse<ScalarKernelBase> * scalar_kernel_warehouse;
1854  // This code should be refactored once we can do tags for scalar
1855  // kernels
1856  // Should redo this based on Warehouse
1857  if (!tags.size() || tags.size() == _fe_problem.numVectorTags(Moose::VECTOR_TAG_RESIDUAL))
1858  scalar_kernel_warehouse = &_scalar_kernels;
1859  else if (tags.size() == 1)
1860  scalar_kernel_warehouse =
1861  &(_scalar_kernels.getVectorTagObjectWarehouse(*(tags.begin()), 0));
1862  else
1863  // scalar_kernels is not threading
1864  scalar_kernel_warehouse = &(_scalar_kernels.getVectorTagsObjectWarehouse(tags, 0));
1865 
1866  bool have_scalar_contributions = false;
1867  const auto & scalars = scalar_kernel_warehouse->getActiveObjects();
1868  for (const auto & scalar_kernel : scalars)
1869  {
1870  scalar_kernel->reinit();
1871  const std::vector<dof_id_type> & dof_indices = scalar_kernel->variable().dofIndices();
1872  const DofMap & dof_map = scalar_kernel->variable().dofMap();
1873  const dof_id_type first_dof = dof_map.first_dof();
1874  const dof_id_type end_dof = dof_map.end_dof();
1875  for (dof_id_type dof : dof_indices)
1876  {
1877  if (dof >= first_dof && dof < end_dof)
1878  {
1879  scalar_kernel->computeResidual();
1880  have_scalar_contributions = true;
1881  break;
1882  }
1883  }
1884  }
1885  if (have_scalar_contributions)
1887  }
1888  }
1889  PARALLEL_CATCH;
1890 
1891  // residual contributions from Block NodalKernels
1892  PARALLEL_TRY
1893  {
1894  if (_nodal_kernels.hasActiveBlockObjects())
1895  {
1896  TIME_SECTION("NodalKernels", 3 /*, "Computing NodalKernels"*/);
1897 
1898  ComputeNodalKernelsThread cnk(_fe_problem, _nodal_kernels, tags);
1899 
1900  ConstNodeRange & range = *_mesh.getLocalNodeRange();
1901 
1902  if (range.begin() != range.end())
1903  {
1904  _fe_problem.reinitNode(*range.begin(), 0);
1905 
1906  Threads::parallel_reduce(range, cnk);
1907 
1908  unsigned int n_threads = libMesh::n_threads();
1909  for (unsigned int i = 0; i < n_threads;
1910  i++) // Add any cached residuals that might be hanging around
1911  _fe_problem.addCachedResidual(i);
1912  }
1913  }
1914  }
1915  PARALLEL_CATCH;
1916 
1917  if (_fe_problem.computingScalingResidual())
1918  // We computed the volumetric objects. We can return now before we get into
1919  // any strongly enforced constraint conditions or penalty-type objects
1920  // (DGKernels, IntegratedBCs, InterfaceKernels, Constraints)
1921  return;
1922 
1923  // residual contributions from boundary NodalKernels
1924  PARALLEL_TRY
1925  {
1926  if (_nodal_kernels.hasActiveBoundaryObjects())
1927  {
1928  TIME_SECTION("NodalKernelBCs", 3 /*, "Computing NodalKernelBCs"*/);
1929 
1931 
1933 
1934  Threads::parallel_reduce(bnd_node_range, cnk);
1935 
1936  unsigned int n_threads = libMesh::n_threads();
1937  for (unsigned int i = 0; i < n_threads;
1938  i++) // Add any cached residuals that might be hanging around
1939  _fe_problem.addCachedResidual(i);
1940  }
1941  }
1942  PARALLEL_CATCH;
1943 
1945 
1946  if (_residual_copy.get())
1947  {
1948  _Re_non_time->close();
1949  _Re_non_time->localize(*_residual_copy);
1950  }
1951 
1952  if (_need_residual_ghosted)
1953  {
1954  _Re_non_time->close();
1955  *_residual_ghosted = *_Re_non_time;
1956  _residual_ghosted->close();
1957  }
1958 
1959  PARALLEL_TRY { computeDiracContributions(tags, false); }
1960  PARALLEL_CATCH;
1961 
1963  {
1964  PARALLEL_TRY { enforceNodalConstraintsResidual(*_Re_non_time); }
1965  PARALLEL_CATCH;
1966  _Re_non_time->close();
1967  }
1968 
1969  // Add in Residual contributions from other Constraints
1971  {
1972  PARALLEL_TRY
1973  {
1974  // Undisplaced Constraints
1975  constraintResiduals(*_Re_non_time, false);
1976 
1977  // Displaced Constraints
1978  if (_fe_problem.getDisplacedProblem())
1979  constraintResiduals(*_Re_non_time, true);
1980 
1983  }
1984  PARALLEL_CATCH;
1985  _Re_non_time->close();
1986  }
1987 
1988  // Accumulate the occurrence of solution invalid warnings for the current iteration cumulative
1989  // counters
1992 }
1993 
1994 void
1995 NonlinearSystemBase::computeResidualAndJacobianInternal(const std::set<TagID> & vector_tags,
1996  const std::set<TagID> & matrix_tags)
1997 {
1998  TIME_SECTION("computeResidualAndJacobianInternal", 3);
1999 
2000  // Make matrix ready to use
2001  activeAllMatrixTags();
2002 
2003  for (auto tag : matrix_tags)
2004  {
2005  if (!hasMatrix(tag))
2006  continue;
2007 
2008  auto & jacobian = getMatrix(tag);
2009  // Necessary for speed
2010  if (auto petsc_matrix = dynamic_cast<PetscMatrix<Number> *>(&jacobian))
2011  {
2012  LibmeshPetscCall(MatSetOption(petsc_matrix->mat(),
2013  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2014  PETSC_TRUE));
2015  if (!_fe_problem.errorOnJacobianNonzeroReallocation())
2016  LibmeshPetscCall(
2017  MatSetOption(petsc_matrix->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
2018  if (_fe_problem.ignoreZerosInJacobian())
2019  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2020  MAT_IGNORE_ZERO_ENTRIES,
2021  PETSC_TRUE));
2022  }
2023  }
2024 
2025  residualSetup();
2026 
2027  // Residual contributions from UOs - for now this is used for ray tracing
2028  // and ray kernels that contribute to the residual (think line sources)
2029  std::vector<UserObject *> uos;
2030  _fe_problem.theWarehouse()
2031  .query()
2032  .condition<AttribSystem>("UserObject")
2033  .condition<AttribExecOns>(EXEC_PRE_KERNELS)
2034  .queryInto(uos);
2035  for (auto & uo : uos)
2036  uo->residualSetup();
2037  for (auto & uo : uos)
2038  {
2039  uo->initialize();
2040  uo->execute();
2041  uo->finalize();
2042  }
2043 
2044  // reinit scalar variables
2045  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
2046  _fe_problem.reinitScalars(tid);
2047 
2048  // residual contributions from the domain
2049  PARALLEL_TRY
2050  {
2051  TIME_SECTION("Kernels", 3 /*, "Computing Kernels"*/);
2052 
2053  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
2054 
2055  ComputeResidualAndJacobianThread crj(_fe_problem, vector_tags, matrix_tags);
2056  Threads::parallel_reduce(elem_range, crj);
2057 
2058  using FVRange = StoredRange<MooseMesh::const_face_info_iterator, const FaceInfo *>;
2059  if (_fe_problem.haveFV())
2060  {
2061  ComputeFVFluxRJThread<FVRange> fvrj(
2062  _fe_problem, this->number(), vector_tags, matrix_tags, /*on_displaced=*/false);
2063  FVRange faces(_fe_problem.mesh().ownedFaceInfoBegin(), _fe_problem.mesh().ownedFaceInfoEnd());
2064  Threads::parallel_reduce(faces, fvrj);
2065  }
2066  if (auto displaced_problem = _fe_problem.getDisplacedProblem();
2067  displaced_problem && displaced_problem->haveFV())
2068  {
2069  ComputeFVFluxRJThread<FVRange> fvr(
2070  _fe_problem, this->number(), vector_tags, matrix_tags, /*on_displaced=*/true);
2071  FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
2072  displaced_problem->mesh().ownedFaceInfoEnd());
2073  Threads::parallel_reduce(faces, fvr);
2074  }
2075 
2076  mortarConstraints(Moose::ComputeType::ResidualAndJacobian, vector_tags, matrix_tags);
2077 
2078  unsigned int n_threads = libMesh::n_threads();
2079  for (unsigned int i = 0; i < n_threads;
2080  i++) // Add any cached residuals that might be hanging around
2081  {
2082  _fe_problem.addCachedResidual(i);
2083  _fe_problem.addCachedJacobian(i);
2084  }
2085  }
2086  PARALLEL_CATCH;
2087 }
2088 
2089 void
2090 NonlinearSystemBase::computeNodalBCs(NumericVector<Number> & residual)
2091 {
2092  _nl_vector_tags.clear();
2093 
2094  const auto & residual_vector_tags = _fe_problem.getVectorTags(Moose::VECTOR_TAG_RESIDUAL);
2095  for (const auto & residual_vector_tag : residual_vector_tags)
2096  _nl_vector_tags.insert(residual_vector_tag._id);
2097 
2098  associateVectorToTag(residual, residualVectorTag());
2099  computeNodalBCs(residual, _nl_vector_tags);
2100  disassociateVectorFromTag(residual, residualVectorTag());
2101 }
2102 
2103 void
2104 NonlinearSystemBase::computeNodalBCs(NumericVector<Number> & residual, const std::set<TagID> & tags)
2105 {
2106  associateVectorToTag(residual, residualVectorTag());
2107 
2108  computeNodalBCs(tags);
2109 
2110  disassociateVectorFromTag(residual, residualVectorTag());
2111 }
2112 
2113 void
2114 NonlinearSystemBase::computeNodalBCs(const std::set<TagID> & tags)
2115 {
2116  // We need to close the save_in variables on the aux system before NodalBCBases clear the
2117  // dofs on boundary nodes
2118  if (_has_save_in)
2119  _fe_problem.getAuxiliarySystem().solution().close();
2120 
2121  // Select the nodal BCs matching the requested tags
2122  MooseObjectWarehouse<NodalBCBase> * nbc_warehouse;
2123 
2124  if (tags.size() == _fe_problem.numVectorTags(Moose::VECTOR_TAG_RESIDUAL) || !tags.size())
2125  nbc_warehouse = &_nodal_bcs;
2126  else if (tags.size() == 1)
2127  nbc_warehouse = &(_nodal_bcs.getVectorTagObjectWarehouse(*(tags.begin()), 0));
2128  else
2129  nbc_warehouse = &(_nodal_bcs.getVectorTagsObjectWarehouse(tags, 0));
2130 
2131  // Return early if there are no active nodal BCs
2132  if (!nbc_warehouse->size())
2133  return;
2134 
2135  PARALLEL_TRY
2136  {
2137  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
2138 
2139  if (!bnd_nodes.empty())
2140  {
2141  TIME_SECTION("NodalBCs", 3 /*, "Computing NodalBCs"*/);
2142 
2143  for (const auto & bnode : bnd_nodes)
2144  {
2145  BoundaryID boundary_id = bnode->_bnd_id;
2146  Node * node = bnode->_node;
2147 
2148  if (node->processor_id() == processor_id() &&
2149  nbc_warehouse->hasActiveBoundaryObjects(boundary_id))
2150  {
2151  // reinit variables in nodes
2152  _fe_problem.reinitNodeFace(node, boundary_id, 0);
2153 
2154  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(boundary_id);
2155  for (const auto & nbc : bcs)
2156  if (nbc->shouldApply())
2157  nbc->computeResidual();
2158  }
2159  }
2160  }
2161  }
2162  PARALLEL_CATCH;
2163 
2164  if (_Re_time)
2165  _Re_time->close();
2166  _Re_non_time->close();
2167 }
2168 
2169 void
2170 NonlinearSystemBase::computeNodalBCsResidualAndJacobian()
2171 {
2172  PARALLEL_TRY
2173  {
2174  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
2175 
2176  if (!bnd_nodes.empty())
2177  {
2178  TIME_SECTION("NodalBCs", 3 /*, "Computing NodalBCs"*/);
2179 
2180  for (const auto & bnode : bnd_nodes)
2181  {
2182  BoundaryID boundary_id = bnode->_bnd_id;
2183  Node * node = bnode->_node;
2184 
2185  if (node->processor_id() == processor_id())
2186  {
2187  // reinit variables in nodes
2188  _fe_problem.reinitNodeFace(node, boundary_id, 0);
2189  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
2190  {
2191  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
2192  for (const auto & nbc : bcs)
2193  if (nbc->shouldApply())
2194  nbc->computeResidualAndJacobian();
2195  }
2196  }
2197  }
2198  }
2199  }
2200  PARALLEL_CATCH;
2201 
2202  // Set the cached NodalBCBase values in the Jacobian matrix
2204 }
2205 
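// Helper for the implicit geometric coupling routines below: collects every dof index
// that this system's variables have at the node with the given id.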
2206 void
2207 NonlinearSystemBase::getNodeDofs(dof_id_type node_id, std::vector<dof_id_type> & dofs)
2208 {
2209  const Node & node = _mesh.nodeRef(node_id);
2210  unsigned int s = number();
2211  if (node.has_dofs(s))
2212  {
2213  for (unsigned int v = 0; v < nVariables(); v++)
2214  for (unsigned int c = 0; c < node.n_comp(s, v); c++)
2215  dofs.push_back(node.dof_number(s, v, c));
2216  }
2217 }
2218 
2219 void
2220 NonlinearSystemBase::findImplicitGeometricCouplingEntries(
2221  GeometricSearchData & geom_search_data,
2222  std::unordered_map<dof_id_type, std::vector<dof_id_type>> & graph)
2223 {
2224  const auto & node_to_elem_map = _mesh.nodeToElemMap();
2225  const auto & nearest_node_locators = geom_search_data._nearest_node_locators;
2226  for (const auto & it : nearest_node_locators)
2227  {
2228  std::vector<dof_id_type> & secondary_nodes = it.second->_secondary_nodes;
2229 
2230  for (const auto & secondary_node : secondary_nodes)
2231  {
2232  std::set<dof_id_type> unique_secondary_indices;
2233  std::set<dof_id_type> unique_primary_indices;
2234 
2235  auto node_to_elem_pair = node_to_elem_map.find(secondary_node);
2236  if (node_to_elem_pair != node_to_elem_map.end())
2237  {
2238  const std::vector<dof_id_type> & elems = node_to_elem_pair->second;
2239 
2240  // Get the dof indices from each elem connected to the node
2241  for (const auto & cur_elem : elems)
2242  {
2243  std::vector<dof_id_type> dof_indices;
2244  dofMap().dof_indices(_mesh.elemPtr(cur_elem), dof_indices);
2245 
2246  for (const auto & dof : dof_indices)
2247  unique_secondary_indices.insert(dof);
2248  }
2249  }
2250 
2251  std::vector<dof_id_type> primary_nodes = it.second->_neighbor_nodes[secondary_node];
2252 
2253  for (const auto & primary_node : primary_nodes)
2254  {
2255  auto primary_node_to_elem_pair = node_to_elem_map.find(primary_node);
2256  mooseAssert(primary_node_to_elem_pair != node_to_elem_map.end(),
2257  "Missing entry in node to elem map");
2258  const std::vector<dof_id_type> & primary_node_elems = primary_node_to_elem_pair->second;
2259 
2260  // Get the dof indices from each elem connected to the node
2261  for (const auto & cur_elem : primary_node_elems)
2262  {
2263  std::vector<dof_id_type> dof_indices;
2264  dofMap().dof_indices(_mesh.elemPtr(cur_elem), dof_indices);
2265 
2266  for (const auto & dof : dof_indices)
2267  unique_primary_indices.insert(dof);
2268  }
2269  }
2270 
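// Tie every secondary dof to every primary dof (and vice versa) so the coupling
// graph, and hence the resulting sparsity pattern, stays symmetric.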
2271  for (const auto & secondary_id : unique_secondary_indices)
2272  for (const auto & primary_id : unique_primary_indices)
2273  {
2274  graph[secondary_id].push_back(primary_id);
2275  graph[primary_id].push_back(secondary_id);
2276  }
2277  }
2278  }
2279 
2280  // handle node-to-node constraints
2281  const auto & ncs = _constraints.getActiveNodalConstraints();
2282  for (const auto & nc : ncs)
2283  {
2284  std::vector<dof_id_type> primary_dofs;
2285  std::vector<dof_id_type> & primary_node_ids = nc->getPrimaryNodeId();
2286  for (const auto & node_id : primary_node_ids)
2287  {
2288  Node * node = _mesh.queryNodePtr(node_id);
2289  if (node && node->processor_id() == this->processor_id())
2290  {
2291  getNodeDofs(node_id, primary_dofs);
2292  }
2293  }
2294 
2295  _communicator.allgather(primary_dofs);
2296 
2297  std::vector<dof_id_type> secondary_dofs;
2298  std::vector<dof_id_type> & secondary_node_ids = nc->getSecondaryNodeId();
2299  for (const auto & node_id : secondary_node_ids)
2300  {
2301  Node * node = _mesh.queryNodePtr(node_id);
2302  if (node && node->processor_id() == this->processor_id())
2303  {
2304  getNodeDofs(node_id, secondary_dofs);
2305  }
2306  }
2307 
2308  _communicator.allgather(secondary_dofs);
2309 
2310  for (const auto & primary_id : primary_dofs)
2311  for (const auto & secondary_id : secondary_dofs)
2312  {
2313  graph[primary_id].push_back(secondary_id);
2314  graph[secondary_id].push_back(primary_id);
2315  }
2316  }
2317 
2318  // Make every entry sorted and unique
2319  for (auto & it : graph)
2320  {
2321  std::vector<dof_id_type> & row = it.second;
2322  std::sort(row.begin(), row.end());
2323  std::vector<dof_id_type>::iterator uit = std::unique(row.begin(), row.end());
2324  row.resize(uit - row.begin());
2325  }
2326 }
2327 
2328 void
2329 NonlinearSystemBase::addImplicitGeometricCouplingEntries(GeometricSearchData & geom_search_data)
2330 {
2331  if (!hasMatrix(systemMatrixTag()))
2332  mooseError("Need a system matrix");
2333 
2334  // At this point, we have no idea how to make
2335  // this work with the tag system
2336  auto & jacobian = getMatrix(systemMatrixTag());
2337 
2338  std::unordered_map<dof_id_type, std::vector<dof_id_type>> graph;
2339 
2340  findImplicitGeometricCouplingEntries(geom_search_data, graph);
2341 
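// Stamp an explicit zero into each coupled (row, column) location so the entry is
// allocated in the matrix before the constraint contributions that need it arrive.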
2342  for (const auto & it : graph)
2343  {
2344  dof_id_type dof = it.first;
2345  const auto & row = it.second;
2346 
2347  for (const auto & coupled_dof : row)
2348  jacobian.add(dof, coupled_dof, 0);
2349  }
2350 }
2351 
2352 void
2353 NonlinearSystemBase::constraintJacobians(const SparseMatrix<Number> & jacobian_to_view,
2354  bool displaced)
2355 {
2356  if (!hasMatrix(systemMatrixTag()))
2357  mooseError("A system matrix is required");
2358 
2359  auto & jacobian = getMatrix(systemMatrixTag());
2360 
2360 
2361  if (!_fe_problem.errorOnJacobianNonzeroReallocation())
2362  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2363  MAT_NEW_NONZERO_ALLOCATION_ERR,
2364  PETSC_FALSE));
2365  if (_fe_problem.ignoreZerosInJacobian())
2366  LibmeshPetscCall(MatSetOption(
2367  static_cast<PetscMatrix<Number> &>(jacobian).mat(), MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE));
2368 
2369  std::vector<numeric_index_type> zero_rows;
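// Rows whose existing Jacobian entries a constraint intends to overwrite; they are
// zeroed below once assembly is complete.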
2370 
2371  if (displaced)
2372  mooseAssert(_fe_problem.getDisplacedProblem(),
2373  "If we're calling this method with displaced = true, then we better well have a "
2374  "displaced problem");
2375  auto & subproblem = displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
2376  : static_cast<SubProblem &>(_fe_problem);
2377  const auto & penetration_locators = subproblem.geomSearchData()._penetration_locators;
2378 
2379  bool constraints_applied;
2380  if (!_assemble_constraints_separately)
2381  constraints_applied = false;
2382  for (const auto & it : penetration_locators)
2383  {
2384  if (_assemble_constraints_separately)
2385  {
2386  // Reset the constraint_applied flag before each new constraint, as they need to be
2387  // assembled separately
2388  constraints_applied = false;
2389  }
2390  PenetrationLocator & pen_loc = *(it.second);
2391 
2392  std::vector<dof_id_type> & secondary_nodes = pen_loc._nearest_node._secondary_nodes;
2393 
2394  BoundaryID secondary_boundary = pen_loc._secondary_boundary;
2395  BoundaryID primary_boundary = pen_loc._primary_boundary;
2396 
2397  zero_rows.clear();
2398  if (_constraints.hasActiveNodeFaceConstraints(secondary_boundary, displaced))
2399  {
2400  const auto & constraints =
2401  _constraints.getActiveNodeFaceConstraints(secondary_boundary, displaced);
2402 
2403  for (const auto & secondary_node_num : secondary_nodes)
2404  {
2405  Node & secondary_node = _mesh.nodeRef(secondary_node_num);
2406 
2407  if (secondary_node.processor_id() == processor_id())
2408  {
2409  if (pen_loc._penetration_info[secondary_node_num])
2410  {
2411  PenetrationInfo & info = *pen_loc._penetration_info[secondary_node_num];
2412 
2413  reinitNodeFace(secondary_node, secondary_boundary, info, displaced);
2415 
2416  for (const auto & nfc : constraints)
2417  {
2418  if (nfc->isExplicitConstraint())
2419  continue;
2420  // Skip this constraint if it does not correspond to the primary-secondary pair
2421  // prepared by the outer loops.
2422  // This continue statement is required when, e.g., one secondary surface constrains
2423  // more than one primary surface.
2424  if (nfc->secondaryBoundary() != secondary_boundary ||
2425  nfc->primaryBoundary() != primary_boundary)
2426  continue;
2427 
2428  nfc->_jacobian = &jacobian_to_view;
2429 
2430  if (nfc->shouldApply())
2431  {
2432  constraints_applied = true;
2433 
2434  nfc->prepareShapes(nfc->variable().number());
2435  nfc->prepareNeighborShapes(nfc->variable().number());
2436 
2437  nfc->computeJacobian();
2438 
2439  if (nfc->overwriteSecondaryJacobian())
2440  {
2441  // Add this variable's dof's row to be zeroed
2442  zero_rows.push_back(nfc->variable().nodalDofIndex());
2443  }
2444 
2445  std::vector<dof_id_type> secondary_dofs(1, nfc->variable().nodalDofIndex());
2446 
2447  // Assume that if the user is overwriting the secondary Jacobian, then they are
2448  // supplying Jacobians that do not correspond to their other physics
2449  // (e.g. Kernels), hence we should not apply a scalingFactor that is normally
2450  // based on the order of their other physics (e.g. Kernels)
2451  Real scaling_factor =
2452  nfc->overwriteSecondaryJacobian() ? 1. : nfc->variable().scalingFactor();
2453 
2454  // Cache the jacobian block for the secondary side
2455  nfc->addJacobian(_fe_problem.assembly(0, number()),
2456  nfc->_Kee,
2457  secondary_dofs,
2458  nfc->_connected_dof_indices,
2459  scaling_factor);
2460 
2461  // Cache Ken, Kne, Knn
2462  if (nfc->addCouplingEntriesToJacobian())
2463  {
2464  // Make sure we use a proper scaling factor (e.g. don't use an interior scaling
2465  // factor when we're overwriting secondary stuff)
2466  nfc->addJacobian(_fe_problem.assembly(0, number()),
2467  nfc->_Ken,
2468  secondary_dofs,
2469  nfc->primaryVariable().dofIndicesNeighbor(),
2470  scaling_factor);
2471 
2472  // Use _connected_dof_indices to get all the correct columns
2473  nfc->addJacobian(_fe_problem.assembly(0, number()),
2474  nfc->_Kne,
2475  nfc->primaryVariable().dofIndicesNeighbor(),
2476  nfc->_connected_dof_indices,
2477  nfc->primaryVariable().scalingFactor());
2478 
2479  // We've handled Ken and Kne, finally handle Knn
2481  }
2482 
2483  // Do the off-diagonals next
2484  const std::vector<MooseVariableFEBase *> coupled_vars = nfc->getCoupledMooseVars();
2485  for (const auto & jvar : coupled_vars)
2486  {
2487  // Only compute jacobians for nonlinear variables
2488  if (jvar->kind() != Moose::VAR_SOLVER)
2489  continue;
2490 
2491  // Only compute Jacobian entries if this coupling is being used by the
2492  // preconditioner
2493  if (nfc->variable().number() == jvar->number() ||
2495  nfc->variable().number(), jvar->number(), this->number()))
2496  continue;
2497 
2498  // Need to zero out the matrices first
2500 
2501  nfc->prepareShapes(nfc->variable().number());
2502  nfc->prepareNeighborShapes(jvar->number());
2503 
2504  nfc->computeOffDiagJacobian(jvar->number());
2505 
2506  // Cache the jacobian block for the secondary side
2507  nfc->addJacobian(_fe_problem.assembly(0, number()),
2508  nfc->_Kee,
2509  secondary_dofs,
2510  nfc->_connected_dof_indices,
2511  scaling_factor);
2512 
2513  // Cache Ken, Kne, Knn
2514  if (nfc->addCouplingEntriesToJacobian())
2515  {
2516  // Make sure we use a proper scaling factor (e.g. don't use an interior scaling
2517  // factor when we're overwriting secondary stuff)
2518  nfc->addJacobian(_fe_problem.assembly(0, number()),
2519  nfc->_Ken,
2520  secondary_dofs,
2521  jvar->dofIndicesNeighbor(),
2522  scaling_factor);
2523 
2524  // Use _connected_dof_indices to get all the correct columns
2525  nfc->addJacobian(_fe_problem.assembly(0, number()),
2526  nfc->_Kne,
2527  nfc->variable().dofIndicesNeighbor(),
2528  nfc->_connected_dof_indices,
2529  nfc->variable().scalingFactor());
2530 
2531  // We've handled Ken and Kne, finally handle Knn
2533  }
2534  }
2535  }
2536  }
2537  }
2538  }
2539  }
2540  }
2541  if (_assemble_constraints_separately)
2542  {
2543  // See if constraints were applied anywhere
2544  _communicator.max(constraints_applied);
2545 
2546  if (constraints_applied)
2547  {
2548  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2549  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2550  PETSC_TRUE));
2551 
2552  jacobian.close();
2553  jacobian.zero_rows(zero_rows, 0.0);
2554  jacobian.close();
2556  jacobian.close();
2557  }
2558  }
2559  }
2560  if (!_assemble_constraints_separately)
2561  {
2562  // See if constraints were applied anywhere
2563  _communicator.max(constraints_applied);
2564 
2565  if (constraints_applied)
2566  {
2567  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2568  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2569  PETSC_TRUE));
2570 
2571  jacobian.close();
2572  jacobian.zero_rows(zero_rows, 0.0);
2573  jacobian.close();
2575  jacobian.close();
2576  }
2577  }
2578 
2579  THREAD_ID tid = 0;
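// Constraints are not threaded at this time, so all element-pair work below runs on
// thread 0.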
2580  // go over element-element constraint interface
2581  const auto & element_pair_locators = subproblem.geomSearchData()._element_pair_locators;
2582  for (const auto & it : element_pair_locators)
2583  {
2584  ElementPairLocator & elem_pair_loc = *(it.second);
2585 
2586  if (_constraints.hasActiveElemElemConstraints(it.first, displaced))
2587  {
2588  // ElemElemConstraint objects
2589  const auto & element_constraints =
2590  _constraints.getActiveElemElemConstraints(it.first, displaced);
2591 
2592  // go over pair elements
2593  const std::list<std::pair<const Elem *, const Elem *>> & elem_pairs =
2594  elem_pair_loc.getElemPairs();
2595  for (const auto & pr : elem_pairs)
2596  {
2597  const Elem * elem1 = pr.first;
2598  const Elem * elem2 = pr.second;
2599 
2600  if (elem1->processor_id() != processor_id())
2601  continue;
2602 
2603  const ElementPairInfo & info = elem_pair_loc.getElemPairInfo(pr);
2604 
2605  // for each element pair, process the constraints defined on it
2606  for (const auto & ec : element_constraints)
2607  {
2608  _fe_problem.setCurrentSubdomainID(elem1, tid);
2609  subproblem.reinitElemPhys(elem1, info._elem1_constraint_q_point, tid);
2610  _fe_problem.setNeighborSubdomainID(elem2, tid);
2611  subproblem.reinitNeighborPhys(elem2, info._elem2_constraint_q_point, tid);
2612 
2613  ec->prepareShapes(ec->variable().number());
2614  ec->prepareNeighborShapes(ec->variable().number());
2615 
2616  ec->reinit(info);
2617  ec->computeJacobian();
2620  }
2622  }
2623  }
2624  }
2625 
2626  // go over NodeElemConstraints
2627  std::set<dof_id_type> unique_secondary_node_ids;
2628  constraints_applied = false;
2629  for (const auto & secondary_id : _mesh.meshSubdomains())
2630  {
2631  for (const auto & primary_id : _mesh.meshSubdomains())
2632  {
2633  if (_constraints.hasActiveNodeElemConstraints(secondary_id, primary_id, displaced))
2634  {
2635  const auto & constraints =
2636  _constraints.getActiveNodeElemConstraints(secondary_id, primary_id, displaced);
2637 
2638  // get unique set of ids of all nodes on current block
2639  unique_secondary_node_ids.clear();
2640  const MeshBase & meshhelper = _mesh.getMesh();
2641  for (const auto & elem : as_range(meshhelper.active_subdomain_elements_begin(secondary_id),
2642  meshhelper.active_subdomain_elements_end(secondary_id)))
2643  {
2644  for (auto & n : elem->node_ref_range())
2645  unique_secondary_node_ids.insert(n.id());
2646  }
2647 
2648  for (auto secondary_node_id : unique_secondary_node_ids)
2649  {
2650  const Node & secondary_node = _mesh.nodeRef(secondary_node_id);
2651  // check if secondary node is on current processor
2652  if (secondary_node.processor_id() == processor_id())
2653  {
2654  // This reinits the variables that exist on the secondary node
2655  _fe_problem.reinitNodeFace(&secondary_node, secondary_id, 0);
2656 
2657  // This will set aside residual and jacobian space for the variables that have dofs
2658  // on the secondary node
2661 
2662  for (const auto & nec : constraints)
2663  {
2664  if (nec->shouldApply())
2665  {
2666  constraints_applied = true;
2667 
2668  nec->_jacobian = &jacobian_to_view;
2669  nec->prepareShapes(nec->variable().number());
2670  nec->prepareNeighborShapes(nec->variable().number());
2671 
2672  nec->computeJacobian();
2673 
2674  if (nec->overwriteSecondaryJacobian())
2675  {
2676  // Add this variable's dof's row to be zeroed
2677  zero_rows.push_back(nec->variable().nodalDofIndex());
2678  }
2679 
2680  std::vector<dof_id_type> secondary_dofs(1, nec->variable().nodalDofIndex());
2681 
2682  // Cache the jacobian block for the secondary side
2683  nec->addJacobian(_fe_problem.assembly(0, number()),
2684  nec->_Kee,
2685  secondary_dofs,
2686  nec->_connected_dof_indices,
2687  nec->variable().scalingFactor());
2688 
2689  // Cache the jacobian block for the primary side
2690  nec->addJacobian(_fe_problem.assembly(0, number()),
2691  nec->_Kne,
2692  nec->primaryVariable().dofIndicesNeighbor(),
2693  nec->_connected_dof_indices,
2694  nec->primaryVariable().scalingFactor());
2695 
2698 
2699  // Do the off-diagonals next
2700  const std::vector<MooseVariableFEBase *> coupled_vars = nec->getCoupledMooseVars();
2701  for (const auto & jvar : coupled_vars)
2702  {
2703  // Only compute jacobians for nonlinear variables
2704  if (jvar->kind() != Moose::VAR_SOLVER)
2705  continue;
2706 
2707  // Only compute Jacobian entries if this coupling is being used by the
2708  // preconditioner
2709  if (nec->variable().number() == jvar->number() ||
2711  nec->variable().number(), jvar->number(), this->number()))
2712  continue;
2713 
2714  // Need to zero out the matrices first
2716 
2717  nec->prepareShapes(nec->variable().number());
2718  nec->prepareNeighborShapes(jvar->number());
2719 
2720  nec->computeOffDiagJacobian(jvar->number());
2721 
2722  // Cache the jacobian block for the secondary side
2723  nec->addJacobian(_fe_problem.assembly(0, number()),
2724  nec->_Kee,
2725  secondary_dofs,
2726  nec->_connected_dof_indices,
2727  nec->variable().scalingFactor());
2728 
2729  // Cache the jacobian block for the primary side
2730  nec->addJacobian(_fe_problem.assembly(0, number()),
2731  nec->_Kne,
2732  nec->variable().dofIndicesNeighbor(),
2733  nec->_connected_dof_indices,
2734  nec->variable().scalingFactor());
2735 
2738  }
2739  }
2740  }
2741  }
2742  }
2743  }
2744  }
2745  }
2746  // See if constraints were applied anywhere
2747  _communicator.max(constraints_applied);
2748 
2749  if (constraints_applied)
2750  {
2751  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2752  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2753  PETSC_TRUE));
2754 
2755  jacobian.close();
2756  jacobian.zero_rows(zero_rows, 0.0);
2757  jacobian.close();
2759  jacobian.close();
2760  }
2761 }
2762 
2763 void
2764 NonlinearSystemBase::computeScalarKernelsJacobians(const std::set<TagID> & tags)
2765 {
2766  MooseObjectWarehouse<ScalarKernelBase> * scalar_kernel_warehouse;
2767 
2768  if (!tags.size() || tags.size() == _fe_problem.numMatrixTags())
2769  scalar_kernel_warehouse = &_scalar_kernels;
2770  else if (tags.size() == 1)
2771  scalar_kernel_warehouse = &(_scalar_kernels.getMatrixTagObjectWarehouse(*(tags.begin()), 0));
2772  else
2773  scalar_kernel_warehouse = &(_scalar_kernels.getMatrixTagsObjectWarehouse(tags, 0));
2774 
2775  // Compute the diagonal block for scalar variables
2776  if (scalar_kernel_warehouse->hasActiveObjects())
2777  {
2778  const auto & scalars = scalar_kernel_warehouse->getActiveObjects();
2779 
2780  _fe_problem.reinitScalars(/*tid=*/0);
2781 
2782  _fe_problem.reinitOffDiagScalars(/*_tid*/ 0);
2783 
2784  bool have_scalar_contributions = false;
2785  for (const auto & kernel : scalars)
2786  {
2787  if (!kernel->computesJacobian())
2788  continue;
2789 
2790  kernel->reinit();
2791  const std::vector<dof_id_type> & dof_indices = kernel->variable().dofIndices();
2792  const DofMap & dof_map = kernel->variable().dofMap();
2793  const dof_id_type first_dof = dof_map.first_dof();
2794  const dof_id_type end_dof = dof_map.end_dof();
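// As in the residual path above, only the processor owning the scalar variable's dofs
// computes this kernel's Jacobian, so each block is contributed exactly once.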
2795  for (dof_id_type dof : dof_indices)
2796  {
2797  if (dof >= first_dof && dof < end_dof)
2798  {
2799  kernel->computeJacobian();
2800  _fe_problem.addJacobianOffDiagScalar(kernel->variable().number());
2801  have_scalar_contributions = true;
2802  break;
2803  }
2804  }
2805  }
2806 
2807  if (have_scalar_contributions)
2808  _fe_problem.addJacobianScalar();
2809  }
2810 }
2811 
2812 void
2813 NonlinearSystemBase::jacobianSetup()
2814 {
2815  SolverSystem::jacobianSetup();
2816 
2817  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); tid++)
2818  {
2819  _kernels.jacobianSetup(tid);
2822  if (_doing_dg)
2828  }
2835 
2836 #ifdef MOOSE_KOKKOS_ENABLED
2841 #endif
2842 
2843  // Avoid recursion
2844  if (this == &_fe_problem.currentNonlinearSystem())
2845  _fe_problem.jacobianSetup();
2846 
2847 }
2848 
2849 void
2850 NonlinearSystemBase::computeJacobianInternal(const std::set<TagID> & tags)
2851 {
2852  TIME_SECTION("computeJacobianInternal", 3);
2853 
2855 
2856  // Make matrix ready to use
2857  activeAllMatrixTags();
2858 
2859  for (auto tag : tags)
2860  {
2861  if (!hasMatrix(tag))
2862  continue;
2863 
2864  auto & jacobian = getMatrix(tag);
2865  // Necessary for speed
2866  if (auto petsc_matrix = dynamic_cast<PetscMatrix<Number> *>(&jacobian))
2867  {
2868  LibmeshPetscCall(MatSetOption(petsc_matrix->mat(),
2869  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
2870  PETSC_TRUE));
2871  if (!_fe_problem.errorOnJacobianNonzeroReallocation())
2872  LibmeshPetscCall(
2873  MatSetOption(petsc_matrix->mat(), MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
2874  if (_fe_problem.ignoreZerosInJacobian())
2875  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
2876  MAT_IGNORE_ZERO_ENTRIES,
2877  PETSC_TRUE));
2878  }
2879  }
2880 
2881  jacobianSetup();
2882 
2883 #ifdef MOOSE_KOKKOS_ENABLED
2885  computeKokkosJacobian(tags);
2886 #endif
2887 
2888  // Jacobian contributions from UOs - for now this is used for ray tracing
2889  // and ray kernels that contribute to the Jacobian (think line sources)
2890  std::vector<UserObject *> uos;
2891  _fe_problem.theWarehouse()
2892  .query()
2893  .condition<AttribSystem>("UserObject")
2894  .condition<AttribExecOns>(EXEC_PRE_KERNELS)
2895  .queryInto(uos);
2896  for (auto & uo : uos)
2897  uo->jacobianSetup();
2898  for (auto & uo : uos)
2899  {
2900  uo->initialize();
2901  uo->execute();
2902  uo->finalize();
2903  }
2904 
2905  // reinit scalar variables
2906  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
2907  _fe_problem.reinitScalars(tid);
2908 
2909  PARALLEL_TRY
2910  {
2911  // We would like to compute ScalarKernels, block NodalKernels, FVFluxKernels, and mortar objects
2912  // up front because we want these included whether we are computing an ordinary Jacobian or a
2913  // Jacobian for determining variable scaling factors
2914  computeScalarKernelsJacobians(tags);
2915 
2916  // Block restricted Nodal Kernels
2917  if (_nodal_kernels.hasActiveBlockObjects())
2918  {
2921  Threads::parallel_reduce(range, cnkjt);
2922 
2923  unsigned int n_threads = libMesh::n_threads();
2924  for (unsigned int i = 0; i < n_threads;
2925  i++) // Add any cached jacobians that might be hanging around
2926  _fe_problem.addCachedJacobian(i);
2927  }
2928 
2930  if (_fe_problem.haveFV())
2931  {
2932  // the same loop works for both residual and jacobians because it keys
2933  // off of FEProblem's _currently_computing_jacobian parameter
2935  _fe_problem, this->number(), tags, /*on_displaced=*/false);
2937  Threads::parallel_reduce(faces, fvj);
2938  }
2939  if (auto displaced_problem = _fe_problem.getDisplacedProblem();
2940  displaced_problem && displaced_problem->haveFV())
2941  {
2943  _fe_problem, this->number(), tags, /*on_displaced=*/true);
2944  FVRange faces(displaced_problem->mesh().ownedFaceInfoBegin(),
2945  displaced_problem->mesh().ownedFaceInfoEnd());
2946  Threads::parallel_reduce(faces, fvr);
2947  }
2948 
2949  mortarConstraints(Moose::ComputeType::Jacobian, {}, tags);
2950 
2951  // Get our element range for looping over
2952  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
2953 
2954  if (_fe_problem.computingScalingJacobian())
2955  {
2956  // Only compute Jacobians corresponding to the diagonals of volumetric compute objects
2957  // because this typically gives us a good representation of the physics. NodalBCs and
2958  // Constraints can introduce dramatically different scales (often order unity).
2959  // IntegratedBCs and/or InterfaceKernels may use penalty factors. DGKernels may be ok, but
2960  // they are almost always used in conjunction with Kernels
2961  ComputeJacobianThread cj(_fe_problem, tags);
2962  Threads::parallel_reduce(elem_range, cj);
2963  unsigned int n_threads = libMesh::n_threads();
2964  for (unsigned int i = 0; i < n_threads;
2965  i++) // Add any Jacobian contributions still hanging around
2966  _fe_problem.addCachedJacobian(i);
2967 
2968  // Check whether any exceptions were thrown and propagate this information for parallel
2969  // consistency before
2970  // 1) we do parallel communication when closing tagged matrices
2971  // 2) early returning before reaching our PARALLEL_CATCH below
2972  _fe_problem.checkExceptionAndStopSolve(/*print_message=*/false);
2973 
2974  closeTaggedMatrices(tags);
2975 
2976  return;
2977  }
2978 
2979  switch (_fe_problem.coupling())
2980  {
2981  case Moose::COUPLING_DIAG:
2982  {
2983  ComputeJacobianThread cj(_fe_problem, tags);
2984  Threads::parallel_reduce(elem_range, cj);
2985 
2986  unsigned int n_threads = libMesh::n_threads();
2987  for (unsigned int i = 0; i < n_threads;
2988  i++) // Add any Jacobian contributions still hanging around
2989  _fe_problem.addCachedJacobian(i);
2990 
2991  // Boundary restricted Nodal Kernels
2992  if (_nodal_kernels.hasActiveBoundaryObjects())
2993  {
2996 
2997  Threads::parallel_reduce(bnd_range, cnkjt);
2998  unsigned int n_threads = libMesh::n_threads();
2999  for (unsigned int i = 0; i < n_threads;
3000  i++) // Add any cached jacobians that might be hanging around
3001  _fe_problem.addCachedJacobian(i);
3002  }
3003  }
3004  break;
3005 
3006  default:
3008  {
3009  ComputeFullJacobianThread cj(_fe_problem, tags);
3010  Threads::parallel_reduce(elem_range, cj);
3011  unsigned int n_threads = libMesh::n_threads();
3012 
3013  for (unsigned int i = 0; i < n_threads; i++)
3014  _fe_problem.addCachedJacobian(i);
3015 
3016  // Boundary restricted Nodal Kernels
3017  if (_nodal_kernels.hasActiveBoundaryObjects())
3018  {
3021 
3022  Threads::parallel_reduce(bnd_range, cnkjt);
3023  unsigned int n_threads = libMesh::n_threads();
3024  for (unsigned int i = 0; i < n_threads;
3025  i++) // Add any cached jacobians that might be hanging around
3026  _fe_problem.addCachedJacobian(i);
3027  }
3028  }
3029  break;
3030  }
3031 
3032  computeDiracContributions(tags, true);
3033 
3034  static bool first = true;
3035 
3036  // This adds zeroes into geometric coupling entries to ensure they stay in the matrix
3037  if ((_fe_problem.restoreOriginalNonzeroPattern() || first) &&
3038  _add_implicit_geometric_coupling_entries_to_jacobian)
3039  {
3040  first = false;
3041  addImplicitGeometricCouplingEntries(_fe_problem.geomSearchData());
3042 
3043  if (_fe_problem.getDisplacedProblem())
3044  addImplicitGeometricCouplingEntries(_fe_problem.getDisplacedProblem()->geomSearchData());
3045  }
3046  }
3047  PARALLEL_CATCH;
3048 
3049  // We do not yet know how to make constraints work
3050  // with the tag system
3051  PARALLEL_TRY
3052  {
3053  // Add in Jacobian contributions from other Constraints
3054  if (_fe_problem._has_constraints && tags.count(systemMatrixTag()))
3055  {
3056  // Some constraints need to be able to read values from the Jacobian, which requires that it
3057  // be closed/assembled
3058  auto & system_matrix = getMatrix(systemMatrixTag());
3059  std::unique_ptr<SparseMatrix<Number>> hash_copy;
3060  const SparseMatrix<Number> * view_jac_ptr;
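// With PETSc >= 3.23 the system matrix may use hash-table assembly, which cannot be
// read in place; in that case we view a copy assembled from the hash table instead.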
3061  auto make_readable_jacobian = [&]()
3062  {
3063 #if PETSC_RELEASE_GREATER_EQUALS(3, 23, 0)
3064  if (system_matrix.use_hash_table())
3065  {
3066  hash_copy = libMesh::cast_ref<PetscMatrix<Number> &>(system_matrix).copy_from_hash();
3067  view_jac_ptr = hash_copy.get();
3068  }
3069  else
3070  view_jac_ptr = &system_matrix;
3071 #else
3072  view_jac_ptr = &system_matrix;
3073 #endif
3074  if (view_jac_ptr == &system_matrix)
3075  system_matrix.close();
3076  };
3077 
3078  make_readable_jacobian();
3079 
3080  // Nodal Constraints
3081  const bool had_nodal_constraints = enforceNodalConstraintsJacobian(*view_jac_ptr);
3082  if (had_nodal_constraints)
3083  // We have to make a new readable Jacobian
3084  make_readable_jacobian();
3085 
3086  // Undisplaced Constraints
3087  constraintJacobians(*view_jac_ptr, false);
3088 
3089  // Displaced Constraints
3090  if (_fe_problem.getDisplacedProblem())
3091  constraintJacobians(*view_jac_ptr, true);
3092  }
3093  }
3094  PARALLEL_CATCH;
3095 
3096  // We need to close the diag_save_in variables on the aux system before NodalBCBases clear
3097  // the dofs on boundary nodes
3098  if (_has_diag_save_in)
3099  _fe_problem.getAuxiliarySystem().solution().close();
3100 
3101  PARALLEL_TRY
3102  {
3103  MooseObjectWarehouse<NodalBCBase> * nbc_warehouse;
3104  // Select nodal kernels
3105  if (tags.size() == _fe_problem.numMatrixTags() || !tags.size())
3106  nbc_warehouse = &_nodal_bcs;
3107  else if (tags.size() == 1)
3108  nbc_warehouse = &(_nodal_bcs.getMatrixTagObjectWarehouse(*(tags.begin()), 0));
3109  else
3110  nbc_warehouse = &(_nodal_bcs.getMatrixTagsObjectWarehouse(tags, 0));
3111 
3112  if (nbc_warehouse->hasActiveObjects())
3113  {
3114  // We may be switching from add to set. Moreover, we rely on a call to MatZeroRows to enforce
3115  // the nodal boundary condition constraints which requires that the matrix be truly assembled
3116  // as opposed to just flushed. Consequently we can't do the following despite any desire to
3117  // keep our initial sparsity pattern honored (see https://gitlab.com/petsc/petsc/-/issues/852)
3118  //
3119  // flushTaggedMatrices(tags);
3120  closeTaggedMatrices(tags);
3121 
3122  // Cache the information about which BCs are coupled to which
3123  // variables, so we don't have to figure it out for each node.
3124  std::map<std::string, std::set<unsigned int>> bc_involved_vars;
3125  const std::set<BoundaryID> & all_boundary_ids = _mesh.getBoundaryIDs();
3126  for (const auto & bid : all_boundary_ids)
3127  {
3128  // Get reference to all the NodalBCs for this ID. This is only
3129  // safe if there are NodalBCBases there to be gotten...
3130  if (nbc_warehouse->hasActiveBoundaryObjects(bid))
3131  {
3132  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(bid);
3133  for (const auto & bc : bcs)
3134  {
3135  const std::vector<MooseVariableFEBase *> & coupled_moose_vars =
3136  bc->getCoupledMooseVars();
3137 
3138  // Create the set of "involved" MOOSE nonlinear vars, which includes all coupled vars
3139  // and the BC's own variable
3140  std::set<unsigned int> & var_set = bc_involved_vars[bc->name()];
3141  for (const auto & coupled_var : coupled_moose_vars)
3142  if (coupled_var->kind() == Moose::VAR_SOLVER)
3143  var_set.insert(coupled_var->number());
3144 
3145  var_set.insert(bc->variable().number());
3146  }
3147  }
3148  }
3149 
3150  // reinit scalar variables again. This reinit does not re-fill any of the scalar variable
3151  // solution arrays because that was done above. It only will reorder the derivative
3152  // information for AD calculations to be suitable for NodalBC calculations
3153  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
3154  _fe_problem.reinitScalars(tid, true);
3155 
3156  // Get variable coupling list. We do all the NodalBCBase stuff on
3157  // thread 0... The couplingEntries() data structure determines
3158  // which variables are "coupled" as far as the preconditioner is
3159  // concerned, not what variables a boundary condition specifically
3160  // depends on.
3161  auto & coupling_entries = _fe_problem.couplingEntries(/*_tid=*/0, this->number());
3162 
3163  // Compute Jacobians for NodalBCBases
3164  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
3165  for (const auto & bnode : bnd_nodes)
3166  {
3167  BoundaryID boundary_id = bnode->_bnd_id;
3168  Node * node = bnode->_node;
3169 
3170  if (nbc_warehouse->hasActiveBoundaryObjects(boundary_id) &&
3171  node->processor_id() == processor_id())
3172  {
3173  _fe_problem.reinitNodeFace(node, boundary_id, 0);
3174 
3175  const auto & bcs = nbc_warehouse->getActiveBoundaryObjects(boundary_id);
3176  for (const auto & bc : bcs)
3177  {
3178  // Get the set of involved MOOSE vars for this BC
3179  std::set<unsigned int> & var_set = bc_involved_vars[bc->name()];
3180 
3181  // Loop over all the variables whose Jacobian blocks are
3182  // actually being computed, call computeOffDiagJacobian()
3183  // for each one which is actually coupled (otherwise the
3184  // value is zero.)
3185  for (const auto & it : coupling_entries)
3186  {
3187  unsigned int ivar = it.first->number(), jvar = it.second->number();
3188 
3189  // We are only going to call computeOffDiagJacobian() if:
3190  // 1.) the BC's variable is ivar
3191  // 2.) jvar is "involved" with the BC (including jvar==ivar), and
3192  // 3.) the BC should apply.
3193  if ((bc->variable().number() == ivar) && var_set.count(jvar) && bc->shouldApply())
3194  bc->computeOffDiagJacobian(jvar);
3195  }
3196 
3197  const auto & coupled_scalar_vars = bc->getCoupledMooseScalarVars();
3198  for (const auto & jvariable : coupled_scalar_vars)
3199  if (hasScalarVariable(jvariable->name()))
3200  bc->computeOffDiagJacobianScalar(jvariable->number());
3201  }
3202  }
3203  } // end loop over boundary nodes
3204 
3205  // Set the cached NodalBCBase values in the Jacobian matrix
3207  }
3208  }
3209  PARALLEL_CATCH;
3210 
3211  closeTaggedMatrices(tags);
3212 
3213  // We need to close the save_in variables on the aux system before NodalBCBases clear the dofs
3214  // on boundary nodes
3215  if (hasSaveIn())
3216  _fe_problem.getAuxiliarySystem().solution().close();
3217 
3218  if (hasDiagSaveIn())
3219  _fe_problem.getAuxiliarySystem().solution().close();
3220 
3221  // Accumulate the occurrences of solution invalid warnings into the current iteration's
3222  // cumulative counters
3225 }
3226 
3227 void
3228 NonlinearSystemBase::computeJacobian(SparseMatrix<Number> & jacobian)
3229 {
3230  _nl_matrix_tags.clear();
3231 
3232  auto & tags = _fe_problem.getMatrixTags();
3233 
3234  for (auto & tag : tags)
3235  _nl_matrix_tags.insert(tag.second);
3236 
3237  computeJacobian(jacobian, _nl_matrix_tags);
3238 }
3239 
3240 void
3241 NonlinearSystemBase::computeJacobian(SparseMatrix<Number> & jacobian, const std::set<TagID> & tags)
3242 {
3243  associateMatrixToTag(jacobian, systemMatrixTag());
3244 
3245  computeJacobianTags(tags);
3246 
3247  disassociateMatrixFromTag(jacobian, systemMatrixTag());
3248 }
3249 
3250 void
3251 NonlinearSystemBase::computeJacobianTags(const std::set<TagID> & tags)
3252 {
3253  TIME_SECTION("computeJacobianTags", 5);
3254 
3255  FloatingPointExceptionGuard fpe_guard(_app);
3256 
3257  try
3258  {
3259  computeJacobianInternal(tags);
3260  }
3261  catch (MooseException & e)
3262  {
3263  // The buck stops here, we have already handled the exception by
3264  // calling stopSolve(), it is now up to PETSc to return a
3265  // "diverged" reason during the next solve.
3266  }
3267 }
3268 
3269 void
3270 NonlinearSystemBase::computeJacobianBlocks(std::vector<JacobianBlock *> & blocks)
3271 {
3272  _nl_matrix_tags.clear();
3273 
3274  auto & tags = _fe_problem.getMatrixTags();
3275  for (auto & tag : tags)
3276  _nl_matrix_tags.insert(tag.second);
3277 
3278  computeJacobianBlocks(blocks, _nl_matrix_tags);
3279 }
3280 
3281 void
3282 NonlinearSystemBase::computeJacobianBlocks(std::vector<JacobianBlock *> & blocks,
3283  const std::set<TagID> & tags)
3284 {
3285  TIME_SECTION("computeJacobianBlocks", 3);
3286  FloatingPointExceptionGuard fpe_guard(_app);
3287 
3288  for (unsigned int i = 0; i < blocks.size(); i++)
3289  {
3290  SparseMatrix<Number> & jacobian = blocks[i]->_jacobian;
3291 
3292  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
3293  MAT_KEEP_NONZERO_PATTERN, // This is changed in 3.1
3294  PETSC_TRUE));
3295  if (_fe_problem.errorOnJacobianNonzeroReallocation())
3296  LibmeshPetscCall(MatSetOption(static_cast<PetscMatrix<Number> &>(jacobian).mat(),
3297  MAT_NEW_NONZERO_ALLOCATION_ERR,
3298  PETSC_TRUE));
3299 
3300  jacobian.zero();
3301  }
3302 
3303  for (unsigned int tid = 0; tid < libMesh::n_threads(); tid++)
3304  _fe_problem.reinitScalars(tid);
3305 
3306  PARALLEL_TRY
3307  {
3308  ConstElemRange & elem_range = *_mesh.getActiveLocalElementRange();
3309  ComputeJacobianBlocksThread cjb(_fe_problem, blocks, tags);
3310  Threads::parallel_reduce(elem_range, cjb);
3311  }
3312  PARALLEL_CATCH;
3313 
3314  for (unsigned int i = 0; i < blocks.size(); i++)
3315  blocks[i]->_jacobian.close();
3316 
3317  for (unsigned int i = 0; i < blocks.size(); i++)
3318  {
3319  libMesh::System & precond_system = blocks[i]->_precond_system;
3320  SparseMatrix<Number> & jacobian = blocks[i]->_jacobian;
3321 
3322  unsigned int ivar = blocks[i]->_ivar;
3323  unsigned int jvar = blocks[i]->_jvar;
3324 
3325  // Dirichlet BCs
3326  std::vector<numeric_index_type> zero_rows;
3327  PARALLEL_TRY
3328  {
3329  ConstBndNodeRange & bnd_nodes = *_mesh.getBoundaryNodeRange();
3330  for (const auto & bnode : bnd_nodes)
3331  {
3332  BoundaryID boundary_id = bnode->_bnd_id;
3333  Node * node = bnode->_node;
3334 
3335  if (_nodal_bcs.hasActiveBoundaryObjects(boundary_id))
3336  {
3337  const auto & bcs = _nodal_bcs.getActiveBoundaryObjects(boundary_id);
3338 
3339  if (node->processor_id() == processor_id())
3340  {
3341  _fe_problem.reinitNodeFace(node, boundary_id, 0);
3342 
3343  for (const auto & bc : bcs)
3344  if (bc->variable().number() == ivar && bc->shouldApply())
3345  {
3346  // The first zero is for the variable number... there is only one variable in
3347  // each mini-system The second zero only works with Lagrange elements!
3348  zero_rows.push_back(node->dof_number(precond_system.number(), 0, 0));
3349  }
3350  }
3351  }
3352  }
3353  }
3354  PARALLEL_CATCH;
3355 
3356  jacobian.close();
3357 
3358  // This zeroes the rows corresponding to Dirichlet BCs and puts a 1.0 on the diagonal
3359  if (ivar == jvar)
3360  jacobian.zero_rows(zero_rows, 1.0);
3361  else
3362  jacobian.zero_rows(zero_rows, 0.0);
3363 
3364  jacobian.close();
3365  }
3366 }
3367 
3368 void
3369 NonlinearSystemBase::updateActive(THREAD_ID tid)
3370 {
3377  _kernels.updateActive(tid);
3379 
3380  if (tid == 0)
3381  {
3389 
3390 #ifdef MOOSE_KOKKOS_ENABLED
3396 #endif
3397  }
3398 }
3399 
3400 Real
3401 NonlinearSystemBase::computeDamping(const NumericVector<Number> & solution,
3402  const NumericVector<Number> & update)
3403 {
3404  // Default to no damping
3405  Real damping = 1.0;
3406  bool has_active_dampers = false;
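// Dampers can only ever shrink the step: the minimum damping factor over all damper
// types, threads, and (at the end) all processors wins.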
3407 
3408  try
3409  {
3410  if (_element_dampers.hasActiveObjects())
3411  {
3412  PARALLEL_TRY
3413  {
3414  TIME_SECTION("computeDampers", 3, "Computing Dampers");
3415  has_active_dampers = true;
3418  Threads::parallel_reduce(_fe_problem.getCurrentAlgebraicElementRange(), cid);
3419  damping = std::min(cid.damping(), damping);
3420  }
3421  PARALLEL_CATCH;
3422  }
3423 
3424  if (_nodal_dampers.hasActiveObjects())
3425  {
3426  PARALLEL_TRY
3427  {
3428  TIME_SECTION("computeDamping::nodal", 3, "Computing Nodal Damping");
3429 
3430  has_active_dampers = true;
3433  Threads::parallel_reduce(_fe_problem.getCurrentAlgebraicNodeRange(), cndt);
3434  damping = std::min(cndt.damping(), damping);
3435  }
3436  PARALLEL_CATCH;
3437  }
3438 
3439  if (_general_dampers.hasActiveObjects())
3440  {
3441  PARALLEL_TRY
3442  {
3443  TIME_SECTION("computeDamping::general", 3, "Computing General Damping");
3444 
3445  has_active_dampers = true;
3446  const auto & gdampers = _general_dampers.getActiveObjects();
3447  for (const auto & damper : gdampers)
3448  {
3449  Real gd_damping = damper->computeDamping(solution, update);
3450  try
3451  {
3452  damper->checkMinDamping(gd_damping);
3453  }
3454  catch (MooseException & e)
3455  {
3456  _fe_problem.setException(e.what());
3457  }
3458  damping = std::min(gd_damping, damping);
3459  }
3460  }
3461  PARALLEL_CATCH;
3462  }
3463  }
3464  catch (MooseException & e)
3465  {
3466  // The buck stops here, we have already handled the exception by
3467  // calling stopSolve(), it is now up to PETSc to return a
3468  // "diverged" reason during the next solve.
3469  }
3470  catch (std::exception & e)
3471  {
3472  // Allow the libmesh error/exception on negative jacobian
3473  const std::string & message = e.what();
3474  if (message.find("Jacobian") == std::string::npos)
3475  throw;
3476  }
3477 
3478  _communicator.min(damping);
3479 
3480  if (has_active_dampers && damping < 1.0)
3481  _console << " Damping factor: " << damping << std::endl;
3482 
3483  return damping;
3484 }
3485 
3486 void
3487 NonlinearSystemBase::computeDiracContributions(const std::set<TagID> & tags, bool is_jacobian)
3488 {
3489  _fe_problem.clearDiracInfo();
3490 
3491  std::set<const Elem *> dirac_elements;
3492 
3493  if (_dirac_kernels.hasActiveObjects())
3494  {
3495  TIME_SECTION("computeDirac", 3, "Computing DiracKernels");
3496 
3497  // TODO: Need a threading fix... but it's complicated!
3498  for (THREAD_ID tid = 0; tid < libMesh::n_threads(); ++tid)
3499  {
3500  const auto & dkernels = _dirac_kernels.getActiveObjects(tid);
3501  for (const auto & dkernel : dkernels)
3502  {
3503  dkernel->clearPoints();
3504  dkernel->addPoints();
3505  }
3506  }
3507 
3508  ComputeDiracThread cd(_fe_problem, tags, is_jacobian);
3509 
3510  _fe_problem.getDiracElements(dirac_elements);
3511 
3512  DistElemRange range(dirac_elements.begin(), dirac_elements.end(), 1);
3513  // TODO: Make the Dirac loop work threaded!
3514  // Threads::parallel_reduce(range, cd);
3515 
3516  cd(range);
3517 
3518  if (is_jacobian)
3519  for (const auto tid : make_range(libMesh::n_threads()))
3520  _fe_problem.addCachedJacobian(tid);
3521  }
3522 }
3523 
3523 
3524 NumericVector<Number> &
3525 NonlinearSystemBase::residualCopy()
3526 {
3527  if (!_residual_copy.get())
3528  _residual_copy = NumericVector<Number>::build(_communicator);
3529 
3530  return *_residual_copy;
3531 }
3532 
3533 NumericVector<Number> &
3534 NonlinearSystemBase::residualGhosted()
3535 {
3536  _need_residual_ghosted = true;
3537  if (!_residual_ghosted)
3538  {
3539  // The first time we realize we need a ghosted residual vector,
3540  // we add it.
3541  _residual_ghosted = &addVector("residual_ghosted", false, GHOSTED);
3542 
3543  // If we've already realized we need time and/or non-time
3544  // residual vectors, but we haven't yet realized they need to be
3545  // ghosted, fix that now.
3546  //
3547  // If an application changes its mind, the libMesh API lets us
3548  // change the vector.
3549  if (_Re_time)
3550  {
3551  const auto vector_name = _subproblem.vectorTagName(_Re_time_tag);
3552  _Re_time = &system().add_vector(vector_name, false, GHOSTED);
3553  }
3554  if (_Re_non_time)
3555  {
3556  const auto vector_name = _subproblem.vectorTagName(_Re_non_time_tag);
3557  _Re_non_time = &system().add_vector(vector_name, false, GHOSTED);
3558  }
3559  }
3560  return *_residual_ghosted;
3561 }
3562 
3563 void
3564 NonlinearSystemBase::augmentSparsity(SparsityPattern::Graph & sparsity,
3565  std::vector<dof_id_type> & n_nz,
3566  std::vector<dof_id_type> & n_oz)
3567 {
3568  if (_add_implicit_geometric_coupling_entries_to_jacobian)
3569  {
3570  _fe_problem.updateGeomSearch(GeometricSearchData::NEAREST_NODE);
3571 
3572  std::unordered_map<dof_id_type, std::vector<dof_id_type>> graph;
3573 
3574  findImplicitGeometricCouplingEntries(_fe_problem.geomSearchData(), graph);
3575 
3576  if (_fe_problem.getDisplacedProblem())
3577  findImplicitGeometricCouplingEntries(_fe_problem.getDisplacedProblem()->geomSearchData(),
3578  graph);
3579 
3580  const dof_id_type first_dof_on_proc = dofMap().first_dof(processor_id());
3581  const dof_id_type end_dof_on_proc = dofMap().end_dof(processor_id());
3582 
3583  // The total number of dofs on and off processor
3584  const dof_id_type n_dofs_on_proc = dofMap().n_local_dofs();
3585  const dof_id_type n_dofs_not_on_proc = dofMap().n_dofs() - dofMap().n_local_dofs();
3586 
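// Splice each geometric-coupling row into the existing sparsity pattern and bump the
// on-processor (n_nz) and off-processor (n_oz) nonzero counts to match.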
3587  for (const auto & git : graph)
3588  {
3589  dof_id_type dof = git.first;
3590  dof_id_type local_dof = dof - first_dof_on_proc;
3591 
3592  if (dof < first_dof_on_proc || dof >= end_dof_on_proc)
3593  continue;
3594 
3595  const auto & row = git.second;
3596 
3597  SparsityPattern::Row & sparsity_row = sparsity[local_dof];
3598 
3599  unsigned int original_row_length = sparsity_row.size();
3600 
3601  sparsity_row.insert(sparsity_row.end(), row.begin(), row.end());
3602 
3603  SparsityPattern::sort_row(
3604  sparsity_row.begin(), sparsity_row.begin() + original_row_length, sparsity_row.end());
3605 
3606  // Fix up nonzero arrays
3607  for (const auto & coupled_dof : row)
3608  {
3609  if (coupled_dof < first_dof_on_proc || coupled_dof >= end_dof_on_proc)
3610  {
3611  if (n_oz[local_dof] < n_dofs_not_on_proc)
3612  n_oz[local_dof]++;
3613  }
3614  else
3615  {
3616  if (n_nz[local_dof] < n_dofs_on_proc)
3617  n_nz[local_dof]++;
3618  }
3619  }
3620  }
3621  }
3622 }
3623 
3624 void
3625 NonlinearSystemBase::setSolutionUDot(const NumericVector<Number> & u_dot)
3626 {
3627  *_u_dot = u_dot;
3628 }
3629 
3630 void
3631 NonlinearSystemBase::setSolutionUDotDot(const NumericVector<Number> & u_dotdot)
3632 {
3633  *_u_dotdot = u_dotdot;
3634 }
3635 
3636 void
3637 NonlinearSystemBase::setSolutionUDotOld(const NumericVector<Number> & u_dot_old)
3638 {
3639  *_u_dot_old = u_dot_old;
3640 }
3641 
3642 void
3643 NonlinearSystemBase::setSolutionUDotDotOld(const NumericVector<Number> & u_dotdot_old)
3644 {
3645  *_u_dotdot_old = u_dotdot_old;
3646 }
3647 
3648 void
3649 NonlinearSystemBase::setPreconditioner(std::shared_ptr<MoosePreconditioner> pc)
3650 {
3651  if (_preconditioner.get() != nullptr)
3652  mooseError("More than one active Preconditioner detected");
3653 
3654  _preconditioner = pc;
3655 }
3656 
3657 MoosePreconditioner const *
3659 {
3660  return _preconditioner.get();
3661 }
3662 
3663 void
3665 {
3666  _increment_vec = &_sys.add_vector("u_increment", true, GHOSTED);
3667 }
3668 
3669 void
3670 NonlinearSystemBase::reinitIncrementAtQpsForDampers(THREAD_ID tid,
3671  const std::set<MooseVariable *> & damped_vars)
3672 {
3673  for (const auto & var : damped_vars)
3674  var->computeIncrementAtQps(*_increment_vec);
3675 }
3676 
3677 void
3678 NonlinearSystemBase::reinitIncrementAtNodeForDampers(THREAD_ID tid,
3679  const std::set<MooseVariable *> & damped_vars)
3680 {
3681  for (const auto & var : damped_vars)
3682  var->computeIncrementAtNode(*_increment_vec);
3683 }
3684 
3685 void
3686 NonlinearSystemBase::checkKernelCoverage(const std::set<SubdomainID> & mesh_subdomains) const
3687 {
3688  // Obtain all blocks and variables covered by all kernels
3689  std::set<SubdomainID> input_subdomains;
3690  std::set<std::string> kernel_variables;
3691 
3692  bool global_kernels_exist = false;
3693  global_kernels_exist |= _scalar_kernels.hasActiveObjects();
3694  global_kernels_exist |= _nodal_kernels.hasActiveObjects();
3695 
3696  _kernels.subdomainsCovered(input_subdomains, kernel_variables);
3697  _dg_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3698  _nodal_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3699  _scalar_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3700  _constraints.subdomainsCovered(input_subdomains, kernel_variables);
3701 
3702 #ifdef MOOSE_KOKKOS_ENABLED
3703  _kokkos_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3704  _kokkos_nodal_kernels.subdomainsCovered(input_subdomains, kernel_variables);
3705 #endif
3706 
3707  if (_fe_problem.haveFV())
3708  {
3709  std::vector<FVElementalKernel *> fv_elemental_kernels;
3710  _fe_problem.theWarehouse()
3711  .query()
3712  .template condition<AttribSystem>("FVElementalKernel")
3713  .queryInto(fv_elemental_kernels);
3714 
3715  for (auto fv_kernel : fv_elemental_kernels)
3716  {
3717  if (fv_kernel->blockRestricted())
3718  for (auto block_id : fv_kernel->blockIDs())
3719  input_subdomains.insert(block_id);
3720  else
3721  global_kernels_exist = true;
3722  kernel_variables.insert(fv_kernel->variable().name());
3723 
3724  // Check for lagrange multiplier
3725  if (dynamic_cast<FVScalarLagrangeMultiplierConstraint *>(fv_kernel))
3726  kernel_variables.insert(dynamic_cast<FVScalarLagrangeMultiplierConstraint *>(fv_kernel)
3727  ->lambdaVariable()
3728  .name());
3729  }
3730 
3731  std::vector<FVFluxKernel *> fv_flux_kernels;
3732  _fe_problem.theWarehouse()
3733  .query()
3734  .template condition<AttribSystem>("FVFluxKernel")
3735  .queryInto(fv_flux_kernels);
3736 
3737  for (auto fv_kernel : fv_flux_kernels)
3738  {
3739  if (fv_kernel->blockRestricted())
3740  for (auto block_id : fv_kernel->blockIDs())
3741  input_subdomains.insert(block_id);
3742  else
3743  global_kernels_exist = true;
3744  kernel_variables.insert(fv_kernel->variable().name());
3745  }
3746 
3747  std::vector<FVInterfaceKernel *> fv_interface_kernels;
3748  _fe_problem.theWarehouse()
3749  .query()
3750  .template condition<AttribSystem>("FVInterfaceKernel")
3751  .queryInto(fv_interface_kernels);
3752 
3753  for (auto fvik : fv_interface_kernels)
3754  if (auto scalar_fvik = dynamic_cast<FVScalarLagrangeMultiplierInterface *>(fvik))
3755  kernel_variables.insert(scalar_fvik->lambdaVariable().name());
3756 
3757  std::vector<FVFluxBC *> fv_flux_bcs;
3758  _fe_problem.theWarehouse()
3759  .query()
3760  .template condition<AttribSystem>("FVFluxBC")
3761  .queryInto(fv_flux_bcs);
3762 
3763  for (auto fvbc : fv_flux_bcs)
3764  if (auto scalar_fvbc = dynamic_cast<FVBoundaryScalarLagrangeMultiplierConstraint *>(fvbc))
3765  kernel_variables.insert(scalar_fvbc->lambdaVariable().name());
3766  }
3767 
3768  // Check kernel coverage of subdomains (blocks) in your mesh
3769  if (!global_kernels_exist)
3770  {
3771  std::set<SubdomainID> difference;
3772  std::set_difference(mesh_subdomains.begin(),
3773  mesh_subdomains.end(),
3774  input_subdomains.begin(),
3775  input_subdomains.end(),
3776  std::inserter(difference, difference.end()));
3777 
3778  // there are supposed to be no kernels on these lower-dimensional subdomains
3779  for (const auto & id : _mesh.interiorLowerDBlocks())
3780  difference.erase(id);
3781  for (const auto & id : _mesh.boundaryLowerDBlocks())
3782  difference.erase(id);
3783 
3784  if (!difference.empty())
3785  {
3786  std::vector<SubdomainID> difference_vec =
3787  std::vector<SubdomainID>(difference.begin(), difference.end());
3788  std::vector<SubdomainName> difference_names = _mesh.getSubdomainNames(difference_vec);
3789  std::stringstream missing_block_names;
3790  std::copy(difference_names.begin(),
3791  difference_names.end(),
3792  std::ostream_iterator<std::string>(missing_block_names, " "));
3793  std::stringstream missing_block_ids;
3794  std::copy(difference.begin(),
3795  difference.end(),
3796  std::ostream_iterator<unsigned int>(missing_block_ids, " "));
3797 
3798  mooseError("Each subdomain must contain at least one Kernel.\nThe following block(s) lack an "
3799  "active kernel: " +
3800  missing_block_names.str(),
3801  " (ids: ",
3802  missing_block_ids.str(),
3803  ")");
3804  }
3805  }
3806 
3807  // Check kernel use of variables
3808  std::set<VariableName> variables(getVariableNames().begin(), getVariableNames().end());
3809 
3810  std::set<VariableName> difference;
3811  std::set_difference(variables.begin(),
3812  variables.end(),
3813  kernel_variables.begin(),
3814  kernel_variables.end(),
3815  std::inserter(difference, difference.end()));
3816 
3817  // skip checks for variables defined on lower-dimensional subdomains
3818  std::set<VariableName> vars(difference);
3819  for (auto & var_name : vars)
3820  {
3821  auto blks = getSubdomainsForVar(var_name);
3822  for (const auto & id : blks)
3823  if (_mesh.interiorLowerDBlocks().count(id) > 0 || _mesh.boundaryLowerDBlocks().count(id) > 0)
3824  difference.erase(var_name);
3825  }
3826 
3827  if (!difference.empty())
3828  {
3829  std::stringstream missing_kernel_vars;
3830  std::copy(difference.begin(),
3831  difference.end(),
3832  std::ostream_iterator<std::string>(missing_kernel_vars, " "));
3833  mooseError("Each variable must be referenced by at least one active Kernel.\nThe following "
3834  "variable(s) lack an active kernel: " +
3835  missing_kernel_vars.str());
3836  }
3837 }
3838 
3839 bool
3840 NonlinearSystemBase::containsTimeKernel()
3841 {
3842  auto & time_kernels = _kernels.getVectorTagObjectWarehouse(timeVectorTag(), 0);
3843 
3844  return time_kernels.hasActiveObjects();
3845 }
3846 
3847 std::vector<std::string>
3848 NonlinearSystemBase::timeKernelVariableNames()
3849 {
3850  std::vector<std::string> variable_names;
3851  const auto & time_kernels = _kernels.getVectorTagObjectWarehouse(timeVectorTag(), 0);
3852  if (time_kernels.hasActiveObjects())
3853  for (const auto & kernel : time_kernels.getObjects())
3854  variable_names.push_back(kernel->variable().name());
3855 
3856  return variable_names;
3857 }
3858 
3859 bool
3860 NonlinearSystemBase::needBoundaryMaterialOnSide(BoundaryID bnd_id, THREAD_ID tid)
3861 {
3862  // IntegratedBCs are for now the only objects we consider to be consuming
3863  // matprops on boundaries.
3864  if (_integrated_bcs.hasActiveBoundaryObjects(bnd_id, tid))
3865  for (const auto & bc : _integrated_bcs.getActiveBoundaryObjects(bnd_id, tid))
3866  if (std::static_pointer_cast<MaterialPropertyInterface>(bc)->getMaterialPropertyCalled())
3867  return true;
3868 
3869  // Thin layer heat transfer in the heat_transfer module is being used on a boundary even though
3870  // it's an interface kernel. That boundary is external, on both sides of a gap in a mesh
3871  if (_interface_kernels.hasActiveBoundaryObjects(bnd_id, tid))
3872  for (const auto & ik : _interface_kernels.getActiveBoundaryObjects(bnd_id, tid))
3873  if (std::static_pointer_cast<MaterialPropertyInterface>(ik)->getMaterialPropertyCalled())
3874  return true;
3875 
3876  // Because MortarConstraints do not inherit from BoundaryRestrictable, they are not sorted
3877  // by boundary in the MooseObjectWarehouse. So for now, we return true for all boundaries
3878  // Note: constraints are not threaded at this time
3879  if (_constraints.hasActiveObjects(/*tid*/ 0))
3880  for (const auto & ct : _constraints.getActiveObjects(/*tid*/ 0))
3881  if (auto mpi = std::dynamic_pointer_cast<MaterialPropertyInterface>(ct);
3882  mpi && mpi->getMaterialPropertyCalled())
3883  return true;
3884  return false;
3885 }
3886 
3887 bool
3888 NonlinearSystemBase::needInterfaceMaterialOnSide(BoundaryID bnd_id, THREAD_ID tid)
3889 {
3890  // InterfaceKernels are for now the only objects we consider to be consuming matprops on internal
3891  // boundaries.
3892  if (_interface_kernels.hasActiveBoundaryObjects(bnd_id, tid))
3893  for (const auto & ik : _interface_kernels.getActiveBoundaryObjects(bnd_id, tid))
3894  if (std::static_pointer_cast<MaterialPropertyInterface>(ik)->getMaterialPropertyCalled())
3895  return true;
3896  return false;
3897 }
3898 
3899 bool
3900 NonlinearSystemBase::needSubdomainMaterialOnSide(SubdomainID subdomain_id, THREAD_ID tid)
3901 {
3902  // DGKernels are for now the only objects we consider to be consuming matprops on
3903  // internal sides.
3904  if (_dg_kernels.hasActiveBlockObjects(subdomain_id, tid))
3905  for (const auto & dg : _dg_kernels.getActiveBlockObjects(subdomain_id, tid))
3906  if (std::static_pointer_cast<MaterialPropertyInterface>(dg)->getMaterialPropertyCalled())
3907  return true;
3908  // NOTE:
3909  // HDG kernels do not require face material properties on internal sides at this time.
3910  // The idea is to have element locality of HDG for hybridization
3911  return false;
3912 }
3913 
3914 bool
3915 NonlinearSystemBase::doingDG() const
3916 {
3917  return _doing_dg;
3918 }
3919 
3920 void
3922 {
3925 }
3926 
3927 void
3929  const std::set<TagID> & vector_tags,
3930  const std::set<TagID> & matrix_tags)
3931 {
3932  parallel_object_only();
3933 
3934  try
3935  {
3936  for (auto & map_pr : _undisplaced_mortar_functors)
3937  map_pr.second(compute_type, vector_tags, matrix_tags);
3938 
3939  for (auto & map_pr : _displaced_mortar_functors)
3940  map_pr.second(compute_type, vector_tags, matrix_tags);
3941  }
3942  catch (MetaPhysicL::LogicError &)
3943  {
3944  mooseError(
3945  "We caught a MetaPhysicL error in NonlinearSystemBase::mortarConstraints. This is very "
3946  "likely due to AD not having a sufficiently large derivative container size. Please run "
3947  "MOOSE configure with the '--with-derivative-size=<n>' option");
3948  }
3949 }
3950 
3951 void
3952 NonlinearSystemBase::setupScalingData()
3953 {
3954  if (_auto_scaling_initd)
3955  return;
3956 
3957  // Want the libMesh count of variables, not MOOSE, e.g. I don't care about array variable counts
3958  const auto n_vars = system().n_vars();
3959 
3960  if (_scaling_group_variables.empty())
3961  {
3962  _var_to_group_var.reserve(n_vars);
3963  _num_scaling_groups = n_vars;
3964 
3965  for (const auto var_number : make_range(n_vars))
3966  _var_to_group_var.emplace(var_number, var_number);
3967  }
3968  else
3969  {
3970  std::set<unsigned int> var_numbers, var_numbers_covered, var_numbers_not_covered;
3971  for (const auto var_number : make_range(n_vars))
3972  var_numbers.insert(var_number);
3973 
3975 
3976  for (const auto group_index : index_range(_scaling_group_variables))
3977  for (const auto & var_name : _scaling_group_variables[group_index])
3978  {
3979  if (!hasVariable(var_name) && !hasScalarVariable(var_name))
3980  mooseError("'",
3981  var_name,
3982  "', provided to the 'scaling_group_variables' parameter, does not exist in "
3983  "the nonlinear system.");
3984 
3985  const MooseVariableBase & var =
3986  hasVariable(var_name)
3987  ? static_cast<MooseVariableBase &>(getVariable(0, var_name))
3988  : static_cast<MooseVariableBase &>(getScalarVariable(0, var_name));
3989  auto map_pair = _var_to_group_var.emplace(var.number(), group_index);
3990  if (!map_pair.second)
3991  mooseError("Variable ", var_name, " is contained in multiple scaling grouplings");
3992  var_numbers_covered.insert(var.number());
3993  }
3994 
3995  std::set_difference(var_numbers.begin(),
3996  var_numbers.end(),
3997  var_numbers_covered.begin(),
3998  var_numbers_covered.end(),
3999  std::inserter(var_numbers_not_covered, var_numbers_not_covered.begin()));
4000 
4001  _num_scaling_groups = _scaling_group_variables.size() + var_numbers_not_covered.size();
4002 
4003  auto index = static_cast<unsigned int>(_scaling_group_variables.size());
4004  for (auto var_number : var_numbers_not_covered)
4005  _var_to_group_var.emplace(var_number, index++);
4006  }
4007 
4008  _variable_autoscaled.resize(n_vars, true);
4009  const auto & number_to_var_map = _vars[0].numberToVariableMap();
4010 
4012  for (const auto i : index_range(_variable_autoscaled))
4015  libmesh_map_find(number_to_var_map, i)->name()) !=
4017  _variable_autoscaled[i] = false;
4018 
4019  _auto_scaling_initd = true;
4020 }
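
// Editor's note: an illustrative sketch (hypothetical inputs, not part of this file) of the
// grouping bookkeeping above. User-specified groups claim some variable numbers; every
// unclaimed variable then gets its own singleton group via std::set_difference, so the total
// group count is n_user_groups + n_unclaimed.
#include <algorithm>
#include <iterator>
#include <set>
#include <unordered_map>

namespace example_scaling_groups
{
inline std::unordered_map<unsigned int, unsigned int>
assignSingletonGroups(const std::set<unsigned int> & all_vars,
                      const std::set<unsigned int> & covered_vars,
                      const unsigned int n_user_groups)
{
  // Variables not claimed by any user-defined group
  std::set<unsigned int> not_covered;
  std::set_difference(all_vars.begin(),
                      all_vars.end(),
                      covered_vars.begin(),
                      covered_vars.end(),
                      std::inserter(not_covered, not_covered.begin()));

  // Each unclaimed variable gets a fresh group index after the user-defined ones
  std::unordered_map<unsigned int, unsigned int> var_to_group;
  unsigned int index = n_user_groups;
  for (const auto var_number : not_covered)
    var_to_group.emplace(var_number, index++);
  return var_to_group;
}
} // namespace example_scaling_groups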

bool
NonlinearSystemBase::computeScaling()
{
  if (_computed_scaling && _compute_scaling_once)
    return true;

  _console << "\nPerforming automatic scaling calculation\n" << std::endl;

  TIME_SECTION("computeScaling", 3, "Computing Automatic Scaling");

  // It's funny, but we need to assemble our vector of scaling factors here, otherwise we
  // would be applying scaling factors of 0 during assembly of our scaling Jacobian
  assembleScalingVector();

  // container for repeated access of element global dof indices
  std::vector<dof_id_type> dof_indices;

  if (!_auto_scaling_initd)
    setupScalingData();

  std::vector<Real> inverse_scaling_factors(_num_scaling_groups, 0);
  std::vector<Real> resid_inverse_scaling_factors(_num_scaling_groups, 0);
  std::vector<Real> jac_inverse_scaling_factors(_num_scaling_groups, 0);
  auto & dof_map = dofMap();

  // What types of scaling do we want?
  bool jac_scaling = _resid_vs_jac_scaling_param < 1. - TOLERANCE;
  bool resid_scaling = _resid_vs_jac_scaling_param > TOLERANCE;

  const NumericVector<Number> & scaling_residual = RHS();

  if (jac_scaling)
  {
    // We need to reinit the scaling matrix when the number of dofs changes, but there is no
    // good way to track that. In theory it is the job of the libMesh system to track this,
    // but this special matrix is not owned by the libMesh system. So reinit every time; it
    // is not expensive.
    {
      auto init_vector = NumericVector<Number>::build(this->comm());
      init_vector->init(system().n_dofs(), system().n_local_dofs(), /*fast=*/false, PARALLEL);

      _scaling_matrix->clear();
      _scaling_matrix->init(*init_vector);
    }

    _fe_problem.computingScalingJacobian(true);
    // Dispatch to derived classes to ensure that we use the correct matrix tag
    computeScalingJacobian();
    _fe_problem.computingScalingJacobian(false);
  }

  if (resid_scaling)
  {
    _fe_problem.computingScalingResidual(true);
    _fe_problem.computingNonlinearResid(true);
    // Dispatch to derived classes to ensure that we use the correct vector tag
    computeScalingResidual();
    _fe_problem.computingNonlinearResid(false);
    _fe_problem.computingScalingResidual(false);
  }

  // Did something bad happen during the residual/Jacobian scaling computation?
  if (_fe_problem.getFailNextNonlinearConvergenceCheck())
    return false;

  auto examine_dof_indices = [this,
                              jac_scaling,
                              resid_scaling,
                              &dof_map,
                              &jac_inverse_scaling_factors,
                              &resid_inverse_scaling_factors,
                              &scaling_residual](const auto & dof_indices, const auto var_number)
  {
    for (auto dof_index : dof_indices)
      if (dof_map.local_index(dof_index))
      {
        if (jac_scaling)
        {
          // For now we will use the diagonal for determining scaling
          auto mat_value = (*_scaling_matrix)(dof_index, dof_index);
          auto & factor = jac_inverse_scaling_factors[_var_to_group_var[var_number]];
          factor = std::max(factor, std::abs(mat_value));
        }
        if (resid_scaling)
        {
          auto vec_value = scaling_residual(dof_index);
          auto & factor = resid_inverse_scaling_factors[_var_to_group_var[var_number]];
          factor = std::max(factor, std::abs(vec_value));
        }
      }
  };

  // Compute our scaling factors for the spatial field variables
  for (const auto & elem : _fe_problem.getCurrentAlgebraicElementRange())
    for (const auto i : make_range(system().n_vars()))
      if (_variable_autoscaled[i] && system().variable_type(i).family != SCALAR)
      {
        dof_map.dof_indices(elem, dof_indices, i);
        examine_dof_indices(dof_indices, i);
      }

  // And for the scalar variables
  for (const auto i : make_range(system().n_vars()))
    if (_variable_autoscaled[i] && system().variable_type(i).family == SCALAR)
    {
      dof_map.SCALAR_dof_indices(dof_indices, i);
      examine_dof_indices(dof_indices, i);
    }

  if (resid_scaling)
    _communicator.max(resid_inverse_scaling_factors);
  if (jac_scaling)
    _communicator.max(jac_inverse_scaling_factors);

  if (jac_scaling && resid_scaling)
    for (MooseIndex(inverse_scaling_factors) i = 0; i < inverse_scaling_factors.size(); ++i)
    {
      // Be careful not to take log(0)
      if (!resid_inverse_scaling_factors[i])
      {
        if (!jac_inverse_scaling_factors[i])
          inverse_scaling_factors[i] = 1;
        else
          inverse_scaling_factors[i] = jac_inverse_scaling_factors[i];
      }
      else if (!jac_inverse_scaling_factors[i])
        // We know the residual factor is not zero
        inverse_scaling_factors[i] = resid_inverse_scaling_factors[i];
      else
        inverse_scaling_factors[i] =
            std::exp(_resid_vs_jac_scaling_param * std::log(resid_inverse_scaling_factors[i]) +
                     (1 - _resid_vs_jac_scaling_param) * std::log(jac_inverse_scaling_factors[i]));
    }
  else if (jac_scaling)
    inverse_scaling_factors = jac_inverse_scaling_factors;
  else if (resid_scaling)
    inverse_scaling_factors = resid_inverse_scaling_factors;
  else
    mooseError("We shouldn't be calling this routine if we're not performing any scaling");

  // We have to make sure that our scaling values are not zero
  for (auto & scaling_factor : inverse_scaling_factors)
    if (scaling_factor == 0)
      scaling_factor = 1;

  // Now flatten the group scaling factors to the individual variable scaling factors
  std::vector<Real> flattened_inverse_scaling_factors(system().n_vars());
  for (const auto i : index_range(flattened_inverse_scaling_factors))
    flattened_inverse_scaling_factors[i] = inverse_scaling_factors[_var_to_group_var[i]];

  // Now set the scaling factors for the variables
  applyScalingFactors(flattened_inverse_scaling_factors);
  if (auto displaced_problem = _fe_problem.getDisplacedProblem())
    displaced_problem->systemBaseNonlinear(number()).applyScalingFactors(
        flattened_inverse_scaling_factors);

  _computed_scaling = true;
  return true;
}
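
// Editor's note: a minimal sketch (not part of this file) of the blending formula above. With
// alpha = _resid_vs_jac_scaling_param, the combined inverse scaling factor is the weighted
// geometric mean r^alpha * j^(1-alpha) of the residual and Jacobian factors, evaluated in log
// space; the zero-valued branches exist purely to avoid taking log(0).
#include <cmath>

namespace example_scaling_blend
{
inline double
blendInverseScalingFactor(const double resid_factor, const double jac_factor, const double alpha)
{
  if (resid_factor == 0.0)
    return jac_factor == 0.0 ? 1.0 : jac_factor;
  if (jac_factor == 0.0)
    return resid_factor;
  // exp(alpha*log(r) + (1-alpha)*log(j)) == pow(r, alpha) * pow(j, 1.0 - alpha)
  return std::exp(alpha * std::log(resid_factor) + (1.0 - alpha) * std::log(jac_factor));
}
} // namespace example_scaling_blend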

void
NonlinearSystemBase::assembleScalingVector()
{
  if (!hasVector("scaling_factors"))
    // No variables have indicated they need scaling
    return;

  auto & scaling_vector = getVector("scaling_factors");

  const auto & lm_mesh = _mesh.getMesh();
  const auto & dof_map = dofMap();

  const auto & field_variables = _vars[0].fieldVariables();
  const auto & scalar_variables = _vars[0].scalars();

  std::vector<dof_id_type> dof_indices;

  for (const Elem * const elem :
       as_range(lm_mesh.active_local_elements_begin(), lm_mesh.active_local_elements_end()))
    for (const auto * const field_var : field_variables)
    {
      const auto & factors = field_var->arrayScalingFactor();
      for (const auto i : make_range(field_var->count()))
      {
        dof_map.dof_indices(elem, dof_indices, field_var->number() + i);
        for (const auto dof : dof_indices)
          scaling_vector.set(dof, factors[i]);
      }
    }

  for (const auto * const scalar_var : scalar_variables)
  {
    mooseAssert(scalar_var->count() == 1,
                "Scalar variables should always have only one component.");
    dof_map.SCALAR_dof_indices(dof_indices, scalar_var->number());
    for (const auto dof : dof_indices)
      scaling_vector.set(dof, scalar_var->scalingFactor());
  }

  // Parallel assembly
  scaling_vector.close();

  if (auto * displaced_problem = _fe_problem.getDisplacedProblem().get())
    // Copy into the corresponding displaced system vector because they should be exactly the same
    displaced_problem->systemBaseNonlinear(number()).getVector("scaling_factors") = scaling_vector;
}
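
// Editor's note: an illustrative sketch (hypothetical stand-in type, not part of this file) of
// the set-then-close pattern above. Each rank writes the entries it computed locally; a single
// collective close() then finalizes the distributed vector so every processor sees consistent
// values, which is why close() is called once after all set() calls rather than per entry.
#include <cstddef>
#include <map>

namespace example_scaling_vector
{
// Stand-in for a distributed vector: set() stages local writes; in a real implementation
// close() would perform the parallel communication that finalizes them
struct DemoGhostedVector
{
  std::map<std::size_t, double> entries;
  void set(const std::size_t dof, const double value) { entries[dof] = value; }
  void close() {}
};

// Fill a contiguous dof range with one variable's scaling factor, then finalize
inline void
fillScalingFactors(DemoGhostedVector & vec,
                   const std::size_t first_dof,
                   const std::size_t last_dof,
                   const double factor)
{
  for (std::size_t dof = first_dof; dof <= last_dof; ++dof)
    vec.set(dof, factor);
  vec.close();
}
} // namespace example_scaling_vector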

bool
NonlinearSystemBase::preSolve()
{
  // Clear the iteration counters
  _current_l_its.clear();
  _current_nl_its = 0;

  // Initialize the solution vector using a predictor and known values from nodal bcs
  setInitialSolution();

  // Now that the initial solution has been set, potentially perform a residual/Jacobian
  // evaluation to determine variable scaling factors
  if (_automatic_scaling)
  {
    const bool scaling_succeeded = computeScaling();
    if (!scaling_succeeded)
      return false;
  }

  // We do not know a priori which variable a global degree of freedom corresponds to, so we
  // need a map from global dof to scaling factor. We just use a ghosted NumericVector for
  // that mapping.
  assembleScalingVector();

  return true;
}

void
NonlinearSystemBase::destroyColoring()
{
  if (matrixFromColoring())
    LibmeshPetscCall(MatFDColoringDestroy(&_fdcoloring));
}

FieldSplitPreconditionerBase &
NonlinearSystemBase::getFieldSplitPreconditioner()
{
  if (!_fsp)
    mooseError("No field split preconditioner is present for this system");

  return *_fsp;
}