Line data Source code
1 : //* This file is part of the MOOSE framework
2 : //* https://mooseframework.inl.gov
3 : //*
4 : //* All rights reserved, see COPYRIGHT for full restrictions
5 : //* https://github.com/idaholab/moose/blob/master/COPYRIGHT
6 : //*
7 : //* Licensed under LGPL 2.1, please see LICENSE for details
8 : //* https://www.gnu.org/licenses/lgpl-2.1.html
9 :
10 : #pragma once
11 :
12 : #include "ParallelUniqueId.h"
13 : #include "MooseMesh.h"
14 : #include "MooseTypes.h"
15 : #include "MooseException.h"
16 : #include "MooseVariableFieldBase.h"
17 : #include "FVFluxKernel.h"
18 : #include "FVFluxBC.h"
19 : #include "FVInterfaceKernel.h"
20 : #include "FEProblem.h"
21 : #include "DisplacedProblem.h"
22 : #include "SwapBackSentinel.h"
23 : #include "MaterialBase.h"
24 : #include "libmesh/libmesh_exceptions.h"
25 : #include "libmesh/elem.h"
26 :
27 : #include <set>
28 :
29 : class MooseVariableFVBase;
30 :
31 : /// This works like the Sentinel helper classes in MOOSE, except it is more
32 : /// flexible and concise to use. You just initialize it with a lambda and can
33 : /// return it from within another function scope. So a function that has other
34 : /// cleanup functions associated with it can just wrap them in an OnScopeExit
35 : /// object and return it. The caller of the function needing cleanup simply
36 : /// catches its return value and voila - the cleanup functions are called at
37 : /// the end of the calling function's scope. Like this:
38 : ///
39 : /// OnScopeExit doSomethingSpecial()
40 : /// {
41 : /// ...
42 : /// return OnScopeExit([]{cleanupSomethingSpecial();});
43 : /// }
44 : /// void cleanupSomethingSpecial() {...}
45 : ///
46 : /// int main()
47 : /// {
48 : /// auto cleaner_uper = doSomethingSpecial();
49 : /// }
50 : struct OnScopeExit
51 : {
52 : std::function<void()> _f;
53 : OnScopeExit(std::function<void()> f) noexcept : _f(std::move(f)) {}
54 : OnScopeExit(OnScopeExit && other) : _f(std::move(other._f)) {}
55 : OnScopeExit & operator=(OnScopeExit && other)
56 : {
57 : _f = std::move(other._f);
58 : return *this;
59 : }
60 : ~OnScopeExit()
61 : {
62 : if (_f)
63 : _f();
64 : }
65 : };
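
// An additional usage sketch (illustrative only, not used elsewhere in this file):
// OnScopeExit also works as a plain local scope guard, running its lambda when the
// enclosing scope exits, whether normally, via early return, or via an exception.
// The function finishSomething() below is hypothetical.
//
//   void doWork()
//   {
//     OnScopeExit guard([] { finishSomething(); });
//     // ... work that may return early or throw ...
//   } // guard's lambda runs here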
66 :
67 : /**
68 : * This loops over a set of mesh faces (i.e. FaceInfo objects). Callback
69 : * routines are provided for visiting each face, for visiting boundary faces,
70 : * for subdomain changes, and for pre/post hooks around many of these events.
71 : */
72 : template <typename RangeType>
73 : class ThreadedFaceLoop
74 : {
75 : public:
76 : ThreadedFaceLoop(FEProblemBase & fe_problem,
77 : const unsigned int nl_system_num,
78 : const std::set<TagID> & tags,
79 : bool on_displaced);
80 :
81 : ThreadedFaceLoop(ThreadedFaceLoop & x, Threads::split split);
82 :
83 : virtual ~ThreadedFaceLoop();
84 :
85 : virtual void operator()(const RangeType & range, bool bypass_threading = false);
86 :
87 : void join(const ThreadedFaceLoop & y);
88 :
89 : virtual void onFace(const FaceInfo & fi) = 0;
90 : /// This is called once for each face after all face and boundary callbacks have been
91 : /// finished for that face.
92 0 : virtual void postFace(const FaceInfo & /*fi*/) {}
93 : /// This is called once before all face-looping
94 0 : virtual void pre() {}
95 : /// This is called once after all face-looping is finished.
96 0 : virtual void post() {}
97 :
98 : /// This is called once for every face that is on a boundary *after* onFace
99 : /// is called for the face.
100 : virtual void onBoundary(const FaceInfo & fi, BoundaryID boundary) = 0;
101 :
102 : /// Called every time the current subdomain changes (i.e. the subdomain of *this* face's elem-side element
103 : /// is not the same as the subdomain of the last face's elem-side element).
104 237498 : virtual void subdomainChanged() { _fe_problem.subdomainSetup(_subdomain, _tid); }
105 :
106 : /// Called every time the neighbor subdomain changes (i.e. the subdomain of *this* face's neighbor element
107 : /// is not the same as the subdomain of the last face's neighbor element).
108 3220994 : virtual void neighborSubdomainChanged()
109 : {
110 3220994 : _fe_problem.neighborSubdomainSetup(_neighbor_subdomain, _tid);
111 3220994 : }
112 :
113 : /// Called if a MooseException is caught anywhere during the computation.
114 0 : void caughtMooseException(MooseException & e)
115 : {
116 0 : Threads::spin_mutex::scoped_lock lock(threaded_element_mutex);
117 0 : std::string what(e.what());
118 0 : _fe_problem.setException(what);
119 0 : }
120 :
121 : protected:
122 : /// Print list of object types executed and in which order
123 0 : virtual void printGeneralExecutionInformation() const {}
124 :
125 : /// Print ordering of objects executed on each block
126 0 : virtual void printBlockExecutionInformation() const {}
127 :
128 : /// Print ordering of objects executed on each boundary
129 0 : virtual void printBoundaryExecutionInformation(const BoundaryID /* bnd_id */) const {}
130 :
131 : /// Reset lists of blocks and boundaries for which execution printing has been done
132 188783 : void resetExecutionPrinting()
133 : {
134 188783 : _blocks_exec_printed.clear();
135 188783 : _boundaries_exec_printed.clear();
136 188783 : }
137 :
138 : FEProblemBase & _fe_problem;
139 : MooseMesh & _mesh;
140 : const std::set<TagID> & _tags;
141 : THREAD_ID _tid;
142 : const unsigned int _nl_system_num;
143 :
144 : /// Whether this loop is operating on the displaced mesh
145 : const bool _on_displaced;
146 :
147 : /// FEProblemBase or DisplacedProblem depending on \p _on_displaced
148 : SubProblem & _subproblem;
149 :
150 : /// The subdomain for the current element
151 : SubdomainID _subdomain;
152 :
153 : /// The subdomain for the last element
154 : SubdomainID _old_subdomain;
155 :
156 : /// The subdomain for the current neighbor
157 : SubdomainID _neighbor_subdomain;
158 :
159 : /// The subdomain for the last neighbor
160 : SubdomainID _old_neighbor_subdomain;
161 :
162 : /// Set to keep track of blocks for which we have printed the execution pattern
163 : mutable std::set<std::pair<const SubdomainID, const SubdomainID>> _blocks_exec_printed;
164 :
165 : /// Set to keep track of boundaries for which we have printed the execution pattern
166 : mutable std::set<BoundaryID> _boundaries_exec_printed;
167 :
168 : /// Holds caught runtime error messages
169 : std::string _error_message;
170 :
171 : private:
172 : /// Whether this is the zeroth threaded copy of this body
173 : const bool _zeroth_copy;
174 :
175 : /// The value of \p Moose::_throw_on_error at the time of construction. This data member only has
176 : /// meaning and will only be read if this is the thread 0 copy of the class
177 : const bool _incoming_throw_on_error;
178 : };
179 :
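// A minimal sketch of a derived face loop (MyFaceLoop is hypothetical, not a MOOSE class),
// showing which callbacks a subclass must implement (onFace, onBoundary) and how the
// splitting constructor and join() are typically mirrored for threaded execution:
//
//   template <typename RangeType>
//   class MyFaceLoop : public ThreadedFaceLoop<RangeType>
//   {
//   public:
//     MyFaceLoop(FEProblemBase & fe_problem,
//                const unsigned int nl_system_num,
//                const std::set<TagID> & tags,
//                const bool on_displaced)
//       : ThreadedFaceLoop<RangeType>(fe_problem, nl_system_num, tags, on_displaced)
//     {
//     }
//
//     MyFaceLoop(MyFaceLoop & x, Threads::split split) : ThreadedFaceLoop<RangeType>(x, split) {}
//
//     // Count every face and every boundary face visited by this thread
//     void onFace(const FaceInfo & /*fi*/) override { ++_n_faces; }
//     void onBoundary(const FaceInfo & /*fi*/, BoundaryID /*bnd_id*/) override { ++_n_bnd_faces; }
//
//     // Merge per-thread tallies after the threaded loop finishes
//     void join(const MyFaceLoop & y)
//     {
//       ThreadedFaceLoop<RangeType>::join(y);
//       _n_faces += y._n_faces;
//       _n_bnd_faces += y._n_bnd_faces;
//     }
//
//   private:
//     unsigned int _n_faces = 0;
//     unsigned int _n_bnd_faces = 0;
//   };
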
180 : template <typename RangeType>
181 181380 : ThreadedFaceLoop<RangeType>::ThreadedFaceLoop(FEProblemBase & fe_problem,
182 : const unsigned int nl_system_num,
183 : const std::set<TagID> & tags,
184 : const bool on_displaced)
185 181380 : : _fe_problem(fe_problem),
186 362760 : _mesh(fe_problem.mesh()),
187 181380 : _tags(tags),
188 181380 : _nl_system_num(nl_system_num),
189 181380 : _on_displaced(on_displaced),
190 362760 : _subproblem(_on_displaced ? static_cast<SubProblem &>(*_fe_problem.getDisplacedProblem())
191 181380 : : static_cast<SubProblem &>(_fe_problem)),
192 181380 : _zeroth_copy(true),
193 362760 : _incoming_throw_on_error(Moose::_throw_on_error)
194 : {
195 181380 : Moose::_throw_on_error = true;
196 181380 : }
197 :
198 : template <typename RangeType>
199 8556 : ThreadedFaceLoop<RangeType>::ThreadedFaceLoop(ThreadedFaceLoop & x, Threads::split /*split*/)
200 8556 : : _fe_problem(x._fe_problem),
201 8556 : _mesh(x._mesh),
202 8556 : _tags(x._tags),
203 8556 : _nl_system_num(x._nl_system_num),
204 8556 : _on_displaced(x._on_displaced),
205 8556 : _subproblem(x._subproblem),
206 8556 : _zeroth_copy(false),
207 8556 : _incoming_throw_on_error(false)
208 : {
209 8556 : }
210 :
211 : template <typename RangeType>
212 189936 : ThreadedFaceLoop<RangeType>::~ThreadedFaceLoop()
213 : {
214 189936 : if (_zeroth_copy)
215 : {
216 181380 : Moose::_throw_on_error = _incoming_throw_on_error;
217 :
218 181380 : if (!_error_message.empty())
219 8 : mooseError(_error_message);
220 : }
221 371308 : }
222 :
223 : template <typename RangeType>
224 : void
225 8556 : ThreadedFaceLoop<RangeType>::join(const ThreadedFaceLoop<RangeType> & y)
226 : {
227 8556 : if (_error_message.empty() && !y._error_message.empty())
228 0 : _error_message = y._error_message;
229 8556 : }
230 :
231 : // TODO: ensure the vector<FaceInfo> data structure is built such
232 : // that for all sides on an interface between two subdomains, the elements of
233 : // the same subdomain are used consistently for all the "elem" (i.e. not
234 : // "neighbor") parameters in order to avoid jumping back and forth along the
235 : // boundary between using one or the other subdomains' FV kernels
236 : // unpredictably.
237 : template <typename RangeType>
238 : void
239 189936 : ThreadedFaceLoop<RangeType>::operator()(const RangeType & range, bool bypass_threading)
240 : {
241 : // TODO: make this query fv flux kernel specific or somehow integrate the
242 : // fv source kernels into this loop. Also this check will need to increase
243 : // in generality if/when other systems and objects besides FV stuff get
244 : // added to this loop.
245 189936 : std::vector<FVKernel *> kernels;
246 189936 : _fe_problem.theWarehouse()
247 : .query()
248 379872 : .template condition<AttribSysNum>(_nl_system_num)
249 189936 : .template condition<AttribSystem>("FVFluxKernel")
250 189936 : .template condition<AttribDisplaced>(_on_displaced)
251 189936 : .queryInto(kernels);
252 189936 : if (kernels.size() == 0)
253 1143 : return;
254 :
255 : try
256 : {
257 : try
258 : {
259 188793 : ParallelUniqueId puid;
260 188793 : _tid = bypass_threading ? 0 : puid.id;
261 :
262 188793 : pre();
263 188793 : printGeneralExecutionInformation();
264 :
265 188793 : _subdomain = Moose::INVALID_BLOCK_ID;
266 188793 : _neighbor_subdomain = Moose::INVALID_BLOCK_ID;
267 :
268 188793 : typename RangeType::const_iterator faceinfo = range.begin();
269 23734391 : for (faceinfo = range.begin(); faceinfo != range.end(); ++faceinfo)
270 : {
271 23545598 : const Elem & elem = (*faceinfo)->elem();
272 :
273 23545598 : _fe_problem.setCurrentSubdomainID(&elem, _tid);
274 :
275 23545598 : _old_subdomain = _subdomain;
276 23545598 : _subdomain = elem.subdomain_id();
277 23545598 : if (_subdomain != _old_subdomain)
278 : {
279 237498 : subdomainChanged();
280 237498 : printBlockExecutionInformation();
281 : }
282 :
283 23545598 : _old_neighbor_subdomain = _neighbor_subdomain;
284 23545598 : if (const Elem * const neighbor = (*faceinfo)->neighborPtr())
285 : {
286 21742782 : _fe_problem.setNeighborSubdomainID(neighbor, _tid);
287 21742782 : _neighbor_subdomain = neighbor->subdomain_id();
288 : }
289 : else
290 1802816 : _neighbor_subdomain = Moose::INVALID_BLOCK_ID;
291 :
292 23545598 : if (_neighbor_subdomain != _old_neighbor_subdomain)
293 : {
294 3220994 : neighborSubdomainChanged();
295 : // This is going to cause a lot more printing
296 3220994 : printBlockExecutionInformation();
297 : }
298 :
299 23545598 : onFace(**faceinfo);
300 : // Cache data now because onBoundary may clear it. E.g. there was a nasty bug for two
301 : // variable FV systems where if one variable was executing an FVFluxKernel on a boundary
302 : // while the other was executing an FVFluxBC, the FVFluxKernel data would get lost because
303 : // onBoundary would clear the residual/Jacobian data before it was cached
304 23545598 : postFace(**faceinfo);
305 :
306 23545598 : const std::set<BoundaryID> boundary_ids = (*faceinfo)->boundaryIDs();
307 25602607 : for (auto & it : boundary_ids)
308 : {
309 2057019 : printBoundaryExecutionInformation(it);
310 2057019 : onBoundary(**faceinfo, it);
311 : }
312 :
313 23545588 : postFace(**faceinfo);
314 :
315 : } // range
316 188783 : post();
317 :
318 : // Clear execution printing sets to start printing on every block and boundary again
319 188783 : resetExecutionPrinting();
320 188793 : }
321 10 : catch (libMesh::LogicError & e)
322 : {
323 0 : mooseException("We caught a libMesh error: ", e.what());
324 : }
325 0 : catch (MetaPhysicL::LogicError & e)
326 : {
327 0 : moose::translateMetaPhysicLError(e);
328 : }
329 : }
330 10 : catch (MooseException & e)
331 : {
332 0 : caughtMooseException(e);
333 : }
334 20 : catch (std::runtime_error & e)
335 : {
336 10 : _error_message = e.what();
337 : }
338 189936 : }
339 :
340 : /**
341 : * Base class for assembly-like calculations.
342 : */
343 : template <typename RangeType, typename AttributeTagType>
344 : class ComputeFVFluxThread : public ThreadedFaceLoop<RangeType>
345 : {
346 : public:
347 : ComputeFVFluxThread(FEProblemBase & fe_problem,
348 : const unsigned int nl_system_num,
349 : const std::set<TagID> & tags,
350 : bool on_displaced);
351 :
352 : ComputeFVFluxThread(ComputeFVFluxThread & x, Threads::split split);
353 :
354 : virtual ~ComputeFVFluxThread();
355 :
356 : virtual void onFace(const FaceInfo & fi) override;
357 : virtual void pre() override;
358 : virtual void post() override;
359 :
360 : virtual void onBoundary(const FaceInfo & fi, BoundaryID boundary) override;
361 :
362 : virtual void subdomainChanged() override;
363 : virtual void neighborSubdomainChanged() override;
364 :
365 : protected:
366 : /**
367 : * call either computeResidual, computeJacobian, or computeResidualAndJacobian on the provided
368 : * residual object depending on what derived class of this class is instantiated
369 : */
370 : virtual void compute(FVFaceResidualObject & ro, const FaceInfo & fi) = 0;
371 :
372 : /**
373 : * call either residualSetup or jacobianSetup depending on what derived class of this class is
374 : * instantiated
375 : */
376 : virtual void setup(SetupInterface & obj) = 0;
377 :
378 : /**
379 : * call either addCachedJacobian or addCachedResidual or both depending on what derived class of
380 : * this class is instantiated
381 : */
382 : virtual void addCached() = 0;
383 :
384 : unsigned int _num_cached = 0;
385 :
386 : using ThreadedFaceLoop<RangeType>::_fe_problem;
387 : using ThreadedFaceLoop<RangeType>::_mesh;
388 : using ThreadedFaceLoop<RangeType>::_tid;
389 : using ThreadedFaceLoop<RangeType>::_tags;
390 : using ThreadedFaceLoop<RangeType>::_nl_system_num;
391 : using ThreadedFaceLoop<RangeType>::_on_displaced;
392 : using ThreadedFaceLoop<RangeType>::_subdomain;
393 : using ThreadedFaceLoop<RangeType>::_neighbor_subdomain;
394 : using ThreadedFaceLoop<RangeType>::_blocks_exec_printed;
395 : using ThreadedFaceLoop<RangeType>::_boundaries_exec_printed;
396 :
397 : private:
398 : void reinitVariables(const FaceInfo & fi);
399 : void finalizeContainers();
400 :
401 : /// Print list of object types executed and in which order
402 : virtual void printGeneralExecutionInformation() const override;
403 :
404 : /// Print ordering of objects executed on each block
405 : virtual void printBlockExecutionInformation() const override;
406 :
407 : /// Print ordering of objects executed on each boundary
408 : virtual void printBoundaryExecutionInformation(const BoundaryID bnd_id) const override;
409 :
410 : /// Utility to get the subdomain names from the ids
411 : std::pair<SubdomainName, SubdomainName> getBlockNames() const;
412 :
413 : /// Variables
414 : std::set<MooseVariableFieldBase *> _fv_vars;
415 : std::set<MooseVariableFieldBase *> _elem_sub_fv_vars;
416 : std::set<MooseVariableFieldBase *> _neigh_sub_fv_vars;
417 :
418 : /// FVFluxKernels
419 : std::set<FVFluxKernel *> _fv_flux_kernels;
420 : std::set<FVFluxKernel *> _elem_sub_fv_flux_kernels;
421 : std::set<FVFluxKernel *> _neigh_sub_fv_flux_kernels;
422 :
423 : /// Element face materials
424 : std::vector<std::shared_ptr<MaterialBase>> _elem_face_mats;
425 : std::vector<std::shared_ptr<MaterialBase>> _elem_sub_elem_face_mats;
426 : std::vector<std::shared_ptr<MaterialBase>> _neigh_sub_elem_face_mats;
427 :
428 : // Neighbor face materials
429 : std::vector<std::shared_ptr<MaterialBase>> _neigh_face_mats;
430 : std::vector<std::shared_ptr<MaterialBase>> _elem_sub_neigh_face_mats;
431 : std::vector<std::shared_ptr<MaterialBase>> _neigh_sub_neigh_face_mats;
432 :
433 : const bool _scaling_jacobian;
434 : const bool _scaling_residual;
435 : };
436 :
437 : template <typename RangeType, typename AttributeTagType>
438 181380 : ComputeFVFluxThread<RangeType, AttributeTagType>::ComputeFVFluxThread(FEProblemBase & fe_problem,
439 : unsigned int nl_system_num,
440 : const std::set<TagID> & tags,
441 : bool on_displaced)
442 : : ThreadedFaceLoop<RangeType>(fe_problem, nl_system_num, tags, on_displaced),
443 181380 : _scaling_jacobian(fe_problem.computingScalingJacobian()),
444 362760 : _scaling_residual(fe_problem.computingScalingResidual())
445 : {
446 181380 : }
447 :
448 : template <typename RangeType, typename AttributeTagType>
449 8556 : ComputeFVFluxThread<RangeType, AttributeTagType>::ComputeFVFluxThread(ComputeFVFluxThread & x,
450 : Threads::split split)
451 : : ThreadedFaceLoop<RangeType>(x, split),
452 8556 : _fv_vars(x._fv_vars),
453 8556 : _scaling_jacobian(x._scaling_jacobian),
454 8556 : _scaling_residual(x._scaling_residual)
455 : {
456 8556 : }
457 :
458 : template <typename RangeType, typename AttributeTagType>
459 189936 : ComputeFVFluxThread<RangeType, AttributeTagType>::~ComputeFVFluxThread()
460 : {
461 189936 : }
462 :
463 : template <typename RangeType, typename AttributeTagType>
464 : void
465 23545598 : ComputeFVFluxThread<RangeType, AttributeTagType>::reinitVariables(const FaceInfo & fi)
466 : {
467 : // TODO: this skips necessary FE reinit. In addition to this call, we need
468 : // to conditionally do some FE-specific reinit here if we have any active FE
469 : // variables. However, we still want to keep/do FV-style quadrature.
470 : // Figure out how to do all this some day.
471 23545598 : this->_subproblem.reinitFVFace(_tid, fi);
472 :
473 : // TODO: for FE variables, this is handled via setting needed vars through
474 : // fe problem API which passes the value on to the system class. Then
475 : // reinit is called on fe problem which forwards its calls to the system
476 : // function and then fe problem also calls displaced problem reinit. All
477 : // the call forwarding seems silly, but it does allow the displaced problem
478 : // to be easily kept in sync. However, the displaced problem has different
479 : // pointers for its own face info objects, etc, we can't just pass in fe
480 : // problem's face info down to the sub-problem -- centroids are different,
481 : // volumes are different, etc. To support displaced meshes correctly, we
482 : // need to be able to reinit the subproblems variables using its equivalent
483 : // face info object. How? See https://github.com/idaholab/moose/issues/15064
484 :
485 50632301 : for (auto var : _fv_vars)
486 27086703 : var->computeFaceValues(fi);
487 :
488 23545598 : _fe_problem.resizeMaterialData(Moose::MaterialDataType::FACE_MATERIAL_DATA, /*nqp=*/1, _tid);
489 :
490 23557166 : for (std::shared_ptr<MaterialBase> mat : _elem_face_mats)
491 : {
492 11568 : mat->setFaceInfo(fi);
493 11568 : mat->computeProperties();
494 : }
495 :
496 23545598 : if (fi.neighborPtr())
497 : {
498 21742782 : _fe_problem.resizeMaterialData(
499 : Moose::MaterialDataType::NEIGHBOR_MATERIAL_DATA, /*nqp=*/1, _tid);
500 :
501 21750606 : for (std::shared_ptr<MaterialBase> mat : _neigh_face_mats)
502 : {
503 7824 : mat->setFaceInfo(fi);
504 7824 : mat->computeProperties();
505 : }
506 : }
507 23545598 : }
508 :
509 : template <typename RangeType, typename AttributeTagType>
510 : void
511 23545598 : ComputeFVFluxThread<RangeType, AttributeTagType>::onFace(const FaceInfo & fi)
512 : {
513 23545598 : reinitVariables(fi);
514 :
515 53537076 : for (auto * const k : _fv_flux_kernels)
516 29991478 : compute(*k, fi);
517 23545598 : }
518 :
519 : template <typename RangeType, typename AttributeTagType>
520 : void
521 2057019 : ComputeFVFluxThread<RangeType, AttributeTagType>::onBoundary(const FaceInfo & fi, BoundaryID bnd_id)
522 : {
523 2057019 : if (_scaling_jacobian || _scaling_residual)
524 52 : return;
525 :
526 2056967 : std::vector<FVFluxBC *> bcs;
527 2056967 : _fe_problem.theWarehouse()
528 : .query()
529 4113934 : .template condition<AttribSysNum>(_nl_system_num)
530 2056967 : .template condition<AttribSystem>("FVFluxBC")
531 2056967 : .template condition<AttribDisplaced>(_on_displaced)
532 2056967 : .template condition<AttribThread>(_tid)
533 2056967 : .template condition<AttributeTagType>(_tags)
534 2056967 : .template condition<AttribBoundaries>(bnd_id)
535 2056967 : .queryInto(bcs);
536 :
537 2748824 : for (auto * const bc : bcs)
538 691867 : compute(*bc, fi);
539 :
540 2056957 : std::vector<FVInterfaceKernel *> iks;
541 2056957 : _fe_problem.theWarehouse()
542 : .query()
543 4113914 : .template condition<AttribSysNum>(_nl_system_num)
544 2056957 : .template condition<AttribSystem>("FVInterfaceKernel")
545 2056957 : .template condition<AttribDisplaced>(_on_displaced)
546 2056957 : .template condition<AttribThread>(_tid)
547 2056957 : .template condition<AttributeTagType>(_tags)
548 2056957 : .template condition<AttribBoundaries>(bnd_id)
549 2056957 : .queryInto(iks);
550 :
551 2070773 : for (auto * const ik : iks)
552 13816 : compute(*ik, fi);
553 2056967 : }
554 :
555 : template <typename RangeType, typename AttributeTagType>
556 : void
557 188783 : ComputeFVFluxThread<RangeType, AttributeTagType>::post()
558 : {
559 : // make sure any remaining cached residuals/jacobians get added/recorded
560 188783 : Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
561 188783 : addCached();
562 :
563 188783 : _fe_problem.clearActiveElementalMooseVariables(_tid);
564 188783 : _fe_problem.clearActiveMaterialProperties(_tid);
565 188783 : }
566 :
567 : template <typename RangeType, typename AttributeTagType>
568 : void
569 3458492 : ComputeFVFluxThread<RangeType, AttributeTagType>::finalizeContainers()
570 : {
571 : //
572 : // Finalize our variables
573 : //
574 6916984 : std::set_union(_elem_sub_fv_vars.begin(),
575 : _elem_sub_fv_vars.end(),
576 : _neigh_sub_fv_vars.begin(),
577 : _neigh_sub_fv_vars.end(),
578 3458492 : std::inserter(_fv_vars, _fv_vars.begin()));
579 :
580 : //
581 : // Finalize our kernels
582 : //
583 3458492 : const bool same_kernels = _elem_sub_fv_flux_kernels == _neigh_sub_fv_flux_kernels;
584 3458492 : if (same_kernels)
585 1691356 : _fv_flux_kernels = _elem_sub_fv_flux_kernels;
586 : else
587 3534272 : std::set_union(_elem_sub_fv_flux_kernels.begin(),
588 : _elem_sub_fv_flux_kernels.end(),
589 : _neigh_sub_fv_flux_kernels.begin(),
590 : _neigh_sub_fv_flux_kernels.end(),
591 1767136 : std::inserter(_fv_flux_kernels, _fv_flux_kernels.begin()));
592 3458492 : const bool need_ghosting = !same_kernels;
593 :
594 : //
595 : // Finalize our element face materials
596 : //
597 3458492 : _elem_face_mats = _elem_sub_elem_face_mats;
598 :
599 3458492 : if (need_ghosting)
600 : // Add any element face materials from the neighboring subdomain that do not exist on the
601 : // element subdomain
602 1767136 : for (std::shared_ptr<MaterialBase> neigh_sub_elem_face_mat : _neigh_sub_elem_face_mats)
603 0 : if (std::find(_elem_sub_elem_face_mats.begin(),
604 : _elem_sub_elem_face_mats.end(),
605 0 : neigh_sub_elem_face_mat) == _elem_sub_elem_face_mats.end())
606 0 : _elem_face_mats.push_back(neigh_sub_elem_face_mat);
607 :
608 : //
609 : // Finalize our neighbor face materials
610 : //
611 3458492 : _neigh_face_mats = _neigh_sub_neigh_face_mats;
612 :
613 3458492 : if (need_ghosting)
614 : // Add any neighbor face materials from the element subdomain that do not exist on the
615 : // neighbor subdomain
616 1769670 : for (std::shared_ptr<MaterialBase> elem_sub_neigh_face_mat : _elem_sub_neigh_face_mats)
617 2534 : if (std::find(_neigh_sub_neigh_face_mats.begin(),
618 : _neigh_sub_neigh_face_mats.end(),
619 5068 : elem_sub_neigh_face_mat) == _neigh_sub_neigh_face_mats.end())
620 2534 : _neigh_face_mats.push_back(elem_sub_neigh_face_mat);
621 3458492 : }
622 :
623 : template <typename RangeType, typename AttributeTagType>
624 : void
625 237498 : ComputeFVFluxThread<RangeType, AttributeTagType>::subdomainChanged()
626 : {
627 237498 : ThreadedFaceLoop<RangeType>::subdomainChanged();
628 :
629 : // Clear variables
630 237498 : _fv_vars.clear();
631 237498 : _elem_sub_fv_vars.clear();
632 :
633 : // Clear kernels
634 237498 : _fv_flux_kernels.clear();
635 237498 : _elem_sub_fv_flux_kernels.clear();
636 :
637 : // Clear element face materials
638 237498 : _elem_face_mats.clear();
639 237498 : _elem_sub_elem_face_mats.clear();
640 :
641 : // Clear neighbor face materials
642 237498 : _neigh_face_mats.clear();
643 237498 : _elem_sub_neigh_face_mats.clear();
644 :
645 : // TODO: do this for other relevant objects - like FV BCs, FV source term
646 : // kernels, etc. - but we don't need to add them for other types of objects
647 : // like FE or DG kernels because those kernels don't run in this loop. Do we
648 : // really want to integrate fv source kernels into this loop?
649 237498 : std::vector<FVFluxKernel *> kernels;
650 237498 : _fe_problem.theWarehouse()
651 : .query()
652 474996 : .template condition<AttribSysNum>(_nl_system_num)
653 237498 : .template condition<AttribSystem>("FVFluxKernel")
654 237498 : .template condition<AttribDisplaced>(_on_displaced)
655 237498 : .template condition<AttribSubdomains>(_subdomain)
656 237498 : .template condition<AttribThread>(_tid)
657 237498 : .template condition<AttributeTagType>(_tags)
658 237498 : .queryInto(kernels);
659 :
660 237498 : _elem_sub_fv_flux_kernels = std::set<FVFluxKernel *>(kernels.begin(), kernels.end());
661 :
662 554257 : for (auto * k : _elem_sub_fv_flux_kernels)
663 : {
664 : // TODO: we need a better way to do this - especially when FE objects begin to
665 : // couple to FV vars. This code should be refactored out into one place
666 : // where it is easy for users to say initialize all materials and
667 : // variables needed by these objects for me.
668 316759 : const auto & deps = k->getMooseVariableDependencies();
669 633518 : for (auto var : deps)
670 : {
671 : mooseAssert(var->isFV(),
672 : "We do not currently support coupling of FE variables into FV objects");
673 316759 : _elem_sub_fv_vars.insert(var);
674 : }
675 : }
676 :
677 237498 : _fe_problem.getFVMatsAndDependencies(
678 237498 : _subdomain, _elem_sub_elem_face_mats, _elem_sub_neigh_face_mats, _elem_sub_fv_vars, _tid);
679 :
680 237498 : finalizeContainers();
681 237498 : }
682 :
683 : template <typename RangeType, typename AttributeTagType>
684 : void
685 3220994 : ComputeFVFluxThread<RangeType, AttributeTagType>::neighborSubdomainChanged()
686 : {
687 3220994 : ThreadedFaceLoop<RangeType>::neighborSubdomainChanged();
688 :
689 : // Clear variables
690 3220994 : _fv_vars.clear();
691 3220994 : _neigh_sub_fv_vars.clear();
692 :
693 : // Clear kernels
694 3220994 : _fv_flux_kernels.clear();
695 3220994 : _neigh_sub_fv_flux_kernels.clear();
696 :
697 : // Clear element face materials
698 3220994 : _elem_face_mats.clear();
699 3220994 : _neigh_sub_elem_face_mats.clear();
700 :
701 : // Clear neighbor face materials
702 3220994 : _neigh_face_mats.clear();
703 3220994 : _neigh_sub_neigh_face_mats.clear();
704 :
705 : // TODO: do this for other relevant objects - like FV BCs, FV source term
706 : // kernels, etc. - but we don't need to add them for other types of objects
707 : // like FE or DG kernels because those kernels don't run in this loop. Do we
708 : // really want to integrate fv source kernels into this loop?
709 3220994 : std::vector<FVFluxKernel *> kernels;
710 3220994 : _fe_problem.theWarehouse()
711 : .query()
712 6441988 : .template condition<AttribSysNum>(_nl_system_num)
713 3220994 : .template condition<AttribSystem>("FVFluxKernel")
714 3220994 : .template condition<AttribDisplaced>(_on_displaced)
715 3220994 : .template condition<AttribSubdomains>(_neighbor_subdomain)
716 3220994 : .template condition<AttribThread>(_tid)
717 3220994 : .template condition<AttributeTagType>(_tags)
718 3220994 : .queryInto(kernels);
719 :
720 3220994 : _neigh_sub_fv_flux_kernels = std::set<FVFluxKernel *>(kernels.begin(), kernels.end());
721 :
722 5024031 : for (auto * k : _neigh_sub_fv_flux_kernels)
723 : {
724 : // TODO: we need a better way to do this - especially when FE objects begin to
725 : // couple to FV vars. This code should be refactored out into one place
726 : // where it is easy for users to say initialize all materials and
727 : // variables needed by these objects for me.
728 1803037 : const auto & deps = k->getMooseVariableDependencies();
729 3606074 : for (auto var : deps)
730 : {
731 : mooseAssert(var->isFV(),
732 : "We do not currently support coupling of FE variables into FV objects");
733 1803037 : _neigh_sub_fv_vars.insert(var);
734 : }
735 : }
736 :
737 3220994 : _fe_problem.getFVMatsAndDependencies(_neighbor_subdomain,
738 3220994 : _neigh_sub_elem_face_mats,
739 3220994 : _neigh_sub_neigh_face_mats,
740 3220994 : _neigh_sub_fv_vars,
741 : _tid);
742 :
743 3220994 : finalizeContainers();
744 3220994 : }
745 :
746 : template <typename RangeType, typename AttributeTagType>
747 : void
748 188793 : ComputeFVFluxThread<RangeType, AttributeTagType>::pre()
749 : {
750 188793 : std::vector<FVFluxBC *> bcs;
751 188793 : _fe_problem.theWarehouse()
752 : .query()
753 377586 : .template condition<AttribSysNum>(_nl_system_num)
754 188793 : .template condition<AttribSystem>("FVFluxBC")
755 188793 : .template condition<AttribDisplaced>(_on_displaced)
756 188793 : .template condition<AttribThread>(_tid)
757 188793 : .template condition<AttributeTagType>(_tags)
758 188793 : .queryInto(bcs);
759 :
760 188793 : std::vector<FVInterfaceKernel *> iks;
761 188793 : _fe_problem.theWarehouse()
762 : .query()
763 377586 : .template condition<AttribSysNum>(_nl_system_num)
764 188793 : .template condition<AttribSystem>("FVInterfaceKernel")
765 188793 : .template condition<AttribDisplaced>(_on_displaced)
766 188793 : .template condition<AttribThread>(_tid)
767 188793 : .template condition<AttributeTagType>(_tags)
768 188793 : .queryInto(iks);
769 :
770 188793 : std::vector<FVFluxKernel *> kernels;
771 188793 : _fe_problem.theWarehouse()
772 : .query()
773 377586 : .template condition<AttribSysNum>(_nl_system_num)
774 188793 : .template condition<AttribSystem>("FVFluxKernel")
775 188793 : .template condition<AttribDisplaced>(_on_displaced)
776 188793 : .template condition<AttribThread>(_tid)
777 188793 : .template condition<AttributeTagType>(_tags)
778 188793 : .queryInto(kernels);
779 :
780 417043 : for (auto * const bc : bcs)
781 228250 : setup(*bc);
782 204484 : for (auto * const ik : iks)
783 15691 : setup(*ik);
784 487404 : for (auto * const kernel : kernels)
785 298611 : setup(*kernel);
786 :
787 : // Clear variables
788 188793 : _fv_vars.clear();
789 188793 : _elem_sub_fv_vars.clear();
790 188793 : _neigh_sub_fv_vars.clear();
791 :
792 : // Clear kernels
793 188793 : _fv_flux_kernels.clear();
794 188793 : _elem_sub_fv_flux_kernels.clear();
795 188793 : _neigh_sub_fv_flux_kernels.clear();
796 :
797 : // Clear element face materials
798 188793 : _elem_face_mats.clear();
799 188793 : _elem_sub_elem_face_mats.clear();
800 188793 : _neigh_sub_elem_face_mats.clear();
801 :
802 : // Clear neighbor face materials
803 188793 : _neigh_face_mats.clear();
804 188793 : _elem_sub_neigh_face_mats.clear();
805 188793 : _neigh_sub_neigh_face_mats.clear();
806 188793 : }
807 :
808 : template <typename RangeType>
809 : class ComputeFVFluxResidualThread : public ComputeFVFluxThread<RangeType, AttribVectorTags>
810 : {
811 : public:
812 : ComputeFVFluxResidualThread(FEProblemBase & fe_problem,
813 : const unsigned int nl_system_num,
814 : const std::set<TagID> & tags,
815 : bool on_displaced);
816 :
817 : ComputeFVFluxResidualThread(ComputeFVFluxResidualThread & x, Threads::split split);
818 :
819 : protected:
820 : using ComputeFVFluxThread<RangeType, AttribVectorTags>::_fe_problem;
821 : using ComputeFVFluxThread<RangeType, AttribVectorTags>::_tid;
822 : using ComputeFVFluxThread<RangeType, AttribVectorTags>::_nl_system_num;
823 : using ComputeFVFluxThread<RangeType, AttribVectorTags>::_on_displaced;
824 : using ComputeFVFluxThread<RangeType, AttribVectorTags>::_num_cached;
825 :
826 : void postFace(const FaceInfo & fi) override;
827 23212036 : void compute(FVFaceResidualObject & ro, const FaceInfo & fi) override { ro.computeResidual(fi); }
828 443548 : void setup(SetupInterface & obj) override { obj.residualSetup(); }
829 150559 : void addCached() override { this->_subproblem.SubProblem::addCachedResidual(_tid); }
830 : };
831 :
832 : template <typename RangeType>
833 144832 : ComputeFVFluxResidualThread<RangeType>::ComputeFVFluxResidualThread(
834 : FEProblemBase & fe_problem,
835 : const unsigned int nl_system_num,
836 : const std::set<TagID> & tags,
837 : bool on_displaced)
838 144832 : : ComputeFVFluxThread<RangeType, AttribVectorTags>(fe_problem, nl_system_num, tags, on_displaced)
839 : {
840 144832 : }
841 :
842 : template <typename RangeType>
843 6317 : ComputeFVFluxResidualThread<RangeType>::ComputeFVFluxResidualThread(ComputeFVFluxResidualThread & x,
844 : Threads::split split)
845 6317 : : ComputeFVFluxThread<RangeType, AttribVectorTags>(x, split)
846 : {
847 6317 : }
848 :
849 : template <typename RangeType>
850 : void
851 35251404 : ComputeFVFluxResidualThread<RangeType>::postFace(const FaceInfo & /*fi*/)
852 : {
853 35251404 : _num_cached++;
854 : // TODO: do we need both calls - or just the neighbor one? - confirm this
855 35251404 : this->_subproblem.SubProblem::cacheResidual(_tid);
856 35251404 : this->_subproblem.SubProblem::cacheResidualNeighbor(_tid);
857 :
858 35251404 : this->_subproblem.SubProblem::addCachedResidual(_tid);
859 35251404 : }
860 :
861 : template <typename RangeType>
862 : class ComputeFVFluxJacobianThread : public ComputeFVFluxThread<RangeType, AttribMatrixTags>
863 : {
864 : public:
865 : ComputeFVFluxJacobianThread(FEProblemBase & fe_problem,
866 : const unsigned int nl_system_num,
867 : const std::set<TagID> & tags,
868 : bool on_displaced);
869 :
870 : ComputeFVFluxJacobianThread(ComputeFVFluxJacobianThread & x, Threads::split split);
871 :
872 : protected:
873 : using ComputeFVFluxThread<RangeType, AttribMatrixTags>::_fe_problem;
874 : using ComputeFVFluxThread<RangeType, AttribMatrixTags>::_tid;
875 : using ComputeFVFluxThread<RangeType, AttribMatrixTags>::_num_cached;
876 :
877 : void postFace(const FaceInfo & fi) override;
878 7328047 : void compute(FVFaceResidualObject & ro, const FaceInfo & fi) override { ro.computeJacobian(fi); }
879 97736 : void setup(SetupInterface & obj) override { obj.jacobianSetup(); }
880 37164 : void addCached() override { this->_subproblem.SubProblem::addCachedJacobian(_tid); }
881 : };
882 :
883 : template <typename RangeType>
884 35190 : ComputeFVFluxJacobianThread<RangeType>::ComputeFVFluxJacobianThread(
885 : FEProblemBase & fe_problem,
886 : const unsigned int nl_system_num,
887 : const std::set<TagID> & tags,
888 : bool on_displaced)
889 35190 : : ComputeFVFluxThread<RangeType, AttribMatrixTags>(fe_problem, nl_system_num, tags, on_displaced)
890 : {
891 35190 : }
892 :
893 : template <typename RangeType>
894 2096 : ComputeFVFluxJacobianThread<RangeType>::ComputeFVFluxJacobianThread(ComputeFVFluxJacobianThread & x,
895 : Threads::split split)
896 2096 : : ComputeFVFluxThread<RangeType, AttribMatrixTags>(x, split)
897 : {
898 2096 : }
899 :
900 : template <typename RangeType>
901 : void
902 11525122 : ComputeFVFluxJacobianThread<RangeType>::postFace(const FaceInfo & /*fi*/)
903 : {
904 11525122 : _num_cached++;
905 : // FV objects do not store their Jacobian data in TaggingInterface data structures; instead they
906 : // add it into the cache directly. So we do not need the same cache calls for the Jacobian that we
907 : // need for the residual
908 11525122 : if (_num_cached % 20 == 0)
909 : {
910 560316 : Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
911 560316 : this->_subproblem.SubProblem::addCachedJacobian(_tid);
912 560316 : }
913 11525122 : }
914 :
915 : template <typename RangeType>
916 : class ComputeFVFluxRJThread : public ComputeFVFluxThread<RangeType, AttribVectorTags>
917 : {
918 : public:
919 : ComputeFVFluxRJThread(FEProblemBase & fe_problem,
920 : const unsigned int nl_system_num,
921 : const std::set<TagID> & vector_tags,
922 : const std::set<TagID> & /*matrix_tags*/,
923 : bool on_displaced);
924 :
925 : ComputeFVFluxRJThread(ComputeFVFluxRJThread & x, Threads::split split);
926 :
927 : protected:
928 : using ComputeFVFluxThread<RangeType, AttribVectorTags>::_fe_problem;
929 : using ComputeFVFluxThread<RangeType, AttribVectorTags>::_tid;
930 : using ComputeFVFluxThread<RangeType, AttribVectorTags>::_num_cached;
931 :
932 : void postFace(const FaceInfo & fi) override;
933 157078 : void compute(FVFaceResidualObject & ro, const FaceInfo & fi) override
934 : {
935 157078 : ro.computeResidualAndJacobian(fi);
936 157078 : }
937 1268 : void setup(SetupInterface & obj) override { obj.residualSetup(); }
938 1060 : void addCached() override
939 : {
940 1060 : this->_subproblem.SubProblem::addCachedResidual(_tid);
941 1060 : this->_subproblem.SubProblem::addCachedJacobian(_tid);
942 1060 : }
943 : };
944 :
945 : template <typename RangeType>
946 1358 : ComputeFVFluxRJThread<RangeType>::ComputeFVFluxRJThread(FEProblemBase & fe_problem,
947 : const unsigned int nl_system_num,
948 : const std::set<TagID> & vector_tags,
949 : const std::set<TagID> & /*matrix_tags*/,
950 : bool on_displaced)
951 : : ComputeFVFluxThread<RangeType, AttribVectorTags>(
952 1358 : fe_problem, nl_system_num, vector_tags, on_displaced)
953 : {
954 1358 : }
955 :
956 : template <typename RangeType>
957 143 : ComputeFVFluxRJThread<RangeType>::ComputeFVFluxRJThread(ComputeFVFluxRJThread & x,
958 : Threads::split split)
959 143 : : ComputeFVFluxThread<RangeType, AttribVectorTags>(x, split)
960 : {
961 143 : }
962 :
963 : template <typename RangeType>
964 : void
965 314660 : ComputeFVFluxRJThread<RangeType>::postFace(const FaceInfo & /*fi*/)
966 : {
967 314660 : _num_cached++;
968 : // TODO: do we need both calls - or just the neighbor one? - confirm this
969 314660 : this->_subproblem.SubProblem::cacheResidual(_tid);
970 314660 : this->_subproblem.SubProblem::cacheResidualNeighbor(_tid);
971 : // FV objects do not store their Jacobian data in TaggingInterface data structures; instead they
972 : // add it into the cache directly. So we do not need the same cache calls for the Jacobian that we
973 : // need for the residual
974 314660 : if (_num_cached % 20 == 0)
975 : {
976 15475 : Threads::spin_mutex::scoped_lock lock(Threads::spin_mtx);
977 15475 : this->_subproblem.SubProblem::addCachedResidual(_tid);
978 15475 : this->_subproblem.SubProblem::addCachedJacobian(_tid);
979 15475 : }
980 314660 : }
981 :
982 : template <typename RangeType, typename AttributeTagType>
983 : void
984 188793 : ComputeFVFluxThread<RangeType, AttributeTagType>::printGeneralExecutionInformation() const
985 : {
986 188793 : if (!_fe_problem.shouldPrintExecution(_tid))
987 188737 : return;
988 56 : auto & console = _fe_problem.console();
989 56 : auto execute_on = _fe_problem.getCurrentExecuteOnFlag();
990 56 : console << "[DBG] Beginning finite volume flux objects loop on " << execute_on << std::endl;
991 56 : mooseDoOnce(console << "[DBG] Loop on faces (FaceInfo), objects ordered on each face: "
992 : << std::endl;
993 : console << "[DBG] - (finite volume) flux kernels" << std::endl;
994 : console << "[DBG] - (finite volume) flux boundary conditions" << std::endl;
995 : console << "[DBG] - (finite volume) interface kernels" << std::endl;);
996 56 : }
997 :
998 : template <typename RangeType, typename AttributeTagType>
999 : void
1000 3458492 : ComputeFVFluxThread<RangeType, AttributeTagType>::printBlockExecutionInformation() const
1001 : {
1002 3458492 : if (!_fe_problem.shouldPrintExecution(_tid) || !_fv_flux_kernels.size())
1003 3458324 : return;
1004 :
1005 : // Print the location of the execution
1006 168 : const auto block_pair = std::make_pair(_subdomain, _neighbor_subdomain);
1007 168 : const auto block_pair_names = this->getBlockNames();
1008 168 : if (_blocks_exec_printed.count(block_pair))
1009 0 : return;
1010 168 : auto & console = _fe_problem.console();
1011 168 : console << "[DBG] Flux kernels on block " << block_pair_names.first;
1012 168 : if (_neighbor_subdomain != Moose::INVALID_BLOCK_ID)
1013 112 : console << " and neighbor " << block_pair_names.second << std::endl;
1014 : else
1015 56 : console << " with no neighbor block" << std::endl;
1016 :
1017 : // Print the list of objects
1018 168 : std::vector<MooseObject *> fv_flux_kernels;
1019 504 : for (const auto & fv_kernel : _fv_flux_kernels)
1020 336 : fv_flux_kernels.push_back(dynamic_cast<MooseObject *>(fv_kernel));
1021 336 : console << ConsoleUtils::formatString(ConsoleUtils::mooseObjectVectorToString(fv_flux_kernels),
1022 : "[DBG]")
1023 168 : << std::endl;
1024 168 : _blocks_exec_printed.insert(block_pair);
1025 168 : }
1026 :
1027 : template <typename RangeType, typename AttributeTagType>
1028 : void
1029 2057019 : ComputeFVFluxThread<RangeType, AttributeTagType>::printBoundaryExecutionInformation(
1030 : const BoundaryID bnd_id) const
1031 : {
1032 2057019 : if (!_fe_problem.shouldPrintExecution(_tid))
1033 2056859 : return;
1034 160 : if (_boundaries_exec_printed.count(bnd_id))
1035 0 : return;
1036 160 : std::vector<MooseObject *> bcs;
1037 160 : _fe_problem.theWarehouse()
1038 : .query()
1039 320 : .template condition<AttribSystem>("FVFluxBC")
1040 160 : .template condition<AttribDisplaced>(_on_displaced)
1041 160 : .template condition<AttribThread>(_tid)
1042 160 : .template condition<AttributeTagType>(_tags)
1043 160 : .template condition<AttribBoundaries>(bnd_id)
1044 160 : .queryInto(bcs);
1045 :
1046 160 : std::vector<MooseObject *> iks;
1047 160 : _fe_problem.theWarehouse()
1048 : .query()
1049 320 : .template condition<AttribSystem>("FVInterfaceKernel")
1050 160 : .template condition<AttribDisplaced>(_on_displaced)
1051 160 : .template condition<AttribThread>(_tid)
1052 160 : .template condition<AttributeTagType>(_tags)
1053 160 : .template condition<AttribBoundaries>(bnd_id)
1054 160 : .queryInto(iks);
1055 :
1056 160 : const auto block_pair_names = this->getBlockNames();
1057 160 : if (bcs.size())
1058 : {
1059 112 : auto & console = _fe_problem.console();
1060 112 : console << "[DBG] FVBCs on boundary " << bnd_id << " between subdomain "
1061 112 : << block_pair_names.first;
1062 112 : if (_neighbor_subdomain != Moose::INVALID_BLOCK_ID)
1063 56 : console << " and neighbor " << block_pair_names.second << std::endl;
1064 : else
1065 56 : console << " and the exterior of the mesh " << std::endl;
1066 112 : const std::string fv_bcs = ConsoleUtils::mooseObjectVectorToString(bcs);
1067 112 : console << ConsoleUtils::formatString(fv_bcs, "[DBG]") << std::endl;
1068 112 : }
1069 160 : if (iks.size())
1070 : {
1071 56 : auto & console = _fe_problem.console();
1072 56 : console << "[DBG] FVIKs on boundary " << bnd_id << " between subdomain "
1073 56 : << block_pair_names.first;
1074 56 : if (_neighbor_subdomain != Moose::INVALID_BLOCK_ID)
1075 56 : console << " and neighbor " << block_pair_names.second << std::endl;
1076 : else
1077 0 : console << " and the exterior of the mesh " << std::endl;
1078 56 : const std::string fv_iks = ConsoleUtils::mooseObjectVectorToString(iks);
1079 56 : console << ConsoleUtils::formatString(fv_iks, "[DBG]") << std::endl;
1080 56 : }
1081 160 : _boundaries_exec_printed.insert(bnd_id);
1082 160 : }
1083 :
1084 : template <typename RangeType, typename AttributeTagType>
1085 : std::pair<SubdomainName, SubdomainName>
1086 328 : ComputeFVFluxThread<RangeType, AttributeTagType>::getBlockNames() const
1087 : {
1088 328 : auto block_names = std::make_pair(_mesh.getSubdomainName(_subdomain),
1089 328 : _mesh.getSubdomainName(_neighbor_subdomain));
1090 328 : if (block_names.first == "")
1091 328 : block_names.first = Moose::stringify(_subdomain);
1092 328 : if (block_names.second == "")
1093 328 : block_names.second = Moose::stringify(_neighbor_subdomain);
1094 656 : return block_names;
1095 328 : }
|