PorousFlowAdvectiveFluxCalculatorBase.C
//* This file is part of the MOOSE framework
//* https://www.mooseframework.org
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "PorousFlowAdvectiveFluxCalculatorBase.h"
#include "Assembly.h"
#include "libmesh/string_to_enum.h"
#include "libmesh/parallel_sync.h"

template <>
InputParameters
validParams<PorousFlowAdvectiveFluxCalculatorBase>()
{
  InputParameters params = validParams<AdvectiveFluxCalculatorBase>();
  params.addClassDescription(
      "Base class to compute the advective flux of fluid in PorousFlow situations. The velocity "
      "is U * (-permeability * (grad(P) - density * gravity)), while derived classes define U. "
      "The Kuzmin-Turek FEM-TVD multidimensional stabilization scheme is used");
  params.addRequiredParam<RealVectorValue>("gravity",
                                           "Gravitational acceleration vector downwards (m/s^2)");
  params.addRequiredParam<UserObjectName>(
      "PorousFlowDictator", "The UserObject that holds the list of PorousFlow variable names");
  params.addParam<unsigned int>(
      "phase", 0, "The index corresponding to the phase for this UserObject");
  MooseEnum families("LAGRANGE MONOMIAL HERMITE SCALAR HIERARCHIC CLOUGH XYZ SZABAB BERNSTEIN");
  params.addParam<MooseEnum>(
      "fe_family",
      families,
      "FE Family to use (eg Lagrange). You only need to specify this if your porous_flow_vars in "
      "your PorousFlowDictator have different families or orders");
  MooseEnum orders("CONSTANT FIRST SECOND THIRD FOURTH");
  params.addParam<MooseEnum>(
      "fe_order",
      orders,
      "FE Order to use (eg First). You only need to specify this if your porous_flow_vars in your "
      "PorousFlowDictator have different families or orders");
  return params;
}
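
As the class description says, derived classes define the multiplier U. The following is a minimal sketch, not part of this file, of what such a subclass could look like: the class name PorousFlowAdvectiveFluxCalculatorUnity is invented for illustration, and the computeU signature is an assumption about the advective-flux base class; only computedU_dvar is confirmed as pure virtual by this file.

// Hedged sketch only: a hypothetical subclass advecting with U = 1 (a plain Darcy flux).
#include "PorousFlowAdvectiveFluxCalculatorBase.h"

class PorousFlowAdvectiveFluxCalculatorUnity;

template <>
InputParameters
validParams<PorousFlowAdvectiveFluxCalculatorUnity>()
{
  InputParameters params = validParams<PorousFlowAdvectiveFluxCalculatorBase>();
  params.addClassDescription("Illustrative only: advective flux with U = 1");
  return params;
}

class PorousFlowAdvectiveFluxCalculatorUnity : public PorousFlowAdvectiveFluxCalculatorBase
{
public:
  PorousFlowAdvectiveFluxCalculatorUnity(const InputParameters & parameters)
    : PorousFlowAdvectiveFluxCalculatorBase(parameters)
  {
  }

protected:
  // Assumed hook: the advective-flux base is taken to declare Real computeU(unsigned i) const
  virtual Real computeU(unsigned /*i*/) const override { return 1.0; }

  // Pure virtual in this base: d(U)/d(porous_flow_variable), which vanishes for constant U
  virtual Real computedU_dvar(unsigned /*i*/, unsigned /*pvar*/) const override { return 0.0; }
};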

PorousFlowAdvectiveFluxCalculatorBase::PorousFlowAdvectiveFluxCalculatorBase(
    const InputParameters & parameters)
  : AdvectiveFluxCalculatorBase(parameters),
    _dictator(getUserObject<PorousFlowDictator>("PorousFlowDictator")),
    _num_vars(_dictator.numVariables()),
    _gravity(getParam<RealVectorValue>("gravity")),
    _phase(getParam<unsigned int>("phase")),
    _permeability(getMaterialProperty<RealTensorValue>("PorousFlow_permeability_qp")),
    _dpermeability_dvar(
        getMaterialProperty<std::vector<RealTensorValue>>("dPorousFlow_permeability_qp_dvar")),
    _dpermeability_dgradvar(getMaterialProperty<std::vector<std::vector<RealTensorValue>>>(
        "dPorousFlow_permeability_qp_dgradvar")),
    _fluid_density_qp(getMaterialProperty<std::vector<Real>>("PorousFlow_fluid_phase_density_qp")),
    _dfluid_density_qp_dvar(getMaterialProperty<std::vector<std::vector<Real>>>(
        "dPorousFlow_fluid_phase_density_qp_dvar")),
    _grad_p(getMaterialProperty<std::vector<RealGradient>>("PorousFlow_grad_porepressure_qp")),
    _dgrad_p_dgrad_var(getMaterialProperty<std::vector<std::vector<Real>>>(
        "dPorousFlow_grad_porepressure_qp_dgradvar")),
    _dgrad_p_dvar(getMaterialProperty<std::vector<std::vector<RealGradient>>>(
        "dPorousFlow_grad_porepressure_qp_dvar")),
    _fe_type(isParamValid("fe_family") && isParamValid("fe_order")
                 ? FEType(Utility::string_to_enum<Order>(getParam<MooseEnum>("fe_order")),
                          Utility::string_to_enum<FEFamily>(getParam<MooseEnum>("fe_family")))
                 : _dictator.feType()),
    _phi(_assembly.fePhi<Real>(_fe_type)),
    _grad_phi(_assembly.feGradPhi<Real>(_fe_type)),
    _du_dvar(),
    _du_dvar_computed_by_thread(),
    _dkij_dvar(),
    _dflux_out_dvars(),
    _triples_to_receive(),
    _triples_to_send()
{
  if (_phase >= _dictator.numPhases())
    paramError("phase",
               "Phase number entered is greater than the number of phases specified in the "
               "Dictator. Remember that indexing starts at 0");

  if (isParamValid("fe_family") && !isParamValid("fe_order"))
    paramError("fe_order", "If you specify fe_family you must also specify fe_order");
  if (isParamValid("fe_order") && !isParamValid("fe_family"))
    paramError("fe_family", "If you specify fe_order you must also specify fe_family");
  if (!_dictator.consistentFEType() && !isParamValid("fe_family"))
    paramError("fe_family",
               "The PorousFlowDictator cannot determine the appropriate FE type to use because "
               "your porous_flow_vars are of different types. You must specify the appropriate "
               "fe_family and fe_order to use.");
}

Real
PorousFlowAdvectiveFluxCalculatorBase::computeVelocity(unsigned i, unsigned j, unsigned qp) const
{
  // The following is but one choice for PorousFlow situations
  // If you change this, you will probably have to change
  // - the derivative in executeOnElement
  // - computeU
  // - computedU_dvar
  return -_grad_phi[i][qp] *
         (_permeability[qp] * (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity)) *
         _phi[j][qp];
}
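
In the notation of the class description, this quadrature-point integrand contributes (a sketch: psi are the shape functions _phi, k the permeability tensor, P the phase porepressure, rho the phase density; the base class supplies the _JxW * _coord quadrature weight):

\[
\Delta K_{ij} = -\int_\Omega \nabla\psi_i \cdot \bigl[ k \, (\nabla P - \rho\,\mathbf{g}) \bigr] \, \psi_j \, \mathrm{d}\Omega .
\]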

void
PorousFlowAdvectiveFluxCalculatorBase::executeOnElement(
    dof_id_type global_i, dof_id_type global_j, unsigned local_i, unsigned local_j, unsigned qp)
{
  AdvectiveFluxCalculatorBase::executeOnElement(global_i, global_j, local_i, local_j, qp);
  const dof_id_type sequential_i = _connections.sequentialID(global_i);
  const unsigned j = _connections.indexOfGlobalConnection(global_i, global_j);

  // compute d(Kij)/d(porous_flow_variables)
  for (unsigned local_k = 0; local_k < _current_elem->n_nodes(); ++local_k)
  {
    const dof_id_type global_k = _current_elem->node_id(local_k);
    for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
    {
      RealVectorValue deriv = _dpermeability_dvar[qp][pvar] * _phi[local_k][qp] *
                              (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity);
      for (unsigned i = 0; i < LIBMESH_DIM; ++i)
        deriv += _dpermeability_dgradvar[qp][i][pvar] * _grad_phi[local_k][qp](i) *
                 (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity);
      deriv += _permeability[qp] *
               (_grad_phi[local_k][qp] * _dgrad_p_dgrad_var[qp][_phase][pvar] -
                _phi[local_k][qp] * _dfluid_density_qp_dvar[qp][_phase][pvar] * _gravity);
      deriv += _permeability[qp] * (_dgrad_p_dvar[qp][_phase][pvar] * _phi[local_k][qp]);
      _dkij_dvar[sequential_i][j][global_k][pvar] +=
          _JxW[qp] * _coord[qp] * (-_grad_phi[local_i][qp] * deriv * _phi[local_j][qp]);
    }
  }
}
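
The deriv accumulation above is the product rule applied to the K_ij integrand, sketched here for the derivative with respect to the PorousFlow variable v_a at node k:

\[
\Delta \frac{\partial K_{ij}}{\partial v_a^{(k)}} =
-\int_\Omega \nabla\psi_i \cdot \Bigl[
\frac{\partial k}{\partial v_a}\,\psi_k \,(\nabla P - \rho\,\mathbf{g})
+ \sum_{d} \frac{\partial k}{\partial (\nabla v_a)_d}\,(\nabla\psi_k)_d \,(\nabla P - \rho\,\mathbf{g})
+ k \Bigl( \frac{\partial \nabla P}{\partial \nabla v_a}\,\nabla\psi_k
+ \frac{\partial \nabla P}{\partial v_a}\,\psi_k
- \frac{\partial \rho}{\partial v_a}\,\psi_k\,\mathbf{g} \Bigr)
\Bigr] \, \psi_j \, \mathrm{d}\Omega .
\]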

void
PorousFlowAdvectiveFluxCalculatorBase::timestepSetup()
{
  const bool resizing_was_needed =
      _resizing_needed; // _resizing_needed gets set to false at the end of
                        // AdvectiveFluxCalculatorBase::timestepSetup()
  AdvectiveFluxCalculatorBase::timestepSetup();

  // clear and appropriately size all the derivative info
  // d(U)/d(porous_flow_variables) and
  // d(Kij)/d(porous_flow_variables) and
  // d(flux_out)/d(porous_flow_variables)
  if (resizing_was_needed)
  {
    const std::size_t num_nodes = _connections.numNodes();
    _du_dvar.assign(num_nodes, std::vector<Real>(_num_vars, 0.0));
    _du_dvar_computed_by_thread.assign(num_nodes, false);
    _dflux_out_dvars.assign(num_nodes, std::map<dof_id_type, std::vector<Real>>());
    _dkij_dvar.resize(num_nodes);
    for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
    {
      const std::vector<dof_id_type> con_i =
          _connections.globalConnectionsToSequentialID(sequential_i);
      const std::size_t num_con_i = con_i.size();
      _dkij_dvar[sequential_i].assign(num_con_i, std::map<dof_id_type, std::vector<Real>>());
      for (unsigned j = 0; j < num_con_i; ++j)
        for (const auto & global_neighbor_to_i : con_i)
          _dkij_dvar[sequential_i][j][global_neighbor_to_i] = std::vector<Real>(_num_vars, 0.0);
    }
  }
}
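
For orientation, here is a minimal standalone sketch of the container shape being sized here; the Real and dof_id_type aliases and the toy connectivity are stand-ins for the MOOSE/libMesh types, purely for illustration:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Local stand-ins for the framework typedefs (assumption, illustration only)
using Real = double;
using dof_id_type = std::uint64_t;

int main()
{
  const std::size_t num_nodes = 2; // toy mesh: sequential nodes 0 and 1
  const unsigned num_vars = 3;     // pretend there are 3 PorousFlow variables

  // _dkij_dvar[sequential_i][j][global_k][pvar], exactly as sized in timestepSetup()
  std::vector<std::vector<std::map<dof_id_type, std::vector<Real>>>> dkij_dvar(num_nodes);
  const std::vector<std::vector<dof_id_type>> connections = {{0, 1}, {0, 1}}; // toy graph
  for (dof_id_type seq_i = 0; seq_i < num_nodes; ++seq_i)
  {
    const auto & con_i = connections[seq_i];
    dkij_dvar[seq_i].assign(con_i.size(), std::map<dof_id_type, std::vector<Real>>());
    for (unsigned j = 0; j < con_i.size(); ++j)
      for (const auto global_k : con_i)
        dkij_dvar[seq_i][j][global_k] = std::vector<Real>(num_vars, 0.0);
  }

  // d(K[sequential 0][connection 1])/d(porous_flow_variable 2 at global node 1)
  dkij_dvar[0][1][1][2] += 42.0;
  std::cout << dkij_dvar[0][1][1][2] << "\n"; // prints 42
  return 0;
}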

void
PorousFlowAdvectiveFluxCalculatorBase::initialize()
{
  AdvectiveFluxCalculatorBase::initialize();
  const std::size_t num_nodes = _connections.numNodes();
  _du_dvar_computed_by_thread.assign(num_nodes, false);
  for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
  {
    const std::vector<dof_id_type> con_i =
        _connections.globalConnectionsToSequentialID(sequential_i);
    const std::size_t num_con_i = con_i.size();
    for (unsigned j = 0; j < num_con_i; ++j)
      for (const auto & global_neighbor_to_i : con_i)
        _dkij_dvar[sequential_i][j][global_neighbor_to_i] = std::vector<Real>(_num_vars, 0.0);
  }
}

void
PorousFlowAdvectiveFluxCalculatorBase::execute()
{
  AdvectiveFluxCalculatorBase::execute();

  // compute d(U)/d(porous_flow_variables) for nodes in _current_elem and for this
  // execution thread. In threadJoin all these computations get gathered
  // using _du_dvar_computed_by_thread
  for (unsigned i = 0; i < _current_elem->n_nodes(); ++i)
  {
    const dof_id_type global_i = _current_elem->node_id(i);
    const dof_id_type sequential_i = _connections.sequentialID(global_i);
    if (_du_dvar_computed_by_thread[sequential_i])
      continue;
    for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
      _du_dvar[sequential_i][pvar] = computedU_dvar(i, pvar);
    _du_dvar_computed_by_thread[sequential_i] = true;
  }
}

void
PorousFlowAdvectiveFluxCalculatorBase::threadJoin(const UserObject & uo)
{
  AdvectiveFluxCalculatorBase::threadJoin(uo);
  const PorousFlowAdvectiveFluxCalculatorBase & pfafc =
      static_cast<const PorousFlowAdvectiveFluxCalculatorBase &>(uo);
  // add the values of _dkij_dvar computed by different threads
  const std::size_t num_nodes = _connections.numNodes();
  for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
  {
    const std::vector<dof_id_type> con_i =
        _connections.globalConnectionsToSequentialID(sequential_i);
    const std::size_t num_con_i = con_i.size();
    for (unsigned j = 0; j < num_con_i; ++j)
      for (const auto & global_derivs : pfafc._dkij_dvar[sequential_i][j])
        for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
          _dkij_dvar[sequential_i][j][global_derivs.first][pvar] += global_derivs.second[pvar];
  }

  // gather the values of _du_dvar computed by different threads
  for (dof_id_type sequential_i = 0; sequential_i < _number_of_nodes; ++sequential_i)
    if (!_du_dvar_computed_by_thread[sequential_i] &&
        pfafc._du_dvar_computed_by_thread[sequential_i])
      _du_dvar[sequential_i] = pfafc._du_dvar[sequential_i];
}
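
The per-thread gather relies on each thread copy flagging which sequential nodes it has handled. A serial toy of that merge rule, with invented data, runnable on its own:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
  const std::size_t num_nodes = 4;

  // "this" thread computed sequential nodes 0 and 2; the joined thread computed 1 and 3
  std::vector<bool> mine_computed = {true, false, true, false};
  const std::vector<bool> theirs_computed = {false, true, false, true};
  std::vector<std::vector<double>> mine = {{10.0}, {0.0}, {30.0}, {0.0}};
  const std::vector<std::vector<double>> theirs = {{0.0}, {20.0}, {0.0}, {40.0}};

  // the gather rule used in threadJoin: copy only where the other thread computed and we did not
  for (std::size_t n = 0; n < num_nodes; ++n)
    if (!mine_computed[n] && theirs_computed[n])
      mine[n] = theirs[n];

  for (const auto & row : mine)
    std::cout << row[0] << " "; // prints: 10 20 30 40
  std::cout << "\n";
  return 0;
}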

const std::map<dof_id_type, std::vector<Real>> &
PorousFlowAdvectiveFluxCalculatorBase::getdK_dvar(dof_id_type node_i, dof_id_type node_j) const
{
  const dof_id_type sequential_i = _connections.sequentialID(node_i);
  const unsigned j = _connections.indexOfGlobalConnection(node_i, node_j);
  return _dkij_dvar[sequential_i][j];
}

const std::map<dof_id_type, std::vector<Real>> &
PorousFlowAdvectiveFluxCalculatorBase::getdFluxOut_dvars(unsigned node_id) const
{
  return _dflux_out_dvars[_connections.sequentialID(node_id)];
}

void
PorousFlowAdvectiveFluxCalculatorBase::finalize()
{
  AdvectiveFluxCalculatorBase::finalize();

  // compute d(flux_out)/d(porous flow variable)
  for (const auto & node_i : _connections.globalIDs())
  {
    const dof_id_type sequential_i = _connections.sequentialID(node_i);
    _dflux_out_dvars[sequential_i].clear();

    const std::map<dof_id_type, Real> dflux_out_du = getdFluxOutdu(node_i);
    for (const auto & node_du : dflux_out_du)
    {
      const dof_id_type j = node_du.first;
      const Real dflux_out_du_j = node_du.second;
      _dflux_out_dvars[sequential_i][j] = _du_dvar[_connections.sequentialID(j)];
      for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
        _dflux_out_dvars[sequential_i][j][pvar] *= dflux_out_du_j;
    }

    // _dflux_out_dvars is now sized correctly, because getdFluxOutdu(i) contains all nodes
    // connected to i and all nodes connected to nodes connected to i. getdFluxOutdKjk
    // contains no extra nodes, so just += the dflux/dK terms
    const std::vector<std::vector<Real>> dflux_out_dKjk = getdFluxOutdKjk(node_i);
    const std::vector<dof_id_type> con_i = _connections.globalConnectionsToGlobalID(node_i);
    for (std::size_t index_j = 0; index_j < con_i.size(); ++index_j)
    {
      const dof_id_type node_j = con_i[index_j];
      const std::vector<dof_id_type> con_j = _connections.globalConnectionsToGlobalID(node_j);
      for (std::size_t index_k = 0; index_k < con_j.size(); ++index_k)
      {
        const dof_id_type node_k = con_j[index_k];
        const Real dflux_out_dK_jk = dflux_out_dKjk[index_j][index_k];
        const std::map<dof_id_type, std::vector<Real>> dkj_dvarl = getdK_dvar(node_j, node_k);
        for (const auto & nodel_deriv : dkj_dvarl)
        {
          const dof_id_type l = nodel_deriv.first;
          for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
            _dflux_out_dvars[sequential_i][l][pvar] += dflux_out_dK_jk * nodel_deriv.second[pvar];
        }
      }
    }
  }
}
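
In other words, finalize() assembles the Jacobian information by the chain rule, sketched as (F_i the flux out of node i, u_j the nodal advected quantity, v_a^(l) PorousFlow variable a at node l):

\[
\frac{\partial F_i}{\partial v_a^{(l)}}
= \frac{\partial F_i}{\partial u_l}\,\frac{\partial u_l}{\partial v_a^{(l)}}
+ \sum_{j}\sum_{k} \frac{\partial F_i}{\partial K_{jk}}\,\frac{\partial K_{jk}}{\partial v_a^{(l)}} ,
\]

with the first term supplied by getdFluxOutdu and _du_dvar, and the second by getdFluxOutdKjk and getdK_dvar.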

void
PorousFlowAdvectiveFluxCalculatorBase::buildCommLists()
{
  // build nodes and pairs to exchange
  AdvectiveFluxCalculatorBase::buildCommLists();

  // Build _triples_to_receive
  // tpr_global is essentially _triples_to_receive, but its key-pairs have global nodal IDs: later
  // we build _triples_to_receive by flattening the data structure and making these key-pairs into
  // sequential nodal IDs
  std::map<processor_id_type,
           std::map<std::pair<dof_id_type, dof_id_type>, std::vector<dof_id_type>>>
      tpr_global;
  for (const auto & elem : _fe_problem.getEvaluableElementRange())
    if (this->hasBlocks(elem->subdomain_id()))
    {
      const processor_id_type elem_pid = elem->processor_id();
      if (elem_pid != _my_pid)
      {
        if (tpr_global.find(elem_pid) == tpr_global.end())
          tpr_global[elem_pid] =
              std::map<std::pair<dof_id_type, dof_id_type>, std::vector<dof_id_type>>();
        for (unsigned i = 0; i < elem->n_nodes(); ++i)
          for (unsigned j = 0; j < elem->n_nodes(); ++j)
          {
            std::pair<dof_id_type, dof_id_type> the_pair(elem->node_id(i), elem->node_id(j));
            if (tpr_global[elem_pid].find(the_pair) == tpr_global[elem_pid].end())
              tpr_global[elem_pid][the_pair] = std::vector<dof_id_type>();

            for (const auto & global_neighbor_to_i :
                 _connections.globalConnectionsToGlobalID(elem->node_id(i)))
              if (std::find(tpr_global[elem_pid][the_pair].begin(),
                            tpr_global[elem_pid][the_pair].end(),
                            global_neighbor_to_i) == tpr_global[elem_pid][the_pair].end())
                tpr_global[elem_pid][the_pair].push_back(global_neighbor_to_i);
          }
      }
    }

  // flattening makes later manipulations a lot more concise. Store the result in
  // _triples_to_receive
  _triples_to_receive.clear();
  for (const auto & kv : tpr_global)
  {
    const processor_id_type pid = kv.first;
    _triples_to_receive[pid] = std::vector<dof_id_type>();
    for (const auto & pr_vec : kv.second)
    {
      const dof_id_type i = pr_vec.first.first;
      const dof_id_type j = pr_vec.first.second;
      for (const auto & global_nd : pr_vec.second)
      {
        _triples_to_receive[pid].push_back(i);
        _triples_to_receive[pid].push_back(j);
        _triples_to_receive[pid].push_back(global_nd);
      }
    }
  }

  _triples_to_send.clear();
  auto triples_action_functor = [this](processor_id_type pid,
                                       const std::vector<dof_id_type> & tts) {
    _triples_to_send[pid] = tts;
  };
  Parallel::push_parallel_vector_data(this->comm(), _triples_to_receive, triples_action_functor);

  // _triples_to_send and _triples_to_receive have been built using global node IDs,
  // since those are known to all processors. However, using global IDs means
  // that every time we send/receive, we keep having to do things like
  // _dkij_dvar[_connections.sequentialID(_triples_to_send[pid][i])][_connections.indexOfGlobalConnection(_triples_to_send[pid][i],
  // _triples_to_send[pid][i + 1])] which is quite inefficient. So:
  for (auto & kv : _triples_to_send)
  {
    const processor_id_type pid = kv.first;
    const std::size_t num = kv.second.size();
    for (std::size_t i = 0; i < num; i += 3)
    {
      _triples_to_send[pid][i + 1] = _connections.indexOfGlobalConnection(
          _triples_to_send[pid][i], _triples_to_send[pid][i + 1]);
      _triples_to_send[pid][i] = _connections.sequentialID(_triples_to_send[pid][i]);
    }
  }
  for (auto & kv : _triples_to_receive)
  {
    const processor_id_type pid = kv.first;
    const std::size_t num = kv.second.size();
    for (std::size_t i = 0; i < num; i += 3)
    {
      _triples_to_receive[pid][i + 1] = _connections.indexOfGlobalConnection(
          _triples_to_receive[pid][i], _triples_to_receive[pid][i + 1]);
      _triples_to_receive[pid][i] = _connections.sequentialID(_triples_to_receive[pid][i]);
    }
  }
}
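
The flattened layout is simply [i, j, k, i, j, k, ...]. Here is a minimal standalone sketch of that in-place remapping, mirroring the loops above; the ID mappings are invented stand-ins for _connections.sequentialID and _connections.indexOfGlobalConnection:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using dof_id_type = std::uint64_t;

// Toy stand-ins for the PorousFlowConnectedNodes lookups (assumption, illustration only)
dof_id_type toy_sequential_id(dof_id_type global) { return global - 100; }
unsigned toy_index_of_connection(dof_id_type /*from*/, dof_id_type to) { return to % 2; }

int main()
{
  // Flattened triples of global IDs: [i0, j0, k0, i1, j1, k1]
  std::vector<dof_id_type> triples = {101, 102, 103, 102, 101, 104};

  for (std::size_t i = 0; i < triples.size(); i += 3)
  {
    // slot i + 1 must be remapped first: it needs slot i while it still holds a global ID
    triples[i + 1] = toy_index_of_connection(triples[i], triples[i + 1]);
    triples[i] = toy_sequential_id(triples[i]);
    // slot i + 2 stays a global node ID, matching the code above
  }

  for (std::size_t i = 0; i < triples.size(); i += 3)
    std::cout << "(" << triples[i] << ", " << triples[i + 1] << ", " << triples[i + 2] << ")\n";
  return 0;
}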

void
PorousFlowAdvectiveFluxCalculatorBase::exchangeGhostedInfo()
{
  // Exchange u_nodal and k_ij
  AdvectiveFluxCalculatorBase::exchangeGhostedInfo();

  // Exchange _du_dvar
  std::map<processor_id_type, std::vector<std::vector<Real>>> du_dvar_to_send;
  for (const auto & kv : _nodes_to_send)
  {
    const processor_id_type pid = kv.first;
    du_dvar_to_send[pid] = std::vector<std::vector<Real>>();
    for (const auto & nd : kv.second)
      du_dvar_to_send[pid].push_back(_du_dvar[nd]);
  }

  auto du_action_functor = [this](processor_id_type pid,
                                  const std::vector<std::vector<Real>> & du_dvar_received) {
    const std::size_t msg_size = du_dvar_received.size();
    mooseAssert(
        msg_size == _nodes_to_receive[pid].size(),
        "Message size, "
            << msg_size
            << ", in du_dvar communication is incompatible with nodes_to_receive, which has size "
            << _nodes_to_receive[pid].size());
    for (unsigned i = 0; i < msg_size; ++i)
      _du_dvar[_nodes_to_receive[pid][i]] = du_dvar_received[i];
  };
  Parallel::push_parallel_vector_data(this->comm(), du_dvar_to_send, du_action_functor);

  // Exchange _dkij_dvar
  std::map<processor_id_type, std::vector<std::vector<Real>>> dkij_dvar_to_send;
  for (const auto & kv : _triples_to_send)
  {
    const processor_id_type pid = kv.first;
    dkij_dvar_to_send[pid] = std::vector<std::vector<Real>>();
    const std::size_t num = kv.second.size();
    for (std::size_t i = 0; i < num; i += 3)
    {
      const dof_id_type sequential_id = kv.second[i];
      const unsigned index_to_seq = kv.second[i + 1];
      const dof_id_type global_id = kv.second[i + 2];
      dkij_dvar_to_send[pid].push_back(_dkij_dvar[sequential_id][index_to_seq][global_id]);
    }
  }

  auto dk_action_functor = [this](processor_id_type pid,
                                  const std::vector<std::vector<Real>> & dkij_dvar_received) {
    const std::size_t num = _triples_to_receive[pid].size();
    mooseAssert(dkij_dvar_received.size() == num / 3,
                "Message size, " << dkij_dvar_received.size()
                                 << ", in dkij_dvar communication is incompatible with "
                                    "triples_to_receive, which has size "
                                 << _triples_to_receive[pid].size());
    for (std::size_t i = 0; i < num; i += 3)
    {
      const dof_id_type sequential_id = _triples_to_receive[pid][i];
      const unsigned index_to_seq = _triples_to_receive[pid][i + 1];
      const dof_id_type global_id = _triples_to_receive[pid][i + 2];
      for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
        _dkij_dvar[sequential_id][index_to_seq][global_id][pvar] += dkij_dvar_received[i / 3][pvar];
    }
  };
  Parallel::push_parallel_vector_data(this->comm(), dkij_dvar_to_send, dk_action_functor);
}
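
Both exchanges follow the same pattern: build a map from destination processor to a vector of payloads, then hand each received payload to an action functor, which in the real Parallel::push_parallel_vector_data (from libmesh/parallel_sync.h) runs on the receiving processor. A serial mock of that pattern, with no MPI and invented data:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

using processor_id_type = std::uint32_t;

// Serial stand-in for Parallel::push_parallel_vector_data (illustration only): in the
// real call the vectors travel over MPI and the functor fires on the receiver, where
// pid identifies the *sending* processor.
template <typename Payload, typename Functor>
void mock_push(const std::map<processor_id_type, std::vector<Payload>> & to_send, Functor act)
{
  for (const auto & kv : to_send)
    act(kv.first, kv.second);
}

int main()
{
  std::map<processor_id_type, std::vector<std::vector<double>>> du_dvar_to_send;
  du_dvar_to_send[1] = {{0.5, 1.5}, {2.5, 3.5}}; // two nodes' d(u)/d(var) rows bound for proc 1

  mock_push(du_dvar_to_send,
            [](processor_id_type pid, const std::vector<std::vector<double>> & received) {
              std::cout << "payload for/from pid " << pid << ": " << received.size() << " rows\n";
            });
  return 0;
}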