PorousFlowAdvectiveFluxCalculatorBase.C

//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "PorousFlowAdvectiveFluxCalculatorBase.h"
#include "Assembly.h"
#include "libmesh/string_to_enum.h"
#include "libmesh/parallel_sync.h"

InputParameters
PorousFlowAdvectiveFluxCalculatorBase::validParams()
{
  InputParameters params = AdvectiveFluxCalculatorBase::validParams();
  params.addClassDescription(
      "Base class to compute the advective flux of fluid in PorousFlow situations. The velocity "
      "is U * (-permeability * (grad(P) - density * gravity)), while derived classes define U. "
      "The Kuzmin-Turek FEM-TVD multidimensional stabilization scheme is used");
  params.addRequiredParam<RealVectorValue>("gravity",
                                           "Gravitational acceleration vector downwards (m/s^2)");
  params.addRequiredParam<UserObjectName>(
      "PorousFlowDictator", "The UserObject that holds the list of PorousFlow variable names");
  params.addParam<unsigned int>(
      "phase", 0, "The index corresponding to the phase for this UserObject");
  MooseEnum families("LAGRANGE MONOMIAL HERMITE SCALAR HIERARCHIC CLOUGH XYZ SZABAB BERNSTEIN");
  params.addParam<MooseEnum>(
      "fe_family",
      families,
      "FE Family to use (e.g. Lagrange). You only need to specify this if your porous_flow_vars "
      "in your PorousFlowDictator have different families or orders");
  MooseEnum orders("CONSTANT FIRST SECOND THIRD FOURTH");
  params.addParam<MooseEnum>(
      "fe_order",
      orders,
      "FE Order to use (e.g. First). You only need to specify this if your porous_flow_vars in "
      "your PorousFlowDictator have different families or orders");
  return params;
}
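
// Example input-file usage (a sketch only: PorousFlowAdvectiveFluxCalculatorSaturated is one
// derived class that consumes these parameters, and the UserObject name "fluid_advective_flux"
// is arbitrary):
//
//   [UserObjects]
//     [fluid_advective_flux]
//       type = PorousFlowAdvectiveFluxCalculatorSaturated
//       gravity = '0 0 -9.81'
//       PorousFlowDictator = dictator
//       phase = 0
//     []
//   []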

PorousFlowAdvectiveFluxCalculatorBase::PorousFlowAdvectiveFluxCalculatorBase(
    const InputParameters & parameters)
  : AdvectiveFluxCalculatorBase(parameters),
    _dictator(getUserObject<PorousFlowDictator>("PorousFlowDictator")),
    _num_vars(_dictator.numVariables()),
    _gravity(getParam<RealVectorValue>("gravity")),
    _phase(getParam<unsigned int>("phase")),
    _permeability(getMaterialProperty<RealTensorValue>("PorousFlow_permeability_qp")),
    _dpermeability_dvar(
        getMaterialProperty<std::vector<RealTensorValue>>("dPorousFlow_permeability_qp_dvar")),
    _dpermeability_dgradvar(getMaterialProperty<std::vector<std::vector<RealTensorValue>>>(
        "dPorousFlow_permeability_qp_dgradvar")),
    _fluid_density_qp(getMaterialProperty<std::vector<Real>>("PorousFlow_fluid_phase_density_qp")),
    _dfluid_density_qp_dvar(getMaterialProperty<std::vector<std::vector<Real>>>(
        "dPorousFlow_fluid_phase_density_qp_dvar")),
    _grad_p(getMaterialProperty<std::vector<RealGradient>>("PorousFlow_grad_porepressure_qp")),
    _dgrad_p_dgrad_var(getMaterialProperty<std::vector<std::vector<Real>>>(
        "dPorousFlow_grad_porepressure_qp_dgradvar")),
    _dgrad_p_dvar(getMaterialProperty<std::vector<std::vector<RealGradient>>>(
        "dPorousFlow_grad_porepressure_qp_dvar")),
    _fe_type(isParamValid("fe_family") && isParamValid("fe_order")
                 ? FEType(Utility::string_to_enum<Order>(getParam<MooseEnum>("fe_order")),
                          Utility::string_to_enum<FEFamily>(getParam<MooseEnum>("fe_family")))
                 : _dictator.feType()),
    _phi(_assembly.fePhi<Real>(_fe_type)),
    _grad_phi(_assembly.feGradPhi<Real>(_fe_type)),
    _du_dvar(),
    _du_dvar_computed_by_thread(),
    _dkij_dvar(),
    _dflux_out_dvars(),
    _triples_to_receive(),
    _triples_to_send(),
    _perm_derivs(_dictator.usePermDerivs())
{
  if (_phase >= _dictator.numPhases())
    paramError("phase",
               "Phase number entered is greater than the number of phases specified in the "
               "Dictator. Remember that indexing starts at 0");

  if (isParamValid("fe_family") && !isParamValid("fe_order"))
    paramError("fe_order", "If you specify fe_family you must also specify fe_order");
  if (isParamValid("fe_order") && !isParamValid("fe_family"))
    paramError("fe_family", "If you specify fe_order you must also specify fe_family");
  if (!_dictator.consistentFEType() && !isParamValid("fe_family"))
    paramError("fe_family",
               "The PorousFlowDictator cannot determine the appropriate FE type to use because "
               "your porous_flow_vars are of different types. You must specify the appropriate "
               "fe_family and fe_order to use.");
}
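
// Note on the FEType selection above: if all porous_flow_vars are, for example, first-order
// Lagrange, neither fe_family nor fe_order need be given and _fe_type is taken from the
// PorousFlowDictator; if the variables mix families or orders (e.g. first-order Lagrange and
// constant monomial), both parameters must be supplied explicitly, otherwise paramError fires.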

Real
PorousFlowAdvectiveFluxCalculatorBase::computeVelocity(unsigned i, unsigned j, unsigned qp) const
{
  // The following is but one choice for PorousFlow situations
  // If you change this, you will probably have to change
  // - the derivative in executeOnElement
  // - computeU
  // - computedU_dvar
  return -_grad_phi[i][qp] *
         (_permeability[qp] * (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity)) *
         _phi[j][qp];
}
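
// In other words, computeVelocity returns the quadrature-point contribution
//   -grad(psi_i) . (k * (grad(P) - rho * g)) * psi_j
// where psi are the shape functions, k is the permeability tensor, P the porepressure and rho
// the fluid density of phase _phase. The base class weights this by _JxW[qp] * _coord[qp] and
// sums over qp to assemble the Kuzmin-Turek matrix entry K_ij.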

void
PorousFlowAdvectiveFluxCalculatorBase::executeOnElement(
    dof_id_type global_i, dof_id_type global_j, unsigned local_i, unsigned local_j, unsigned qp)
{
  AdvectiveFluxCalculatorBase::executeOnElement(global_i, global_j, local_i, local_j, qp);
  const dof_id_type sequential_i = _connections.sequentialID(global_i);
  const unsigned j = _connections.indexOfGlobalConnection(global_i, global_j);

  // compute d(Kij)/d(porous_flow_variables)
  for (unsigned local_k = 0; local_k < _current_elem->n_nodes(); ++local_k)
  {
    const dof_id_type global_k = _current_elem->node_id(local_k);
    for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
    {
      RealVectorValue deriv =
          _permeability[qp] *
          (_grad_phi[local_k][qp] * _dgrad_p_dgrad_var[qp][_phase][pvar] -
           _phi[local_k][qp] * _dfluid_density_qp_dvar[qp][_phase][pvar] * _gravity);
      deriv += _permeability[qp] * (_dgrad_p_dvar[qp][_phase][pvar] * _phi[local_k][qp]);

      if (_perm_derivs)
      {
        deriv += _dpermeability_dvar[qp][pvar] * _phi[local_k][qp] *
                 (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity);
        for (unsigned i = 0; i < LIBMESH_DIM; ++i)
          deriv += _dpermeability_dgradvar[qp][i][pvar] * _grad_phi[local_k][qp](i) *
                   (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity);
      }

      _dkij_dvar[sequential_i][j][global_k][pvar] +=
          _JxW[qp] * _coord[qp] * (-_grad_phi[local_i][qp] * deriv * _phi[local_j][qp]);
    }
  }
}
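
// The derivative accumulated above mirrors computeVelocity via the chain rule: writing
//   K_ij += w_qp * (-grad(psi_i) . (k * (grad(P) - rho * g)) * psi_j),
// with w_qp = _JxW[qp] * _coord[qp], differentiation with respect to porous_flow_variable a
// at node k gives
//   d(K_ij)/d(v_a^k) = w_qp * (-grad(psi_i) . deriv * psi_j),
// where deriv collects the d(grad P)/dv, d(rho)/dv and (when _perm_derivs is true) the
// d(k)/dv and d(k)/d(grad v) terms, each weighted by psi_k or grad(psi_k) as appropriate.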

void
PorousFlowAdvectiveFluxCalculatorBase::timestepSetup()
{
  const bool resizing_was_needed =
      _resizing_needed; // _resizing_needed gets set to false at the end of
                        // AdvectiveFluxCalculatorBase::timestepSetup()
  AdvectiveFluxCalculatorBase::timestepSetup();

  // clear and appropriately size all the derivative info
  // d(U)/d(porous_flow_variables) and
  // d(Kij)/d(porous_flow_variables) and
  // d(flux_out)/d(porous_flow_variables)
  if (resizing_was_needed)
  {
    const std::size_t num_nodes = _connections.numNodes();
    _du_dvar.assign(num_nodes, std::vector<Real>(_num_vars, 0.0));
    _du_dvar_computed_by_thread.assign(num_nodes, false);
    _dflux_out_dvars.assign(num_nodes, std::map<dof_id_type, std::vector<Real>>());
    _dkij_dvar.resize(num_nodes);
    for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
    {
      const std::vector<dof_id_type> con_i =
          _connections.globalConnectionsToSequentialID(sequential_i);
      const std::size_t num_con_i = con_i.size();
      _dkij_dvar[sequential_i].assign(num_con_i, std::map<dof_id_type, std::vector<Real>>());
      for (unsigned j = 0; j < num_con_i; ++j)
        for (const auto & global_neighbor_to_i : con_i)
          _dkij_dvar[sequential_i][j][global_neighbor_to_i] = std::vector<Real>(_num_vars, 0.0);
    }
  }
}
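
// Illustrative layout (hypothetical IDs): if sequential node 2 has global connections
// con_2 = {10, 11, 13}, then _dkij_dvar[2] has three entries and _dkij_dvar[2][1][13][a]
// holds d(K[node 2][node 11]) / d(porous_flow_variable a at global node 13).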

void
PorousFlowAdvectiveFluxCalculatorBase::initialize()
{
  AdvectiveFluxCalculatorBase::initialize();
  const std::size_t num_nodes = _connections.numNodes();
  _du_dvar_computed_by_thread.assign(num_nodes, false);
  for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
  {
    const std::vector<dof_id_type> & con_i =
        _connections.globalConnectionsToSequentialID(sequential_i);
    const std::size_t num_con_i = con_i.size();
    for (unsigned j = 0; j < num_con_i; ++j)
      for (const auto & global_neighbor_to_i : con_i)
        _dkij_dvar[sequential_i][j][global_neighbor_to_i] = std::vector<Real>(_num_vars, 0.0);
  }
}

void
PorousFlowAdvectiveFluxCalculatorBase::execute()
{
  AdvectiveFluxCalculatorBase::execute();

  // compute d(U)/d(porous_flow_variables) for nodes in _current_elem and for this
  // execution thread. In threadJoin all these computations get gathered
  // using _du_dvar_computed_by_thread
  for (unsigned i = 0; i < _current_elem->n_nodes(); ++i)
  {
    const dof_id_type global_i = _current_elem->node_id(i);
    const dof_id_type sequential_i = _connections.sequentialID(global_i);
    if (_du_dvar_computed_by_thread[sequential_i])
      continue;
    for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
      _du_dvar[sequential_i][pvar] = computedU_dvar(i, pvar);
    _du_dvar_computed_by_thread[sequential_i] = true;
  }
}

void
PorousFlowAdvectiveFluxCalculatorBase::threadJoin(const UserObject & uo)
{
  AdvectiveFluxCalculatorBase::threadJoin(uo);
  const auto & pfafc = static_cast<const PorousFlowAdvectiveFluxCalculatorBase &>(uo);
  // add the values of _dkij_dvar computed by different threads
  const std::size_t num_nodes = _connections.numNodes();
  for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
  {
    const std::vector<dof_id_type> & con_i =
        _connections.globalConnectionsToSequentialID(sequential_i);
    const std::size_t num_con_i = con_i.size();
    for (unsigned j = 0; j < num_con_i; ++j)
      for (const auto & global_derivs : pfafc._dkij_dvar[sequential_i][j])
        for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
          _dkij_dvar[sequential_i][j][global_derivs.first][pvar] += global_derivs.second[pvar];
  }

  // gather the values of _du_dvar computed by different threads
  for (dof_id_type sequential_i = 0; sequential_i < _number_of_nodes; ++sequential_i)
    if (!_du_dvar_computed_by_thread[sequential_i] &&
        pfafc._du_dvar_computed_by_thread[sequential_i])
      _du_dvar[sequential_i] = pfafc._du_dvar[sequential_i];
}

const std::map<dof_id_type, std::vector<Real>> &
PorousFlowAdvectiveFluxCalculatorBase::getdK_dvar(dof_id_type node_i, dof_id_type node_j) const
{
  const dof_id_type sequential_i = _connections.sequentialID(node_i);
  const unsigned j = _connections.indexOfGlobalConnection(node_i, node_j);
  return _dkij_dvar[sequential_i][j];
}

const std::map<dof_id_type, std::vector<Real>> &
PorousFlowAdvectiveFluxCalculatorBase::getdFluxOut_dvars(unsigned node_id) const
{
  return _dflux_out_dvars[_connections.sequentialID(node_id)];
}

void
PorousFlowAdvectiveFluxCalculatorBase::finalize()
{
  AdvectiveFluxCalculatorBase::finalize();

  // compute d(flux_out)/d(porous flow variable)
  for (const auto & node_i : _connections.globalIDs())
  {
    const dof_id_type sequential_i = _connections.sequentialID(node_i);
    _dflux_out_dvars[sequential_i].clear();

    const std::map<dof_id_type, Real> & dflux_out_du =
        AdvectiveFluxCalculatorBase::getdFluxOutdu(node_i);
    for (const auto & node_du : dflux_out_du)
    {
      const dof_id_type j = node_du.first;
      const Real dflux_out_du_j = node_du.second;
      _dflux_out_dvars[sequential_i][j] = _du_dvar[_connections.sequentialID(j)];
      for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
        _dflux_out_dvars[sequential_i][j][pvar] *= dflux_out_du_j;
    }

    // _dflux_out_dvars is now sized correctly, because getdFluxOutdu(i) contains all nodes
    // connected to i and all nodes connected to nodes connected to i. The
    // getdFluxOutdKjk contains no extra nodes, so just += the dflux/dK terms
    const std::vector<std::vector<Real>> & dflux_out_dKjk =
        AdvectiveFluxCalculatorBase::getdFluxOutdKjk(node_i);
    const std::vector<dof_id_type> & con_i = _connections.globalConnectionsToGlobalID(node_i);
    for (std::size_t index_j = 0; index_j < con_i.size(); ++index_j)
    {
      const dof_id_type node_j = con_i[index_j];
      const std::vector<dof_id_type> & con_j = _connections.globalConnectionsToGlobalID(node_j);
      for (std::size_t index_k = 0; index_k < con_j.size(); ++index_k)
      {
        const dof_id_type node_k = con_j[index_k];
        const Real dflux_out_dK_jk = dflux_out_dKjk[index_j][index_k];
        const std::map<dof_id_type, std::vector<Real>> & dkj_dvarl = getdK_dvar(node_j, node_k);
        for (const auto & nodel_deriv : dkj_dvarl)
        {
          const dof_id_type l = nodel_deriv.first;
          for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
            _dflux_out_dvars[sequential_i][l][pvar] += dflux_out_dK_jk * nodel_deriv.second[pvar];
        }
      }
    }
  }
}
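
// Summarising the chain rule implemented above, for the flux out of node i:
//   d(flux_i)/d(v_a^l) = sum_j d(flux_i)/d(u_j) * d(u_j)/d(v_a^l)
//                        + sum_{j,k} d(flux_i)/d(K_jk) * d(K_jk)/d(v_a^l)
// where u is the advected quantity, K is the Kuzmin-Turek matrix, and v_a^l denotes the a-th
// PorousFlow variable at node l.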

void
PorousFlowAdvectiveFluxCalculatorBase::buildCommLists()
{
  // build nodes and pairs to exchange
  AdvectiveFluxCalculatorBase::buildCommLists();

  // Build _triples_to_receive
  // tpr_global is essentially _triples_to_receive, but its key-pairs have global nodal IDs: later
  // we build _triples_to_receive by flattening the data structure and making these key-pairs into
  // sequential nodal IDs
  std::map<processor_id_type,
           std::map<std::pair<dof_id_type, dof_id_type>, std::vector<dof_id_type>>>
      tpr_global;
  for (const auto & elem : _fe_problem.getNonlinearEvaluableElementRange())
    if (this->hasBlocks(elem->subdomain_id()))
    {
      const processor_id_type elem_pid = elem->processor_id();
      if (elem_pid != _my_pid)
      {
        if (tpr_global.find(elem_pid) == tpr_global.end())
          tpr_global[elem_pid] =
              std::map<std::pair<dof_id_type, dof_id_type>, std::vector<dof_id_type>>();
        for (unsigned i = 0; i < elem->n_nodes(); ++i)
          for (unsigned j = 0; j < elem->n_nodes(); ++j)
          {
            std::pair<dof_id_type, dof_id_type> the_pair(elem->node_id(i), elem->node_id(j));
            if (tpr_global[elem_pid].find(the_pair) == tpr_global[elem_pid].end())
              tpr_global[elem_pid][the_pair] = std::vector<dof_id_type>();

            for (const auto & global_neighbor_to_i :
                 _connections.globalConnectionsToGlobalID(elem->node_id(i)))
              if (std::find(tpr_global[elem_pid][the_pair].begin(),
                            tpr_global[elem_pid][the_pair].end(),
                            global_neighbor_to_i) == tpr_global[elem_pid][the_pair].end())
                tpr_global[elem_pid][the_pair].push_back(global_neighbor_to_i);
          }
      }
    }

  // flattening makes later manipulations a lot more concise. Store the result in
  // _triples_to_receive
  _triples_to_receive.clear();
  for (const auto & kv : tpr_global)
  {
    const processor_id_type pid = kv.first;
    _triples_to_receive[pid] = std::vector<dof_id_type>();
    for (const auto & pr_vec : kv.second)
    {
      const dof_id_type i = pr_vec.first.first;
      const dof_id_type j = pr_vec.first.second;
      for (const auto & global_nd : pr_vec.second)
      {
        _triples_to_receive[pid].push_back(i);
        _triples_to_receive[pid].push_back(j);
        _triples_to_receive[pid].push_back(global_nd);
      }
    }
  }

  _triples_to_send.clear();
  auto triples_action_functor = [this](processor_id_type pid, const std::vector<dof_id_type> & tts)
  { _triples_to_send[pid] = tts; };
  Parallel::push_parallel_vector_data(this->comm(), _triples_to_receive, triples_action_functor);

  // _triples_to_send and _triples_to_receive have been built using global node IDs
  // since all processors know about that. However, using global IDs means
  // that every time we send/receive, we keep having to do things like
  // _dkij_dvar[_connections.sequentialID(_triples_to_send[pid][i])][_connections.indexOfGlobalConnection(_triples_to_send[pid][i],
  // _triples_to_send[pid][i + 1])] which is quite inefficient. So:
  for (auto & kv : _triples_to_send)
  {
    const processor_id_type pid = kv.first;
    const std::size_t num = kv.second.size();
    for (std::size_t i = 0; i < num; i += 3)
    {
      _triples_to_send[pid][i + 1] = _connections.indexOfGlobalConnection(
          _triples_to_send[pid][i], _triples_to_send[pid][i + 1]);
      _triples_to_send[pid][i] = _connections.sequentialID(_triples_to_send[pid][i]);
    }
  }
  for (auto & kv : _triples_to_receive)
  {
    const processor_id_type pid = kv.first;
    const std::size_t num = kv.second.size();
    for (std::size_t i = 0; i < num; i += 3)
    {
      _triples_to_receive[pid][i + 1] = _connections.indexOfGlobalConnection(
          _triples_to_receive[pid][i], _triples_to_receive[pid][i + 1]);
      _triples_to_receive[pid][i] = _connections.sequentialID(_triples_to_receive[pid][i]);
    }
  }
}
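
// Illustrative flattened layout (hypothetical IDs): a triple list
//   _triples_to_receive[pid] = {i0, j0, k0, i1, j1, k1, ...}
// initially holds (global_i, global_j, global_k) triples; after the two loops above it holds
//   {sequentialID(i0), indexOfGlobalConnection(i0, j0), k0, ...}
// so that exchangeGhostedInfo can index _dkij_dvar[t[3m]][t[3m + 1]][t[3m + 2]] directly.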

void
PorousFlowAdvectiveFluxCalculatorBase::exchangeGhostedInfo()
{
  // Exchange u_nodal and k_ij
  AdvectiveFluxCalculatorBase::exchangeGhostedInfo();

  // Exchange _du_dvar
  std::map<processor_id_type, std::vector<std::vector<Real>>> du_dvar_to_send;
  for (const auto & kv : _nodes_to_send)
  {
    const processor_id_type pid = kv.first;
    du_dvar_to_send[pid] = std::vector<std::vector<Real>>();
    for (const auto & nd : kv.second)
      du_dvar_to_send[pid].push_back(_du_dvar[nd]);
  }

  auto du_action_functor =
      [this](processor_id_type pid, const std::vector<std::vector<Real>> & du_dvar_received)
  {
    const std::size_t msg_size = du_dvar_received.size();
    mooseAssert(
        msg_size == _nodes_to_receive[pid].size(),
        "Message size, "
            << msg_size
            << ", in du_dvar communication is incompatible with nodes_to_receive, which has size "
            << _nodes_to_receive[pid].size());
    for (unsigned i = 0; i < msg_size; ++i)
      _du_dvar[_nodes_to_receive[pid][i]] = du_dvar_received[i];
  };
  Parallel::push_parallel_vector_data(this->comm(), du_dvar_to_send, du_action_functor);

  // Exchange _dkij_dvar
  std::map<processor_id_type, std::vector<std::vector<Real>>> dkij_dvar_to_send;
  for (const auto & kv : _triples_to_send)
  {
    const processor_id_type pid = kv.first;
    dkij_dvar_to_send[pid] = std::vector<std::vector<Real>>();
    const std::size_t num = kv.second.size();
    for (std::size_t i = 0; i < num; i += 3)
    {
      const dof_id_type sequential_id = kv.second[i];
      const unsigned index_to_seq = kv.second[i + 1];
      const dof_id_type global_id = kv.second[i + 2];
      dkij_dvar_to_send[pid].push_back(_dkij_dvar[sequential_id][index_to_seq][global_id]);
    }
  }

  auto dk_action_functor =
      [this](processor_id_type pid, const std::vector<std::vector<Real>> & dkij_dvar_received)
  {
    const std::size_t num = _triples_to_receive[pid].size();
    mooseAssert(dkij_dvar_received.size() == num / 3,
                "Message size, " << dkij_dvar_received.size()
                                 << ", in dkij_dvar communication is incompatible with "
                                    "triples_to_receive, which has size "
                                 << _triples_to_receive[pid].size());
    for (std::size_t i = 0; i < num; i += 3)
    {
      const dof_id_type sequential_id = _triples_to_receive[pid][i];
      const unsigned index_to_seq = _triples_to_receive[pid][i + 1];
      const dof_id_type global_id = _triples_to_receive[pid][i + 2];
      for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
        _dkij_dvar[sequential_id][index_to_seq][global_id][pvar] += dkij_dvar_received[i / 3][pvar];
    }
  };
  Parallel::push_parallel_vector_data(this->comm(), dkij_dvar_to_send, dk_action_functor);
}
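
// Communication pattern (a sketch of the libMesh semantics): push_parallel_vector_data sends
// each dkij_dvar_to_send[pid] to processor pid; on the receiving side the action functor is
// invoked once per sending processor with that processor's id and payload. The received
// derivatives are added into _dkij_dvar, so contributions to ghosted (i, j) pairs computed
// off-processor are accumulated onto the processor that owns them.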