Line data Source code
1 : //* This file is part of the MOOSE framework
2 : //* https://mooseframework.inl.gov
3 : //*
4 : //* All rights reserved, see COPYRIGHT for full restrictions
5 : //* https://github.com/idaholab/moose/blob/master/COPYRIGHT
6 : //*
7 : //* Licensed under LGPL 2.1, please see LICENSE for details
8 : //* https://www.gnu.org/licenses/lgpl-2.1.html
9 :
10 : #include "PorousFlowAdvectiveFluxCalculatorBase.h"
11 : #include "Assembly.h"
12 : #include "libmesh/string_to_enum.h"
13 : #include "libmesh/parallel_sync.h"
14 :
15 : InputParameters
16 2845 : PorousFlowAdvectiveFluxCalculatorBase::validParams()
17 : {
18 2845 : InputParameters params = AdvectiveFluxCalculatorBase::validParams();
19 2845 : params.addClassDescription(
20 : "Base class to compute the advective flux of fluid in PorousFlow situations. The velocity "
21 : "is U * (-permeability * (grad(P) - density * gravity)), while derived classes define U. "
22 : "The Kuzmin-Turek FEM-TVD multidimensional stabilization scheme is used");
23 5690 : params.addRequiredParam<RealVectorValue>("gravity",
24 : "Gravitational acceleration vector downwards (m/s^2)");
25 5690 : params.addRequiredParam<UserObjectName>(
26 : "PorousFlowDictator", "The UserObject that holds the list of PorousFlow variable names");
27 5690 : params.addParam<unsigned int>(
28 5690 : "phase", 0, "The index corresponding to the phase for this UserObject");
29 5690 : MooseEnum families("LAGRANGE MONOMIAL HERMITE SCALAR HIERARCHIC CLOUGH XYZ SZABAB BERNSTEIN");
30 5690 : params.addParam<MooseEnum>(
31 : "fe_family",
32 : families,
33 : "FE Family to use (eg Lagrange). You only need to specify this is your porous_flow_vars in "
34 : "your PorousFlowDictator have different families or orders");
35 5690 : MooseEnum orders("CONSTANT FIRST SECOND THIRD FOURTH");
36 5690 : params.addParam<MooseEnum>(
37 : "fe_order",
38 : orders,
39 : "FE Order to use (eg First). You only need to specify this is your porous_flow_vars in your "
40 : "PorousFlowDictator have different families or orders");
41 2845 : return params;
42 2845 : }
43 :
PorousFlowAdvectiveFluxCalculatorBase::PorousFlowAdvectiveFluxCalculatorBase(
    const InputParameters & parameters)
  : AdvectiveFluxCalculatorBase(parameters),
    // The Dictator provides the list of PorousFlow variables and the phase count
    _dictator(getUserObject<PorousFlowDictator>("PorousFlowDictator")),
    _num_vars(_dictator.numVariables()),
    _gravity(getParam<RealVectorValue>("gravity")),
    _phase(getParam<unsigned int>("phase")),
    // Quadpoint material properties: permeability, fluid density and porepressure
    // gradient, along with their derivatives with respect to the PorousFlow
    // variables (and, where relevant, the variable gradients)
    _permeability(getMaterialProperty<RealTensorValue>("PorousFlow_permeability_qp")),
    _dpermeability_dvar(
        getMaterialProperty<std::vector<RealTensorValue>>("dPorousFlow_permeability_qp_dvar")),
    _dpermeability_dgradvar(getMaterialProperty<std::vector<std::vector<RealTensorValue>>>(
        "dPorousFlow_permeability_qp_dgradvar")),
    _fluid_density_qp(getMaterialProperty<std::vector<Real>>("PorousFlow_fluid_phase_density_qp")),
    _dfluid_density_qp_dvar(getMaterialProperty<std::vector<std::vector<Real>>>(
        "dPorousFlow_fluid_phase_density_qp_dvar")),
    _grad_p(getMaterialProperty<std::vector<RealGradient>>("PorousFlow_grad_porepressure_qp")),
    _dgrad_p_dgrad_var(getMaterialProperty<std::vector<std::vector<Real>>>(
        "dPorousFlow_grad_porepressure_qp_dgradvar")),
    _dgrad_p_dvar(getMaterialProperty<std::vector<std::vector<RealGradient>>>(
        "dPorousFlow_grad_porepressure_qp_dvar")),
    // FE type: taken from fe_family/fe_order if both are given, otherwise from the
    // Dictator (which requires all porous_flow_vars to share a consistent type)
    _fe_type(isParamValid("fe_family") && isParamValid("fe_order")
                 ? FEType(Utility::string_to_enum<Order>(getParam<MooseEnum>("fe_order")),
                          Utility::string_to_enum<FEFamily>(getParam<MooseEnum>("fe_family")))
                 : _dictator.feType()),
    _phi(_assembly.fePhi<Real>(_fe_type)),
    _grad_phi(_assembly.feGradPhi<Real>(_fe_type)),
    // Derivative/communication containers are sized later (see timestepSetup)
    _du_dvar(),
    _du_dvar_computed_by_thread(),
    _dkij_dvar(),
    _dflux_out_dvars(),
    _triples_to_receive(),
    _triples_to_send(),
    _perm_derivs(_dictator.usePermDerivs())
{
  // Phase index must exist in the Dictator's phase list
  if (_phase >= _dictator.numPhases())
    paramError("phase",
               "Phase number entered is greater than the number of phases specified in the "
               "Dictator. Remember that indexing starts at 0");

  // fe_family and fe_order must be given together, or not at all
  if (isParamValid("fe_family") && !isParamValid("fe_order"))
    paramError("fe_order", "If you specify fe_family you must also specify fe_order");
  if (isParamValid("fe_order") && !isParamValid("fe_family"))
    paramError("fe_family", "If you specify fe_order you must also specify fe_family");
  // If the porous_flow_vars have mixed FE types the user must disambiguate
  if (!_dictator.consistentFEType() && !isParamValid("fe_family"))
    paramError("fe_family",
               "The PorousFlowDictator cannot determine the appropriate FE type to use because "
               "your porous_flow_vars are of different types. You must specify the appropriate "
               "fe_family and fe_order to use.");
}
93 :
94 : Real
95 14771696 : PorousFlowAdvectiveFluxCalculatorBase::computeVelocity(unsigned i, unsigned j, unsigned qp) const
96 : {
97 : // The following is but one choice for PorousFlow situations
98 : // If you change this, you will probably have to change
99 : // - the derivative in executeOnElement
100 : // - computeU
101 : // - computedU_dvar
102 14771696 : return -_grad_phi[i][qp] *
103 14771696 : (_permeability[qp] * (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity)) *
104 14771696 : _phi[j][qp];
105 : }
106 :
107 : void
108 14771696 : PorousFlowAdvectiveFluxCalculatorBase::executeOnElement(
109 : dof_id_type global_i, dof_id_type global_j, unsigned local_i, unsigned local_j, unsigned qp)
110 : {
111 14771696 : AdvectiveFluxCalculatorBase::executeOnElement(global_i, global_j, local_i, local_j, qp);
112 14771696 : const dof_id_type sequential_i = _connections.sequentialID(global_i);
113 14771696 : const unsigned j = _connections.indexOfGlobalConnection(global_i, global_j);
114 :
115 : // compute d(Kij)/d(porous_flow_variables)
116 61436368 : for (unsigned local_k = 0; local_k < _current_elem->n_nodes(); ++local_k)
117 : {
118 46664672 : const dof_id_type global_k = _current_elem->node_id(local_k);
119 155832736 : for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
120 : {
121 : RealVectorValue deriv =
122 109168064 : _permeability[qp] *
123 109168064 : (_grad_phi[local_k][qp] * _dgrad_p_dgrad_var[qp][_phase][pvar] -
124 109168064 : _phi[local_k][qp] * _dfluid_density_qp_dvar[qp][_phase][pvar] * _gravity);
125 109168064 : deriv += _permeability[qp] * (_dgrad_p_dvar[qp][_phase][pvar] * _phi[local_k][qp]);
126 :
127 109168064 : if (_perm_derivs)
128 : {
129 0 : deriv += _dpermeability_dvar[qp][pvar] * _phi[local_k][qp] *
130 0 : (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity);
131 0 : for (unsigned i = 0; i < LIBMESH_DIM; ++i)
132 0 : deriv += _dpermeability_dgradvar[qp][i][pvar] * _grad_phi[local_k][qp](i) *
133 0 : (_grad_p[qp][_phase] - _fluid_density_qp[qp][_phase] * _gravity);
134 : }
135 :
136 109168064 : _dkij_dvar[sequential_i][j][global_k][pvar] +=
137 109168064 : _JxW[qp] * _coord[qp] * (-_grad_phi[local_i][qp] * deriv * _phi[local_j][qp]);
138 : }
139 : }
140 14771696 : }
141 :
142 : void
143 26519 : PorousFlowAdvectiveFluxCalculatorBase::timestepSetup()
144 : {
145 26519 : const bool resizing_was_needed =
146 : _resizing_needed; // _resizing_needed gets set to false at the end of
147 : // AdvectiveFluxCalculatorBase::timestepSetup()
148 26519 : AdvectiveFluxCalculatorBase::timestepSetup();
149 :
150 : // clear and appropriately size all the derivative info
151 : // d(U)/d(porous_flow_variables) and
152 : // d(Kij)/d(porous_flow_variables) and
153 : // d(flux_out)/d(porous_flow_variables)
154 26519 : if (resizing_was_needed)
155 : {
156 1543 : const std::size_t num_nodes = _connections.numNodes();
157 3086 : _du_dvar.assign(num_nodes, std::vector<Real>(_num_vars, 0.0));
158 1543 : _du_dvar_computed_by_thread.assign(num_nodes, false);
159 1543 : _dflux_out_dvars.assign(num_nodes, std::map<dof_id_type, std::vector<Real>>());
160 1543 : _dkij_dvar.resize(num_nodes);
161 32899 : for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
162 : {
163 : const std::vector<dof_id_type> con_i =
164 31356 : _connections.globalConnectionsToSequentialID(sequential_i);
165 : const std::size_t num_con_i = con_i.size();
166 31356 : _dkij_dvar[sequential_i].assign(num_con_i, std::map<dof_id_type, std::vector<Real>>());
167 177804 : for (unsigned j = 0; j < num_con_i; ++j)
168 1035140 : for (const auto & global_neighbor_to_i : con_i)
169 1777384 : _dkij_dvar[sequential_i][j][global_neighbor_to_i] = std::vector<Real>(_num_vars, 0.0);
170 31356 : }
171 : }
172 26519 : }
173 :
174 : void
175 107728 : PorousFlowAdvectiveFluxCalculatorBase::initialize()
176 : {
177 107728 : AdvectiveFluxCalculatorBase::initialize();
178 107728 : const std::size_t num_nodes = _connections.numNodes();
179 107728 : _du_dvar_computed_by_thread.assign(num_nodes, false);
180 2587146 : for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
181 : {
182 : const std::vector<dof_id_type> & con_i =
183 2479418 : _connections.globalConnectionsToSequentialID(sequential_i);
184 : const std::size_t num_con_i = con_i.size();
185 10761864 : for (unsigned j = 0; j < num_con_i; ++j)
186 41654352 : for (const auto & global_neighbor_to_i : con_i)
187 66743812 : _dkij_dvar[sequential_i][j][global_neighbor_to_i] = std::vector<Real>(_num_vars, 0.0);
188 : }
189 107728 : }
190 :
191 : void
192 1204142 : PorousFlowAdvectiveFluxCalculatorBase::execute()
193 : {
194 1204142 : AdvectiveFluxCalculatorBase::execute();
195 :
196 : // compute d(U)/d(porous_flow_variables) for nodes in _current_elem and for this
197 : // execution thread. In threadJoin all these computations get gathered
198 : // using _du_dvar_computed_by_thread
199 3762346 : for (unsigned i = 0; i < _current_elem->n_nodes(); ++i)
200 : {
201 2558204 : const dof_id_type global_i = _current_elem->node_id(i);
202 2558204 : const dof_id_type sequential_i = _connections.sequentialID(global_i);
203 2558204 : if (_du_dvar_computed_by_thread[sequential_i])
204 1196894 : continue;
205 4161330 : for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
206 2800020 : _du_dvar[sequential_i][pvar] = computedU_dvar(i, pvar);
207 : _du_dvar_computed_by_thread[sequential_i] = true;
208 : }
209 1204142 : }
210 :
211 : void
212 45848 : PorousFlowAdvectiveFluxCalculatorBase::threadJoin(const UserObject & uo)
213 : {
214 45848 : AdvectiveFluxCalculatorBase::threadJoin(uo);
215 : const auto & pfafc = static_cast<const PorousFlowAdvectiveFluxCalculatorBase &>(uo);
216 : // add the values of _dkij_dvar computed by different threads
217 45848 : const std::size_t num_nodes = _connections.numNodes();
218 1115662 : for (dof_id_type sequential_i = 0; sequential_i < num_nodes; ++sequential_i)
219 : {
220 : const std::vector<dof_id_type> & con_i =
221 1069814 : _connections.globalConnectionsToSequentialID(sequential_i);
222 : const std::size_t num_con_i = con_i.size();
223 4594844 : for (unsigned j = 0; j < num_con_i; ++j)
224 17279796 : for (const auto & global_derivs : pfafc._dkij_dvar[sequential_i][j])
225 42690338 : for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
226 28935572 : _dkij_dvar[sequential_i][j][global_derivs.first][pvar] += global_derivs.second[pvar];
227 : }
228 :
229 : // gather the values of _du_dvar computed by different threads
230 1115662 : for (dof_id_type sequential_i = 0; sequential_i < _number_of_nodes; ++sequential_i)
231 1069814 : if (!_du_dvar_computed_by_thread[sequential_i] &&
232 : pfafc._du_dvar_computed_by_thread[sequential_i])
233 375306 : _du_dvar[sequential_i] = pfafc._du_dvar[sequential_i];
234 45848 : }
235 :
236 : const std::map<dof_id_type, std::vector<Real>> &
237 19617140 : PorousFlowAdvectiveFluxCalculatorBase::getdK_dvar(dof_id_type node_i, dof_id_type node_j) const
238 : {
239 19617140 : const dof_id_type sequential_i = _connections.sequentialID(node_i);
240 19617140 : const unsigned j = _connections.indexOfGlobalConnection(node_i, node_j);
241 19617140 : return _dkij_dvar[sequential_i][j];
242 : }
243 :
244 : const std::map<dof_id_type, std::vector<Real>> &
245 1877844 : PorousFlowAdvectiveFluxCalculatorBase::getdFluxOut_dvars(unsigned node_id) const
246 : {
247 1877844 : return _dflux_out_dvars[_connections.sequentialID(node_id)];
248 : }
249 :
void
PorousFlowAdvectiveFluxCalculatorBase::finalize()
{
  AdvectiveFluxCalculatorBase::finalize();

  // compute d(flux_out)/d(porous flow variable)
  /// _dflux_out_dvars[sequential_i][global_j][pvar] = d(flux_out[global version of sequential_i])/d(porous_flow_variable pvar at global node j)
  for (const auto & node_i : _connections.globalIDs())
  {
    const dof_id_type sequential_i = _connections.sequentialID(node_i);
    _dflux_out_dvars[sequential_i].clear();

    // First contribution: chain rule through the nodal u values,
    // d(flux_out)/d(var) += d(flux_out)/d(u_j) * d(u_j)/d(var)
    const std::map<dof_id_type, Real> & dflux_out_du =
        AdvectiveFluxCalculatorBase::getdFluxOutdu(node_i);
    for (const auto & node_du : dflux_out_du)
    {
      const dof_id_type j = node_du.first;
      const Real dflux_out_du_j = node_du.second;
      // copy d(u_j)/d(var) then scale in place by d(flux_out)/d(u_j)
      _dflux_out_dvars[sequential_i][j] = _du_dvar[_connections.sequentialID(j)];
      for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
        _dflux_out_dvars[sequential_i][j][pvar] *= dflux_out_du_j;
    }

    // _dflux_out_dvars is now sized correctly, because getdFluxOutdu(i) contains all nodes
    // connected to i and all nodes connected to nodes connected to i. The
    // getdFluxOutdKij contains no extra nodes, so just += the dflux/dK terms
    const std::vector<std::vector<Real>> & dflux_out_dKjk =
        AdvectiveFluxCalculatorBase::getdFluxOutdKjk(node_i);
    const std::vector<dof_id_type> & con_i = _connections.globalConnectionsToGlobalID(node_i);
    for (std::size_t index_j = 0; index_j < con_i.size(); ++index_j)
    {
      const dof_id_type node_j = con_i[index_j];
      const std::vector<dof_id_type> & con_j = _connections.globalConnectionsToGlobalID(node_j);
      for (std::size_t index_k = 0; index_k < con_j.size(); ++index_k)
      {
        // Second contribution: chain rule through the K matrix,
        // d(flux_out)/d(var at l) += d(flux_out)/d(K_jk) * d(K_jk)/d(var at l)
        const dof_id_type node_k = con_j[index_k];
        const Real dflux_out_dK_jk = dflux_out_dKjk[index_j][index_k];
        const std::map<dof_id_type, std::vector<Real>> & dkj_dvarl = getdK_dvar(node_j, node_k);
        for (const auto & nodel_deriv : dkj_dvarl)
        {
          const dof_id_type l = nodel_deriv.first;
          for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
            _dflux_out_dvars[sequential_i][l][pvar] += dflux_out_dK_jk * nodel_deriv.second[pvar];
        }
      }
    }
  }
}
298 :
299 : void
300 1120 : PorousFlowAdvectiveFluxCalculatorBase::buildCommLists()
301 : {
302 : // build nodes and pairs to exchange
303 1120 : AdvectiveFluxCalculatorBase::buildCommLists();
304 :
305 : // Build _triples_to_receive
306 : // tpr_global is essentially _triples_to_receive, but its key-pairs have global nodal IDs: later
307 : // we build _triples_to_receive by flattening the data structure and making these key-pairs into
308 : // sequential nodal IDs
309 : std::map<processor_id_type,
310 : std::map<std::pair<dof_id_type, dof_id_type>, std::vector<dof_id_type>>>
311 : tpr_global;
312 17488 : for (const auto & elem : _fe_problem.getNonlinearEvaluableElementRange())
313 16368 : if (this->hasBlocks(elem->subdomain_id()))
314 : {
315 16368 : const processor_id_type elem_pid = elem->processor_id();
316 16368 : if (elem_pid != _my_pid)
317 : {
318 3484 : if (tpr_global.find(elem_pid) == tpr_global.end())
319 1088 : tpr_global[elem_pid] =
320 1088 : std::map<std::pair<dof_id_type, dof_id_type>, std::vector<dof_id_type>>();
321 13900 : for (unsigned i = 0; i < elem->n_nodes(); ++i)
322 45904 : for (unsigned j = 0; j < elem->n_nodes(); ++j)
323 : {
324 35488 : std::pair<dof_id_type, dof_id_type> the_pair(elem->node_id(i), elem->node_id(j));
325 35488 : if (tpr_global[elem_pid].find(the_pair) == tpr_global[elem_pid].end())
326 53056 : tpr_global[elem_pid][the_pair] = std::vector<dof_id_type>();
327 :
328 35488 : for (const auto & global_neighbor_to_i :
329 316376 : _connections.globalConnectionsToGlobalID(elem->node_id(i)))
330 245400 : if (std::find(tpr_global[elem_pid][the_pair].begin(),
331 245400 : tpr_global[elem_pid][the_pair].end(),
332 245400 : global_neighbor_to_i) == tpr_global[elem_pid][the_pair].end())
333 175224 : tpr_global[elem_pid][the_pair].push_back(global_neighbor_to_i);
334 : }
335 : }
336 : }
337 :
338 : // flattening makes later manipulations a lot more concise. Store the result in
339 : // _triples_to_receive
340 : _triples_to_receive.clear();
341 2208 : for (const auto & kv : tpr_global)
342 : {
343 1088 : const processor_id_type pid = kv.first;
344 2176 : _triples_to_receive[pid] = std::vector<dof_id_type>();
345 27616 : for (const auto & pr_vec : kv.second)
346 : {
347 26528 : const dof_id_type i = pr_vec.first.first;
348 26528 : const dof_id_type j = pr_vec.first.second;
349 201752 : for (const auto & global_nd : pr_vec.second)
350 : {
351 175224 : _triples_to_receive[pid].push_back(i);
352 175224 : _triples_to_receive[pid].push_back(j);
353 175224 : _triples_to_receive[pid].push_back(global_nd);
354 : }
355 : }
356 : }
357 :
358 : _triples_to_send.clear();
359 1088 : auto triples_action_functor = [this](processor_id_type pid, const std::vector<dof_id_type> & tts)
360 1088 : { _triples_to_send[pid] = tts; };
361 1120 : Parallel::push_parallel_vector_data(this->comm(), _triples_to_receive, triples_action_functor);
362 :
363 : // _triples_to_send and _triples_to_receive have been built using global node IDs
364 : // since all processors know about that. However, using global IDs means
365 : // that every time we send/receive, we keep having to do things like
366 : // _dkij_dvar[_connections.sequentialID(_triples_to_send[pid][i])][_connections.indexOfGlobalConnection(_triples_to_send[pid][i],
367 : // _triples_to_send[pid][i + 1])] which is quite inefficient. So:
368 2208 : for (auto & kv : _triples_to_send)
369 : {
370 1088 : const processor_id_type pid = kv.first;
371 : const std::size_t num = kv.second.size();
372 176312 : for (std::size_t i = 0; i < num; i += 3)
373 : {
374 175224 : _triples_to_send[pid][i + 1] = _connections.indexOfGlobalConnection(
375 175224 : _triples_to_send[pid][i], _triples_to_send[pid][i + 1]);
376 175224 : _triples_to_send[pid][i] = _connections.sequentialID(_triples_to_send[pid][i]);
377 : }
378 : }
379 2208 : for (auto & kv : _triples_to_receive)
380 : {
381 1088 : const processor_id_type pid = kv.first;
382 : const std::size_t num = kv.second.size();
383 176312 : for (std::size_t i = 0; i < num; i += 3)
384 : {
385 175224 : _triples_to_receive[pid][i + 1] = _connections.indexOfGlobalConnection(
386 175224 : _triples_to_receive[pid][i], _triples_to_receive[pid][i + 1]);
387 175224 : _triples_to_receive[pid][i] = _connections.sequentialID(_triples_to_receive[pid][i]);
388 : }
389 : }
390 1120 : }
391 :
void
PorousFlowAdvectiveFluxCalculatorBase::exchangeGhostedInfo()
{
  // Exchange u_nodal and k_ij
  AdvectiveFluxCalculatorBase::exchangeGhostedInfo();

  // Exchange _du_dvar: pack the d(U)/d(var) vectors for the nodes each
  // neighboring processor needs, in the order given by _nodes_to_send
  std::map<processor_id_type, std::vector<std::vector<Real>>> du_dvar_to_send;
  for (const auto & kv : _nodes_to_send)
  {
    const processor_id_type pid = kv.first;
    du_dvar_to_send[pid] = std::vector<std::vector<Real>>();
    for (const auto & nd : kv.second)
      du_dvar_to_send[pid].push_back(_du_dvar[nd]);
  }

  // Receiving side: overwrite _du_dvar at the ghosted nodes, relying on the
  // sender having packed in the same order as our _nodes_to_receive[pid]
  auto du_action_functor =
      [this](processor_id_type pid, const std::vector<std::vector<Real>> & du_dvar_received)
  {
    const std::size_t msg_size = du_dvar_received.size();
    mooseAssert(
        msg_size == _nodes_to_receive[pid].size(),
        "Message size, "
            << msg_size
            << ", in du_dvar communication is incompatible with nodes_to_receive, which has size "
            << _nodes_to_receive[pid].size());
    for (unsigned i = 0; i < msg_size; ++i)
      _du_dvar[_nodes_to_receive[pid][i]] = du_dvar_received[i];
  };
  Parallel::push_parallel_vector_data(this->comm(), du_dvar_to_send, du_action_functor);

  // Exchange _dkij_dvar: each flattened (sequential_id, connection index,
  // global_id) triple from buildCommLists addresses one derivative vector
  std::map<processor_id_type, std::vector<std::vector<Real>>> dkij_dvar_to_send;
  for (const auto & kv : _triples_to_send)
  {
    const processor_id_type pid = kv.first;
    dkij_dvar_to_send[pid] = std::vector<std::vector<Real>>();
    const std::size_t num = kv.second.size();
    for (std::size_t i = 0; i < num; i += 3)
    {
      const dof_id_type sequential_id = kv.second[i];
      const unsigned index_to_seq = kv.second[i + 1];
      const dof_id_type global_id = kv.second[i + 2];
      dkij_dvar_to_send[pid].push_back(_dkij_dvar[sequential_id][index_to_seq][global_id]);
    }
  }

  // Receiving side: accumulate (+=, not overwrite) the off-processor
  // contributions into our _dkij_dvar, one message entry per triple
  auto dk_action_functor =
      [this](processor_id_type pid, const std::vector<std::vector<Real>> & dkij_dvar_received)
  {
    const std::size_t num = _triples_to_receive[pid].size();
    mooseAssert(dkij_dvar_received.size() == num / 3,
                "Message size, " << dkij_dvar_received.size()
                                 << ", in dkij_dvar communication is incompatible with "
                                    "triples_to_receive, which has size "
                                 << _triples_to_receive[pid].size());
    for (std::size_t i = 0; i < num; i += 3)
    {
      const dof_id_type sequential_id = _triples_to_receive[pid][i];
      const unsigned index_to_seq = _triples_to_receive[pid][i + 1];
      const dof_id_type global_id = _triples_to_receive[pid][i + 2];
      for (unsigned pvar = 0; pvar < _num_vars; ++pvar)
        _dkij_dvar[sequential_id][index_to_seq][global_id][pvar] += dkij_dvar_received[i / 3][pvar];
    }
  };
  Parallel::push_parallel_vector_data(this->comm(), dkij_dvar_to_send, dk_action_functor);
}
|