Line data Source code
1 : /**********************************************************************/
2 : /* DO NOT MODIFY THIS HEADER */
3 : /* MAGPIE - Mesoscale Atomistic Glue Program for Integrated Execution */
4 : /* */
5 : /* Copyright 2017 Battelle Energy Alliance, LLC */
6 : /* ALL RIGHTS RESERVED */
7 : /**********************************************************************/
8 :
9 : #include "SPPARKSUserObject.h"
10 :
11 : #include "MooseRandom.h"
12 : #include "libmesh/mesh_tools.h"
13 : #include <numeric>
14 :
15 : registerMooseObject("MagpieApp", SPPARKSUserObject);
16 :
17 : InputParameters
18 18 : SPPARKSUserObject::validParams()
19 : {
20 18 : InputParameters params = GeneralUserObject::validParams();
21 36 : params.addParam<std::string>("file", "", "SPPARKS input file");
22 36 : params.addParam<bool>("spparks_only", false, "Whether to run SPPARKS independently of MOOSE");
23 :
24 36 : params.addParam<std::vector<unsigned int>>(
25 : "from_ivar", {}, "Index into SPPARKS iarray. This data will be extracted from SPPARKS.");
26 36 : params.addParam<std::vector<unsigned int>>(
27 : "from_dvar", {}, "Index into SPPARKS darray. This data will be extracted from SPPARKS.");
28 36 : params.addParam<std::vector<unsigned int>>(
29 : "to_ivar", {}, "Index into SPPARKS iarray. This data will be copied to SPPARKS.");
30 36 : params.addParam<std::vector<unsigned int>>(
31 : "to_dvar", {}, "Index into SPPARKS darray. This data will be copied to SPPARKS.");
32 :
33 36 : params.addParam<std::vector<std::string>>("int_vars", {}, "Integer Vars to send to SPPARKS.");
34 36 : params.addParam<std::vector<std::string>>("double_vars", {}, "Double Vars to send to SPPARKS.");
35 36 : params.addParam<std::vector<std::string>>(
36 : "sol_vars", {}, "Double solving-for Vars obtained from SPPARKS.");
37 :
38 36 : params.addParam<Real>("xmin", 0.0, "Lower X Coordinate of the generated mesh");
39 36 : params.addParam<Real>("ymin", 0.0, "Lower Y Coordinate of the generated mesh");
40 36 : params.addParam<Real>("zmin", 0.0, "Lower Z Coordinate of the generated mesh");
41 36 : params.addParam<Real>("xmax", 1.0, "Upper X Coordinate of the generated mesh");
42 36 : params.addParam<Real>("ymax", 1.0, "Upper Y Coordinate of the generated mesh");
43 36 : params.addParam<Real>("zmax", 1.0, "Upper Z Coordinate of the generated mesh");
44 :
45 36 : params.addParam<bool>("init_spparks", false, "Set values of SPPARKS variables at time zero.");
46 36 : params.addParam<bool>(
47 36 : "one_time_run", false, "Run SPPARKS just once at time zero."); // added by YF
48 36 : params.addParam<Real>("time_spparks_time_ratio", 1, "Ratio of time to SPPARKS time.");
49 :
50 : // Hide from input file dump
51 : // params.addPrivateParam<std::string>("built_by_action", "");
52 18 : return params;
53 0 : }
54 :
// Constructor: resolves the MOOSE variables named in the coupling parameter
// lists, opens an embedded SPPARKS instance on this object's MPI
// communicator, runs the user-supplied SPPARKS input file, and logs basic
// lattice information (dimension, box origin, local site count).
SPPARKSUserObject::SPPARKSUserObject(const InputParameters & params)
  : GeneralUserObject(params),
    _spparks(nullptr),
    _file(getParam<std::string>("file")),
    _spparks_only(getParam<bool>("spparks_only")),
    _from_ivar(getParam<std::vector<unsigned int>>("from_ivar")),
    _from_dvar(getParam<std::vector<unsigned int>>("from_dvar")),
    _to_ivar(getParam<std::vector<unsigned int>>("to_ivar")),
    _to_dvar(getParam<std::vector<unsigned int>>("to_dvar")),
    _xmin(getParam<Real>("xmin")),
    _ymin(getParam<Real>("ymin")),
    _zmin(getParam<Real>("zmin")),
    _xmax(getParam<Real>("xmax")),
    _ymax(getParam<Real>("ymax")),
    _zmax(getParam<Real>("zmax")),
    _init_spparks(getParam<bool>("init_spparks")),
    _time_ratio(getParam<Real>("time_spparks_time_ratio")),
    _initialized(false),
    _one_time_run(getParam<bool>("one_time_run")),
    _n_spparks_run(0),
    _int_vars(),
    _double_vars(),
    _sol_vars(),
    // Sentinel meaning "never executed"; compared against _t in execute().
    // NOTE(review): numeric_limits<Real>::min() is the smallest *positive*
    // value — if simulations can start at negative times, lowest() would be
    // the safer sentinel; verify against expected start times.
    _last_time(std::numeric_limits<Real>::min())
{
  // Variables copied *into* the SPPARKS iarray; each int_var pairs
  // positionally with the to_ivar index at the same position.
  for (const auto & name : getParam<std::vector<std::string>>("int_vars"))
    _int_vars.push_back(&_fe_problem.getStandardVariable(0, name));
  if (_int_vars.size() != _to_ivar.size())
    mooseError("Mismatch with int_vars and to_ivar");

  // Same positional pairing for double-valued variables and the darray.
  for (const auto & name : getParam<std::vector<std::string>>("double_vars"))
    _double_vars.push_back(&_fe_problem.getStandardVariable(0, name));
  if (_double_vars.size() != _to_dvar.size())
    mooseError("Mismatch with double_vars and to_dvar");

  // Solution variables that are *filled from* SPPARKS results in setFEMData().
  for (const auto & name : getParam<std::vector<std::string>>("sol_vars"))
    _sol_vars.push_back(&_fe_problem.getStandardVariable(0, name));

  _console << "\n>>>> STARTING SPPARKS <<<<\n";
  spparks_open(0, nullptr, _communicator.get(), &_spparks);
  if (!_spparks)
    mooseError("Error initializing SPPARKS");

  // Execute the SPPARKS input script immediately so the lattice exists before
  // any data exchange is attempted.
  _console << "\n>>>> RUNNING SPPARKS FILE " << _file << " <<<<\n";
  spparks_file(_spparks, _file.c_str());

  int * iptr;
  double * dptr;

  // Extract and print information about the SPPARKS internals
  getSPPARKSPointer(iptr, "dimension");
  _dim = *iptr;
  _console << "\n>>>> SPPARKS DIMENSION: " << _dim << " <<<<\n";

  getSPPARKSPointer(dptr, "boxxlo");
  _console << "\n>>>> SPPARKS BOXXLO: " << *dptr << " <<<<\n";

  getSPPARKSPointer(iptr, "nlocal");
  _console << "\n>>>> SPPARKS NLOCAL: " << *iptr << " <<<<\n";
}
115 :
// Shut down the embedded SPPARKS instance. The handle is non-null for any
// fully constructed object: the constructor raises mooseError on a failed
// spparks_open, so the destructor never runs with a null handle.
SPPARKSUserObject::~SPPARKSUserObject() { spparks_close(_spparks); }
117 :
void
SPPARKSUserObject::initialize()
{
  // Nothing to synchronize per-step when SPPARKS runs standalone; the actual
  // data exchange happens in execute() (the calls below are intentionally
  // disabled here).
  if (_spparks_only)
    return;

  // getSPPARKSData();
  // setSPPARKSData();
}
127 :
128 : int
129 0 : SPPARKSUserObject::getIntValue(unsigned int fem_node_id, unsigned int index) const
130 : {
131 0 : return _spparks_only ? 0 : getValue(_int_data_for_fem, fem_node_id, index);
132 : }
133 :
134 : Real
135 0 : SPPARKSUserObject::getDoubleValue(unsigned int fem_node_id, unsigned int index) const
136 : {
137 0 : return _spparks_only ? 0.0 : getValue(_double_data_for_fem, fem_node_id, index);
138 : }
139 :
140 : char *
141 9 : SPPARKSUserObject::runSPPARKSCommand(const std::string & cmd)
142 : {
143 9 : return spparks_command(_spparks, cmd.c_str());
144 : }
145 :
146 : void
147 9 : SPPARKSUserObject::getSPPARKSData()
148 : {
149 : // Update the integer data
150 27 : for (unsigned int i = 0; i < _from_ivar.size(); ++i)
151 36 : getSPPARKSData(_int_data_for_fem[_from_ivar[i]], "iarray", _from_ivar[i]);
152 :
153 : // Update the double data
154 18 : for (unsigned int i = 0; i < _from_dvar.size(); ++i)
155 18 : getSPPARKSData(_double_data_for_fem[_from_dvar[i]], "darray", _from_dvar[i]);
156 9 : }
157 :
158 : void
159 9 : SPPARKSUserObject::setSPPARKSData()
160 : {
161 : // Update the integer data
162 : int * ip = nullptr;
163 9 : for (unsigned int i = 0; i < _to_ivar.size(); ++i)
164 0 : setSPPARKSData(ip, "iarray", _to_ivar[i], *_int_vars[i]);
165 :
166 : // Update the double data
167 : double * dp = nullptr;
168 9 : for (unsigned int i = 0; i < _to_dvar.size(); ++i)
169 0 : setSPPARKSData(dp, "darray", _to_dvar[i], *_double_vars[i]);
170 9 : }
171 :
172 : void
173 9 : SPPARKSUserObject::setFEMData()
174 : {
175 : // Update the double solving variables using the data from SPPARKS, added by YF
176 18 : for (unsigned int i = 0; i < _sol_vars.size(); ++i)
177 18 : setFEMData<double>("darray", _from_dvar[i], *_sol_vars[i]);
178 9 : }
179 :
// Advance the coupled SPPARKS simulation by one MOOSE time step:
// push MOOSE variables in, run SPPARKS for the scaled step length, then pull
// SPPARKS results back out. Skipped when running SPPARKS standalone, when a
// one-shot run has already happened, or when re-invoked at the same time
// (prevents double-running within a single step).
void
SPPARKSUserObject::execute()
{
  if ((_one_time_run && _n_spparks_run > 0) || _spparks_only || _t == _last_time)
    return;

  // Optional time-zero seeding of SPPARKS state (no-op unless init_spparks).
  initSPPARKS();

  _last_time = _t;

  // set variables in SPPARKS defined by to_ivar and to_dvar
  setSPPARKSData();

  // Run SPPARKS over a certain time defined by sp_time
  // ("pre no" tells SPPARKS to skip its pre-run re-initialization).
  const Real sp_time = getSPPARKSTime(_dt);
  std::stringstream cmd;
  cmd << "run ";
  cmd << sp_time;
  cmd << " pre no" << std::endl;
  runSPPARKSCommand(cmd.str());

  // times that SPPARKS has been called
  _n_spparks_run++;

  // obtain data from SPPARKS:
  // getSPPARKSData updates the aux variables defined by from_ivar and from_dvar
  // setFEMData updates the MOOSE variables defined by sol_vars
  getSPPARKSData();
  setFEMData();
}
210 :
211 : void
212 9 : SPPARKSUserObject::initSPPARKS()
213 : {
214 : // Set SPPARKS values based on 3.3.2 in Veena's paper
215 : // This is needed only to couple POTTS model with MARMOT
216 :
217 : // Loop over local FEM nodes
218 : // For each node, pick a random spin
219 : // Half alpha phase, half beta phase
220 : // Composition for alpha phase is 0.25; for beta phase, 0.75.
221 :
222 9 : if (!_init_spparks)
223 9 : return;
224 :
225 0 : if (_from_ivar.size() != 2)
226 0 : mooseError("Must have two integer variables from SPPARKS");
227 :
228 0 : if (_double_vars.size() != 1)
229 0 : mooseError("Must have one double variable to send to SPPARKS");
230 :
231 0 : SystemBase & sys = _double_vars[0]->sys();
232 : NumericVector<Number> & solution = sys.solution();
233 :
234 0 : std::map<libMesh::dof_id_type, int> ints[2];
235 : std::map<libMesh::dof_id_type, Real> comp;
236 0 : ConstNodeRange & node_range = *_fe_problem.mesh().getLocalNodeRange();
237 0 : for (auto i = node_range.begin(); i < node_range.end(); ++i)
238 : {
239 0 : int value = std::floor(100 * MooseRandom::rand()) + 1;
240 0 : ints[0][(*i)->id()] = value;
241 0 : ints[1][(*i)->id()] = value < 51;
242 :
243 : Real comp = 0.75;
244 0 : if (value < 51)
245 : comp = 0.25;
246 :
247 : // Set data
248 0 : solution.set((*i)->dof_number(sys.number(), _double_vars[0]->number(), 0), comp);
249 : }
250 0 : solution.close();
251 :
252 0 : int * pint = nullptr;
253 0 : char iarray[] = "iarray";
254 0 : for (unsigned int i = 0; i < 2; ++i)
255 : {
256 0 : getSPPARKSDataPointer(pint, iarray, _from_ivar[i]);
257 :
258 : // Index into data is SPPARKS node id.
259 0 : for (auto it = _fem_to_spparks.begin(); it != _fem_to_spparks.end(); ++it)
260 0 : pint[it->second.id] = ints[i][it->first.id];
261 :
262 : // Copy data across processors
263 0 : if (n_processors() > 1)
264 0 : sendRecvFEMData(ints[i], pint);
265 : }
266 :
267 : // do not re-init SPPARKS during this simulation
268 0 : _init_spparks = false;
269 0 : }
270 :
// One-time construction of the SPPARKS<->FEM node correspondence and of the
// cross-processor communication maps used for later data exchange. The code
// matches nodes by coordinate (periodic-image FEM nodes are folded back onto
// the min faces first), then performs a bounding-box based rendezvous over
// MPI to find, for every locally unmatched node, the remote rank that owns
// its counterpart. Left byte-for-byte intact: the tag/request/ordering
// structure of the nonblocking MPI exchange is load-bearing.
void
SPPARKSUserObject::initialSetup()
{
  if (_spparks_only || _initialized)
    return;

  _initialized = true;

  // Initialize communication maps
  // _console << "\ninitialSetup: begin\n";

  // 1. Get on-processor map from SPPARKS ID to FEM ID
  int * iptr;
  getSPPARKSPointer(iptr, "nlocal");
  int nlcl = *iptr;

  double ** xyz_array;
  getSPPARKSPointer(xyz_array, "xyz");

  // Collect local SPPARKS sites keyed by (id, coordinates); missing y/z
  // components are zero-filled for lower-dimensional lattices.
  std::set<SPPARKSID> spparks_id; // Local SPPARKS nodes
  for (int i = 0; i < nlcl; ++i)
  {
    Point p(xyz_array[i][0], _dim > 1 ? xyz_array[i][1] : 0, _dim > 2 ? xyz_array[i][2] : 0);
    spparks_id.insert(SPPARKSID(i, p));
  }
  _num_local_spparks_nodes = spparks_id.size();

  // MOOSE FEM nodes are set up with periodic bcs (nodes at each end of periodicity),
  // but SPPARKS nodes are not. We may have two or more MOOSE FEM nodes at SPPARKS
  // node locations.
  std::multiset<FEMID> fem_id; // Local MOOSE FEM nodes
  for (auto & node : *_fe_problem.mesh().getLocalNodeRange())
  {
    Point coor(*node);

    // TODO: this is horrible
    // Fold periodic-image nodes on the max faces onto the min faces so they
    // coincide with SPPARKS lattice sites (exact floating-point comparison —
    // relies on the generated mesh placing nodes exactly at xmax/ymax/zmax).
    if (coor(0) == _xmax)
      coor(0) = _xmin;
    if (coor(1) == _ymax)
      coor(1) = _ymin;
    if (coor(2) == _zmax)
      coor(2) = _zmin;

    fem_id.insert(FEMID(node->id(), coor));
  }
  _num_local_fem_nodes = fem_id.size();

  // SPPARKS nodes not found on this processor
  std::set<SPPARKSID> unmatched_spparks;

  // Local coordinate matching in both directions; anything unmatched must be
  // owned by another rank and is resolved by the rendezvous below.
  for (auto & id : spparks_id)
  {
    auto fem_iter = fem_id.find(id);
    if (fem_iter != fem_id.end())
      _spparks_to_fem.insert(std::pair<SPPARKSID, FEMID>(id, *fem_iter)); // SPPARKSID to FEMID
    else
      unmatched_spparks.insert(id);
  }

  for (auto & id : fem_id)
  {
    auto spparks_iter = spparks_id.find(id);
    if (spparks_iter != spparks_id.end())
      _fem_to_spparks.insert(std::pair<FEMID, SPPARKSID>(id, *spparks_iter)); // FEMID to SPPARKSID
    // else
    //   _spparks_to_proc.insert(std::pair<SPPARKSID, unsigned int>(*i, -1));
  }

  const unsigned int num_procs = n_processors();

  // Serial run: everything must have matched locally; nothing to communicate.
  if (num_procs == 1)
  {
    if (spparks_id.size() != _spparks_to_fem.size())
    {
      // Error skipped to allow unmatched meshes
      // mooseError("Did not find MOOSE FEM node for each SPPARKS node, ", spparks_id.size()
      //           , ", ", fem_id.size(), ", ", _spparks_to_fem.size());

      _console << "\n spparks size " << spparks_id.size() << "not equal to fem size "
               << fem_id.size() << '\n';
    }

    return;
  }

  // 2. Get send map (spparks id -> proc id)

  // A. Get local SPPARKS bounding box
  // TODO: Expand bounding boxes by half cell width?
  // May not be necessary.

  unsigned int proc_id = processor_id();

  // Every rank fills its own 6-entry slot (min x/y/z, max x/y/z); the
  // all-reduce sum below then shares all boxes with everyone (other slots
  // start at 0.0, so summing just gathers).
  std::vector<Real> spparks_bounds(num_procs * 6, 0.0);
  unsigned int offset = proc_id * 6;
  Real * s_bounds = &spparks_bounds[0] + offset;
  s_bounds[0] = s_bounds[1] = s_bounds[2] = std::numeric_limits<Real>::max();
  // NOTE(review): numeric_limits<Real>::min() is the smallest *positive*
  // value, not the most negative one; for meshes with negative coordinates
  // lowest() would be the correct initializer for the max bounds — verify.
  s_bounds[3] = s_bounds[4] = s_bounds[5] = std::numeric_limits<Real>::min();
  for (auto & id : unmatched_spparks)
  {
    s_bounds[0] = std::min(s_bounds[0], id.coor(0));
    s_bounds[1] = std::min(s_bounds[1], id.coor(1));
    s_bounds[2] = std::min(s_bounds[2], id.coor(2));
    s_bounds[3] = std::max(s_bounds[3], id.coor(0));
    s_bounds[4] = std::max(s_bounds[4], id.coor(1));
    s_bounds[5] = std::max(s_bounds[5], id.coor(2));
  }
  libMesh::BoundingBox spparks_bb(Point(s_bounds[0], s_bounds[1], s_bounds[2]),
                                  Point(s_bounds[3], s_bounds[4], s_bounds[5]));
  _communicator.sum(spparks_bounds);

  //
  // B: Get MOOSE bounding boxes
  //

  //
  // TODO: These bboxes could/should be based on non-matched fem nodes.
  //
  std::vector<Real> fem_bounds(num_procs * 6, 0.0);
  Real * e_bounds = &fem_bounds[0] + offset;
  e_bounds[0] = e_bounds[1] = e_bounds[2] = std::numeric_limits<Real>::max();
  e_bounds[3] = e_bounds[4] = e_bounds[5] = std::numeric_limits<Real>::min();
  for (auto & id : fem_id)
  {
    e_bounds[0] = std::min(e_bounds[0], id.coor(0));
    e_bounds[1] = std::min(e_bounds[1], id.coor(1));
    e_bounds[2] = std::min(e_bounds[2], id.coor(2));
    e_bounds[3] = std::max(e_bounds[3], id.coor(0));
    e_bounds[4] = std::max(e_bounds[4], id.coor(1));
    e_bounds[5] = std::max(e_bounds[5], id.coor(2));
  }
  libMesh::BoundingBox fem_bb(Point(e_bounds[0], e_bounds[1], e_bounds[2]),
                              Point(e_bounds[3], e_bounds[4], e_bounds[5]));
  _communicator.sum(fem_bounds);

  //
  // C: Get number of processors that overlap my SPPARKS and MOOSE domains
  //

  std::vector<unsigned int> procs_overlapping_spparks_domain;
  std::vector<unsigned int> procs_overlapping_fem_domain;
  for (unsigned int i = 0; i < num_procs; ++i)
  {
    if (i == proc_id)
      continue;

    offset = i * 6;
    e_bounds = &fem_bounds[0] + offset;
    libMesh::BoundingBox e_box(Point(e_bounds[0], e_bounds[1], e_bounds[2]),
                               Point(e_bounds[3], e_bounds[4], e_bounds[5]));
    if (spparks_bb.intersects(e_box))
      procs_overlapping_spparks_domain.push_back(i);

    s_bounds = &spparks_bounds[0] + offset;
    libMesh::BoundingBox s_box(Point(s_bounds[0], s_bounds[1], s_bounds[2]),
                               Point(s_bounds[3], s_bounds[4], s_bounds[5]));
    if (fem_bb.intersects(s_box))
      procs_overlapping_fem_domain.push_back(i);
  }

  //
  // D: Communicate number of MOOSE FEM nodes, number of SPPARKS nodes
  //

  // Number remote fem nodes for each proc overlapping my spparks
  std::vector<unsigned int> num_fem_nodes(procs_overlapping_spparks_domain.size(), 0);
  std::vector<unsigned int> num_spparks_nodes(procs_overlapping_fem_domain.size(), 0);

  // Request buffers are sized to the larger candidate list because the same
  // vectors are reused for both directions in later phases.
  std::vector<MPI_Request> recv_request1(
      std::max(procs_overlapping_spparks_domain.size(), procs_overlapping_fem_domain.size()));
  std::vector<MPI_Request> recv_request2(
      std::max(procs_overlapping_spparks_domain.size(), procs_overlapping_fem_domain.size()));
  int comm_tag = 100;

  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
    // if (num_fem_nodes.size() && procs_overlapping_spparks_domain.size())
    MPI_Irecv(&num_fem_nodes[i],
              1,
              MPI_UNSIGNED,
              procs_overlapping_spparks_domain[i],
              comm_tag,
              _communicator.get(),
              &recv_request1[i]);

  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
    // if (num_spparks_nodes.size() && procs_overlapping_fem_domain.size())
    MPI_Irecv(&num_spparks_nodes[i],
              1,
              MPI_UNSIGNED,
              procs_overlapping_fem_domain[i],
              comm_tag + 11,
              _communicator.get(),
              &recv_request2[i]);

  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
    // if (procs_overlapping_fem_domain.size())
    MPI_Send(&_num_local_fem_nodes,
             1,
             MPI_UNSIGNED,
             procs_overlapping_fem_domain[i],
             comm_tag,
             _communicator.get());

  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
    // if (procs_overlapping_spparks_domain.size())
    MPI_Send(&_num_local_spparks_nodes,
             1,
             MPI_UNSIGNED,
             procs_overlapping_spparks_domain[i],
             comm_tag + 11,
             _communicator.get());

  std::vector<MPI_Status> recv_status1(
      std::max(procs_overlapping_spparks_domain.size(), procs_overlapping_fem_domain.size()));
  std::vector<MPI_Status> recv_status2(
      std::max(procs_overlapping_spparks_domain.size(), procs_overlapping_fem_domain.size()));
  MPI_Waitall(procs_overlapping_spparks_domain.size(), &recv_request1[0], &recv_status1[0]);
  MPI_Waitall(procs_overlapping_fem_domain.size(), &recv_request2[0], &recv_status2[0]);

  //
  // E: Communicate MOOSE FEM nodes, SPPARKS nodes
  //

  comm_tag = 200;
  int comm_tag_double = comm_tag + 1;
  const unsigned int num_fem_nodes_total =
      std::accumulate(&num_fem_nodes[0], &num_fem_nodes[0] + num_fem_nodes.size(), 0);
  const unsigned int num_spparks_nodes_total =
      std::accumulate(&num_spparks_nodes[0], &num_spparks_nodes[0] + num_spparks_nodes.size(), 0);
  std::vector<unsigned int> remote_fem_nodes(num_fem_nodes_total);
  std::vector<Real> remote_fem_coords(num_fem_nodes_total * 3);
  std::vector<unsigned int> remote_spparks_nodes(num_spparks_nodes_total);
  std::vector<Real> remote_spparks_coords(num_spparks_nodes_total * 3);
  offset = 0;

  // Receive MOOSE FEM nodes (ids and flat xyz coordinate triples) from every
  // rank whose FEM box overlaps our SPPARKS box.
  std::vector<MPI_Request> recv_request_coor1(procs_overlapping_spparks_domain.size());
  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
  {
    MPI_Irecv(&remote_fem_nodes[offset],
              num_fem_nodes[i],
              MPI_UNSIGNED,
              procs_overlapping_spparks_domain[i],
              comm_tag,
              _communicator.get(),
              &recv_request1[i]);
    MPI_Irecv(&remote_fem_coords[offset * 3],
              num_fem_nodes[i] * 3,
              MPI_DOUBLE,
              procs_overlapping_spparks_domain[i],
              comm_tag_double,
              _communicator.get(),
              &recv_request_coor1[i]);
    offset += num_fem_nodes[i];
  }

  // Receive SPPARKS nodes
  std::vector<MPI_Request> recv_request_coor2(procs_overlapping_fem_domain.size());
  offset = 0;
  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
  {
    MPI_Irecv(&remote_spparks_nodes[offset],
              num_spparks_nodes[i],
              MPI_UNSIGNED,
              procs_overlapping_fem_domain[i],
              comm_tag + 11,
              _communicator.get(),
              &recv_request2[i]);
    MPI_Irecv(&remote_spparks_coords[offset * 3],
              num_spparks_nodes[i] * 3,
              MPI_DOUBLE,
              procs_overlapping_fem_domain[i],
              comm_tag_double + 11,
              _communicator.get(),
              &recv_request_coor2[i]);
    offset += num_spparks_nodes[i];
  }

  // Prepare vectors of MOOSE FEM ids, coordinates
  std::vector<unsigned int> fem_ids(_num_local_fem_nodes);
  std::vector<Real> fem_coords(3 * _num_local_fem_nodes);
  offset = 0;
  for (auto & id : fem_id)
  {
    fem_ids[offset] = id.id;
    fem_coords[offset * 3 + 0] = id.coor(0);
    fem_coords[offset * 3 + 1] = id.coor(1);
    fem_coords[offset * 3 + 2] = id.coor(2);
    ++offset;
  }

  // Send MOOSE FEM ids, coordinates
  offset = 0;
  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
  {
    MPI_Send(&fem_ids[0],
             _num_local_fem_nodes,
             MPI_UNSIGNED,
             procs_overlapping_fem_domain[i],
             comm_tag,
             _communicator.get());
    MPI_Send(&fem_coords[0],
             _num_local_fem_nodes * 3,
             MPI_DOUBLE,
             procs_overlapping_fem_domain[i],
             comm_tag_double,
             _communicator.get());
    ++offset;
  }

  // Prepare SPPARKS ids, coordinates
  std::vector<unsigned int> spparks_ids(_num_local_spparks_nodes);
  std::vector<Real> spparks_coords(3 * _num_local_spparks_nodes);
  offset = 0;
  for (auto & id : spparks_id)
  {
    spparks_ids[offset] = id.id;
    spparks_coords[offset * 3 + 0] = id.coor(0);
    spparks_coords[offset * 3 + 1] = id.coor(1);
    spparks_coords[offset * 3 + 2] = id.coor(2);
    ++offset;
  }

  // Send SPPARKS ids, coordinates
  offset = 0;
  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
  {
    MPI_Send(&spparks_ids[0],
             _num_local_spparks_nodes,
             MPI_UNSIGNED,
             procs_overlapping_spparks_domain[i],
             comm_tag + 11,
             _communicator.get());
    MPI_Send(&spparks_coords[0],
             _num_local_spparks_nodes * 3,
             MPI_DOUBLE,
             procs_overlapping_spparks_domain[i],
             comm_tag_double + 11,
             _communicator.get());
    ++offset;
  }

  MPI_Waitall(procs_overlapping_spparks_domain.size(), &recv_request1[0], &recv_status1[0]);
  MPI_Waitall(procs_overlapping_spparks_domain.size(), &recv_request_coor1[0], &recv_status1[0]);
  MPI_Waitall(procs_overlapping_fem_domain.size(), &recv_request2[0], &recv_status2[0]);
  MPI_Waitall(procs_overlapping_fem_domain.size(), &recv_request_coor2[0], &recv_status2[0]);

  //
  // F: Count matching nodes for each proc that sent MOOSE FEM nodes, SPPARKS nodes
  //

  comm_tag = 300;
  // Receive number of MOOSE FEM nodes on this processor that match SPPARKS nodes on another
  std::vector<unsigned int> num_remote_fem_matches(procs_overlapping_fem_domain.size());
  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
    MPI_Irecv(&num_remote_fem_matches[i],
              1,
              MPI_UNSIGNED,
              procs_overlapping_fem_domain[i],
              comm_tag,
              _communicator.get(),
              &recv_request1[i]);

  // Receive number of SPPARKS nodes on this processor that match MOOSE FEM nodes on another
  std::vector<unsigned int> num_remote_spparks_matches(procs_overlapping_spparks_domain.size());
  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
    MPI_Irecv(&num_remote_spparks_matches[i],
              1,
              MPI_UNSIGNED,
              procs_overlapping_spparks_domain[i],
              comm_tag + 11,
              _communicator.get(),
              &recv_request2[i]);

  // Count number of remote MOOSE FEM nodes that match the processor's SPPARKS nodes
  // Store the matches
  std::vector<std::vector<unsigned int>> spparks_matches(procs_overlapping_spparks_domain.size());
  offset = 0;
  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
    for (unsigned int j = 0; j < num_fem_nodes[i]; ++j)
    {
      SPPARKSID tmp(remote_fem_nodes[offset],
                    Point(remote_fem_coords[offset * 3 + 0],
                          remote_fem_coords[offset * 3 + 1],
                          remote_fem_coords[offset * 3 + 2]));
      auto iter = spparks_id.find(tmp);
      if (iter != spparks_id.end())
      {
        spparks_matches[i].push_back(tmp.id);
        _spparks_to_proc[*iter].push_back(procs_overlapping_spparks_domain[i]);
      }

      ++offset;
    }

  // Count number of remote SPPARKS nodes that match this processor's MOOSE FEM nodes
  // Store the matches
  std::vector<std::vector<unsigned int>> fem_matches(procs_overlapping_fem_domain.size());
  offset = 0;
  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
    for (unsigned int j = 0; j < num_spparks_nodes[i]; ++j)
    {
      FEMID tmp(remote_spparks_nodes[offset],
                Point(remote_spparks_coords[offset * 3 + 0],
                      remote_spparks_coords[offset * 3 + 1],
                      remote_spparks_coords[offset * 3 + 2]));
      auto iter = fem_id.find(tmp);
      if (iter != fem_id.end())
      {
        fem_matches[i].push_back(tmp.id);
        _fem_to_proc[*iter].push_back(procs_overlapping_fem_domain[i]);
      }

      ++offset;
    }

  // Send number of MOOSE FEM nodes that match this processor's SPPARKS nodes
  std::vector<unsigned int> spparks_sizes(procs_overlapping_spparks_domain.size());
  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
  {
    spparks_sizes[i] = spparks_matches[i].size();
    MPI_Send(&spparks_sizes[i],
             1,
             MPI_UNSIGNED,
             procs_overlapping_spparks_domain[i],
             comm_tag,
             _communicator.get());
  }

  // Send number of SPPARKS nodes that match this processor's MOOSE FEM nodes
  std::vector<unsigned int> fem_sizes(procs_overlapping_fem_domain.size());
  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
  {
    fem_sizes[i] = fem_matches[i].size();
    MPI_Send(&fem_sizes[i],
             1,
             MPI_UNSIGNED,
             procs_overlapping_fem_domain[i],
             comm_tag + 11,
             _communicator.get());
  }

  MPI_Waitall(procs_overlapping_fem_domain.size(), &recv_request1[0], &recv_status1[0]);
  MPI_Waitall(procs_overlapping_spparks_domain.size(), &recv_request2[0], &recv_status2[0]);

  //
  // G: Communicate matching nodes
  //

  comm_tag = 400;
  // Receive MOOSE FEM ids that match SPPARKS nodes on another processor
  std::vector<std::vector<unsigned int>> matched_fem_ids(procs_overlapping_fem_domain.size());
  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
  {
    matched_fem_ids[i].resize(num_remote_fem_matches[i]);
    if (matched_fem_ids[i].size())
      MPI_Irecv(&matched_fem_ids[i][0],
                num_remote_fem_matches[i],
                MPI_UNSIGNED,
                procs_overlapping_fem_domain[i],
                comm_tag,
                _communicator.get(),
                &recv_request1[i]);
  }

  // Receive SPPARKS ids that match MOOSE FEM nodes on another processor
  std::vector<std::vector<unsigned int>> matched_spparks_ids(
      procs_overlapping_spparks_domain.size());
  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
  {
    matched_spparks_ids[i].resize(num_remote_spparks_matches[i]);
    if (matched_spparks_ids[i].size())
      MPI_Irecv(&matched_spparks_ids[i][0],
                num_remote_spparks_matches[i],
                MPI_UNSIGNED,
                procs_overlapping_spparks_domain[i],
                comm_tag + 11,
                _communicator.get(),
                &recv_request2[i]);
  }

  // Send remote MOOSE FEM ids that match SPPARKS nodes on this processor
  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
  {
    spparks_matches[i].resize(spparks_sizes[i]);
    if (spparks_matches[i].size())
      MPI_Send(&spparks_matches[i][0],
               spparks_sizes[i],
               MPI_UNSIGNED,
               procs_overlapping_spparks_domain[i],
               comm_tag,
               _communicator.get());
  }
  // Send remote SPPARKS ids that match MOOSE FEM nodes on this processor
  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
  {
    fem_matches[i].resize(fem_sizes[i]);
    if (fem_matches[i].size())
      MPI_Send(&fem_matches[i][0],
               fem_sizes[i],
               MPI_UNSIGNED,
               procs_overlapping_fem_domain[i],
               comm_tag + 11,
               _communicator.get());
  }

  MPI_Waitall(procs_overlapping_fem_domain.size(), &recv_request1[0], &recv_status1[0]);
  MPI_Waitall(procs_overlapping_spparks_domain.size(), &recv_request2[0], &recv_status2[0]);

  //
  // H: Generate final recv communication maps
  //

  // Flatten the per-rank match lists into the id lists used by the runtime
  // send/recv routines (rank -> ids that rank will send us).
  for (unsigned int i = 0; i < procs_overlapping_fem_domain.size(); ++i)
    for (unsigned int j = 0; j < num_remote_fem_matches[i]; ++j)
      _sending_proc_to_fem_id[procs_overlapping_fem_domain[i]].push_back(matched_fem_ids[i][j]);

  for (unsigned int i = 0; i < procs_overlapping_spparks_domain.size(); ++i)
    for (unsigned int j = 0; j < num_remote_spparks_matches[i]; ++j)
      _sending_proc_to_spparks_id[procs_overlapping_spparks_domain[i]].push_back(
          matched_spparks_ids[i][j]);
}
|