Line data Source code
1 : // The libMesh Finite Element Library.
2 : // Copyright (C) 2002-2025 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 :
4 : // This library is free software; you can redistribute it and/or
5 : // modify it under the terms of the GNU Lesser General Public
6 : // License as published by the Free Software Foundation; either
7 : // version 2.1 of the License, or (at your option) any later version.
8 :
9 : // This library is distributed in the hope that it will be useful,
10 : // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 : // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 : // Lesser General Public License for more details.
13 :
14 : // You should have received a copy of the GNU Lesser General Public
15 : // License along with this library; if not, write to the Free Software
16 : // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 :
18 :
19 : // LibMesh includes
20 : #include "libmesh/distributed_mesh.h"
21 : #include "libmesh/dof_map.h" // local_index
22 : #include "libmesh/elem.h"
23 : #include "libmesh/exodusII_io.h"
24 : #include "libmesh/libmesh_logging.h"
25 : #include "libmesh/nemesis_io.h"
26 : #include "libmesh/nemesis_io_helper.h"
27 : #include "libmesh/node.h"
28 : #include "libmesh/parallel.h"
29 : #include "libmesh/utility.h" // deallocate
30 : #include "libmesh/boundary_info.h"
31 : #include "libmesh/mesh_communication.h"
32 : #include "libmesh/fe_type.h"
33 : #include "libmesh/equation_systems.h"
34 : #include "libmesh/numeric_vector.h"
35 : #include "libmesh/int_range.h"
36 :
37 : // C++ includes
38 : #include <memory>
39 : #include <numeric> // std::accumulate
40 :
41 : namespace libMesh
42 : {
43 :
44 :
45 : //-----------------------------------------------
46 : // anonymous namespace for implementation details
47 : namespace {
48 :
49 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
struct CompareGlobalIdxMappings
{
  // Strict weak ordering on (old_global_idx -> new_global_idx) pairs.
  // Each old index maps to exactly one new index, so only the first
  // entry of each pair participates in the comparison.
  bool operator()(const std::pair<unsigned int, unsigned int> & a,
                  const std::pair<unsigned int, unsigned int> & b) const
  { return a.first < b.first; }

  // Heterogeneous overload: allows binary searches (std::lower_bound)
  // over the sorted pair vector using a bare old index as the key.
  bool operator()(const std::pair<unsigned int, unsigned int> & a,
                  const unsigned int b) const
  { return a.first < b; }
};
64 : #endif // defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
65 :
66 : // Nemesis & ExodusII use int for all integer values, even the ones which
67 : // should never be negative. we like to use unsigned as a force of habit,
68 : // this trivial little method saves some typing & also makes sure something
69 : // is not horribly wrong.
// Nemesis & ExodusII report sizes/counts as (signed) int; libMesh prefers
// unsigned.  Convert with a debug-mode round-trip check so a negative or
// truncated value is caught instead of silently wrapping.
template <typename T>
inline unsigned int to_uint ( const T & t )
{
  const unsigned int converted = static_cast<unsigned int>(t);

  // The value must survive conversion to unsigned and back unchanged.
  libmesh_assert_equal_to (t, static_cast<T>(converted));

  return converted;
}
77 :
78 : // test equality for a.first -> a.second mapping. since we can only map to one
79 : // value only test the first entry
80 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API) && !defined(NDEBUG)
81 40 : inline bool global_idx_mapping_equality (const std::pair<unsigned int, unsigned int> & a,
82 : const std::pair<unsigned int, unsigned int> & b)
83 : {
84 40 : return a.first == b.first;
85 : }
86 : #endif
87 :
88 : }
89 :
90 :
91 :
92 : // ------------------------------------------------------------
// ------------------------------------------------------------
// Nemesis_IO class members

// Read/write constructor: registers the (writable) mesh with both the
// MeshInput and MeshOutput interfaces, each flagged as a parallel format.
// When the Exodus and Nemesis APIs are available, also constructs the
// helper object that wraps the underlying Nemesis library calls.
Nemesis_IO::Nemesis_IO (MeshBase & mesh,
                        bool single_precision) :
  MeshInput<MeshBase> (mesh, /*is_parallel_format=*/true),
  MeshOutput<MeshBase> (mesh, /*is_parallel_format=*/true),
  ParallelObject (mesh),
#if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
  nemhelper(std::make_unique<Nemesis_IO_Helper>(*this, false, single_precision)),
  _timestep(1),
#endif
  _verbose (false),
  _append(false),
  _allow_empty_variables(false)
{
  // if !LIBMESH_HAVE_EXODUS_API, we didn't use this
  libmesh_ignore(single_precision);
}
110 :
111 :
112 :
113 142 : Nemesis_IO::Nemesis_IO (const MeshBase & mesh,
114 142 : bool single_precision) :
115 : MeshInput<MeshBase> (),
116 : MeshOutput<MeshBase> (mesh, /*is_parallel_format=*/true),
117 : ParallelObject (mesh),
118 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
119 0 : nemhelper(std::make_unique<Nemesis_IO_Helper>(*this, false, single_precision)),
120 134 : _timestep(1),
121 : #endif
122 134 : _verbose (false),
123 134 : _append(false),
124 142 : _allow_empty_variables(false)
125 : {
126 : // if !LIBMESH_HAVE_EXODUS_API, we didn't use this
127 4 : libmesh_ignore(single_precision);
128 142 : }
129 :
130 :
131 :
132 : // Destructor. Defined in the C file so we can be sure to get away
133 : // with a forward declaration of Nemesis_IO_Helper in the header file.
134 8371 : Nemesis_IO::~Nemesis_IO () = default;
135 :
136 :
137 :
138 0 : void Nemesis_IO::verbose (bool set_verbosity)
139 : {
140 0 : _verbose = set_verbosity;
141 :
142 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
143 : // Set the verbose flag in the helper object
144 : // as well.
145 0 : nemhelper->verbose = _verbose;
146 : #endif
147 0 : }
148 :
149 :
150 :
151 0 : void Nemesis_IO::write_complex_magnitude (bool val)
152 : {
153 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
154 0 : nemhelper->write_complex_abs = val;
155 : #endif
156 0 : libmesh_ignore(val);
157 0 : }
158 :
159 :
160 :
161 0 : void Nemesis_IO::append(bool val)
162 : {
163 0 : _append = val;
164 0 : }
165 :
166 :
167 :
168 0 : void Nemesis_IO::set_output_variables(const std::vector<std::string> & output_variables,
169 : bool allow_empty)
170 : {
171 0 : _output_variables = output_variables;
172 0 : _allow_empty_variables = allow_empty;
173 0 : }
174 :
175 :
176 :
177 8871 : void Nemesis_IO::assert_symmetric_cmaps()
178 : {
179 : #ifndef NDEBUG
180 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
181 : // We expect the communication maps to be symmetric - e.g. if processor i thinks it
182 : // communicates with processor j, then processor j should also be expecting to
183 : // communicate with i. We can assert that here easily enough with an alltoall,
184 : // but let's only do it when not in optimized mode to limit unnecessary communication.
185 : {
186 500 : std::vector<unsigned char> pid_send_partner (this->n_processors(), 0);
187 :
188 : // strictly speaking, we should expect to communicate with ourself...
189 250 : pid_send_partner[this->processor_id()] = 1;
190 :
191 : // mark each processor id we reference with a node cmap
192 452 : for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
193 : {
194 202 : libmesh_assert_less (nemhelper->node_cmap_ids[cmap], this->n_processors());
195 :
196 202 : pid_send_partner[nemhelper->node_cmap_ids[cmap]] = 1;
197 : }
198 :
199 : // Copy the send pairing so we can catch the receive paring and
200 : // test for equality
201 500 : const std::vector<unsigned char> pid_recv_partner (pid_send_partner);
202 :
203 250 : this->comm().alltoall (pid_send_partner);
204 :
205 250 : libmesh_assert (pid_send_partner == pid_recv_partner);
206 : }
207 : #endif
208 : #endif
209 8871 : }
210 :
211 :
212 :
213 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
214 850 : void Nemesis_IO::read (const std::string & base_filename)
215 : {
216 48 : LOG_SCOPE ("read()","Nemesis_IO");
217 :
218 : // This function must be run on all processors at once
219 24 : parallel_object_only();
220 :
221 850 : if (_verbose)
222 : {
223 0 : libMesh::out << "[" << this->processor_id() << "] ";
224 0 : libMesh::out << "Reading Nemesis file on processor: " << this->processor_id() << std::endl;
225 : }
226 :
227 : // Construct the Nemesis filename based on the number of processors and the
228 : // current processor ID.
229 874 : std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
230 :
231 850 : if (_verbose)
232 0 : libMesh::out << "Opening file: " << nemesis_filename << std::endl;
233 :
234 : // Open the Exodus file in EX_READ mode
235 874 : nemhelper->open(nemesis_filename.c_str(), /*read_only=*/true);
236 :
237 : // Get a reference to the mesh. We need to be specific
238 : // since Nemesis_IO is multiply-inherited
239 : // MeshBase & mesh = this->mesh();
240 850 : MeshBase & mesh = MeshInput<MeshBase>::mesh();
241 :
242 : // We're reading a file on each processor, so our mesh is
243 : // partitioned into that many parts as it's created
244 48 : this->set_n_partitions(this->n_processors());
245 :
246 : // Local information: Read the following information from the standard Exodus header
247 : // title[0]
248 : // num_dim
249 : // num_nodes
250 : // num_elem
251 : // num_elem_blk
252 : // num_node_sets
253 : // num_side_sets
254 850 : nemhelper->read_and_store_header_info();
255 850 : nemhelper->print_header();
256 :
257 : // Get global information: number of nodes, elems, blocks, nodesets and sidesets
258 850 : nemhelper->get_init_global();
259 :
260 : // Get "load balance" information. This includes the number of internal & border
261 : // nodes and elements as well as the number of communication maps.
262 850 : nemhelper->get_loadbal_param();
263 :
264 : // Do some error checking
265 850 : libmesh_error_msg_if(nemhelper->num_external_nodes,
266 : "ERROR: there should be no external nodes in an element-based partitioning!");
267 :
268 24 : libmesh_assert_equal_to (nemhelper->num_nodes,
269 : (nemhelper->num_internal_nodes +
270 : nemhelper->num_border_nodes));
271 :
272 24 : libmesh_assert_equal_to (nemhelper->num_elem,
273 : (nemhelper->num_internal_elems +
274 : nemhelper->num_border_elems));
275 :
276 24 : libmesh_assert_less_equal (nemhelper->num_nodes, nemhelper->num_nodes_global);
277 24 : libmesh_assert_less_equal (nemhelper->num_elem, nemhelper->num_elems_global);
278 :
279 : // Read nodes from the exodus file: this fills the nemhelper->x,y,z arrays.
280 850 : nemhelper->read_nodes();
281 :
282 : // Reads the nemhelper->node_num_map array, node_num_map[i] is the global node number for
283 : // local node number i.
284 850 : nemhelper->read_node_num_map();
285 :
286 : // The get_cmap_params() function reads in the:
287 : // node_cmap_ids[],
288 : // node_cmap_node_cnts[],
289 : // elem_cmap_ids[],
290 : // elem_cmap_elem_cnts[],
291 850 : nemhelper->get_cmap_params();
292 :
293 : // Read the IDs of the interior, boundary, and external nodes. This function
294 : // fills the vectors:
295 : // node_mapi[],
296 : // node_mapb[],
297 : // node_mape[]
298 850 : nemhelper->get_node_map();
299 :
300 : // Read each node communication map for this processor. This function
301 : // fills the vectors of vectors named:
302 : // node_cmap_node_ids[][]
303 : // node_cmap_proc_ids[][]
304 850 : nemhelper->get_node_cmap();
305 :
306 24 : libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_cnts.size());
307 24 : libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_ids.size());
308 24 : libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_proc_ids.size());
309 :
310 850 : this->assert_symmetric_cmaps();
311 :
312 : // We now have enough information to infer node ownership. We start by assuming
313 : // we own all the nodes on this processor. We will then interrogate the
314 : // node cmaps and see if a lower-rank processor is associated with any of
315 : // our nodes. If so, then that processor owns the node, not us...
316 922 : std::vector<processor_id_type> node_ownership (nemhelper->num_internal_nodes +
317 850 : nemhelper->num_border_nodes,
318 946 : this->processor_id());
319 :
320 : // a map from processor id to cmap number, to be used later
321 48 : std::map<unsigned int, unsigned int> pid_to_cmap_map;
322 :
323 : // For each node_cmap...
324 1850 : for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
325 : {
326 : // Good time for error checking...
327 20 : libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
328 : nemhelper->node_cmap_node_ids[cmap].size());
329 :
330 20 : libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
331 : nemhelper->node_cmap_proc_ids[cmap].size());
332 :
333 : // In all the samples I have seen, node_cmap_ids[cmap] is the processor
334 : // rank of the remote processor...
335 : const processor_id_type adjcnt_pid_idx =
336 1020 : cast_int<processor_id_type>(nemhelper->node_cmap_ids[cmap]);
337 :
338 20 : libmesh_assert_less (adjcnt_pid_idx, this->n_processors());
339 20 : libmesh_assert_not_equal_to (adjcnt_pid_idx, this->processor_id());
340 :
341 : // We only expect one cmap per adjacent processor
342 20 : libmesh_assert (!pid_to_cmap_map.count(adjcnt_pid_idx));
343 :
344 1000 : pid_to_cmap_map[adjcnt_pid_idx] = cmap;
345 :
346 : // ...and each node in that cmap...
347 3304 : for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
348 : {
349 : // Are the node_cmap_ids and node_cmap_proc_ids really redundant?
350 100 : libmesh_assert_equal_to
351 : (adjcnt_pid_idx,
352 : cast_int<processor_id_type>(nemhelper->node_cmap_proc_ids[cmap][idx]));
353 :
354 : // we are expecting the exodus node numbering to be 1-based...
355 2284 : const unsigned int local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1;
356 :
357 100 : libmesh_assert_less (local_node_idx, node_ownership.size());
358 :
359 : // if the adjacent processor is lower rank than the current
360 : // owner for this node, then it will get the node...
361 2184 : node_ownership[local_node_idx] =
362 2994 : std::min(node_ownership[local_node_idx], adjcnt_pid_idx);
363 : }
364 : } // We now should have established proper node ownership.
365 :
366 : // now that ownership is established, we can figure out how many nodes we
367 : // will be responsible for numbering.
368 24 : unsigned int num_nodes_i_must_number = 0;
369 :
370 3526 : for (const auto & pid : node_ownership)
371 2880 : if (pid == this->processor_id())
372 1816 : num_nodes_i_must_number++;
373 :
374 : // more error checking...
375 24 : libmesh_assert_greater_equal (num_nodes_i_must_number, nemhelper->num_internal_nodes);
376 24 : libmesh_assert (num_nodes_i_must_number <= to_uint(nemhelper->num_internal_nodes +
377 : nemhelper->num_border_nodes));
378 850 : if (_verbose)
379 0 : libMesh::out << "[" << this->processor_id() << "] "
380 0 : << "num_nodes_i_must_number="
381 0 : << num_nodes_i_must_number
382 0 : << std::endl;
383 :
384 : // The call to get_loadbal_param() gets 7 pieces of information. We allgather
385 : // these now across all processors to determine some global numberings. We should
386 : // also gather the number of nodes each processor thinks it will number so that
387 : // we can (i) determine our offset, and (ii) do some error checking.
388 874 : std::vector<int> all_loadbal_data ( 8 );
389 850 : all_loadbal_data[0] = nemhelper->num_internal_nodes;
390 850 : all_loadbal_data[1] = nemhelper->num_border_nodes;
391 850 : all_loadbal_data[2] = nemhelper->num_external_nodes;
392 850 : all_loadbal_data[3] = nemhelper->num_internal_elems;
393 850 : all_loadbal_data[4] = nemhelper->num_border_elems;
394 850 : all_loadbal_data[5] = nemhelper->num_node_cmaps;
395 850 : all_loadbal_data[6] = nemhelper->num_elem_cmaps;
396 850 : all_loadbal_data[7] = num_nodes_i_must_number;
397 :
398 850 : this->comm().allgather (all_loadbal_data, /* identical_buffer_sizes = */ true);
399 :
400 : // OK, we are now in a position to request new global indices for all the nodes
401 : // we do not own
402 :
403 : // Let's get a unique message tag to use for send()/receive()
404 898 : Parallel::MessageTag nodes_tag = mesh.comm().get_unique_tag();
405 :
406 : std::vector<std::vector<int>>
407 898 : needed_node_idxs (nemhelper->num_node_cmaps); // the indices we will ask for
408 :
409 : std::vector<Parallel::Request>
410 898 : needed_nodes_requests (nemhelper->num_node_cmaps);
411 :
412 1850 : for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
413 : {
414 : // We know we will need no more indices than there are nodes
415 : // in this cmap, but that number is an upper bound in general
416 : // since the neighboring processor associated with the cmap
417 : // may not actually own it
418 1040 : needed_node_idxs[cmap].reserve (nemhelper->node_cmap_node_cnts[cmap]);
419 :
420 1020 : const unsigned int adjcnt_pid_idx = nemhelper->node_cmap_ids[cmap];
421 :
422 : // ...and each node in that cmap...
423 3204 : for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
424 : {
425 : const unsigned int
426 2284 : local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1,
427 2184 : owning_pid_idx = node_ownership[local_node_idx];
428 :
429 : // add it to the request list for its owning processor.
430 2184 : if (owning_pid_idx == adjcnt_pid_idx)
431 : {
432 : const unsigned int
433 860 : global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
434 910 : needed_node_idxs[cmap].push_back(global_node_idx);
435 : }
436 : }
437 : // now post the send for this cmap
438 1020 : this->comm().send (adjcnt_pid_idx, // destination
439 40 : needed_node_idxs[cmap], // send buffer
440 40 : needed_nodes_requests[cmap], // request
441 : nodes_tag);
442 : } // all communication requests for getting updated global indices for border
443 : // nodes have been initiated
444 :
445 : // Figure out how many nodes each processor thinks it will number and make sure
446 : // that it adds up to the global number of nodes. Also, set up global node
447 : // index offsets for each processor.
448 : std::vector<unsigned int>
449 898 : all_num_nodes_i_must_number (this->n_processors());
450 :
451 9164 : for (auto pid : make_range(this->n_processors()))
452 8410 : all_num_nodes_i_must_number[pid] = all_loadbal_data[8*pid + 7];
453 :
454 : // The sum of all the entries in this vector should sum to the number of global nodes
455 24 : libmesh_assert (std::accumulate(all_num_nodes_i_must_number.begin(),
456 : all_num_nodes_i_must_number.end(),
457 : 0) == nemhelper->num_nodes_global);
458 :
459 850 : unsigned int my_next_node = 0;
460 4582 : for (auto pid : make_range(this->processor_id()))
461 3744 : my_next_node += all_num_nodes_i_must_number[pid];
462 :
463 850 : const unsigned int my_node_offset = my_next_node;
464 :
465 850 : if (_verbose)
466 0 : libMesh::out << "[" << this->processor_id() << "] "
467 0 : << "my_node_offset="
468 0 : << my_node_offset
469 0 : << std::endl;
470 :
471 : // Add internal nodes to the DistributedMesh, using the node ID offset we
472 : // computed and the current processor's ID.
473 2014 : for (unsigned int i=0; i<to_uint(nemhelper->num_internal_nodes); ++i)
474 : {
475 1164 : const unsigned int local_node_idx = nemhelper->node_mapi[i]-1;
476 : #ifndef NDEBUG
477 104 : const unsigned int owning_pid_idx = node_ownership[local_node_idx];
478 : #endif
479 :
480 : // an internal node we do not own? huh??
481 104 : libmesh_assert_equal_to (owning_pid_idx, this->processor_id());
482 104 : libmesh_assert_less (my_next_node, nemhelper->num_nodes_global);
483 :
484 : // "Catch" the node pointer after addition, make sure the
485 : // ID matches the requested value.
486 : Node * added_node =
487 1372 : mesh.add_point (Point(nemhelper->x[local_node_idx],
488 208 : nemhelper->y[local_node_idx],
489 1164 : nemhelper->z[local_node_idx]),
490 : my_next_node,
491 208 : this->processor_id());
492 :
493 : // Make sure the node we added has the ID we thought it would
494 1164 : if (added_node->id() != my_next_node)
495 : {
496 0 : libMesh::err << "Error, node added with ID " << added_node->id()
497 0 : << ", but we wanted ID " << my_next_node << std::endl;
498 : }
499 :
500 : // Set a unique_id ourselves since ReplicatedMesh can't handle
501 : // distributed unique_id generation. Make sure it doesn't
502 : // overlap element unique_id() values either.
503 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
504 1268 : added_node->set_unique_id(added_node->id() + nemhelper->num_elems_global);
505 : #endif
506 :
507 : // update the local->global index map, keeping it 1-based
508 1268 : nemhelper->node_num_map[local_node_idx] = my_next_node++ + 1;
509 : }
510 :
511 : // Now, for the boundary nodes... We may very well own some of them,
512 : // but there may be others for which we have requested the new global
513 : // id. We expect to be asked for the ids of the ones we own, so
514 : // we need to create a map from the old global id to the new one
515 : // we are about to create.
516 : typedef std::vector<std::pair<unsigned int, unsigned int>> global_idx_mapping_type;
517 48 : global_idx_mapping_type old_global_to_new_global_map;
518 850 : old_global_to_new_global_map.reserve (num_nodes_i_must_number // total # i will have
519 850 : - (my_next_node // amount i have thus far
520 : - my_node_offset)); // this should be exact!
521 : CompareGlobalIdxMappings global_idx_mapping_comp;
522 :
523 2362 : for (unsigned int i=0; i<to_uint(nemhelper->num_border_nodes); ++i)
524 : {
525 : const unsigned int
526 1512 : local_node_idx = nemhelper->node_mapb[i]-1,
527 1512 : owning_pid_idx = node_ownership[local_node_idx];
528 :
529 : // if we own it...
530 1612 : if (owning_pid_idx == this->processor_id())
531 : {
532 : const unsigned int
533 652 : global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
534 :
535 : // we will number it, and create a mapping from its old global index to
536 : // the new global index, for lookup purposes when neighbors come calling
537 652 : old_global_to_new_global_map.emplace_back(global_node_idx, my_next_node);
538 :
539 : // "Catch" the node pointer after addition, make sure the
540 : // ID matches the requested value.
541 : Node * added_node =
542 802 : mesh.add_point (Point(nemhelper->x[local_node_idx],
543 100 : nemhelper->y[local_node_idx],
544 100 : nemhelper->z[local_node_idx]),
545 : my_next_node,
546 100 : this->processor_id());
547 :
548 : // Make sure the node we added has the ID we thought it would
549 652 : if (added_node->id() != my_next_node)
550 : {
551 0 : libMesh::err << "Error, node added with ID " << added_node->id()
552 0 : << ", but we wanted ID " << my_next_node << std::endl;
553 : }
554 :
555 : // Set a unique_id ourselves since ReplicatedMesh can't handle
556 : // distributed unique_id generation. Make sure it doesn't
557 : // overlap element unique_id() values either.
558 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
559 702 : added_node->set_unique_id(added_node->id() + nemhelper->num_elems_global);
560 : #endif
561 :
562 : // update the local->global index map, keeping it 1-based
563 702 : nemhelper->node_num_map[local_node_idx] = my_next_node++ + 1;
564 : }
565 : }
566 : // That should cover numbering all the nodes which belong to us...
567 24 : libmesh_assert_equal_to (num_nodes_i_must_number, (my_next_node - my_node_offset));
568 :
569 : // Let's sort the mapping so we can efficiently answer requests
570 850 : std::sort (old_global_to_new_global_map.begin(),
571 : old_global_to_new_global_map.end(),
572 : global_idx_mapping_comp);
573 :
574 : // and it had better be unique...
575 24 : libmesh_assert (std::unique (old_global_to_new_global_map.begin(),
576 : old_global_to_new_global_map.end(),
577 : global_idx_mapping_equality) ==
578 : old_global_to_new_global_map.end());
579 :
580 : // We can now catch incoming requests and process them. for efficiency
581 : // let's do whatever is available next
582 48 : std::map<unsigned int, std::vector<int>> requested_node_idxs; // the indices asked of us
583 :
584 898 : std::vector<Parallel::Request> requested_nodes_requests(nemhelper->num_node_cmaps);
585 :
586 : // We know we will receive the request from a given processor before
587 : // we receive its reply to our request. However, we may receive
588 : // a request and a response from one processor before getting
589 : // a request from another processor. So what we are doing here
590 : // is processing whatever message comes next, while recognizing
591 : // we will receive a request from a processor before receiving
592 : // its reply
593 898 : std::vector<bool> processed_cmap (nemhelper->num_node_cmaps, false);
594 :
595 2850 : for (unsigned int comm_step=0; comm_step<2*to_uint(nemhelper->num_node_cmaps); comm_step++)
596 : {
597 : // query the first message which is available
598 : const Parallel::Status
599 2040 : status (this->comm().probe (Parallel::any_source,
600 80 : nodes_tag));
601 : const unsigned int
602 2000 : requesting_pid_idx = status.source(),
603 2000 : source_pid_idx = status.source();
604 :
605 : // this had better be from a processor we are expecting...
606 40 : libmesh_assert (pid_to_cmap_map.count(requesting_pid_idx));
607 :
608 : // the local cmap which corresponds to the source processor
609 2000 : const unsigned int cmap = pid_to_cmap_map[source_pid_idx];
610 :
611 2040 : if (!processed_cmap[cmap])
612 : {
613 20 : processed_cmap[cmap] = true;
614 :
615 : // we should only get one request per paired processor
616 20 : libmesh_assert (!requested_node_idxs.count(requesting_pid_idx));
617 :
618 : // get a reference to the request buffer for this processor to
619 : // avoid repeated map lookups
620 1000 : std::vector<int> & xfer_buf (requested_node_idxs[requesting_pid_idx]);
621 :
622 : // actually receive the message.
623 1000 : this->comm().receive (requesting_pid_idx, xfer_buf, nodes_tag);
624 :
625 : // Fill the request
626 1860 : for (auto i : index_range(xfer_buf))
627 : {
628 : // the requested old global node index, *now 0-based*
629 860 : const unsigned int old_global_node_idx = xfer_buf[i];
630 :
631 : // find the new global node index for the requested node -
632 : // note that requesting_pid_idx thinks we own this node,
633 : // so we better!
634 : const global_idx_mapping_type::const_iterator it =
635 810 : std::lower_bound (old_global_to_new_global_map.begin(),
636 : old_global_to_new_global_map.end(),
637 : old_global_node_idx,
638 50 : global_idx_mapping_comp);
639 :
640 50 : libmesh_assert (it != old_global_to_new_global_map.end());
641 50 : libmesh_assert_equal_to (it->first, old_global_node_idx);
642 50 : libmesh_assert_greater_equal (it->second, my_node_offset);
643 50 : libmesh_assert_less (it->second, my_next_node);
644 :
645 : // overwrite the requested old global node index with the new global index
646 860 : xfer_buf[i] = it->second;
647 : }
648 :
649 : // and send the new global indices back to the processor which asked for them
650 1020 : this->comm().send (requesting_pid_idx,
651 : xfer_buf,
652 40 : requested_nodes_requests[cmap],
653 : nodes_tag);
654 : } // done processing the request
655 :
656 : // this is the second time we have heard from this processor,
657 : // so it must be its reply to our request
658 : else
659 : {
660 : // a long time ago, we sent off our own requests. now it is time to catch the
661 : // replies and get the new global node numbering. note that for any reply
662 : // we receive, the corresponding nonblocking send from above *must* have been
663 : // completed, since the reply is in response to that request!!
664 :
665 : // if we have received a reply, our send *must* have completed
666 : // (note we never actually need to wait on the request)
667 20 : libmesh_assert (needed_nodes_requests[cmap].test());
668 20 : libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_ids[cmap]), source_pid_idx);
669 :
670 : // now post the receive for this cmap
671 1000 : this->comm().receive (source_pid_idx,
672 40 : needed_node_idxs[cmap],
673 60 : nodes_tag);
674 :
675 20 : libmesh_assert_less_equal (needed_node_idxs[cmap].size(),
676 : nemhelper->node_cmap_node_ids[cmap].size());
677 :
678 3204 : for (std::size_t i=0, j=0, ncnis=nemhelper->node_cmap_node_ids[cmap].size(); i < ncnis; i++)
679 : {
680 : const unsigned int
681 2284 : local_node_idx = nemhelper->node_cmap_node_ids[cmap][i]-1,
682 2184 : owning_pid_idx = node_ownership[local_node_idx];
683 :
684 : // if this node is owned by source_pid_idx, its new global id
685 : // is in the buffer we just received
686 2184 : if (owning_pid_idx == source_pid_idx)
687 : {
688 50 : libmesh_assert_less (j, needed_node_idxs[cmap].size());
689 :
690 : const unsigned int // now 0-based!
691 910 : global_node_idx = needed_node_idxs[cmap][j++];
692 :
693 : // "Catch" the node pointer after addition, make sure the
694 : // ID matches the requested value.
695 : Node * added_node =
696 1010 : mesh.add_point (Point(nemhelper->x[local_node_idx],
697 100 : nemhelper->y[local_node_idx],
698 100 : nemhelper->z[local_node_idx]),
699 : cast_int<dof_id_type>(global_node_idx),
700 100 : cast_int<processor_id_type>(source_pid_idx));
701 :
702 : // Make sure the node we added has the ID we thought it would
703 860 : if (added_node->id() != global_node_idx)
704 : {
705 0 : libMesh::err << "Error, node added with ID " << added_node->id()
706 0 : << ", but we wanted ID " << global_node_idx << std::endl;
707 : }
708 :
709 : // Set a unique_id ourselves since ReplicatedMesh can't handle
710 : // distributed unique_id generation. Make sure it doesn't
711 : // overlap element unique_id() values either.
712 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
713 910 : added_node->set_unique_id(added_node->id() + nemhelper->num_elems_global);
714 : #endif
715 :
716 : // update the local->global index map, keeping it 1-based
717 860 : nemhelper->node_num_map[local_node_idx] = global_node_idx + 1;
718 :
719 : // we are not really going to use my_next_node again, but we can
720 : // keep incrementing it to track how many nodes we have added
721 : // to the mesh
722 860 : my_next_node++;
723 : }
724 : }
725 : }
726 : } // end of node index communication loop
727 :
728 : // we had better have added all the nodes we need to!
729 24 : libmesh_assert_equal_to ((my_next_node - my_node_offset), to_uint(nemhelper->num_nodes));
730 :
731 : // After all that, we should be done with all node-related arrays
732 : // *except* the node_num_map.
733 : // So let's clean up the arrays we are done with.
734 : {
735 826 : Utility::deallocate (nemhelper->node_mapi);
736 826 : Utility::deallocate (nemhelper->node_mapb);
737 826 : Utility::deallocate (nemhelper->node_mape);
738 826 : Utility::deallocate (nemhelper->node_cmap_ids);
739 826 : Utility::deallocate (nemhelper->node_cmap_node_cnts);
740 826 : Utility::deallocate (nemhelper->node_cmap_node_ids);
741 826 : Utility::deallocate (nemhelper->node_cmap_proc_ids);
742 826 : Utility::deallocate (nemhelper->x);
743 826 : Utility::deallocate (nemhelper->y);
744 826 : Utility::deallocate (nemhelper->z);
745 826 : Utility::deallocate (needed_node_idxs);
746 826 : Utility::deallocate (node_ownership);
747 : }
748 :
749 850 : Parallel::wait (needed_nodes_requests);
750 850 : Parallel::wait (requested_nodes_requests);
751 24 : requested_node_idxs.clear();
752 :
753 : // See what the node count is up to now.
754 850 : if (_verbose)
755 : {
756 : // Report the number of nodes which have been added locally
757 0 : libMesh::out << "[" << this->processor_id() << "] ";
758 0 : libMesh::out << "mesh.n_nodes()=" << mesh.n_nodes() << std::endl;
759 :
760 : // Reports the number of nodes that have been added in total.
761 0 : libMesh::out << "[" << this->processor_id() << "] ";
762 0 : libMesh::out << "mesh.parallel_n_nodes()=" << mesh.parallel_n_nodes() << std::endl;
763 : }
764 :
765 :
766 :
767 : // --------------------------------------------------------------------------------
768 : // --------------------------------------------------------------------------------
769 : // --------------------------------------------------------------------------------
770 :
771 :
772 : // We can now read in the elements...Exodus stores them in blocks in which all
773 : // elements have the same geometric type. This code is adapted directly from exodusII_io.C
774 :
775 : // Assertion: The sum of the border and internal elements on all processors
776 : // should equal nemhelper->num_elems_global
777 : #ifndef NDEBUG
778 : {
779 24 : int sum_internal_elems=0, sum_border_elems=0;
780 72 : for (unsigned int j=3,c=0; c<this->n_processors(); j+=8,++c)
781 48 : sum_internal_elems += all_loadbal_data[j];
782 :
783 72 : for (unsigned int j=4,c=0; c<this->n_processors(); j+=8,++c)
784 48 : sum_border_elems += all_loadbal_data[j];
785 :
786 24 : if (_verbose)
787 : {
788 0 : libMesh::out << "[" << this->processor_id() << "] ";
789 0 : libMesh::out << "sum_internal_elems=" << sum_internal_elems << std::endl;
790 :
791 0 : libMesh::out << "[" << this->processor_id() << "] ";
792 0 : libMesh::out << "sum_border_elems=" << sum_border_elems << std::endl;
793 : }
794 :
795 24 : libmesh_assert_equal_to (sum_internal_elems+sum_border_elems, nemhelper->num_elems_global);
796 : }
797 : #endif
798 :
799 : // We need to set the mesh dimension, but the following...
800 : // mesh.set_mesh_dimension(static_cast<unsigned int>(nemhelper->num_dim));
801 :
802 : // ... is not sufficient since some codes report num_dim==3 for two dimensional
803 : // meshes living in 3D, even though all the elements are of 2D type. Therefore,
804 : // we instead use the dimension of the highest element found for the Mesh dimension,
805 : // similar to what is done by the Exodus reader, except here it requires a
806 : // parallel communication.
807 850 : elems_of_dimension.resize(4, false); // will use 1-based
808 :
809 : // Fills in the:
810 : // global_elem_blk_ids[] and
811 : // global_elem_blk_cnts[] arrays.
812 850 : nemhelper->get_eb_info_global();
813 :
814 : // // Fills in the vectors
815 : // // elem_mapi[num_internal_elems]
816 : // // elem_mapb[num_border_elems ]
817 : // // These tell which of the (locally-numbered) elements are internal and which are border elements.
818 : // // In our test example these arrays are sorted (but non-contiguous), which makes it possible to
819 : // // binary search for each element ID... however I don't think we need to distinguish between the
820 : // // two types, since either can have nodes the boundary!
821 : // nemhelper->get_elem_map();
822 :
823 : // Fills in the vectors of vectors:
824 : // elem_cmap_elem_ids[][]
825 : // elem_cmap_side_ids[][]
826 : // elem_cmap_proc_ids[][]
827 : // These arrays are of size num_elem_cmaps * elem_cmap_elem_cnts[i], i = 0..num_elem_cmaps
828 850 : nemhelper->get_elem_cmap();
829 :
830 : // Get information about the element blocks:
831 : // (read in the array nemhelper->block_ids[])
832 850 : nemhelper->read_block_info();
833 :
834 : // Reads the nemhelper->elem_num_map array.
835 : // elem_num_map[i] is the exodus element number for local element
836 : // number i, which makes elem_num_map[i]-1 the libMesh element
837 : // number.
838 850 : nemhelper->read_elem_num_map();
839 :
840 24 : std::size_t local_elem_num = 0;
841 :
842 : // Read in the element connectivity for each block by
843 : // looping over all the blocks.
844 1700 : for (unsigned int i=0; i<to_uint(nemhelper->num_elem_blk); i++)
845 : {
846 : // Read the information for block i: For nemhelper->block_ids[i], reads
847 : // elem_type
848 : // num_elem_this_blk
849 : // num_nodes_per_elem
850 : // num_attr
851 : // connect <-- the nodal connectivity array for each element in the block.
852 850 : nemhelper->read_elem_in_block(i);
853 :
854 : // Note that with parallel files it is possible we have no elements in
855 : // this block!
856 850 : if (!nemhelper->num_elem_this_blk) continue;
857 :
858 : // Set subdomain ID based on the block ID.
859 : subdomain_id_type subdomain_id =
860 432 : cast_int<subdomain_id_type>(nemhelper->block_ids[i]);
861 :
862 : // Create a type string (this uses the null-terminated string ctor).
863 432 : const std::string type_str ( nemhelper->elem_type.data() );
864 :
865 : // Set any relevant node/edge maps for this element
866 410 : const auto & conv = nemhelper->get_conversion(type_str);
867 :
868 410 : if (_verbose)
869 0 : libMesh::out << "Reading a block of " << type_str << " elements." << std::endl;
870 :
871 : // Loop over all the elements in this block
872 1328 : for (unsigned int j=0; j<to_uint(nemhelper->num_elem_this_blk); j++)
873 : {
874 996 : auto uelem = Elem::build (conv.libmesh_elem_type());
875 :
876 : // Assign subdomain and processor ID to the newly-created Elem.
877 : // Assigning the processor ID beforehand ensures that the Elem is
878 : // not added as an "unpartitioned" element. Note that the element
879 : // numbering in Exodus is also 1-based.
880 918 : uelem->subdomain_id() = subdomain_id;
881 996 : uelem->processor_id() = this->processor_id();
882 918 : uelem->set_id() = nemhelper->elem_num_map[local_elem_num++]-1;
883 :
884 : // Handle unique_id numbering, just in case we're using a
885 : // ReplicatedMesh that doesn't know how to handle it in
886 : // parallel.
887 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
888 216 : uelem->set_unique_id(uelem->id());
889 : #endif
890 :
891 : // Mark that we have seen an element of the current element's
892 : // dimension.
893 918 : elems_of_dimension[uelem->dim()] = true;
894 :
895 : // Add the created Elem to the Mesh, catch the Elem
896 : // pointer that the Mesh throws back.
897 996 : Elem * elem = mesh.add_elem(std::move(uelem));
898 :
899 : // We are expecting the element "thrown back" by libmesh to have the ID we specified for it...
900 : // Check to see that really is the case. Note that local_elem_num was post-incremented, so
901 : // subtract 1 when performing the check.
902 78 : libmesh_assert_equal_to(elem->id(),
903 : cast_int<dof_id_type>(nemhelper->elem_num_map[local_elem_num-1]-1));
904 :
905 : // Set all the nodes for this element
906 918 : if (_verbose)
907 0 : libMesh::out << "[" << this->processor_id() << "] "
908 0 : << "Setting nodes for Elem " << elem->id() << std::endl;
909 :
910 4686 : for (unsigned int k=0; k<to_uint(nemhelper->num_nodes_per_elem); k++)
911 : {
912 : const unsigned int
913 3768 : gi = (j*nemhelper->num_nodes_per_elem + // index into connectivity array
914 3768 : conv.get_node_map(k)),
915 3768 : local_node_idx = nemhelper->connect[gi]-1, // local node index
916 3768 : global_node_idx = nemhelper->node_num_map[local_node_idx]-1; // new global node index
917 :
918 : // Set node number
919 3768 : elem->set_node(k, mesh.node_ptr(global_node_idx));
920 : }
921 762 : } // for (unsigned int j=0; j<nemhelper->num_elem_this_blk; j++)
922 : } // end for (unsigned int i=0; i<nemhelper->num_elem_blk; i++)
923 :
924 1700 : for (const auto & [id, name] : nemhelper->id_to_block_names)
925 850 : if (name != "")
926 0 : mesh.subdomain_name(id) = name;
927 :
928 850 : if (_verbose)
929 : {
930 : // Print local elems_of_dimension information
931 0 : for (auto i : IntRange<std::size_t>(1, elems_of_dimension.size()))
932 0 : libMesh::out << "[" << this->processor_id() << "] "
933 0 : << "elems_of_dimension[" << i << "]=" << elems_of_dimension[i] << std::endl;
934 : }
935 :
936 : // Get the max dimension seen on the current processor
937 850 : unsigned char max_dim_seen = 0;
938 3400 : for (auto i : IntRange<std::size_t>(1, elems_of_dimension.size()))
939 2550 : if (elems_of_dimension[i])
940 410 : max_dim_seen = static_cast<unsigned char>(i);
941 :
942 : // Do a global max to determine the max dimension seen by all processors.
943 : // It should match -- I don't think we even support calculations on meshes
944 : // with elements of different dimension...
945 850 : this->comm().max(max_dim_seen);
946 :
947 850 : if (_verbose)
948 : {
949 : // Print the max element dimension from all processors
950 0 : libMesh::out << "[" << this->processor_id() << "] "
951 0 : << "max_dim_seen=" << +max_dim_seen << std::endl;
952 : }
953 :
954 : // Set the mesh dimension to the largest encountered for an element
955 1652 : mesh.set_mesh_dimension(max_dim_seen);
956 :
957 : #if LIBMESH_DIM < 3
958 : libmesh_error_msg_if(mesh.mesh_dimension() > LIBMESH_DIM,
959 : "Cannot open dimension "
960 : << mesh.mesh_dimension()
961 : << " mesh file when configured without "
962 : << mesh.mesh_dimension()
963 : << "D support." );
964 : #endif
965 :
966 :
967 : // Global sideset information, they are distributed as well, not sure if they will require communication...?
968 850 : nemhelper->get_ss_param_global();
969 :
970 850 : if (_verbose)
971 : {
972 0 : libMesh::out << "[" << this->processor_id() << "] "
973 0 : << "Read global sideset parameter information." << std::endl;
974 :
975 : // These global values should be the same on all processors...
976 0 : libMesh::out << "[" << this->processor_id() << "] "
977 0 : << "Number of global sideset IDs: " << nemhelper->global_sideset_ids.size() << std::endl;
978 : }
979 :
980 : // Read *local* sideset info the same way it is done in
981 : // exodusII_io_helper. May be called any time after
982 : // nemhelper->read_and_store_header_info(); This sets num_side_sets and resizes
983 : // elem_list, side_list, and id_list to num_elem_all_sidesets. Note
984 : // that there appears to be the same number of sidesets in each file
985 : // but they all have different numbers of entries (some are empty).
986 : // Note that the sum of "nemhelper->num_elem_all_sidesets" over all
987 : // processors should equal the sum of the entries in the "num_global_side_counts" array
988 : // filled up by nemhelper->get_ss_param_global()
989 850 : nemhelper->read_sideset_info();
990 :
991 850 : if (_verbose)
992 : {
993 0 : libMesh::out << "[" << this->processor_id() << "] "
994 0 : << "nemhelper->num_side_sets = " << nemhelper->num_side_sets << std::endl;
995 :
996 0 : libMesh::out << "[" << this->processor_id() << "] "
997 0 : << "nemhelper->num_elem_all_sidesets = " << nemhelper->num_elem_all_sidesets << std::endl;
998 :
999 0 : if (nemhelper->num_side_sets > 0)
1000 : {
1001 0 : libMesh::out << "Sideset names are: ";
1002 0 : for (const auto & [id, name] : nemhelper->id_to_ss_names)
1003 0 : libMesh::out << "(" << id << "," << name << ") ";
1004 0 : libMesh::out << std::endl;
1005 : }
1006 : }
1007 :
1008 : #ifdef DEBUG
1009 : {
1010 : // In DEBUG mode, check that the global number of sidesets reported
1011 : // in each nemesis file matches the sum of all local sideset counts
1012 : // from each processor. This requires a small communication, so only
1013 : // do it in DEBUG mode.
1014 24 : int sum_num_global_side_counts = std::accumulate(nemhelper->num_global_side_counts.begin(),
1015 24 : nemhelper->num_global_side_counts.end(),
1016 : 0);
1017 :
1018 : // MPI sum up the local files contributions
1019 24 : int sum_num_elem_all_sidesets = nemhelper->num_elem_all_sidesets;
1020 24 : this->comm().sum(sum_num_elem_all_sidesets);
1021 :
1022 24 : libmesh_error_msg_if(sum_num_global_side_counts != sum_num_elem_all_sidesets,
1023 : "Error! global side count reported by Nemesis does not "
1024 : "match the side count reported by the individual files!");
1025 : }
1026 : #endif
1027 :
1028 : // Note that exodus stores sidesets in separate vectors but we want to pack
1029 : // them all into a single vector. So when we call read_sideset(), we pass an offset
1030 : // into the single vector of all IDs
1031 4250 : for (int offset=0, i=0; i<nemhelper->num_side_sets; i++)
1032 : {
1033 3400 : offset += (i > 0 ? nemhelper->num_sides_per_set[i-1] : 0); // Compute new offset
1034 3400 : nemhelper->read_sideset (i, offset);
1035 : }
1036 :
1037 : // Now that we have the lists of elements, sides, and IDs, we are ready to set them
1038 : // in the BoundaryInfo object of our Mesh object. This is slightly different in parallel...
1039 : // For example, I think the IDs in each of the split Exodus files are numbered locally,
1040 : // and we have to know the appropriate ID for this processor to be able to set the
1041 : // entry in BoundaryInfo. This id should be given by
1042 : // elem_num_map[i]-1 for the local index i
1043 :
1044 : // Debugging:
1045 : // Print entries of elem_list
1046 : // libMesh::out << "[" << this->processor_id() << "] "
1047 : // << "elem_list = ";
1048 : // for (const auto & id : nemhelper->elem_list)
1049 : // libMesh::out << id << ", ";
1050 : // libMesh::out << std::endl;
1051 :
1052 : // Print entries of side_list
1053 : // libMesh::out << "[" << this->processor_id() << "] "
1054 : // << "side_list = ";
1055 : // for (const auto & id : nemhelper->side_list)
1056 : // libMesh::out << id << ", ";
1057 : // libMesh::out << std::endl;
1058 :
1059 :
1060 : // Loop over the entries of the elem_list, get their pointers from the
1061 : // Mesh data structure, and assign the appropriate side to the BoundaryInfo object.
1062 2170 : for (auto e : index_range(nemhelper->elem_list))
1063 : {
1064 : // Exodus numbering is 1-based
1065 1320 : const std::size_t local_id = nemhelper->elem_list[e]-1;
1066 1320 : const dof_id_type elem_id = nemhelper->elem_num_map[local_id]-1;
1067 :
1068 1320 : Elem * elem = mesh.elem_ptr(elem_id);
1069 :
1070 : // The side numberings in libmesh and exodus are not 1:1, so we need to map
1071 : // whatever side number is stored in Exodus into a libmesh side number using
1072 : // a conv object...
1073 1320 : const auto & conv = nemhelper->get_conversion(elem->type());
1074 :
1075 : // Finally, we are ready to add the element and its side to the BoundaryInfo object.
1076 : // Call the version of add_side which takes a pointer, since we have already gone to
1077 : // the trouble of getting said pointer...
1078 1320 : mesh.get_boundary_info().add_side(elem,
1079 1432 : cast_int<unsigned short>(conv.get_side_map(nemhelper->side_list[e]-1)), // Exodus numbering is 1-based
1080 1432 : cast_int<boundary_id_type>(nemhelper->id_list[e]));
1081 : }
1082 :
1083 4250 : for (const auto & [id, name] : nemhelper->id_to_ss_names)
1084 3400 : if (name != "")
1085 3400 : mesh.get_boundary_info().sideset_name(id) = name;
1086 :
1087 : // Debugging: make sure there are as many boundary conditions in the
1088 : // boundary ID object as expected. Note that, at this point, the
1089 : // mesh still thinks it's serial, so n_boundary_conds() returns the
1090 : // local number of boundary conditions (and is therefore cheap)
1091 : // which should match nemhelper->elem_list.size().
1092 : {
1093 850 : std::size_t nbcs = mesh.get_boundary_info().n_boundary_conds();
1094 874 : libmesh_error_msg_if(nbcs != nemhelper->elem_list.size(),
1095 : "[" << this->processor_id() << "] "
1096 : << "BoundaryInfo contains "
1097 : << nbcs
1098 : << " boundary conditions, while the Exodus file had "
1099 : << nemhelper->elem_list.size());
1100 : }
1101 :
1102 : // Read global nodeset parameters? We might be able to use this to verify
1103 : // something about the local files, but I haven't figured out what yet...
1104 850 : nemhelper->get_ns_param_global();
1105 :
1106 : // Read local nodeset info
1107 850 : nemhelper->read_nodeset_info();
1108 :
1109 850 : if (_verbose)
1110 : {
1111 0 : libMesh::out << "[" << this->processor_id() << "] ";
1112 0 : libMesh::out << "nemhelper->num_node_sets=" << nemhelper->num_node_sets << std::endl;
1113 0 : if (nemhelper->num_node_sets > 0)
1114 : {
1115 0 : libMesh::out << "Nodeset names are: ";
1116 0 : for (const auto & [id, name] : nemhelper->id_to_ns_names)
1117 0 : libMesh::out << "(" << id << "," << name << ") ";
1118 0 : libMesh::out << std::endl;
1119 : }
1120 : }
1121 :
1122 : // // Debugging, what is currently in nemhelper->node_num_map anyway?
1123 : // libMesh::out << "[" << this->processor_id() << "] "
1124 : // << "nemhelper->node_num_map = ";
1125 : //
1126 : // for (const auto & id : nemhelper->node_num_map)
1127 : // libMesh::out << id << ", ";
1128 : // libMesh::out << std::endl;
1129 :
1130 : // For each nodeset,
1131 4250 : for (int nodeset=0; nodeset<nemhelper->num_node_sets; nodeset++)
1132 : {
1133 : // Get the user-defined ID associated with the nodeset
1134 3400 : int nodeset_id = nemhelper->nodeset_ids[nodeset];
1135 :
1136 3400 : if (_verbose)
1137 : {
1138 0 : libMesh::out << "[" << this->processor_id() << "] ";
1139 0 : libMesh::out << "nemhelper->nodeset_ids[" << nodeset << "]=" << nodeset_id << std::endl;
1140 : }
1141 :
1142 : // Read the nodeset from file, store them in a vector
1143 3400 : nemhelper->read_nodeset(nodeset);
1144 :
1145 : // Add nodes from the node_list to the BoundaryInfo object
1146 5752 : for (auto node : index_range(nemhelper->node_list))
1147 : {
1148 : // Don't run past the end of our node map!
1149 2736 : libmesh_error_msg_if(to_uint(nemhelper->node_list[node]-1) >= nemhelper->node_num_map.size(),
1150 : "Error, index is past the end of node_num_map array!");
1151 :
1152 : // We should be able to use the node_num_map data structure set up previously to determine
1153 : // the proper global node index.
1154 2352 : unsigned global_node_id = nemhelper->node_num_map[ nemhelper->node_list[node]-1 /*Exodus is 1-based!*/ ]-1;
1155 :
1156 2352 : if (_verbose)
1157 : {
1158 0 : libMesh::out << "[" << this->processor_id() << "] "
1159 0 : << "nodeset " << nodeset
1160 0 : << ", local node number: " << nemhelper->node_list[node]-1
1161 0 : << ", global node id: " << global_node_id
1162 0 : << std::endl;
1163 : }
1164 :
1165 : // Add the node to the BoundaryInfo object with the proper nodeset_id
1166 192 : mesh.get_boundary_info().add_node
1167 2352 : (cast_int<dof_id_type>(global_node_id),
1168 192 : cast_int<boundary_id_type>(nodeset_id));
1169 : }
1170 : }
1171 :
1172 4250 : for (const auto & [id, name] : nemhelper->id_to_ns_names)
1173 3400 : if (name != "")
1174 3400 : mesh.get_boundary_info().nodeset_name(id) = name;
1175 :
1176 : // See what the elem count is up to now.
1177 850 : if (_verbose)
1178 : {
1179 : // Report the number of elements which have been added locally
1180 0 : libMesh::out << "[" << this->processor_id() << "] ";
1181 0 : libMesh::out << "mesh.n_elem()=" << mesh.n_elem() << std::endl;
1182 :
1183 : // Reports the number of elements that have been added in total.
1184 0 : libMesh::out << "[" << this->processor_id() << "] ";
1185 0 : libMesh::out << "mesh.parallel_n_elem()=" << mesh.parallel_n_elem() << std::endl;
1186 : }
1187 :
1188 : // For DistributedMesh, it seems that _is_serial is true by default. A hack to
1189 : // make the Mesh think it's parallel might be to call:
1190 850 : mesh.update_post_partitioning();
1191 850 : MeshCommunication().make_node_unique_ids_parallel_consistent(mesh);
1192 850 : mesh.delete_remote_elements();
1193 :
1194 : // If that didn't work, then we're actually reading into a
1195 : // ReplicatedMesh, so we want to gather *all* elements
1196 850 : if (mesh.is_serial())
1197 : // Don't just use mesh.allgather(); that's a no-op, since
1198 : // ReplicatedMesh didn't expect to be distributed in the first
1199 : // place!
1200 425 : MeshCommunication().allgather(mesh);
1201 : else
1202 : // Gather neighboring elements so that a distributed mesh has the
1203 : // proper "ghost" neighbor information.
1204 425 : MeshCommunication().gather_neighboring_elements(cast_ref<DistributedMesh &>(mesh));
1205 :
1206 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
1207 : // We've been setting unique_ids by hand; let's make sure that later
1208 : // ones are consistent with them.
1209 850 : mesh.set_next_unique_id(mesh.parallel_max_unique_id()+1);
1210 : #endif
1211 2454 : }
1212 :
1213 : #else
1214 :
// Stub implementation, compiled when libMesh was configured without the
// ExodusII and/or Nemesis APIs: reading a Nemesis file is impossible, so
// fail loudly instead of silently doing nothing.
void Nemesis_IO::read (const std::string &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}
1219 :
1220 : #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1221 :
1222 :
1223 :
1224 :
1225 :
1226 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1227 :
1228 7313 : void Nemesis_IO::write (const std::string & base_filename)
1229 : {
1230 : // Get a constant reference to the mesh for writing
1231 412 : const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1232 :
1233 : // Create the filename for this processor given the base_filename passed in.
1234 7519 : std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
1235 :
1236 : // If the user has set the append flag here, it doesn't really make
1237 : // sense: the intent of this function is to write a Mesh with no
1238 : // data, while "appending" is really intended to add data to an
1239 : // existing file. If we're verbose, print a message to this effect.
1240 206 : if (_append && _verbose)
1241 : libmesh_warning("Warning: Appending in Nemesis_IO::write() does not make sense.\n"
1242 : "Creating a new file instead!");
1243 :
1244 14420 : nemhelper->create(nemesis_filename);
1245 :
1246 : // Initialize data structures and write some global Nemesis-specific data, such as
1247 : // communication maps, to file.
1248 7313 : nemhelper->initialize(nemesis_filename,mesh);
1249 :
1250 : // Make sure we're writing communication maps we can reuse as
1251 : // expected when reading
1252 7313 : this->assert_symmetric_cmaps();
1253 :
1254 : // Call the Nemesis-specialized version of write_nodal_coordinates() to write
1255 : // the nodal coordinates.
1256 7313 : nemhelper->write_nodal_coordinates(mesh);
1257 :
1258 : // Call the Nemesis-specialized version of write_elements() to write
1259 : // the elements. Note: Must write a zero if a given global block ID has no
1260 : // elements...
1261 7313 : nemhelper->write_elements(mesh);
1262 :
1263 : // Call our specialized function to write the nodesets
1264 7313 : nemhelper->write_nodesets(mesh);
1265 :
1266 : // Call our specialized write_sidesets() function to write the sidesets to file
1267 7313 : nemhelper->write_sidesets(mesh);
1268 :
1269 : // Not sure if this is really necessary, but go ahead and flush the file
1270 : // once we have written all this stuff.
1271 7313 : nemhelper->update();
1272 :
1273 7313 : if ((mesh.get_boundary_info().n_edge_conds() > 0) && _verbose)
1274 : libmesh_warning("Warning: Mesh contains edge boundary IDs, but these "
1275 : "are not supported by the Nemesis format.");
1276 7313 : }
1277 :
1278 : #else
1279 :
// Stub implementation, compiled when libMesh was configured without the
// ExodusII and/or Nemesis APIs: writing a Nemesis file is impossible.
void Nemesis_IO::write (const std::string & )
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}
1284 :
1285 : #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1286 :
1287 :
1288 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1289 :
1290 0 : void Nemesis_IO::write_timestep (const std::string & fname,
1291 : const EquationSystems & es,
1292 : const int timestep,
1293 : const Real time)
1294 : {
1295 0 : _timestep=timestep;
1296 0 : write_equation_systems(fname,es);
1297 :
1298 0 : nemhelper->write_timestep(timestep, time);
1299 0 : }
1300 :
1301 : #else
1302 :
// Stub implementation, compiled when libMesh was configured without the
// ExodusII and/or Nemesis APIs.
void Nemesis_IO::write_timestep (const std::string &,
                                 const EquationSystems &,
                                 const int,
                                 const Real)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined!");
}
1310 :
1311 : #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1312 :
1313 :
1314 :
1315 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1316 :
1317 708 : void Nemesis_IO::prepare_to_write_nodal_data (const std::string & fname,
1318 : const std::vector<std::string> & names)
1319 : {
1320 40 : const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1321 :
1322 708 : std::string nemesis_filename = nemhelper->construct_nemesis_filename(fname);
1323 :
1324 708 : if (!nemhelper->opened_for_writing)
1325 : {
1326 : // If we're appending, open() the file with read_only=false,
1327 : // otherwise create() it and write the contents of the mesh to
1328 : // it.
1329 708 : if (_append)
1330 : {
1331 0 : nemhelper->open(nemesis_filename.c_str(), /*read_only=*/false);
1332 : // After opening the file, read the header so that certain
1333 : // fields, such as the number of nodes and the number of
1334 : // elements, are correctly initialized for the subsequent
1335 : // call to write the nodal solution.
1336 0 : nemhelper->read_and_store_header_info();
1337 :
1338 : // ...and reading the block info
1339 0 : nemhelper->read_block_info();
1340 :
1341 : // ...and rebuild the "exodus_node_num_to_libmesh" map
1342 0 : nemhelper->compute_num_global_elem_blocks(mesh);
1343 0 : nemhelper->build_element_and_node_maps(mesh);
1344 : }
1345 : else
1346 : {
1347 1396 : nemhelper->create(nemesis_filename);
1348 708 : nemhelper->initialize(nemesis_filename,mesh);
1349 :
1350 : // Make sure we're writing communication maps we can reuse
1351 : // as expected when reading
1352 708 : this->assert_symmetric_cmaps();
1353 :
1354 708 : nemhelper->write_nodal_coordinates(mesh);
1355 708 : nemhelper->write_elements(mesh);
1356 708 : nemhelper->write_nodesets(mesh);
1357 708 : nemhelper->write_sidesets(mesh);
1358 :
1359 708 : if ((mesh.get_boundary_info().n_edge_conds() > 0) && _verbose)
1360 : libmesh_warning("Warning: Mesh contains edge boundary IDs, but these "
1361 : "are not supported by the ExodusII format.");
1362 : }
1363 : }
1364 :
1365 : // Even if we were already open for writing, we might not have
1366 : // initialized the nodal variable names yet. Even if we did, it
1367 : // should not hurt to call this twice because the routine sets a
1368 : // flag the first time it is called.
1369 : #ifdef LIBMESH_USE_COMPLEX_NUMBERS
1370 : std::vector<std::string> complex_names =
1371 8 : nemhelper->get_complex_names(names, nemhelper->write_complex_abs);
1372 8 : nemhelper->initialize_nodal_variables(complex_names);
1373 : #else
1374 700 : nemhelper->initialize_nodal_variables(names);
1375 : #endif
1376 716 : }
1377 :
1378 : #else
1379 :
// Stub implementation, compiled when libMesh was configured without the
// ExodusII and/or Nemesis APIs.
void Nemesis_IO::prepare_to_write_nodal_data (const std::string &,
                                              const std::vector<std::string> &)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}
1385 :
1386 : #endif
1387 :
1388 :
1389 :
1390 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1391 :
1392 0 : void Nemesis_IO::write_nodal_data (const std::string & base_filename,
1393 : const NumericVector<Number> & parallel_soln,
1394 : const std::vector<std::string> & names)
1395 : {
1396 0 : LOG_SCOPE("write_nodal_data(parallel)", "Nemesis_IO");
1397 :
1398 : // Only prepare and write nodal variables that are also in
1399 : // _output_variables, unless _output_variables is empty. This is the
1400 : // same logic that is in ExodusII_IO::write_nodal_data().
1401 0 : std::vector<std::string> output_names;
1402 :
1403 0 : if (_allow_empty_variables || !_output_variables.empty())
1404 0 : output_names = _output_variables;
1405 : else
1406 0 : output_names = names;
1407 :
1408 0 : this->prepare_to_write_nodal_data(base_filename, output_names);
1409 :
1410 : // Call the new version of write_nodal_solution() that takes a
1411 : // NumericVector directly without localizing.
1412 0 : nemhelper->write_nodal_solution(parallel_soln, names, _timestep, output_names);
1413 0 : }
1414 :
1415 :
1416 :
1417 708 : void Nemesis_IO::write_nodal_data (const std::string & base_filename,
1418 : const EquationSystems & es,
1419 : const std::set<std::string> * system_names)
1420 : {
1421 40 : LOG_SCOPE("write_nodal_data(parallel)", "Nemesis_IO");
1422 :
1423 : // Only prepare and write nodal variables that are also in
1424 : // _output_variables, unless _output_variables is empty. This is the
1425 : // same logic that is in ExodusII_IO::write_nodal_data().
1426 60 : std::vector<std::string> output_names;
1427 :
1428 708 : if (_allow_empty_variables || !_output_variables.empty())
1429 0 : output_names = _output_variables;
1430 : else
1431 708 : es.build_variable_names (output_names, nullptr, system_names);
1432 :
1433 708 : this->prepare_to_write_nodal_data(base_filename, output_names);
1434 :
1435 40 : std::vector<std::pair<unsigned int, unsigned int>> var_nums;
1436 : // If we pass in an empty vector below, it will return all of the
1437 : // var nums in es, which we don't want.
1438 708 : if (!output_names.empty())
1439 560 : var_nums = es.find_variable_numbers(output_names);
1440 :
1441 708 : nemhelper->write_nodal_solution(es, var_nums, _timestep, output_names);
1442 708 : }
1443 :
1444 : #else
1445 :
1446 : void Nemesis_IO::write_nodal_data (const std::string &,
1447 : const NumericVector<Number> &,
1448 : const std::vector<std::string> &)
1449 : {
1450 : libmesh_error_msg("ERROR, Nemesis API is not defined.");
1451 : }
1452 :
// Stub implementation, compiled when libMesh was configured without the
// ExodusII and/or Nemesis APIs.
void Nemesis_IO::write_nodal_data (const std::string &,
                                   const EquationSystems &,
                                   const std::set<std::string> *)
{
  libmesh_error_msg("ERROR, Nemesis API is not defined.");
}
1459 :
1460 : #endif
1461 :
1462 :
1463 :
1464 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1465 :
// Write all CONSTANT MONOMIAL / MONOMIAL_VEC (i.e. elemental) variables of
// \p es to the already-initialized Nemesis file at the current _timestep.
void Nemesis_IO::write_element_data (const EquationSystems & es)
{
  // The Nemesis file must already have been created/initialized
  // (e.g. by a prior write() / prepare_to_write_nodal_data() call).
  libmesh_error_msg_if(!nemhelper->opened_for_writing,
                       "ERROR, Nemesis file must be initialized before outputting elemental variables.");

  // To be (possibly) filled with a filtered list of variable names to output.
  std::vector<std::string> names;

  // Only piecewise-constant (low order monomial) fields map onto Exodus
  // elemental variables, both scalar- and vector-valued.
  const std::vector<FEType> type = {FEType(CONSTANT, MONOMIAL), FEType(CONSTANT, MONOMIAL_VEC)};

  // If _output_variables is populated, only output the monomials which are
  // also in the _output_variables vector.
  if (_output_variables.size())
    {
      std::vector<std::string> monomials;

      // Create a list of monomial variable names
      es.build_variable_names(monomials, &type[0]); /*scalars*/
      es.build_variable_names(monomials, &type[1]); /*vectors*/

      // Filter that list against the _output_variables list. Note: if names is still empty after
      // all this filtering, all the monomial variables will be gathered
      for (const auto & var : monomials)
        if (std::find(_output_variables.begin(), _output_variables.end(), var) != _output_variables.end())
          names.push_back(var);
    }

  // The 'names' vector will here be updated with the variable's names
  // that are actually eligible to write
  std::vector<std::pair<unsigned int, unsigned int>> var_nums =
    es.find_variable_numbers (names, /*type=*/nullptr, &type);

  // If no (system, variable) pairs were found, there are no constant
  // monomial variables to write and we can simply return.
  if (var_nums.empty())
    {
      if (_verbose)
        libMesh::out << "No CONSTANT, MONOMIAL or CONSTANT, MONOMIAL_VEC data to be written." << std::endl;
      return;
    }

  // Store the list of subdomains on which each variable *that we are
  // going to plot* is active. Note: if any of these sets is _empty_,
  // the variable in question is active on _all_ subdomains.
  std::vector<std::set<subdomain_id_type>> vars_active_subdomains;
  es.get_vars_active_subdomains(names, vars_active_subdomains);

  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

#ifdef LIBMESH_USE_COMPLEX_NUMBERS
  // Complex builds write (re, im[, abs]) components per variable, so the
  // name list and active-subdomain list must be expanded accordingly.
  std::vector<std::string> complex_names =
    nemhelper->get_complex_names(names, nemhelper->write_complex_abs);

  std::vector<std::set<subdomain_id_type>>
    complex_vars_active_subdomains =
    nemhelper->get_complex_vars_active_subdomains(vars_active_subdomains,
                                                  nemhelper->write_complex_abs);
  nemhelper->initialize_element_variables(complex_names, complex_vars_active_subdomains);

  // Call (non-virtual) function to write the elemental data in
  // parallel. This function is named similarly to the corresponding
  // function in the Exodus helper, but it has a different calling
  // sequence and is not virtual or an override.
  nemhelper->write_element_values(mesh,
                                  es,
                                  var_nums,
                                  _timestep,
                                  complex_vars_active_subdomains);

#else
  // Call the Nemesis version of initialize_element_variables().
  //
  // The Exodus helper version of this function writes an incorrect
  // truth table in parallel that somehow does not account for the
  // case where a subdomain does not appear on one or more of the
  // processors. So, we override that function's behavior in the
  // Nemesis helper.
  nemhelper->initialize_element_variables(names, vars_active_subdomains);

  // Call (non-virtual) function to write the elemental data in
  // parallel. This function is named similarly to the corresponding
  // function in the Exodus helper, but it has a different calling
  // sequence and is not virtual or an override.
  nemhelper->write_element_values(mesh,
                                  es,
                                  var_nums,
                                  _timestep,
                                  vars_active_subdomains);
#endif
}
1557 :
1558 : #else
1559 :
 : // Fallback stub: the library was configured without the Exodus/Nemesis
 : // APIs, so elemental output cannot be performed in this build.
1560 : void Nemesis_IO::write_element_data (const EquationSystems &)
1561 : {
1562 : libmesh_not_implemented();
1563 : }
1564 :
1565 : #endif
1566 :
1567 :
1568 :
1569 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1570 :
 : // Write nodal solution values for the variables in 'names' at the
 : // current timestep.  The "(serialized)" log tag suggests 'soln' holds
 : // the globally serialized solution vector -- TODO confirm against the
 : // ExodusII_IO counterpart.
1571 0 : void Nemesis_IO::write_nodal_data (const std::string & base_filename,
1572 : const std::vector<Number> & soln,
1573 : const std::vector<std::string> & names)
1574 : {
1575 0 : LOG_SCOPE("write_nodal_data(serialized)", "Nemesis_IO");
1576 :
 : // Open/initialize the per-processor file and set up nodal variable
 : // names before writing.
1577 0 : this->prepare_to_write_nodal_data(base_filename, names);
1578 :
1579 0 : nemhelper->write_nodal_solution(soln, names, _timestep);
1580 0 : }
1581 :
1582 : #else
1583 :
 : // Fallback stub used when the Nemesis API is not available in this build.
1584 : void Nemesis_IO::write_nodal_data (const std::string &,
1585 : const std::vector<Number> &,
1586 : const std::vector<std::string> &)
1587 : {
1588 : libmesh_error_msg("ERROR, Nemesis API is not defined.");
1589 : }
1590 :
1591 : #endif
1592 :
1593 :
1594 :
1595 :
1596 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1597 :
1598 0 : void Nemesis_IO::write_global_data (const std::vector<Number> & soln,
1599 : const std::vector<std::string> & names)
1600 : {
1601 0 : libmesh_error_msg_if(!nemhelper->opened_for_writing,
1602 : "ERROR, Nemesis file must be initialized before outputting global variables.");
1603 :
1604 : #ifdef LIBMESH_USE_COMPLEX_NUMBERS
1605 :
1606 : std::vector<std::string> complex_names =
1607 0 : nemhelper->get_complex_names(names, nemhelper->write_complex_abs);
1608 :
1609 0 : nemhelper->initialize_global_variables(complex_names);
1610 :
1611 0 : unsigned int num_values = soln.size();
1612 0 : unsigned int num_vars = names.size();
1613 0 : unsigned int num_elems = num_values / num_vars;
1614 :
1615 : // This will contain the real and imaginary parts and the magnitude
1616 : // of the values in soln
1617 0 : int nco = nemhelper->write_complex_abs ? 3 : 2;
1618 0 : std::vector<Real> complex_soln(nco * num_values);
1619 :
1620 0 : for (unsigned i=0; i<num_vars; ++i)
1621 : {
1622 0 : for (unsigned int j=0; j<num_elems; ++j)
1623 : {
1624 0 : Number value = soln[i*num_vars + j];
1625 0 : complex_soln[nco*i*num_elems + j] = value.real();
1626 : }
1627 0 : for (unsigned int j=0; j<num_elems; ++j)
1628 : {
1629 0 : Number value = soln[i*num_vars + j];
1630 0 : complex_soln[nco*i*num_elems + num_elems + j] = value.imag();
1631 : }
1632 0 : if (nemhelper->write_complex_abs)
1633 : {
1634 0 : for (unsigned int j=0; j<num_elems; ++j)
1635 : {
1636 0 : Number value = soln[i*num_vars + j];
1637 0 : complex_soln[3*i*num_elems + 2*num_elems + j] = std::abs(value);
1638 : }
1639 : }
1640 : }
1641 :
1642 0 : nemhelper->write_global_values(complex_soln, _timestep);
1643 :
1644 : #else
1645 :
1646 : // Call the Exodus writer implementation
1647 0 : nemhelper->initialize_global_variables( names );
1648 0 : nemhelper->write_global_values( soln, _timestep);
1649 :
1650 : #endif
1651 :
1652 0 : }
1653 :
1654 : #else
1655 :
 : // Fallback stub used when the Nemesis API is not available in this build.
1656 : void Nemesis_IO::write_global_data (const std::vector<Number> &,
1657 : const std::vector<std::string> &)
1658 : {
1659 : libmesh_error_msg("ERROR, Nemesis API is not defined.");
1660 : }
1661 :
1662 : #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1663 :
1664 :
1665 :
1666 : #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1667 :
 : // Write free-form text "information records" into the open Nemesis
 : // file.  The file must already be initialized for writing.
1668 0 : void Nemesis_IO::write_information_records (const std::vector<std::string> & records)
1669 : {
1670 0 : libmesh_error_msg_if(!nemhelper->opened_for_writing,
1671 : "ERROR, Nemesis file must be initialized before outputting information records.");
1672 :
1673 : // Call the Exodus writer implementation
1674 0 : nemhelper->write_information_records( records );
1675 0 : }
1676 :
1677 :
 : // Read the nodal variable names from the open file (via the helper)
 : // and return a reference to the helper's cached name list.
1678 142 : const std::vector<std::string> & Nemesis_IO::get_nodal_var_names()
1679 : {
1680 142 : nemhelper->read_var_names(ExodusII_IO_Helper::NODAL);
1681 142 : return nemhelper->nodal_var_names;
1682 : }
1683 :
1684 :
1685 :
1686 580 : void Nemesis_IO::copy_nodal_solution(System & system,
1687 : std::string system_var_name,
1688 : std::string exodus_var_name,
1689 : unsigned int timestep)
1690 : {
1691 580 : libmesh_error_msg_if(!nemhelper->opened_for_reading,
1692 : "ERROR, Nemesis file must be opened for reading before copying a nodal solution!");
1693 :
1694 1144 : nemhelper->read_nodal_var_values(exodus_var_name, timestep);
1695 :
1696 580 : const unsigned int var_num = system.variable_number(system_var_name);
1697 :
1698 2100 : for (auto p : nemhelper->nodal_var_values)
1699 : {
1700 115 : dof_id_type i = p.first;
1701 1520 : const Node * node = MeshInput<MeshBase>::mesh().node_ptr(i);
1702 :
1703 1520 : if (node && node->n_comp(system.number(), var_num) > 0)
1704 : {
1705 1520 : dof_id_type dof_index = node->dof_number(system.number(), var_num, 0);
1706 :
1707 : // If the dof_index is local to this processor, set the value
1708 1405 : if (system.get_dof_map().local_index(dof_index))
1709 1140 : system.solution->set (dof_index, p.second);
1710 : }
1711 : }
1712 :
1713 580 : system.solution->close();
1714 580 : system.update();
1715 580 : }
1716 :
1717 :
1718 :
1719 564 : void Nemesis_IO::copy_elemental_solution(System & system,
1720 : std::string system_var_name,
1721 : std::string exodus_var_name,
1722 : unsigned int timestep)
1723 : {
1724 16 : parallel_object_only();
1725 :
1726 564 : const unsigned int var_num = system.variable_number(system_var_name);
1727 20 : libmesh_error_msg_if(system.variable_type(var_num) != FEType(CONSTANT, MONOMIAL),
1728 : "Error! Trying to copy elemental solution into a variable that is not of CONSTANT MONOMIAL type.");
1729 :
1730 564 : const MeshBase & mesh = MeshInput<MeshBase>::mesh();
1731 :
1732 : // Map from element ID to elemental variable value. We need to use
1733 : // a map here rather than a vector (e.g. elem_var_values) since the
1734 : // libmesh element numbering can contain "holes". This is the case
1735 : // if we are reading elemental var values from an adaptively refined
1736 : // mesh that has not been sequentially renumbered.
1737 32 : std::map<dof_id_type, Real> elem_var_value_map;
1738 :
1739 564 : libmesh_error_msg_if(!nemhelper->opened_for_reading,
1740 : "ERROR, Nemesis file must be opened for reading before copying an elemental solution!");
1741 :
1742 1112 : nemhelper->read_elemental_var_values(exodus_var_name, timestep, elem_var_value_map);
1743 :
1744 : std::map<dof_id_type, Real>::iterator
1745 16 : it = elem_var_value_map.begin(),
1746 16 : end = elem_var_value_map.end();
1747 :
1748 1200 : for (; it!=end; ++it)
1749 : {
1750 636 : const Elem * elem = mesh.query_elem_ptr(it->first);
1751 :
1752 636 : if (elem && elem->n_comp(system.number(), var_num) > 0)
1753 : {
1754 636 : dof_id_type dof_index = elem->dof_number(system.number(), var_num, 0);
1755 56 : libmesh_assert(system.get_dof_map().local_index(dof_index));
1756 636 : system.solution->set (dof_index, it->second);
1757 : }
1758 : }
1759 :
1760 564 : system.solution->close();
1761 564 : system.update();
1762 :
1763 16 : parallel_object_only();
1764 564 : }
1765 :
1766 :
1767 :
 : // Copy global values read from the Nemesis file into the System's
 : // SCALAR variables: system_var_names[i] receives the value of
 : // exodus_var_names[i].  The two lists must be the same length.
 : //
 : // NOTE(review): read_global_variable() documents that its values are
 : // only available on processor 0, yet this function consumes them on
 : // the *last* processor (where the SCALAR dofs are presumably stored).
 : // Confirm the helper broadcasts the values; otherwise
 : // values_from_exodus[i] may be out of range on the last rank when
 : // running on more than one processor.
1768 0 : void Nemesis_IO::copy_scalar_solution(System & system,
1769 : std::vector<std::string> system_var_names,
1770 : std::vector<std::string> exodus_var_names,
1771 : unsigned int timestep)
1772 : {
1773 0 : libmesh_error_msg_if(!nemhelper->opened_for_reading,
1774 : "ERROR, Nemesis file must be opened for reading before copying a scalar solution!");
1775 :
1776 0 : libmesh_error_msg_if(system_var_names.size() != exodus_var_names.size(),
1777 : "ERROR, the number of system_var_names must match exodus_var_names.");
1778 :
1779 0 : std::vector<Real> values_from_exodus;
1780 0 : read_global_variable(exodus_var_names, timestep, values_from_exodus);
1781 :
 : // Only the last rank writes into the solution vector here.
1782 0 : if (system.processor_id() == (system.n_processors()-1))
1783 : {
1784 0 : const DofMap & dof_map = system.get_dof_map();
1785 :
1786 0 : for (auto i : index_range(system_var_names))
1787 : {
1788 0 : const unsigned int var_num = system.variable_scalar_number(system_var_names[i], 0);
1789 :
1790 0 : std::vector<dof_id_type> SCALAR_dofs;
1791 0 : dof_map.SCALAR_dof_indices(SCALAR_dofs, var_num);
1792 :
1793 0 : system.solution->set (SCALAR_dofs[0], values_from_exodus[i]);
1794 : }
1795 : }
1796 :
1797 0 : system.solution->close();
1798 0 : system.update();
1799 0 : }
1800 :
1801 :
1802 0 : void Nemesis_IO::read_global_variable(std::vector<std::string> global_var_names,
1803 : unsigned int timestep,
1804 : std::vector<Real> & global_values)
1805 : {
1806 0 : std::size_t size = global_var_names.size();
1807 0 : libmesh_error_msg_if(size == 0, "ERROR, empty list of global variables to read from the Nemesis file.");
1808 :
1809 : // read the values for all global variables
1810 0 : std::vector<Real> values_from_exodus;
1811 0 : nemhelper->read_var_names(ExodusII_IO_Helper::GLOBAL);
1812 0 : nemhelper->read_global_values(values_from_exodus, timestep);
1813 0 : std::vector<std::string> global_var_names_exodus = nemhelper->global_var_names;
1814 :
1815 0 : if (values_from_exodus.size() == 0)
1816 0 : return; // This will happen in parallel on procs that are not 0
1817 :
1818 0 : global_values.clear();
1819 0 : for (std::size_t i = 0; i != size; ++i)
1820 : {
1821 : // for each global variable in global_var_names, look the corresponding one in global_var_names_from_exodus
1822 : // and fill global_values accordingly
1823 0 : auto it = find(global_var_names_exodus.begin(), global_var_names_exodus.end(), global_var_names[i]);
1824 0 : if (it != global_var_names_exodus.end())
1825 0 : global_values.push_back(values_from_exodus[it - global_var_names_exodus.begin()]);
1826 : else
1827 0 : libmesh_error_msg("ERROR, Global variable " << global_var_names[i] << \
1828 : " not found in Nemesis file.");
1829 : }
1830 0 : }
1831 :
 : // Forward the HDF5 writing on/off preference to the underlying helper.
1832 0 : void Nemesis_IO::set_hdf5_writing(bool write_hdf5)
1833 : {
1834 0 : nemhelper->set_hdf5_writing(write_hdf5);
1835 0 : }
1836 :
1837 : #else
1838 :
 : // Fallback stub used when the Nemesis API is not available in this build.
1839 : void Nemesis_IO::write_information_records ( const std::vector<std::string> & )
1840 : {
1841 : libmesh_error_msg("ERROR, Nemesis API is not defined.");
1842 : }
1843 :
 : // Fallback stub used when the Nemesis API is not available in this build.
1844 : const std::vector<std::string> & Nemesis_IO::get_nodal_var_names()
1845 : {
1846 : libmesh_error_msg("ERROR, Nemesis API is not defined.");
1847 :
1848 : // Prevent potential compiler warnings about missing return statement
1849 : return _output_variables;
1850 : }
1851 :
 : // Fallback stub used when the Nemesis API is not available in this build.
1852 : void Nemesis_IO::copy_nodal_solution(System &, std::string, std::string, unsigned int)
1853 : {
1854 : libmesh_error_msg("ERROR, Nemesis API is not defined.");
1855 : }
1856 :
1857 : void Nemesis_IO::set_hdf5_writing(bool) {}
1858 :
1859 : #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1860 :
1861 :
1862 :
1863 : } // namespace libMesh
|