libMesh
nemesis_io.C
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2019 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 // C++ includes
20 #include <numeric> // std::accumulate
21 
22 // LibMesh includes
23 #include "libmesh/distributed_mesh.h"
24 #include "libmesh/elem.h"
25 #include "libmesh/exodusII_io.h"
26 #include "libmesh/libmesh_logging.h"
27 #include "libmesh/nemesis_io.h"
28 #include "libmesh/nemesis_io_helper.h"
29 #include "libmesh/node.h"
30 #include "libmesh/parallel.h"
31 #include "libmesh/utility.h" // is_sorted, deallocate
32 #include "libmesh/boundary_info.h"
33 #include "libmesh/mesh_communication.h"
34 #include "libmesh/fe_type.h"
35 #include "libmesh/equation_systems.h"
36 #include "libmesh/numeric_vector.h"
37 #include "libmesh/int_range.h"
38 #include "libmesh/auto_ptr.h"
39 
40 namespace libMesh
41 {
42 
43 
44 //-----------------------------------------------
45 // anonymous namespace for implementation details
46 namespace {
47 struct CompareGlobalIdxMappings
48 {
49  // Strict weak ordering for the a.first -> a.second mapping. Since we can only
50  // map to one value, we order on the first entry only.
51  bool operator()(const std::pair<unsigned int, unsigned int> & a,
52  const std::pair<unsigned int, unsigned int> & b) const
53  { return a.first < b.first; }
54 
55  // Strict weak ordering for the a.first -> a.second mapping. Lookups will
56  // be in terms of a single integer, which is why we need this method.
57  bool operator()(const std::pair<unsigned int, unsigned int> & a,
58  const unsigned int b) const
59  { return a.first < b; }
60 };
61 
62 // Nemesis & ExodusII use int for all integer values, even the ones which
63 // should never be negative. We prefer unsigned out of habit, so this
64 // trivial little method saves some typing and also asserts that nothing
65 // has gone horribly wrong in the conversion.
66 template <typename T>
67 inline unsigned int to_uint ( const T & t )
68 {
69  libmesh_assert_equal_to (t, static_cast<T>(static_cast<unsigned int>(t)));
70 
71  return static_cast<unsigned int>(t);
72 }
73 
74 // Test equality for the a.first -> a.second mapping. Since we can only map
75 // to one value, we test the first entry only.
76 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API) && !defined(NDEBUG)
77 inline bool global_idx_mapping_equality (const std::pair<unsigned int, unsigned int> & a,
78  const std::pair<unsigned int, unsigned int> & b)
79 {
80  return a.first == b.first;
81 }
82 #endif
83 
84 }
85 
86 
87 
88 // ------------------------------------------------------------
89 // Nemesis_IO class members
90 Nemesis_IO::Nemesis_IO (MeshBase & mesh,
91 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
92  bool single_precision
93 #else
94  bool
95 #endif
96  ) :
97  MeshInput<MeshBase> (mesh, /*is_parallel_format=*/true),
98  MeshOutput<MeshBase> (mesh, /*is_parallel_format=*/true),
99  ParallelObject (mesh),
100 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
101  nemhelper(libmesh_make_unique<Nemesis_IO_Helper>(*this, false, single_precision)),
102  _timestep(1),
103 #endif
104  _verbose (false),
105  _append(false),
106  _allow_empty_variables(false)
107 {
108 }
109 
110 
111 
112 // Destructor. Defined in the C file so we can be sure to get away
113 // with a forward declaration of Nemesis_IO_Helper in the header file.
114 Nemesis_IO::~Nemesis_IO ()
115 {
116 }
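// A minimal usage sketch (hypothetical driver code, not part of this file;
// it assumes "mesh" is a DistributedMesh and "es" an EquationSystems built
// on it), showing the typical call sequence for this class:
//
//   Nemesis_IO nem_io(mesh);
//   nem_io.verbose(true);                          // echo helper diagnostics
//   nem_io.read("input.e");                        // one Nemesis file per rank
//   ...
//   nem_io.write_timestep("output.e", es, /*timestep=*/1, /*time=*/0.);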
117 
118 
119 
120 void Nemesis_IO::verbose (bool set_verbosity)
121 {
122  _verbose = set_verbosity;
123 
124 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
125  // Set the verbose flag in the helper object
126  // as well.
127  nemhelper->verbose = _verbose;
128 #endif
129 }
130 
131 
132 
133 void Nemesis_IO::append(bool val)
134 {
135  _append = val;
136 }
137 
138 
139 
140 void Nemesis_IO::set_output_variables(const std::vector<std::string> & output_variables,
141  bool allow_empty)
142 {
143  _output_variables = output_variables;
144  _allow_empty_variables = allow_empty;
145 }
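// Note on the members set above (added commentary): when nodal data is
// written later, a non-empty _output_variables list (or
// _allow_empty_variables == true) restricts output to exactly that list;
// otherwise the variable names supplied by the caller / EquationSystems
// are used. See the write_nodal_data() overloads below.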
146 
147 
148 
149 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
150 void Nemesis_IO::read (const std::string & base_filename)
151 {
152  // On one processor, Nemesis and ExodusII should be equivalent, so
153  // let's cowardly defer to that implementation...
154  if (this->n_processors() == 1)
155  {
156  // We can do this in one line but if the verbose flag was set in this
157  // object, it will no longer be set... thus no extra print-outs for serial runs.
158  // ExodusII_IO(this->mesh()).read (base_filename); // ambiguous when Nemesis_IO is multiply-inherited
159 
160  MeshBase & mesh = MeshInput<MeshBase>::mesh();
161  ExodusII_IO(mesh).read (base_filename);
162  return;
163  }
164 
165  LOG_SCOPE ("read()","Nemesis_IO");
166 
167  // This function must be run on all processors at once
168  parallel_object_only();
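// A rough roadmap of what follows (added commentary summarizing the code
// below, not a specification of it):
//  1.) read this processor's Exodus header plus the Nemesis "load balance"
//      and communication-map data,
//  2.) establish node ownership (lowest touching rank wins) and agree on a
//      consistent global node numbering via point-to-point messages,
//  3.) read and add the elements block by block, and
//  4.) transfer the side set / node set information into BoundaryInfo.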
169 
170  if (_verbose)
171  {
172  libMesh::out << "[" << this->processor_id() << "] ";
173  libMesh::out << "Reading Nemesis file on processor: " << this->processor_id() << std::endl;
174  }
175 
176  // Construct the Nemesis filename based on the number of processors and the
177  // current processor ID.
178  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
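// (The per-rank filename follows the usual Nemesis spread-file convention --
// roughly "base_filename.<n_processors>.<processor_id>", e.g. "out.e.4.0" on
// rank 0 of a 4-processor run; see construct_nemesis_filename() for the
// exact padding rules.)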
179 
180  if (_verbose)
181  libMesh::out << "Opening file: " << nemesis_filename << std::endl;
182 
183  // Open the Exodus file in EX_READ mode
184  nemhelper->open(nemesis_filename.c_str(), /*read_only=*/true);
185 
186  // Get a reference to the mesh. We need to be specific
187  // since Nemesis_IO is multiply-inherited
188  // MeshBase & mesh = this->mesh();
189  MeshBase & mesh = MeshInput<MeshBase>::mesh();
190 
191  // We're reading a file on each processor, so our mesh is
192  // created already partitioned into that many parts.
193  this->set_n_partitions(this->n_processors());
194 
195  // Local information: Read the following information from the standard Exodus header
196  // title[0]
197  // num_dim
198  // num_nodes
199  // num_elem
200  // num_elem_blk
201  // num_node_sets
202  // num_side_sets
203  nemhelper->read_header();
204  nemhelper->print_header();
205 
206  // Get global information: number of nodes, elems, blocks, nodesets and sidesets
207  nemhelper->get_init_global();
208 
209  // Get "load balance" information. This includes the number of internal & border
210  // nodes and elements as well as the number of communication maps.
211  nemhelper->get_loadbal_param();
212 
213  // Do some error checking
214  if (nemhelper->num_external_nodes)
215  libmesh_error_msg("ERROR: there should be no external nodes in an element-based partitioning!");
216 
217  libmesh_assert_equal_to (nemhelper->num_nodes,
218  (nemhelper->num_internal_nodes +
219  nemhelper->num_border_nodes));
220 
221  libmesh_assert_equal_to (nemhelper->num_elem,
222  (nemhelper->num_internal_elems +
223  nemhelper->num_border_elems));
224 
225  libmesh_assert_less_equal (nemhelper->num_nodes, nemhelper->num_nodes_global);
226  libmesh_assert_less_equal (nemhelper->num_elem, nemhelper->num_elems_global);
227 
228  // Read nodes from the exodus file: this fills the nemhelper->x,y,z arrays.
229  nemhelper->read_nodes();
230 
231  // Reads the nemhelper->node_num_map array, node_num_map[i] is the global node number for
232  // local node number i.
233  nemhelper->read_node_num_map();
234 
235  // The get_cmap_params() function reads in the:
236  // node_cmap_ids[],
237  // node_cmap_node_cnts[],
238  // elem_cmap_ids[],
239  // elem_cmap_elem_cnts[],
240  nemhelper->get_cmap_params();
241 
242  // Read the IDs of the interior, boundary, and external nodes. This function
243  // fills the vectors:
244  // node_mapi[],
245  // node_mapb[],
246  // node_mape[]
247  nemhelper->get_node_map();
248 
249  // Read each node communication map for this processor. This function
250  // fills the vectors of vectors named:
251  // node_cmap_node_ids[][]
252  // node_cmap_proc_ids[][]
253  nemhelper->get_node_cmap();
254 
255  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_cnts.size());
256  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_node_ids.size());
257  libmesh_assert_equal_to (to_uint(nemhelper->num_node_cmaps), nemhelper->node_cmap_proc_ids.size());
258 
259 #ifndef NDEBUG
260  // We expect the communication maps to be symmetric - e.g. if processor i thinks it
261  // communicates with processor j, then processor j should also be expecting to
262  // communicate with i. We can assert that here easily enough with an alltoall,
263  // but let's only do it when not in optimized mode to limit unnecessary communication.
264  {
265  std::vector<unsigned char> pid_send_partner (this->n_processors(), 0);
266 
267  // strictly speaking, we should expect to communicate with ourself...
268  pid_send_partner[this->processor_id()] = 1;
269 
270  // mark each processor id we reference with a node cmap
271  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
272  {
273  libmesh_assert_less (nemhelper->node_cmap_ids[cmap], this->n_processors());
274 
275  pid_send_partner[nemhelper->node_cmap_ids[cmap]] = 1;
276  }
277 
278  // Copy the send pairing so we can catch the receive pairing and
279  // test for equality.
280  const std::vector<unsigned char> pid_recv_partner (pid_send_partner);
281 
282  this->comm().alltoall (pid_send_partner);
283 
284  libmesh_assert (pid_send_partner == pid_recv_partner);
285  }
286 #endif
287 
288  // We now have enough information to infer node ownership. We start by assuming
289  // we own all the nodes on this processor. We will then interrogate the
290  // node cmaps and see if a lower-rank processor is associated with any of
291  // our nodes. If so, then that processor owns the node, not us...
292  std::vector<processor_id_type> node_ownership (nemhelper->num_internal_nodes +
293  nemhelper->num_border_nodes,
294  this->processor_id());
295 
296  // a map from processor id to cmap number, to be used later
297  std::map<unsigned int, unsigned int> pid_to_cmap_map;
298 
299  // For each node_cmap...
300  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
301  {
302  // Good time for error checking...
303  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
304  nemhelper->node_cmap_node_ids[cmap].size());
305 
306  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_node_cnts[cmap]),
307  nemhelper->node_cmap_proc_ids[cmap].size());
308 
309  // In all the samples I have seen, node_cmap_ids[cmap] is the processor
310  // rank of the remote processor...
311  const processor_id_type adjcnt_pid_idx =
312  cast_int<processor_id_type>(nemhelper->node_cmap_ids[cmap]);
313 
314  libmesh_assert_less (adjcnt_pid_idx, this->n_processors());
315  libmesh_assert_not_equal_to (adjcnt_pid_idx, this->processor_id());
316 
317  // We only expect one cmap per adjacent processor
318  libmesh_assert (!pid_to_cmap_map.count(adjcnt_pid_idx));
319 
320  pid_to_cmap_map[adjcnt_pid_idx] = cmap;
321 
322  // ...and each node in that cmap...
323  for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
324  {
325  // Are the node_cmap_ids and node_cmap_proc_ids really redundant?
326  libmesh_assert_equal_to
327  (adjcnt_pid_idx,
328  cast_int<processor_id_type>(nemhelper->node_cmap_proc_ids[cmap][idx]));
329 
330  // we are expecting the exodus node numbering to be 1-based...
331  const unsigned int local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1;
332 
333  libmesh_assert_less (local_node_idx, node_ownership.size());
334 
335  // if the adjacent processor is lower rank than the current
336  // owner for this node, then it will get the node...
337  node_ownership[local_node_idx] =
338  std::min(node_ownership[local_node_idx], adjcnt_pid_idx);
339  }
340  } // We now should have established proper node ownership.
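// To restate the rule just applied: every node starts out "owned" by this
// processor, and each cmap entry can only lower that owner, so a shared node
// ends up owned by the lowest-ranked processor that touches it. For example
// (illustrative numbers only), if we are processor 3 and a border node shows
// up in the cmaps shared with processors 2 and 5, processor 2 owns it.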
341 
342  // now that ownership is established, we can figure out how many nodes we
343  // will be responsible for numbering.
344  unsigned int num_nodes_i_must_number = 0;
345 
346  for (const auto & pid : node_ownership)
347  if (pid == this->processor_id())
348  num_nodes_i_must_number++;
349 
350  // more error checking...
351  libmesh_assert_greater_equal (num_nodes_i_must_number, nemhelper->num_internal_nodes);
352  libmesh_assert (num_nodes_i_must_number <= to_uint(nemhelper->num_internal_nodes +
353  nemhelper->num_border_nodes));
354  if (_verbose)
355  libMesh::out << "[" << this->processor_id() << "] "
356  << "num_nodes_i_must_number="
357  << num_nodes_i_must_number
358  << std::endl;
359 
360  // The call to get_loadbal_param() gets 7 pieces of information. We allgather
361  // these now across all processors to determine some global numberings. We should
362  // also gather the number of nodes each processor thinks it will number so that
363  // we can (i) determine our offset, and (ii) do some error checking.
364  std::vector<int> all_loadbal_data ( 8 );
365  all_loadbal_data[0] = nemhelper->num_internal_nodes;
366  all_loadbal_data[1] = nemhelper->num_border_nodes;
367  all_loadbal_data[2] = nemhelper->num_external_nodes;
368  all_loadbal_data[3] = nemhelper->num_internal_elems;
369  all_loadbal_data[4] = nemhelper->num_border_elems;
370  all_loadbal_data[5] = nemhelper->num_node_cmaps;
371  all_loadbal_data[6] = nemhelper->num_elem_cmaps;
372  all_loadbal_data[7] = num_nodes_i_must_number;
373 
374  this->comm().allgather (all_loadbal_data, /* identical_buffer_sizes = */ true);
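// After the allgather, the 8 entries contributed by processor p sit
// contiguously in the buffer, so entry k from processor p is read back as
// all_loadbal_data[8*p + k]. In particular, the node count processor p
// promised to number is all_loadbal_data[8*p + 7], which is how the node
// offsets are computed below.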
375 
376  // OK, we are now in a position to request new global indices for all the nodes
377  // we do not own
378 
379  // Let's get a unique message tag to use for send()/receive()
380  Parallel::MessageTag nodes_tag = mesh.comm().get_unique_tag();
381 
382  std::vector<std::vector<int>>
383  needed_node_idxs (nemhelper->num_node_cmaps); // the indices we will ask for
384 
385  std::vector<Parallel::Request>
386  needed_nodes_requests (nemhelper->num_node_cmaps);
387 
388  for (unsigned int cmap=0; cmap<to_uint(nemhelper->num_node_cmaps); cmap++)
389  {
390  // We know we will need no more indices than there are nodes
391  // in this cmap, but that number is an upper bound in general
392  // since the neighboring processor associated with the cmap
393  // may not actually own it
394  needed_node_idxs[cmap].reserve (nemhelper->node_cmap_node_cnts[cmap]);
395 
396  const unsigned int adjcnt_pid_idx = nemhelper->node_cmap_ids[cmap];
397 
398  // ...and each node in that cmap...
399  for (unsigned int idx=0; idx<to_uint(nemhelper->node_cmap_node_cnts[cmap]); idx++)
400  {
401  const unsigned int
402  local_node_idx = nemhelper->node_cmap_node_ids[cmap][idx]-1,
403  owning_pid_idx = node_ownership[local_node_idx];
404 
405  // add it to the request list for its owning processor.
406  if (owning_pid_idx == adjcnt_pid_idx)
407  {
408  const unsigned int
409  global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
410  needed_node_idxs[cmap].push_back(global_node_idx);
411  }
412  }
413  // now post the send for this cmap
414  this->comm().send (adjcnt_pid_idx, // destination
415  needed_node_idxs[cmap], // send buffer
416  needed_nodes_requests[cmap], // request
417  nodes_tag);
418  } // all communication requests for getting updated global indices for border
419  // nodes have been initiated
420 
421  // Figure out how many nodes each processor thinks it will number and make sure
422  // that it adds up to the global number of nodes. Also, set up global node
423  // index offsets for each processor.
424  std::vector<unsigned int>
425  all_num_nodes_i_must_number (this->n_processors());
426 
427  for (auto pid : IntRange<unsigned int>(0, this->n_processors()))
428  all_num_nodes_i_must_number[pid] = all_loadbal_data[8*pid + 7];
429 
430  // The sum of all the entries in this vector should sum to the number of global nodes
431  libmesh_assert (std::accumulate(all_num_nodes_i_must_number.begin(),
432  all_num_nodes_i_must_number.end(),
433  0) == nemhelper->num_nodes_global);
434 
435  unsigned int my_next_node = 0;
436  for (auto pid : IntRange<unsigned int>(0, this->processor_id()))
437  my_next_node += all_num_nodes_i_must_number[pid];
438 
439  const unsigned int my_node_offset = my_next_node;
440 
441  if (_verbose)
442  libMesh::out << "[" << this->processor_id() << "] "
443  << "my_node_offset="
444  << my_node_offset
445  << std::endl;
446 
447  // Add internal nodes to the DistributedMesh, using the node ID offset we
448  // computed and the current processor's ID.
449  for (unsigned int i=0; i<to_uint(nemhelper->num_internal_nodes); ++i)
450  {
451  const unsigned int local_node_idx = nemhelper->node_mapi[i]-1;
452 #ifndef NDEBUG
453  const unsigned int owning_pid_idx = node_ownership[local_node_idx];
454 #endif
455 
456  // an internal node we do not own? huh??
457  libmesh_assert_equal_to (owning_pid_idx, this->processor_id());
458  libmesh_assert_less (my_next_node, nemhelper->num_nodes_global);
459 
460  // "Catch" the node pointer after addition, make sure the
461  // ID matches the requested value.
462  Node * added_node =
463  mesh.add_point (Point(nemhelper->x[local_node_idx],
464  nemhelper->y[local_node_idx],
465  nemhelper->z[local_node_idx]),
466  my_next_node,
467  this->processor_id());
468 
469  // Make sure the node we added has the ID we thought it would
470  if (added_node->id() != my_next_node)
471  {
472  libMesh::err << "Error, node added with ID " << added_node->id()
473  << ", but we wanted ID " << my_next_node << std::endl;
474  }
475 
476  // update the local->global index map, when we are done
477  // it will be 0-based.
478  nemhelper->node_num_map[local_node_idx] = my_next_node++;
479  }
480 
481  // Now, for the boundary nodes... We may very well own some of them,
482  // but there may be others for which we have requested the new global
483  // id. We expect to be asked for the ids of the ones we own, so
484  // we need to create a map from the old global id to the new one
485  // we are about to create.
486  typedef std::vector<std::pair<unsigned int, unsigned int>> global_idx_mapping_type;
487  global_idx_mapping_type old_global_to_new_global_map;
488  old_global_to_new_global_map.reserve (num_nodes_i_must_number // total # i will have
489  - (my_next_node // amount i have thus far
490  - my_node_offset)); // this should be exact!
491  CompareGlobalIdxMappings global_idx_mapping_comp;
492 
493  for (unsigned int i=0; i<to_uint(nemhelper->num_border_nodes); ++i)
494  {
495  const unsigned int
496  local_node_idx = nemhelper->node_mapb[i]-1,
497  owning_pid_idx = node_ownership[local_node_idx];
498 
499  // if we own it...
500  if (owning_pid_idx == this->processor_id())
501  {
502  const unsigned int
503  global_node_idx = nemhelper->node_num_map[local_node_idx]-1;
504 
505  // we will number it, and create a mapping from its old global index to
506  // the new global index, for lookup purposes when neighbors come calling
507  old_global_to_new_global_map.push_back(std::make_pair(global_node_idx,
508  my_next_node));
509 
510  // "Catch" the node pointer after addition, make sure the
511  // ID matches the requested value.
512  Node * added_node =
513  mesh.add_point (Point(nemhelper->x[local_node_idx],
514  nemhelper->y[local_node_idx],
515  nemhelper->z[local_node_idx]),
516  my_next_node,
517  this->processor_id());
518 
519  // Make sure the node we added has the ID we thought it would
520  if (added_node->id() != my_next_node)
521  {
522  libMesh::err << "Error, node added with ID " << added_node->id()
523  << ", but we wanted ID " << my_next_node << std::endl;
524  }
525 
526  // update the local->global index map, when we are done
527  // it will be 0-based.
528  nemhelper->node_num_map[local_node_idx] = my_next_node++;
529  }
530  }
531  // That should cover numbering all the nodes which belong to us...
532  libmesh_assert_equal_to (num_nodes_i_must_number, (my_next_node - my_node_offset));
533 
534  // Let's sort the mapping so we can efficiently answer requests
535  std::sort (old_global_to_new_global_map.begin(),
536  old_global_to_new_global_map.end(),
537  global_idx_mapping_comp);
538 
539  // and it had better be unique...
540  libmesh_assert (std::unique (old_global_to_new_global_map.begin(),
541  old_global_to_new_global_map.end(),
542  global_idx_mapping_equality) ==
543  old_global_to_new_global_map.end());
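// With the mapping sorted (and verified unique), a request for an old global
// id can be answered with a single std::lower_bound() over the pairs using
// CompareGlobalIdxMappings -- that is exactly what happens when the incoming
// requests are serviced in the communication loop below.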
544 
545  // We can now catch incoming requests and process them. For efficiency,
546  // let's process whichever message is available next.
547  std::map<unsigned int, std::vector<int>> requested_node_idxs; // the indices asked of us
548 
549  std::vector<Parallel::Request> requested_nodes_requests(nemhelper->num_node_cmaps);
550 
551  // We know we will receive the request from a given processor before
552  // we receive its reply to our request. However, we may receive
553  // a request and a response from one processor before getting
554  // a request from another processor. So what we are doing here
555  // is processing whatever message comes next, while recognizing
556  // we will receive a request from a processor before receiving
557  // its reply
558  std::vector<bool> processed_cmap (nemhelper->num_node_cmaps, false);
559 
560  for (unsigned int comm_step=0; comm_step<2*to_uint(nemhelper->num_node_cmaps); comm_step++)
561  {
562  // query the first message which is available
563  const Parallel::Status
564  status (this->comm().probe (Parallel::any_source,
565  nodes_tag));
566  const unsigned int
567  requesting_pid_idx = status.source(),
568  source_pid_idx = status.source();
569 
570  // this had better be from a processor we are expecting...
571  libmesh_assert (pid_to_cmap_map.count(requesting_pid_idx));
572 
573  // the local cmap which corresponds to the source processor
574  const unsigned int cmap = pid_to_cmap_map[source_pid_idx];
575 
576  if (!processed_cmap[cmap])
577  {
578  processed_cmap[cmap] = true;
579 
580  // we should only get one request per paired processor
581  libmesh_assert (!requested_node_idxs.count(requesting_pid_idx));
582 
583  // get a reference to the request buffer for this processor to
584  // avoid repeated map lookups
585  std::vector<int> & xfer_buf (requested_node_idxs[requesting_pid_idx]);
586 
587  // actually receive the message.
588  this->comm().receive (requesting_pid_idx, xfer_buf, nodes_tag);
589 
590  // Fill the request
591  for (auto i : index_range(xfer_buf))
592  {
593  // the requested old global node index, *now 0-based*
594  const unsigned int old_global_node_idx = xfer_buf[i];
595 
596  // find the new global node index for the requested node -
597  // note that requesting_pid_idx thinks we own this node,
598  // so we better!
599  const global_idx_mapping_type::const_iterator it =
600  std::lower_bound (old_global_to_new_global_map.begin(),
601  old_global_to_new_global_map.end(),
602  old_global_node_idx,
603  global_idx_mapping_comp);
604 
605  libmesh_assert (it != old_global_to_new_global_map.end());
606  libmesh_assert_equal_to (it->first, old_global_node_idx);
607  libmesh_assert_greater_equal (it->second, my_node_offset);
608  libmesh_assert_less (it->second, my_next_node);
609 
610  // overwrite the requested old global node index with the new global index
611  xfer_buf[i] = it->second;
612  }
613 
614  // and send the new global indices back to the processor which asked for them
615  this->comm().send (requesting_pid_idx,
616  xfer_buf,
617  requested_nodes_requests[cmap],
618  nodes_tag);
619  } // done processing the request
620 
621  // this is the second time we have heard from this processor,
622  // so it must be its reply to our request
623  else
624  {
625  // a long time ago, we sent off our own requests. now it is time to catch the
626  // replies and get the new global node numbering. note that for any reply
627  // we receive, the corresponding nonblocking send from above *must* have been
628  // completed, since the reply is in response to that request!!
629 
630  // if we have received a reply, our send *must* have completed
631  // (note we never actually need to wait on the request)
632  libmesh_assert (needed_nodes_requests[cmap].test());
633  libmesh_assert_equal_to (to_uint(nemhelper->node_cmap_ids[cmap]), source_pid_idx);
634 
635  // now post the receive for this cmap
636  this->comm().receive (source_pid_idx,
637  needed_node_idxs[cmap],
638  nodes_tag);
639 
640  libmesh_assert_less_equal (needed_node_idxs[cmap].size(),
641  nemhelper->node_cmap_node_ids[cmap].size());
642 
643  for (std::size_t i=0, j=0, ncnis=nemhelper->node_cmap_node_ids[cmap].size(); i < ncnis; i++)
644  {
645  const unsigned int
646  local_node_idx = nemhelper->node_cmap_node_ids[cmap][i]-1,
647  owning_pid_idx = node_ownership[local_node_idx];
648 
649  // if this node is owned by source_pid_idx, its new global id
650  // is in the buffer we just received
651  if (owning_pid_idx == source_pid_idx)
652  {
653  libmesh_assert_less (j, needed_node_idxs[cmap].size());
654 
655  const unsigned int // now 0-based!
656  global_node_idx = needed_node_idxs[cmap][j++];
657 
658  // "Catch" the node pointer after addition, make sure the
659  // ID matches the requested value.
660  Node * added_node =
661  mesh.add_point (Point(nemhelper->x[local_node_idx],
662  nemhelper->y[local_node_idx],
663  nemhelper->z[local_node_idx]),
664  cast_int<dof_id_type>(global_node_idx),
665  cast_int<processor_id_type>(source_pid_idx));
666 
667  // Make sure the node we added has the ID we thought it would
668  if (added_node->id() != global_node_idx)
669  {
670  libMesh::err << "Error, node added with ID " << added_node->id()
671  << ", but we wanted ID " << global_node_idx << std::endl;
672  }
673 
674  // update the local->global index map, when we are done
675  // it will be 0-based.
676  nemhelper->node_num_map[local_node_idx] = global_node_idx;
677 
678  // we are not really going to use my_next_node again, but we can
679  // keep incrementing it to track how many nodes we have added
680  // to the mesh
681  my_next_node++;
682  }
683  }
684  }
685  } // end of node index communication loop
686 
687  // we had better have added all the nodes we need to!
688  libmesh_assert_equal_to ((my_next_node - my_node_offset), to_uint(nemhelper->num_nodes));
689 
690  // After all that, we should be done with all node-related arrays *except* the
691  // node_num_map, which we have transformed to use our new numbering...
692  // So let's clean up the arrays we are done with.
693  {
694  Utility::deallocate (nemhelper->node_mapi);
695  Utility::deallocate (nemhelper->node_mapb);
696  Utility::deallocate (nemhelper->node_mape);
697  Utility::deallocate (nemhelper->node_cmap_ids);
698  Utility::deallocate (nemhelper->node_cmap_node_cnts);
699  Utility::deallocate (nemhelper->node_cmap_node_ids);
700  Utility::deallocate (nemhelper->node_cmap_proc_ids);
704  Utility::deallocate (needed_node_idxs);
705  Utility::deallocate (node_ownership);
706  }
707 
708  Parallel::wait (needed_nodes_requests);
709  Parallel::wait (requested_nodes_requests);
710  requested_node_idxs.clear();
711 
712  // See what the node count is up to now.
713  if (_verbose)
714  {
715  // Report the number of nodes which have been added locally
716  libMesh::out << "[" << this->processor_id() << "] ";
717  libMesh::out << "mesh.n_nodes()=" << mesh.n_nodes() << std::endl;
718 
719  // Reports the number of nodes that have been added in total.
720  libMesh::out << "[" << this->processor_id() << "] ";
721  libMesh::out << "mesh.parallel_n_nodes()=" << mesh.parallel_n_nodes() << std::endl;
722  }
723 
724 
725 
726  // --------------------------------------------------------------------------------
727  // --------------------------------------------------------------------------------
728  // --------------------------------------------------------------------------------
729 
730 
731  // We can now read in the elements...Exodus stores them in blocks in which all
732  // elements have the same geometric type. This code is adapted directly from exodusII_io.C
733 
734  // Assertion: The sum of the border and internal elements on all processors
735  // should equal nemhelper->num_elems_global
736 #ifndef NDEBUG
737  {
738  int sum_internal_elems=0, sum_border_elems=0;
739  for (unsigned int j=3,c=0; c<this->n_processors(); j+=8,++c)
740  sum_internal_elems += all_loadbal_data[j];
741 
742  for (unsigned int j=4,c=0; c<this->n_processors(); j+=8,++c)
743  sum_border_elems += all_loadbal_data[j];
744 
745  if (_verbose)
746  {
747  libMesh::out << "[" << this->processor_id() << "] ";
748  libMesh::out << "sum_internal_elems=" << sum_internal_elems << std::endl;
749 
750  libMesh::out << "[" << this->processor_id() << "] ";
751  libMesh::out << "sum_border_elems=" << sum_border_elems << std::endl;
752  }
753 
754  libmesh_assert_equal_to (sum_internal_elems+sum_border_elems, nemhelper->num_elems_global);
755  }
756 #endif
757 
758  // We need to set the mesh dimension, but the following...
759  // mesh.set_mesh_dimension(static_cast<unsigned int>(nemhelper->num_dim));
760 
761  // ... is not sufficient since some codes report num_dim==3 for two dimensional
762  // meshes living in 3D, even though all the elements are of 2D type. Therefore,
763  // we instead use the highest element dimension actually found as the Mesh dimension,
764  // similar to what is done by the Exodus reader, except here it requires a
765  // parallel communication.
766  elems_of_dimension.resize(4, false); // will use 1-based
767 
768  // Compute my_elem_offset, the amount by which to offset the local elem numbering
769  // on my processor.
770  unsigned int my_next_elem = 0;
771  for (auto pid : IntRange<unsigned int>(0, this->processor_id()))
772  my_next_elem += (all_loadbal_data[8*pid + 3]+ // num_internal_elems, proc pid
773  all_loadbal_data[8*pid + 4]); // num_border_elems, proc pid
774  const unsigned int my_elem_offset = my_next_elem;
775 
776  if (_verbose)
777  libMesh::out << "[" << this->processor_id() << "] "
778  << "my_elem_offset=" << my_elem_offset << std::endl;
779 
780 
781  // Fills in the:
782  // global_elem_blk_ids[] and
783  // global_elem_blk_cnts[] arrays.
784  nemhelper->get_eb_info_global();
785 
786  // // Fills in the vectors
787  // // elem_mapi[num_internal_elems]
788  // // elem_mapb[num_border_elems ]
789  // // These tell which of the (locally-numbered) elements are internal and which are border elements.
790  // // In our test example these arrays are sorted (but non-contiguous), which makes it possible to
791  // // binary search for each element ID... however I don't think we need to distinguish between the
792  // // two types, since either can have nodes on the boundary!
793  // nemhelper->get_elem_map();
794 
795  // Fills in the vectors of vectors:
796  // elem_cmap_elem_ids[][]
797  // elem_cmap_side_ids[][]
798  // elem_cmap_proc_ids[][]
799  // These arrays are of size num_elem_cmaps * elem_cmap_elem_cnts[i], i = 0..num_elem_cmaps
800  nemhelper->get_elem_cmap();
801 
802  // Get information about the element blocks:
803  // (read in the array nemhelper->block_ids[])
804  nemhelper->read_block_info();
805 
806  // Reads the nemhelper->elem_num_map array, elem_num_map[i] is the global element number for
807  // local element number i.
808  nemhelper->read_elem_num_map();
809 
810  // Read in the element connectivity for each block by
811  // looping over all the blocks.
812  for (unsigned int i=0; i<to_uint(nemhelper->num_elem_blk); i++)
813  {
814  // Read the information for block i: For nemhelper->block_ids[i], reads
815  // elem_type
816  // num_elem_this_blk
817  // num_nodes_per_elem
818  // num_attr
819  // connect <-- the nodal connectivity array for each element in the block.
820  nemhelper->read_elem_in_block(i);
821 
822  // Note that with parallel files it is possible we have no elements in
823  // this block!
824  if (!nemhelper->num_elem_this_blk) continue;
825 
826  // Set subdomain ID based on the block ID.
827  subdomain_id_type subdomain_id =
828  cast_int<subdomain_id_type>(nemhelper->block_ids[i]);
829 
830  // Create a type string (this uses the null-terminated string ctor).
831  const std::string type_str ( nemhelper->elem_type.data() );
832 
833  // Set any relevant node/edge maps for this element
834  const auto & conv = nemhelper->get_conversion(type_str);
835 
836  if (_verbose)
837  libMesh::out << "Reading a block of " << type_str << " elements." << std::endl;
838 
839  // Loop over all the elements in this block
840  for (unsigned int j=0; j<to_uint(nemhelper->num_elem_this_blk); j++)
841  {
842  Elem * elem = Elem::build (conv.libmesh_elem_type()).release();
843  libmesh_assert (elem);
844 
845  // Assign subdomain and processor ID to the newly-created Elem.
846  // Assigning the processor ID beforehand ensures that the Elem is
847  // not added as an "unpartitioned" element. Note that the element
848  // numbering in Exodus is also 1-based.
849  elem->subdomain_id() = subdomain_id;
850  elem->processor_id() = this->processor_id();
851  elem->set_id() = my_next_elem++;
852 #ifdef LIBMESH_ENABLE_UNIQUE_ID
853  elem->set_unique_id() = elem->id();
854 #endif
855 
856  // Mark that we have seen an element of the current element's
857  // dimension.
858  elems_of_dimension[elem->dim()] = true;
859 
860  // Add the created Elem to the Mesh, catch the Elem
861  // pointer that the Mesh throws back.
862  elem = mesh.add_elem (elem);
863 
864  // We are expecting the element "thrown back" by libmesh to have the ID we specified for it...
865  // Check to see that really is the case. Note that my_next_elem was post-incremented, so
866  // subtract 1 when performing the check.
867  if (elem->id() != my_next_elem-1)
868  libmesh_error_msg("Unexpected ID " \
869  << elem->id() \
870  << " set by parallel mesh. (expecting " \
871  << my_next_elem-1 \
872  << ").");
873 
874  // Set all the nodes for this element
875  if (_verbose)
876  libMesh::out << "[" << this->processor_id() << "] "
877  << "Setting nodes for Elem " << elem->id() << std::endl;
878 
879  for (unsigned int k=0; k<to_uint(nemhelper->num_nodes_per_elem); k++)
880  {
881  const unsigned int
882  gi = (j*nemhelper->num_nodes_per_elem + // index into connectivity array
883  conv.get_node_map(k)),
884  local_node_idx = nemhelper->connect[gi]-1, // local node index
885  global_node_idx = nemhelper->node_num_map[local_node_idx]; // new global node index
886 
887  // Set node number
888  elem->set_node(k) = mesh.node_ptr(global_node_idx);
889  }
890  } // for (unsigned int j=0; j<nemhelper->num_elem_this_blk; j++)
891  } // end for (unsigned int i=0; i<nemhelper->num_elem_blk; i++)
892 
893  libmesh_assert_equal_to ((my_next_elem - my_elem_offset), to_uint(nemhelper->num_elem));
894 
895  if (_verbose)
896  {
897  // Print local elems_of_dimension information
898  for (auto i : IntRange<std::size_t>(1, elems_of_dimension.size()))
899  libMesh::out << "[" << this->processor_id() << "] "
900  << "elems_of_dimension[" << i << "]=" << elems_of_dimension[i] << std::endl;
901  }
902 
903  // Get the max dimension seen on the current processor
904  unsigned char max_dim_seen = 0;
905  for (auto i : IntRange<std::size_t>(1, elems_of_dimension.size()))
906  if (elems_of_dimension[i])
907  max_dim_seen = static_cast<unsigned char>(i);
908 
909  // Do a global max to determine the max dimension seen by all processors.
910  // It should match -- I don't think we even support calculations on meshes
911  // with elements of different dimension...
912  this->comm().max(max_dim_seen);
913 
914  if (_verbose)
915  {
916  // Print the max element dimension from all processors
917  libMesh::out << "[" << this->processor_id() << "] "
918  << "max_dim_seen=" << +max_dim_seen << std::endl;
919  }
920 
921  // Set the mesh dimension to the largest encountered for an element
922  mesh.set_mesh_dimension(max_dim_seen);
923 
924 #if LIBMESH_DIM < 3
925  if (mesh.mesh_dimension() > LIBMESH_DIM)
926  libmesh_error_msg("Cannot open dimension " \
927  << mesh.mesh_dimension() \
928  << " mesh file when configured without " \
929  << mesh.mesh_dimension() \
930  << "D support." );
931 #endif
932 
933 
934  // Global sideset information. This is distributed as well; not sure yet whether it will require communication...?
935  nemhelper->get_ss_param_global();
936 
937  if (_verbose)
938  {
939  libMesh::out << "[" << this->processor_id() << "] "
940  << "Read global sideset parameter information." << std::endl;
941 
942  // These global values should be the same on all processors...
943  libMesh::out << "[" << this->processor_id() << "] "
944  << "Number of global sideset IDs: " << nemhelper->global_sideset_ids.size() << std::endl;
945  }
946 
947  // Read *local* sideset info the same way it is done in
948  // exodusII_io_helper. May be called any time after
949  // nem_helper->read_header(); This sets num_side_sets and resizes
950  // elem_list, side_list, and id_list to num_elem_all_sidesets. Note
951  // that there appears to be the same number of sidesets in each file
952  // but they all have different numbers of entries (some are empty).
953  // Note that the sum of "nemhelper->num_elem_all_sidesets" over all
954  // processors should equal the sum of the entries in the "num_global_side_counts" array
955  // filled up by nemhelper->get_ss_param_global()
956  nemhelper->read_sideset_info();
957 
958  if (_verbose)
959  {
960  libMesh::out << "[" << this->processor_id() << "] "
961  << "nemhelper->num_side_sets = " << nemhelper->num_side_sets << std::endl;
962 
963  libMesh::out << "[" << this->processor_id() << "] "
964  << "nemhelper->num_elem_all_sidesets = " << nemhelper->num_elem_all_sidesets << std::endl;
965 
966  if (nemhelper->num_side_sets > 0)
967  {
968  libMesh::out << "Sideset names are: ";
969  for (const auto & pr : nemhelper->id_to_ss_names)
970  libMesh::out << "(" << pr.first << "," << pr.second << ") ";
971  libMesh::out << std::endl;
972  }
973  }
974 
975 #ifdef DEBUG
976  {
977  // In DEBUG mode, check that the global number of sidesets reported
978  // in each nemesis file matches the sum of all local sideset counts
979  // from each processor. This requires a small communication, so only
980  // do it in DEBUG mode.
981  int sum_num_global_side_counts = std::accumulate(nemhelper->num_global_side_counts.begin(),
982  nemhelper->num_global_side_counts.end(),
983  0);
984 
985  // MPI sum up the local files contributions
986  int sum_num_elem_all_sidesets = nemhelper->num_elem_all_sidesets;
987  this->comm().sum(sum_num_elem_all_sidesets);
988 
989  if (sum_num_global_side_counts != sum_num_elem_all_sidesets)
990  libmesh_error_msg("Error! global side count reported by Nemesis does not " \
991  << "match the side count reported by the individual files!");
992  }
993 #endif
994 
995  // Note that exodus stores sidesets in separate vectors but we want to pack
996  // them all into a single vector. So when we call read_sideset(), we pass an offset
997  // into the single vector of all IDs
998  for (int offset=0, i=0; i<nemhelper->num_side_sets; i++)
999  {
1000  offset += (i > 0 ? nemhelper->num_sides_per_set[i-1] : 0); // Compute new offset
1001  nemhelper->read_sideset (i, offset);
1002  }
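// At this point elem_list, side_list and id_list hold the entries of *all*
// local sidesets packed back to back, with the i-th sideset starting at the
// offset accumulated above (the sum of the previous sets' num_sides_per_set
// values).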
1003 
1004  // Now that we have the lists of elements, sides, and IDs, we are ready to set them
1005  // in the BoundaryInfo object of our Mesh object. This is slightly different in parallel...
1006  // For example, I think the IDs in each of the split Exodus files are numbered locally,
1007  // and we have to know the appropriate ID for this processor to be able to set the
1008  // entry in BoundaryInfo. This offset should be given by my_elem_offset determined in
1009  // this function...
1010 
1011  // Debugging:
1012  // Print entries of elem_list
1013  // libMesh::out << "[" << this->processor_id() << "] "
1014  // << "elem_list = ";
1015  // for (const auto & id : nemhelper->elem_list)
1016  // libMesh::out << id << ", ";
1017  // libMesh::out << std::endl;
1018 
1019  // Print entries of side_list
1020  // libMesh::out << "[" << this->processor_id() << "] "
1021  // << "side_list = ";
1022  // for (const auto & id : nemhelper->side_list)
1023  // libMesh::out << id << ", ";
1024  // libMesh::out << std::endl;
1025 
1026 
1027  // Loop over the entries of the elem_list, get their pointers from the
1028  // Mesh data structure, and assign the appropriate side to the BoundaryInfo object.
1029  for (auto e : index_range(nemhelper->elem_list))
1030  {
1031  // Calling mesh.elem_ptr() is an error if no element with that
1032  // id exists on this processor...
1033  //
1034  // Perhaps we should iterate over elements and look them up in
1035  // the elem list instead? Note that the IDs in elem_list are
1036  // not necessarily in order, so if we did instead loop over the
1037  // mesh, we would have to search the (unsorted) elem_list vector
1038  // for each entry! We'll settle for doing some error checking instead.
1039  Elem * elem = mesh.elem_ptr
1040  (my_elem_offset +
1041  (nemhelper->elem_list[e]-1)/*Exodus numbering is 1-based!*/);
1042 
1043  // The side numberings in libmesh and exodus are not 1:1, so we need to map
1044  // whatever side number is stored in Exodus into a libmesh side number using
1045  // a conv object...
1046  const auto & conv = nemhelper->get_conversion(elem->type());
1047 
1048  // Finally, we are ready to add the element and its side to the BoundaryInfo object.
1049  // Call the version of add_side which takes a pointer, since we have already gone to
1050  // the trouble of getting said pointer...
1051  mesh.get_boundary_info().add_side (elem,
1052  cast_int<unsigned short>(conv.get_side_map(nemhelper->side_list[e]-1)), // Exodus numbering is 1-based
1053  cast_int<boundary_id_type>(nemhelper->id_list[e]));
1054  }
1055 
1056  // Debugging: make sure there are as many boundary conditions in the
1057  // boundary ID object as expected. Note that, at this point, the
1058  // mesh still thinks it's serial, so n_boundary_conds() returns the
1059  // local number of boundary conditions (and is therefore cheap)
1060  // which should match nemhelper->elem_list.size().
1061  {
1062  std::size_t nbcs = mesh.get_boundary_info().n_boundary_conds();
1063  if (nbcs != nemhelper->elem_list.size())
1064  libmesh_error_msg("[" << this->processor_id() << "] " \
1065  << "BoundaryInfo contains " \
1066  << nbcs \
1067  << " boundary conditions, while the Exodus file had " \
1068  << nemhelper->elem_list.size());
1069  }
1070 
1071  // Read global nodeset parameters? We might be able to use this to verify
1072  // something about the local files, but I haven't figured out what yet...
1073  nemhelper->get_ns_param_global();
1074 
1075  // Read local nodeset info
1076  nemhelper->read_nodeset_info();
1077 
1078  if (_verbose)
1079  {
1080  libMesh::out << "[" << this->processor_id() << "] ";
1081  libMesh::out << "nemhelper->num_node_sets=" << nemhelper->num_node_sets << std::endl;
1082  if (nemhelper->num_node_sets > 0)
1083  {
1084  libMesh::out << "Nodeset names are: ";
1085  for (const auto & pr : nemhelper->id_to_ns_names)
1086  libMesh::out << "(" << pr.first << "," << pr.second << ") ";
1087  libMesh::out << std::endl;
1088  }
1089  }
1090 
1091  // // Debugging, what is currently in nemhelper->node_num_map anyway?
1092  // libMesh::out << "[" << this->processor_id() << "] "
1093  // << "nemhelper->node_num_map = ";
1094  //
1095  // for (const auto & id : nemhelper->node_num_map)
1096  // libMesh::out << id << ", ";
1097  // libMesh::out << std::endl;
1098 
1099  // For each nodeset,
1100  for (int nodeset=0; nodeset<nemhelper->num_node_sets; nodeset++)
1101  {
1102  // Get the user-defined ID associated with the nodeset
1103  int nodeset_id = nemhelper->nodeset_ids[nodeset];
1104 
1105  if (_verbose)
1106  {
1107  libMesh::out << "[" << this->processor_id() << "] ";
1108  libMesh::out << "nemhelper->nodeset_ids[" << nodeset << "]=" << nodeset_id << std::endl;
1109  }
1110 
1111  // Read the nodeset from file, store them in a vector
1112  nemhelper->read_nodeset(nodeset);
1113 
1114  // Add nodes from the node_list to the BoundaryInfo object
1115  for (auto node : index_range(nemhelper->node_list))
1116  {
1117  // Don't run past the end of our node map!
1118  if (to_uint(nemhelper->node_list[node]-1) >= nemhelper->node_num_map.size())
1119  libmesh_error_msg("Error, index is past the end of node_num_map array!");
1120 
1121  // We should be able to use the node_num_map data structure set up previously to determine
1122  // the proper global node index.
1123  unsigned global_node_id = nemhelper->node_num_map[ nemhelper->node_list[node]-1 /*Exodus is 1-based!*/ ];
1124 
1125  if (_verbose)
1126  {
1127  libMesh::out << "[" << this->processor_id() << "] "
1128  << "nodeset " << nodeset
1129  << ", local node number: " << nemhelper->node_list[node]-1
1130  << ", global node id: " << global_node_id
1131  << std::endl;
1132  }
1133 
1134  // Add the node to the BoundaryInfo object with the proper nodeset_id
1135  mesh.get_boundary_info().add_node
1136  (cast_int<dof_id_type>(global_node_id),
1137  cast_int<boundary_id_type>(nodeset_id));
1138  }
1139  }
1140 
1141  // See what the elem count is up to now.
1142  if (_verbose)
1143  {
1144  // Report the number of elements which have been added locally
1145  libMesh::out << "[" << this->processor_id() << "] ";
1146  libMesh::out << "mesh.n_elem()=" << mesh.n_elem() << std::endl;
1147 
1148  // Reports the number of elements that have been added in total.
1149  libMesh::out << "[" << this->processor_id() << "] ";
1150  libMesh::out << "mesh.parallel_n_elem()=" << mesh.parallel_n_elem() << std::endl;
1151  }
1152 
1153  // For DistributedMesh, it seems that _is_serial is true by default. A hack to
1154  // make the Mesh think it's parallel might be to call:
1158 
1159  // And if that didn't work, then we're actually reading into a
1160  // ReplicatedMesh, so forget about gathering neighboring elements
1161  if (mesh.is_serial())
1162  return;
1163 
1164  // Gather neighboring elements so that the mesh has the proper "ghost" neighbor information.
1165  MeshCommunication().gather_neighboring_elements(cast_ref<DistributedMesh &>(mesh));
1166 }
1167 
1168 #else
1169 
1170 void Nemesis_IO::read (const std::string &)
1171 {
1172  libmesh_error_msg("ERROR, Nemesis API is not defined!");
1173 }
1174 
1175 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1176 
1177 
1178 
1179 
1180 
1181 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1182 
1183 void Nemesis_IO::write (const std::string & base_filename)
1184 {
1185  // Get a constant reference to the mesh for writing
1186  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1187 
1188  // Create the filename for this processor given the base_filename passed in.
1189  std::string nemesis_filename = nemhelper->construct_nemesis_filename(base_filename);
1190 
1191  // If the user has set the append flag here, it doesn't really make
1192  // sense: the intent of this function is to write a Mesh with no
1193  // data, while "appending" is really intended to add data to an
1194  // existing file. If we're verbose, print a message to this effect.
1195  if (_append && _verbose)
1196  libmesh_warning("Warning: Appending in Nemesis_IO::write() does not make sense.\n"
1197  "Creating a new file instead!");
1198 
1199  nemhelper->create(nemesis_filename);
1200 
1201  // Initialize data structures and write some global Nemesis-specific data, such as
1202  // communication maps, to file.
1203  nemhelper->initialize(nemesis_filename,mesh);
1204 
1205  // Call the Nemesis-specialized version of write_nodal_coordinates() to write
1206  // the nodal coordinates.
1207  nemhelper->write_nodal_coordinates(mesh);
1208 
1209  // Call the Nemesis-specialized version of write_elements() to write
1210  // the elements. Note: Must write a zero if a given global block ID has no
1211  // elements...
1212  nemhelper->write_elements(mesh);
1213 
1214  // Call our specialized function to write the nodesets
1215  nemhelper->write_nodesets(mesh);
1216 
1217  // Call our specialized write_sidesets() function to write the sidesets to file
1218  nemhelper->write_sidesets(mesh);
1219 
1220  // Not sure if this is really necessary, but go ahead and flush the file
1221  // once we have written all this stuff.
1222  nemhelper->ex_err = exII::ex_update(nemhelper->ex_id);
1223 
1224  if ((mesh.get_boundary_info().n_edge_conds() > 0) && _verbose)
1225  libmesh_warning("Warning: Mesh contains edge boundary IDs, but these "
1226  "are not supported by the Nemesis format.");
1227 }
1228 
1229 #else
1230 
1231 void Nemesis_IO::write (const std::string & )
1232 {
1233  libmesh_error_msg("ERROR, Nemesis API is not defined!");
1234 }
1235 
1236 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1237 
1238 
1239 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1240 
1241 void Nemesis_IO::write_timestep (const std::string & fname,
1242  const EquationSystems & es,
1243  const int timestep,
1244  const Real time)
1245 {
1246  _timestep=timestep;
1247  write_equation_systems(fname,es);
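 // (write_equation_systems() is inherited from MeshOutput; it eventually
 // calls our write_nodal_data() overloads, which in turn call
 // prepare_to_write_nodal_data() to open/initialize the file -- so by the
 // time the timestep is recorded below, the helper is ready for it.)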
1248 
1249  nemhelper->write_timestep(timestep, time);
1250 }
1251 
1252 #else
1253 
1254 void Nemesis_IO::write_timestep (const std::string &,
1255  const EquationSystems &,
1256  const int,
1257  const Real)
1258 {
1259  libmesh_error_msg("ERROR, Nemesis API is not defined!");
1260 }
1261 
1262 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1263 
1264 
1265 
1266 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1267 
1268 void Nemesis_IO::prepare_to_write_nodal_data (const std::string & fname,
1269  const std::vector<std::string> & names)
1270 {
1271  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1272 
1273  std::string nemesis_filename = nemhelper->construct_nemesis_filename(fname);
1274 
1275  if (!nemhelper->opened_for_writing)
1276  {
1277  // If we're appending, open() the file with read_only=false,
1278  // otherwise create() it and write the contents of the mesh to
1279  // it.
1280  if (_append)
1281  {
1282  nemhelper->open(nemesis_filename.c_str(), /*read_only=*/false);
1283  // After opening the file, read the header so that certain
1284  // fields, such as the number of nodes and the number of
1285  // elements, are correctly initialized for the subsequent
1286  // call to write the nodal solution.
1287  nemhelper->read_header();
1288  }
1289  else
1290  {
1291  nemhelper->create(nemesis_filename);
1292  nemhelper->initialize(nemesis_filename,mesh);
1293  nemhelper->write_nodal_coordinates(mesh);
1294  nemhelper->write_elements(mesh);
1295  nemhelper->write_nodesets(mesh);
1296  nemhelper->write_sidesets(mesh);
1297 
1298  if ((mesh.get_boundary_info().n_edge_conds() > 0) && _verbose)
1299  libmesh_warning("Warning: Mesh contains edge boundary IDs, but these "
1300  "are not supported by the ExodusII format.");
1301  }
1302  }
1303 
1304  // Even if we were already open for writing, we might not have
1305  // initialized the nodal variable names yet. Even if we did, it
1306  // should not hurt to call this twice because the routine sets a
1307  // flag the first time it is called.
1308 #ifdef LIBMESH_USE_COMPLEX_NUMBERS
1309  std::vector<std::string> complex_names = nemhelper->get_complex_names(names);
1310  nemhelper->initialize_nodal_variables(complex_names);
1311 #else
1312  nemhelper->initialize_nodal_variables(names);
1313 #endif
1314 }
1315 
1316 #else
1317 
1318 void Nemesis_IO::prepare_to_write_nodal_data (const std::string &,
1319  const std::vector<std::string> &)
1320 {
1321  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1322 }
1323 
1324 #endif
1325 
1326 
1327 
1328 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1329 
1330 void Nemesis_IO::write_nodal_data (const std::string & base_filename,
1331  const NumericVector<Number> & parallel_soln,
1332  const std::vector<std::string> & names)
1333 {
1334  LOG_SCOPE("write_nodal_data(parallel)", "Nemesis_IO");
1335 
1336  // Only prepare and write nodal variables that are also in
1337  // _output_variables, unless _output_variables is empty. This is the
1338  // same logic that is in ExodusII_IO::write_nodal_data().
1339  std::vector<std::string> output_names;
1340 
1341  if (_allow_empty_variables || !_output_variables.empty())
1342  output_names = _output_variables;
1343  else
1344  output_names = names;
1345 
1346  this->prepare_to_write_nodal_data(base_filename, output_names);
1347 
1348  // Call the new version of write_nodal_solution() that takes a
1349  // NumericVector directly without localizing.
1350  nemhelper->write_nodal_solution(parallel_soln, names, _timestep, output_names);
1351 }
1352 
1353 
1354 
1355 void Nemesis_IO::write_nodal_data (const std::string & base_filename,
1356  const EquationSystems & es,
1357  const std::set<std::string> * system_names)
1358 {
1359  LOG_SCOPE("write_nodal_data(parallel)", "Nemesis_IO");
1360 
1361  // Only prepare and write nodal variables that are also in
1362  // _output_variables, unless _output_variables is empty. This is the
1363  // same logic that is in ExodusII_IO::write_nodal_data().
1364  std::vector<std::string> output_names;
1365 
1366  if (_allow_empty_variables || !_output_variables.empty())
1367  output_names = _output_variables;
1368  else
1369  es.build_variable_names (output_names, nullptr, system_names);
1370 
1371  this->prepare_to_write_nodal_data(base_filename, output_names);
1372 
1373  std::vector<std::pair<unsigned int, unsigned int>> var_nums =
1374  es.find_variable_numbers(output_names);
1375 
1376  nemhelper->write_nodal_solution(es, var_nums, _timestep, output_names);
1377 }
1378 
1379 #else
1380 
1381 void Nemesis_IO::write_nodal_data (const std::string &,
1382  const NumericVector<Number> &,
1383  const std::vector<std::string> &)
1384 {
1385  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1386 }
1387 
1388 void Nemesis_IO::write_nodal_data (const std::string &,
1389  const EquationSystems &,
1390  const std::set<std::string> *)
1391 {
1392  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1393 }
1394 
1395 #endif
1396 
1397 
1398 
1399 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1400 
1401 void Nemesis_IO::write_element_data (const EquationSystems & es)
1402 {
1403  if (!nemhelper->opened_for_writing)
1404  libmesh_error_msg("ERROR, Nemesis file must be initialized before outputting elemental variables.");
1405 
1406  // To be (possibly) filled with a filtered list of variable names to output.
1407  std::vector<std::string> names;
1408 
1409  // All of which should be low order monomials for now
1410  const FEType type(CONSTANT, MONOMIAL);
1411 
1412  // If _output_variables is populated, only output the monomials which are
1413  // also in the _output_variables vector.
1414  if (_output_variables.size() > 0)
1415  {
1416  std::vector<std::string> monomials;
1417 
1418  // Create a list of monomial variable names
1419  es.build_variable_names(monomials, &type);
1420 
1421  // Filter that list against the _output_variables list. Note: if names is still empty after
1422  // all this filtering, all the monomial variables will be gathered
1423  for (const auto & var : monomials)
1424  if (std::find(_output_variables.begin(), _output_variables.end(), var) != _output_variables.end())
1425  names.push_back(var);
1426  }
1427 
1428  // The 'names' vector will be updated here with the names of the variables
1429  // that are actually eligible to be written.
1430  std::vector<std::pair<unsigned int, unsigned int>> var_nums =
1431  es.find_variable_numbers (names, &type);
1432 
1433  // build_parallel_elemental_solution_vector() can return a nullptr,
1434  // in which case there are no constant monomial variables to write,
1435  // and we can just return.
1436  if (var_nums.empty())
1437  {
1438  if (_verbose)
1439  libMesh::out << "No CONSTANT, MONOMIAL data to be written." << std::endl;
1440  return;
1441  }
1442 
1443  // Store the list of subdomains on which each variable *that we are
1444  // going to plot* is active. Note: if any of these sets is _empty_,
1445  // the variable in question is active on _all_ subdomains.
1446  std::vector<std::set<subdomain_id_type>> vars_active_subdomains;
1447  es.get_vars_active_subdomains(names, vars_active_subdomains);
1448 
1449  // Call the Nemesis version of initialize_element_variables().
1450  //
1451  // The Exodus helper version of this function writes an incorrect
1452  // truth table in parallel that somehow does not account for the
1453  // case where a subdomain does not appear on one or more of the
1454  // processors. So, we override that function's behavior in the
1455  // Nemesis helper.
1456  nemhelper->initialize_element_variables(names, vars_active_subdomains);
1457 
1458  // Call (non-virtual) function to write the elemental data in
1459  // parallel. This function is named similarly to the corresponding
1460  // function in the Exodus helper, but it has a different calling
1461  // sequence and is not virtual or an override.
1462  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1463  nemhelper->write_element_values(mesh,
1464  es,
1465  var_nums,
1466  _timestep,
1467  vars_active_subdomains);
1468 }
1469 
1470 #else
1471 
1472 void Nemesis_IO::write_element_data (const EquationSystems &)
1473 {
1474  libmesh_not_implemented();
1475 }
1476 
1477 #endif
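A sketch (editorial addition, not part of nemesis_io.C) of the intended calling order for the elemental output path above, assuming an EquationSystems object that has not yet been initialized. The system name "cell_data", the variable name "indicator", the helper name add_and_write_cell_data, and the file name "out.nem" are illustrative only.

#include "libmesh/equation_systems.h"
#include "libmesh/explicit_system.h"
#include "libmesh/nemesis_io.h"
#include "libmesh/enum_order.h"
#include "libmesh/enum_fe_family.h"

using namespace libMesh;

void add_and_write_cell_data (EquationSystems & es, Nemesis_IO & nem_io)
{
  // Element-wise fields must be CONSTANT MONOMIAL to be picked up by
  // write_element_data().
  ExplicitSystem & aux = es.add_system<ExplicitSystem>("cell_data");
  aux.add_variable("indicator", CONSTANT, MONOMIAL);
  es.init();

  // The Nemesis file must already be opened for writing, so write the
  // nodal data for this timestep first, then append the elemental values.
  nem_io.write_timestep("out.nem", es, /*timestep=*/1, /*time=*/0.);
  nem_io.write_element_data(es);
}

Calling write_element_data() before any write has opened the file triggers the opened_for_writing error at the top of the function above.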
1478 
1479 
1480 
1481 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1482 
1483 void Nemesis_IO::write_nodal_data (const std::string & base_filename,
1484  const std::vector<Number> & soln,
1485  const std::vector<std::string> & names)
1486 {
1487  LOG_SCOPE("write_nodal_data(serialized)", "Nemesis_IO");
1488 
1489  this->prepare_to_write_nodal_data(base_filename, names);
1490 
1491  nemhelper->write_nodal_solution(soln, names, _timestep);
1492 }
1493 
1494 #else
1495 
1496 void Nemesis_IO::write_nodal_data (const std::string &,
1497  const std::vector<Number> &,
1498  const std::vector<std::string> &)
1499 {
1500  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1501 }
1502 
1503 #endif
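For the serialized overload above, the soln and names vectors are typically produced by the EquationSystems object itself. A sketch (editorial addition; the helper name write_serialized_nodal_data and the base name "out.nem" are hypothetical):

#include "libmesh/equation_systems.h"
#include "libmesh/nemesis_io.h"
#include <string>
#include <vector>

using namespace libMesh;

void write_serialized_nodal_data (const EquationSystems & es, Nemesis_IO & nem_io)
{
  std::vector<std::string> names;
  std::vector<Number> soln;

  // Gather every nodal variable name and a serialized copy of the
  // corresponding solution values.
  es.build_variable_names(names);
  es.build_solution_vector(soln);

  nem_io.write_nodal_data("out.nem", soln, names);
}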
1504 
1505 
1506 
1507 
1508 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1509 
1510 void Nemesis_IO::write_global_data (const std::vector<Number> & soln,
1511  const std::vector<std::string> & names)
1512 {
1513  if (!nemhelper->opened_for_writing)
1514  libmesh_error_msg("ERROR, Nemesis file must be initialized before outputting global variables.");
1515 
1516 #ifdef LIBMESH_USE_COMPLEX_NUMBERS
1517 
1518  std::vector<std::string> complex_names = nemhelper->get_complex_names(names);
1519 
1520  nemhelper->initialize_global_variables(complex_names);
1521 
1522  unsigned int num_values = soln.size();
1523  unsigned int num_vars = names.size();
1524  unsigned int num_elems = num_values / num_vars;
1525 
1526  // This will contain, for each variable in turn, the real parts, the
1527  // imaginary parts, and the magnitudes of the values in soln
1528  std::vector<Real> complex_soln(3*num_values);
1529 
1530  for (unsigned i=0; i<num_vars; ++i)
1531  {
1532  for (unsigned int j=0; j<num_elems; ++j)
1533  {
1534  Number value = soln[i*num_elems + j]; // soln is packed variable-by-variable
1535  complex_soln[3*i*num_elems + j] = value.real();
1536  }
1537  for (unsigned int j=0; j<num_elems; ++j)
1538  {
1539  Number value = soln[i*num_elems + j];
1540  complex_soln[3*i*num_elems + num_elems +j] = value.imag();
1541  }
1542  for (unsigned int j=0; j<num_elems; ++j)
1543  {
1544  Number value = soln[i*num_elems + j];
1545  complex_soln[3*i*num_elems + 2*num_elems + j] = std::abs(value);
1546  }
1547  }
1548 
1549  nemhelper->write_global_values(complex_soln, _timestep);
1550 
1551 #else
1552 
1553  // Call the Exodus writer implementation
1554  nemhelper->initialize_global_variables( names );
1555  nemhelper->write_global_values( soln, _timestep);
1556 
1557 #endif
1558 
1559 }
1560 
1561 #else
1562 
1563 void Nemesis_IO::write_global_data (const std::vector<Number> &,
1564  const std::vector<std::string> &)
1565 {
1566  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1567 }
1568 
1569 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
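A usage sketch (editorial addition) for the global-data path above. The quantity names, their values, and the helper name write_globals are illustrative, and the Nemesis file is assumed to have been opened by an earlier write call. With complex numbers enabled, each variable's value is expanded into its real part, imaginary part, and magnitude, in that order, matching the packing loop above.

#include "libmesh/libmesh_common.h"
#include "libmesh/nemesis_io.h"
#include <string>
#include <vector>

using namespace libMesh;

void write_globals (Nemesis_IO & nem_io, Real kinetic_energy, Real total_mass)
{
  const std::vector<std::string> names {"kinetic_energy", "total_mass"};
  const std::vector<Number> values {Number(kinetic_energy), Number(total_mass)};

  // One value per named global variable for the current timestep.
  nem_io.write_global_data(values, names);
}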
1570 
1571 
1572 
1573 #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
1574 
1575 void Nemesis_IO::write_information_records (const std::vector<std::string> & records)
1576 {
1577  if (!nemhelper->opened_for_writing)
1578  libmesh_error_msg("ERROR, Nemesis file must be initialized before outputting information records.");
1579 
1580  // Call the Exodus writer implementation
1581  nemhelper->write_information_records( records );
1582 }
1583 
1584 
1585 #else
1586 
1587 void Nemesis_IO::write_information_records ( const std::vector<std::string> & )
1588 {
1589  libmesh_error_msg("ERROR, Nemesis API is not defined.");
1590 }
1591 
1592 #endif // #if defined(LIBMESH_HAVE_EXODUS_API) && defined(LIBMESH_HAVE_NEMESIS_API)
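And a short sketch (editorial addition, hypothetical record strings and helper name) for attaching information records to an already-open file:

#include "libmesh/nemesis_io.h"
#include <string>
#include <vector>

using namespace libMesh;

void tag_output (Nemesis_IO & nem_io)
{
  // Each record is written as-is; keep records within the ExodusII
  // line-length limit.
  std::vector<std::string> records;
  records.push_back("Created by an illustrative libMesh driver");
  records.push_back("Input file: <name of input deck>");
  nem_io.write_information_records(records);
}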
1593 
1594 } // namespace libMesh