libMesh
checkpoint_io.C
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2025 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 // Local includes
19 #include "libmesh/checkpoint_io.h"
20 #include "libmesh/boundary_info.h"
21 #include "libmesh/distributed_mesh.h"
22 #include "libmesh/elem.h"
23 #include "libmesh/enum_xdr_mode.h"
24 #include "libmesh/libmesh_logging.h"
25 #include "libmesh/mesh_base.h"
26 #include "libmesh/mesh_communication.h"
27 #include "libmesh/mesh_tools.h"
28 #include "libmesh/node.h"
29 #include "libmesh/parallel.h"
30 #include "libmesh/partitioner.h"
31 #include "libmesh/metis_partitioner.h"
32 #include "libmesh/remote_elem.h"
33 #include "libmesh/xdr_io.h"
34 #include "libmesh/xdr_cxx.h"
35 #include "libmesh/utility.h"
36 #include "libmesh/int_range.h"
37 
38 // C++ includes
39 #include <iostream>
40 #include <iomanip>
41 #include <cstdio>
42 #include <vector>
43 #include <string>
44 #include <cstring>
45 #include <fstream>
46 #include <sstream> // for ostringstream
47 #include <unordered_map>
48 #include <unordered_set>
49 #ifdef LIBMESH_HAVE_DIRECT_H
50 #include <direct.h> // rmdir() on Windows
51 #endif
52 #ifdef LIBMESH_HAVE_UNISTD_H
53 #include <unistd.h> // rmdir() on Unix
54 #endif
55 
56 namespace
57 {
58 // chunking computes the number of chunks and first-chunk-offset when splitting a mesh
59 // into nsplits pieces using size procs for the given MPI rank. The number of chunks and offset
60 // are stored in nchunks and first_chunk respectively.
61 void chunking(libMesh::processor_id_type size, libMesh::processor_id_type rank, libMesh::processor_id_type nsplits,
62               libMesh::processor_id_type & nchunks, libMesh::processor_id_type & first_chunk)
63 {
64  if (nsplits % size == 0) // the chunks divide evenly over the processors
65  {
66  nchunks = nsplits / size;
67  first_chunk = libMesh::cast_int<libMesh::processor_id_type>(nchunks * rank);
68  return;
69  }
70 
71  libMesh::processor_id_type nextra = nsplits % size;
72  if (rank < nextra) // leftover chunks cause an extra chunk to be added to this processor
73  {
74  nchunks = libMesh::cast_int<libMesh::processor_id_type>(nsplits / size + 1);
75  first_chunk = libMesh::cast_int<libMesh::processor_id_type>(nchunks * rank);
76  }
77  else // no extra chunks, but first chunk is offset by extras on earlier ranks
78  {
79  nchunks = nsplits / size;
80  // account for the case where nchunks is zero, in which case we want max int
81  first_chunk = libMesh::cast_int<libMesh::processor_id_type>
82  (std::max((int)((nchunks + 1) * (nsplits % size) + nchunks * (rank - nsplits % size)),
83  (1 - (int)nchunks) * std::numeric_limits<int>::max()));
84  }
85 }
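For illustration (not part of the source): splitting nsplits = 10 chunks across size = 4 ranks leaves nextra = 2 leftover chunks, so ranks 0 and 1 each get nchunks = 3 with first_chunk = 0 and 3, while ranks 2 and 3 each get nchunks = 2 with first_chunk = 6 and 8. When nsplits < size, the ranks beyond the first nsplits get nchunks = 0 and a first_chunk of max int, i.e. they own no chunks at all.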
86 
87 std::string_view extension(std::string_view s)
88 {
89  auto pos = s.rfind(".");
90  if (pos == std::string::npos)
91  return "";
92  return s.substr(pos, s.size() - pos);
93 }
94 
95 std::string split_dir(const std::string & input_name, libMesh::processor_id_type n_procs)
96 {
97  return input_name + "/" + std::to_string(n_procs);
98 }
99 
100 
101 std::string header_file(const std::string & input_name, libMesh::processor_id_type n_procs)
102 {
103  return (split_dir(input_name, n_procs) + "/header").append(extension(input_name));
104 }
105 
106 std::string
107 split_file(const std::string & input_name,
108            libMesh::processor_id_type n_procs,
109            libMesh::processor_id_type proc_id)
110 {
111  return (split_dir(input_name, n_procs) + "/split-" + std::to_string(n_procs) + "-" +
112  std::to_string(proc_id)).append(extension(input_name));
113 }
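For illustration, with input_name = "mesh.cpr" and n_procs = 4 these helpers produce split_dir = "mesh.cpr/4", header_file = "mesh.cpr/4/header.cpr", and, for proc_id = 2, split_file = "mesh.cpr/4/split-4-2.cpr".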
114 
115 void make_dir(const std::string & input_name, libMesh::processor_id_type n_procs)
116 {
117  auto ret = libMesh::Utility::mkdir(input_name.c_str());
118  // error only if we failed to create dir - don't care if it was already there
119  libmesh_error_msg_if
120  (ret != 0 && ret != -1,
121  "Failed to create mesh split directory '" << input_name << "': " << std::strerror(ret));
122 
123  auto dir_name = split_dir(input_name, n_procs);
124  ret = libMesh::Utility::mkdir(dir_name.c_str());
125  if (ret == -1)
126  libmesh_warning("In CheckpointIO::write, directory '"
127  << dir_name << "' already exists, overwriting contents.");
128  else
129  libmesh_error_msg_if
130  (ret != 0, "Failed to create mesh split directory '" << dir_name << "': " << std::strerror(ret));
131 }
132 
133 } // namespace
134 
135 namespace libMesh
136 {
137 
138 std::unique_ptr<CheckpointIO> split_mesh(MeshBase & mesh, processor_id_type nsplits)
139 {
140  // There is currently an issue with DofObjects not being properly
141  // reset if the mesh is not first repartitioned onto 1 processor
142  // *before* being repartitioned onto the desired number of
143  // processors. So, this is a workaround, but not a particularly
144  // onerous one.
145  mesh.partition(1);
146  mesh.partition(nsplits);
147 
148  processor_id_type my_num_chunks = 0;
149  processor_id_type my_first_chunk = 0;
150  chunking(mesh.comm().size(), mesh.comm().rank(), nsplits, my_num_chunks, my_first_chunk);
151 
152  auto cpr = std::make_unique<CheckpointIO>(mesh);
153  cpr->current_processor_ids().clear();
154  for (processor_id_type i = my_first_chunk; i < my_first_chunk + my_num_chunks; i++)
155  cpr->current_processor_ids().push_back(i);
156  cpr->current_n_processors() = nsplits;
157  cpr->parallel() = true;
158  return cpr;
159 }
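A minimal usage sketch for split_mesh() (illustrative only: it assumes an input file named "in.exo", that split_mesh() and CheckpointIO come in through "libmesh/checkpoint_io.h", and it mirrors the pattern used by libMesh's splitter utilities):

#include "libmesh/libmesh.h"
#include "libmesh/replicated_mesh.h"
#include "libmesh/checkpoint_io.h"

int main (int argc, char ** argv)
{
  libMesh::LibMeshInit init (argc, argv);

  libMesh::ReplicatedMesh mesh (init.comm());
  mesh.read("in.exo");  // hypothetical input mesh

  // Repartition into 4 chunks; split_mesh() returns a CheckpointIO that is
  // already configured to write this rank's share of the chunks.
  auto cpr = libMesh::split_mesh(mesh, 4);
  cpr->binary() = true;
  cpr->write("in.cpr");  // -> in.cpr/4/header.cpr + in.cpr/4/split-4-<chunk>.cpr

  return 0;
}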
160 
161 
162 // ------------------------------------------------------------
163 // CheckpointIO members
164 CheckpointIO::CheckpointIO (MeshBase & mesh, const bool binary_in) :
165  MeshInput<MeshBase> (mesh,/* is_parallel_format = */ true),
166  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
167  ParallelObject      (mesh),
168  _binary             (binary_in),
169  _parallel (false),
170  _version ("checkpoint-1.5"),
171  _my_processor_ids (1, processor_id()),
172  _my_n_processors (mesh.is_replicated() ? 1 : n_processors())
173 {
174 }
175 
176 CheckpointIO::CheckpointIO (const MeshBase & mesh, const bool binary_in) :
177  MeshInput<MeshBase> (), // write-only
178  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
179  ParallelObject      (mesh),
180  _binary             (binary_in),
181  _parallel (false),
182  _version ("checkpoint-1.5"),
183  _my_processor_ids (1, processor_id()),
184  _my_n_processors (mesh.is_replicated() ? 1 : n_processors())
185 {
186 }
187 
188 CheckpointIO::~CheckpointIO () = default;
189 
190 processor_id_type CheckpointIO::select_split_config(const std::string & input_name, header_id_type & data_size)
191 {
192  std::string header_name;
193 
194  // We'll read a header file from processor 0 and broadcast.
195  if (this->processor_id() == 0)
196  {
197  header_name = header_file(input_name, _my_n_processors);
198 
199  {
200  // look for header+splits with nprocs equal to _my_n_processors
201  std::ifstream in (header_name.c_str());
202  if (!in.good())
203  {
204  // otherwise fall back to a serial/single-split mesh
205  auto orig_header_name = header_name;
206  header_name = header_file(input_name, 1);
207  std::ifstream in2 (header_name.c_str());
208  libmesh_error_msg_if(!in2.good(),
209  "ERROR: Neither one of the following files can be located:\n\t'"
210  << orig_header_name << "' nor\n\t'" << input_name << "'\n"
211  << "If you are running a parallel job, double check that you've "
212  << "created a split for " << _my_n_processors << " ranks.\n"
213  << "Note: One of the paths above may refer to a valid directory on your "
214  << "system, however we are attempting to read a valid header file.");
215  }
216  }
217 
218  Xdr io (header_name, this->binary() ? DECODE : READ);
219 
220  // read the version, but don't care about it
221  std::string input_version;
222  io.data(input_version);
223 
224  // read the data type
225  io.data (data_size);
226  }
227 
228  this->comm().broadcast(data_size);
229  this->comm().broadcast(header_name);
230 
231  // How many per-processor files are here?
232  largest_id_type input_n_procs;
233 
234  switch (data_size) {
235  case 2:
236  input_n_procs = this->read_header<uint16_t>(header_name);
237  break;
238  case 4:
239  input_n_procs = this->read_header<uint32_t>(header_name);
240  break;
241  case 8:
242  input_n_procs = this->read_header<uint64_t>(header_name);
243  break;
244  default:
245  libmesh_error();
246  }
247 
248  if (!input_n_procs)
249  input_n_procs = 1;
250  return cast_int<processor_id_type>(input_n_procs);
251 }
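For example, a 16-rank job reading "mesh.cpr" with a distributed mesh first looks for mesh.cpr/16/header.cpr; if that split does not exist it falls back to mesh.cpr/1/header.cpr, and it only errors out if neither header can be opened. The header that is found then reports how many split files actually exist.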
252 
253 void CheckpointIO::cleanup(const std::string & input_name, processor_id_type n_procs)
254 {
255  auto header = header_file(input_name, n_procs);
256  auto ret = std::remove(header.c_str());
257  if (ret != 0)
258  libmesh_warning("Failed to clean up checkpoint header '" << header << "': " << std::strerror(ret));
259 
260  for (processor_id_type i = 0; i < n_procs; i++)
261  {
262  auto split = split_file(input_name, n_procs, i);
263  ret = std::remove(split.c_str());
264  if (ret != 0)
265  libmesh_warning("Failed to clean up checkpoint split file '" << split << "': " << std::strerror(ret));
266  }
267 
268  auto dir = split_dir(input_name, n_procs);
269  ret = rmdir(dir.c_str());
270  if (ret != 0)
271  libmesh_warning("Failed to clean up checkpoint split dir '" << dir << "': " << std::strerror(ret));
272 
273  // We expect that this may fail if there are other split configurations still present in this
274  // directory - so don't bother to check/warn for failure.
275  rmdir(input_name.c_str());
276 }
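Since cleanup() is static, removing the files of a 4-way split named "mesh.cpr" is a single call, e.g. libMesh::CheckpointIO::cleanup("mesh.cpr", 4);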
277 
278 
279 bool CheckpointIO::version_at_least_1_5() const
280 {
281  return (this->version().find("1.5") != std::string::npos);
282 }
283 
284 
285 void CheckpointIO::write (const std::string & name)
286 {
287  LOG_SCOPE("write()", "CheckpointIO");
288 
289  // convenient reference to our mesh
290  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
291 
292  // FIXME: For backwards compatibility, we'll assume for now that we
293  // only want to write distributed meshes in parallel. Later we can
294  // do a gather_to_zero() and support that case too.
295  _parallel = _parallel || !mesh.is_serial();
296 
297  processor_id_type use_n_procs = 1;
298  if (_parallel)
299  use_n_procs = _my_n_processors;
300 
301  std::string header_file_name = header_file(name, use_n_procs);
302  make_dir(name, use_n_procs);
303 
304  // We'll write a header file from processor 0 to make it easier to do unambiguous
305  // restarts later:
306  if (this->processor_id() == 0)
307  {
308  Xdr io (header_file_name, this->binary() ? ENCODE : WRITE);
309 
310  // write the version
311  io.data(_version, "# version");
312 
313  // write what kind of data type we're using
314  header_id_type data_size = sizeof(largest_id_type);
315  io.data(data_size, "# integer size");
316 
317  // Write out the max mesh dimension for backwards compatibility
318  // with code that sets it independently of element dimensions
319  {
320  uint16_t mesh_dimension = cast_int<uint16_t>(mesh.mesh_dimension());
321  io.data(mesh_dimension, "# dimensions");
322  }
323 
324  // Write out whether or not this is serial output
325  {
326  uint16_t parallel = _parallel;
327  io.data(parallel, "# parallel");
328  }
329 
330  // If we're writing out a parallel mesh then we need to write the number of processors
331  // so we can check it upon reading the file
332  if (_parallel)
333  {
334  largest_id_type n_procs = _my_n_processors;
335  io.data(n_procs, "# n_procs");
336  }
337 
338  // write subdomain names
339  this->write_subdomain_names(io);
340 
341  // write boundary id names
342  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
343  write_bc_names(io, boundary_info, true); // sideset names
344  write_bc_names(io, boundary_info, false); // nodeset names
345 
346  // write extra integer names
347  const bool write_extra_integers = this->version_at_least_1_5();
348 
349  if (write_extra_integers)
350  {
351  largest_id_type n_node_integers = mesh.n_node_integers();
352  io.data(n_node_integers, "# n_extra_integers per node");
353 
354  std::vector<std::string> node_integer_names;
355  for (unsigned int i=0; i != n_node_integers; ++i)
356  node_integer_names.push_back(mesh.get_node_integer_name(i));
357 
358  io.data(node_integer_names);
359 
360  largest_id_type n_elem_integers = mesh.n_elem_integers();
361  io.data(n_elem_integers, "# n_extra_integers per elem");
362 
363  std::vector<std::string> elem_integer_names;
364  for (unsigned int i=0; i != n_elem_integers; ++i)
365  elem_integer_names.push_back(mesh.get_elem_integer_name(i));
366 
367  io.data(elem_integer_names);
368  }
369 
370 
371  }
372 
373  // If this is a serial mesh written to a serial file then we're only
374  // going to write local data from processor 0. If this is a mesh being
375  // written in parallel then we're going to write from every
376  // processor.
377  std::vector<processor_id_type> ids_to_write;
378 
379  // We're going to sort elements by pid in one pass, to avoid sending
380  // predicated iterators through the whole mesh N_p times.
381  //
382  // The data type here needs to be a non-const-pointer to whatever
383  // our element_iterator is a const-pointer to, for compatibility
384  // later.
385  typedef std::remove_const<MeshBase::const_element_iterator::value_type>::type nc_v_t;
386  std::unordered_map<processor_id_type, std::vector<nc_v_t>> elements_on_pid;
387 
388  if (_parallel)
389  {
390  ids_to_write = _my_processor_ids;
391  for (processor_id_type p : ids_to_write)
392  elements_on_pid[p].clear();
393  auto eop_end = elements_on_pid.end();
394  for (auto & elem : mesh.element_ptr_range())
395  {
396  const processor_id_type p = elem->processor_id();
397  if (auto eop_it = elements_on_pid.find(p);
398  eop_it != eop_end)
399  eop_it->second.push_back(elem);
400  }
401  }
402  else if (mesh.is_serial())
403  {
404  if (mesh.processor_id() == 0)
405  {
406  // placeholder
407  ids_to_write.push_back(0);
408  }
409  }
410  else
411  {
412  libmesh_error_msg("Cannot write serial checkpoint from distributed mesh");
413  }
414 
415  // Call build_side_list() and build_node_list() just *once* to avoid
416  // redundant expensive sorts during mesh splitting.
417  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
418  std::vector<std::tuple<dof_id_type, unsigned short int, boundary_id_type>>
419  bc_triples = boundary_info.build_side_list();
420  std::vector<std::tuple<dof_id_type, boundary_id_type>>
421  bc_tuples = boundary_info.build_node_list();
422 
423  for (const auto & my_pid : ids_to_write)
424  {
425  auto file_name = split_file(name, use_n_procs, my_pid);
426  Xdr io (file_name, this->binary() ? ENCODE : WRITE);
427 
428  std::set<const Elem *, CompareElemIdsByLevel> elements;
429 
430  // For serial files or for already-distributed meshes, we write
431  // everything we can see.
432  if (!_parallel || !mesh.is_serial())
433  elements.insert(mesh.elements_begin(), mesh.elements_end());
434  // For parallel files written from serial meshes we write what
435  // we'd be required to keep if we were to be deleting remote
436  // elements. This allows us to write proper parallel files even
437  // from a ReplicatedMesh.
438  //
439  // WARNING: If we have a DistributedMesh which used
440  // "add_extra_ghost_elem" rather than ghosting functors to
441  // preserve elements and which is *also* currently serialized
442  // then we're not preserving those elements here. As a quick
443  // workaround user code should delete_remote_elements() before
444  // writing the checkpoint; as a long term workaround user code
445  // should use ghosting functors instead of extra_ghost_elem
446  // lists.
447  else
448  {
449  const processor_id_type p = my_pid;
450  {
451  if (const auto elements_vec_it = elements_on_pid.find(p);
452  elements_vec_it != elements_on_pid.end())
453  {
454  auto & p_elements = elements_vec_it->second;
455 
456  // Be compatible with both deprecated and
457  // corrected MeshBase iterator types
458  typedef MeshBase::const_element_iterator::value_type v_t;
459 
460  v_t * elempp = p_elements.data();
461  v_t * elemend = elempp + p_elements.size();
462 
463  const MeshBase::const_element_iterator
464  pid_elements_begin = MeshBase::const_element_iterator
465  (elempp, elemend, Predicates::NotNull<v_t *>()),
466  pid_elements_end = MeshBase::const_element_iterator
467  (elemend, elemend, Predicates::NotNull<v_t *>()),
468  active_pid_elements_begin = MeshBase::const_element_iterator
469  (elempp, elemend, Predicates::Active<v_t *>()),
470  active_pid_elements_end = MeshBase::const_element_iterator
471  (elemend, elemend, Predicates::Active<v_t *>());
472 
473  query_ghosting_functors
474  (mesh, p, active_pid_elements_begin,
475  active_pid_elements_end, elements);
476  connect_children(mesh, pid_elements_begin,
477  pid_elements_end, elements);
478  }
479  }
480  }
481 
482  connected_node_set_type connected_nodes;
483  connect_element_dependencies(mesh, elements, connected_nodes);
484 
485  // write the nodal locations
486  this->write_nodes (io, connected_nodes);
487 
488  // write connectivity
489  this->write_connectivity (io, elements);
490 
491  // write remote_elem connectivity
492  this->write_remote_elem (io, elements);
493 
494  // write the boundary condition information
495  this->write_bcs (io, elements, bc_triples);
496 
497  // write the nodeset information
498  this->write_nodesets (io, connected_nodes, bc_tuples);
499 
500  // close it up
501  io.close();
502  }
503 
504  // this->comm().barrier();
505 }
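A write-side usage sketch (given a LibMeshInit object `init`, and assuming `mesh` is a DistributedMesh that every rank has already populated; each rank then writes its own chunk, while a ReplicatedMesh with the default parallel() == false instead produces a single-chunk split under checkpoint.cpr/1/):

libMesh::CheckpointIO cpio (mesh, /*binary_in=*/true);
cpio.parallel() = true;          // one chunk per rank of mesh.comm()
cpio.write("checkpoint.cpr");    // -> checkpoint.cpr/N/header.cpr + split-N-<rank>.cpr on N ranks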
506 
507 void CheckpointIO::write_subdomain_names(Xdr & io) const
508 {
509  {
510  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
511 
512  const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();
513 
514  std::vector<largest_id_type> subdomain_ids; subdomain_ids.reserve(subdomain_map.size());
515  std::vector<std::string> subdomain_names; subdomain_names.reserve(subdomain_map.size());
516 
517  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
518  // return writable references in mesh_base, it's possible for the user to leave some entity names
519  // blank. We can't write those to the XDA file.
520  largest_id_type n_subdomain_names = 0;
521  for (const auto & [id, name] : subdomain_map)
522  if (!name.empty())
523  {
524  n_subdomain_names++;
525  subdomain_ids.push_back(id);
526  subdomain_names.push_back(name);
527  }
528 
529  io.data(n_subdomain_names, "# subdomain id to name map");
530  // Write out the ids and names in two vectors
531  if (n_subdomain_names)
532  {
533  io.data(subdomain_ids);
534  io.data(subdomain_names);
535  }
536  }
537 }
538 
539 
540 
541 void CheckpointIO::write_nodes (Xdr & io,
542  const connected_node_set_type & nodeset) const
543 {
544  largest_id_type n_nodes_here = nodeset.size();
545 
546  io.data(n_nodes_here, "# n_nodes on proc");
547 
548  const bool write_extra_integers = this->version_at_least_1_5();
549  const unsigned int n_extra_integers =
550  write_extra_integers ? MeshOutput<MeshBase>::mesh().n_node_integers() : 0;
551 
552  // Will hold the node id and pid and extra integers
553  std::vector<largest_id_type> id_pid(2 + n_extra_integers);
554 
555  // For the coordinates
556  std::vector<Real> coords(LIBMESH_DIM);
557 
558  for (const auto & node : nodeset)
559  {
560  id_pid[0] = node->id();
561  id_pid[1] = node->processor_id();
562 
563  libmesh_assert_equal_to(n_extra_integers, node->n_extra_integers());
564  for (unsigned int i=0; i != n_extra_integers; ++i)
565  id_pid[2+i] = node->get_extra_integer(i);
566 
567  io.data_stream(id_pid.data(), 2 + n_extra_integers, 2 + n_extra_integers);
568 
569 #ifdef LIBMESH_ENABLE_UNIQUE_ID
570  largest_id_type unique_id = node->unique_id();
571 
572  io.data(unique_id, "# unique id");
573 #endif
574 
575  coords[0] = (*node)(0);
576 
577 #if LIBMESH_DIM > 1
578  coords[1] = (*node)(1);
579 #endif
580 
581 #if LIBMESH_DIM > 2
582  coords[2] = (*node)(2);
583 #endif
584 
585  io.data_stream(coords.data(), LIBMESH_DIM, 3);
586  }
587 }
588 
589 
590 
591 void CheckpointIO::write_connectivity (Xdr & io,
592  const std::set<const Elem *, CompareElemIdsByLevel> & elements) const
593 {
594  libmesh_assert (io.writing());
595 
596  const bool write_extra_integers = this->version_at_least_1_5();
597  const unsigned int n_extra_integers =
598  write_extra_integers ? MeshOutput<MeshBase>::mesh().n_elem_integers() : 0;
599 
600  // Put these out here to reduce memory churn
601  // id type pid subdomain_id parent_id extra_integer_0 ...
602  std::vector<largest_id_type> elem_data(6 + n_extra_integers);
603  std::vector<largest_id_type> conn_data;
604 
605  largest_id_type n_elems_here = elements.size();
606 
607  io.data(n_elems_here, "# number of elements");
608 
609  for (const auto & elem : elements)
610  {
611  unsigned int n_nodes = elem->n_nodes();
612 
613  elem_data[0] = elem->id();
614  elem_data[1] = elem->type();
615  elem_data[2] = elem->processor_id();
616  elem_data[3] = elem->subdomain_id();
617 
618 #ifdef LIBMESH_ENABLE_AMR
619  if (elem->parent() != nullptr)
620  {
621  elem_data[4] = elem->parent()->id();
622  elem_data[5] = elem->parent()->which_child_am_i(elem);
623  }
624  else
625 #endif
626  {
627  elem_data[4] = static_cast<largest_id_type>(-1);
628  elem_data[5] = static_cast<largest_id_type>(-1);
629  }
630 
631  for (unsigned int i=0; i != n_extra_integers; ++i)
632  elem_data[6+i] = elem->get_extra_integer(i);
633 
634  conn_data.resize(n_nodes);
635 
636  for (unsigned int i=0; i<n_nodes; i++)
637  conn_data[i] = elem->node_id(i);
638 
639  io.data_stream(elem_data.data(),
640  cast_int<unsigned int>(elem_data.size()),
641  cast_int<unsigned int>(elem_data.size()));
642 
643 #ifdef LIBMESH_ENABLE_UNIQUE_ID
644  largest_id_type unique_id = elem->unique_id();
645 
646  io.data(unique_id, "# unique id");
647 #endif
648 
649 #ifdef LIBMESH_ENABLE_AMR
650  uint16_t p_level = cast_int<uint16_t>(elem->p_level());
651  io.data(p_level, "# p_level");
652 
653  uint16_t rflag = elem->refinement_flag();
654  io.data(rflag, "# rflag");
655 
656  uint16_t pflag = elem->p_refinement_flag();
657  io.data(pflag, "# pflag");
658 #endif
659  io.data_stream(conn_data.data(),
660  cast_int<unsigned int>(conn_data.size()),
661  cast_int<unsigned int>(conn_data.size()));
662  }
663 }
664 
665 
666 void CheckpointIO::write_remote_elem (Xdr & io,
667  const std::set<const Elem *, CompareElemIdsByLevel> & elements) const
668 {
669  libmesh_assert (io.writing());
670 
671  // Find the remote_elem neighbor and child links
672  std::vector<largest_id_type> elem_ids, parent_ids;
673  std::vector<uint16_t> elem_sides, child_numbers;
674 
675  for (const auto & elem : elements)
676  {
677  for (auto n : elem->side_index_range())
678  {
679  const Elem * neigh = elem->neighbor_ptr(n);
680  if (neigh == remote_elem ||
681  (neigh && !elements.count(neigh)))
682  {
683  elem_ids.push_back(elem->id());
684  elem_sides.push_back(n);
685  }
686  }
687 
688 #ifdef LIBMESH_ENABLE_AMR
689  if (elem->has_children())
690  {
691  for (unsigned short c = 0,
692  nc = cast_int<unsigned short>(elem->n_children());
693  c != nc; ++c)
694  {
695  const Elem * child = elem->child_ptr(c);
696  if (child == remote_elem ||
697  (child && !elements.count(child)))
698  {
699  parent_ids.push_back(elem->id());
700  child_numbers.push_back(c);
701  }
702  }
703  }
704 #endif
705  }
706 
707  io.data(elem_ids, "# remote neighbor elem_ids");
708  io.data(elem_sides, "# remote neighbor elem_sides");
709  io.data(parent_ids, "# remote child parent_ids");
710  io.data(child_numbers, "# remote child_numbers");
711 }
712 
713 
714 
715 void CheckpointIO::write_bcs (Xdr & io,
716  const std::set<const Elem *, CompareElemIdsByLevel> & elements,
717  const std::vector<std::tuple<dof_id_type, unsigned short int, boundary_id_type>> & bc_triples) const
718 {
719  libmesh_assert (io.writing());
720 
721  // Build a list of (elem, side, bc) tuples.
722  std::size_t bc_size = bc_triples.size();
723 
724  std::vector<largest_id_type> element_id_list;
725  std::vector<uint16_t> side_list;
726  std::vector<largest_id_type> bc_id_list;
727 
728  element_id_list.reserve(bc_size);
729  side_list.reserve(bc_size);
730  bc_id_list.reserve(bc_size);
731 
732  std::unordered_set<dof_id_type> elems;
733  for (auto & e : elements)
734  elems.insert(e->id());
735 
736  for (const auto & t : bc_triples)
737  if (elems.count(std::get<0>(t)))
738  {
739  element_id_list.push_back(std::get<0>(t));
740  side_list.push_back(std::get<1>(t));
741  bc_id_list.push_back(std::get<2>(t));
742  }
743 
744 
745  io.data(element_id_list, "# element ids for bcs");
746  io.data(side_list, "# sides of elements for bcs");
747  io.data(bc_id_list, "# bc ids");
748 }
749 
750 
751 
752 void CheckpointIO::write_nodesets (Xdr & io,
753  const connected_node_set_type & nodeset,
754  const std::vector<std::tuple<dof_id_type, boundary_id_type>> & bc_tuples) const
755 {
756  libmesh_assert (io.writing());
757 
758  // convenient reference to our mesh
759  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
760 
761  // Build a list of (node, bc) tuples
762  std::size_t nodeset_size = bc_tuples.size();
763 
764  std::vector<largest_id_type> node_id_list;
765  std::vector<largest_id_type> bc_id_list;
766 
767  node_id_list.reserve(nodeset_size);
768  bc_id_list.reserve(nodeset_size);
769 
770  for (const auto & t : bc_tuples)
771  if (nodeset.count(mesh.node_ptr(std::get<0>(t))))
772  {
773  node_id_list.push_back(std::get<0>(t));
774  bc_id_list.push_back(std::get<1>(t));
775  }
776 
777  io.data(node_id_list, "# node id list");
778  io.data(bc_id_list, "# nodeset bc id list");
779 }
780 
781 
782 
783 void CheckpointIO::write_bc_names (Xdr & io, const BoundaryInfo & info, bool is_sideset) const
784 {
785  const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
786  info.get_sideset_name_map() : info.get_nodeset_name_map();
787 
788  std::vector<largest_id_type> boundary_ids; boundary_ids.reserve(boundary_map.size());
789  std::vector<std::string> boundary_names; boundary_names.reserve(boundary_map.size());
790 
791  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
792  // return writable references in boundary_info, it's possible for the user to leave some entity names
793  // blank. We can't write those to the XDA file.
794  largest_id_type n_boundary_names = 0;
795  for (const auto & [id, name] : boundary_map)
796  if (!name.empty())
797  {
798  n_boundary_names++;
799  boundary_ids.push_back(id);
800  boundary_names.push_back(name);
801  }
802 
803  if (is_sideset)
804  io.data(n_boundary_names, "# sideset id to name map");
805  else
806  io.data(n_boundary_names, "# nodeset id to name map");
807  // Write out the ids and names in two vectors
808  if (n_boundary_names)
809  {
810  io.data(boundary_ids);
811  io.data(boundary_names);
812  }
813 }
814 
815 void CheckpointIO::read (const std::string & input_name)
816 {
817  LOG_SCOPE("read()","CheckpointIO");
818 
819  MeshBase & mesh = MeshInput<MeshBase>::mesh();
820 
821  libmesh_assert(!mesh.n_elem());
822 
823  header_id_type data_size;
824  processor_id_type input_n_procs = select_split_config(input_name, data_size);
825  auto header_name = header_file(input_name, input_n_procs);
826  bool input_parallel = input_n_procs > 0;
827 
828  // If this is a serial read then we're going to only read the mesh
829  // on processor 0, then broadcast it
830  if ((input_parallel && !mesh.is_replicated()) || mesh.processor_id() == 0)
831  {
832  // If we're trying to read a parallel checkpoint file on a
833  // replicated mesh, we'll read every file on processor 0 so we
834  // can broadcast it later. If we're on a distributed mesh then
835  // we'll read every id to its own processor and we'll "wrap
836  // around" with any ids that exceed our processor count.
837  const processor_id_type begin_proc_id =
838  (input_parallel && !mesh.is_replicated()) ?
839  mesh.processor_id() : 0;
840  const processor_id_type stride =
841  (input_parallel && !mesh.is_replicated()) ?
842  mesh.n_processors() : 1;
843 
844  for (processor_id_type proc_id = begin_proc_id; proc_id < input_n_procs;
845  proc_id = cast_int<processor_id_type>(proc_id + stride))
846  {
847  auto file_name = split_file(input_name, input_n_procs, proc_id);
848 
849  {
850  std::ifstream in (file_name.c_str());
851 
852  libmesh_error_msg_if(!in.good(), "ERROR: cannot locate specified file:\n\t" << file_name);
853  }
854 
855  // Do we expect all our files' remote_elem entries to really
856  // be remote? Only if we're not reading multiple input
857  // files on the same processor.
858  const bool expect_all_remote =
859  (input_n_procs <= mesh.n_processors() &&
860  !mesh.is_replicated());
861 
862  Xdr io (file_name, this->binary() ? DECODE : READ);
863 
864  switch (data_size) {
865  case 2:
866  this->read_subfile<uint16_t>(io, expect_all_remote);
867  break;
868  case 4:
869  this->read_subfile<uint32_t>(io, expect_all_remote);
870  break;
871  case 8:
872  this->read_subfile<uint64_t>(io, expect_all_remote);
873  break;
874  default:
875  libmesh_error();
876  }
877 
878  io.close();
879  }
880  }
881 
882  // If the mesh was only read on processor 0 then we need to broadcast it
883  if (mesh.is_replicated())
884  MeshCommunication().broadcast(mesh);
885  // If the mesh is really distributed then we need to make sure it
886  // knows that
887  else if (mesh.n_processors() > 1)
888  mesh.set_distributed();
889 
890  // If the mesh isn't getting even critical partitioning then we
891  // should update cached data from the partitioning we just read in
892  if (mesh.skip_partitioning())
893  {
894  mesh.recalculate_n_partitions();
895  mesh.update_post_partitioning();
896  }
897 }
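A matching read-side sketch (given a LibMeshInit object `init`, and assuming the split written above; current_n_processors() only needs to be set when the number of reading ranks differs from the number of split files, since the wrap-around logic in read_nodes()/read_connectivity() remaps the stored processor ids):

libMesh::DistributedMesh mesh (init.comm());
libMesh::CheckpointIO cpio (mesh, /*binary_in=*/true);
cpio.current_n_processors() = 4;   // look for the 4-way split configuration
cpio.read("checkpoint.cpr");
mesh.prepare_for_use();            // the usual post-read preparation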
898 
899 
900 
901 template <typename file_id_type>
902 file_id_type CheckpointIO::read_header (const std::string & name)
903 {
904  MeshBase & mesh = MeshInput<MeshBase>::mesh();
905 
906  // Hack for codes which don't look at all elem dimensions
907  uint16_t mesh_dimension;
908 
909  // Will this be a parallel input file? With how many processors? Stay tuned!
910  uint16_t input_parallel;
911  file_id_type input_n_procs;
912 
913  std::vector<std::string> node_integer_names, elem_integer_names;
914 
915  // We'll read the header file on processor 0 and broadcast.
916  if (this->processor_id() == 0)
917  {
918  Xdr io (name, this->binary() ? DECODE : READ);
919 
920  // read the version, but don't care about it
921  std::string input_version;
922  io.data(input_version);
923 
924  // read the data type, don't care about it this time
925  header_id_type data_size;
926  io.data (data_size);
927 
928  // read the dimension
929  io.data (mesh_dimension);
930 
931  // Read whether or not this is a parallel file
932  io.data(input_parallel);
933 
934  // With how many processors?
935  if (input_parallel)
936  io.data(input_n_procs);
937 
938  // read subdomain names
939  this->read_subdomain_names<file_id_type>(io);
940 
941  // read boundary names
942  BoundaryInfo & boundary_info = mesh.get_boundary_info();
943 
944  this->read_bc_names<file_id_type>(io, boundary_info, true); // sideset names
945  this->read_bc_names<file_id_type>(io, boundary_info, false); // nodeset names
946 
947  // read extra integer names?
948  std::swap(input_version, this->version());
949  const bool read_extra_integers = this->version_at_least_1_5();
950  std::swap(input_version, this->version());
951 
952  if (read_extra_integers)
953  this->read_integers_names<file_id_type>
954  (io, node_integer_names, elem_integer_names);
955  }
956 
957  // broadcast data from processor 0, set values everywhere
958  this->comm().broadcast(mesh_dimension);
959  mesh.set_mesh_dimension(cast_int<unsigned char>(mesh_dimension));
960 
961  this->comm().broadcast(input_parallel);
962 
963  if (input_parallel)
964  this->comm().broadcast(input_n_procs);
965  else
966  input_n_procs = 1;
967 
968  std::map<subdomain_id_type, std::string> & subdomain_map =
969  mesh.set_subdomain_name_map();
970  this->comm().broadcast(subdomain_map);
971 
972  BoundaryInfo & boundary_info = mesh.get_boundary_info();
973  this->comm().broadcast(boundary_info.set_sideset_name_map());
974  this->comm().broadcast(boundary_info.set_nodeset_name_map());
975 
976  this->comm().broadcast(node_integer_names);
977  this->comm().broadcast(elem_integer_names);
978 
979  for (auto & int_name : node_integer_names)
980  mesh.add_node_integer(int_name);
981 
982  for (auto & int_name : elem_integer_names)
983  mesh.add_elem_integer(int_name);
984 
985  return input_parallel ? input_n_procs : 0;
986 }
987 
988 
989 
990 template <typename file_id_type>
991 void CheckpointIO::read_subfile (Xdr & io, bool expect_all_remote)
992 {
993  // read the nodal locations
994  this->read_nodes<file_id_type> (io);
995 
996  // read connectivity
997  this->read_connectivity<file_id_type> (io);
998 
999  // read remote_elem connectivity
1000  this->read_remote_elem<file_id_type> (io, expect_all_remote);
1001 
1002  // read the boundary conditions
1003  this->read_bcs<file_id_type> (io);
1004 
1005  // read the nodesets
1006  this->read_nodesets<file_id_type> (io);
1007 }
1008 
1009 
1010 
1011 template <typename file_id_type>
1012 void CheckpointIO::read_subdomain_names(Xdr & io)
1013 {
1014  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1015 
1016  std::map<subdomain_id_type, std::string> & subdomain_map =
1017  mesh.set_subdomain_name_map();
1018 
1019  std::vector<file_id_type> subdomain_ids;
1020  subdomain_ids.reserve(subdomain_map.size());
1021 
1022  std::vector<std::string> subdomain_names;
1023  subdomain_names.reserve(subdomain_map.size());
1024 
1025  file_id_type n_subdomain_names = 0;
1026  io.data(n_subdomain_names, "# subdomain id to name map");
1027 
1028  if (n_subdomain_names)
1029  {
1030  io.data(subdomain_ids);
1031  io.data(subdomain_names);
1032 
1033  for (auto i : index_range(subdomain_ids))
1034  subdomain_map[cast_int<subdomain_id_type>(subdomain_ids[i])] =
1035  subdomain_names[i];
1036  }
1037 }
1038 
1039 
1040 
1041 template <typename file_id_type>
1042 void CheckpointIO::read_nodes(Xdr & io)
1043 {
1044  // convenient reference to our mesh
1045  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1046 
1047  file_id_type n_nodes_here;
1048  io.data(n_nodes_here, "# n_nodes on proc");
1049 
1050  const bool read_extra_integers = this->version_at_least_1_5();
1051 
1052  const unsigned int n_extra_integers =
1053  read_extra_integers ? mesh.n_node_integers() : 0;
1054 
1055  // Will hold the node id and pid and extra integers
1056  std::vector<file_id_type> id_pid(2 + n_extra_integers);
1057 
1058  // For the coordinates
1059  std::vector<Real> coords(LIBMESH_DIM);
1060 
1061  for (unsigned int i=0; i<n_nodes_here; i++)
1062  {
1063  io.data_stream(id_pid.data(), 2 + n_extra_integers, 2 + n_extra_integers);
1064 
1065 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1066  file_id_type unique_id = 0;
1067  io.data(unique_id, "# unique id");
1068 #endif
1069 
1070  io.data_stream(coords.data(), LIBMESH_DIM, LIBMESH_DIM);
1071 
1072  Point p;
1073  p(0) = coords[0];
1074 
1075 #if LIBMESH_DIM > 1
1076  p(1) = coords[1];
1077 #endif
1078 
1079 #if LIBMESH_DIM > 2
1080  p(2) = coords[2];
1081 #endif
1082 
1083  const dof_id_type id = cast_int<dof_id_type>(id_pid[0]);
1084 
1085  // "Wrap around" if we see more processors than we're using.
1086  processor_id_type pid =
1087  cast_int<processor_id_type>(id_pid[1] % mesh.n_processors());
1088 
1089  // If we already have this node (e.g. from another file, when
1090  // reading multiple distributed CheckpointIO files into a
1091  // ReplicatedMesh) then we don't want to add it again (because
1092  // ReplicatedMesh can't handle that) but we do want to assert
1093  // consistency between what we're reading and what we have.
1094  const Node * old_node = mesh.query_node_ptr(id);
1095 
1096  if (old_node)
1097  {
1098  libmesh_assert_equal_to(pid, old_node->processor_id());
1099 
1100  libmesh_assert_equal_to(n_extra_integers, old_node->n_extra_integers());
1101 #ifndef NDEBUG
1102  for (unsigned int ei=0; ei != n_extra_integers; ++ei)
1103  {
1104  const dof_id_type extra_int = cast_int<dof_id_type>(id_pid[2+ei]);
1105  libmesh_assert_equal_to(extra_int, old_node->get_extra_integer(ei));
1106  }
1107 #endif
1108 
1109 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1110  libmesh_assert_equal_to(unique_id, old_node->unique_id());
1111 #endif
1112  }
1113  else
1114  {
1115  Node * node =
1116  mesh.add_point(p, id, pid);
1117 
1118 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1119  node->set_unique_id(unique_id);
1120 #endif
1121 
1122  libmesh_assert_equal_to(n_extra_integers, node->n_extra_integers());
1123 
1124  for (unsigned int ei=0; ei != n_extra_integers; ++ei)
1125  {
1126  const dof_id_type extra_int = cast_int<dof_id_type>(id_pid[2+ei]);
1127  node->set_extra_integer(ei, extra_int);
1128  }
1129  }
1130  }
1131 }
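For illustration of the wrap-around above: reading an 8-way split on 3 processors maps the stored processor ids 0..7 to 0, 1, 2, 0, 1, 2, 0, 1, so every node receives a valid owner even though the split was written for more processors than are currently running.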
1132 
1133 
1134 
1135 template <typename file_id_type>
1136 void CheckpointIO::read_connectivity (Xdr & io)
1137 {
1138  // convenient reference to our mesh
1139  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1140 
1141  const bool read_extra_integers = this->version_at_least_1_5();
1142 
1143  const unsigned int n_extra_integers =
1144  read_extra_integers ? mesh.n_elem_integers() : 0;
1145 
1146  file_id_type n_elems_here;
1147  io.data(n_elems_here);
1148 
1149  // Keep track of the highest dimensional element we've added to the mesh
1150  unsigned int highest_elem_dim = 1;
1151 
1152  // RHS: Originally we used invalid_processor_id as a "no parent" tag
1153  // number, because I'm an idiot. Let's try to support broken files
1154  // as much as possible.
1155  bool file_is_broken = false;
1156 
1157  for (unsigned int i=0; i<n_elems_here; i++)
1158  {
1159  // id type pid subdomain_id parent_id
1160  std::vector<file_id_type> elem_data(6 + n_extra_integers);
1161  io.data_stream
1162  (elem_data.data(), cast_int<unsigned int>(elem_data.size()),
1163  cast_int<unsigned int>(elem_data.size()));
1164 
1165 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1166  file_id_type unique_id = 0;
1167  io.data(unique_id, "# unique id");
1168 #endif
1169 
1170 #ifdef LIBMESH_ENABLE_AMR
1171  uint16_t p_level = 0;
1172  io.data(p_level, "# p_level");
1173 
1174  uint16_t rflag, pflag;
1175  io.data(rflag, "# rflag");
1176  io.data(pflag, "# pflag");
1177 #endif
1178 
1179  unsigned int n_nodes = Elem::type_to_n_nodes_map[elem_data[1]];
1180  if (n_nodes == invalid_uint)
1181  libmesh_not_implemented_msg("Support for Polygons/Polyhedra not yet implemented");
1182 
1183  // Snag the node ids this element was connected to
1184  std::vector<file_id_type> conn_data(n_nodes);
1185  io.data_stream
1186  (conn_data.data(), cast_int<unsigned int>(conn_data.size()),
1187  cast_int<unsigned int>(conn_data.size()));
1188 
1189  const dof_id_type id =
1190  cast_int<dof_id_type> (elem_data[0]);
1191  const ElemType elem_type =
1192  static_cast<ElemType> (elem_data[1]);
1193  const processor_id_type proc_id =
1194  cast_int<processor_id_type>
1195  (elem_data[2] % mesh.n_processors());
1196  const subdomain_id_type subdomain_id =
1197  cast_int<subdomain_id_type>(elem_data[3]);
1198 
1199  // Old broken files used processor_id_type(-1)...
1200  // But we *know* our first element will be level 0
1201  if (i == 0 && elem_data[4] == 65535)
1202  file_is_broken = true;
1203 
1204  // On a broken file we can't tell whether a parent of 65535 is a
1205  // null parent or an actual parent of 65535. Assuming the
1206  // former will cause less breakage.
1207  Elem * parent =
1208  (elem_data[4] == static_cast<largest_id_type>(-1) ||
1209  (file_is_broken && elem_data[4] == 65535)) ?
1210  nullptr : mesh.elem_ptr(cast_int<dof_id_type>(elem_data[4]));
1211 
1212  const unsigned short int child_num =
1213  (elem_data[5] == static_cast<largest_id_type>(-1) ||
1214  (file_is_broken && elem_data[5] == 65535)) ?
1215  static_cast<unsigned short>(-1) :
1216  cast_int<unsigned short>(elem_data[5]);
1217 
1218  if (!parent)
1219  libmesh_assert_equal_to
1220  (child_num, static_cast<unsigned short>(-1));
1221 
1222  Elem * old_elem = mesh.query_elem_ptr(id);
1223 
1224  // If we already have this element (e.g. from another file,
1225  // when reading multiple distributed CheckpointIO files into
1226  // a ReplicatedMesh) then we don't want to add it again
1227  // (because ReplicatedMesh can't handle that) but we do want
1228  // to assert consistency between what we're reading and what
1229  // we have.
1230  if (old_elem)
1231  {
1232  libmesh_assert_equal_to(elem_type, old_elem->type());
1233  libmesh_assert_equal_to(proc_id, old_elem->processor_id());
1234  libmesh_assert_equal_to(subdomain_id, old_elem->subdomain_id());
1235  if (parent)
1236  libmesh_assert_equal_to(parent, old_elem->parent());
1237  else
1238  libmesh_assert(!old_elem->parent());
1239 
1240  libmesh_assert_equal_to(n_extra_integers, old_elem->n_extra_integers());
1241 #ifndef NDEBUG
1242  for (unsigned int ei=0; ei != n_extra_integers; ++ei)
1243  {
1244  const dof_id_type extra_int = cast_int<dof_id_type>(elem_data[6+ei]);
1245  libmesh_assert_equal_to(extra_int, old_elem->get_extra_integer(ei));
1246  }
1247 #endif
1248 
1249  libmesh_assert_equal_to(old_elem->n_nodes(), conn_data.size());
1250 
1251  for (unsigned int n=0,
1252  n_conn = cast_int<unsigned int>(conn_data.size());
1253  n != n_conn; n++)
1254  libmesh_assert_equal_to
1255  (old_elem->node_id(n),
1256  cast_int<dof_id_type>(conn_data[n]));
1257  }
1258  else
1259  {
1260  // Create the element
1261  auto elem = Elem::build(elem_type, parent);
1262 
1263 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1264  elem->set_unique_id(unique_id);
1265 #endif
1266 
1267  if (elem->dim() > highest_elem_dim)
1268  highest_elem_dim = elem->dim();
1269 
1270  elem->set_id() = id;
1271  elem->processor_id() = proc_id;
1272  elem->subdomain_id() = subdomain_id;
1273 
1274 #ifdef LIBMESH_ENABLE_AMR
1275  elem->hack_p_level(p_level);
1276 
1277  elem->set_refinement_flag (cast_int<Elem::RefinementState>(rflag));
1278  elem->set_p_refinement_flag(cast_int<Elem::RefinementState>(pflag));
1279 
1280  // Set parent connections
1281  if (parent)
1282  {
1283  // We must specify a child_num, because we will have
1284  // skipped adding any preceding remote_elem children
1285  parent->add_child(elem.get(), child_num);
1286  }
1287 #else
1288  libmesh_ignore(child_num);
1289 #endif
1290 
1291  libmesh_assert(elem->n_nodes() == conn_data.size());
1292 
1293  // Connect all the nodes to this element
1294  for (unsigned int n=0,
1295  n_conn = cast_int<unsigned int>(conn_data.size());
1296  n != n_conn; n++)
1297  elem->set_node(n,
1298  mesh.node_ptr(cast_int<dof_id_type>(conn_data[n])));
1299 
1300  Elem * added_elem = mesh.add_elem(std::move(elem));
1301 
1302  libmesh_assert_equal_to(n_extra_integers, added_elem->n_extra_integers());
1303  for (unsigned int ei=0; ei != n_extra_integers; ++ei)
1304  {
1305  const dof_id_type extra_int = cast_int<dof_id_type>(elem_data[6+ei]);
1306  added_elem->set_extra_integer(ei, extra_int);
1307  }
1308  }
1309  }
1310 
1311  mesh.set_mesh_dimension(cast_int<unsigned char>(highest_elem_dim));
1312 }
1313 
1314 
1315 template <typename file_id_type>
1316 void CheckpointIO::read_remote_elem (Xdr & io, bool libmesh_dbg_var(expect_all_remote))
1317 {
1318  // convenient reference to our mesh
1319  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1320 
1321  // Find the remote_elem neighbor links
1322  std::vector<file_id_type> elem_ids;
1323  std::vector<uint16_t> elem_sides;
1324 
1325  io.data(elem_ids, "# remote neighbor elem_ids");
1326  io.data(elem_sides, "# remote neighbor elem_sides");
1327 
1328  libmesh_assert_equal_to(elem_ids.size(), elem_sides.size());
1329 
1330  for (auto i : index_range(elem_ids))
1331  {
1332  Elem & elem = mesh.elem_ref(cast_int<dof_id_type>(elem_ids[i]));
1333  if (!elem.neighbor_ptr(elem_sides[i]))
1334  elem.set_neighbor(elem_sides[i],
1335  const_cast<RemoteElem *>(remote_elem));
1336  else
1337  libmesh_assert(!expect_all_remote);
1338  }
1339 
1340  // Find the remote_elem children links
1341  std::vector<file_id_type> parent_ids;
1342  std::vector<uint16_t> child_numbers;
1343 
1344  io.data(parent_ids, "# remote child parent_ids");
1345  io.data(child_numbers, "# remote child_numbers");
1346 
1347 #ifdef LIBMESH_ENABLE_AMR
1348  for (auto i : index_range(parent_ids))
1349  {
1350  Elem & elem = mesh.elem_ref(cast_int<dof_id_type>(parent_ids[i]));
1351 
1352  // We'd like to assert that no child pointer already exists to
1353  // be overwritten by remote_elem, but Elem doesn't actually have
1354  // an API that will return a child pointer without asserting
1355  // that it isn't nullptr.
1356  const Elem * child = elem.raw_child_ptr(child_numbers[i]);
1357 
1358  if (!child)
1359  elem.add_child(const_cast<RemoteElem *>(remote_elem),
1360  child_numbers[i]);
1361  else
1362  libmesh_assert(!expect_all_remote);
1363  }
1364 #endif
1365 }
1366 
1367 
1368 
1369 template <typename file_id_type>
1370 void CheckpointIO::read_bcs(Xdr & io)
1371 {
1372  // convenient reference to our mesh
1373  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1374 
1375  // and our boundary info object
1376  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1377 
1378  std::vector<file_id_type> element_id_list;
1379  std::vector<uint16_t> side_list;
1380  std::vector<file_id_type> bc_id_list;
1381 
1382  io.data(element_id_list, "# element ids for bcs");
1383  io.data(side_list, "# sides of elements for bcs");
1384  io.data(bc_id_list, "# bc ids");
1385 
1386  for (auto i : index_range(element_id_list))
1387  boundary_info.add_side
1388  (cast_int<dof_id_type>(element_id_list[i]), side_list[i],
1389  cast_int<boundary_id_type>(bc_id_list[i]));
1390 }
1391 
1392 
1393 
1394 template <typename file_id_type>
1395 void CheckpointIO::read_nodesets(Xdr & io)
1396 {
1397  // convenient reference to our mesh
1398  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1399 
1400  // and our boundary info object
1401  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1402 
1403  std::vector<file_id_type> node_id_list;
1404  std::vector<file_id_type> bc_id_list;
1405 
1406  io.data(node_id_list, "# node id list");
1407  io.data(bc_id_list, "# nodeset bc id list");
1408 
1409  for (auto i : index_range(node_id_list))
1410  boundary_info.add_node
1411  (cast_int<dof_id_type>(node_id_list[i]),
1412  cast_int<boundary_id_type>(bc_id_list[i]));
1413 }
1414 
1415 
1416 
1417 template <typename file_id_type>
1418 void CheckpointIO::read_bc_names(Xdr & io, BoundaryInfo & info, bool is_sideset)
1419 {
1420  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
1421  info.set_sideset_name_map() : info.set_nodeset_name_map();
1422 
1423  std::vector<file_id_type> boundary_ids;
1424  std::vector<std::string> boundary_names;
1425 
1426  file_id_type n_boundary_names = 0;
1427 
1428  if (is_sideset)
1429  io.data(n_boundary_names, "# sideset id to name map");
1430  else
1431  io.data(n_boundary_names, "# nodeset id to name map");
1432 
1433  if (n_boundary_names)
1434  {
1435  io.data(boundary_ids);
1436  io.data(boundary_names);
1437  }
1438 
1439  // Add them back into the map
1440  for (auto i : index_range(boundary_ids))
1441  boundary_map[cast_int<boundary_id_type>(boundary_ids[i])] =
1442  boundary_names[i];
1443 }
1444 
1445 
1446 template <typename file_id_type>
1447 void CheckpointIO::read_integers_names
1448  (Xdr & io,
1449  std::vector<std::string> & node_integer_names,
1450  std::vector<std::string> & elem_integer_names)
1451 {
1452  file_id_type n_node_integers, n_elem_integers;
1453 
1454  io.data(n_node_integers, "# n_extra_integers per node");
1455  io.data(node_integer_names);
1456  io.data(n_elem_integers, "# n_extra_integers per elem");
1457  io.data(elem_integer_names);
1458 }
1459 
1460 
1461 unsigned int CheckpointIO::n_active_levels_in (MeshBase::const_element_iterator begin,
1462  MeshBase::const_element_iterator end) const
1463 {
1464  unsigned int max_level = 0;
1465 
1466  for (const auto & elem : as_range(begin, end))
1467  max_level = std::max(elem->level(), max_level);
1468 
1469  return max_level + 1;
1470 }
1471 
1472 } // namespace libMesh