xdr_io.C
// The libMesh Finite Element Library.
// Copyright (C) 2002-2025 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA



// libMesh includes
#include "libmesh/xdr_io.h"
#include "libmesh/boundary_info.h"
#include "libmesh/elem.h"
#include "libmesh/enum_xdr_mode.h"
#include "libmesh/int_range.h"
#include "libmesh/libmesh_logging.h"
#include "libmesh/mesh_base.h"
#include "libmesh/mesh_tools.h"
#include "libmesh/node.h"
#include "libmesh/partitioner.h"
#include "libmesh/utility.h"
#include "libmesh/xdr_cxx.h"

// TIMPI includes
#include "timpi/parallel_sync.h"

// C++ includes
#include <iostream>
#include <iomanip>
#include <cstdio>
#include <vector>
#include <string>
#include <tuple>
namespace libMesh
{

//-----------------------------------------------
// anonymous namespace for implementation details
namespace {

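// A minimal std::is_same work-alike: value is true only when both
// template arguments are the same type.  read() uses this below to
// detect systems where uint64_t and unsigned long are the same type.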
template <class T, class U>
struct libmesh_type_is_same {
  static const bool value = false;
};

template <class T>
struct libmesh_type_is_same<T, T> {
  static const bool value = true;
};

}


// ------------------------------------------------------------
// XdrIO static data
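// io_blksize caps how many nodes or elements are moved per chunk in the
// block-based I/O loops below, bounding the buffer sizes processor 0
// must hold at any one time.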
const std::size_t XdrIO::io_blksize = 128000;



// ------------------------------------------------------------
// XdrIO members
XdrIO::XdrIO (MeshBase & mesh, const bool binary_in) :
  MeshInput<MeshBase> (mesh, /* is_parallel_format = */ true),
  MeshOutput<MeshBase>(mesh, /* is_parallel_format = */ true),
  ParallelObject      (mesh),
  _binary             (binary_in),
  _legacy             (false),
  _write_serial       (false),
  _write_parallel     (false),
#ifdef LIBMESH_ENABLE_UNIQUE_ID
  _write_unique_id    (true),
#else
  _write_unique_id    (false),
#endif
  _field_width        (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
  _version            ("libMesh-1.8.0"),
  _bc_file_name       ("n/a"),
  _partition_map_file ("n/a"),
  _subdomain_map_file ("n/a"),
  _p_level_file       ("n/a")
{
}


XdrIO::XdrIO (const MeshBase & mesh, const bool binary_in) :
  MeshInput<MeshBase> (), // write-only
  MeshOutput<MeshBase>(mesh, /* is_parallel_format = */ true),
  ParallelObject      (mesh),
  _binary             (binary_in),
  _legacy             (false),
  _write_serial       (false),
  _write_parallel     (false),
#ifdef LIBMESH_ENABLE_UNIQUE_ID
  _write_unique_id    (true),
#else
  _write_unique_id    (false),
#endif
  _field_width        (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
  _version            ("libMesh-1.8.0"),
  _bc_file_name       ("n/a"),
  _partition_map_file ("n/a"),
  _subdomain_map_file ("n/a"),
  _p_level_file       ("n/a")
{
}


XdrIO::~XdrIO () = default;


void XdrIO::write (const std::string & name)
{
  libmesh_error_msg_if(this->legacy(), "We don't support writing parallel files in the legacy format.");

  Xdr io ((this->processor_id() == 0) ? name : "", this->binary() ? ENCODE : WRITE);

  LOG_SCOPE("write()","XdrIO");

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  new_header_id_type n_elem = mesh.n_elem();
  new_header_id_type max_node_id = mesh.max_node_id();

  new_header_id_type n_side_bcs = mesh.get_boundary_info().n_boundary_conds();
  new_header_id_type n_edge_bcs = mesh.get_boundary_info().n_edge_conds();
  new_header_id_type n_shellface_bcs = mesh.get_boundary_info().n_shellface_conds();
  new_header_id_type n_nodesets = mesh.get_boundary_info().n_nodeset_conds();
  unsigned int n_p_levels = MeshTools::n_p_levels (mesh);
  new_header_id_type n_elem_integers = mesh.n_elem_integers();
  new_header_id_type n_node_integers = mesh.n_node_integers();
  std::vector<dof_id_type> elemset_codes = mesh.get_elemset_codes();

  bool write_parallel_files = this->write_parallel();

  //-------------------------------------------------------------
  // For all the optional files -- the default file name is "n/a".
  // However, the user may specify an optional external file.

  // If there are BCs and the user has not already provided a
  // file name then write to "."
  if ((n_side_bcs || n_edge_bcs || n_shellface_bcs || n_nodesets) &&
      this->boundary_condition_file_name() == "n/a")
    this->boundary_condition_file_name() = ".";

  // If there are subdomains and the user has not specified an
  // external file then write the subdomain mapping to the default file "."
  if ((mesh.n_subdomains() > 0) &&
      (this->subdomain_map_file_name() == "n/a"))
    this->subdomain_map_file_name() = ".";

  // In general we don't write the partition information.

  // If we have p levels and the user has not already provided
  // a file name then write to "."
  if ((n_p_levels > 1) &&
      (this->polynomial_level_file_name() == "n/a"))
    this->polynomial_level_file_name() = ".";

  // write the header
  if (this->processor_id() == 0)
    {
      std::string full_ver = this->version() + (write_parallel_files ? " parallel" : "");
      io.data (full_ver);

      io.data (n_elem, "# number of elements");
      io.data (max_node_id, "# number of nodes"); // We'll write invalid coords into gaps

      io.data (this->boundary_condition_file_name(), "# boundary condition specification file");
      io.data (this->subdomain_map_file_name(), "# subdomain id specification file");
      io.data (this->partition_map_file_name(), "# processor id specification file");
      io.data (this->polynomial_level_file_name(), "# p-level specification file");

      // Version 0.9.2+ introduces sizes for each type
      new_header_id_type write_size = sizeof(xdr_id_type), zero_size = 0;

      const bool
        write_p_level      = ("." == this->polynomial_level_file_name()),
        write_partitioning = ("." == this->partition_map_file_name()),
        write_subdomain_id = ("." == this->subdomain_map_file_name()),
        write_bcs          = ("." == this->boundary_condition_file_name());

      io.data (write_size, "# type size");
      io.data (_write_unique_id ? write_size : zero_size, "# uid size");
      io.data (write_partitioning ? write_size : zero_size, "# pid size");
      io.data (write_subdomain_id ? write_size : zero_size, "# sid size");
      io.data (write_p_level ? write_size : zero_size, "# p-level size");
      // Boundary Condition sizes
      io.data (write_bcs ? write_size : zero_size, "# eid size");  // elem id
      io.data (write_bcs ? write_size : zero_size, "# side size"); // side number
      io.data (write_bcs ? write_size : zero_size, "# bid size");  // boundary id

      // Write the data size for extra integers stored on the mesh.  Like
      // everything else in the header, they will be of size write_size.
      io.data((n_elem_integers || n_node_integers) ? write_size : zero_size, "# extra integer size");

      // Write the names of the extra node integers (see also: CheckpointIO).
      std::vector<std::string> node_integer_names;
      for (unsigned int i=0; i != n_node_integers; ++i)
        node_integer_names.push_back(mesh.get_node_integer_name(i));
      io.data(node_integer_names, "# node integer names");

      // Write the names of the extra elem integers (see also: CheckpointIO).
      std::vector<std::string> elem_integer_names;
      for (unsigned int i=0; i != n_elem_integers; ++i)
        elem_integer_names.push_back(mesh.get_elem_integer_name(i));
      io.data(elem_integer_names, "# elem integer names");

      // Write the vector of elemset codes to the header (this will
      // also write the size of the vector, which is the number of
      // elemset codes).
      io.data(elemset_codes, "# elemset codes");

      // For each elemset code, write out the associated elemset ids
      MeshBase::elemset_type id_set_to_fill;
      for (const auto & elemset_code : elemset_codes)
        {
          mesh.get_elemsets(elemset_code, id_set_to_fill);

          // Transfer elemset ids to vector for writing
          std::vector<dof_id_type> elemset_id_vec(id_set_to_fill.begin(), id_set_to_fill.end());

          // Write vector of elemset ids to file with comment
          std::string comment_string = "# elemset ids for elemset code " + std::to_string(elemset_code);
          io.data(elemset_id_vec, comment_string);
        }
    }

  if (write_parallel_files)
    {
      // Parallel xdr mesh files aren't implemented yet; until they
      // are we'll just warn the user and write a serial file.
      libMesh::out << "Warning! Parallel xda/xdr is not yet implemented.\n";
      libMesh::out << "Writing a serialized file instead." << std::endl;

      // write subdomain names
      this->write_serialized_subdomain_names(io);

      // write connectivity
      this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), n_elem_integers);

      // write the nodal locations
      this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id), n_node_integers);

      // write the side boundary condition information
      this->write_serialized_side_bcs (io, n_side_bcs);

      // write the nodeset information
      this->write_serialized_nodesets (io, n_nodesets);

      // write the edge boundary condition information
      this->write_serialized_edge_bcs (io, n_edge_bcs);

      // write the "shell face" boundary condition information
      this->write_serialized_shellface_bcs (io, n_shellface_bcs);
    }
  else
    {
      // write subdomain names
      this->write_serialized_subdomain_names(io);

      // write connectivity
      this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), n_elem_integers);

      // write the nodal locations
      this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id), n_node_integers);

      // write the side boundary condition information
      this->write_serialized_side_bcs (io, n_side_bcs);

      // write the nodeset information
      this->write_serialized_nodesets (io, n_nodesets);

      // write the edge boundary condition information
      this->write_serialized_edge_bcs (io, n_edge_bcs);

      // write the "shell face" boundary condition information
      this->write_serialized_shellface_bcs (io, n_shellface_bcs);
    }

  // pause all processes until the writing ends -- this will
  // protect against the pathological case where a write is
  // followed immediately by a read.  The write must be
  // guaranteed to complete first.
  io.close();
  this->comm().barrier();
}


void XdrIO::write_serialized_subdomain_names(Xdr & io) const
{
  if (this->processor_id() == 0)
    {
      const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

      const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();

      std::vector<new_header_id_type> subdomain_ids;
      subdomain_ids.reserve(subdomain_map.size());

      std::vector<std::string> subdomain_names;
      subdomain_names.reserve(subdomain_map.size());

      // We need to loop over the map and make sure that there aren't any invalid entries.  Since we
      // return writable references in mesh_base, it's possible for the user to leave some entity names
      // blank.  We can't write those to the XDA file.
      new_header_id_type n_subdomain_names = 0;
      for (const auto & [sbd_id, name] : subdomain_map)
        if (!name.empty())
          {
            n_subdomain_names++;
            subdomain_ids.push_back(sbd_id);
            subdomain_names.push_back(name);
          }

      io.data(n_subdomain_names, "# subdomain id to name map");
      // Write out the ids and names in two vectors
      if (n_subdomain_names)
        {
          io.data(subdomain_ids);
          io.data(subdomain_names);
        }
    }
}


void
XdrIO::write_serialized_connectivity (Xdr & io,
                                      const dof_id_type libmesh_dbg_var(n_elem),
                                      const new_header_id_type n_elem_integers) const
{
  libmesh_assert (io.writing());

  const bool
    write_p_level      = ("." == this->polynomial_level_file_name()),
    write_partitioning = ("." == this->partition_map_file_name()),
    write_subdomain_id = ("." == this->subdomain_map_file_name());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
  libmesh_assert_equal_to (n_elem, mesh.n_elem());

  // We will only write active elements and their parents.
  const unsigned int n_active_levels = MeshTools::n_active_levels (mesh);
  std::vector<xdr_id_type> n_global_elem_at_level(n_active_levels);

  // Find the number of local and global elements at each level
#ifndef NDEBUG
  xdr_id_type tot_n_elem = 0;
#endif
  for (unsigned int level=0; level<n_active_levels; level++)
    {
      n_global_elem_at_level[level] =
        MeshTools::n_elem(mesh.local_level_elements_begin(level),
                          mesh.local_level_elements_end(level));

      this->comm().sum(n_global_elem_at_level[level]);
#ifndef NDEBUG
      tot_n_elem += n_global_elem_at_level[level];
#endif
      libmesh_assert_less_equal (n_global_elem_at_level[level], n_elem);
      libmesh_assert_less_equal (tot_n_elem, n_elem);
    }

  std::vector<xdr_id_type>
    xfer_conn, recv_conn;
  std::vector<dof_id_type>
    n_elem_on_proc(this->n_processors()), processor_offsets(this->n_processors());
  std::vector<xdr_id_type> output_buffer;
  std::vector<std::size_t>
    xfer_buf_sizes(this->n_processors());

#ifdef LIBMESH_ENABLE_AMR
  typedef std::map<dof_id_type, std::pair<processor_id_type, dof_id_type>> id_map_type;
  id_map_type parent_id_map, child_id_map;
#endif

  dof_id_type my_next_elem=0, next_global_elem=0;

  //-------------------------------------------
  // First write the level-0 elements directly.
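  // pack_element() appends each element to xfer_conn as
  //   [n_nodes, type, unique_id, pid, subdomain id, p_level (with AMR),
  //    node ids..., extra elem integers...];
  // elements on refined levels also carry their parent's id and
  // processor id just after the unique_id -- this is the order the
  // unpacking loops below consume.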
  for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
                                    mesh.local_level_elements_end(0)))
    {
      pack_element (xfer_conn, elem,
                    /*parent_id=*/DofObject::invalid_id,
                    /*parent_pid=*/DofObject::invalid_id,
                    n_elem_integers);
#ifdef LIBMESH_ENABLE_AMR
      parent_id_map[elem->id()] = std::make_pair(this->processor_id(),
                                                 my_next_elem);
#endif
      ++my_next_elem;
    }
  xfer_conn.push_back(my_next_elem); // toss in the number of elements transferred.

  std::size_t my_size = xfer_conn.size();
  this->comm().gather (0, my_next_elem, n_elem_on_proc);
  this->comm().gather (0, my_size, xfer_buf_sizes);

  processor_offsets[0] = 0;
  for (auto pid : IntRange<processor_id_type>(1, this->n_processors()))
    processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];

  // All processors send their xfer buffers to processor 0.
  // Processor 0 will receive the data and write out the elements.
  if (this->processor_id() == 0)
    {
      // Write the number of elements at this level.
      {
        std::string comment = "# n_elem at level 0", legend = ", [ type ";
        if (_write_unique_id)
          legend += "uid ";
        if (write_partitioning)
          legend += "pid ";
        if (write_subdomain_id)
          legend += "sid ";
        if (write_p_level)
          legend += "p_level ";
        legend += "(n0 ... nN-1) ]";
        comment += legend;
        io.data (n_global_elem_at_level[0], comment);
      }

      for (auto pid : make_range(this->n_processors()))
        {
          recv_conn.resize(xfer_buf_sizes[pid]);
          if (pid == 0)
            recv_conn = xfer_conn;
          else
            this->comm().receive (pid, recv_conn);

          // at a minimum, the buffer should contain the number of elements,
          // which could be 0.
          libmesh_assert (!recv_conn.empty());

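          // Unpack this processor's buffer and write its elements.  The
          // structured binding over a std::tuple carries three loop
          // variables of different types: an element counter, a cursor
          // into recv_conn, and the element count from the buffer's end.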
          for (auto [elem, recv_conn_iter, n_elem_received] =
                 std::tuple{xdr_id_type(0), recv_conn.begin(), recv_conn.back()};
               elem<n_elem_received; elem++, next_global_elem++)
            {
              output_buffer.clear();

              // n. nodes
              const xdr_id_type n_nodes = *recv_conn_iter++;

              // type
              output_buffer.push_back(*recv_conn_iter++);

              // unique_id
              xdr_id_type tmp = *recv_conn_iter++;
              if (_write_unique_id)
                output_buffer.push_back(tmp);

              // processor id
              tmp = *recv_conn_iter++;
              if (write_partitioning)
                output_buffer.push_back(tmp);

              // subdomain id
              tmp = *recv_conn_iter++;
              if (write_subdomain_id)
                output_buffer.push_back(tmp);

#ifdef LIBMESH_ENABLE_AMR
              // p level
              tmp = *recv_conn_iter++;
              if (write_p_level)
                output_buffer.push_back(tmp);
#endif

              for (dof_id_type node=0; node<n_nodes; node++)
                output_buffer.push_back(*recv_conn_iter++);

              // Write out the elem extra integers after the connectivity
              for (dof_id_type n=0; n<n_elem_integers; n++)
                output_buffer.push_back(*recv_conn_iter++);

              io.data_stream
                (output_buffer.data(),
                 cast_int<unsigned int>(output_buffer.size()),
                 cast_int<unsigned int>(output_buffer.size()));
            }
        }
    }
  else
    this->comm().send (0, xfer_conn);

#ifdef LIBMESH_ENABLE_AMR
  //--------------------------------------------------------------------
  // Next write the remaining elements indirectly through their parents.
  // This will ensure that the children are written in the proper order
  // so they can be reconstructed properly.
  for (unsigned int level=1; level<n_active_levels; level++)
    {
      xfer_conn.clear();

      dof_id_type my_n_elem_written_at_level = 0;
      for (const auto & parent : as_range(mesh.local_level_elements_begin(level-1),
                                          mesh.local_level_elements_end(level-1)))
        if (!parent->active()) // we only want the parent elements at this level, and
          {                    // there is no direct iterator for this obscure use
            id_map_type::iterator pos = parent_id_map.find(parent->id());
            libmesh_assert (pos != parent_id_map.end());
            const processor_id_type parent_pid = pos->second.first;
            const dof_id_type parent_id = pos->second.second;
            parent_id_map.erase(pos);

            for (auto & child : parent->child_ref_range())
              {
                pack_element (xfer_conn, &child, parent_id, parent_pid, n_elem_integers);

                // this approach introduces the possibility that we write
                // non-local elements.  These elements may well be parents
                // at the next step
                child_id_map[child.id()] = std::make_pair (child.processor_id(),
                                                           my_n_elem_written_at_level++);
                my_next_elem++;
              }
          }
      xfer_conn.push_back(my_n_elem_written_at_level);
      my_size = xfer_conn.size();
      this->comm().gather (0, my_size, xfer_buf_sizes);

      // Processor 0 will receive the data and write the elements.
      if (this->processor_id() == 0)
        {
          // Write the number of elements at this level.
          {
            std::ostringstream buf;
            buf << "# n_elem at level " << level << ", [ type ";

            if (_write_unique_id)
              buf << "uid ";
            buf << "parent ";
            if (write_partitioning)
              buf << "pid ";
            if (write_subdomain_id)
              buf << "sid ";
            if (write_p_level)
              buf << "p_level ";
            buf << "(n0 ... nN-1) ]";

            io.data (n_global_elem_at_level[level], buf.str());
          }

          for (auto pid : make_range(this->n_processors()))
            {
              recv_conn.resize(xfer_buf_sizes[pid]);
              if (pid == 0)
                recv_conn = xfer_conn;
              else
                this->comm().receive (pid, recv_conn);

              // at a minimum, the buffer should contain the number of elements,
              // which could be 0.
              libmesh_assert (!recv_conn.empty());

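              // Same unpacking idiom as at level 0, except that each
              // record also carries the parent's local id and processor
              // id, which are translated to a global parent index via
              // processor_offsets[].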
              for (auto [elem, recv_conn_iter, n_elem_received] =
                     std::tuple{xdr_id_type(0), recv_conn.begin(), recv_conn.back()};
                   elem<n_elem_received;
                   elem++, next_global_elem++)
                {
                  output_buffer.clear();

                  // n. nodes
                  const xdr_id_type n_nodes = *recv_conn_iter++;

                  // type
                  output_buffer.push_back(*recv_conn_iter++);

                  // unique_id
                  xdr_id_type tmp = *recv_conn_iter++;
                  if (_write_unique_id)
                    output_buffer.push_back(tmp);

                  // parent local id
                  const xdr_id_type parent_local_id = *recv_conn_iter++;

                  // parent processor id
                  const xdr_id_type parent_pid = *recv_conn_iter++;

                  output_buffer.push_back (parent_local_id+processor_offsets[parent_pid]);

                  // processor id
                  tmp = *recv_conn_iter++;
                  if (write_partitioning)
                    output_buffer.push_back(tmp);

                  // subdomain id
                  tmp = *recv_conn_iter++;
                  if (write_subdomain_id)
                    output_buffer.push_back(tmp);

                  // p level
                  tmp = *recv_conn_iter++;
                  if (write_p_level)
                    output_buffer.push_back(tmp);

                  // connectivity
                  for (xdr_id_type node=0; node<n_nodes; node++)
                    output_buffer.push_back(*recv_conn_iter++);

                  // Write out the elem extra integers after the connectivity
                  for (dof_id_type n=0; n<n_elem_integers; n++)
                    output_buffer.push_back(*recv_conn_iter++);

                  io.data_stream
                    (output_buffer.data(),
                     cast_int<unsigned int>(output_buffer.size()),
                     cast_int<unsigned int>(output_buffer.size()));
                }
            }
        }
      else
        this->comm().send (0, xfer_conn);

      // update the processor_offsets
      processor_offsets[0] = processor_offsets.back() + n_elem_on_proc.back();
      this->comm().gather (0, my_n_elem_written_at_level, n_elem_on_proc);
      for (auto pid : IntRange<processor_id_type>(1, this->n_processors()))
        processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];

      // Now, at the next level we will again iterate over local parents.  However,
      // those parents may have been written by other processors (at this step),
      // so we need to gather them into our *_id_maps.
      {
        std::map<processor_id_type, std::vector<dof_id_type>> requested_ids;

        for (const auto & elem : as_range(mesh.local_level_elements_begin(level),
                                          mesh.local_level_elements_end(level)))
          if (!child_id_map.count(elem->id()))
            {
              libmesh_assert_not_equal_to (elem->parent()->processor_id(), this->processor_id());
              const processor_id_type pid = elem->parent()->processor_id();
              if (pid != this->processor_id())
                requested_ids[pid].push_back(elem->id());
            }

        auto gather_functor =
          [& child_id_map]
          (processor_id_type libmesh_dbg_var(pid),
           const std::vector<dof_id_type> & ids,
           std::vector<dof_id_type> & data)
          {
            const std::size_t ids_size = ids.size();
            data.resize(ids_size);

            // Fill those requests by overwriting the requested ids
            for (std::size_t i=0; i != ids_size; i++)
              {
                libmesh_assert (child_id_map.count(ids[i]));
                libmesh_assert_equal_to (child_id_map[ids[i]].first, pid);

                data[i] = child_id_map[ids[i]].second;
              }
          };

        auto action_functor =
          [& child_id_map]
          (processor_id_type pid,
           const std::vector<dof_id_type> & ids,
           const std::vector<dof_id_type> & data)
          {
            std::size_t data_size = data.size();

            for (std::size_t i=0; i != data_size; i++)
              child_id_map[ids[i]] =
                std::make_pair (pid, data[i]);
          };

        // Trade ids back and forth
        const dof_id_type * ex = nullptr;
        Parallel::pull_parallel_vector_data
          (this->comm(), requested_ids, gather_functor, action_functor, ex);

        // overwrite the parent_id_map with the child_id_map, but
        // use std::map::swap() for efficiency.
        parent_id_map.swap(child_id_map);
        child_id_map.clear();
      }
    }
#endif // LIBMESH_ENABLE_AMR
  if (this->processor_id() == 0)
    libmesh_assert_equal_to (next_global_elem, n_elem);

}


void XdrIO::write_serialized_nodes (Xdr & io, const dof_id_type max_node_id,
                                    const new_header_id_type n_node_integers) const
{
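  // This routine makes up to three block-based passes over the nodes,
  // each using the same gather-to-processor-0 pattern: (1) coordinates,
  // (2) unique ids (when enabled), (3) extra node integers (if any).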
  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
  libmesh_assert_equal_to (max_node_id, mesh.max_node_id());

  std::vector<dof_id_type> xfer_ids;
  std::vector<Real> xfer_coords;
  std::vector<Real> & coords=xfer_coords;

  std::vector<std::vector<dof_id_type>> recv_ids   (this->n_processors());
  std::vector<std::vector<Real>>        recv_coords(this->n_processors());

#ifndef NDEBUG
  std::size_t n_written=0;
#endif

  // Note: do not be tempted to replace the node loops below with
  // range-based iteration; these iterators must live outside the
  // blk loop, since node_iter carries over from one block to the next.
  MeshBase::const_node_iterator node_iter = mesh.local_nodes_begin();
  const MeshBase::const_node_iterator nodes_end = mesh.local_nodes_end();

  for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
    {
      const std::size_t first_node = blk*io_blksize;
      last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));

      const std::size_t tot_id_size = last_node - first_node;

      // Build up the xfer buffers on each processor
      xfer_ids.clear();
      xfer_coords.clear();

      for (; node_iter != nodes_end; ++node_iter)
        {
          const Node & node = **node_iter;
          libmesh_assert_greater_equal(node.id(), first_node);
          if (node.id() >= last_node)
            break;

          xfer_ids.push_back(node.id());
          xfer_coords.push_back(node(0));
#if LIBMESH_DIM > 1
          xfer_coords.push_back(node(1));
#endif
#if LIBMESH_DIM > 2
          xfer_coords.push_back(node(2));
#endif
        }

      //-------------------------------------
      // Send the xfer buffers to processor 0
      std::vector<std::size_t> ids_size;

      const std::size_t my_ids_size = xfer_ids.size();

      // explicitly gather ids_size
      this->comm().gather (0, my_ids_size, ids_size);

      // We will have lots of simultaneous receives if we are
      // processor 0, so let's use nonblocking receives.
      std::vector<Parallel::Request>
        id_request_handles(this->n_processors()-1),
        coord_request_handles(this->n_processors()-1);

      Parallel::MessageTag
        id_tag    = mesh.comm().get_unique_tag(),
        coord_tag = mesh.comm().get_unique_tag();

      // Post the receives -- do this on processor 0 only.
      if (this->processor_id() == 0)
        {
          for (auto pid : make_range(this->n_processors()))
            {
              recv_ids[pid].resize(ids_size[pid]);
              recv_coords[pid].resize(ids_size[pid]*LIBMESH_DIM);

              if (pid == 0)
                {
                  recv_ids[0] = xfer_ids;
                  recv_coords[0] = xfer_coords;
                }
              else
                {
                  this->comm().receive (pid, recv_ids[pid],
                                        id_request_handles[pid-1],
                                        id_tag);
                  this->comm().receive (pid, recv_coords[pid],
                                        coord_request_handles[pid-1],
                                        coord_tag);
                }
            }
        }
      else
        {
          // Send -- do this on all other processors.
          this->comm().send(0, xfer_ids,    id_tag);
          this->comm().send(0, xfer_coords, coord_tag);
        }

      // -------------------------------------------------------
      // Receive the messages and write the output on processor 0.
      if (this->processor_id() == 0)
        {
          // Wait for all the receives to complete.  We have no
          // need for the statuses since we already know the
          // buffer sizes.
          Parallel::wait (id_request_handles);
          Parallel::wait (coord_request_handles);

#ifndef NDEBUG
          for (auto pid : make_range(this->n_processors()))
            libmesh_assert_equal_to(recv_coords[pid].size(),
                                    recv_ids[pid].size()*LIBMESH_DIM);
#endif

          // Write the coordinates in this block.
          // Some of these coordinates may correspond to ids for which
          // no node exists, if we have a discontiguous node
          // numbering!
          //
          // The purpose of communicating the xfer_ids/recv_ids buffer
          // is so that we can handle discontiguous node numberings:
          // we do not actually write the node ids themselves anywhere
          // in the xdr file.  We do write the unique ids to file, if
          // enabled (see next section).

          // Write invalid values for unused node ids
          coords.clear();
          coords.resize (3*tot_id_size, std::numeric_limits<Real>::quiet_NaN());

          for (auto pid : make_range(this->n_processors()))
            for (auto idx : index_range(recv_ids[pid]))
              {
                libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
                const std::size_t local_idx = recv_ids[pid][idx] - first_node;
                libmesh_assert_less(local_idx, tot_id_size);

                libmesh_assert_less ((3*local_idx+2), coords.size());
                libmesh_assert_less ((LIBMESH_DIM*idx+LIBMESH_DIM-1), recv_coords[pid].size());

                coords[3*local_idx+0] = recv_coords[pid][LIBMESH_DIM*idx+0];
#if LIBMESH_DIM > 1
                coords[3*local_idx+1] = recv_coords[pid][LIBMESH_DIM*idx+1];
#else
                coords[3*local_idx+1] = 0.;
#endif
#if LIBMESH_DIM > 2
                coords[3*local_idx+2] = recv_coords[pid][LIBMESH_DIM*idx+2];
#else
                coords[3*local_idx+2] = 0.;
#endif

#ifndef NDEBUG
                n_written++;
#endif
              }

          io.data_stream (coords.empty() ? nullptr : coords.data(),
                          cast_int<unsigned int>(coords.size()), /*line_break=*/3);
        }
    } // end for (block-based coord writes)

  if (this->processor_id() == 0)
    libmesh_assert_less_equal (n_written, max_node_id);

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  // XDR unsigned char doesn't work as anticipated
  unsigned short write_unique_ids = 1;
#else
  unsigned short write_unique_ids = 0;
#endif

  if (this->processor_id() == 0)
    io.data (write_unique_ids, "# presence of unique ids");

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  {
    std::vector<xdr_id_type> xfer_unique_ids;
    std::vector<xdr_id_type> & unique_ids=xfer_unique_ids;
    std::vector<std::vector<xdr_id_type>> recv_unique_ids (this->n_processors());

#ifndef NDEBUG
    // Reset write counter
    n_written = 0;
#endif

    // Return node iterator to the beginning
    node_iter = mesh.local_nodes_begin();

    for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
      {
        const std::size_t first_node = blk*io_blksize;
        last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));

        const std::size_t tot_id_size = last_node - first_node;

        // Build up the xfer buffers on each processor
        xfer_ids.clear();
        xfer_ids.reserve(tot_id_size);
        xfer_unique_ids.clear();
        xfer_unique_ids.reserve(tot_id_size);

        for (; node_iter != nodes_end; ++node_iter)
          {
            const Node & node = **node_iter;
            libmesh_assert_greater_equal(node.id(), first_node);
            if (node.id() >= last_node)
              break;

            xfer_ids.push_back(node.id());
            xfer_unique_ids.push_back(node.unique_id());
          }

        //-------------------------------------
        // Send the xfer buffers to processor 0
        std::vector<std::size_t> ids_size;

        const std::size_t my_ids_size = xfer_ids.size();

        // explicitly gather ids_size
        this->comm().gather (0, my_ids_size, ids_size);

        // We will have lots of simultaneous receives if we are
        // processor 0, so let's use nonblocking receives.
        std::vector<Parallel::Request>
          unique_id_request_handles(this->n_processors()-1),
          id_request_handles(this->n_processors()-1);

        Parallel::MessageTag
          unique_id_tag = mesh.comm().get_unique_tag(),
          id_tag        = mesh.comm().get_unique_tag();

        // Post the receives -- do this on processor 0 only.
        if (this->processor_id() == 0)
          {
            for (auto pid : make_range(this->n_processors()))
              {
                recv_ids[pid].resize(ids_size[pid]);
                recv_unique_ids[pid].resize(ids_size[pid]);

                if (pid == 0)
                  {
                    recv_ids[0] = xfer_ids;
                    recv_unique_ids[0] = xfer_unique_ids;
                  }
                else
                  {
                    this->comm().receive (pid, recv_ids[pid],
                                          id_request_handles[pid-1],
                                          id_tag);
                    this->comm().receive (pid, recv_unique_ids[pid],
                                          unique_id_request_handles[pid-1],
                                          unique_id_tag);
                  }
              }
          }
        else
          {
            // Send -- do this on all other processors.
            this->comm().send(0, xfer_ids,        id_tag);
            this->comm().send(0, xfer_unique_ids, unique_id_tag);
          }

        // -------------------------------------------------------
        // Receive the messages and write the output on processor 0.
        if (this->processor_id() == 0)
          {
            // Wait for all the receives to complete.  We have no
            // need for the statuses since we already know the
            // buffer sizes.
            Parallel::wait (id_request_handles);
            Parallel::wait (unique_id_request_handles);

#ifndef NDEBUG
            for (auto pid : make_range(this->n_processors()))
              libmesh_assert_equal_to
                (recv_ids[pid].size(), recv_unique_ids[pid].size());
#endif

            libmesh_assert_less_equal
              (tot_id_size, std::min(io_blksize, std::size_t(max_node_id)));

            // Write the unique ids in this block.
            unique_ids.clear();
            unique_ids.resize(tot_id_size, unique_id_type(-1));

            for (auto pid : make_range(this->n_processors()))
              for (auto idx : index_range(recv_ids[pid]))
                {
                  libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
                  const std::size_t local_idx = recv_ids[pid][idx] - first_node;
                  libmesh_assert_less (local_idx, unique_ids.size());

                  unique_ids[local_idx] = recv_unique_ids[pid][idx];

#ifndef NDEBUG
                  n_written++;
#endif
                }

            io.data_stream (unique_ids.empty() ? nullptr : unique_ids.data(),
                            cast_int<unsigned int>(unique_ids.size()), /*line_break=*/1);
          }
      } // end for (block-based unique_id writes)

    if (this->processor_id() == 0)
      libmesh_assert_less_equal (n_written, max_node_id);
  }
#endif // LIBMESH_ENABLE_UNIQUE_ID

  // Next: do "block"-based I/O for the extra node integers (if necessary)
  if (n_node_integers)
    {
#ifndef NDEBUG
      // Reset write counter
      n_written = 0;
#endif

      // Return node iterator to the beginning
      node_iter = mesh.local_nodes_begin();

      // Data structures for writing "extra" node integers
      std::vector<dof_id_type> xfer_node_integers;
      std::vector<dof_id_type> & node_integers = xfer_node_integers;
      std::vector<std::vector<dof_id_type>> recv_node_integers(this->n_processors());

      for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
        {
          const std::size_t first_node = blk*io_blksize;
          last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));

          const std::size_t tot_id_size = last_node - first_node;

          // Build up the xfer buffers on each processor
          xfer_ids.clear();
          xfer_ids.reserve(tot_id_size);
          xfer_node_integers.clear();
          xfer_node_integers.reserve(tot_id_size * n_node_integers);

          for (; node_iter != nodes_end; ++node_iter)
            {
              const Node & node = **node_iter;
              libmesh_assert_greater_equal(node.id(), first_node);
              if (node.id() >= last_node)
                break;

              xfer_ids.push_back(node.id());

              // Append current node's node integers to xfer buffer
              for (unsigned int i=0; i != n_node_integers; ++i)
                xfer_node_integers.push_back(node.get_extra_integer(i));
            }

          //-------------------------------------
          // Send the xfer buffers to processor 0
          std::vector<std::size_t> ids_size;

          const std::size_t my_ids_size = xfer_ids.size();

          // explicitly gather ids_size
          this->comm().gather (0, my_ids_size, ids_size);

          // We will have lots of simultaneous receives if we are
          // processor 0, so let's use nonblocking receives.
          std::vector<Parallel::Request>
            node_integers_request_handles(this->n_processors()-1),
            id_request_handles(this->n_processors()-1);

          Parallel::MessageTag
            node_integers_tag = mesh.comm().get_unique_tag(),
            id_tag            = mesh.comm().get_unique_tag();

          // Post the receives -- do this on processor 0 only.
          if (this->processor_id() == 0)
            {
              for (auto pid : make_range(this->n_processors()))
                {
                  recv_ids[pid].resize(ids_size[pid]);
                  recv_node_integers[pid].resize(n_node_integers * ids_size[pid]);

                  if (pid == 0)
                    {
                      recv_ids[0] = xfer_ids;
                      recv_node_integers[0] = xfer_node_integers;
                    }
                  else
                    {
                      this->comm().receive (pid, recv_ids[pid],
                                            id_request_handles[pid-1],
                                            id_tag);
                      this->comm().receive (pid, recv_node_integers[pid],
                                            node_integers_request_handles[pid-1],
                                            node_integers_tag);
                    }
                }
            }
          else
            {
              // Send -- do this on all other processors.
              this->comm().send(0, xfer_ids,           id_tag);
              this->comm().send(0, xfer_node_integers, node_integers_tag);
            }

          // -------------------------------------------------------
          // Receive the messages and write the output on processor 0.
          if (this->processor_id() == 0)
            {
              // Wait for all the receives to complete.  We have no
              // need for the statuses since we already know the
              // buffer sizes.
              Parallel::wait (id_request_handles);
              Parallel::wait (node_integers_request_handles);

#ifndef NDEBUG
              for (auto pid : make_range(this->n_processors()))
                libmesh_assert_equal_to
                  (recv_ids[pid].size(), recv_node_integers[pid].size() / n_node_integers);
#endif

              libmesh_assert_less_equal
                (tot_id_size, std::min(io_blksize, std::size_t(max_node_id)));

              // Write the node integers in this block.  We will write
              // invalid node integers if no node exists, i.e. if we
              // have a discontiguous node numbering!
              //
              // The purpose of communicating the xfer_ids/recv_ids buffer
              // is so that we can handle discontiguous node numberings:
              // we do not actually write the node ids themselves anywhere
              // in the xdr file.  We do write the unique ids to file, if
              // enabled (see previous section).

              // Note: we initialize the node_integers array with invalid values,
              // so any indices which don't get written to in the loop below will
              // just contain invalid values.
              node_integers.clear();
              node_integers.resize (n_node_integers*tot_id_size, static_cast<dof_id_type>(-1));

              for (auto pid : make_range(this->n_processors()))
                for (auto idx : index_range(recv_ids[pid]))
                  {
                    libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
                    const std::size_t local_idx = recv_ids[pid][idx] - first_node;

                    // Assert that we won't index past the end of the node_integers array
                    libmesh_assert_less (local_idx, tot_id_size);

                    for (unsigned int i=0; i != n_node_integers; ++i)
                      node_integers[n_node_integers*local_idx + i] = recv_node_integers[pid][n_node_integers*idx + i];

#ifndef NDEBUG
                    n_written++;
#endif
                  }

              io.data_stream (node_integers.empty() ? nullptr : node_integers.data(),
                              cast_int<unsigned int>(node_integers.size()), /*line_break=*/n_node_integers);
            }
        } // end for (block-based node integer writes)

      if (this->processor_id() == 0)
        libmesh_assert_less_equal (n_written, max_node_id);

    } // end if (n_node_integers)
}


void XdrIO::write_serialized_bcs_helper (Xdr & io, const new_header_id_type n_bcs, const std::string bc_type) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, true); // sideset names

  new_header_id_type n_bcs_out = n_bcs;
  if (this->processor_id() == 0)
    {
      std::stringstream comment_string;
      comment_string << "# number of " << bc_type << " boundary conditions";
      io.data (n_bcs_out, comment_string.str());
    }
  n_bcs_out = 0;

  if (!n_bcs) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  // Container to catch boundary IDs handed back by BoundaryInfo
  std::vector<boundary_id_type> bc_ids;

  // Boundary conditions are only specified for level-0 elements
  dof_id_type n_local_level_0_elem=0;
  for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
                                    mesh.local_level_elements_end(0)))
    {
      if (bc_type == "side")
        {
          for (auto s : elem->side_index_range())
            {
              boundary_info.boundary_ids (elem, s, bc_ids);
              for (const auto & bc_id : bc_ids)
                if (bc_id != BoundaryInfo::invalid_id)
                  {
                    xfer_bcs.push_back (n_local_level_0_elem);
                    xfer_bcs.push_back (s);
                    xfer_bcs.push_back (bc_id);
                  }
            }
        }
      else if (bc_type == "edge")
        {
          for (auto e : elem->edge_index_range())
            {
              boundary_info.edge_boundary_ids (elem, e, bc_ids);
              for (const auto & bc_id : bc_ids)
                if (bc_id != BoundaryInfo::invalid_id)
                  {
                    xfer_bcs.push_back (n_local_level_0_elem);
                    xfer_bcs.push_back (e);
                    xfer_bcs.push_back (bc_id);
                  }
            }
        }
      else if (bc_type == "shellface")
        {
          for (unsigned short sf=0; sf<2; sf++)
            {
              boundary_info.shellface_boundary_ids (elem, sf, bc_ids);
              for (const auto & bc_id : bc_ids)
                if (bc_id != BoundaryInfo::invalid_id)
                  {
                    xfer_bcs.push_back (n_local_level_0_elem);
                    xfer_bcs.push_back (sf);
                    xfer_bcs.push_back (bc_id);
                  }
            }
        }
      else
        {
          libmesh_error_msg("bc_type not recognized: " + bc_type);
        }

      // Increment the level-0 element counter.
      n_local_level_0_elem++;
    }

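  // Tack the local level-0 element count onto the end of the buffer;
  // processor 0 pops it back off and uses it to offset each processor's
  // locally-numbered element indices into a global numbering.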
  xfer_bcs.push_back(n_local_level_0_elem);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type elem_offset = 0;
      for (auto pid : make_range(this->n_processors()))
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          const dof_id_type my_n_local_level_0_elem
            = cast_int<dof_id_type>(recv_bcs.back());
          recv_bcs.pop_back();

          for (std::size_t idx=0, rbs=recv_bcs.size(); idx<rbs; idx += 3, n_bcs_out++)
            recv_bcs[idx+0] += elem_offset;

          io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
                          cast_int<unsigned int>(recv_bcs.size()), 3);
          elem_offset += my_n_local_level_0_elem;
        }
      libmesh_assert_equal_to (n_bcs, n_bcs_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}



void XdrIO::write_serialized_side_bcs (Xdr & io, const new_header_id_type n_side_bcs) const
{
  write_serialized_bcs_helper(io, n_side_bcs, "side");
}



void XdrIO::write_serialized_edge_bcs (Xdr & io, const new_header_id_type n_edge_bcs) const
{
  write_serialized_bcs_helper(io, n_edge_bcs, "edge");
}



void XdrIO::write_serialized_shellface_bcs (Xdr & io, const new_header_id_type n_shellface_bcs) const
{
  write_serialized_bcs_helper(io, n_shellface_bcs, "shellface");
}


void XdrIO::write_serialized_nodesets (Xdr & io, const new_header_id_type n_nodesets) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, false); // nodeset names

  new_header_id_type n_nodesets_out = n_nodesets;
  if (this->processor_id() == 0)
    io.data (n_nodesets_out, "# number of nodesets");
  n_nodesets_out = 0;

  if (!n_nodesets) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  // Container to catch boundary IDs handed back by BoundaryInfo
  std::vector<boundary_id_type> nodeset_ids;

  dof_id_type n_node=0;
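  // (Note: n_node is never incremented below, so the node_offset
  // accumulated on processor 0 stays zero -- unlike the element-based
  // helper above, this buffer stores global node ids directly.)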
  for (const auto & node : mesh.local_node_ptr_range())
    {
      boundary_info.boundary_ids (node, nodeset_ids);
      for (const auto & bc_id : nodeset_ids)
        if (bc_id != BoundaryInfo::invalid_id)
          {
            xfer_bcs.push_back (node->id());
            xfer_bcs.push_back (bc_id);
          }
    }

  xfer_bcs.push_back(n_node);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type node_offset = 0;
      for (auto pid : make_range(this->n_processors()))
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          const dof_id_type my_n_node =
            cast_int<dof_id_type>(recv_bcs.back());
          recv_bcs.pop_back();

          for (std::size_t idx=0, rbs=recv_bcs.size(); idx<rbs; idx += 2, n_nodesets_out++)
            recv_bcs[idx+0] += node_offset;

          io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
                          cast_int<unsigned int>(recv_bcs.size()), 2);
          node_offset += my_n_node;
        }
      libmesh_assert_equal_to (n_nodesets, n_nodesets_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}


void XdrIO::write_serialized_bc_names (Xdr & io, const BoundaryInfo & info, bool is_sideset) const
{
  if (this->processor_id() == 0)
    {
      const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
        info.get_sideset_name_map() : info.get_nodeset_name_map();

      std::vector<new_header_id_type> boundary_ids;
      boundary_ids.reserve(boundary_map.size());

      std::vector<std::string> boundary_names;
      boundary_names.reserve(boundary_map.size());

      // We need to loop over the map and make sure that there aren't any invalid entries.  Since we
      // return writable references in boundary_info, it's possible for the user to leave some entity names
      // blank.  We can't write those to the XDA file.
      new_header_id_type n_boundary_names = 0;
      for (const auto & [bndry_id, name] : boundary_map)
        if (!name.empty())
          {
            n_boundary_names++;
            boundary_ids.push_back(bndry_id);
            boundary_names.push_back(name);
          }

      if (is_sideset)
        io.data(n_boundary_names, "# sideset id to name map");
      else
        io.data(n_boundary_names, "# nodeset id to name map");
      // Write out the ids and names in two vectors
      if (n_boundary_names)
        {
          io.data(boundary_ids);
          io.data(boundary_names);
        }
    }
}


void XdrIO::read (const std::string & name)
{
  LOG_SCOPE("read()","XdrIO");

  // Only open the file on processor 0 -- this is especially important because
  // there may be an underlying bzip/bunzip going on, and multiple simultaneous
  // calls will produce a race condition.
  Xdr io (this->processor_id() == 0 ? name : "", this->binary() ? DECODE : READ);

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // get the version string.
  if (this->processor_id() == 0)
    io.data (this->version());
  this->comm().broadcast (this->version());

  // note that for "legacy" files the first entry is an
  // integer -- not a string at all.
  this->legacy() = !(this->version().find("libMesh") < this->version().size());

  // Check for a legacy version format.
  libmesh_error_msg_if(this->legacy(), "We no longer support reading files in the legacy format.");

  // Read headers with the old id type if they're pre-1.3.0, or with
  // the new id type if they're post-1.3.0
  const unsigned int n_header_metadata_values = 13;
  std::vector<new_header_id_type> meta_data(n_header_metadata_values, sizeof(xdr_id_type));
  if (this->version_at_least_1_3_0())
    {
      this->read_header(io, meta_data);
    }
  else
    {
      // In pre-1.3.0 format there were only 10 metadata values in the header, but
      // in newer versions there are more.  The unread values will be set to 0.
      std::vector<old_header_id_type> old_data(n_header_metadata_values, sizeof(xdr_id_type));

      this->read_header(io, old_data);

      meta_data.assign(old_data.begin(), old_data.end());
    }

  const new_header_id_type & n_elem  = meta_data[0];
  const new_header_id_type & n_nodes = meta_data[1];

  if (version_at_least_0_9_2())
    _field_width = cast_int<unsigned int>(meta_data[2]);

  // On systems where uint64_t==unsigned long, we were previously
  // writing 64-bit unsigned integers via xdr_u_long(), a function
  // which is literally less suited for that task than abort() would
  // have been, because at least abort() would have *known* it
  // couldn't write rather than truncating writes to 32 bits.
  //
  // If we have files with version < 1.3.0, then we'll continue to use
  // 32 bit field width, regardless of whether the file thinks we
  // should, whenever we're on a system where the problem would have
  // occurred.
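  // The two branches below are identical except for the unsigned integer
  // type (uint32_t vs. uint64_t) used to size the boundary condition data.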
  if ((_field_width == 4) ||
      (!version_at_least_1_3_0() &&
       libmesh_type_is_same<uint64_t, unsigned long>::value))
    {
      uint32_t type_size = 0;

      // read subdomain names
      this->read_serialized_subdomain_names(io);

      // read connectivity
      this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);

      // read the nodal locations
      this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes), meta_data);

      // read the side boundary conditions
      this->read_serialized_side_bcs (io, type_size);

      if (version_at_least_0_9_2())
        // read the nodesets
        this->read_serialized_nodesets (io, type_size);

      if (version_at_least_1_1_0())
        {
          // read the edge boundary conditions
          this->read_serialized_edge_bcs (io, type_size);

          // read the "shell face" boundary conditions
          this->read_serialized_shellface_bcs (io, type_size);
        }
    }
  else if (_field_width == 8)
    {
      uint64_t type_size = 0;

      // read subdomain names
      this->read_serialized_subdomain_names(io);

      // read connectivity
      this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);

      // read the nodal locations
      this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes), meta_data);

      // read the boundary conditions
      this->read_serialized_side_bcs (io, type_size);

      if (version_at_least_0_9_2())
        // read the nodesets
        this->read_serialized_nodesets (io, type_size);

      if (version_at_least_1_1_0())
        {
          // read the edge boundary conditions
          this->read_serialized_edge_bcs (io, type_size);

          // read the "shell face" boundary conditions
          this->read_serialized_shellface_bcs (io, type_size);
        }
    }

  // set the node processor ids
  Partitioner::set_node_processor_ids(mesh);
}


template <typename T>
void XdrIO::read_header (Xdr & io, std::vector<T> & meta_data)
{
  LOG_SCOPE("read_header()","XdrIO");

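  // meta_data layout in the current (13-entry) header:
  //   [0] n_elem        [1] n_nodes      [2] type size    [3] uid size
  //   [4] pid size      [5] sid size     [6] p-level size [7] eid size
  //   [8] side size     [9] bid size     [10] extra integer size
  //   [11] # node integer names          [12] # elem integer names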
  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // Header information to be read on processor 0 and broadcast to
  // other procs.
  std::vector<std::string> node_integer_names;
  std::vector<std::string> elem_integer_names;
  std::vector<dof_id_type> elemset_codes;
  std::vector<std::vector<dof_id_type>> elemset_id_vecs;

  if (this->processor_id() == 0)
    {
      io.data (meta_data[0]);
      io.data (meta_data[1]);
      io.data (this->boundary_condition_file_name()); // libMesh::out << "bc_file=" << this->boundary_condition_file_name() << std::endl;
      io.data (this->subdomain_map_file_name());      // libMesh::out << "sid_file=" << this->subdomain_map_file_name() << std::endl;
      io.data (this->partition_map_file_name());      // libMesh::out << "pid_file=" << this->partition_map_file_name() << std::endl;
      io.data (this->polynomial_level_file_name());   // libMesh::out << "pl_file=" << this->polynomial_level_file_name() << std::endl;

      if (version_at_least_0_9_2())
        {
          // Make sure there's enough room in meta_data
          libmesh_assert_greater_equal(meta_data.size(), 10);

          io.data (meta_data[2], "# type size");
          io.data (meta_data[3], "# uid size");
          io.data (meta_data[4], "# pid size");
          io.data (meta_data[5], "# sid size");
          io.data (meta_data[6], "# p-level size");
          // Boundary Condition sizes
          io.data (meta_data[7], "# eid size");  // elem id
          io.data (meta_data[8], "# side size"); // side number
          io.data (meta_data[9], "# bid size");  // boundary id
        }

      if (version_at_least_1_8_0())
        {
          // Make sure there's enough room in meta_data
          libmesh_assert_greater_equal(meta_data.size(), 13);

          io.data (meta_data[10], "# extra integer size");

          // Read in the node integer names and store the count in meta_data
          io.data(node_integer_names, "# node integer names");
          meta_data[11] = node_integer_names.size();

          // Read in the elem integer names and store the count in meta_data
          io.data(elem_integer_names, "# elem integer names");
          meta_data[12] = elem_integer_names.size();

          // Read in vector of elemset codes from file
          io.data(elemset_codes, "# elemset codes");

          // For each elemset code, read in the associated elemset ids from file
          elemset_id_vecs.resize(elemset_codes.size());
          for (auto i : index_range(elemset_codes))
            io.data(elemset_id_vecs[i]);
        }
    }

  // broadcast the n_elems, n_nodes, and size information
  this->comm().broadcast (meta_data);

  this->comm().broadcast (this->boundary_condition_file_name());
  this->comm().broadcast (this->subdomain_map_file_name());
  this->comm().broadcast (this->partition_map_file_name());
  this->comm().broadcast (this->polynomial_level_file_name());
  this->comm().broadcast (node_integer_names);
  this->comm().broadcast (elem_integer_names);
  this->comm().broadcast (elemset_codes);
  this->comm().broadcast (elemset_id_vecs);

  // Tell the mesh how many nodes/elements to expect.  Depending on the mesh type,
  // this may allow for efficient adding of nodes/elements.
  const T & n_elem  = meta_data[0];
  const T & n_nodes = meta_data[1];

  mesh.reserve_elem(cast_int<dof_id_type>(n_elem));
  mesh.reserve_nodes(cast_int<dof_id_type>(n_nodes));

  // Our mesh is pre-partitioned as it's created
  this->set_n_partitions(this->n_processors());

  if (version_at_least_0_9_2())
    _field_width = cast_int<unsigned int>(meta_data[2]);

  // Add extra node and elem integers on all procs.  Note that adding
  // an extra node/elem "datum" of type T is implemented via repeated
  // calls to MeshBase::add_elem_integer(), so we only need to call
  // MeshBase::add_node/elem_integers() here in order to restore the
  // data that we had on the Mesh previously.
  mesh.add_node_integers(node_integer_names);
  mesh.add_elem_integers(elem_integer_names);

  // Store the elemset_code -> {elemset ids} mapping on the Mesh
  for (auto i : index_range(elemset_codes))
    mesh.add_elemset_code(elemset_codes[i],
                          MeshBase::elemset_type(elemset_id_vecs[i].begin(),
                                                 elemset_id_vecs[i].end()));
}


void XdrIO::read_serialized_subdomain_names(Xdr & io)
{
  const bool read_entity_info = version_at_least_0_9_2();
  const bool use_new_header_type (this->version_at_least_1_3_0());
  if (read_entity_info)
    {
      MeshBase & mesh = MeshInput<MeshBase>::mesh();

1688  new_header_id_type n_subdomain_names = 0;
1689  std::vector<new_header_id_type> subdomain_ids;
1690  std::vector<std::string> subdomain_names;
1691 
1692  // Read the subdomain names
1693  if (this->processor_id() == 0)
1694  {
1695  if (use_new_header_type)
1696  io.data(n_subdomain_names);
1697  else
1698  {
1699  old_header_id_type temp;
1700  io.data(temp);
1701  n_subdomain_names = temp;
1702  }
1703 
1704  subdomain_ids.resize(n_subdomain_names);
1705  subdomain_names.resize(n_subdomain_names);
1706 
1707  if (n_subdomain_names)
1708  {
1709  if (use_new_header_type)
1710  io.data(subdomain_ids);
1711  else
1712  {
1713  std::vector<old_header_id_type> temp;
1714  io.data(temp);
1715  subdomain_ids.assign(temp.begin(), temp.end());
1716  }
1717 
1718  io.data(subdomain_names);
1719  }
1720  }
1721 
1722  // Broadcast the subdomain names to all processors
1723  this->comm().broadcast(n_subdomain_names);
1724  if (n_subdomain_names == 0)
1725  return;
1726 
1727  subdomain_ids.resize(n_subdomain_names);
1728  subdomain_names.resize(n_subdomain_names);
1729  this->comm().broadcast(subdomain_ids);
1730  this->comm().broadcast(subdomain_names);
1731 
1732  // Reassemble the named subdomain information
1733  std::map<subdomain_id_type, std::string> & subdomain_map = mesh.set_subdomain_name_map();
1734 
1735  for (unsigned int i=0; i<n_subdomain_names; ++i)
1736  subdomain_map.emplace(subdomain_ids[i], subdomain_names[i]);
1737  }
1738 }
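
Both this function and read_serialized_bc_names() below follow the same rank-0-reads, everyone-broadcasts idiom. A standalone sketch of the pattern, assuming only a TIMPI-style communicator that provides rank() and broadcast():

  #include <istream>

  // Sketch: read a value on processor 0, then share it with all ranks.
  template <typename T, typename Communicator>
  T read_on_root_and_broadcast (std::istream & in, const Communicator & comm)
  {
    T value{};
    if (comm.rank() == 0)
      in >> value;         // only rank 0 touches the stream
    comm.broadcast(value); // root defaults to processor 0
    return value;
  }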
1739 
1740 
1741 template <typename T>
1742 void
1743 XdrIO::read_serialized_connectivity (Xdr & io,
1744  const dof_id_type n_elem,
1745  const std::vector<new_header_id_type> & meta_data,
1746  T /*type_size*/)
1747 {
1748  libmesh_assert (io.reading());
1749 
1750  if (!n_elem) return;
1751 
1752  const bool
1753  read_p_level = ("." == this->polynomial_level_file_name()),
1754  read_partitioning = ("." == this->partition_map_file_name()),
1755  read_subdomain_id = ("." == this->subdomain_map_file_name());
1756 
1757  // convenient reference to our mesh
1758  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1759 
1760  // Keep track of what kinds of elements this file contains
1761  elems_of_dimension.clear();
1762  elems_of_dimension.resize(4, false);
1763 
1764  std::vector<T> conn, input_buffer(100 /* oversized ! */);
1765 
1766  int level=-1;
1767 
1768  // Version 0.9.2+ introduces unique ids
1769  const size_t unique_id_size_index = 3;
1770 
1771  const bool read_unique_id =
1772  (version_at_least_0_9_2()) &&
1773  meta_data[unique_id_size_index];
1774 
1775  // Version 1.8.0+ introduces elem integers
1776  const std::size_t n_elem_integers_index = 12;
1777  new_header_id_type n_elem_integers = 0;
1778  if (version_at_least_1_8_0())
1779  {
1780  libmesh_assert_greater_equal(meta_data.size(), 13);
1781  n_elem_integers = meta_data[n_elem_integers_index];
1782  }
1783 
1784  T n_elem_at_level=0, n_processed_at_level=0;
1785  for (dof_id_type blk=0, first_elem=0, last_elem=0;
1786  last_elem<n_elem; blk++)
1787  {
1788  first_elem = cast_int<dof_id_type>(blk*io_blksize);
1789  last_elem = cast_int<dof_id_type>(std::min(cast_int<std::size_t>((blk+1)*io_blksize),
1790  cast_int<std::size_t>(n_elem)));
1791 
1792  conn.clear();
1793 
1794  if (this->processor_id() == 0)
1795  for (dof_id_type e=first_elem; e<last_elem; e++, n_processed_at_level++)
1796  {
1797  if (n_processed_at_level == n_elem_at_level)
1798  {
1799  // get the number of elements to read at this level
1800  io.data (n_elem_at_level);
1801  n_processed_at_level = 0;
1802  level++;
1803  }
1804 
1805  // "pos" is a cursor into input_buffer
1806  unsigned int pos = 0;
1807 
1808  // get the element type,
1809  io.data_stream (&input_buffer[pos++], 1);
1810 
1811  if (read_unique_id)
1812  io.data_stream (&input_buffer[pos++], 1);
1813  // Older versions won't have this field at all (no increment on pos)
1814 
1815  // maybe the parent
1816  if (level)
1817  io.data_stream (&input_buffer[pos++], 1);
1818  else
1819  // We can't always fit DofObject::invalid_id in an
1820  // xdr_id_type
1821  input_buffer[pos++] = static_cast<T>(-1);
1822 
1823  // maybe the processor id
1824  if (read_partitioning)
1825  io.data_stream (&input_buffer[pos++], 1);
1826  else
1827  input_buffer[pos++] = 0;
1828 
1829  // maybe the subdomain id
1830  if (read_subdomain_id)
1831  io.data_stream (&input_buffer[pos++], 1);
1832  else
1833  input_buffer[pos++] = 0;
1834 
1835  // maybe the p level
1836  if (read_p_level)
1837  io.data_stream (&input_buffer[pos++], 1);
1838  else
1839  input_buffer[pos++] = 0;
1840 
1841  const unsigned int n_nodes0 = Elem::type_to_n_nodes_map[input_buffer[0]];
1842  if (n_nodes0 == invalid_uint)
1843  libmesh_not_implemented();
1844 
1845  // and all the nodes
1846  libmesh_assert_less (pos+n_nodes0, input_buffer.size());
1847  io.data_stream (&input_buffer[pos], n_nodes0);
1848 
1849  // Advance input_buffer cursor by number of nodes in this element
1850  pos += n_nodes0;
1851 
1852  // and all the elem "extra" integers
1853  libmesh_assert_less (pos + n_elem_integers, input_buffer.size());
1854  io.data_stream (&input_buffer[pos], n_elem_integers);
1855 
1856  // Advance input_buffer cursor by number of extra integers read
1857  pos += n_elem_integers;
1858 
1859  // Insert input_buffer at end of "conn"
1860  conn.insert (conn.end(), input_buffer.begin(), input_buffer.begin() + pos);
1861  }
1862 
1863  std::size_t conn_size = conn.size();
1864  this->comm().broadcast(conn_size);
1865  conn.resize (conn_size);
1866  this->comm().broadcast (conn);
1867 
1868  // All processors now have the connectivity for this block.
1869  for (auto [e, it] = std::tuple{first_elem, conn.begin()}; e<last_elem; e++)
1870  {
1871  // Temporary variable for reading connectivity array
1872  // entries.
1873  T tmp;
1874 
1875  const ElemType elem_type = static_cast<ElemType>(*it++);
1876 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1877  // We are on all processors here, so the mesh can easily
1878  // assign consistent unique ids if the file doesn't specify
1879  // them later. We'll use element ids for elements and start
1880  // past those for nodes.
1881  unique_id_type unique_id = e;
1882 #endif
1883  if (read_unique_id)
1884  {
1885  tmp = *it++;
1886 
1887 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1888  unique_id = cast_int<unique_id_type>(tmp);
1889 #endif
1890  }
1891 
1892  tmp = *it++;
1893  const dof_id_type parent_id =
1894  (tmp == static_cast<T>(-1)) ? DofObject::invalid_id : cast_int<dof_id_type>(tmp);
1895 
1896  const processor_id_type proc_id =
1897  cast_int<processor_id_type>(*it++);
1898 
1899  const subdomain_id_type subdomain_id =
1900  cast_int<subdomain_id_type>(*it++);
1901 
1902  tmp = *it++;
1903 #ifdef LIBMESH_ENABLE_AMR
1904  const unsigned int p_level = cast_int<unsigned int>(tmp);
1905 #endif
1906 
1907  Elem * parent = (parent_id == DofObject::invalid_id) ?
1908  nullptr : mesh.elem_ptr(parent_id);
1909 
1910  auto elem = Elem::build(elem_type, parent);
1911 
1912  elem->set_id() = e;
1913 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1914  elem->set_unique_id(unique_id);
1915 #endif
1916  elem->processor_id() = proc_id;
1917  elem->subdomain_id() = subdomain_id;
1918 #ifdef LIBMESH_ENABLE_AMR
1919  elem->hack_p_level(p_level);
1920 
1921  if (parent)
1922  {
1923  parent->add_child(elem.get());
1924  parent->set_refinement_flag (Elem::INACTIVE);
1925  elem->set_refinement_flag (Elem::JUST_REFINED);
1926  }
1927 #endif
1928 
1929  // Node ids for this Elem
1930  for (unsigned int n=0, n_n = elem->n_nodes(); n != n_n;
1931  n++, ++it)
1932  {
1933  const dof_id_type global_node_number =
1934  cast_int<dof_id_type>(*it);
1935 
1936  Node *node = mesh.query_node_ptr(global_node_number);
1937 
1938  if (node)
1939  elem->set_node(n, node);
1940  else
1941  {
1942  std::unique_ptr<Node> new_node = Node::build(Point(), global_node_number);
1943 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1944  // If we have to overwrite this it'll happen later
1945  new_node->set_unique_id(n_elem + global_node_number);
1946 #endif
1947  elem->set_node(n, mesh.add_node(std::move(new_node)));
1948  }
1949  }
1950 
1951  elems_of_dimension[elem->dim()] = true;
1952  Elem * added_elem = mesh.add_elem(std::move(elem));
1953 
1954  // Make sure that the Elem we just added to the Mesh has
1955  // space for the required number of extra integers. Note
1956  // that we have to add the Elem to the Mesh prior to
1957  // accessing any of its extra integers, since otherwise the
1958  // Elem does not know that it should have extra integers...
1959  libmesh_assert_equal_to(n_elem_integers, added_elem->n_extra_integers());
1960 
1961  // Extra integers for this Elem
1962  for (unsigned int ei=0; ei<n_elem_integers; ++ei)
1963  {
1964  auto extra_int = cast_int<dof_id_type>(*it++);
1965  added_elem->set_extra_integer(ei, extra_int);
1966  }
1967  }
1968  }
1969 
1970  // Set the mesh dimension to the largest encountered for an element
1971  for (unsigned char i=0; i!=4; ++i)
1972  if (elems_of_dimension[i])
1973  mesh.set_mesh_dimension(i);
1974 
1975 #if LIBMESH_DIM < 3
1976  libmesh_error_msg_if(mesh.mesh_dimension() > LIBMESH_DIM,
1977  "Cannot open dimension "
1978  << mesh.mesh_dimension()
1979  << " mesh file when configured without "
1980  << mesh.mesh_dimension()
1981  << "D support.");
1982 #endif
1983 }
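
The flat records decoded above follow one fixed layout per element. A hypothetical struct mirroring that layout (the file actually stores a flat stream of T values in this order, not a struct):

  template <typename T>
  struct ElemRecordSketch
  {
    T type;         // ElemType enumerator
    T unique_id;    // present only when read_unique_id is true
    T parent_id;    // (T)(-1) stands in for DofObject::invalid_id at level 0
    T processor_id; // 0 when no partition map is stored
    T subdomain_id; // 0 when no subdomain map is stored
    T p_level;      // 0 when no p-level file is stored
    // ...followed by Elem::type_to_n_nodes_map[type] node ids,
    // then n_elem_integers extra integers (1.8.0+ files only).
  };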
1984 
1985 
1986 
1987 void
1988 XdrIO::read_serialized_nodes (Xdr & io,
1989  const dof_id_type n_nodes,
1990  const std::vector<new_header_id_type> & meta_data)
1991 {
1992  libmesh_assert (io.reading());
1993 
1994  // convenient reference to our mesh
1995  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1996 
1997  if (!mesh.n_nodes()) return;
1998 
1999  // At this point the elements have been read from file and placeholder nodes
2000  // have been assigned. These nodes, however, do not have the proper (x,y,z)
2001  // locations or unique_id values. This method will read all the
2002  // nodes from disk, and each processor can then grab the individual
2003  // values it needs.
2004 
2005  // If the file includes unique ids for nodes (as indicated by a
2006  // flag in 0.9.6+ files), those will be read next.
2007 
2008  // build up a list of the nodes contained in our local mesh. These are the nodes
2009  // stored on the local processor whose (x,y,z) and unique_id values
2010  // need to be corrected.
2011  std::vector<dof_id_type> needed_nodes;
2012  needed_nodes.reserve (mesh.n_nodes());
2013  {
2014  for (auto & node : mesh.node_ptr_range())
2015  needed_nodes.push_back(node->id());
2016 
2017  std::sort (needed_nodes.begin(), needed_nodes.end());
2018 
2019  // We should not have any duplicate node->id()s
2020  libmesh_assert (std::unique(needed_nodes.begin(), needed_nodes.end()) == needed_nodes.end());
2021  }
2022 
2023  // Version 1.8.0+ introduces node integers
2024  const std::size_t n_node_integers_index = 11;
2025  new_header_id_type n_node_integers = 0;
2026  if (version_at_least_1_8_0())
2027  {
2028  libmesh_assert_greater_equal(meta_data.size(), 13);
2029  n_node_integers = meta_data[n_node_integers_index];
2030  }
2031 
2032  // Get the nodes in blocks.
2033  std::vector<Real> coords;
2034  std::pair<std::vector<dof_id_type>::iterator,
2035  std::vector<dof_id_type>::iterator> pos;
2036  pos.first = needed_nodes.begin();
2037 
2038  // Broadcast node coordinates
2039  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
2040  {
2041  first_node = blk*io_blksize;
2042  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
2043 
2044  coords.resize(3*(last_node - first_node));
2045 
2046  if (this->processor_id() == 0)
2047  io.data_stream (coords.empty() ? nullptr : coords.data(),
2048  cast_int<unsigned int>(coords.size()));
2049 
2050  // For large numbers of processors the majority of processors at any given
2051  // block may not actually need these data. It may be worth profiling this,
2052  // although it is expected that disk IO will be the bottleneck
2053  this->comm().broadcast (coords);
2054 
2055  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx+=3)
2056  {
2057  // first see if we need this node. use pos.first as a smart lower
2058  // bound, this will ensure that the size of the searched range
2059  // decreases as we match nodes.
2060  pos = std::equal_range (pos.first, needed_nodes.end(), n);
2061 
2062  if (pos.first != pos.second) // we need this node.
2063  {
2064  libmesh_assert_equal_to (*pos.first, n);
2065  libmesh_assert(!libmesh_isnan(coords[idx+0]));
2066  libmesh_assert(!libmesh_isnan(coords[idx+1]));
2067  libmesh_assert(!libmesh_isnan(coords[idx+2]));
2068  mesh.node_ref(cast_int<dof_id_type>(n)) =
2069  Point (coords[idx+0],
2070  coords[idx+1],
2071  coords[idx+2]);
2072 
2073  }
2074  }
2075  }
2076 
2077  // Check for node unique ids
2078  unsigned short read_unique_ids = false;
2079 
2080  if (version_at_least_0_9_6())
2081  {
2082  if (this->processor_id() == 0)
2083  io.data (read_unique_ids);
2084 
2085  this->comm().broadcast (read_unique_ids);
2086  }
2087 
2088  // If no node unique ids are in the file, well, we already
2089  // assigned our own ... *but*, our ids might conflict with the
2090  // file-assigned element unique ids. Let's check on that.
2091  // Should be easy since we're serialized.
2092  if (!read_unique_ids)
2093  {
2094 #ifdef LIBMESH_ENABLE_UNIQUE_ID
2095  unique_id_type max_elem_unique_id = 0;
2096  for (auto & elem : mesh.element_ptr_range())
2097  max_elem_unique_id = std::max(max_elem_unique_id,
2098  elem->unique_id()+1);
2099  if (max_elem_unique_id > mesh.n_elem())
2100  {
2101  for (auto & node : mesh.node_ptr_range())
2102  node->set_unique_id(max_elem_unique_id + node->id());
2103  }
2104  mesh.set_next_unique_id(max_elem_unique_id + mesh.n_nodes());
2105 #endif
2106  }
2107  else
2108  {
2109  std::vector<uint32_t> unique_32;
2110  std::vector<uint64_t> unique_64;
2111 
2112  // We're starting over from node 0 again
2113  pos.first = needed_nodes.begin();
2114 
2115  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
2116  {
2117  first_node = blk*io_blksize;
2118  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
2119 
2120  libmesh_assert((_field_width == 8) || (_field_width == 4));
2121 
2122  if (_field_width == 8)
2123  unique_64.resize(last_node - first_node);
2124  else
2125  unique_32.resize(last_node - first_node);
2126 
2127  if (this->processor_id() == 0)
2128  {
2129  if (_field_width == 8)
2130  io.data_stream (unique_64.empty() ? nullptr : unique_64.data(),
2131  cast_int<unsigned int>(unique_64.size()));
2132  else
2133  io.data_stream (unique_32.empty() ? nullptr : unique_32.data(),
2134  cast_int<unsigned int>(unique_32.size()));
2135  }
2136 
2137 #ifdef LIBMESH_ENABLE_UNIQUE_ID
2138  if (_field_width == 8)
2139  this->comm().broadcast (unique_64);
2140  else
2141  this->comm().broadcast (unique_32);
2142 
2143  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
2144  {
2145  // first see if we need this node. use pos.first as a smart lower
2146  // bound, this will ensure that the size of the searched range
2147  // decreases as we match nodes.
2148  pos = std::equal_range (pos.first, needed_nodes.end(), n);
2149 
2150  if (pos.first != pos.second) // we need this node.
2151  {
2152  libmesh_assert_equal_to (*pos.first, n);
2153  if (_field_width == 8)
2154  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id(unique_64[idx]);
2155  else
2156  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id(unique_32[idx]);
2157  }
2158  }
2159 #endif // LIBMESH_ENABLE_UNIQUE_ID
2160  }
2161  }
2162 
2163  // Read extra node integers (if present) in chunks on processor 0 and broadcast to
2164  // all other procs. For a ReplicatedMesh, all procs need to know about all Nodes'
2165  // extra integers. For a DistributedMesh, this broadcasting will transmit some
2166  // redundant information.
2167  if (n_node_integers)
2168  {
2169  std::vector<dof_id_type> extra_integers;
2170 
2171  // We're starting over from node 0 again
2172  pos.first = needed_nodes.begin();
2173 
2174  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
2175  {
2176  first_node = blk*io_blksize;
2177  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
2178 
2179  extra_integers.resize((last_node - first_node) * n_node_integers);
2180 
2181  // Read in block of node integers on proc 0 only...
2182  if (this->processor_id() == 0)
2183  io.data_stream (extra_integers.empty() ? nullptr : extra_integers.data(),
2184  cast_int<unsigned int>(extra_integers.size()));
2185 
2186  // ... and broadcast it to all other procs.
2187  this->comm().broadcast (extra_integers);
2188 
2189  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
2190  {
2191  // first see if we need this node. use pos.first as a smart lower
2192  // bound, this will ensure that the size of the searched range
2193  // decreases as we match nodes.
2194  pos = std::equal_range (pos.first, needed_nodes.end(), n);
2195 
2196  if (pos.first != pos.second) // we need this node.
2197  {
2198  libmesh_assert_equal_to (*pos.first, n);
2199 
2200  Node & node_ref = mesh.node_ref(cast_int<dof_id_type>(n));
2201  for (unsigned int i=0; i != n_node_integers; ++i)
2202  node_ref.set_extra_integer(i, extra_integers[n_node_integers*idx + i]);
2203  }
2204  }
2205  } // end for (block-based extra integer reads)
2206  } // end if (n_node_integers)
2207 }
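
The node-matching idiom used three times above (coordinates, unique ids, extra integers) shrinks a sorted search range as the ids increase. A self-contained sketch of that trick, assuming a sorted vector of needed ids:

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  // Visit every id in "needed" (sorted ascending) that appears in the
  // increasing stream 0..n-1, using the previous match as the next
  // search's lower bound -- the same idiom as read_serialized_nodes().
  template <typename Visitor>
  void match_needed_ids (const std::vector<std::size_t> & needed,
                         std::size_t n, Visitor visit)
  {
    auto lower = needed.begin();
    for (std::size_t id = 0; id != n; ++id)
      {
        const auto range = std::equal_range(lower, needed.end(), id);
        if (range.first != range.second) // we need this id
          visit(id);
        lower = range.first; // the searched range only shrinks from here
      }
  }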
2208 
2209 
2210 
2211 template <typename T>
2212 void XdrIO::read_serialized_bcs_helper (Xdr & io, T /*type_size*/, const std::string bc_type)
2213 {
2214  if (this->boundary_condition_file_name() == "n/a") return;
2215 
2216  libmesh_assert (io.reading());
2217 
2218  // convenient reference to our mesh
2219  MeshBase & mesh = MeshInput<MeshBase>::mesh();
2220 
2221  // and our boundary info object
2222  BoundaryInfo & boundary_info = mesh.get_boundary_info();
2223 
2224  // Version 0.9.2+ introduces entity names
2225  read_serialized_bc_names(io, boundary_info, true); // sideset names
2226 
2227  std::vector<T> input_buffer;
2228 
2229  new_header_id_type n_bcs=0;
2230  if (this->processor_id() == 0)
2231  {
2232  if (this->version_at_least_1_3_0())
2233  io.data (n_bcs);
2234  else
2235  {
2236  old_header_id_type temp;
2237  io.data (temp);
2238  n_bcs = temp;
2239  }
2240  }
2241  this->comm().broadcast (n_bcs);
2242 
2243  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_bcs; blk++)
2244  {
2245  first_bc = blk*io_blksize;
2246  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_bcs));
2247 
2248  input_buffer.resize (3*(last_bc - first_bc));
2249 
2250  if (this->processor_id() == 0)
2251  io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
2252  cast_int<unsigned int>(input_buffer.size()));
2253 
2254  this->comm().broadcast (input_buffer);
2255 
2256  // Look for BCs in this block for all the level-0 elements we have
2257  // (not just local ones). Do this by checking all entries for
2258  // IDs matching an element we can query.
2259  // We cannot rely on nullptr neighbors at this point since the neighbor
2260  // data structure has not been initialized.
2261  for (std::size_t idx=0, ibs=input_buffer.size(); idx<ibs; idx+=3)
2262  {
2263  const dof_id_type dof_id =
2264  cast_int<dof_id_type>(input_buffer[idx+0]);
2265  const unsigned short side =
2266  cast_int<unsigned short>(input_buffer[idx+1]);
2267  const boundary_id_type bc_id =
2268  cast_int<boundary_id_type>(input_buffer[idx+2]);
2269 
2270  const Elem * elem = mesh.query_elem_ptr(dof_id);
2271  if (!elem)
2272  continue;
2273 
2274  if (bc_type == "side")
2275  {
2276  libmesh_assert_less (side, elem->n_sides());
2277  boundary_info.add_side (elem, side, bc_id);
2278  }
2279  else if (bc_type == "edge")
2280  {
2281  libmesh_assert_less (side, elem->n_edges());
2282  boundary_info.add_edge (elem, side, bc_id);
2283  }
2284  else if (bc_type == "shellface")
2285  {
2286  // Shell face IDs can only be 0 or 1.
2287  libmesh_assert_less(side, 2);
2288 
2289  boundary_info.add_shellface (elem, side, bc_id);
2290  }
2291  else
2292  {
2293  libmesh_error_msg("bc_type not recognized: " + bc_type);
2294  }
2295  }
2296  input_buffer.clear();
2297  }
2298 }
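
Each boundary condition above occupies three consecutive buffer entries. A small decoding sketch under that assumption; the struct and free function are illustrative only:

  #include <cstddef>
  #include <vector>
  #include "libmesh/id_types.h"
  #include "libmesh/libmesh_common.h" // cast_int

  // Sketch: unpack one (element, side, boundary id) triplet from the
  // flat buffer read by read_serialized_bcs_helper().
  struct BcRecordSketch
  {
    libMesh::dof_id_type elem_id;
    unsigned short side; // side, edge, or shellface number
    libMesh::boundary_id_type bc_id;
  };

  template <typename T>
  BcRecordSketch decode_bc (const std::vector<T> & buf, std::size_t idx)
  {
    return { libMesh::cast_int<libMesh::dof_id_type>(buf[idx + 0]),
             libMesh::cast_int<unsigned short>(buf[idx + 1]),
             libMesh::cast_int<libMesh::boundary_id_type>(buf[idx + 2]) };
  }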
2299 
2300 
2301 
2302 template <typename T>
2303 void XdrIO::read_serialized_side_bcs (Xdr & io, T type_size)
2304 {
2305  read_serialized_bcs_helper(io, type_size, "side");
2306 }
2307 
2308 
2309 
2310 template <typename T>
2311 void XdrIO::read_serialized_edge_bcs (Xdr & io, T type_size)
2312 {
2313  read_serialized_bcs_helper(io, type_size, "edge");
2314 }
2315 
2316 
2317 
2318 template <typename T>
2319 void XdrIO::read_serialized_shellface_bcs (Xdr & io, T type_size)
2320 {
2321  read_serialized_bcs_helper(io, type_size, "shellface");
2322 }
2323 
2324 
2325 
2326 template <typename T>
2327 void XdrIO::read_serialized_nodesets (Xdr & io, T /*type_size*/)
2328 {
2329  if (this->boundary_condition_file_name() == "n/a") return;
2330 
2331  libmesh_assert (io.reading());
2332 
2333  // convenient reference to our mesh
2334  MeshBase & mesh = MeshInput<MeshBase>::mesh();
2335 
2336  // and our boundary info object
2337  BoundaryInfo & boundary_info = mesh.get_boundary_info();
2338 
2339  // Version 0.9.2+ introduces entity names
2340  read_serialized_bc_names(io, boundary_info, false); // nodeset names
2341 
2342  std::vector<T> input_buffer;
2343 
2344  new_header_id_type n_nodesets=0;
2345  if (this->processor_id() == 0)
2346  {
2347  if (this->version_at_least_1_3_0())
2348  io.data (n_nodesets);
2349  else
2350  {
2351  old_header_id_type temp;
2352  io.data (temp);
2353  n_nodesets = temp;
2354  }
2355  }
2356  this->comm().broadcast (n_nodesets);
2357 
2358  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_nodesets; blk++)
2359  {
2360  first_bc = blk*io_blksize;
2361  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_nodesets));
2362 
2363  input_buffer.resize (2*(last_bc - first_bc));
2364 
2365  if (this->processor_id() == 0)
2366  io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
2367  cast_int<unsigned int>(input_buffer.size()));
2368 
2369  this->comm().broadcast (input_buffer);
2370 
2371  // Look for BCs in this block for all nodes we have (not just
2372  // local ones). Do this by checking all entries for
2373  // IDs matching a node we can query.
2374  for (std::size_t idx=0, ibs=input_buffer.size(); idx<ibs; idx+=2)
2375  {
2376  const dof_id_type dof_id =
2377  cast_int<dof_id_type>(input_buffer[idx+0]);
2378  const boundary_id_type bc_id =
2379  cast_int<boundary_id_type>(input_buffer[idx+1]);
2380 
2381  const Node * node = mesh.query_node_ptr(dof_id);
2382  if (node)
2383  boundary_info.add_node (node, bc_id);
2384  }
2385  input_buffer.clear();
2386  }
2387 }
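
After the side, edge, shellface, and nodeset readers have all run, the attached counts can be checked through BoundaryInfo. A hedged usage sketch, with "mesh" assumed to be an already-read mesh object:

  // Sketch: summarize what the BC readers attached.
  const libMesh::BoundaryInfo & bi = mesh.get_boundary_info();
  libMesh::out << bi.n_boundary_conds()  << " side conditions, "
               << bi.n_edge_conds()      << " edge conditions, "
               << bi.n_shellface_conds() << " shellface conditions, "
               << bi.n_nodeset_conds()   << " nodeset conditions\n";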
2388 
2389 
2390 
2391 void XdrIO::read_serialized_bc_names (Xdr & io, BoundaryInfo & info, bool is_sideset)
2392 {
2393  const bool read_entity_info = version_at_least_0_9_2();
2394  const bool use_new_header_type (this->version_at_least_1_3_0());
2395  if (read_entity_info)
2396  {
2397  new_header_id_type n_boundary_names = 0;
2398  std::vector<new_header_id_type> boundary_ids;
2399  std::vector<std::string> boundary_names;
2400 
2401  // Read the sideset or nodeset names
2402  if (this->processor_id() == 0)
2403  {
2404  if (use_new_header_type)
2405  io.data(n_boundary_names);
2406  else
2407  {
2408  old_header_id_type temp;
2409  io.data(temp);
2410  n_boundary_names = temp;
2411  }
2412 
2413  boundary_names.resize(n_boundary_names);
2414 
2415  if (n_boundary_names)
2416  {
2417  if (use_new_header_type)
2418  io.data(boundary_ids);
2419  else
2420  {
2421  std::vector<old_header_id_type> temp(n_boundary_names);
2422  io.data(temp);
2423  boundary_ids.assign(temp.begin(), temp.end());
2424  }
2425  io.data(boundary_names);
2426  }
2427  }
2428 
2429  // Broadcast the boundary names to all processors
2430  this->comm().broadcast(n_boundary_names);
2431  if (n_boundary_names == 0)
2432  return;
2433 
2434  boundary_ids.resize(n_boundary_names);
2435  boundary_names.resize(n_boundary_names);
2436  this->comm().broadcast(boundary_ids);
2437  this->comm().broadcast(boundary_names);
2438 
2439  // Reassemble the named boundary information
2440  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
2441  info.set_sideset_name_map() : info.set_nodeset_name_map();
2442 
2443  for (unsigned int i=0; i<n_boundary_names; ++i)
2444  boundary_map.emplace(cast_int<boundary_id_type>(boundary_ids[i]), boundary_names[i]);
2445  }
2446 }
2447 
2448 
2449 
2450 void XdrIO::pack_element (std::vector<xdr_id_type> & conn,
2451  const Elem * elem,
2452  const dof_id_type parent_id,
2453  const dof_id_type parent_pid,
2454  const new_header_id_type n_elem_integers) const
2455 {
2456  libmesh_assert(elem);
2457  libmesh_assert_equal_to (elem->n_nodes(), Elem::type_to_n_nodes_map[elem->type()]);
2458 
2459  conn.push_back(elem->n_nodes());
2460 
2461  conn.push_back (elem->type());
2462 
2463  // In version 0.7.0+ "id" is stored but is not used. In version 0.9.2+
2464  // we will store unique_id instead, therefore there is no need to
2465  // check for the older version when writing the unique_id.
2466  conn.push_back (elem->unique_id());
2467 
2468  if (parent_id != DofObject::invalid_id)
2469  {
2470  conn.push_back (parent_id);
2471  libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_id);
2472  conn.push_back (parent_pid);
2473  }
2474 
2475  conn.push_back (elem->processor_id());
2476  conn.push_back (elem->subdomain_id());
2477 
2478 #ifdef LIBMESH_ENABLE_AMR
2479  conn.push_back (elem->p_level());
2480 #endif
2481 
2482  for (auto n : elem->node_index_range())
2483  conn.push_back (elem->node_id(n));
2484 
2485  // Write extra elem integers to connectivity array
2486  for (unsigned int i=0; i != n_elem_integers; ++i)
2487  conn.push_back(elem->get_extra_integer(i));
2488 }
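
pack_element() is the write-side counterpart of the record decoded in read_serialized_connectivity(). A standalone sketch of the same packing order for a level-0 element (a hypothetical free function, not the member above):

  #include <vector>
  #include "libmesh/elem.h"

  void pack_element_sketch (std::vector<libMesh::largest_id_type> & conn,
                            const libMesh::Elem & elem)
  {
    conn.push_back(elem.n_nodes());
    conn.push_back(elem.type());
    conn.push_back(elem.unique_id());
    // (a child element would insert its parent id and parent pid here)
    conn.push_back(elem.processor_id());
    conn.push_back(elem.subdomain_id());
  #ifdef LIBMESH_ENABLE_AMR
    conn.push_back(elem.p_level());
  #endif
    for (auto n : elem.node_index_range())
      conn.push_back(elem.node_id(n));
    // extra elem integers (1.8.0+) would follow here
  }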
2489 
2490 bool XdrIO::version_at_least_0_9_2 () const
2491 {
2492  return
2493  (this->version().find("0.9.2") != std::string::npos) ||
2494  (this->version().find("0.9.6") != std::string::npos) ||
2495  (this->version().find("1.1.0") != std::string::npos) ||
2496  (this->version().find("1.3.0") != std::string::npos) ||
2497  (this->version().find("1.8.0") != std::string::npos);
2498 }
2499 
2500 bool XdrIO::version_at_least_0_9_6 () const
2501 {
2502  return
2503  (this->version().find("0.9.6") != std::string::npos) ||
2504  (this->version().find("1.1.0") != std::string::npos) ||
2505  (this->version().find("1.3.0") != std::string::npos) ||
2506  (this->version().find("1.8.0") != std::string::npos);
2507 }
2508 
2509 bool XdrIO::version_at_least_1_1_0 () const
2510 {
2511  return
2512  (this->version().find("1.1.0") != std::string::npos) ||
2513  (this->version().find("1.3.0") != std::string::npos) ||
2514  (this->version().find("1.8.0") != std::string::npos);
2515 }
2516 
2517 bool XdrIO::version_at_least_1_3_0 () const
2518 {
2519  return
2520  (this->version().find("1.3.0") != std::string::npos) ||
2521  (this->version().find("1.8.0") != std::string::npos);
2522 }
2523 
2524 bool XdrIO::version_at_least_1_8_0 () const
2525 {
2526  return
2527  (this->version().find("1.8.0") != std::string::npos);
2528 }
2529 
2530 } // namespace libMesh
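
Taken together, a minimal read/write round trip through this class looks like the sketch below; the file names are placeholders, and the header's version string ("libMesh-1.8.0" and friends) is what drives the version_at_least_*() gates above:

  #include "libmesh/libmesh.h"
  #include "libmesh/replicated_mesh.h"
  #include "libmesh/xdr_io.h"

  int main (int argc, char ** argv)
  {
    libMesh::LibMeshInit init(argc, argv);
    libMesh::ReplicatedMesh mesh(init.comm());

    // Read an ASCII (.xda) mesh; pass binary=true to the constructor
    // to read the binary (.xdr) flavor instead.
    libMesh::XdrIO(mesh).read("in.xda");
    mesh.prepare_for_use();

    // Write the mesh back out in binary (XDR) format.
    libMesh::XdrIO(mesh, /*binary=*/true).write("out.xdr");
    return 0;
  }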
Definition: xdr_io.C:2524