libMesh
xdr_io.C
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2025 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 
20 // libMesh includes
21 #include "libmesh/xdr_io.h"
22 #include "libmesh/boundary_info.h"
23 #include "libmesh/elem.h"
24 #include "libmesh/enum_xdr_mode.h"
25 #include "libmesh/int_range.h"
26 #include "libmesh/libmesh_logging.h"
27 #include "libmesh/mesh_base.h"
28 #include "libmesh/mesh_tools.h"
29 #include "libmesh/node.h"
30 #include "libmesh/partitioner.h"
31 #include "libmesh/xdr_cxx.h"
32 
33 // TIMPI includes
34 #include "timpi/parallel_implementation.h"
35 #include "timpi/parallel_sync.h"
36 
37 // C++ includes
38 #include <iostream>
39 #include <iomanip>
40 #include <cstdio>
41 #include <vector>
42 #include <string>
43 #include <tuple>
44 
45 namespace libMesh
46 {
47 
48 //-----------------------------------------------
49 // anonymous namespace for implementation details
50 namespace {
51 
52 template <class T, class U>
53 struct libmesh_type_is_same {
54  static const bool value = false;
55 };
56 
57 template <class T>
58 struct libmesh_type_is_same<T, T> {
59  static const bool value = true;
60 };
61 
62 }
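
// The trait above is a file-local, pre-C++11-style equivalent of
// std::is_same.  Two illustrative compile-time checks (a sketch, not
// part of the original source):
static_assert(libmesh_type_is_same<uint64_t, uint64_t>::value,
              "identical types must compare equal");
static_assert(!libmesh_type_is_same<uint32_t, uint64_t>::value,
              "distinct types must compare unequal");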
63 
64 
65 
66 // ------------------------------------------------------------
67 // XdrIO static data
68 const std::size_t XdrIO::io_blksize = 128000;
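
// io_blksize caps how many ids each block-based loop below buffers per
// I/O pass: object id i falls in block i/io_blksize.  A sketch of the
// recurring block arithmetic (illustrative only; max_id stands in for
// max_node_id or n_elem):
//
//   const std::size_t first = blk * io_blksize;             // first id in this block
//   const std::size_t last  = std::min((blk+1)*io_blksize,  // one past the last id
//                                      std::size_t(max_id));
//   const std::size_t n_ids = last - first;                 // slots handled this pass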
69 
70 
71 
72 // ------------------------------------------------------------
73 // XdrIO members
74 XdrIO::XdrIO (MeshBase & mesh, const bool binary_in) :
75  MeshInput<MeshBase> (mesh,/* is_parallel_format = */ true),
76  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
77  ParallelObject (mesh),
78  _binary (binary_in),
79  _legacy (false),
80  _write_serial (false),
81  _write_parallel (false),
82 #ifdef LIBMESH_ENABLE_UNIQUE_ID
83  _write_unique_id (true),
84 #else
85  _write_unique_id (false),
86 #endif
87  _field_width (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
88  _version ("libMesh-1.8.0"),
89  _bc_file_name ("n/a"),
90  _partition_map_file ("n/a"),
91  _subdomain_map_file ("n/a"),
92  _p_level_file ("n/a")
93 {
94 }
95 
96 
97 
98 XdrIO::XdrIO (const MeshBase & mesh, const bool binary_in) :
99  MeshInput<MeshBase> (), // write-only
100  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
101  ParallelObject (mesh),
102  _binary (binary_in),
103  _legacy (false),
104  _write_serial (false),
105  _write_parallel (false),
106 #ifdef LIBMESH_ENABLE_UNIQUE_ID
107  _write_unique_id (true),
108 #else
109  _write_unique_id (false),
110 #endif
111  _field_width (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
112  _version ("libMesh-1.8.0"),
113  _bc_file_name ("n/a"),
114  _partition_map_file ("n/a"),
115  _subdomain_map_file ("n/a"),
116  _p_level_file ("n/a")
117 {
118 }
119 
120 
121 
122 XdrIO::~XdrIO () = default;
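
// Usage sketch (hypothetical helper, not called anywhere in libMesh):
// an explicit XdrIO round trip.  The file name is arbitrary, and
// prepare_for_use() is the caller's responsibility after a manual read.
inline void xdr_round_trip_example (MeshBase & mesh, MeshBase & copy)
{
  XdrIO(mesh, /*binary_in=*/true).write("mesh.xdr"); // serialize to binary XDR
  XdrIO(copy, /*binary_in=*/true).read("mesh.xdr");  // collective re-read
  copy.prepare_for_use();                            // renumber, partition, etc.
}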
123 
124 
125 
126 void XdrIO::write (const std::string & name)
127 {
128  libmesh_error_msg_if(this->legacy(), "We don't support writing parallel files in the legacy format.");
129 
130  Xdr io ((this->processor_id() == 0) ? name : "", this->binary() ? ENCODE : WRITE);
131 
132  LOG_SCOPE("write()","XdrIO");
133 
134  // convenient reference to our mesh
135  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
136 
137  new_header_id_type n_elem = mesh.n_elem();
138  new_header_id_type max_node_id = mesh.max_node_id();
139 
140  new_header_id_type n_side_bcs = mesh.get_boundary_info().n_boundary_conds();
141  new_header_id_type n_edge_bcs = mesh.get_boundary_info().n_edge_conds();
142  new_header_id_type n_shellface_bcs = mesh.get_boundary_info().n_shellface_conds();
143  new_header_id_type n_nodesets = mesh.get_boundary_info().n_nodeset_conds();
144  unsigned int n_p_levels = MeshTools::n_p_levels (mesh);
145  new_header_id_type n_elem_integers = mesh.n_elem_integers();
146  new_header_id_type n_node_integers = mesh.n_node_integers();
147  std::vector<dof_id_type> elemset_codes = mesh.get_elemset_codes();
148 
149  bool write_parallel_files = this->write_parallel();
150 
151  //-------------------------------------------------------------
152  // For all the optional files -- the default file name is "n/a".
153  // However, the user may specify an optional external file.
154 
155  // If there are BCs and the user has not already provided a
156  // file name then write to "."
157  if ((n_side_bcs || n_edge_bcs || n_shellface_bcs || n_nodesets) &&
158  this->boundary_condition_file_name() == "n/a")
159  this->boundary_condition_file_name() = ".";
160 
161  // If there is more than one subdomain and the user has not specified an
162  // external file then write the subdomain mapping to the default file "."
163  if ((mesh.n_subdomains() > 0) &&
164  (this->subdomain_map_file_name() == "n/a"))
165  this->subdomain_map_file_name() = ".";
166 
167  // In general we don't write the partition information.
168 
169  // If we have p levels and the user has not already provided
170  // a file name then write to "."
171  if ((n_p_levels > 1) &&
172  (this->polynomial_level_file_name() == "n/a"))
173  this->polynomial_level_file_name() = ".";
174 
175  // write the header
176  if (this->processor_id() == 0)
177  {
178  std::string full_ver = this->version() + (write_parallel_files ? " parallel" : "");
179  io.data (full_ver);
180 
181  io.data (n_elem, "# number of elements");
182  io.data (max_node_id, "# number of nodes"); // We'll write invalid coords into gaps
183 
184  io.data (this->boundary_condition_file_name(), "# boundary condition specification file");
185  io.data (this->subdomain_map_file_name(), "# subdomain id specification file");
186  io.data (this->partition_map_file_name(), "# processor id specification file");
187  io.data (this->polynomial_level_file_name(), "# p-level specification file");
188 
189  // Version 0.9.2+ introduces sizes for each type
190  new_header_id_type write_size = sizeof(xdr_id_type), zero_size = 0;
191 
192  const bool
193  write_p_level = ("." == this->polynomial_level_file_name()),
194  write_partitioning = ("." == this->partition_map_file_name()),
195  write_subdomain_id = ("." == this->subdomain_map_file_name()),
196  write_bcs = ("." == this->boundary_condition_file_name());
197 
198  io.data (write_size, "# type size");
199  io.data (_write_unique_id ? write_size : zero_size, "# uid size");
200  io.data (write_partitioning ? write_size : zero_size, "# pid size");
201  io.data (write_subdomain_id ? write_size : zero_size, "# sid size");
202  io.data (write_p_level ? write_size : zero_size, "# p-level size");
203  // Boundary Condition sizes
204  io.data (write_bcs ? write_size : zero_size, "# eid size"); // elem id
205  io.data (write_bcs ? write_size : zero_size, "# side size"); // side number
206  io.data (write_bcs ? write_size : zero_size, "# bid size"); // boundary id
207 
208  // Write the data size for extra integers stored on the mesh. Like
209  // everything else in the header, they will be of size write_size.
210  io.data((n_elem_integers || n_node_integers) ? write_size : zero_size, "# extra integer size");
211 
212  // Write the names of the extra node integers (see also: CheckpointIO).
213  std::vector<std::string> node_integer_names;
214  for (unsigned int i=0; i != n_node_integers; ++i)
215  node_integer_names.push_back(mesh.get_node_integer_name(i));
216  io.data(node_integer_names, "# node integer names");
217 
218  // Write the names of the extra elem integers (see also: CheckpointIO).
219  std::vector<std::string> elem_integer_names;
220  for (unsigned int i=0; i != n_elem_integers; ++i)
221  elem_integer_names.push_back(mesh.get_elem_integer_name(i));
222  io.data(elem_integer_names, "# elem integer names");
223 
224  // Write the vector of elemset codes to the header (this will
225  // also write the size of the vector, which is the number of
226  // elemset codes).
227  io.data(elemset_codes, "# elemset codes");
228 
229  // For each elemset code, write out the associated elemset ids
230  MeshBase::elemset_type id_set_to_fill;
231  for (const auto & elemset_code : elemset_codes)
232  {
233  mesh.get_elemsets(elemset_code, id_set_to_fill);
234 
235  // Transfer elemset ids to vector for writing
236  std::vector<dof_id_type> elemset_id_vec(id_set_to_fill.begin(), id_set_to_fill.end());
237 
238  // Write vector of elemset ids to file with comment
239  std::string comment_string = "# elemset ids for elemset code " + std::to_string(elemset_code);
240  io.data(elemset_id_vec, comment_string);
241  }
242  }
243 
244  if (write_parallel_files)
245  {
246  // Parallel xdr mesh files aren't implemented yet; until they
247  // are we'll just warn the user and write a serial file.
248  libMesh::out << "Warning! Parallel xda/xdr is not yet implemented.\n";
249  libMesh::out << "Writing a serialized file instead." << std::endl;
250 
251  // write subdomain names
252  this->write_serialized_subdomain_names(io);
253 
254  // write connectivity
255  this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), n_elem_integers);
256 
257  // write the nodal locations
258  this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id), n_node_integers);
259 
260  // write the side boundary condition information
261  this->write_serialized_side_bcs (io, n_side_bcs);
262 
263  // write the nodeset information
264  this->write_serialized_nodesets (io, n_nodesets);
265 
266  // write the edge boundary condition information
267  this->write_serialized_edge_bcs (io, n_edge_bcs);
268 
269  // write the "shell face" boundary condition information
270  this->write_serialized_shellface_bcs (io, n_shellface_bcs);
271  }
272  else
273  {
274  // write subdomain names
275  this->write_serialized_subdomain_names(io);
276 
277  // write connectivity
278  this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), n_elem_integers);
279 
280  // write the nodal locations
281  this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id), n_node_integers);
282 
283  // write the side boundary condition information
284  this->write_serialized_side_bcs (io, n_side_bcs);
285 
286  // write the nodeset information
287  this->write_serialized_nodesets (io, n_nodesets);
288 
289  // write the edge boundary condition information
290  this->write_serialized_edge_bcs (io, n_edge_bcs);
291 
292  // write the "shell face" boundary condition information
293  this->write_serialized_shellface_bcs (io, n_shellface_bcs);
294  }
295 
296  // pause all processes until the writing ends -- this will
297  // protect against the pathological case where a write is
298  // followed immediately by a read. The write must be
299  // guaranteed to complete first.
300  io.close();
301  this->comm().barrier();
302 }
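
// Note on the header convention above: each optional section's "file
// name" is really a three-way flag -- "n/a" means the section is
// absent, "." means it is written inline in this same file, and any
// other string names an external file.  A caller may override the
// defaults before writing; a sketch using the accessors above:
//
//   XdrIO xdr(mesh, /*binary_in=*/true);
//   xdr.partition_map_file_name() = ".";    // do record the partitioning
//   xdr.subdomain_map_file_name() = "n/a";  // suppress subdomain ids
//   xdr.write("mesh.xdr");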
303 
304 
305 
306 void XdrIO::write_serialized_subdomain_names(Xdr & io) const
307 {
308  if (this->processor_id() == 0)
309  {
310  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
311 
312  const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();
313 
314  std::vector<new_header_id_type> subdomain_ids;
315  subdomain_ids.reserve(subdomain_map.size());
316 
317  std::vector<std::string> subdomain_names;
318  subdomain_names.reserve(subdomain_map.size());
319 
320  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
321  // return writable references in mesh_base, it's possible for the user to leave some entity names
322  // blank. We can't write those to the XDA file.
323  new_header_id_type n_subdomain_names = 0;
324  for (const auto & [sbd_id, name] : subdomain_map)
325  if (!name.empty())
326  {
327  n_subdomain_names++;
328  subdomain_ids.push_back(sbd_id);
329  subdomain_names.push_back(name);
330  }
331 
332  io.data(n_subdomain_names, "# subdomain id to name map");
333  // Write out the ids and names in two vectors
334  if (n_subdomain_names)
335  {
336  io.data(subdomain_ids);
337  io.data(subdomain_names);
338  }
339  }
340 }
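
// Only non-empty names survive the filtering above.  A sketch of how
// subdomain names are typically attached upstream (MeshBase API; ids
// and names arbitrary):
//
//   mesh.subdomain_name(1) = "fluid";
//   mesh.subdomain_name(2) = "solid";
//   // an id left unnamed (or named "") is simply skipped by this writer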
341 
342 
343 
344 void
345 XdrIO::write_serialized_connectivity (Xdr & io,
346  const dof_id_type libmesh_dbg_var(n_elem),
347  const new_header_id_type n_elem_integers) const
348 {
349  libmesh_assert (io.writing());
350 
351  const bool
352  write_p_level = ("." == this->polynomial_level_file_name()),
353  write_partitioning = ("." == this->partition_map_file_name()),
354  write_subdomain_id = ("." == this->subdomain_map_file_name());
355 
356  // convenient reference to our mesh
357  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
358  libmesh_assert_equal_to (n_elem, mesh.n_elem());
359 
360  // We will only write active elements and their parents.
361  const unsigned int n_active_levels = MeshTools::n_active_levels (mesh);
362  std::vector<xdr_id_type> n_global_elem_at_level(n_active_levels);
363 
364  // Find the number of local and global elements at each level
365 #ifndef NDEBUG
366  xdr_id_type tot_n_elem = 0;
367 #endif
368  for (unsigned int level=0; level<n_active_levels; level++)
369  {
370  n_global_elem_at_level[level] =
371  MeshTools::n_elem(mesh.local_level_elements_begin(level),
372  mesh.local_level_elements_end(level));
373 
374  this->comm().sum(n_global_elem_at_level[level]);
375 #ifndef NDEBUG
376  tot_n_elem += n_global_elem_at_level[level];
377 #endif
378  libmesh_assert_less_equal (n_global_elem_at_level[level], n_elem);
379  libmesh_assert_less_equal (tot_n_elem, n_elem);
380  }
381 
382  std::vector<xdr_id_type>
383  xfer_conn, recv_conn;
384  std::vector<dof_id_type>
385  n_elem_on_proc(this->n_processors()), processor_offsets(this->n_processors());
386  std::vector<xdr_id_type> output_buffer;
387  std::vector<std::size_t>
388  xfer_buf_sizes(this->n_processors());
389 
390 #ifdef LIBMESH_ENABLE_AMR
391  typedef std::map<dof_id_type, std::pair<processor_id_type, dof_id_type>> id_map_type;
392  id_map_type parent_id_map, child_id_map;
393 #endif
394 
395  dof_id_type my_next_elem=0, next_global_elem=0;
396 
397  //-------------------------------------------
398  // First write the level-0 elements directly.
399  for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
400  mesh.local_level_elements_end(0)))
401  {
402  pack_element (xfer_conn, elem,
403  /*parent_id=*/DofObject::invalid_id,
404  /*parent_pid=*/DofObject::invalid_id,
405  n_elem_integers);
406 #ifdef LIBMESH_ENABLE_AMR
407  parent_id_map[elem->id()] = std::make_pair(this->processor_id(),
408  my_next_elem);
409 #endif
410  ++my_next_elem;
411  }
412  xfer_conn.push_back(my_next_elem); // toss in the number of elements transferred.
413 
414  std::size_t my_size = xfer_conn.size();
415  this->comm().gather (0, my_next_elem, n_elem_on_proc);
416  this->comm().gather (0, my_size, xfer_buf_sizes);
417 
418  processor_offsets[0] = 0;
419  for (auto pid : IntRange<processor_id_type>(1, this->n_processors()))
420  processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
421 
422  // All processors send their xfer buffers to processor 0.
423  // Processor 0 will receive the data and write out the elements.
424  if (this->processor_id() == 0)
425  {
426  // Write the number of elements at this level.
427  {
428  std::string comment = "# n_elem at level 0", legend = ", [ type ";
429  if (_write_unique_id)
430  legend += "uid ";
431  if (write_partitioning)
432  legend += "pid ";
433  if (write_subdomain_id)
434  legend += "sid ";
435  if (write_p_level)
436  legend += "p_level ";
437  legend += "(n0 ... nN-1) ]";
438  comment += legend;
439  io.data (n_global_elem_at_level[0], comment);
440  }
441 
442  for (auto pid : make_range(this->n_processors()))
443  {
444  recv_conn.resize(xfer_buf_sizes[pid]);
445  if (pid == 0)
446  recv_conn = xfer_conn;
447  else
448  this->comm().receive (pid, recv_conn);
449 
450  // at a minimum, the buffer should contain the number of elements,
451  // which could be 0.
452  libmesh_assert (!recv_conn.empty());
453 
454  for (auto [elem, recv_conn_iter, n_elem_received] =
455  std::tuple{xdr_id_type(0), recv_conn.begin(), recv_conn.back()};
456  elem<n_elem_received; elem++, next_global_elem++)
457  {
458  output_buffer.clear();
459 
460  // n. nodes
461  const xdr_id_type n_nodes = *recv_conn_iter++;
462 
463  // type
464  output_buffer.push_back(*recv_conn_iter++);
465 
466  // unique_id
467  xdr_id_type tmp = *recv_conn_iter++;
468  if (_write_unique_id)
469  output_buffer.push_back(tmp);
470 
471  // processor id
472  tmp = *recv_conn_iter++;
473  if (write_partitioning)
474  output_buffer.push_back(tmp);
475 
476  // subdomain id
477  tmp = *recv_conn_iter++;
478  if (write_subdomain_id)
479  output_buffer.push_back(tmp);
480 
481 #ifdef LIBMESH_ENABLE_AMR
482  // p level
483  tmp = *recv_conn_iter++;
484  if (write_p_level)
485  output_buffer.push_back(tmp);
486 #endif
487 
488  for (dof_id_type node=0; node<n_nodes; node++)
489  output_buffer.push_back(*recv_conn_iter++);
490 
491  // Write out the elem extra integers after the connectivity
492  for (dof_id_type n=0; n<n_elem_integers; n++)
493  output_buffer.push_back(*recv_conn_iter++);
494 
495  io.data_stream
496  (output_buffer.data(),
497  cast_int<unsigned int>(output_buffer.size()),
498  cast_int<unsigned int>(output_buffer.size()));
499  }
500  }
501  }
502  else
503  this->comm().send (0, xfer_conn);
504 
505 #ifdef LIBMESH_ENABLE_AMR
506  //--------------------------------------------------------------------
507  // Next write the remaining elements indirectly through their parents.
508  // This will ensure that the children are written in the proper order
509  // so they can be reconstructed properly.
510  for (unsigned int level=1; level<n_active_levels; level++)
511  {
512  xfer_conn.clear();
513 
514  dof_id_type my_n_elem_written_at_level = 0;
515  for (const auto & parent : as_range(mesh.local_level_elements_begin(level-1),
516  mesh.local_level_elements_end(level-1)))
517  if (!parent->active()) // we only want the parent elements at this level, and
518  { // there is no direct iterator for this obscure use
519  id_map_type::iterator pos = parent_id_map.find(parent->id());
520  libmesh_assert (pos != parent_id_map.end());
521  const processor_id_type parent_pid = pos->second.first;
522  const dof_id_type parent_id = pos->second.second;
523  parent_id_map.erase(pos);
524 
525  for (auto & child : parent->child_ref_range())
526  {
527  pack_element (xfer_conn, &child, parent_id, parent_pid, n_elem_integers);
528 
529  // this approach introduces the possibility that we write
530  // non-local elements. These elements may well be parents
531  // at the next step
532  child_id_map[child.id()] = std::make_pair (child.processor_id(),
533  my_n_elem_written_at_level++);
534  my_next_elem++;
535  }
536  }
537  xfer_conn.push_back(my_n_elem_written_at_level);
538  my_size = xfer_conn.size();
539  this->comm().gather (0, my_size, xfer_buf_sizes);
540 
541  // Processor 0 will receive the data and write the elements.
542  if (this->processor_id() == 0)
543  {
544  // Write the number of elements at this level.
545  {
546  std::ostringstream buf;
547  buf << "# n_elem at level " << level << ", [ type ";
548 
549  if (_write_unique_id)
550  buf << "uid ";
551  buf << "parent ";
552  if (write_partitioning)
553  buf << "pid ";
554  if (write_subdomain_id)
555  buf << "sid ";
556  if (write_p_level)
557  buf << "p_level ";
558  buf << "(n0 ... nN-1) ]";
559 
560  io.data (n_global_elem_at_level[level], buf.str());
561  }
562 
563  for (auto pid : make_range(this->n_processors()))
564  {
565  recv_conn.resize(xfer_buf_sizes[pid]);
566  if (pid == 0)
567  recv_conn = xfer_conn;
568  else
569  this->comm().receive (pid, recv_conn);
570 
571  // at a minimum, the buffer should contain the number of elements,
572  // which could be 0.
573  libmesh_assert (!recv_conn.empty());
574 
575  for (auto [elem, recv_conn_iter, n_elem_received] =
576  std::tuple{xdr_id_type(0), recv_conn.begin(), recv_conn.back()};
577  elem<n_elem_received;
578  elem++, next_global_elem++)
579  {
580  output_buffer.clear();
581 
582  // n. nodes
583  const xdr_id_type n_nodes = *recv_conn_iter++;
584 
585  // type
586  output_buffer.push_back(*recv_conn_iter++);
587 
588  // unique_id
589  xdr_id_type tmp = *recv_conn_iter++;
590  if (_write_unique_id)
591  output_buffer.push_back(tmp);
592 
593  // parent local id
594  const xdr_id_type parent_local_id = *recv_conn_iter++;
595 
596  // parent processor id
597  const xdr_id_type parent_pid = *recv_conn_iter++;
598 
599  output_buffer.push_back (parent_local_id+processor_offsets[parent_pid]);
600 
601  // processor id
602  tmp = *recv_conn_iter++;
603  if (write_partitioning)
604  output_buffer.push_back(tmp);
605 
606  // subdomain id
607  tmp = *recv_conn_iter++;
608  if (write_subdomain_id)
609  output_buffer.push_back(tmp);
610 
611  // p level
612  tmp = *recv_conn_iter++;
613  if (write_p_level)
614  output_buffer.push_back(tmp);
615 
616  // connectivity
617  for (xdr_id_type node=0; node<n_nodes; node++)
618  output_buffer.push_back(*recv_conn_iter++);
619 
620  // Write out the elem extra integers after the connectivity
621  for (dof_id_type n=0; n<n_elem_integers; n++)
622  output_buffer.push_back(*recv_conn_iter++);
623 
624  io.data_stream
625  (output_buffer.data(),
626  cast_int<unsigned int>(output_buffer.size()),
627  cast_int<unsigned int>(output_buffer.size()));
628  }
629  }
630  }
631  else
632  this->comm().send (0, xfer_conn);
633 
634  // update the processor_offsets
635  processor_offsets[0] = processor_offsets.back() + n_elem_on_proc.back();
636  this->comm().gather (0, my_n_elem_written_at_level, n_elem_on_proc);
637  for (auto pid : IntRange<processor_id_type>(1, this->n_processors()))
638  processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
639 
640  // Now, at the next level we will again iterate over local parents. However,
641  // those parents may have been written by other processors (at this step),
642  // so we need to gather them into our *_id_maps.
643  {
644  std::map<processor_id_type, std::vector<dof_id_type>> requested_ids;
645 
646  for (const auto & elem : as_range(mesh.local_level_elements_begin(level),
647  mesh.local_level_elements_end(level)))
648  if (!child_id_map.count(elem->id()))
649  {
650  libmesh_assert_not_equal_to (elem->parent()->processor_id(), this->processor_id());
651  const processor_id_type pid = elem->parent()->processor_id();
652  if (pid != this->processor_id())
653  requested_ids[pid].push_back(elem->id());
654  }
655 
656  auto gather_functor =
657  [& child_id_map]
658  (processor_id_type libmesh_dbg_var(pid),
659  const std::vector<dof_id_type> & ids,
660  std::vector<dof_id_type> & data)
661  {
662  const std::size_t ids_size = ids.size();
663  data.resize(ids_size);
664 
665  // Fill those requests by overwriting the requested ids
666  for (std::size_t i=0; i != ids_size; i++)
667  {
668  libmesh_assert (child_id_map.count(ids[i]));
669  libmesh_assert_equal_to (child_id_map[ids[i]].first, pid);
670 
671  data[i] = child_id_map[ids[i]].second;
672  }
673  };
674 
675  auto action_functor =
676  [& child_id_map]
677  (processor_id_type pid,
678  const std::vector<dof_id_type> & ids,
679  const std::vector<dof_id_type> & data)
680  {
681  std::size_t data_size = data.size();
682 
683  for (std::size_t i=0; i != data_size; i++)
684  child_id_map[ids[i]] =
685  std::make_pair (pid, data[i]);
686  };
687 
688  // Trade ids back and forth
689  const dof_id_type * ex = nullptr;
690  Parallel::pull_parallel_vector_data
691  (this->comm(), requested_ids, gather_functor, action_functor, ex);
692 
693  // overwrite the parent_id_map with the child_id_map, but
694  // use std::map::swap() for efficiency.
695  parent_id_map.swap(child_id_map);
696  child_id_map.clear();
697  }
698  }
699 #endif // LIBMESH_ENABLE_AMR
700  if (this->processor_id() == 0)
701  libmesh_assert_equal_to (next_global_elem, n_elem);
702 
703 }
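
// For reference, each element record unpacked above (as packed by
// pack_element()) has the layout
//
//   [ n_nodes, type, unique_id,
//     parent_id, parent_pid,            <-- level > 0 records only
//     pid, sid, p_level,
//     node_0 ... node_{n_nodes-1},
//     extra_int_0 ... extra_int_{n_elem_integers-1} ]
//
// where the optional fields (unique_id, pid, sid, p_level) are dropped
// from the output stream whenever the corresponding header size was
// written as zero.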
704 
705 
706 
707 void XdrIO::write_serialized_nodes (Xdr & io, const dof_id_type max_node_id,
708  const new_header_id_type n_node_integers) const
709 {
710  // convenient reference to our mesh
711  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
712  libmesh_assert_equal_to (max_node_id, mesh.max_node_id());
713 
714  std::vector<dof_id_type> xfer_ids;
715  std::vector<Real> xfer_coords;
716  std::vector<Real> & coords=xfer_coords;
717 
718  std::vector<std::vector<dof_id_type>> recv_ids (this->n_processors());
719  std::vector<std::vector<Real>> recv_coords(this->n_processors());
720 
721 #ifndef NDEBUG
722  std::size_t n_written=0;
723 #endif
724 
725  // Note: do not be tempted to replace the node loops below with
726  // range-based loops; the iterators must be declared outside the
727  // blk loop because node_iter carries over from one block to the next.
728  MeshBase::const_node_iterator node_iter = mesh.local_nodes_begin();
729  const MeshBase::const_node_iterator nodes_end = mesh.local_nodes_end();
730 
731  for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
732  {
733  const std::size_t first_node = blk*io_blksize;
734  last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));
735 
736  const std::size_t tot_id_size = last_node - first_node;
737 
738  // Build up the xfer buffers on each processor
739  xfer_ids.clear();
740  xfer_coords.clear();
741 
742  for (; node_iter != nodes_end; ++node_iter)
743  {
744  const Node & node = **node_iter;
745  libmesh_assert_greater_equal(node.id(), first_node);
746  if (node.id() >= last_node)
747  break;
748 
749  xfer_ids.push_back(node.id());
750  xfer_coords.push_back(node(0));
751 #if LIBMESH_DIM > 1
752  xfer_coords.push_back(node(1));
753 #endif
754 #if LIBMESH_DIM > 2
755  xfer_coords.push_back(node(2));
756 #endif
757  }
758 
759  //-------------------------------------
760  // Send the xfer buffers to processor 0
761  std::vector<std::size_t> ids_size;
762 
763  const std::size_t my_ids_size = xfer_ids.size();
764 
765  // explicitly gather ids_size
766  this->comm().gather (0, my_ids_size, ids_size);
767 
768  // We will have lots of simultaneous receives if we are
769  // processor 0, so let's use nonblocking receives.
770  std::vector<Parallel::Request>
771  id_request_handles(this->n_processors()-1),
772  coord_request_handles(this->n_processors()-1);
773 
774  Parallel::MessageTag
775  id_tag = mesh.comm().get_unique_tag(),
776  coord_tag = mesh.comm().get_unique_tag();
777 
778  // Post the receives -- do this on processor 0 only.
779  if (this->processor_id() == 0)
780  {
781  for (auto pid : make_range(this->n_processors()))
782  {
783  recv_ids[pid].resize(ids_size[pid]);
784  recv_coords[pid].resize(ids_size[pid]*LIBMESH_DIM);
785 
786  if (pid == 0)
787  {
788  recv_ids[0] = xfer_ids;
789  recv_coords[0] = xfer_coords;
790  }
791  else
792  {
793  this->comm().receive (pid, recv_ids[pid],
794  id_request_handles[pid-1],
795  id_tag);
796  this->comm().receive (pid, recv_coords[pid],
797  coord_request_handles[pid-1],
798  coord_tag);
799  }
800  }
801  }
802  else
803  {
804  // Send -- do this on all other processors.
805  this->comm().send(0, xfer_ids, id_tag);
806  this->comm().send(0, xfer_coords, coord_tag);
807  }
808 
809  // -------------------------------------------------------
810  // Receive the messages and write the output on processor 0.
811  if (this->processor_id() == 0)
812  {
813  // Wait for all the receives to complete. We have no
814  // need for the statuses since we already know the
815  // buffer sizes.
816  Parallel::wait (id_request_handles);
817  Parallel::wait (coord_request_handles);
818 
819 #ifndef NDEBUG
820  for (auto pid : make_range(this->n_processors()))
821  libmesh_assert_equal_to(recv_coords[pid].size(),
822  recv_ids[pid].size()*LIBMESH_DIM);
823 #endif
824 
825  // Write the coordinates in this block.
826  // Some of these coordinates may correspond to ids for which
827  // no node exists, if we have a discontiguous node
828  // numbering!
829  //
830  // The purpose of communicating the xfer_ids/recv_ids buffer
831  // is so that we can handle discontiguous node numberings:
832  // we do not actually write the node ids themselves anywhere
833  // in the xdr file. We do write the unique ids to file, if
834  // enabled (see next section).
835 
836  // Write invalid values for unused node ids
837  coords.clear();
838  coords.resize (3*tot_id_size, std::numeric_limits<Real>::quiet_NaN());
839 
840  for (auto pid : make_range(this->n_processors()))
841  for (auto idx : index_range(recv_ids[pid]))
842  {
843  libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
844  const std::size_t local_idx = recv_ids[pid][idx] - first_node;
845  libmesh_assert_less(local_idx, tot_id_size);
846 
847  libmesh_assert_less ((3*local_idx+2), coords.size());
848  libmesh_assert_less ((LIBMESH_DIM*idx+LIBMESH_DIM-1), recv_coords[pid].size());
849 
850  coords[3*local_idx+0] = recv_coords[pid][LIBMESH_DIM*idx+0];
851 #if LIBMESH_DIM > 1
852  coords[3*local_idx+1] = recv_coords[pid][LIBMESH_DIM*idx+1];
853 #else
854  coords[3*local_idx+1] = 0.;
855 #endif
856 #if LIBMESH_DIM > 2
857  coords[3*local_idx+2] = recv_coords[pid][LIBMESH_DIM*idx+2];
858 #else
859  coords[3*local_idx+2] = 0.;
860 #endif
861 
862 #ifndef NDEBUG
863  n_written++;
864 #endif
865  }
866 
867  io.data_stream (coords.empty() ? nullptr : coords.data(),
868  cast_int<unsigned int>(coords.size()), /*line_break=*/3);
869  }
870  } // end for (block-based coord writes)
871 
872  if (this->processor_id() == 0)
873  libmesh_assert_less_equal (n_written, max_node_id);
874 
875 #ifdef LIBMESH_ENABLE_UNIQUE_ID
876  // XDR unsigned char doesn't work as anticipated
877  unsigned short write_unique_ids = 1;
878 #else
879  unsigned short write_unique_ids = 0;
880 #endif
881 
882  if (this->processor_id() == 0)
883  io.data (write_unique_ids, "# presence of unique ids");
884 
885 #ifdef LIBMESH_ENABLE_UNIQUE_ID
886 {
887  std::vector<xdr_id_type> xfer_unique_ids;
888  std::vector<xdr_id_type> & unique_ids=xfer_unique_ids;
889  std::vector<std::vector<xdr_id_type>> recv_unique_ids (this->n_processors());
890 
891 #ifndef NDEBUG
892  // Reset write counter
893  n_written = 0;
894 #endif
895 
896  // Return node iterator to the beginning
897  node_iter = mesh.local_nodes_begin();
898 
899  for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
900  {
901  const std::size_t first_node = blk*io_blksize;
902  last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));
903 
904  const std::size_t tot_id_size = last_node - first_node;
905 
906  // Build up the xfer buffers on each processor
907  xfer_ids.clear();
908  xfer_ids.reserve(tot_id_size);
909  xfer_unique_ids.clear();
910  xfer_unique_ids.reserve(tot_id_size);
911 
912  for (; node_iter != nodes_end; ++node_iter)
913  {
914  const Node & node = **node_iter;
915  libmesh_assert_greater_equal(node.id(), first_node);
916  if (node.id() >= last_node)
917  break;
918 
919  xfer_ids.push_back(node.id());
920  xfer_unique_ids.push_back(node.unique_id());
921  }
922 
923  //-------------------------------------
924  // Send the xfer buffers to processor 0
925  std::vector<std::size_t> ids_size;
926 
927  const std::size_t my_ids_size = xfer_ids.size();
928 
929  // explicitly gather ids_size
930  this->comm().gather (0, my_ids_size, ids_size);
931 
932  // We will have lots of simultaneous receives if we are
933  // processor 0, so let's use nonblocking receives.
934  std::vector<Parallel::Request>
935  unique_id_request_handles(this->n_processors()-1),
936  id_request_handles(this->n_processors()-1);
937 
938  Parallel::MessageTag
939  unique_id_tag = mesh.comm().get_unique_tag(),
940  id_tag = mesh.comm().get_unique_tag();
941 
942  // Post the receives -- do this on processor 0 only.
943  if (this->processor_id() == 0)
944  {
945  for (auto pid : make_range(this->n_processors()))
946  {
947  recv_ids[pid].resize(ids_size[pid]);
948  recv_unique_ids[pid].resize(ids_size[pid]);
949 
950  if (pid == 0)
951  {
952  recv_ids[0] = xfer_ids;
953  recv_unique_ids[0] = xfer_unique_ids;
954  }
955  else
956  {
957  this->comm().receive (pid, recv_ids[pid],
958  id_request_handles[pid-1],
959  id_tag);
960  this->comm().receive (pid, recv_unique_ids[pid],
961  unique_id_request_handles[pid-1],
962  unique_id_tag);
963  }
964  }
965  }
966  else
967  {
968  // Send -- do this on all other processors.
969  this->comm().send(0, xfer_ids, id_tag);
970  this->comm().send(0, xfer_unique_ids, unique_id_tag);
971  }
972 
973  // -------------------------------------------------------
974  // Receive the messages and write the output on processor 0.
975  if (this->processor_id() == 0)
976  {
977  // Wait for all the receives to complete. We have no
978  // need for the statuses since we already know the
979  // buffer sizes.
980  Parallel::wait (id_request_handles);
981  Parallel::wait (unique_id_request_handles);
982 
983 #ifndef NDEBUG
984  for (auto pid : make_range(this->n_processors()))
985  libmesh_assert_equal_to
986  (recv_ids[pid].size(), recv_unique_ids[pid].size());
987 #endif
988 
989  libmesh_assert_less_equal
990  (tot_id_size, std::min(io_blksize, std::size_t(max_node_id)));
991 
992  // Write the unique ids in this block.
993  unique_ids.clear();
994  unique_ids.resize(tot_id_size, unique_id_type(-1));
995 
996  for (auto pid : make_range(this->n_processors()))
997  for (auto idx : index_range(recv_ids[pid]))
998  {
999  libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
1000  const std::size_t local_idx = recv_ids[pid][idx] - first_node;
1001  libmesh_assert_less (local_idx, unique_ids.size());
1002 
1003  unique_ids[local_idx] = recv_unique_ids[pid][idx];
1004 
1005 #ifndef NDEBUG
1006  n_written++;
1007 #endif
1008  }
1009 
1010  io.data_stream (unique_ids.empty() ? nullptr : unique_ids.data(),
1011  cast_int<unsigned int>(unique_ids.size()), /*line_break=*/1);
1012  }
1013  } // end for (block-based unique_id writes)
1014 
1015  if (this->processor_id() == 0)
1016  libmesh_assert_less_equal (n_written, max_node_id);
1017 }
1018 #endif // LIBMESH_ENABLE_UNIQUE_ID
1019 
1020  // Next: do "block"-based I/O for the extra node integers (if necessary)
1021  if (n_node_integers)
1022  {
1023 #ifndef NDEBUG
1024  // Reset write counter
1025  n_written = 0;
1026 #endif
1027 
1028  // Return node iterator to the beginning
1029  node_iter = mesh.local_nodes_begin();
1030 
1031  // Data structures for writing "extra" node integers
1032  std::vector<dof_id_type> xfer_node_integers;
1033  std::vector<dof_id_type> & node_integers = xfer_node_integers;
1034  std::vector<std::vector<dof_id_type>> recv_node_integers(this->n_processors());
1035 
1036  for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
1037  {
1038  const std::size_t first_node = blk*io_blksize;
1039  last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));
1040 
1041  const std::size_t tot_id_size = last_node - first_node;
1042 
1043  // Build up the xfer buffers on each processor
1044  xfer_ids.clear();
1045  xfer_ids.reserve(tot_id_size);
1046  xfer_node_integers.clear();
1047  xfer_node_integers.reserve(tot_id_size * n_node_integers);
1048 
1049  for (; node_iter != nodes_end; ++node_iter)
1050  {
1051  const Node & node = **node_iter;
1052  libmesh_assert_greater_equal(node.id(), first_node);
1053  if (node.id() >= last_node)
1054  break;
1055 
1056  xfer_ids.push_back(node.id());
1057 
1058  // Append current node's node integers to xfer buffer
1059  for (unsigned int i=0; i != n_node_integers; ++i)
1060  xfer_node_integers.push_back(node.get_extra_integer(i));
1061  }
1062 
1063  //-------------------------------------
1064  // Send the xfer buffers to processor 0
1065  std::vector<std::size_t> ids_size;
1066 
1067  const std::size_t my_ids_size = xfer_ids.size();
1068 
1069  // explicitly gather ids_size
1070  this->comm().gather (0, my_ids_size, ids_size);
1071 
1072  // We will have lots of simultaneous receives if we are
1073  // processor 0, so let's use nonblocking receives.
1074  std::vector<Parallel::Request>
1075  node_integers_request_handles(this->n_processors()-1),
1076  id_request_handles(this->n_processors()-1);
1077 
1078  Parallel::MessageTag
1079  node_integers_tag = mesh.comm().get_unique_tag(),
1080  id_tag = mesh.comm().get_unique_tag();
1081 
1082  // Post the receives -- do this on processor 0 only.
1083  if (this->processor_id() == 0)
1084  {
1085  for (auto pid : make_range(this->n_processors()))
1086  {
1087  recv_ids[pid].resize(ids_size[pid]);
1088  recv_node_integers[pid].resize(n_node_integers * ids_size[pid]);
1089 
1090  if (pid == 0)
1091  {
1092  recv_ids[0] = xfer_ids;
1093  recv_node_integers[0] = xfer_node_integers;
1094  }
1095  else
1096  {
1097  this->comm().receive (pid, recv_ids[pid],
1098  id_request_handles[pid-1],
1099  id_tag);
1100  this->comm().receive (pid, recv_node_integers[pid],
1101  node_integers_request_handles[pid-1],
1102  node_integers_tag);
1103  }
1104  }
1105  }
1106  else
1107  {
1108  // Send -- do this on all other processors.
1109  this->comm().send(0, xfer_ids, id_tag);
1110  this->comm().send(0, xfer_node_integers, node_integers_tag);
1111  }
1112 
1113  // -------------------------------------------------------
1114  // Receive the messages and write the output on processor 0.
1115  if (this->processor_id() == 0)
1116  {
1117  // Wait for all the receives to complete. We have no
1118  // need for the statuses since we already know the
1119  // buffer sizes.
1120  Parallel::wait (id_request_handles);
1121  Parallel::wait (node_integers_request_handles);
1122 
1123 #ifndef NDEBUG
1124  for (auto pid : make_range(this->n_processors()))
1125  libmesh_assert_equal_to
1126  (recv_ids[pid].size(), recv_node_integers[pid].size() / n_node_integers);
1127 #endif
1128 
1129  libmesh_assert_less_equal
1130  (tot_id_size, std::min(io_blksize, std::size_t(max_node_id)));
1131 
1132  // Write the node integers in this block. We will write
1133  // invalid node integers if no node exists, i.e. if we
1134  // have a discontiguous node numbering!
1135  //
1136  // The purpose of communicating the xfer_ids/recv_ids buffer
1137  // is so that we can handle discontiguous node numberings:
1138  // we do not actually write the node ids themselves anywhere
1139  // in the xdr file. We do write the unique ids to file, if
1140  // enabled (see previous section).
1141 
1142  // Note: we initialize the node_integers array with invalid values,
1143  // so any indices which don't get written to in the loop below will
1144  // just contain invalid values.
1145  node_integers.clear();
1146  node_integers.resize (n_node_integers*tot_id_size, static_cast<dof_id_type>(-1));
1147 
1148  for (auto pid : make_range(this->n_processors()))
1149  for (auto idx : index_range(recv_ids[pid]))
1150  {
1151  libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
1152  const std::size_t local_idx = recv_ids[pid][idx] - first_node;
1153 
1154  // Assert that we won't index past the end of the node_integers array
1155  libmesh_assert_less (local_idx, tot_id_size);
1156 
1157  for (unsigned int i=0; i != n_node_integers; ++i)
1158  node_integers[n_node_integers*local_idx + i] = recv_node_integers[pid][n_node_integers*idx + i];
1159 
1160 #ifndef NDEBUG
1161  n_written++;
1162 #endif
1163  }
1164 
1165  io.data_stream (node_integers.empty() ? nullptr : node_integers.data(),
1166  cast_int<unsigned int>(node_integers.size()), /*line_break=*/n_node_integers);
1167  }
1168  } // end for (block-based extra node integer writes)
1169 
1170  if (this->processor_id() == 0)
1171  libmesh_assert_less_equal (n_written, max_node_id);
1172 
1173  } // end if (n_node_integers)
1174 }
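
// A worked example of the gap handling above: assume io_blksize == 4,
// max_node_id == 6, and only node ids {0, 2, 5} exist.  Block 0 covers
// ids 0..3 and emits the rows (x0,y0,z0), (NaN,NaN,NaN), (x2,y2,z2),
// (NaN,NaN,NaN); block 1 covers ids 4..5 and emits (NaN,NaN,NaN),
// (x5,y5,z5).  A reader recovers the numbering because every id slot,
// whether or not a node exists there, occupies exactly one 3-Real row.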
1175 
1176 
1177 
1178 void XdrIO::write_serialized_bcs_helper (Xdr & io, const new_header_id_type n_bcs, const std::string bc_type) const
1179 {
1180  libmesh_assert (io.writing());
1181 
1182  // convenient reference to our mesh
1183  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1184 
1185  // and our boundary info object
1186  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
1187 
1188  // Version 0.9.2+ introduces entity names
1189  write_serialized_bc_names(io, boundary_info, true); // sideset names
1190 
1191  new_header_id_type n_bcs_out = n_bcs;
1192  if (this->processor_id() == 0)
1193  {
1194  std::stringstream comment_string;
1195  comment_string << "# number of " << bc_type << " boundary conditions";
1196  io.data (n_bcs_out, comment_string.str());
1197  }
1198  n_bcs_out = 0;
1199 
1200  if (!n_bcs) return;
1201 
1202  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
1203  std::vector<std::size_t> bc_sizes(this->n_processors());
1204 
1205  // Container to catch boundary IDs handed back by BoundaryInfo
1206  std::vector<boundary_id_type> bc_ids;
1207 
1208  // Boundary conditions are only specified for level-0 elements
1209  dof_id_type n_local_level_0_elem=0;
1210  for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
1211  mesh.local_level_elements_end(0)))
1212  {
1213  if (bc_type == "side")
1214  {
1215  for (auto s : elem->side_index_range())
1216  {
1217  boundary_info.boundary_ids (elem, s, bc_ids);
1218  for (const auto & bc_id : bc_ids)
1219  if (bc_id != BoundaryInfo::invalid_id)
1220  {
1221  xfer_bcs.push_back (n_local_level_0_elem);
1222  xfer_bcs.push_back (s) ;
1223  xfer_bcs.push_back (bc_id);
1224  }
1225  }
1226  }
1227  else if (bc_type == "edge")
1228  {
1229  for (auto e : elem->edge_index_range())
1230  {
1231  boundary_info.edge_boundary_ids (elem, e, bc_ids);
1232  for (const auto & bc_id : bc_ids)
1233  if (bc_id != BoundaryInfo::invalid_id)
1234  {
1235  xfer_bcs.push_back (n_local_level_0_elem);
1236  xfer_bcs.push_back (e) ;
1237  xfer_bcs.push_back (bc_id);
1238  }
1239  }
1240  }
1241  else if (bc_type == "shellface")
1242  {
1243  for (unsigned short sf=0; sf<2; sf++)
1244  {
1245  boundary_info.shellface_boundary_ids (elem, sf, bc_ids);
1246  for (const auto & bc_id : bc_ids)
1247  if (bc_id != BoundaryInfo::invalid_id)
1248  {
1249  xfer_bcs.push_back (n_local_level_0_elem);
1250  xfer_bcs.push_back (sf) ;
1251  xfer_bcs.push_back (bc_id);
1252  }
1253  }
1254  }
1255  else
1256  {
1257  libmesh_error_msg("bc_type not recognized: " + bc_type);
1258  }
1259 
1260  // Increment the level-0 element counter.
1261  n_local_level_0_elem++;
1262  }
1263 
1264  xfer_bcs.push_back(n_local_level_0_elem);
1265  std::size_t my_size = xfer_bcs.size();
1266  this->comm().gather (0, my_size, bc_sizes);
1267 
1268  // All processors send their xfer buffers to processor 0
1269  // Processor 0 will receive all buffers and write out the bcs
1270  if (this->processor_id() == 0)
1271  {
1272  dof_id_type elem_offset = 0;
1273  for (auto pid : make_range(this->n_processors()))
1274  {
1275  recv_bcs.resize(bc_sizes[pid]);
1276  if (pid == 0)
1277  recv_bcs = xfer_bcs;
1278  else
1279  this->comm().receive (pid, recv_bcs);
1280 
1281  const dof_id_type my_n_local_level_0_elem
1282  = cast_int<dof_id_type>(recv_bcs.back());
1283  recv_bcs.pop_back();
1284 
1285  for (std::size_t idx=0, rbs=recv_bcs.size(); idx<rbs; idx += 3, n_bcs_out++)
1286  recv_bcs[idx+0] += elem_offset;
1287 
1288  io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
1289  cast_int<unsigned int>(recv_bcs.size()), 3);
1290  elem_offset += my_n_local_level_0_elem;
1291  }
1292  libmesh_assert_equal_to (n_bcs, n_bcs_out);
1293  }
1294  else
1295  this->comm().send (0, xfer_bcs);
1296 }
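
// The stream written above is a flat sequence of (element,
// side/edge/shellface, boundary id) triples, where "element" counts
// level-0 elements in per-processor order; processor 0 shifts each
// incoming index by elem_offset so it matches the global level-0
// ordering used by write_serialized_connectivity().  For example, if
// processor 0 owns 3 level-0 elements, processor 1's local element 0
// is emitted as element 3.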
1297 
1298 
1299 
1300 void XdrIO::write_serialized_side_bcs (Xdr & io, const new_header_id_type n_side_bcs) const
1301 {
1302  write_serialized_bcs_helper(io, n_side_bcs, "side");
1303 }
1304 
1305 
1306 
1307 void XdrIO::write_serialized_edge_bcs (Xdr & io, const new_header_id_type n_edge_bcs) const
1308 {
1309  write_serialized_bcs_helper(io, n_edge_bcs, "edge");
1310 }
1311 
1312 
1313 
1314 void XdrIO::write_serialized_shellface_bcs (Xdr & io, const new_header_id_type n_shellface_bcs) const
1315 {
1316  write_serialized_bcs_helper(io, n_shellface_bcs, "shellface");
1317 }
1318 
1319 
1320 
1321 void XdrIO::write_serialized_nodesets (Xdr & io, const new_header_id_type n_nodesets) const
1322 {
1323  libmesh_assert (io.writing());
1324 
1325  // convenient reference to our mesh
1326  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1327 
1328  // and our boundary info object
1329  const BoundaryInfo & boundary_info = mesh.get_boundary_info();
1330 
1331  // Version 0.9.2+ introduces entity names
1332  write_serialized_bc_names(io, boundary_info, false); // nodeset names
1333 
1334  new_header_id_type n_nodesets_out = n_nodesets;
1335  if (this->processor_id() == 0)
1336  io.data (n_nodesets_out, "# number of nodesets");
1337  n_nodesets_out = 0;
1338 
1339  if (!n_nodesets) return;
1340 
1341  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
1342  std::vector<std::size_t> bc_sizes(this->n_processors());
1343 
1344  // Container to catch boundary IDs handed back by BoundaryInfo
1345  std::vector<boundary_id_type> nodeset_ids;
1346 
1347  dof_id_type n_node=0;
1348  for (const auto & node : mesh.local_node_ptr_range())
1349  {
1350  boundary_info.boundary_ids (node, nodeset_ids);
1351  for (const auto & bc_id : nodeset_ids)
1352  if (bc_id != BoundaryInfo::invalid_id)
1353  {
1354  xfer_bcs.push_back (node->id());
1355  xfer_bcs.push_back (bc_id);
1356  }
1357  }
1358 
1359  xfer_bcs.push_back(n_node);
1360  std::size_t my_size = xfer_bcs.size();
1361  this->comm().gather (0, my_size, bc_sizes);
1362 
1363  // All processors send their xfer buffers to processor 0
1364  // Processor 0 will receive all buffers and write out the bcs
1365  if (this->processor_id() == 0)
1366  {
1367  dof_id_type node_offset = 0;
1368  for (auto pid : make_range(this->n_processors()))
1369  {
1370  recv_bcs.resize(bc_sizes[pid]);
1371  if (pid == 0)
1372  recv_bcs = xfer_bcs;
1373  else
1374  this->comm().receive (pid, recv_bcs);
1375 
1376  const dof_id_type my_n_node =
1377  cast_int<dof_id_type>(recv_bcs.back());
1378  recv_bcs.pop_back();
1379 
1380  for (std::size_t idx=0, rbs=recv_bcs.size(); idx<rbs; idx += 2, n_nodesets_out++)
1381  recv_bcs[idx+0] += node_offset;
1382 
1383  io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
1384  cast_int<unsigned int>(recv_bcs.size()), 2);
1385  node_offset += my_n_node;
1386  }
1387  libmesh_assert_equal_to (n_nodesets, n_nodesets_out);
1388  }
1389  else
1390  this->comm().send (0, xfer_bcs);
1391 }
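
// Usage sketch (hypothetical helper; ids and names arbitrary): how the
// sideset and nodeset data serialized above is typically populated
// upstream through the BoundaryInfo API.
inline void populate_bcs_example (MeshBase & mesh, Elem * elem, Node * node)
{
  BoundaryInfo & bi = mesh.get_boundary_info();
  bi.add_side (elem, /*side=*/0, /*id=*/1); // becomes a "side" BC triple
  bi.add_node (node, /*id=*/2);             // becomes a nodeset pair
  bi.sideset_name(1) = "inlet";             // written by write_serialized_bc_names()
  bi.nodeset_name(2) = "clamped";
}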
1392 
1393 
1394 
1395 void XdrIO::write_serialized_bc_names (Xdr & io, const BoundaryInfo & info, bool is_sideset) const
1396 {
1397  if (this->processor_id() == 0)
1398  {
1399  const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
1400  info.get_sideset_name_map() : info.get_nodeset_name_map();
1401 
1402  std::vector<new_header_id_type> boundary_ids;
1403  boundary_ids.reserve(boundary_map.size());
1404 
1405  std::vector<std::string> boundary_names;
1406  boundary_names.reserve(boundary_map.size());
1407 
1408  // We need to loop over the map and make sure that there aren't any invalid entries. Since we
1409  // return writable references in boundary_info, it's possible for the user to leave some entity names
1410  // blank. We can't write those to the XDA file.
1411  new_header_id_type n_boundary_names = 0;
1412  for (const auto & [bndry_id, name] : boundary_map)
1413  if (!name.empty())
1414  {
1415  n_boundary_names++;
1416  boundary_ids.push_back(bndry_id);
1417  boundary_names.push_back(name);
1418  }
1419 
1420  if (is_sideset)
1421  io.data(n_boundary_names, "# sideset id to name map");
1422  else
1423  io.data(n_boundary_names, "# nodeset id to name map");
1424  // Write out the ids and names in two vectors
1425  if (n_boundary_names)
1426  {
1427  io.data(boundary_ids);
1428  io.data(boundary_names);
1429  }
1430  }
1431 }
1432 
1433 
1434 
1435 void XdrIO::read (const std::string & name)
1436 {
1437  LOG_SCOPE("read()","XdrIO");
1438 
1439  // Only open the file on processor 0 -- this is especially important because
1440  // there may be an underlying bzip/bunzip going on, and multiple simultaneous
1441  // calls will produce a race condition.
1442  Xdr io (this->processor_id() == 0 ? name : "", this->binary() ? DECODE : READ);
1443 
1444  // convenient reference to our mesh
1445  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1446 
1447  // get the version string.
1448  if (this->processor_id() == 0)
1449  io.data (this->version());
1450  this->comm().broadcast (this->version());
1451 
1452  // note that for "legacy" files the first entry is an
1453  // integer -- not a string at all.
1454  this->legacy() = !(this->version().find("libMesh") < this->version().size());
1455 
1456  // Check for a legacy version format.
1457  libmesh_error_msg_if(this->legacy(), "We no longer support reading files in the legacy format.");
1458 
1459  // Read headers with the old id type if they're pre-1.3.0, or with
1460  // the new id type if they're post-1.3.0
1461  const unsigned int n_header_metadata_values = 13;
1462  std::vector<new_header_id_type> meta_data(n_header_metadata_values, sizeof(xdr_id_type));
1463  if (this->version_at_least_1_3_0())
1464  {
1465  this->read_header(io, meta_data);
1466  }
1467  else
1468  {
1469  // In pre-1.3.0 format there were only 10 metadata values in the header, but
1470  // in newer versions there are more. The unread values will be set to 0.
1471  std::vector<old_header_id_type> old_data(n_header_metadata_values, sizeof(xdr_id_type));
1472 
1473  this->read_header(io, old_data);
1474 
1475  meta_data.assign(old_data.begin(), old_data.end());
1476  }
1477 
1478  const new_header_id_type & n_elem = meta_data[0];
1479  const new_header_id_type & n_nodes = meta_data[1];
1480 
1486  if (version_at_least_0_9_2())
1487  _field_width = cast_int<unsigned int>(meta_data[2]);
1488 
1489  // On systems where uint64_t==unsigned long, we were previously
1490  // writing 64-bit unsigned integers via xdr_u_long(), a function
1491  // which is literally less suited for that task than abort() would
1492  // have been, because at least abort() would have *known* it
1493  // couldn't write rather than truncating writes to 32 bits.
1494  //
1495  // If we have files with version < 1.3.0, then we'll continue to use
1496  // 32 bit field width, regardless of whether the file thinks we
1497  // should, whenever we're on a system where the problem would have
1498  // occurred.
1499  if ((_field_width == 4) ||
1500  (!version_at_least_1_3_0() &&
1501  libmesh_type_is_same<uint64_t, unsigned long>::value))
1502  {
1503  uint32_t type_size = 0;
1504 
1505  // read subdomain names
1506  this->read_serialized_subdomain_names(io);
1507 
1508  // read connectivity
1509  this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);
1510 
1511  // read the nodal locations
1512  this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes), meta_data);
1513 
1514  // read the side boundary conditions
1515  this->read_serialized_side_bcs (io, type_size);
1516 
1517  if (version_at_least_0_9_2())
1518  // read the nodesets
1519  this->read_serialized_nodesets (io, type_size);
1520 
1521  if (version_at_least_1_1_0())
1522  {
1523  // read the edge boundary conditions
1524  this->read_serialized_edge_bcs (io, type_size);
1525 
1526  // read the "shell face" boundary conditions
1527  this->read_serialized_shellface_bcs (io, type_size);
1528  }
1529  }
1530  else if (_field_width == 8)
1531  {
1532  uint64_t type_size = 0;
1533 
1534  // read subdomain names
1535  this->read_serialized_subdomain_names(io);
1536 
1537  // read connectivity
1538  this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);
1539 
1540  // read the nodal locations
1541  this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes), meta_data);
1542 
1543  // read the boundary conditions
1544  this->read_serialized_side_bcs (io, type_size);
1545 
1546  if (version_at_least_0_9_2())
1547  // read the nodesets
1548  this->read_serialized_nodesets (io, type_size);
1549 
1550  if (version_at_least_1_1_0())
1551  {
1552  // read the edge boundary conditions
1553  this->read_serialized_edge_bcs (io, type_size);
1554 
1555  // read the "shell face" boundary conditions
1556  this->read_serialized_shellface_bcs (io, type_size);
1557  }
1558  }
1559 
1560  // set the node processor ids
1561  Partitioner::set_node_processor_ids(mesh);
1562 }
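
// Reading is gated on the "libMesh-x.y.z" stamp parsed above: e.g. a
// file stamped "libMesh-0.9.6" supplies per-field sizes and nodesets,
// but predates the edge/shellface BCs of 1.1.0 and the extra-integer /
// elemset metadata of 1.8.0, so those readers are skipped entirely.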
1563 
1564 
1565 
1566 template <typename T>
1567 void XdrIO::read_header (Xdr & io, std::vector<T> & meta_data)
1568 {
1569  LOG_SCOPE("read_header()","XdrIO");
1570 
1571  // convenient reference to our mesh
1572  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1573 
1574  // Header information to be read on processor 0 and broadcast to
1575  // other procs.
1576  std::vector<std::string> node_integer_names;
1577  std::vector<std::string> elem_integer_names;
1578  std::vector<dof_id_type> elemset_codes;
1579  std::vector<std::vector<dof_id_type>> elemset_id_vecs;
1580 
1581  if (this->processor_id() == 0)
1582  {
1583  io.data (meta_data[0]);
1584  io.data (meta_data[1]);
1585  io.data (this->boundary_condition_file_name()); // libMesh::out << "bc_file=" << this->boundary_condition_file_name() << std::endl;
1586  io.data (this->subdomain_map_file_name()); // libMesh::out << "sid_file=" << this->subdomain_map_file_name() << std::endl;
1587  io.data (this->partition_map_file_name()); // libMesh::out << "pid_file=" << this->partition_map_file_name() << std::endl;
1588  io.data (this->polynomial_level_file_name()); // libMesh::out << "pl_file=" << this->polynomial_level_file_name() << std::endl;
1589 
1590  if (version_at_least_0_9_2())
1591  {
1592  // Make sure there's enough room in meta_data
1593  libmesh_assert_greater_equal(meta_data.size(), 10);
1594 
1595  io.data (meta_data[2], "# type size");
1596  io.data (meta_data[3], "# uid size");
1597  io.data (meta_data[4], "# pid size");
1598  io.data (meta_data[5], "# sid size");
1599  io.data (meta_data[6], "# p-level size");
1600  // Boundary Condition sizes
1601  io.data (meta_data[7], "# eid size"); // elem id
1602  io.data (meta_data[8], "# side size"); // side number
1603  io.data (meta_data[9], "# bid size"); // boundary id
1604  }
1605 
1606  if (version_at_least_1_8_0())
1607  {
1608  // Make sure there's enough room in meta_data
1609  libmesh_assert_greater_equal(meta_data.size(), 13);
1610 
1611  io.data (meta_data[10], "# extra integer size"); // extra integer size
1612 
1613  // Read in the node integer names and store the count in meta_data
1614  io.data(node_integer_names, "# node integer names");
1615  meta_data[11] = node_integer_names.size();
1616 
1617  // Read in the elem integer names and store the count in meta_data
1618  io.data(elem_integer_names, "# elem integer names");
1619  meta_data[12] = elem_integer_names.size();
1620 
1621  // Read in vector of elemset codes from file
1622  io.data(elemset_codes, "# elemset codes");
1623 
1624  // For each elemset code, read in the associated elemset ids from file
1625  elemset_id_vecs.resize(elemset_codes.size());
1626  for (auto i : index_range(elemset_codes))
1627  io.data(elemset_id_vecs[i]);
1628  }
1629  }
1630 
1631  // broadcast the n_elems, n_nodes, and size information
1632  this->comm().broadcast (meta_data);
1633 
1634  this->comm().broadcast (this->boundary_condition_file_name());
1635  this->comm().broadcast (this->subdomain_map_file_name());
1636  this->comm().broadcast (this->partition_map_file_name());
1637  this->comm().broadcast (this->polynomial_level_file_name());
1638  this->comm().broadcast(node_integer_names);
1639  this->comm().broadcast(elem_integer_names);
1640  this->comm().broadcast(elemset_codes);
1641  this->comm().broadcast(elemset_id_vecs);
1642 
1643  // Tell the mesh how many nodes/elements to expect. Depending on the mesh type,
1644  // this may allow for efficient adding of nodes/elements.
1645  const T & n_elem = meta_data[0];
1646  const T & n_nodes = meta_data[1];
1647 
1648  mesh.reserve_elem(cast_int<dof_id_type>(n_elem));
1649  mesh.reserve_nodes(cast_int<dof_id_type>(n_nodes));
1650 
1651  // Our mesh is pre-partitioned as it's created
1652  this->set_n_partitions(this->n_processors());
1653 
1659  if (version_at_least_0_9_2())
1660  _field_width = cast_int<unsigned int>(meta_data[2]);
1661 
1662  // Add extra node and elem integers on all procs. Note that adding
1663  // an extra node/elem "datum" of type T is implemented via repeated
1664  // calls to MeshBase::add_elem_integer(), so we only need to call
1665  // MeshBase::add_node/elem_integers() here in order to restore the
1666  // data that we had on the Mesh previously.
1667  mesh.add_node_integers(node_integer_names);
1668  mesh.add_elem_integers(elem_integer_names);
1669 
1670  // Store the elemset_code -> {elemset ids} mapping on the Mesh
1671  for (auto i : index_range(elemset_codes))
1672  mesh.add_elemset_code(elemset_codes[i],
1673  MeshBase::elemset_type(elemset_id_vecs[i].begin(),
1674  elemset_id_vecs[i].end()));
1675 }
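
// A sketch of the elemset round trip restored above (hypothetical
// values): if the header recorded elemset code 7 -> {1, 3}, then after
// mesh.add_elemset_code(7, {1, 3}) any element whose elemset-code
// extra integer reads back as 7 belongs to elemsets 1 and 3.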
1676 
1677 
1678 
1679 void XdrIO::read_serialized_subdomain_names (Xdr & io)
1680 {
1681  const bool read_entity_info = version_at_least_0_9_2();
1682  const bool use_new_header_type (this->version_at_least_1_3_0());
1683  if (read_entity_info)
1684  {
1685  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1686 
1687  new_header_id_type n_subdomain_names = 0;
1688  std::vector<new_header_id_type> subdomain_ids;
1689  std::vector<std::string> subdomain_names;
1690 
1691  // Read the sideset names
1692  if (this->processor_id() == 0)
1693  {
1694  if (use_new_header_type)
1695  io.data(n_subdomain_names);
1696  else
1697  {
1698  old_header_id_type temp;
1699  io.data(temp);
1700  n_subdomain_names = temp;
1701  }
1702 
1703  subdomain_ids.resize(n_subdomain_names);
1704  subdomain_names.resize(n_subdomain_names);
1705 
1706  if (n_subdomain_names)
1707  {
1708  if (use_new_header_type)
1709  io.data(subdomain_ids);
1710  else
1711  {
1712  std::vector<old_header_id_type> temp;
1713  io.data(temp);
1714  subdomain_ids.assign(temp.begin(), temp.end());
1715  }
1716 
1717  io.data(subdomain_names);
1718  }
1719  }
1720 
1721  // Broadcast the subdomain names to all processors
1722  this->comm().broadcast(n_subdomain_names);
1723  if (n_subdomain_names == 0)
1724  return;
1725 
1726  subdomain_ids.resize(n_subdomain_names);
1727  subdomain_names.resize(n_subdomain_names);
1728  this->comm().broadcast(subdomain_ids);
1729  this->comm().broadcast(subdomain_names);
1730 
1731  // Reassemble the named subdomain information
1732  std::map<subdomain_id_type, std::string> & subdomain_map = mesh.set_subdomain_name_map();
1733 
1734  for (unsigned int i=0; i<n_subdomain_names; ++i)
1735  subdomain_map.emplace(subdomain_ids[i], subdomain_names[i]);
1736  }
1737 }
1738 
1739 
1740 template <typename T>
1741 void
1742 XdrIO::read_serialized_connectivity (Xdr & io,
1743  const dof_id_type n_elem,
1744  const std::vector<new_header_id_type> & meta_data,
1745  T /*type_size*/)
1746 {
1747  libmesh_assert (io.reading());
1748 
1749  if (!n_elem) return;
1750 
1751  const bool
1752  read_p_level = ("." == this->polynomial_level_file_name()),
1753  read_partitioning = ("." == this->partition_map_file_name()),
1754  read_subdomain_id = ("." == this->subdomain_map_file_name());
1755 
1756  // convenient reference to our mesh
1757  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1758 
1759  // Keep track of what kinds of elements this file contains
1760  elems_of_dimension.clear();
1761  elems_of_dimension.resize(4, false);
1762 
1763  std::vector<T> conn, input_buffer(100 /* oversized ! */);
1764 
1765  int level=-1;
1766 
1767  // Version 0.9.2+ introduces unique ids
1768  const size_t unique_id_size_index = 3;
1769 
1770  const bool read_unique_id =
1771  (version_at_least_0_9_2()) &&
1772  meta_data[unique_id_size_index];
1773 
1774  // Version 1.8.0+ introduces elem integers
1775  const std::size_t n_elem_integers_index = 12;
1776  new_header_id_type n_elem_integers = 0;
1777  if (version_at_least_1_8_0())
1778  {
1779  libmesh_assert_greater_equal(meta_data.size(), 13);
1780  n_elem_integers = meta_data[n_elem_integers_index];
1781  }
1782 
1783  T n_elem_at_level=0, n_processed_at_level=0;
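// Read the connectivity in fixed-size blocks: processor 0 reads up
// to io_blksize elements at a time and broadcasts each block, so only
// one block of raw connectivity data is held in memory at a time
// beyond the mesh itself.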
1784  for (dof_id_type blk=0, first_elem=0, last_elem=0;
1785  last_elem<n_elem; blk++)
1786  {
1787  first_elem = cast_int<dof_id_type>(blk*io_blksize);
1788  last_elem = cast_int<dof_id_type>(std::min(cast_int<std::size_t>((blk+1)*io_blksize),
1789  cast_int<std::size_t>(n_elem)));
1790 
1791  conn.clear();
1792 
1793  if (this->processor_id() == 0)
1794  for (dof_id_type e=first_elem; e<last_elem; e++, n_processed_at_level++)
1795  {
1796  if (n_processed_at_level == n_elem_at_level)
1797  {
1798  // get the number of elements to read at this level
1799  io.data (n_elem_at_level);
1800  n_processed_at_level = 0;
1801  level++;
1802  }
1803 
1804  // "pos" is a cursor into input_buffer
1805  unsigned int pos = 0;
1806 
1807  // get the element type,
1808  io.data_stream (&input_buffer[pos++], 1);
1809 
1810  if (read_unique_id)
1811  io.data_stream (&input_buffer[pos++], 1);
1812  // Older versions won't have this field at all (no increment on pos)
1813 
1814  // maybe the parent
1815  if (level)
1816  io.data_stream (&input_buffer[pos++], 1);
1817  else
1818  // We can't always fit DofObject::invalid_id in an
1819  // xdr_id_type
1820  input_buffer[pos++] = static_cast<T>(-1);
1821 
1822  // maybe the processor id
1823  if (read_partitioning)
1824  io.data_stream (&input_buffer[pos++], 1);
1825  else
1826  input_buffer[pos++] = 0;
1827 
1828  // maybe the subdomain id
1829  if (read_subdomain_id)
1830  io.data_stream (&input_buffer[pos++], 1);
1831  else
1832  input_buffer[pos++] = 0;
1833 
1834  // maybe the p level
1835  if (read_p_level)
1836  io.data_stream (&input_buffer[pos++], 1);
1837  else
1838  input_buffer[pos++] = 0;
1839 
1840  const unsigned int n_nodes0 = Elem::type_to_n_nodes_map[input_buffer[0]];
1841  if (n_nodes0 == invalid_uint)
1842  libmesh_not_implemented();
1843 
1844  // and all the nodes
1845  libmesh_assert_less (pos+n_nodes0, input_buffer.size());
1846  io.data_stream (&input_buffer[pos], n_nodes0);
1847 
1848  // Advance input_buffer cursor by number of nodes in this element
1849  pos += n_nodes0;
1850 
1851  // and all the elem "extra" integers
1852  libmesh_assert_less (pos + n_elem_integers, input_buffer.size());
1853  io.data_stream (&input_buffer[pos], n_elem_integers);
1854 
1855  // Advance input_buffer cursor by number of extra integers read
1856  pos += n_elem_integers;
1857 
1858  // Insert input_buffer at end of "conn"
1859  conn.insert (conn.end(), input_buffer.begin(), input_buffer.begin() + pos);
1860  }
1861 
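// Only processor 0 filled "conn" above, so broadcast its size and
// then its contents before any processor tries to decode the block.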
1862  std::size_t conn_size = conn.size();
1863  this->comm().broadcast(conn_size);
1864  conn.resize (conn_size);
1865  this->comm().broadcast (conn);
1866 
1867  // All processors now have the connectivity for this block.
1868  for (auto [e, it] = std::tuple{first_elem, conn.begin()}; e<last_elem; e++)
1869  {
1870  // Temporary variable for reading connectivity array
1871  // entries.
1872  T tmp;
1873 
1874  const ElemType elem_type = static_cast<ElemType>(*it++);
1875 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1876  // We are on all processors here, so the mesh can easily
1877  // assign consistent unique ids if the file doesn't specify
1878  // them later. We'll use element ids for elements and start
1879  // past those for nodes.
1880  unique_id_type unique_id = e;
1881 #endif
1882  if (read_unique_id)
1883  {
1884  tmp = *it++;
1885 
1886 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1887  unique_id = cast_int<unique_id_type>(tmp);
1888 #endif
1889  }
1890 
1891  tmp = *it++;
1892  const dof_id_type parent_id =
1893  (tmp == static_cast<T>(-1)) ? DofObject::invalid_id : cast_int<dof_id_type>(tmp);
1894 
1895  const processor_id_type proc_id =
1896  cast_int<processor_id_type>(*it++);
1897 
1898  const subdomain_id_type subdomain_id =
1899  cast_int<subdomain_id_type>(*it++);
1900 
1901  tmp = *it++;
1902 #ifdef LIBMESH_ENABLE_AMR
1903  const unsigned int p_level = cast_int<unsigned int>(tmp);
1904 #endif
1905 
1906  Elem * parent = (parent_id == DofObject::invalid_id) ?
1907  nullptr : mesh.elem_ptr(parent_id);
1908 
1909  auto elem = Elem::build(elem_type, parent);
1910 
1911  elem->set_id() = e;
1912 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1913  elem->set_unique_id(unique_id);
1914 #endif
1915  elem->processor_id() = proc_id;
1916  elem->subdomain_id() = subdomain_id;
1917 #ifdef LIBMESH_ENABLE_AMR
1918  elem->hack_p_level(p_level);
1919 
1920  if (parent)
1921  {
1922  parent->add_child(elem.get());
1923  parent->set_refinement_flag (Elem::INACTIVE);
1924  elem->set_refinement_flag (Elem::JUST_REFINED);
1925  }
1926 #endif
1927 
1928  // Node ids for this Elem
1929  for (unsigned int n=0, n_n = elem->n_nodes(); n != n_n;
1930  n++, ++it)
1931  {
1932  const dof_id_type global_node_number =
1933  cast_int<dof_id_type>(*it);
1934 
1935  Node *node = mesh.query_node_ptr(global_node_number);
1936 
1937  if (node)
1938  elem->set_node(n, node);
1939  else
1940  {
1941  std::unique_ptr<Node> new_node = Node::build(Point(), global_node_number);
1942 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1943  // If we have to overwrite this it'll happen later
1944  new_node->set_unique_id(n_elem + global_node_number);
1945 #endif
1946  elem->set_node(n, mesh.add_node(std::move(new_node)));
1947  }
1948  }
1949 
1950  elems_of_dimension[elem->dim()] = true;
1951  Elem * added_elem = mesh.add_elem(std::move(elem));
1952 
1953  // Make sure that the Elem we just added to the Mesh has
1954  // space for the required number of extra integers. Note
1955  // that we have to add the Elem to the Mesh prior to
1956  // accessing any of its extra integers, since otherwise the
1957  // Elem does not know that it should have extra integers...
1958  libmesh_assert_equal_to(n_elem_integers, added_elem->n_extra_integers());
1959 
1960  // Extra integers for this Elem
1961  for (unsigned int ei=0; ei<n_elem_integers; ++ei)
1962  {
1963  auto extra_int = cast_int<dof_id_type>(*it++);
1964  added_elem->set_extra_integer(ei, extra_int);
1965  }
1966  }
1967  }
1968 
1969  // Set the mesh dimension to the largest encountered for an element
1970  for (unsigned char i=0; i!=4; ++i)
1971  if (elems_of_dimension[i])
1972  mesh.set_mesh_dimension(i);
1973 
1974 #if LIBMESH_DIM < 3
1975  libmesh_error_msg_if(mesh.mesh_dimension() > LIBMESH_DIM,
1976  "Cannot open dimension "
1977  << mesh.mesh_dimension()
1978  << " mesh file when configured without "
1979  << mesh.mesh_dimension()
1980  << "D support.");
1981 #endif
1982 }
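// To summarize: each element record decoded above is laid out as
// [type, unique id (0.9.2+), parent id or -1 at level 0, processor id,
// subdomain id, p-level, node ids..., extra integers... (1.8.0+)],
// with defaults substituted for any field the file omits.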
1983 
1984 
1985 
1986 void
1987 XdrIO::read_serialized_nodes (Xdr & io,
1988  const dof_id_type n_nodes,
1989  const std::vector<new_header_id_type> & meta_data)
1990 {
1991  libmesh_assert (io.reading());
1992 
1993  // convenient reference to our mesh
1994  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1995 
1996  if (!mesh.n_nodes()) return;
1997 
1998  // At this point the elements have been read from file and placeholder nodes
1999  // have been assigned. These nodes, however, do not have the proper (x,y,z)
2000  // locations or unique_id values. This method will read all the
2001  // nodes from disk, and each processor can then grab the individual
2002  // values it needs.
2003 
2004  // If the file includes unique ids for nodes (as indicated by a
2005  // flag in 0.9.6+ files), those will be read next.
2006 
2007  // build up a list of the nodes contained in our local mesh. These are the nodes
2008  // stored on the local processor whose (x,y,z) and unique_id values
2009  // need to be corrected.
2010  std::vector<dof_id_type> needed_nodes;
2011  needed_nodes.reserve (mesh.n_nodes());
2012  {
2013  for (auto & node : mesh.node_ptr_range())
2014  needed_nodes.push_back(node->id());
2015 
2016  std::sort (needed_nodes.begin(), needed_nodes.end());
2017 
2018  // We should not have any duplicate node->id()s
2019  libmesh_assert (std::unique(needed_nodes.begin(), needed_nodes.end()) == needed_nodes.end());
2020  }
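// needed_nodes is sorted above so the block-wise loops below can test
// membership with std::equal_range, using pos.first as a moving lower
// bound to shrink the searched range as nodes are matched.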
2021 
2022  // Version 1.8.0+ introduces node integers
2023  const std::size_t n_node_integers_index = 11;
2024  new_header_id_type n_node_integers = 0;
2025  if (version_at_least_1_8_0())
2026  {
2027  libmesh_assert_greater_equal(meta_data.size(), 13);
2028  n_node_integers = meta_data[n_node_integers_index];
2029  }
2030 
2031  // Get the nodes in blocks.
2032  std::vector<Real> coords;
2033  std::pair<std::vector<dof_id_type>::iterator,
2034  std::vector<dof_id_type>::iterator> pos;
2035  pos.first = needed_nodes.begin();
2036 
2037  // Broadcast node coordinates
2038  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
2039  {
2040  first_node = blk*io_blksize;
2041  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
2042 
2043  coords.resize(3*(last_node - first_node));
2044 
2045  if (this->processor_id() == 0)
2046  io.data_stream (coords.empty() ? nullptr : coords.data(),
2047  cast_int<unsigned int>(coords.size()));
2048 
2049  // For large numbers of processors the majority of processors at any given
2050  // block may not actually need these data. It may be worth profiling this,
2051  // although it is expected that disk IO will be the bottleneck
2052  this->comm().broadcast (coords);
2053 
2054  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx+=3)
2055  {
2056  // first see if we need this node. use pos.first as a smart lower
2057  // bound, this will ensure that the size of the searched range
2058  // decreases as we match nodes.
2059  pos = std::equal_range (pos.first, needed_nodes.end(), n);
2060 
2061  if (pos.first != pos.second) // we need this node.
2062  {
2063  libmesh_assert_equal_to (*pos.first, n);
2064  libmesh_assert(!libmesh_isnan(coords[idx+0]));
2065  libmesh_assert(!libmesh_isnan(coords[idx+1]));
2066  libmesh_assert(!libmesh_isnan(coords[idx+2]));
2067  mesh.node_ref(cast_int<dof_id_type>(n)) =
2068  Point (coords[idx+0],
2069  coords[idx+1],
2070  coords[idx+2]);
2071 
2072  }
2073  }
2074  }
2075 
2076  // Check for node unique ids
2077  unsigned short read_unique_ids = false;
2078 
2079  if (version_at_least_0_9_6())
2080  {
2081  if (this->processor_id() == 0)
2082  io.data (read_unique_ids);
2083 
2084  this->comm().broadcast (read_unique_ids);
2085  }
2086 
2087  // If no node unique ids are in the file, well, we already
2088  // assigned our own ... *but*, our ids might conflict with the
2089  // file-assigned element unique ids. Let's check on that.
2090  // Should be easy since we're serialized.
2091  if (!read_unique_ids)
2092  {
2093 #ifdef LIBMESH_ENABLE_UNIQUE_ID
2094  unique_id_type max_elem_unique_id = 0;
2095  for (auto & elem : mesh.element_ptr_range())
2096  max_elem_unique_id = std::max(max_elem_unique_id,
2097  elem->unique_id()+1);
2098  if (max_elem_unique_id > mesh.n_elem())
2099  {
2100  for (auto & node : mesh.node_ptr_range())
2101  node->set_unique_id(max_elem_unique_id + node->id());
2102  }
2103  mesh.set_next_unique_id(max_elem_unique_id + mesh.n_nodes());
2104 #endif
2105  }
2106  else
2107  {
2108  std::vector<uint32_t> unique_32;
2109  std::vector<uint64_t> unique_64;
2110 
2111  // We're starting over from node 0 again
2112  pos.first = needed_nodes.begin();
2113 
2114  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
2115  {
2116  first_node = blk*io_blksize;
2117  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
2118 
2119  libmesh_assert((_field_width == 8) || (_field_width == 4));
2120 
2121  if (_field_width == 8)
2122  unique_64.resize(last_node - first_node);
2123  else
2124  unique_32.resize(last_node - first_node);
2125 
2126  if (this->processor_id() == 0)
2127  {
2128  if (_field_width == 8)
2129  io.data_stream (unique_64.empty() ? nullptr : unique_64.data(),
2130  cast_int<unsigned int>(unique_64.size()));
2131  else
2132  io.data_stream (unique_32.empty() ? nullptr : unique_32.data(),
2133  cast_int<unsigned int>(unique_32.size()));
2134  }
2135 
2136 #ifdef LIBMESH_ENABLE_UNIQUE_ID
2137  if (_field_width == 8)
2138  this->comm().broadcast (unique_64);
2139  else
2140  this->comm().broadcast (unique_32);
2141 
2142  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
2143  {
2144  // first see if we need this node. use pos.first as a smart lower
2145  // bound, this will ensure that the size of the searched range
2146  // decreases as we match nodes.
2147  pos = std::equal_range (pos.first, needed_nodes.end(), n);
2148 
2149  if (pos.first != pos.second) // we need this node.
2150  {
2151  libmesh_assert_equal_to (*pos.first, n);
2152  if (_field_width == 8)
2153  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id(unique_64[idx]);
2154  else
2155  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id(unique_32[idx]);
2156  }
2157  }
2158 #endif // LIBMESH_ENABLE_UNIQUE_ID
2159  }
2160  }
2161 
2162  // Read extra node integers (if present) in chunks on processor 0 and broadcast to
2163  // all other procs. For a ReplicatedMesh, all procs need to know about all Nodes'
2164  // extra integers. For a DistributedMesh, this broadcasting will transmit some
2165  // redundant information.
2166  if (n_node_integers)
2167  {
2168  std::vector<dof_id_type> extra_integers;
2169 
2170  // We're starting over from node 0 again
2171  pos.first = needed_nodes.begin();
2172 
2173  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
2174  {
2175  first_node = blk*io_blksize;
2176  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
2177 
2178  extra_integers.resize((last_node - first_node) * n_node_integers);
2179 
2180  // Read in block of node integers on proc 0 only...
2181  if (this->processor_id() == 0)
2182  io.data_stream (extra_integers.empty() ? nullptr : extra_integers.data(),
2183  cast_int<unsigned int>(extra_integers.size()));
2184 
2185  // ... and broadcast it to all other procs.
2186  this->comm().broadcast (extra_integers);
2187 
2188  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
2189  {
2190  // first see if we need this node. use pos.first as a smart lower
2191  // bound, this will ensure that the size of the searched range
2192  // decreases as we match nodes.
2193  pos = std::equal_range (pos.first, needed_nodes.end(), n);
2194 
2195  if (pos.first != pos.second) // we need this node.
2196  {
2197  libmesh_assert_equal_to (*pos.first, n);
2198 
2199  Node & node_ref = mesh.node_ref(cast_int<dof_id_type>(n));
2200  for (unsigned int i=0; i != n_node_integers; ++i)
2201  node_ref.set_extra_integer(i, extra_integers[n_node_integers*idx + i]);
2202  }
2203  }
2204  } // end for (block-based extra integer reads)
2205  } // end if (n_node_integers)
2206 }
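// Node data is thus consumed in up to three block-wise passes:
// (x,y,z) coordinates, optional unique ids (0.9.6+ files), and
// optional extra node integers (1.8.0+ files), each read on
// processor 0 and broadcast to the rest.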
2207 
2208 
2209 
2210 template <typename T>
2211 void XdrIO::read_serialized_bcs_helper (Xdr & io, T /*type_size*/, const std::string bc_type)
2212 {
2213  if (this->boundary_condition_file_name() == "n/a") return;
2214 
2215  libmesh_assert (io.reading());
2216 
2217  // convenient reference to our mesh
2218  MeshBase & mesh = MeshInput<MeshBase>::mesh();
2219 
2220  // and our boundary info object
2221  BoundaryInfo & boundary_info = mesh.get_boundary_info();
2222 
2223  // Version 0.9.2+ introduces unique ids
2224  read_serialized_bc_names(io, boundary_info, true); // sideset names
2225 
2226  std::vector<T> input_buffer;
2227 
2228  new_header_id_type n_bcs=0;
2229  if (this->processor_id() == 0)
2230  {
2231  if (this->version_at_least_1_3_0())
2232  io.data (n_bcs);
2233  else
2234  {
2235  old_header_id_type temp;
2236  io.data (temp);
2237  n_bcs = temp;
2238  }
2239  }
2240  this->comm().broadcast (n_bcs);
2241 
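// Read the stride-3 (element id, side, boundary id) records in
// blocks, again reading on processor 0 and broadcasting.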
2242  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_bcs; blk++)
2243  {
2244  first_bc = blk*io_blksize;
2245  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_bcs));
2246 
2247  input_buffer.resize (3*(last_bc - first_bc));
2248 
2249  if (this->processor_id() == 0)
2250  io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
2251  cast_int<unsigned int>(input_buffer.size()));
2252 
2253  this->comm().broadcast (input_buffer);
2254 
2255  // Look for BCs in this block for all the level-0 elements we have
2256  // (not just local ones). Do this by checking all entries for
2257  // IDs matching an element we can query.
2258  // We cannot rely on nullptr neighbors at this point since the neighbor
2259  // data structure has not been initialized.
2260  for (std::size_t idx=0, ibs=input_buffer.size(); idx<ibs; idx+=3)
2261  {
2262  const dof_id_type dof_id =
2263  cast_int<dof_id_type>(input_buffer[idx+0]);
2264  const unsigned short side =
2265  cast_int<unsigned short>(input_buffer[idx+1]);
2266  const boundary_id_type bc_id =
2267  cast_int<boundary_id_type>(input_buffer[idx+2]);
2268 
2269  const Elem * elem = mesh.query_elem_ptr(dof_id);
2270  if (!elem)
2271  continue;
2272 
2273  if (bc_type == "side")
2274  {
2275  libmesh_assert_less (side, elem->n_sides());
2276  boundary_info.add_side (elem, side, bc_id);
2277  }
2278  else if (bc_type == "edge")
2279  {
2280  libmesh_assert_less (side, elem->n_edges());
2281  boundary_info.add_edge (elem, side, bc_id);
2282  }
2283  else if (bc_type == "shellface")
2284  {
2285  // Shell face IDs can only be 0 or 1.
2286  libmesh_assert_less(side, 2);
2287 
2288  boundary_info.add_shellface (elem, side, bc_id);
2289  }
2290  else
2291  {
2292  libmesh_error_msg("bc_type not recognized: " + bc_type);
2293  }
2294  }
2295  input_buffer.clear();
2296  }
2297 }
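// The three thin wrappers below just dispatch to the helper above
// with the appropriate bc_type string.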
2298 
2299 
2300 
2301 template <typename T>
2302 void XdrIO::read_serialized_side_bcs (Xdr & io, T type_size)
2303 {
2304  read_serialized_bcs_helper(io, type_size, "side");
2305 }
2306 
2307 
2308 
2309 template <typename T>
2310 void XdrIO::read_serialized_edge_bcs (Xdr & io, T type_size)
2311 {
2312  read_serialized_bcs_helper(io, type_size, "edge");
2313 }
2314 
2315 
2316 
2317 template <typename T>
2318 void XdrIO::read_serialized_shellface_bcs (Xdr & io, T type_size)
2319 {
2320  read_serialized_bcs_helper(io, type_size, "shellface");
2321 }
2322 
2323 
2324 
2325 template <typename T>
2326 void XdrIO::read_serialized_nodesets (Xdr & io, T /*type_size*/)
2327 {
2328  if (this->boundary_condition_file_name() == "n/a") return;
2329 
2330  libmesh_assert (io.reading());
2331 
2332  // convenient reference to our mesh
2333  MeshBase & mesh = MeshInput<MeshBase>::mesh();
2334 
2335  // and our boundary info object
2336  BoundaryInfo & boundary_info = mesh.get_boundary_info();
2337 
2338  // Version 0.9.2+ introduces unique ids
2339  read_serialized_bc_names(io, boundary_info, false); // nodeset names
2340 
2341  std::vector<T> input_buffer;
2342 
2343  new_header_id_type n_nodesets=0;
2344  if (this->processor_id() == 0)
2345  {
2346  if (this->version_at_least_1_3_0())
2347  io.data (n_nodesets);
2348  else
2349  {
2350  old_header_id_type temp;
2351  io.data (temp);
2352  n_nodesets = temp;
2353  }
2354  }
2355  this->comm().broadcast (n_nodesets);
2356 
2357  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_nodesets; blk++)
2358  {
2359  first_bc = blk*io_blksize;
2360  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_nodesets));
2361 
2362  input_buffer.resize (2*(last_bc - first_bc));
2363 
2364  if (this->processor_id() == 0)
2365  io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
2366  cast_int<unsigned int>(input_buffer.size()));
2367 
2368  this->comm().broadcast (input_buffer);
2369 
2370  // Look for BCs in this block for all nodes we have (not just
2371  // local ones). Do this by checking all entries for
2372  // IDs matching a node we can query.
2373  for (std::size_t idx=0, ibs=input_buffer.size(); idx<ibs; idx+=2)
2374  {
2375  const dof_id_type dof_id =
2376  cast_int<dof_id_type>(input_buffer[idx+0]);
2377  const boundary_id_type bc_id =
2378  cast_int<boundary_id_type>(input_buffer[idx+1]);
2379 
2380  const Node * node = mesh.query_node_ptr(dof_id);
2381  if (node)
2382  boundary_info.add_node (node, bc_id);
2383  }
2384  input_buffer.clear();
2385  }
2386 }
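// Note that nodeset records are (node id, boundary id) pairs, i.e. a
// stride of 2, versus the stride-3 (element, side, id) records used
// for side/edge/shellface boundary conditions above.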
2387 
2388 
2389 
2390 void XdrIO::read_serialized_bc_names (Xdr & io, BoundaryInfo & info, bool is_sideset)
2391 {
2392  const bool read_entity_info = version_at_least_0_9_2();
2393  const bool use_new_header_type (this->version_at_least_1_3_0());
2394  if (read_entity_info)
2395  {
2396  new_header_id_type n_boundary_names = 0;
2397  std::vector<new_header_id_type> boundary_ids;
2398  std::vector<std::string> boundary_names;
2399 
2400  // Read the sideset names
2401  if (this->processor_id() == 0)
2402  {
2403  if (use_new_header_type)
2404  io.data(n_boundary_names);
2405  else
2406  {
2407  old_header_id_type temp;
2408  io.data(temp);
2409  n_boundary_names = temp;
2410  }
2411 
2412  boundary_names.resize(n_boundary_names);
2413 
2414  if (n_boundary_names)
2415  {
2416  if (use_new_header_type)
2417  io.data(boundary_ids);
2418  else
2419  {
2420  std::vector<old_header_id_type> temp(n_boundary_names);
2421  io.data(temp);
2422  boundary_ids.assign(temp.begin(), temp.end());
2423  }
2424  io.data(boundary_names);
2425  }
2426  }
2427 
2428  // Broadcast the boundary names to all processors
2429  this->comm().broadcast(n_boundary_names);
2430  if (n_boundary_names == 0)
2431  return;
2432 
2433  boundary_ids.resize(n_boundary_names);
2434  boundary_names.resize(n_boundary_names);
2435  this->comm().broadcast(boundary_ids);
2436  this->comm().broadcast(boundary_names);
2437 
2438  // Reassemble the named boundary information
2439  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
2440  info.set_sideset_name_map() : info.set_nodeset_name_map();
2441 
2442  for (unsigned int i=0; i<n_boundary_names; ++i)
2443  boundary_map.emplace(cast_int<boundary_id_type>(boundary_ids[i]), boundary_names[i]);
2444  }
2445 }
2446 
2447 
2448 
2449 void XdrIO::pack_element (std::vector<xdr_id_type> & conn,
2450  const Elem * elem,
2451  const dof_id_type parent_id,
2452  const dof_id_type parent_pid,
2453  const new_header_id_type n_elem_integers) const
2454 {
2455  libmesh_assert(elem);
2456  libmesh_assert_equal_to (elem->n_nodes(), Elem::type_to_n_nodes_map[elem->type()]);
2457 
2458  conn.push_back(elem->n_nodes());
2459 
2460  conn.push_back (elem->type());
2461 
2462  // In version 0.7.0+ "id" is stored but is not used. In version 0.9.2+
2463  // we will store unique_id instead, therefore there is no need to
2464  // check for the older version when writing the unique_id.
2465  conn.push_back (elem->unique_id());
2466 
2467  if (parent_id != DofObject::invalid_id)
2468  {
2469  conn.push_back (parent_id);
2470  libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_id);
2471  conn.push_back (parent_pid);
2472  }
2473 
2474  conn.push_back (elem->processor_id());
2475  conn.push_back (elem->subdomain_id());
2476 
2477 #ifdef LIBMESH_ENABLE_AMR
2478  conn.push_back (elem->p_level());
2479 #endif
2480 
2481  for (auto n : elem->node_index_range())
2482  conn.push_back (elem->node_id(n));
2483 
2484  // Write extra elem integers to connectivity array
2485  for (unsigned int i=0; i != n_elem_integers; ++i)
2486  conn.push_back(elem->get_extra_integer(i));
2487 }
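// The layout written here (type, unique id, optional parent id and
// pid, processor id, subdomain id, p-level, node ids, extra integers)
// mirrors what read_serialized_connectivity() decodes; the leading
// node count is presumably consumed on the writing side when sizing
// each record rather than stored verbatim.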
2488 
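// The version_at_least_* helpers below match substrings of the
// file's version string, so every release that satisfies a given
// check must be listed in it explicitly.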
2489 bool XdrIO::version_at_least_0_9_2 () const
2490 {
2491  return
2492  (this->version().find("0.9.2") != std::string::npos) ||
2493  (this->version().find("0.9.6") != std::string::npos) ||
2494  (this->version().find("1.1.0") != std::string::npos) ||
2495  (this->version().find("1.3.0") != std::string::npos) ||
2496  (this->version().find("1.8.0") != std::string::npos);
2497 }
2498 
2499 bool XdrIO::version_at_least_0_9_6 () const
2500 {
2501  return
2502  (this->version().find("0.9.6") != std::string::npos) ||
2503  (this->version().find("1.1.0") != std::string::npos) ||
2504  (this->version().find("1.3.0") != std::string::npos) ||
2505  (this->version().find("1.8.0") != std::string::npos);
2506 }
2507 
2508 bool XdrIO::version_at_least_1_1_0 () const
2509 {
2510  return
2511  (this->version().find("1.1.0") != std::string::npos) ||
2512  (this->version().find("1.3.0") != std::string::npos) ||
2513  (this->version().find("1.8.0") != std::string::npos);
2514 }
2515 
2516 bool XdrIO::version_at_least_1_3_0 () const
2517 {
2518  return
2519  (this->version().find("1.3.0") != std::string::npos) ||
2520  (this->version().find("1.8.0") != std::string::npos);
2521 }
2522 
2523 bool XdrIO::version_at_least_1_8_0 () const
2524 {
2525  return
2526  (this->version().find("1.8.0") != std::string::npos);
2527 }
2528 
2529 } // namespace libMesh
Definition: xdr_io.C:2523