// The libMesh Finite Element Library.
// Copyright (C) 2002-2019 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner

// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA



// Local includes
#include "libmesh/xdr_io.h"

// libMesh includes
#include "libmesh/boundary_info.h"
#include "libmesh/elem.h"
#include "libmesh/enum_xdr_mode.h"
#include "libmesh/int_range.h"
#include "libmesh/libmesh_logging.h"
#include "libmesh/mesh_base.h"
#include "libmesh/mesh_tools.h"
#include "libmesh/node.h"
#include "libmesh/partitioner.h"
#include "libmesh/xdr_cxx.h"

// TIMPI includes
#include "timpi/parallel_sync.h"

// C++ includes
#include <iostream>
#include <iomanip>
#include <algorithm> // std::sort, std::unique, std::equal_range, std::min
#include <cstdio>
#include <limits>    // std::numeric_limits, for NaN coordinate padding
#include <sstream>   // std::stringstream, used in write_serialized_bcs_helper()
#include <vector>
#include <string>


namespace libMesh
{

//-----------------------------------------------
// anonymous namespace for implementation details
namespace {

template <class T, class U>
struct libmesh_type_is_same {
  static const bool value = false;
};

template <class T>
struct libmesh_type_is_same<T, T> {
  static const bool value = true;
};

}
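
// Note: libmesh_type_is_same is a minimal, file-local stand-in for
// C++11's std::is_same.  read() uses it below to detect systems where
// uint64_t and unsigned long are the same type.  For example (a
// hypothetical check, not part of this file):
//
//   static_assert(libmesh_type_is_same<int, int>::value, "same type");
//   static_assert(!libmesh_type_is_same<int, long>::value, "distinct types");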



// ------------------------------------------------------------
// XdrIO static data
const std::size_t XdrIO::io_blksize = 128000;



// ------------------------------------------------------------
// XdrIO members
XdrIO::XdrIO (MeshBase & mesh, const bool binary_in) :
  MeshInput<MeshBase> (mesh,/* is_parallel_format = */ true),
  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
  ParallelObject      (mesh),
  _binary             (binary_in),
  _legacy             (false),
  _write_serial       (false),
  _write_parallel     (false),
#ifdef LIBMESH_ENABLE_UNIQUE_ID
  _write_unique_id    (true),
#else
  _write_unique_id    (false),
#endif
  _field_width        (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
  _version            ("libMesh-1.3.0"),
  _bc_file_name       ("n/a"),
  _partition_map_file ("n/a"),
  _subdomain_map_file ("n/a"),
  _p_level_file       ("n/a")
{
}



XdrIO::XdrIO (const MeshBase & mesh, const bool binary_in) :
  MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
  ParallelObject      (mesh),
  _binary             (binary_in)
{
}



XdrIO::~XdrIO ()
{
}
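
// Usage sketch (hypothetical caller code, not part of this file):
// writing a mesh to an XDR-format file and reading it back.
//
//   #include "libmesh/mesh.h"
//   #include "libmesh/xdr_io.h"
//
//   void roundtrip (libMesh::Mesh & mesh)
//   {
//     libMesh::XdrIO writer (mesh, /*binary=*/true);
//     writer.write ("mesh.xdr");
//
//     libMesh::XdrIO reader (mesh, /*binary=*/true);
//     reader.read ("mesh.xdr");
//   }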



void XdrIO::write (const std::string & name)
{
  if (this->legacy())
    libmesh_error_msg("We don't support writing parallel files in the legacy format.");

  Xdr io ((this->processor_id() == 0) ? name : "", this->binary() ? ENCODE : WRITE);

  START_LOG("write()","XdrIO");

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  new_header_id_type n_elem = mesh.n_elem();
  new_header_id_type max_node_id = mesh.max_node_id();

  new_header_id_type n_side_bcs      = mesh.get_boundary_info().n_boundary_conds();
  new_header_id_type n_edge_bcs      = mesh.get_boundary_info().n_edge_conds();
  new_header_id_type n_shellface_bcs = mesh.get_boundary_info().n_shellface_conds();
  new_header_id_type n_nodesets      = mesh.get_boundary_info().n_nodeset_conds();
  unsigned int n_p_levels = MeshTools::n_p_levels (mesh);

  bool write_parallel_files = this->write_parallel();

  //-------------------------------------------------------------
  // For all the optional files -- the default file name is "n/a".
  // However, the user may specify an optional external file.

  // If there are BCs and the user has not already provided a
  // file name then write to "."
  if ((n_side_bcs || n_edge_bcs || n_shellface_bcs || n_nodesets) &&
      this->boundary_condition_file_name() == "n/a")
    this->boundary_condition_file_name() = ".";

  // If the mesh has subdomains and the user has not specified an
  // external file then write the subdomain mapping to the default file "."
  if ((mesh.n_subdomains() > 0) &&
      (this->subdomain_map_file_name() == "n/a"))
    this->subdomain_map_file_name() = ".";

  // In general we don't write the partition information.

  // If we have p levels and the user has not already provided
  // a file name then write to "."
  if ((n_p_levels > 1) &&
      (this->polynomial_level_file_name() == "n/a"))
    this->polynomial_level_file_name() = ".";

  // write the header
  if (this->processor_id() == 0)
    {
      std::string full_ver = this->version() + (write_parallel_files ? " parallel" : "");
      io.data (full_ver);

      io.data (n_elem, "# number of elements");
      io.data (max_node_id, "# number of nodes"); // We'll write invalid coords into gaps

      io.data (this->boundary_condition_file_name(), "# boundary condition specification file");
      io.data (this->subdomain_map_file_name(),      "# subdomain id specification file");
      io.data (this->partition_map_file_name(),      "# processor id specification file");
      io.data (this->polynomial_level_file_name(),   "# p-level specification file");

      // Version 0.9.2+ introduces sizes for each type
      new_header_id_type write_size = sizeof(xdr_id_type), zero_size = 0;

      const bool
        write_p_level      = ("." == this->polynomial_level_file_name()),
        write_partitioning = ("." == this->partition_map_file_name()),
        write_subdomain_id = ("." == this->subdomain_map_file_name()),
        write_bcs          = ("." == this->boundary_condition_file_name());

      io.data (write_size, "# type size");
      io.data (_write_unique_id ? write_size : zero_size, "# uid size");
      io.data (write_partitioning ? write_size : zero_size, "# pid size");
      io.data (write_subdomain_id ? write_size : zero_size, "# sid size");
      io.data (write_p_level ? write_size : zero_size, "# p-level size");
      // Boundary Condition sizes
      io.data (write_bcs ? write_size : zero_size, "# eid size");  // elem id
      io.data (write_bcs ? write_size : zero_size, "# side size"); // side number
      io.data (write_bcs ? write_size : zero_size, "# bid size");  // boundary id
    }
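
  // For reference, the header written above consists of (in order): the
  // version string (with " parallel" appended for parallel formats), the
  // element and node counts, the four optional file-name strings, and
  // eight field-width records ("type", "uid", "pid", "sid", "p-level",
  // "eid", "side", "bid"); a width of zero marks the corresponding field
  // as absent from the records that follow.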

  if (write_parallel_files)
    {
      // Parallel xdr mesh files aren't implemented yet; until they
      // are we'll just warn the user and write a serial file.
      libMesh::out << "Warning!  Parallel xda/xdr is not yet implemented.\n";
      libMesh::out << "Writing a serialized file instead." << std::endl;

      // write subdomain names
      this->write_serialized_subdomain_names(io);

      // write connectivity
      this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem));

      // write the nodal locations
      this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id));

      // write the side boundary condition information
      this->write_serialized_side_bcs (io, n_side_bcs);

      // write the nodeset information
      this->write_serialized_nodesets (io, n_nodesets);

      // write the edge boundary condition information
      this->write_serialized_edge_bcs (io, n_edge_bcs);

      // write the "shell face" boundary condition information
      this->write_serialized_shellface_bcs (io, n_shellface_bcs);
    }
  else
    {
      // write subdomain names
      this->write_serialized_subdomain_names(io);

      // write connectivity
      this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem));

      // write the nodal locations
      this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id));

      // write the side boundary condition information
      this->write_serialized_side_bcs (io, n_side_bcs);

      // write the nodeset information
      this->write_serialized_nodesets (io, n_nodesets);

      // write the edge boundary condition information
      this->write_serialized_edge_bcs (io, n_edge_bcs);

      // write the "shell face" boundary condition information
      this->write_serialized_shellface_bcs (io, n_shellface_bcs);
    }

  STOP_LOG("write()","XdrIO");

  // pause all processes until the writing ends -- this will
  // protect against the pathological case where a write is
  // followed immediately by a read.  The write must be
  // guaranteed to complete first.
  io.close();
  this->comm().barrier();
}



void XdrIO::write_serialized_subdomain_names (Xdr & io) const
{
  if (this->processor_id() == 0)
    {
      const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

      const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();

      std::vector<new_header_id_type> subdomain_ids;
      subdomain_ids.reserve(subdomain_map.size());

      std::vector<std::string> subdomain_names;
      subdomain_names.reserve(subdomain_map.size());

      // We need to loop over the map and make sure that there aren't any invalid entries.  Since we
      // return writable references in mesh_base, it's possible for the user to leave some entity names
      // blank.  We can't write those to the XDA file.
      new_header_id_type n_subdomain_names = 0;
      for (const auto & pr : subdomain_map)
        if (!pr.second.empty())
          {
            n_subdomain_names++;
            subdomain_ids.push_back(pr.first);
            subdomain_names.push_back(pr.second);
          }

      io.data(n_subdomain_names, "# subdomain id to name map");
      // Write out the ids and names in two vectors
      if (n_subdomain_names)
        {
          io.data(subdomain_ids);
          io.data(subdomain_names);
        }
    }
}



void XdrIO::write_serialized_connectivity (Xdr & io, const dof_id_type libmesh_dbg_var(n_elem)) const
{
  libmesh_assert (io.writing());

  const bool
    write_p_level      = ("." == this->polynomial_level_file_name()),
    write_partitioning = ("." == this->partition_map_file_name()),
    write_subdomain_id = ("." == this->subdomain_map_file_name());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
  libmesh_assert_equal_to (n_elem, mesh.n_elem());

  // We will only write active elements and their parents.
  const unsigned int n_active_levels = MeshTools::n_active_levels (mesh);
  std::vector<xdr_id_type> n_global_elem_at_level(n_active_levels);

  // Find the number of local and global elements at each level
#ifndef NDEBUG
  xdr_id_type tot_n_elem = 0;
#endif
  for (unsigned int level=0; level<n_active_levels; level++)
    {
      n_global_elem_at_level[level] =
        MeshTools::n_elem(mesh.local_level_elements_begin(level),
                          mesh.local_level_elements_end(level));

      this->comm().sum(n_global_elem_at_level[level]);
#ifndef NDEBUG
      tot_n_elem += n_global_elem_at_level[level];
#endif
      libmesh_assert_less_equal (n_global_elem_at_level[level], n_elem);
      libmesh_assert_less_equal (tot_n_elem, n_elem);
    }

  std::vector<xdr_id_type>
    xfer_conn, recv_conn;
  std::vector<dof_id_type>
    n_elem_on_proc(this->n_processors()), processor_offsets(this->n_processors());
  std::vector<xdr_id_type> output_buffer;
  std::vector<std::size_t>
    xfer_buf_sizes(this->n_processors());

#ifdef LIBMESH_ENABLE_AMR
  typedef std::map<dof_id_type, std::pair<processor_id_type, dof_id_type>> id_map_type;
  id_map_type parent_id_map, child_id_map;
#endif

  dof_id_type my_next_elem=0, next_global_elem=0;

  //-------------------------------------------
  // First write the level-0 elements directly.
  for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
                                    mesh.local_level_elements_end(0)))
    {
      pack_element (xfer_conn, elem);
#ifdef LIBMESH_ENABLE_AMR
      parent_id_map[elem->id()] = std::make_pair(this->processor_id(),
                                                 my_next_elem);
#endif
      ++my_next_elem;
    }
  xfer_conn.push_back(my_next_elem); // toss in the number of elements transferred.
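
  // Layout note: each element record packed into xfer_conn is
  //   [ n_nodes, type, (uid), (pid), (sid), (p-level), n0 ... nN-1 ]
  // with the optional fields present according to the flags above, and
  // the final entry of the buffer is the element count, which processor
  // 0 reads back before unpacking the records below.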

  std::size_t my_size = xfer_conn.size();
  this->comm().gather (0, my_next_elem, n_elem_on_proc);
  this->comm().gather (0, my_size, xfer_buf_sizes);

  processor_offsets[0] = 0;
  for (auto pid : IntRange<processor_id_type>(1, this->n_processors()))
    processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];

  // All processors send their xfer buffers to processor 0.
  // Processor 0 will receive the data and write out the elements.
  if (this->processor_id() == 0)
    {
      // Write the number of elements at this level.
      {
        std::string comment = "# n_elem at level 0", legend = ", [ type ";
        if (_write_unique_id)
          legend += "uid ";
        if (write_partitioning)
          legend += "pid ";
        if (write_subdomain_id)
          legend += "sid ";
        if (write_p_level)
          legend += "p_level ";
        legend += "(n0 ... nN-1) ]";
        comment += legend;
        io.data (n_global_elem_at_level[0], comment.c_str());
      }

      for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
        {
          recv_conn.resize(xfer_buf_sizes[pid]);
          if (pid == 0)
            recv_conn = xfer_conn;
          else
            this->comm().receive (pid, recv_conn);

          // at a minimum, the buffer should contain the number of elements,
          // which could be 0.
          libmesh_assert (!recv_conn.empty());

          {
            const xdr_id_type n_elem_received = recv_conn.back();
            std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();

            for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
              {
                output_buffer.clear();

                // n. nodes
                const xdr_id_type n_nodes = *recv_conn_iter;
                ++recv_conn_iter;

                // type
                output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;

                // unique_id
                if (_write_unique_id)
                  output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;

                // processor id
                if (write_partitioning)
                  output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;

                // subdomain id
                if (write_subdomain_id)
                  output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;

#ifdef LIBMESH_ENABLE_AMR
                // p level
                if (write_p_level)
                  output_buffer.push_back(*recv_conn_iter);
                ++recv_conn_iter;
#endif
                for (dof_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
                  output_buffer.push_back(*recv_conn_iter);

                io.data_stream
                  (output_buffer.data(),
                   cast_int<unsigned int>(output_buffer.size()),
                   cast_int<unsigned int>(output_buffer.size()));
              }
          }
        }
    }
  else
    this->comm().send (0, xfer_conn);

#ifdef LIBMESH_ENABLE_AMR
  //--------------------------------------------------------------------
  // Next write the remaining elements indirectly through their parents.
  // This will ensure that the children are written in the proper order
  // so they can be reconstructed properly.
  for (unsigned int level=1; level<n_active_levels; level++)
    {
      xfer_conn.clear();

      dof_id_type my_n_elem_written_at_level = 0;
      for (const auto & parent : as_range(mesh.local_level_elements_begin(level-1),
                                          mesh.local_level_elements_end(level-1)))
        if (!parent->active()) // we only want the parent elements at this level, and
          {                    // there is no direct iterator for this obscure use
            id_map_type::iterator pos = parent_id_map.find(parent->id());
            libmesh_assert (pos != parent_id_map.end());
            const processor_id_type parent_pid = pos->second.first;
            const dof_id_type parent_id = pos->second.second;
            parent_id_map.erase(pos);

            for (auto & child : parent->child_ref_range())
              {
                pack_element (xfer_conn, &child, parent_id, parent_pid);

                // this approach introduces the possibility that we write
                // non-local elements.  These elements may well be parents
                // at the next step
                child_id_map[child.id()] = std::make_pair (child.processor_id(),
                                                           my_n_elem_written_at_level++);
                my_next_elem++;
              }
          }
      xfer_conn.push_back(my_n_elem_written_at_level);
      my_size = xfer_conn.size();
      this->comm().gather (0, my_size, xfer_buf_sizes);

      // Processor 0 will receive the data and write the elements.
      if (this->processor_id() == 0)
        {
          // Write the number of elements at this level.
          {
            char buf[80];
            std::sprintf(buf, "# n_elem at level %u", level);
            std::string comment(buf), legend = ", [ type ";

            if (_write_unique_id)
              legend += "uid ";
            legend += "parent ";
            if (write_partitioning)
              legend += "pid ";
            if (write_subdomain_id)
              legend += "sid ";
            if (write_p_level)
              legend += "p_level ";
            legend += "(n0 ... nN-1) ]";
            comment += legend;
            io.data (n_global_elem_at_level[level], comment.c_str());
          }

          for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
            {
              recv_conn.resize(xfer_buf_sizes[pid]);
              if (pid == 0)
                recv_conn = xfer_conn;
              else
                this->comm().receive (pid, recv_conn);

              // at a minimum, the buffer should contain the number of elements,
              // which could be 0.
              libmesh_assert (!recv_conn.empty());

              {
                const xdr_id_type n_elem_received = recv_conn.back();
                std::vector<xdr_id_type>::const_iterator recv_conn_iter = recv_conn.begin();

                for (xdr_id_type elem=0; elem<n_elem_received; elem++, next_global_elem++)
                  {
                    output_buffer.clear();

                    // n. nodes
                    const xdr_id_type n_nodes = *recv_conn_iter;
                    ++recv_conn_iter;

                    // type
                    output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    // unique_id
                    if (_write_unique_id)
                      output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    // parent local id
                    const xdr_id_type parent_local_id = *recv_conn_iter;
                    ++recv_conn_iter;

                    // parent processor id
                    const xdr_id_type parent_pid = *recv_conn_iter;
                    ++recv_conn_iter;

                    output_buffer.push_back (parent_local_id+processor_offsets[parent_pid]);

                    // processor id
                    if (write_partitioning)
                      output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    // subdomain id
                    if (write_subdomain_id)
                      output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    // p level
                    if (write_p_level)
                      output_buffer.push_back(*recv_conn_iter);
                    ++recv_conn_iter;

                    for (xdr_id_type node=0; node<n_nodes; node++, ++recv_conn_iter)
                      output_buffer.push_back(*recv_conn_iter);

                    io.data_stream
                      (output_buffer.data(),
                       cast_int<unsigned int>(output_buffer.size()),
                       cast_int<unsigned int>(output_buffer.size()));
                  }
              }
            }
        }
      else
        this->comm().send (0, xfer_conn);

      // update the processor_offsets
      processor_offsets[0] = processor_offsets.back() + n_elem_on_proc.back();
      this->comm().gather (0, my_n_elem_written_at_level, n_elem_on_proc);
      for (auto pid : IntRange<processor_id_type>(1, this->n_processors()))
        processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];

      // Now, at the next level we will again iterate over local parents.  However,
      // those parents may have been written by other processors (at this step),
      // so we need to gather them into our *_id_maps.
      {
        std::map<processor_id_type, std::vector<dof_id_type>> requested_ids;

        for (const auto & elem : as_range(mesh.local_level_elements_begin(level),
                                          mesh.local_level_elements_end(level)))
          if (!child_id_map.count(elem->id()))
            {
              libmesh_assert_not_equal_to (elem->parent()->processor_id(), this->processor_id());
              const processor_id_type pid = elem->parent()->processor_id();
              if (pid != this->processor_id())
                requested_ids[pid].push_back(elem->id());
            }

        auto gather_functor =
          [& child_id_map]
          (processor_id_type libmesh_dbg_var(pid),
           const std::vector<dof_id_type> & ids,
           std::vector<dof_id_type> & data)
          {
            const std::size_t ids_size = ids.size();
            data.resize(ids_size);

            // Fill those requests by overwriting the requested ids
            for (std::size_t i=0; i != ids_size; i++)
              {
                libmesh_assert (child_id_map.count(ids[i]));
                libmesh_assert_equal_to (child_id_map[ids[i]].first, pid);

                data[i] = child_id_map[ids[i]].second;
              }
          };

        auto action_functor =
          [& child_id_map]
          (processor_id_type pid,
           const std::vector<dof_id_type> & ids,
           const std::vector<dof_id_type> & data)
          {
            std::size_t data_size = data.size();

            for (std::size_t i=0; i != data_size; i++)
              child_id_map[ids[i]] =
                std::make_pair (pid, data[i]);
          };
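
        // What pull_parallel_vector_data() does here (summary): each
        // requested_ids vector is shipped to its owning processor,
        // gather_functor fills in the answers there, and action_functor
        // runs back on this processor with the returned data -- so after
        // the call below child_id_map also covers our non-local children.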

        // Trade ids back and forth
        const dof_id_type * ex = nullptr;
        Parallel::pull_parallel_vector_data
          (this->comm(), requested_ids, gather_functor, action_functor, ex);

        // overwrite the parent_id_map with the child_id_map, but
        // use std::map::swap() for efficiency.
        parent_id_map.swap(child_id_map);
        child_id_map.clear();
      }
    }
#endif // LIBMESH_ENABLE_AMR
  if (this->processor_id() == 0)
    libmesh_assert_equal_to (next_global_elem, n_elem);

}



void XdrIO::write_serialized_nodes (Xdr & io, const dof_id_type max_node_id) const
{
  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
  libmesh_assert_equal_to (max_node_id, mesh.max_node_id());

  std::vector<dof_id_type> xfer_ids;
  std::vector<Real> xfer_coords;
  std::vector<Real> & coords=xfer_coords;

  std::vector<std::vector<dof_id_type>> recv_ids   (this->n_processors());
  std::vector<std::vector<Real>>        recv_coords(this->n_processors());

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  std::vector<xdr_id_type> xfer_unique_ids;
  std::vector<xdr_id_type> & unique_ids=xfer_unique_ids;
  std::vector<std::vector<xdr_id_type>> recv_unique_ids (this->n_processors());
#endif // LIBMESH_ENABLE_UNIQUE_ID

  std::size_t n_written=0;

  MeshBase::const_node_iterator node_iter = mesh.local_nodes_begin();
  const MeshBase::const_node_iterator nodes_end = mesh.local_nodes_end();

  for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
    {
      const std::size_t first_node = blk*io_blksize;
      last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));

      const std::size_t tot_id_size = last_node - first_node;

      // Build up the xfer buffers on each processor
      xfer_ids.clear();
      xfer_coords.clear();

      for (; node_iter != nodes_end; ++node_iter)
        {
          const Node & node = **node_iter;
          libmesh_assert_greater_equal(node.id(), first_node);
          if (node.id() >= last_node)
            break;

          xfer_ids.push_back(node.id());
          xfer_coords.push_back(node(0));
#if LIBMESH_DIM > 1
          xfer_coords.push_back(node(1));
#endif
#if LIBMESH_DIM > 2
          xfer_coords.push_back(node(2));
#endif
        }

      //-------------------------------------
      // Send the xfer buffers to processor 0
      std::vector<std::size_t> ids_size;

      const std::size_t my_ids_size = xfer_ids.size();

      // explicitly gather ids_size
      this->comm().gather (0, my_ids_size, ids_size);

      // We will have lots of simultaneous receives if we are
      // processor 0, so let's use nonblocking receives.
      std::vector<Parallel::Request>
        id_request_handles(this->n_processors()-1),
        coord_request_handles(this->n_processors()-1);

      Parallel::MessageTag
        id_tag    = mesh.comm().get_unique_tag(),
        coord_tag = mesh.comm().get_unique_tag();

      // Post the receives -- do this on processor 0 only.
      if (this->processor_id() == 0)
        {
          for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
            {
              recv_ids[pid].resize(ids_size[pid]);
              recv_coords[pid].resize(ids_size[pid]*LIBMESH_DIM);

              if (pid == 0)
                {
                  recv_ids[0] = xfer_ids;
                  recv_coords[0] = xfer_coords;
                }
              else
                {
                  this->comm().receive (pid, recv_ids[pid],
                                        id_request_handles[pid-1],
                                        id_tag);
                  this->comm().receive (pid, recv_coords[pid],
                                        coord_request_handles[pid-1],
                                        coord_tag);
                }
            }
        }
      else
        {
          // Send -- do this on all other processors.
          this->comm().send(0, xfer_ids,    id_tag);
          this->comm().send(0, xfer_coords, coord_tag);
        }

      // -------------------------------------------------------
      // Receive the messages and write the output on processor 0.
      if (this->processor_id() == 0)
        {
          // Wait for all the receives to complete.  We have no
          // need for the statuses since we already know the
          // buffer sizes.
          Parallel::wait (id_request_handles);
          Parallel::wait (coord_request_handles);

#ifndef NDEBUG
          for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
            libmesh_assert_equal_to(recv_coords[pid].size(),
                                    recv_ids[pid].size()*LIBMESH_DIM);
#endif

          // Write the coordinates in this block.
          // Some of these coordinates may correspond to ids for which
          // no node exists, if we have a discontiguous node
          // numbering!

          // Write invalid values for unused node ids
          coords.clear();
          coords.resize (3*tot_id_size, std::numeric_limits<Real>::quiet_NaN());

          for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
            for (auto idx : index_range(recv_ids[pid]))
              {
                libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
                const std::size_t local_idx = recv_ids[pid][idx] - first_node;
                libmesh_assert_less(local_idx, tot_id_size);

                libmesh_assert_less ((3*local_idx+2), coords.size());
                libmesh_assert_less ((LIBMESH_DIM*idx+LIBMESH_DIM-1), recv_coords[pid].size());

                coords[3*local_idx+0] = recv_coords[pid][LIBMESH_DIM*idx+0];
#if LIBMESH_DIM > 1
                coords[3*local_idx+1] = recv_coords[pid][LIBMESH_DIM*idx+1];
#else
                coords[3*local_idx+1] = 0.;
#endif
#if LIBMESH_DIM > 2
                coords[3*local_idx+2] = recv_coords[pid][LIBMESH_DIM*idx+2];
#else
                coords[3*local_idx+2] = 0.;
#endif

                n_written++;
              }

          io.data_stream (coords.empty() ? nullptr : coords.data(),
                          cast_int<unsigned int>(coords.size()), 3);
        }
    }

  if (this->processor_id() == 0)
    libmesh_assert_less_equal (n_written, max_node_id);

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  // XDR unsigned char doesn't work as anticipated
  unsigned short write_unique_ids = 1;
#else
  unsigned short write_unique_ids = 0;
#endif
  if (this->processor_id() == 0)
    io.data (write_unique_ids, "# presence of unique ids");

#ifdef LIBMESH_ENABLE_UNIQUE_ID
  n_written = 0;

  node_iter = mesh.local_nodes_begin();

  for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
    {
      const std::size_t first_node = blk*io_blksize;
      last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));

      const std::size_t tot_id_size = last_node - first_node;

      // Build up the xfer buffers on each processor
      xfer_ids.clear();
      xfer_ids.reserve(tot_id_size);
      xfer_unique_ids.clear();
      xfer_unique_ids.reserve(tot_id_size);

      for (; node_iter != nodes_end; ++node_iter)
        {
          const Node & node = **node_iter;
          libmesh_assert_greater_equal(node.id(), first_node);
          if (node.id() >= last_node)
            break;

          xfer_ids.push_back(node.id());
          xfer_unique_ids.push_back(node.unique_id());
        }

      //-------------------------------------
      // Send the xfer buffers to processor 0
      std::vector<std::size_t> ids_size;

      const std::size_t my_ids_size = xfer_ids.size();

      // explicitly gather ids_size
      this->comm().gather (0, my_ids_size, ids_size);

      // We will have lots of simultaneous receives if we are
      // processor 0, so let's use nonblocking receives.
      std::vector<Parallel::Request>
        unique_id_request_handles(this->n_processors()-1),
        id_request_handles(this->n_processors()-1);

      Parallel::MessageTag
        unique_id_tag = mesh.comm().get_unique_tag(),
        id_tag        = mesh.comm().get_unique_tag();

      // Post the receives -- do this on processor 0 only.
      if (this->processor_id() == 0)
        {
          for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
            {
              recv_ids[pid].resize(ids_size[pid]);
              recv_unique_ids[pid].resize(ids_size[pid]);

              if (pid == 0)
                {
                  recv_ids[0] = xfer_ids;
                  recv_unique_ids[0] = xfer_unique_ids;
                }
              else
                {
                  this->comm().receive (pid, recv_ids[pid],
                                        id_request_handles[pid-1],
                                        id_tag);
                  this->comm().receive (pid, recv_unique_ids[pid],
                                        unique_id_request_handles[pid-1],
                                        unique_id_tag);
                }
            }
        }
      else
        {
          // Send -- do this on all other processors.
          this->comm().send(0, xfer_ids,        id_tag);
          this->comm().send(0, xfer_unique_ids, unique_id_tag);
        }

      // -------------------------------------------------------
      // Receive the messages and write the output on processor 0.
      if (this->processor_id() == 0)
        {
          // Wait for all the receives to complete.  We have no
          // need for the statuses since we already know the
          // buffer sizes.
          Parallel::wait (id_request_handles);
          Parallel::wait (unique_id_request_handles);

#ifndef NDEBUG
          for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
            libmesh_assert_equal_to
              (recv_ids[pid].size(), recv_unique_ids[pid].size());
#endif

          libmesh_assert_less_equal
            (tot_id_size, std::min(io_blksize, std::size_t(max_node_id)));

          // Write the unique ids in this block.
          unique_ids.clear();
          unique_ids.resize(tot_id_size, unique_id_type(-1));

          for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
            for (auto idx : index_range(recv_ids[pid]))
              {
                libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
                const std::size_t local_idx = recv_ids[pid][idx] - first_node;
                libmesh_assert_less (local_idx, unique_ids.size());

                unique_ids[local_idx] = recv_unique_ids[pid][idx];

                n_written++;
              }

          io.data_stream (unique_ids.empty() ? nullptr : unique_ids.data(),
                          cast_int<unsigned int>(unique_ids.size()), 1);
        }
    }

  if (this->processor_id() == 0)
    libmesh_assert_less_equal (n_written, max_node_id);

#endif // LIBMESH_ENABLE_UNIQUE_ID
}
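
// Note on blocking: nodes are streamed in chunks of io_blksize ids so
// that processor 0 never holds the whole serialized mesh at once.  Gaps
// in a discontiguous node numbering are padded with NaN coordinates
// (and unique_id_type(-1) unique ids) so that records in the file stay
// addressable by node id on read.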



void XdrIO::write_serialized_bcs_helper (Xdr & io, const new_header_id_type n_bcs, const std::string bc_type) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, true);  // sideset names

  new_header_id_type n_bcs_out = n_bcs;
  if (this->processor_id() == 0)
    {
      std::stringstream comment_string;
      comment_string << "# number of " << bc_type << " boundary conditions";
      io.data (n_bcs_out, comment_string.str().c_str());
    }
  n_bcs_out = 0;

  if (!n_bcs) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  // Container to catch boundary IDs handed back by BoundaryInfo
  std::vector<boundary_id_type> bc_ids;

  // Boundary conditions are only specified for level-0 elements
  dof_id_type n_local_level_0_elem=0;
  for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
                                    mesh.local_level_elements_end(0)))
    {
      if (bc_type == "side")
        {
          for (auto s : elem->side_index_range())
            {
              boundary_info.boundary_ids (elem, s, bc_ids);
              for (const auto & bc_id : bc_ids)
                if (bc_id != BoundaryInfo::invalid_id)
                  {
                    xfer_bcs.push_back (n_local_level_0_elem);
                    xfer_bcs.push_back (s);
                    xfer_bcs.push_back (bc_id);
                  }
            }
        }
      else if (bc_type == "edge")
        {
          for (auto e : elem->edge_index_range())
            {
              boundary_info.edge_boundary_ids (elem, e, bc_ids);
              for (const auto & bc_id : bc_ids)
                if (bc_id != BoundaryInfo::invalid_id)
                  {
                    xfer_bcs.push_back (n_local_level_0_elem);
                    xfer_bcs.push_back (e);
                    xfer_bcs.push_back (bc_id);
                  }
            }
        }
      else if (bc_type == "shellface")
        {
          for (unsigned short sf=0; sf<2; sf++)
            {
              boundary_info.shellface_boundary_ids (elem, sf, bc_ids);
              for (const auto & bc_id : bc_ids)
                if (bc_id != BoundaryInfo::invalid_id)
                  {
                    xfer_bcs.push_back (n_local_level_0_elem);
                    xfer_bcs.push_back (sf);
                    xfer_bcs.push_back (bc_id);
                  }
            }
        }
      else
        {
          libmesh_error_msg("bc_type not recognized: " + bc_type);
        }

      // Increment the level-0 element counter.
      n_local_level_0_elem++;
    }

  xfer_bcs.push_back(n_local_level_0_elem);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type elem_offset = 0;
      for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          const dof_id_type my_n_local_level_0_elem
            = cast_int<dof_id_type>(recv_bcs.back());
          recv_bcs.pop_back();

          for (std::size_t idx=0, rbs=recv_bcs.size(); idx<rbs; idx += 3, n_bcs_out++)
            recv_bcs[idx+0] += elem_offset;

          io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
                          cast_int<unsigned int>(recv_bcs.size()), 3);
          elem_offset += my_n_local_level_0_elem;
        }
      libmesh_assert_equal_to (n_bcs, n_bcs_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}
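
// Usage sketch (hypothetical caller code, not part of this file): the
// (element, side, boundary-id) triples serialized above originate from
// the mesh's BoundaryInfo object, e.g.
//
//   mesh.get_boundary_info().add_side (elem_id, /*side=*/0, /*bc_id=*/1);
//   mesh.get_boundary_info().add_node (node_id, /*bc_id=*/2);
//
// XdrIO::write() then serializes them through write_serialized_side_bcs()
// and write_serialized_nodesets() respectively.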



void XdrIO::write_serialized_side_bcs (Xdr & io, const new_header_id_type n_side_bcs) const
{
  write_serialized_bcs_helper(io, n_side_bcs, "side");
}



void XdrIO::write_serialized_edge_bcs (Xdr & io, const new_header_id_type n_edge_bcs) const
{
  write_serialized_bcs_helper(io, n_edge_bcs, "edge");
}



void XdrIO::write_serialized_shellface_bcs (Xdr & io, const new_header_id_type n_shellface_bcs) const
{
  write_serialized_bcs_helper(io, n_shellface_bcs, "shellface");
}



void XdrIO::write_serialized_nodesets (Xdr & io, const new_header_id_type n_nodesets) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, false);  // nodeset names

  new_header_id_type n_nodesets_out = n_nodesets;
  if (this->processor_id() == 0)
    io.data (n_nodesets_out, "# number of nodesets");
  n_nodesets_out = 0;

  if (!n_nodesets) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  // Container to catch boundary IDs handed back by BoundaryInfo
  std::vector<boundary_id_type> nodeset_ids;

  dof_id_type n_node=0;
  for (const auto & node : mesh.local_node_ptr_range())
    {
      boundary_info.boundary_ids (node, nodeset_ids);
      for (const auto & bc_id : nodeset_ids)
        if (bc_id != BoundaryInfo::invalid_id)
          {
            xfer_bcs.push_back (node->id());
            xfer_bcs.push_back (bc_id);
          }
    }

  xfer_bcs.push_back(n_node);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type node_offset = 0;
      for (auto pid : IntRange<processor_id_type>(0, this->n_processors()))
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          const dof_id_type my_n_node =
            cast_int<dof_id_type>(recv_bcs.back());
          recv_bcs.pop_back();

          for (std::size_t idx=0, rbs=recv_bcs.size(); idx<rbs; idx += 2, n_nodesets_out++)
            recv_bcs[idx+0] += node_offset;

          io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
                          cast_int<unsigned int>(recv_bcs.size()), 2);
          node_offset += my_n_node;
        }
      libmesh_assert_equal_to (n_nodesets, n_nodesets_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}



void XdrIO::write_serialized_bc_names (Xdr & io, const BoundaryInfo & info, bool is_sideset) const
{
  if (this->processor_id() == 0)
    {
      const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
        info.get_sideset_name_map() : info.get_nodeset_name_map();

      std::vector<new_header_id_type> boundary_ids;
      boundary_ids.reserve(boundary_map.size());

      std::vector<std::string> boundary_names;
      boundary_names.reserve(boundary_map.size());

      // We need to loop over the map and make sure that there aren't any invalid entries.  Since we
      // return writable references in boundary_info, it's possible for the user to leave some entity names
      // blank.  We can't write those to the XDA file.
      new_header_id_type n_boundary_names = 0;
      for (const auto & pr : boundary_map)
        if (!pr.second.empty())
          {
            n_boundary_names++;
            boundary_ids.push_back(pr.first);
            boundary_names.push_back(pr.second);
          }

      if (is_sideset)
        io.data(n_boundary_names, "# sideset id to name map");
      else
        io.data(n_boundary_names, "# nodeset id to name map");
      // Write out the ids and names in two vectors
      if (n_boundary_names)
        {
          io.data(boundary_ids);
          io.data(boundary_names);
        }
    }
}
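
// Usage sketch (hypothetical caller code, not part of this file): the
// id-to-name maps serialized above are populated through BoundaryInfo's
// writable name accessors, e.g.
//
//   mesh.get_boundary_info().sideset_name(1) = "inlet";
//   mesh.get_boundary_info().nodeset_name(2) = "clamped";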



void XdrIO::read (const std::string & name)
{
  LOG_SCOPE("read()","XdrIO");

  // Only open the file on processor 0 -- this is especially important because
  // there may be an underlying bzip/bunzip going on, and multiple simultaneous
  // calls will produce a race condition.
  Xdr io (this->processor_id() == 0 ? name : "", this->binary() ? DECODE : READ);

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // get the version string.
  if (this->processor_id() == 0)
    io.data (this->version());
  this->comm().broadcast (this->version());

  // note that for "legacy" files the first entry is an
  // integer -- not a string at all.
  this->legacy() = !(this->version().find("libMesh") < this->version().size());

  // Check for a legacy version format.
  if (this->legacy())
    libmesh_error_msg("We no longer support reading files in the legacy format.");

  // Read headers with the old id type if they're pre-1.3.0, or with
  // the new id type if they're post-1.3.0
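  // The ten meta_data slots filled by read_header() below hold, in
  // order: n_elem, n_nodes, and the eight field widths written by
  // write(): the "type", "uid", "pid", "sid", "p-level", "eid", "side",
  // and "bid" sizes.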
1237  std::vector<new_header_id_type> meta_data(10, sizeof(xdr_id_type));
1238  if (this->version_at_least_1_3_0())
1239  {
1240  this->read_header(io, meta_data);
1241  }
1242  else
1243  {
1244  std::vector<old_header_id_type> old_data(10, sizeof(xdr_id_type));
1245 
1246  this->read_header(io, old_data);
1247 
1248  meta_data.assign(old_data.begin(), old_data.end());
1249  }
1250 
1251  const new_header_id_type & n_elem = meta_data[0];
1252  const new_header_id_type & n_nodes = meta_data[1];
1253 
1259  if (version_at_least_0_9_2())
1260  _field_width = cast_int<unsigned int>(meta_data[2]);
1261 
1262  // On systems where uint64_t==unsigned long, we were previously
1263  // writing 64-bit unsigned integers via xdr_u_long(), a function
1264  // which is literally less suited for that task than abort() would
1265  // have been, because at least abort() would have *known* it
1266  // couldn't write rather than truncating writes to 32 bits.
1267  //
1268  // If we have files with version < 1.3.0, then we'll continue to use
1269  // 32 bit field width, regardless of whether the file thinks we
1270  // should, whenever we're on a system where the problem would have
1271  // occurred.
1272  if ((_field_width == 4) ||
1273  (!version_at_least_1_3_0() &&
1275  {
1276  uint32_t type_size = 0;
1277 
1278  // read subdomain names
1280 
1281  // read connectivity
1282  this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);
1283 
1284  // read the nodal locations
1285  this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes));
1286 
1287  // read the side boundary conditions
1288  this->read_serialized_side_bcs (io, type_size);
1289 
1290  if (version_at_least_0_9_2())
1291  // read the nodesets
1292  this->read_serialized_nodesets (io, type_size);
1293 
1294  if (version_at_least_1_1_0())
1295  {
1296  // read the edge boundary conditions
1297  this->read_serialized_edge_bcs (io, type_size);
1298 
1299  // read the "shell face" boundary conditions
1300  this->read_serialized_shellface_bcs (io, type_size);
1301  }
1302  }
1303  else if (_field_width == 8)
1304  {
1305  uint64_t type_size = 0;
1306 
1307  // read subdomain names
1309 
1310  // read connectivity
1311  this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);
1312 
1313  // read the nodal locations
1314  this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes));
1315 
1316  // read the boundary conditions
1317  this->read_serialized_side_bcs (io, type_size);
1318 
1319  if (version_at_least_0_9_2())
1320  // read the nodesets
1321  this->read_serialized_nodesets (io, type_size);
1322 
1323  if (version_at_least_1_1_0())
1324  {
1325  // read the edge boundary conditions
1326  this->read_serialized_edge_bcs (io, type_size);
1327 
1328  // read the "shell face" boundary conditions
1329  this->read_serialized_shellface_bcs (io, type_size);
1330  }
1331  }
1332 
1333  // set the node processor ids
1335 }
1336 
1337 
1338 
1339 template <typename T>
1340 void XdrIO::read_header (Xdr & io, std::vector<T> & meta_data)
1341 {
1342  LOG_SCOPE("read_header()","XdrIO");
1343 
1344  // convenient reference to our mesh
1346 
1347  if (this->processor_id() == 0)
1348  {
1349  unsigned int pos=0;
1350 
1351  io.data (meta_data[pos++]);
1352  io.data (meta_data[pos++]);
1353  io.data (this->boundary_condition_file_name()); // libMesh::out << "bc_file=" << this->boundary_condition_file_name() << std::endl;
1354  io.data (this->subdomain_map_file_name()); // libMesh::out << "sid_file=" << this->subdomain_map_file_name() << std::endl;
1355  io.data (this->partition_map_file_name()); // libMesh::out << "pid_file=" << this->partition_map_file_name() << std::endl;
1356  io.data (this->polynomial_level_file_name()); // libMesh::out << "pl_file=" << this->polynomial_level_file_name() << std::endl;
1357 
1358  if (version_at_least_0_9_2())
1359  {
1360  io.data (meta_data[pos++], "# type size");
1361  io.data (meta_data[pos++], "# uid size");
1362  io.data (meta_data[pos++], "# pid size");
1363  io.data (meta_data[pos++], "# sid size");
1364  io.data (meta_data[pos++], "# p-level size");
1365  // Boundary Condition sizes
1366  io.data (meta_data[pos++], "# eid size"); // elem id
1367  io.data (meta_data[pos++], "# side size"); // side number
1368  io.data (meta_data[pos++], "# bid size"); // boundary id
1369  }
1370  }
1371 
1372  // broadcast the n_elems, n_nodes, and size information
1373  this->comm().broadcast (meta_data);
1374 
1375  this->comm().broadcast (this->boundary_condition_file_name());
1376  this->comm().broadcast (this->subdomain_map_file_name());
1377  this->comm().broadcast (this->partition_map_file_name());
1378  this->comm().broadcast (this->polynomial_level_file_name());
1379 
1380  // Tell the mesh how many nodes/elements to expect. Depending on the mesh type,
1381  // this may allow for efficient adding of nodes/elements.
1382  const T & n_elem = meta_data[0];
1383  const T & n_nodes = meta_data[1];
1384 
1385  mesh.reserve_elem(cast_int<dof_id_type>(n_elem));
1386  mesh.reserve_nodes(cast_int<dof_id_type>(n_nodes));
1387 
1388  // Our mesh is pre-partitioned as it's created
1389  this->set_n_partitions(this->n_processors());
1390 
1396  if (version_at_least_0_9_2())
1397  _field_width = cast_int<unsigned int>(meta_data[2]);
1398 }
1399 
1400 
1401 
1403 {
1404  const bool read_entity_info = version_at_least_0_9_2();
1405  const bool use_new_header_type (this->version_at_least_1_3_0());
1406  if (read_entity_info)
1407  {
1409 
1410  new_header_id_type n_subdomain_names = 0;
1411  std::vector<new_header_id_type> subdomain_ids;
1412  std::vector<std::string> subdomain_names;
1413 
1414  // Read the sideset names
1415  if (this->processor_id() == 0)
1416  {
1417  if (use_new_header_type)
1418  io.data(n_subdomain_names);
1419  else
1420  {
1421  old_header_id_type temp;
1422  io.data(temp);
1423  n_subdomain_names = temp;
1424  }
1425 
1426  subdomain_ids.resize(n_subdomain_names);
1427  subdomain_names.resize(n_subdomain_names);
1428 
1429  if (n_subdomain_names)
1430  {
1431  if (use_new_header_type)
1432  io.data(subdomain_ids);
1433  else
1434  {
1435  std::vector<old_header_id_type> temp;
1436  io.data(temp);
1437  subdomain_ids.assign(temp.begin(), temp.end());
1438  }
1439 
1440  io.data(subdomain_names);
1441  }
1442  }
1443 
1444  // Broadcast the subdomain names to all processors
1445  this->comm().broadcast(n_subdomain_names);
1446  if (n_subdomain_names == 0)
1447  return;
1448 
1449  subdomain_ids.resize(n_subdomain_names);
1450  subdomain_names.resize(n_subdomain_names);
1451  this->comm().broadcast(subdomain_ids);
1452  this->comm().broadcast(subdomain_names);
1453 
1454  // Reassemble the named subdomain information
1455  std::map<subdomain_id_type, std::string> & subdomain_map = mesh.set_subdomain_name_map();
1456 
1457  for (unsigned int i=0; i<n_subdomain_names; ++i)
1458  subdomain_map.insert(std::make_pair(subdomain_ids[i], subdomain_names[i]));
1459  }
1460 }
1461 
1462 
1463 template <typename T>
1464 void XdrIO::read_serialized_connectivity (Xdr & io, const dof_id_type n_elem, std::vector<new_header_id_type> & sizes, T)
1465 {
1466  libmesh_assert (io.reading());
1467 
1468  if (!n_elem) return;
1469 
1470  const bool
1471  read_p_level = ("." == this->polynomial_level_file_name()),
1472  read_partitioning = ("." == this->partition_map_file_name()),
1473  read_subdomain_id = ("." == this->subdomain_map_file_name());
1474 
1475  // convenient reference to our mesh
1477 
1478  // Keep track of what kinds of elements this file contains
1479  elems_of_dimension.clear();
1480  elems_of_dimension.resize(4, false);
1481 
1482  std::vector<T> conn, input_buffer(100 /* oversized ! */);
1483 
1484  int level=-1;
1485 
1486  // Version 0.9.2+ introduces unique ids
1487  const size_t unique_id_size_index = 3;
1488 
1489  const bool read_unique_id =
1490  (version_at_least_0_9_2()) &&
1491  sizes[unique_id_size_index];
1492 
1493  T n_elem_at_level=0, n_processed_at_level=0;
1494  for (dof_id_type blk=0, first_elem=0, last_elem=0;
1495  last_elem<n_elem; blk++)
1496  {
1497  first_elem = cast_int<dof_id_type>(blk*io_blksize);
1498  last_elem = cast_int<dof_id_type>(std::min(cast_int<std::size_t>((blk+1)*io_blksize),
1499  cast_int<std::size_t>(n_elem)));
1500 
1501  conn.clear();
1502 
1503  if (this->processor_id() == 0)
1504  for (dof_id_type e=first_elem; e<last_elem; e++, n_processed_at_level++)
1505  {
1506  if (n_processed_at_level == n_elem_at_level)
1507  {
1508  // get the number of elements to read at this level
1509  io.data (n_elem_at_level);
1510  n_processed_at_level = 0;
1511  level++;
1512  }
1513 
1514  unsigned int pos = 0;
1515  // get the element type,
1516  io.data_stream (&input_buffer[pos++], 1);
1517 
1518  if (read_unique_id)
1519  io.data_stream (&input_buffer[pos++], 1);
1520  // Older versions won't have this field at all (no increment on pos)
1521 
1522  // maybe the parent
1523  if (level)
1524  io.data_stream (&input_buffer[pos++], 1);
1525  else
1526  // We can't always fit DofObject::invalid_id in an
1527  // xdr_id_type
1528  input_buffer[pos++] = static_cast<T>(-1);
1529 
1530  // maybe the processor id
1531  if (read_partitioning)
1532  io.data_stream (&input_buffer[pos++], 1);
1533  else
1534  input_buffer[pos++] = 0;
1535 
1536  // maybe the subdomain id
1537  if (read_subdomain_id)
1538  io.data_stream (&input_buffer[pos++], 1);
1539  else
1540  input_buffer[pos++] = 0;
1541 
1542  // maybe the p level
1543  if (read_p_level)
1544  io.data_stream (&input_buffer[pos++], 1);
1545  else
1546  input_buffer[pos++] = 0;
1547 
1548  // and all the nodes
1549  libmesh_assert_less (pos+Elem::type_to_n_nodes_map[input_buffer[0]], input_buffer.size());
1550  io.data_stream (&input_buffer[pos], Elem::type_to_n_nodes_map[input_buffer[0]]);
1551  conn.insert (conn.end(),
1552  input_buffer.begin(),
1553  input_buffer.begin() + pos + Elem::type_to_n_nodes_map[input_buffer[0]]);
1554  }
1555 
1556  std::size_t conn_size = conn.size();
1557  this->comm().broadcast(conn_size);
1558  conn.resize (conn_size);
1559  this->comm().broadcast (conn);
1560 
1561  // All processors now have the connectivity for this block.
1562  typename std::vector<T>::const_iterator it = conn.begin();
1563  for (dof_id_type e=first_elem; e<last_elem; e++)
1564  {
1565  const ElemType elem_type = static_cast<ElemType>(*it); ++it;
1566 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1567  // We are on all processors here, so we can easily assign
1568  // consistent unique ids if the file doesn't specify them
1569  // later.
1570  unique_id_type unique_id = e;
1571 #endif
1572  if (read_unique_id)
1573  {
1574 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1575  unique_id = cast_int<unique_id_type>(*it);
1576 #endif
1577  ++it;
1578  }
1579  const dof_id_type parent_id =
1580  (*it == static_cast<T>(-1)) ?
1582  cast_int<dof_id_type>(*it);
1583  ++it;
1584  const processor_id_type proc_id =
1585  cast_int<processor_id_type>(*it);
1586  ++it;
1587  const subdomain_id_type subdomain_id =
1588  cast_int<subdomain_id_type>(*it);
1589  ++it;
1590 #ifdef LIBMESH_ENABLE_AMR
1591  const unsigned int p_level =
1592  cast_int<unsigned int>(*it);
1593 #endif
1594  ++it;
1595 
1596  Elem * parent = (parent_id == DofObject::invalid_id) ?
1597  nullptr : mesh.elem_ptr(parent_id);
1598 
1599  Elem * elem = Elem::build (elem_type, parent).release();
1600 
1601  elem->set_id() = e;
1602 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1603  elem->set_unique_id() = unique_id;
1604 #endif
1605  elem->processor_id() = proc_id;
1606  elem->subdomain_id() = subdomain_id;
1607 #ifdef LIBMESH_ENABLE_AMR
1608  elem->hack_p_level(p_level);
1609 
1610  if (parent)
1611  {
1612  parent->add_child(elem);
1615  }
1616 #endif
1617 
1618  for (unsigned int n=0, n_n = elem->n_nodes(); n != n_n;
1619  n++, ++it)
1620  {
1621  const dof_id_type global_node_number =
1622  cast_int<dof_id_type>(*it);
1623 
1624  elem->set_node(n) =
1625  mesh.add_point (Point(), global_node_number);
1626  }
1627 
1628  elems_of_dimension[elem->dim()] = true;
1629  mesh.add_elem(elem);
1630  }
1631  }
1632 
1633  // Set the mesh dimension to the largest encountered for an element
1634  for (unsigned char i=0; i!=4; ++i)
1635  if (elems_of_dimension[i])
1637 
1638 #if LIBMESH_DIM < 3
1639  if (mesh.mesh_dimension() > LIBMESH_DIM)
1640  libmesh_error_msg("Cannot open dimension " \
1641  << mesh.mesh_dimension() \
1642  << " mesh file when configured without " \
1643  << mesh.mesh_dimension() \
1644  << "D support.");
1645 #endif
1646 }
1647 
1648 
1649 
1651 {
1652  libmesh_assert (io.reading());
1653 
1654  // convenient reference to our mesh
1656 
1657  if (!mesh.n_nodes()) return;
1658 
1659  // At this point the elements have been read from file and placeholder nodes
1660  // have been assigned. These nodes, however, do not have the proper (x,y,z)
1661  // locations or unique_id values. This method will read all the
1662  // nodes from disk, and each processor can then grab the individual
1663  // values it needs.
1664 
1665  // If the file includes unique ids for nodes (as indicated by a
1666  // flag in 0.9.6+ files), those will be read next.
1667 
1668  // build up a list of the nodes contained in our local mesh. These are the nodes
1669  // stored on the local processor whose (x,y,z) and unique_id values
1670  // need to be corrected.
1671  std::vector<dof_id_type> needed_nodes; needed_nodes.reserve (mesh.n_nodes());
1672  {
1673  for (auto & node : mesh.node_ptr_range())
1674  needed_nodes.push_back(node->id());
1675 
1676  std::sort (needed_nodes.begin(), needed_nodes.end());
1677 
1678  // We should not have any duplicate node->id()s
1679  libmesh_assert (std::unique(needed_nodes.begin(), needed_nodes.end()) == needed_nodes.end());
1680  }
1681 
1682  // Get the nodes in blocks.
1683  std::vector<Real> coords;
1684  std::pair<std::vector<dof_id_type>::iterator,
1685  std::vector<dof_id_type>::iterator> pos;
1686  pos.first = needed_nodes.begin();
1687 
1688  // Broadcast node coordinates
1689  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
1690  {
1691  first_node = blk*io_blksize;
1692  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
1693 
1694  coords.resize(3*(last_node - first_node));
1695 
1696  if (this->processor_id() == 0)
1697  io.data_stream (coords.empty() ? nullptr : coords.data(),
1698  cast_int<unsigned int>(coords.size()));
1699 
1700  // For large numbers of processors the majority of processors at any given
1701  // block may not actually need these data. It may be worth profiling this,
1702  // although it is expected that disk IO will be the bottleneck
1703  this->comm().broadcast (coords);
1704 
1705  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx+=3)
1706  {
1707  // first see if we need this node. use pos.first as a smart lower
1708  // bound, this will ensure that the size of the searched range
1709  // decreases as we match nodes.
1710  pos = std::equal_range (pos.first, needed_nodes.end(), n);
1711 
1712  if (pos.first != pos.second) // we need this node.
1713  {
1714  libmesh_assert_equal_to (*pos.first, n);
1715  libmesh_assert(!libmesh_isnan(coords[idx+0]));
1716  libmesh_assert(!libmesh_isnan(coords[idx+1]));
1717  libmesh_assert(!libmesh_isnan(coords[idx+2]));
1718  mesh.node_ref(cast_int<dof_id_type>(n)) =
1719  Point (coords[idx+0],
1720  coords[idx+1],
1721  coords[idx+2]);
1722 
1723  }
1724  }
1725  }
1726 
1727  if (version_at_least_0_9_6())
1728  {
1729  // Check for node unique ids
1730  unsigned short read_unique_ids;
1731 
1732  if (this->processor_id() == 0)
1733  io.data (read_unique_ids);
1734 
1735  this->comm().broadcast (read_unique_ids);
1736 
1737  // If no unique ids are in the file, we're done.
1738  if (!read_unique_ids)
1739  return;
1740 
1741  std::vector<uint32_t> unique_32;
1742  std::vector<uint64_t> unique_64;
1743 
1744  // We're starting over from node 0 again
1745  pos.first = needed_nodes.begin();
1746 
1747  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
1748  {
1749  first_node = blk*io_blksize;
1750  last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));
1751 
1752  libmesh_assert((_field_width == 8) || (_field_width == 4));
1753 
1754  if (_field_width == 8)
1755  unique_64.resize(last_node - first_node);
1756  else
1757  unique_32.resize(last_node - first_node);
1758 
1759  if (this->processor_id() == 0)
1760  {
1761  if (_field_width == 8)
1762  io.data_stream (unique_64.empty() ? nullptr : unique_64.data(),
1763  cast_int<unsigned int>(unique_64.size()));
1764  else
1765  io.data_stream (unique_32.empty() ? nullptr : unique_32.data(),
1766  cast_int<unsigned int>(unique_32.size()));
1767  }
1768 
1769 #ifdef LIBMESH_ENABLE_UNIQUE_ID
1770  if (_field_width == 8)
1771  this->comm().broadcast (unique_64);
1772  else
1773  this->comm().broadcast (unique_32);
1774 
1775  for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
1776  {
1777  // first see if we need this node. use pos.first as a smart lower
1778  // bound, this will ensure that the size of the searched range
1779  // decreases as we match nodes.
1780  pos = std::equal_range (pos.first, needed_nodes.end(), n);
1781 
1782  if (pos.first != pos.second) // we need this node.
1783  {
1784  libmesh_assert_equal_to (*pos.first, n);
1785  if (_field_width == 8)
1786  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id()
1787  = unique_64[idx];
1788  else
1789  mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id()
1790  = unique_32[idx];
1791  }
1792  }
1793 #endif // LIBMESH_ENABLE_UNIQUE_ID
1794  }
1795  }
1796 }
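 // Example usage (a minimal sketch; "mesh.xdr" is a hypothetical file name,
 // init is the usual LibMeshInit object, and binary = true selects XDR
 // encoding rather than ASCII):
 //
 //   Mesh mesh(init.comm());
 //   XdrIO xdr(mesh, /* binary = */ true);
 //   xdr.read("mesh.xdr");
 //   mesh.prepare_for_use();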
1797 
1798 
1799 
1800 template <typename T>
1801 void XdrIO::read_serialized_bcs_helper (Xdr & io, T, const std::string bc_type)
1802 {
1803  if (this->boundary_condition_file_name() == "n/a") return;
1804 
1805  libmesh_assert (io.reading());
1806 
1807  // convenient reference to our mesh
1808  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1809
1810  // and our boundary info object
1811  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1812 
1813  // Version 0.9.2+ introduces unique ids
1814  read_serialized_bc_names(io, boundary_info, true); // sideset names
1815 
1816  std::vector<T> input_buffer;
1817 
1818  new_header_id_type n_bcs=0;
1819  if (this->processor_id() == 0)
1820  {
1821  if (this->version_at_least_1_3_0())
1822  io.data (n_bcs);
1823  else
1824  {
1825  old_header_id_type temp;
1826  io.data (temp);
1827  n_bcs = temp;
1828  }
1829  }
1830  this->comm().broadcast (n_bcs);
1831 
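 // Each record in this section is a triplet of
 // (element id, side/edge/shellface index, boundary id),
 // hence the stride of 3 below.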
1832  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_bcs; blk++)
1833  {
1834  first_bc = blk*io_blksize;
1835  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_bcs));
1836 
1837  input_buffer.resize (3*(last_bc - first_bc));
1838 
1839  if (this->processor_id() == 0)
1840  io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
1841  cast_int<unsigned int>(input_buffer.size()));
1842 
1843  this->comm().broadcast (input_buffer);
1844 
1845  // Look for BCs in this block for all the level-0 elements we have
1846  // (not just local ones). Do this by checking all entries for
1847  // IDs matching an element we can query.
1848  // We cannot rely on nullptr neighbors at this point since the neighbor
1849  // data structure has not been initialized.
1850  for (std::size_t idx=0, ibs=input_buffer.size(); idx<ibs; idx+=3)
1851  {
1852  const dof_id_type dof_id =
1853  cast_int<dof_id_type>(input_buffer[idx+0]);
1854  const unsigned short side =
1855  cast_int<unsigned short>(input_buffer[idx+1]);
1856  const boundary_id_type bc_id =
1857  cast_int<boundary_id_type>(input_buffer[idx+2]);
1858 
1859  const Elem * elem = mesh.query_elem_ptr(dof_id);
1860  if (!elem)
1861  continue;
1862 
1863  if (bc_type == "side")
1864  {
1865  libmesh_assert_less (side, elem->n_sides());
1866  boundary_info.add_side (elem, side, bc_id);
1867  }
1868  else if (bc_type == "edge")
1869  {
1870  libmesh_assert_less (side, elem->n_edges());
1871  boundary_info.add_edge (elem, side, bc_id);
1872  }
1873  else if (bc_type == "shellface")
1874  {
1875  // Shell face IDs can only be 0 or 1.
1876  libmesh_assert_less(side, 2);
1877 
1878  boundary_info.add_shellface (elem, side, bc_id);
1879  }
1880  else
1881  {
1882  libmesh_error_msg("bc_type not recognized: " + bc_type);
1883  }
1884  }
1885  input_buffer.clear();
1886  }
1887 }
1888 
1889 
1890 
1891 template <typename T>
1892 void XdrIO::read_serialized_side_bcs (Xdr & io, T value)
1893 {
1894  read_serialized_bcs_helper(io, value, "side");
1895 }
1896 
1897 
1898 
1899 template <typename T>
1900 void XdrIO::read_serialized_edge_bcs (Xdr & io, T value)
1901 {
1902  read_serialized_bcs_helper(io, value, "edge");
1903 }
1904 
1905 
1906 
1907 template <typename T>
1908 void XdrIO::read_serialized_shellface_bcs (Xdr & io, T value)
1909 {
1910  read_serialized_bcs_helper(io, value, "shellface");
1911 }
1912 
1913 
1914 
1915 template <typename T>
1916 void XdrIO::read_serialized_nodesets (Xdr & io, T)
1917 {
1918  if (this->boundary_condition_file_name() == "n/a") return;
1919 
1920  libmesh_assert (io.reading());
1921 
1922  // convenient reference to our mesh
1923  MeshBase & mesh = MeshInput<MeshBase>::mesh();
1924
1925  // and our boundary info object
1926  BoundaryInfo & boundary_info = mesh.get_boundary_info();
1927 
 1928  // Version 0.9.2+ introduces entity names
1929  read_serialized_bc_names(io, boundary_info, false); // nodeset names
1930 
1931  std::vector<T> input_buffer;
1932 
1933  new_header_id_type n_nodesets=0;
1934  if (this->processor_id() == 0)
1935  {
1936  if (this->version_at_least_1_3_0())
1937  io.data (n_nodesets);
1938  else
1939  {
1940  old_header_id_type temp;
1941  io.data (temp);
1942  n_nodesets = temp;
1943  }
1944  }
1945  this->comm().broadcast (n_nodesets);
1946 
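 // Nodeset records are (node id, boundary id) pairs,
 // hence the stride of 2 below.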
1947  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_nodesets; blk++)
1948  {
1949  first_bc = blk*io_blksize;
1950  last_bc = std::min((blk+1)*io_blksize, std::size_t(n_nodesets));
1951 
1952  input_buffer.resize (2*(last_bc - first_bc));
1953 
1954  if (this->processor_id() == 0)
1955  io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
1956  cast_int<unsigned int>(input_buffer.size()));
1957 
1958  this->comm().broadcast (input_buffer);
1959 
1960  // Look for BCs in this block for all nodes we have (not just
1961  // local ones). Do this by checking all entries for
1962  // IDs matching a node we can query.
1963  for (std::size_t idx=0, ibs=input_buffer.size(); idx<ibs; idx+=2)
1964  {
1965  const dof_id_type dof_id =
1966  cast_int<dof_id_type>(input_buffer[idx+0]);
1967  const boundary_id_type bc_id =
1968  cast_int<boundary_id_type>(input_buffer[idx+1]);
1969 
1970  const Node * node = mesh.query_node_ptr(dof_id);
1971  if (node)
1972  boundary_info.add_node (node, bc_id);
1973  }
1974  input_buffer.clear();
1975  }
1976 }
1977 
1978 
1979 
1980 void XdrIO::read_serialized_bc_names(Xdr & io, BoundaryInfo & info, bool is_sideset)
1981 {
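 // Header counts are stored as 32-bit old_header_id_type values before
 // format 1.3.0 and as 64-bit new_header_id_type values from 1.3.0 on.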
1982  const bool read_entity_info = version_at_least_0_9_2();
1983  const bool use_new_header_type (this->version_at_least_1_3_0());
1984  if (read_entity_info)
1985  {
1986  new_header_id_type n_boundary_names = 0;
1987  std::vector<new_header_id_type> boundary_ids;
1988  std::vector<std::string> boundary_names;
1989 
1990  // Read the sideset names
1991  if (this->processor_id() == 0)
1992  {
1993  if (use_new_header_type)
1994  io.data(n_boundary_names);
1995  else
1996  {
1997  old_header_id_type temp;
1998  io.data(temp);
1999  n_boundary_names = temp;
2000  }
2001 
2002  boundary_names.resize(n_boundary_names);
2003 
2004  if (n_boundary_names)
2005  {
2006  if (use_new_header_type)
2007  io.data(boundary_ids);
2008  else
2009  {
2010  std::vector<old_header_id_type> temp(n_boundary_names);
2011  io.data(temp);
2012  boundary_ids.assign(temp.begin(), temp.end());
2013  }
2014  io.data(boundary_names);
2015  }
2016  }
2017 
2018  // Broadcast the boundary names to all processors
2019  this->comm().broadcast(n_boundary_names);
2020  if (n_boundary_names == 0)
2021  return;
2022 
2023  boundary_ids.resize(n_boundary_names);
2024  boundary_names.resize(n_boundary_names);
2025  this->comm().broadcast(boundary_ids);
2026  this->comm().broadcast(boundary_names);
2027 
2028  // Reassemble the named boundary information
2029  std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
2030  boundary_info.set_sideset_name_map() : boundary_info.set_nodeset_name_map();
2031
2032  for (unsigned int i=0; i<n_boundary_names; ++i)
2033  boundary_map.insert(std::make_pair(cast_int<boundary_id_type>(boundary_ids[i]), boundary_names[i]));
2034  }
2035 }
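 // After reading, the name maps are available from the BoundaryInfo object
 // (a minimal sketch of downstream use; boundary_info stands for
 // mesh.get_boundary_info()):
 //
 //   for (const auto & pr : boundary_info.get_sideset_name_map())
 //     libMesh::out << static_cast<int>(pr.first) << " -> "
 //                  << pr.second << std::endl;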
2036 
2037 
2038 
2039 void XdrIO::pack_element (std::vector<xdr_id_type> & conn, const Elem * elem,
2040  const dof_id_type parent_id, const dof_id_type parent_pid) const
2041 {
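 // Packed layout: n_nodes, element type, unique_id, [parent id and parent
 // processor id, for children only], processor id, subdomain id,
 // [p-level, when AMR is enabled], then the node ids.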
2042  libmesh_assert(elem);
2043  libmesh_assert_equal_to (elem->n_nodes(), Elem::type_to_n_nodes_map[elem->type()]);
2044 
2045  conn.push_back(elem->n_nodes());
2046 
2047  conn.push_back (elem->type());
2048 
 2049  // In version 0.7.0+ "id" is stored but not used. In version 0.9.2+
 2050  // we store unique_id instead, so there is no need to
 2051  // check for the older version when writing the unique_id.
2052  conn.push_back (elem->unique_id());
2053 
2054  if (parent_id != DofObject::invalid_id)
2055  {
2056  conn.push_back (parent_id);
2057  libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_id);
2058  conn.push_back (parent_pid);
2059  }
2060 
2061  conn.push_back (elem->processor_id());
2062  conn.push_back (elem->subdomain_id());
2063 
2064 #ifdef LIBMESH_ENABLE_AMR
2065  conn.push_back (elem->p_level());
2066 #endif
2067 
2068  for (auto n : elem->node_index_range())
2069  conn.push_back (elem->node_id(n));
2070 }
2071 
2072 bool XdrIO::version_at_least_0_9_2() const
2073 {
2074  return
2075  (this->version().find("0.9.2") != std::string::npos) ||
2076  (this->version().find("0.9.6") != std::string::npos) ||
2077  (this->version().find("1.1.0") != std::string::npos) ||
2078  (this->version().find("1.3.0") != std::string::npos);
2079 }
2080 
2081 bool XdrIO::version_at_least_0_9_6() const
2082 {
2083  return
2084  (this->version().find("0.9.6") != std::string::npos) ||
2085  (this->version().find("1.1.0") != std::string::npos) ||
2086  (this->version().find("1.3.0") != std::string::npos);
2087 }
2088 
2089 bool XdrIO::version_at_least_1_1_0() const
2090 {
2091  return
2092  (this->version().find("1.1.0") != std::string::npos) ||
2093  (this->version().find("1.3.0") != std::string::npos);
2094 }
2095 
2096 bool XdrIO::version_at_least_1_3_0() const
2097 {
2098  return
2099  (this->version().find("1.3.0") != std::string::npos);
2100 }
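 // Note that these version_at_least_* predicates are substring matches
 // against the known version strings, so each newer format version must
 // be listed in every predicate it satisfies.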
2101 
2102 } // namespace libMesh