Line data Source code
1 : // The libMesh Finite Element Library.
2 : // Copyright (C) 2002-2025 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 :
4 : // This library is free software; you can redistribute it and/or
5 : // modify it under the terms of the GNU Lesser General Public
6 : // License as published by the Free Software Foundation; either
7 : // version 2.1 of the License, or (at your option) any later version.
8 :
9 : // This library is distributed in the hope that it will be useful,
10 : // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 : // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 : // Lesser General Public License for more details.
13 :
14 : // You should have received a copy of the GNU Lesser General Public
15 : // License along with this library; if not, write to the Free Software
16 : // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 :
18 :
19 :
20 : // libMesh includes
21 : #include "libmesh/xdr_io.h"
22 : #include "libmesh/boundary_info.h"
23 : #include "libmesh/elem.h"
24 : #include "libmesh/enum_xdr_mode.h"
25 : #include "libmesh/int_range.h"
26 : #include "libmesh/libmesh_logging.h"
27 : #include "libmesh/mesh_base.h"
28 : #include "libmesh/mesh_tools.h"
29 : #include "libmesh/node.h"
30 : #include "libmesh/partitioner.h"
31 : #include "libmesh/utility.h"
32 : #include "libmesh/xdr_cxx.h"
33 :
34 : // TIMPI includes
35 : #include "timpi/parallel_implementation.h"
36 : #include "timpi/parallel_sync.h"
37 :
38 : // C++ includes
39 : #include <iostream>
40 : #include <iomanip>
41 : #include <cstdio>
42 : #include <vector>
43 : #include <string>
44 : #include <tuple>
45 :
46 : namespace libMesh
47 : {
48 :
49 : //-----------------------------------------------
50 : // anonymous namespace for implementation details
51 : namespace { // file-local implementation details (internal linkage)
52 :
53 : template <class T, class U>
54 : struct libmesh_type_is_same { // local stand-in for std::is_same: false for distinct T, U
55 : static const bool value = false;
56 : };
57 :
58 : template <class T>
59 : struct libmesh_type_is_same<T, T> { // partial specialization: true when both types are identical
60 : static const bool value = true;
61 : };
62 :
63 : }
64 :
65 :
66 :
67 : // ------------------------------------------------------------
68 : // XdrIO static data
69 : const std::size_t XdrIO::io_blksize = 128000; // chunk size (in ids) used when streaming node data through processor 0 (see write_serialized_nodes)
70 :
71 :
72 :
73 : // ------------------------------------------------------------
74 : // XdrIO members
75 3512 : XdrIO::XdrIO (MeshBase & mesh, const bool binary_in) : // read/write constructor: non-const mesh so it can also be filled on read
76 : MeshInput<MeshBase> (mesh,/* is_parallel_format = */ true),
77 : MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
78 : ParallelObject (mesh),
79 3284 : _binary (binary_in),
80 3284 : _legacy (false),
81 3284 : _write_serial (false),
82 3284 : _write_parallel (false),
83 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
84 3284 : _write_unique_id (true),
85 : #else
86 : _write_unique_id (false),
87 : #endif
88 3284 : _field_width (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
89 0 : _version ("libMesh-1.8.0"), // format version string written into new file headers
90 3284 : _bc_file_name ("n/a"), // "n/a" means the optional section is omitted (see write())
91 3284 : _partition_map_file ("n/a"),
92 3284 : _subdomain_map_file ("n/a"),
93 6796 : _p_level_file ("n/a")
94 : {
95 3512 : }
96 :
97 :
98 :
99 1425 : XdrIO::XdrIO (const MeshBase & mesh, const bool binary_in) : // write-only constructor: const mesh, MeshInput left unattached
100 : MeshInput<MeshBase> (), // write-only
101 : MeshOutput<MeshBase>(mesh,/* is_parallel_format = */ true),
102 : ParallelObject (mesh),
103 1341 : _binary (binary_in),
104 1341 : _legacy (false),
105 1341 : _write_serial (false),
106 1341 : _write_parallel (false),
107 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
108 1341 : _write_unique_id (true),
109 : #else
110 : _write_unique_id (false),
111 : #endif
112 1341 : _field_width (4), // In 0.7.0, all fields are 4 bytes, in 0.9.2+ they can vary
113 0 : _version ("libMesh-1.8.0"), // format version string written into new file headers
114 1341 : _bc_file_name ("n/a"), // "n/a" means the optional section is omitted (see write())
115 1341 : _partition_map_file ("n/a"),
116 1341 : _subdomain_map_file ("n/a"),
117 2766 : _p_level_file ("n/a")
118 : {
119 1425 : }
120 :
121 :
122 :
123 4625 : XdrIO::~XdrIO () = default; // defaulted: members clean up via their own destructors
124 :
125 :
126 :
127 1425 : void XdrIO::write (const std::string & name) // Serialize the mesh to an XDA/XDR file. Collective: every rank participates, rank 0 does the disk I/O.
128 : {
129 1425 : libmesh_error_msg_if(this->legacy(), "We don't support writing parallel files in the legacy format.");
130 :
131 3828 : Xdr io ((this->processor_id() == 0) ? name : "", this->binary() ? ENCODE : WRITE); // only rank 0 gets a real filename; other ranks open a dummy stream
132 :
133 84 : LOG_SCOPE("write()","XdrIO");
134 :
135 : // convenient reference to our mesh
136 84 : const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
137 :
138 1425 : new_header_id_type n_elem = mesh.n_elem();
139 1425 : new_header_id_type max_node_id = mesh.max_node_id();
140 :
141 1425 : new_header_id_type n_side_bcs = mesh.get_boundary_info().n_boundary_conds();
142 1425 : new_header_id_type n_edge_bcs = mesh.get_boundary_info().n_edge_conds();
143 1425 : new_header_id_type n_shellface_bcs = mesh.get_boundary_info().n_shellface_conds();
144 1425 : new_header_id_type n_nodesets = mesh.get_boundary_info().n_nodeset_conds();
145 1425 : unsigned int n_p_levels = MeshTools::n_p_levels (mesh);
146 1425 : new_header_id_type n_elem_integers = mesh.n_elem_integers();
147 1425 : new_header_id_type n_node_integers = mesh.n_node_integers();
148 1467 : std::vector<dof_id_type> elemset_codes = mesh.get_elemset_codes();
149 :
150 1383 : bool write_parallel_files = this->write_parallel();
151 :
152 : //-------------------------------------------------------------
153 : // For all the optional files -- the default file name is "n/a".
154 : // However, the user may specify an optional external file.
155 :
156 : // If there are BCs and the user has not already provided a
157 : // file name then write to "."
158 2708 : if ((n_side_bcs || n_edge_bcs || n_shellface_bcs || n_nodesets) &&
159 38 : this->boundary_condition_file_name() == "n/a")
160 38 : this->boundary_condition_file_name() = ".";
161 :
162 : // If there is at least one subdomain and the user has not specified an
163 : // external file then write the subdomain mapping to the default file "."
164 2850 : if ((mesh.n_subdomains() > 0) &&
165 42 : (this->subdomain_map_file_name() == "n/a"))
166 42 : this->subdomain_map_file_name() = ".";
167 :
168 : // In general we don't write the partition information.
169 :
170 : // If we have p levels and the user has not already provided
171 : // a file name then write to "."
172 1425 : if ((n_p_levels > 1) &&
173 0 : (this->polynomial_level_file_name() == "n/a"))
174 0 : this->polynomial_level_file_name() = ".";
175 :
176 : // write the header
177 1467 : if (this->processor_id() == 0)
178 : {
179 352 : std::string full_ver = this->version() + (write_parallel_files ? " parallel" : "");
180 243 : io.data (full_ver);
181 :
182 243 : io.data (n_elem, "# number of elements");
183 243 : io.data (max_node_id, "# number of nodes"); // We'll write invalid coords into gaps
184 :
185 243 : io.data (this->boundary_condition_file_name(), "# boundary condition specification file");
186 243 : io.data (this->subdomain_map_file_name(), "# subdomain id specification file");
187 243 : io.data (this->partition_map_file_name(), "# processor id specification file");
188 243 : io.data (this->polynomial_level_file_name(), "# p-level specification file");
189 :
190 : // Version 0.9.2+ introduces sizes for each type
191 243 : new_header_id_type write_size = sizeof(xdr_id_type), zero_size = 0;
192 :
193 : const bool
194 21 : write_p_level = ("." == this->polynomial_level_file_name()),
195 21 : write_partitioning = ("." == this->partition_map_file_name()),
196 21 : write_subdomain_id = ("." == this->subdomain_map_file_name()),
197 21 : write_bcs = ("." == this->boundary_condition_file_name());
198 :
199 243 : io.data (write_size, "# type size");
200 243 : io.data (_write_unique_id ? write_size : zero_size, "# uid size"); // a zero size signals the section is absent
201 465 : io.data (write_partitioning ? write_size : zero_size, "# pid size");
202 243 : io.data (write_subdomain_id ? write_size : zero_size, "# sid size");
203 465 : io.data (write_p_level ? write_size : zero_size, "# p-level size");
204 : // Boundary Condition sizes
205 265 : io.data (write_bcs ? write_size : zero_size, "# eid size"); // elem id
206 243 : io.data (write_bcs ? write_size : zero_size, "# side size"); // side number
207 243 : io.data (write_bcs ? write_size : zero_size, "# bid size"); // boundary id
208 :
209 : // Write the data size for extra integers stored on the mesh. Like
210 : // everything else in the header, they will be of size write_size.
211 410 : io.data((n_elem_integers || n_node_integers) ? write_size : zero_size, "# extra integer size");
212 :
213 : // Write the names of the extra node integers (see also: CheckpointIO).
214 63 : std::vector<std::string> node_integer_names;
215 459 : for (unsigned int i=0; i != n_node_integers; ++i)
216 216 : node_integer_names.push_back(mesh.get_node_integer_name(i));
217 243 : io.data(node_integer_names, "# node integer names");
218 :
219 : // Write the names of the extra elem integers (see also: CheckpointIO).
220 63 : std::vector<std::string> elem_integer_names;
221 363 : for (unsigned int i=0; i != n_elem_integers; ++i)
222 120 : elem_integer_names.push_back(mesh.get_elem_integer_name(i));
223 243 : io.data(elem_integer_names, "# elem integer names");
224 :
225 : // Write the vector of elemset codes to the header (this will
226 : // also write the size of the vector, which is the number of
227 : // elemset codes).
228 243 : io.data(elemset_codes, "# elemset codes");
229 :
230 : // For each elemset code, write out the associated elemset ids
231 42 : MeshBase::elemset_type id_set_to_fill;
232 279 : for (const auto & elemset_code : elemset_codes)
233 : {
234 36 : mesh.get_elemsets(elemset_code, id_set_to_fill);
235 :
236 : // Transfer elemset ids to vector for writing
237 39 : std::vector<dof_id_type> elemset_id_vec(id_set_to_fill.begin(), id_set_to_fill.end());
238 :
239 : // Write vector of elemset ids to file with comment
240 72 : std::string comment_string = "# elemset ids for elemset code " + std::to_string(elemset_code);
241 36 : io.data(elemset_id_vec, comment_string);
242 : }
243 201 : }
244 :
245 1425 : if (write_parallel_files)
246 : {
247 : // Parallel xdr mesh files aren't implemented yet; until they
248 : // are we'll just warn the user and write a serial file.
249 4 : libMesh::out << "Warning! Parallel xda/xdr is not yet implemented.\n";
250 4 : libMesh::out << "Writing a serialized file instead." << std::endl;
251 :
252 : // write subdomain names
253 1038 : this->write_serialized_subdomain_names(io);
254 :
255 : // write connectivity
256 1042 : this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), n_elem_integers);
257 :
258 : // write the nodal locations
259 1042 : this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id), n_node_integers);
260 :
261 : // write the side boundary condition information
262 1038 : this->write_serialized_side_bcs (io, n_side_bcs);
263 :
264 : // write the nodeset information
265 1038 : this->write_serialized_nodesets (io, n_nodesets);
266 :
267 : // write the edge boundary condition information
268 1038 : this->write_serialized_edge_bcs (io, n_edge_bcs);
269 :
270 : // write the "shell face" boundary condition information
271 1038 : this->write_serialized_shellface_bcs (io, n_shellface_bcs);
272 : }
273 : else // normal serial path: identical sequence of serialized sections, without the warning
274 : {
275 : // write subdomain names
276 387 : this->write_serialized_subdomain_names(io);
277 :
278 : // write connectivity
279 425 : this->write_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), n_elem_integers);
280 :
281 : // write the nodal locations
282 425 : this->write_serialized_nodes (io, cast_int<dof_id_type>(max_node_id), n_node_integers);
283 :
284 : // write the side boundary condition information
285 387 : this->write_serialized_side_bcs (io, n_side_bcs);
286 :
287 : // write the nodeset information
288 387 : this->write_serialized_nodesets (io, n_nodesets);
289 :
290 : // write the edge boundary condition information
291 387 : this->write_serialized_edge_bcs (io, n_edge_bcs);
292 :
293 : // write the "shell face" boundary condition information
294 387 : this->write_serialized_shellface_bcs (io, n_shellface_bcs);
295 : }
296 :
297 : // pause all processes until the writing ends -- this will
298 : // protect for the pathological case where a write is
299 : // followed immediately by a read. The write must be
300 : // guaranteed to complete first.
301 1425 : io.close();
302 1425 : this->comm().barrier();
303 1425 : }
304 :
305 :
306 :
307 1425 : void XdrIO::write_serialized_subdomain_names(Xdr & io) const // rank 0 writes the subdomain id -> name map; all other ranks fall straight through
308 : {
309 1467 : if (this->processor_id() == 0)
310 : {
311 42 : const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
312 :
313 21 : const std::map<subdomain_id_type, std::string> & subdomain_map = mesh.get_subdomain_name_map();
314 :
315 42 : std::vector<new_header_id_type> subdomain_ids;
316 243 : subdomain_ids.reserve(subdomain_map.size());
317 :
318 63 : std::vector<std::string> subdomain_names;
319 243 : subdomain_names.reserve(subdomain_map.size());
320 :
321 : // We need to loop over the map and make sure that there aren't any invalid entries. Since we
322 : // return writable references in mesh_base, it's possible for the user to leave some entity names
323 : // blank. We can't write those to the XDA file.
324 243 : new_header_id_type n_subdomain_names = 0;
325 243 : for (const auto & [sbd_id, name] : subdomain_map)
326 0 : if (!name.empty()) // skip blank names; only named subdomains are recorded
327 : {
328 0 : n_subdomain_names++;
329 0 : subdomain_ids.push_back(sbd_id);
330 0 : subdomain_names.push_back(name);
331 : }
332 :
333 243 : io.data(n_subdomain_names, "# subdomain id to name map");
334 : // Write out the ids and names in two vectors
335 243 : if (n_subdomain_names)
336 : {
337 0 : io.data(subdomain_ids);
338 0 : io.data(subdomain_names);
339 : }
340 201 : }
341 1425 : }
342 :
343 :
344 :
345 : void
346 1425 : XdrIO::write_serialized_connectivity (Xdr & io,
347 : const dof_id_type libmesh_dbg_var(n_elem),
348 : const new_header_id_type n_elem_integers) const
349 : { // gather element connectivity to rank 0 and write it level-by-level, so children always follow their parents in the file
350 42 : libmesh_assert (io.writing());
351 :
352 : const bool
353 42 : write_p_level = ("." == this->polynomial_level_file_name()),
354 42 : write_partitioning = ("." == this->partition_map_file_name()),
355 42 : write_subdomain_id = ("." == this->subdomain_map_file_name());
356 :
357 : // convenient reference to our mesh
358 84 : const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
359 42 : libmesh_assert_equal_to (n_elem, mesh.n_elem());
360 :
361 : // We will only write active elements and their parents.
362 1425 : const unsigned int n_active_levels = MeshTools::n_active_levels (mesh);
363 1467 : std::vector<xdr_id_type> n_global_elem_at_level(n_active_levels);
364 :
365 : // Find the number of local and global elements at each level
366 : #ifndef NDEBUG
367 42 : xdr_id_type tot_n_elem = 0;
368 : #endif
369 4153 : for (unsigned int level=0; level<n_active_levels; level++)
370 : {
371 2816 : n_global_elem_at_level[level] =
372 5236 : MeshTools::n_elem(mesh.local_level_elements_begin(level),
373 5368 : mesh.local_level_elements_end(level));
374 :
375 2728 : this->comm().sum(n_global_elem_at_level[level]);
376 : #ifndef NDEBUG
377 88 : tot_n_elem += n_global_elem_at_level[level];
378 : #endif
379 88 : libmesh_assert_less_equal (n_global_elem_at_level[level], n_elem);
380 88 : libmesh_assert_less_equal (tot_n_elem, n_elem);
381 : }
382 :
383 : std::vector<xdr_id_type>
384 84 : xfer_conn, recv_conn;
385 : std::vector<dof_id_type>
386 1551 : n_elem_on_proc(this->n_processors()), processor_offsets(this->n_processors());
387 84 : std::vector<xdr_id_type> output_buffer;
388 : std::vector<std::size_t>
389 1509 : xfer_buf_sizes(this->n_processors());
390 :
391 : #ifdef LIBMESH_ENABLE_AMR
392 : typedef std::map<dof_id_type, std::pair<processor_id_type, dof_id_type>> id_map_type;
393 84 : id_map_type parent_id_map, child_id_map;
394 : #endif
395 :
396 1425 : dof_id_type my_next_elem=0, next_global_elem=0; // running local / global element counters
397 :
398 : //-------------------------------------------
399 : // First write the level-0 elements directly.
400 4714 : for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
401 28452 : mesh.local_level_elements_end(0)))
402 : {
403 22836 : pack_element (xfer_conn, elem,
404 : /*parent_id=*/DofObject::invalid_id,
405 : /*parent_pid=*/DofObject::invalid_id,
406 : n_elem_integers);
407 : #ifdef LIBMESH_ENABLE_AMR
408 24742 : parent_id_map[elem->id()] = std::make_pair(this->processor_id(),
409 5718 : my_next_elem);
410 : #endif
411 22836 : ++my_next_elem;
412 1341 : }
413 1425 : xfer_conn.push_back(my_next_elem); // toss in the number of elements transferred.
414 :
415 1425 : std::size_t my_size = xfer_conn.size();
416 1425 : this->comm().gather (0, my_next_elem, n_elem_on_proc);
417 1425 : this->comm().gather (0, my_size, xfer_buf_sizes);
418 :
419 1425 : processor_offsets[0] = 0;
420 13869 : for (auto pid : IntRange<processor_id_type>(1, this->n_processors()))
421 12570 : processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
422 :
423 : // All processors send their xfer buffers to processor 0.
424 : // Processor 0 will receive the data and write out the elements.
425 1425 : if (this->processor_id() == 0)
426 : {
427 : // Write the number of elements at this level.
428 : {
429 285 : std::string comment = "# n_elem at level 0", legend = ", [ type ";
430 243 : if (_write_unique_id)
431 21 : legend += "uid ";
432 243 : if (write_partitioning)
433 0 : legend += "pid ";
434 243 : if (write_subdomain_id)
435 21 : legend += "sid ";
436 243 : if (write_p_level)
437 0 : legend += "p_level ";
438 21 : legend += "(n0 ... nN-1) ]";
439 21 : comment += legend;
440 243 : io.data (n_global_elem_at_level[0], comment);
441 : }
442 :
443 1668 : for (auto pid : make_range(this->n_processors()))
444 : {
445 1467 : recv_conn.resize(xfer_buf_sizes[pid]);
446 1425 : if (pid == 0)
447 243 : recv_conn = xfer_conn; // rank 0's own data needs no message
448 : else
449 1182 : this->comm().receive (pid, recv_conn);
450 :
451 : // at a minimum, the buffer should contain the number of elements,
452 : // which could be 0.
453 42 : libmesh_assert (!recv_conn.empty());
454 :
455 3289 : for (auto [elem, recv_conn_iter, n_elem_received] =
456 42 : std::tuple{xdr_id_type(0), recv_conn.begin(), recv_conn.back()};
457 24261 : elem<n_elem_received; elem++, next_global_elem++)
458 : {
459 1906 : output_buffer.clear();
460 :
461 : // n. nodes
462 22836 : const xdr_id_type n_nodes = *recv_conn_iter++;
463 :
464 : // type
465 22836 : output_buffer.push_back(*recv_conn_iter++);
466 :
467 : // unique_id
468 22836 : xdr_id_type tmp = *recv_conn_iter++;
469 22836 : if (_write_unique_id)
470 22836 : output_buffer.push_back(tmp);
471 :
472 : // processor id
473 22836 : tmp = *recv_conn_iter++;
474 22836 : if (write_partitioning)
475 0 : output_buffer.push_back(tmp);
476 :
477 : // subdomain id
478 22836 : tmp = *recv_conn_iter++;
479 22836 : if (write_subdomain_id)
480 22836 : output_buffer.push_back(tmp);
481 :
482 : #ifdef LIBMESH_ENABLE_AMR
483 : // p level
484 22836 : tmp = *recv_conn_iter++;
485 22836 : if (write_p_level)
486 0 : output_buffer.push_back(tmp);
487 : #endif
488 :
489 160752 : for (dof_id_type node=0; node<n_nodes; node++)
490 137916 : output_buffer.push_back(*recv_conn_iter++);
491 :
492 : // Write out the elem extra integers after the connectivity
493 24486 : for (dof_id_type n=0; n<n_elem_integers; n++)
494 1650 : output_buffer.push_back(*recv_conn_iter++);
495 :
496 : io.data_stream
497 24742 : (output_buffer.data(),
498 : cast_int<unsigned int>(output_buffer.size()),
499 : cast_int<unsigned int>(output_buffer.size()));
500 : }
501 : }
502 : }
503 : else
504 1161 : this->comm().send (0, xfer_conn);
505 :
506 : #ifdef LIBMESH_ENABLE_AMR
507 : //--------------------------------------------------------------------
508 : // Next write the remaining elements indirectly through their parents.
509 : // This will insure that the children are written in the proper order
510 : // so they can be reconstructed properly.
511 2728 : for (unsigned int level=1; level<n_active_levels; level++)
512 : {
513 46 : xfer_conn.clear();
514 :
515 1303 : dof_id_type my_n_elem_written_at_level = 0;
516 3605 : for (const auto & parent : as_range(mesh.local_level_elements_begin(level-1),
517 22075 : mesh.local_level_elements_end(level-1)))
518 9000 : if (!parent->active()) // we only want the parents elements at this level, and
519 : { // there is no direct iterator for this obscure use
520 5114 : id_map_type::iterator pos = parent_id_map.find(parent->id());
521 591 : libmesh_assert (pos != parent_id_map.end());
522 5114 : const processor_id_type parent_pid = pos->second.first;
523 5114 : const dof_id_type parent_id = pos->second.second;
524 4523 : parent_id_map.erase(pos);
525 :
526 26041 : for (auto & child : parent->child_ref_range())
527 : {
528 20336 : pack_element (xfer_conn, &child, parent_id, parent_pid, n_elem_integers);
529 :
530 : // this approach introduces the possibility that we write
531 : // non-local elements. These elements may well be parents
532 : // at the next step
533 17982 : child_id_map[child.id()] = std::make_pair (child.processor_id(),
534 22690 : my_n_elem_written_at_level++);
535 20336 : my_next_elem++;
536 : }
537 1211 : }
538 1303 : xfer_conn.push_back(my_n_elem_written_at_level);
539 1303 : my_size = xfer_conn.size();
540 1303 : this->comm().gather (0, my_size, xfer_buf_sizes);
541 :
542 : // Processor 0 will receive the data and write the elements.
543 1349 : if (this->processor_id() == 0)
544 : {
545 : // Write the number of elements at this level.
546 : {
547 231 : std::ostringstream buf;
548 416 : buf << "# n_elem at level " << level << ", [ type ";
549 :
550 231 : if (_write_unique_id)
551 231 : buf << "uid ";
552 231 : buf << "parent ";
553 231 : if (write_partitioning)
554 0 : buf << "pid ";
555 231 : if (write_subdomain_id)
556 231 : buf << "sid ";
557 231 : if (write_p_level)
558 0 : buf << "p_level ";
559 231 : buf << "(n0 ... nN-1) ]";
560 :
561 277 : io.data (n_global_elem_at_level[level], buf.str());
562 185 : }
563 :
564 1534 : for (auto pid : make_range(this->n_processors()))
565 : {
566 1349 : recv_conn.resize(xfer_buf_sizes[pid]);
567 1303 : if (pid == 0)
568 231 : recv_conn = xfer_conn;
569 : else
570 1072 : this->comm().receive (pid, recv_conn);
571 :
572 : // at a minimum, the buffer should contain the number of elements,
573 : // which could be 0.
574 46 : libmesh_assert (!recv_conn.empty());
575 :
576 3611 : for (auto [elem, recv_conn_iter, n_elem_received] =
577 46 : std::tuple{xdr_id_type(0), recv_conn.begin(), recv_conn.back()};
578 21639 : elem<n_elem_received;
579 17982 : elem++, next_global_elem++)
580 : {
581 2354 : output_buffer.clear();
582 :
583 : // n. nodes
584 20336 : const xdr_id_type n_nodes = *recv_conn_iter++;
585 :
586 : // type
587 20336 : output_buffer.push_back(*recv_conn_iter++);
588 :
589 : // unique_id
590 20336 : xdr_id_type tmp = *recv_conn_iter++;
591 20336 : if (_write_unique_id)
592 20336 : output_buffer.push_back(tmp);
593 :
594 : // parent local id
595 20336 : const xdr_id_type parent_local_id = *recv_conn_iter++;
596 :
597 : // parent processor id
598 20336 : const xdr_id_type parent_pid = *recv_conn_iter++;
599 :
600 22690 : output_buffer.push_back (parent_local_id+processor_offsets[parent_pid]); // translate (pid, local id) to a global file offset
601 :
602 : // processor id
603 20336 : tmp = *recv_conn_iter++;
604 20336 : if (write_partitioning)
605 0 : output_buffer.push_back(tmp);
606 :
607 : // subdomain id
608 20336 : tmp = *recv_conn_iter++;
609 20336 : if (write_subdomain_id)
610 20336 : output_buffer.push_back(tmp);
611 :
612 : // p level
613 20336 : tmp = *recv_conn_iter++;
614 20336 : if (write_p_level)
615 0 : output_buffer.push_back(tmp);
616 :
617 : // connectivity
618 103160 : for (xdr_id_type node=0; node<n_nodes; node++)
619 82824 : output_buffer.push_back(*recv_conn_iter++);
620 :
621 : // Write out the elem extra integers after the connectivity
622 24062 : for (dof_id_type n=0; n<n_elem_integers; n++)
623 3726 : output_buffer.push_back(*recv_conn_iter++);
624 :
625 : io.data_stream
626 22690 : (output_buffer.data(),
627 : cast_int<unsigned int>(output_buffer.size()),
628 : cast_int<unsigned int>(output_buffer.size()));
629 : }
630 : }
631 : }
632 : else
633 1049 : this->comm().send (0, xfer_conn);
634 :
635 : // update the processor_offsets
636 1303 : processor_offsets[0] = processor_offsets.back() + n_elem_on_proc.back();
637 1303 : this->comm().gather (0, my_n_elem_written_at_level, n_elem_on_proc);
638 12519 : for (auto pid : IntRange<processor_id_type>(1, this->n_processors()))
639 11354 : processor_offsets[pid] = processor_offsets[pid-1] + n_elem_on_proc[pid-1];
640 :
641 : // Now, at the next level we will again iterate over local parents. However,
642 : // those parents may have been written by other processors (at this step),
643 : // so we need to gather them into our *_id_maps.
644 : {
645 92 : std::map<processor_id_type, std::vector<dof_id_type>> requested_ids;
646 :
647 4914 : for (const auto & elem : as_range(mesh.local_level_elements_begin(level),
648 43438 : mesh.local_level_elements_end(level)))
649 20336 : if (!child_id_map.count(elem->id()))
650 : {
651 97 : libmesh_assert_not_equal_to (elem->parent()->processor_id(), this->processor_id());
652 2086 : const processor_id_type pid = elem->parent()->processor_id();
653 2183 : if (pid != this->processor_id())
654 2086 : requested_ids[pid].push_back(elem->id());
655 1211 : }
656 :
657 : auto gather_functor =
658 801 : [& child_id_map]
659 : (processor_id_type libmesh_dbg_var(pid),
660 : const std::vector<dof_id_type> & ids,
661 2209 : std::vector<dof_id_type> & data)
662 : {
663 26 : const std::size_t ids_size = ids.size();
664 827 : data.resize(ids_size);
665 :
666 : // Fill those requests by overwriting the requested ids
667 2913 : for (std::size_t i=0; i != ids_size; i++)
668 : {
669 97 : libmesh_assert (child_id_map.count(ids[i]));
670 97 : libmesh_assert_equal_to (child_id_map[ids[i]].first, pid);
671 :
672 2183 : data[i] = child_id_map[ids[i]].second;
673 : }
674 2084 : };
675 :
676 : auto action_functor =
677 801 : [& child_id_map]
678 : (processor_id_type pid,
679 : const std::vector<dof_id_type> & ids,
680 2112 : const std::vector<dof_id_type> & data)
681 : {
682 26 : std::size_t data_size = data.size();
683 :
684 2913 : for (std::size_t i=0; i != data_size; i++)
685 2086 : child_id_map[ids[i]] =
686 388 : std::make_pair (pid, data[i]);
687 1283 : };
688 :
689 : // Trade ids back and forth
690 46 : const dof_id_type * ex = nullptr;
691 : Parallel::pull_parallel_vector_data
692 1303 : (this->comm(), requested_ids, gather_functor, action_functor, ex);
693 :
694 : // overwrite the parent_id_map with the child_id_map, but
695 : // use std::map::swap() for efficiency.
696 46 : parent_id_map.swap(child_id_map);
697 46 : child_id_map.clear();
698 : }
699 : }
700 : #endif // LIBMESH_ENABLE_AMR
701 42 : if (this->processor_id() == 0)
702 21 : libmesh_assert_equal_to (next_global_elem, n_elem);
703 :
704 1425 : }
705 :
706 :
707 :
708 1425 : void XdrIO::write_serialized_nodes (Xdr & io, const dof_id_type max_node_id,
709 : const new_header_id_type n_node_integers) const
710 : {
711 : // convenient reference to our mesh
712 84 : const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
713 42 : libmesh_assert_equal_to (max_node_id, mesh.max_node_id());
714 :
715 84 : std::vector<dof_id_type> xfer_ids;
716 84 : std::vector<Real> xfer_coords;
717 42 : std::vector<Real> & coords=xfer_coords;
718 :
719 1551 : std::vector<std::vector<dof_id_type>> recv_ids (this->n_processors());
720 1551 : std::vector<std::vector<Real>> recv_coords(this->n_processors());
721 :
722 : #ifndef NDEBUG
723 42 : std::size_t n_written=0;
724 : #endif
725 :
726 : // Note: do not be tempted to replace the node loops below with
727 : // range-based iterators, these iterators must be defined outside
728 : // the blk loop since node_iter is updated at each iteration.
729 1467 : MeshBase::const_node_iterator node_iter = mesh.local_nodes_begin();
730 1467 : const MeshBase::const_node_iterator nodes_end = mesh.local_nodes_end();
731 :
732 2850 : for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
733 : {
734 1425 : const std::size_t first_node = blk*io_blksize;
735 1425 : last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));
736 :
737 1425 : const std::size_t tot_id_size = last_node - first_node;
738 :
739 : // Build up the xfer buffers on each processor
740 42 : xfer_ids.clear();
741 42 : xfer_coords.clear();
742 :
743 123519 : for (; node_iter != nodes_end; ++node_iter)
744 : {
745 67199 : const Node & node = **node_iter;
746 6152 : libmesh_assert_greater_equal(node.id(), first_node);
747 67199 : if (node.id() >= last_node)
748 0 : break;
749 :
750 67199 : xfer_ids.push_back(node.id());
751 67199 : xfer_coords.push_back(node(0));
752 : #if LIBMESH_DIM > 1
753 67199 : xfer_coords.push_back(node(1));
754 : #endif
755 : #if LIBMESH_DIM > 2
756 67199 : xfer_coords.push_back(node(2));
757 : #endif
758 : }
759 :
760 : //-------------------------------------
761 : // Send the xfer buffers to processor 0
762 84 : std::vector<std::size_t> ids_size;
763 :
764 1425 : const std::size_t my_ids_size = xfer_ids.size();
765 :
766 : // explicitly gather ids_size
767 1425 : this->comm().gather (0, my_ids_size, ids_size);
768 :
769 : // We will have lots of simultaneous receives if we are
770 : // processor 0, so let's use nonblocking receives.
771 : std::vector<Parallel::Request>
772 1551 : id_request_handles(this->n_processors()-1),
773 1551 : coord_request_handles(this->n_processors()-1);
774 :
775 : Parallel::MessageTag
776 1509 : id_tag = mesh.comm().get_unique_tag(),
777 1509 : coord_tag = mesh.comm().get_unique_tag();
778 :
779 : // Post the receives -- do this on processor 0 only.
780 1467 : if (this->processor_id() == 0)
781 : {
782 1668 : for (auto pid : make_range(this->n_processors()))
783 : {
784 1509 : recv_ids[pid].resize(ids_size[pid]);
785 1509 : recv_coords[pid].resize(ids_size[pid]*LIBMESH_DIM);
786 :
787 1425 : if (pid == 0)
788 : {
789 243 : recv_ids[0] = xfer_ids;
790 243 : recv_coords[0] = xfer_coords;
791 : }
792 : else
793 : {
794 1203 : this->comm().receive (pid, recv_ids[pid],
795 1182 : id_request_handles[pid-1],
796 : id_tag);
797 1203 : this->comm().receive (pid, recv_coords[pid],
798 42 : coord_request_handles[pid-1],
799 : coord_tag);
800 : }
801 : }
802 : }
803 : else
804 : {
805 : // Send -- do this on all other processors.
806 1161 : this->comm().send(0, xfer_ids, id_tag);
807 1182 : this->comm().send(0, xfer_coords, coord_tag);
808 : }
809 :
810 : // -------------------------------------------------------
811 : // Receive the messages and write the output on processor 0.
812 1467 : if (this->processor_id() == 0)
813 : {
814 : // Wait for all the receives to complete. We have no
815 : // need for the statuses since we already know the
816 : // buffer sizes.
817 243 : Parallel::wait (id_request_handles);
818 243 : Parallel::wait (coord_request_handles);
819 :
820 : #ifndef NDEBUG
821 63 : for (auto pid : make_range(this->n_processors()))
822 42 : libmesh_assert_equal_to(recv_coords[pid].size(),
823 : recv_ids[pid].size()*LIBMESH_DIM);
824 : #endif
825 :
826 : // Write the coordinates in this block.
827 : // Some of these coordinates may correspond to ids for which
828 : // no node exists, if we have a discontiguous node
829 : // numbering!
830 : //
831 : // The purpose of communicating the xfer_ids/recv_ids buffer
832 : // is so that we can handle discontiguous node numberings:
833 : // we do not actually write the node ids themselves anywhere
834 : // in the xdr file. We do write the unique ids to file, if
835 : // enabled (see next section).
836 :
837 : // Write invalid values for unused node ids
838 21 : coords.clear();
839 243 : coords.resize (3*tot_id_size, std::numeric_limits<Real>::quiet_NaN());
840 :
841 1668 : for (auto pid : make_range(this->n_processors()))
842 68624 : for (auto idx : index_range(recv_ids[pid]))
843 : {
844 6152 : libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
845 67199 : const std::size_t local_idx = recv_ids[pid][idx] - first_node;
846 6152 : libmesh_assert_less(local_idx, tot_id_size);
847 :
848 6152 : libmesh_assert_less ((3*local_idx+2), coords.size());
849 6152 : libmesh_assert_less ((LIBMESH_DIM*idx+LIBMESH_DIM-1), recv_coords[pid].size());
850 :
851 79503 : coords[3*local_idx+0] = recv_coords[pid][LIBMESH_DIM*idx+0];
852 : #if LIBMESH_DIM > 1
853 73351 : coords[3*local_idx+1] = recv_coords[pid][LIBMESH_DIM*idx+1];
854 : #else
855 : coords[3*local_idx+1] = 0.;
856 : #endif
857 : #if LIBMESH_DIM > 2
858 79503 : coords[3*local_idx+2] = recv_coords[pid][LIBMESH_DIM*idx+2];
859 : #else
860 : coords[3*local_idx+2] = 0.;
861 : #endif
862 :
863 : #ifndef NDEBUG
864 6152 : n_written++;
865 : #endif
866 : }
867 :
868 486 : io.data_stream (coords.empty() ? nullptr : coords.data(),
869 : cast_int<unsigned int>(coords.size()), /*line_break=*/3);
870 : }
871 1341 : } // end for (block-based coord writes)
872 :
873 84 : if (this->processor_id() == 0)
874 21 : libmesh_assert_less_equal (n_written, max_node_id);
875 :
876 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
877 : // XDR unsigned char doesn't work as anticipated
878 1425 : unsigned short write_unique_ids = 1;
879 : #else
880 : unsigned short write_unique_ids = 0;
881 : #endif
882 :
883 1425 : if (this->processor_id() == 0)
884 243 : io.data (write_unique_ids, "# presence of unique ids");
885 :
886 : #ifdef LIBMESH_ENABLE_UNIQUE_ID
887 : {
888 84 : std::vector<xdr_id_type> xfer_unique_ids;
889 42 : std::vector<xdr_id_type> & unique_ids=xfer_unique_ids;
890 1551 : std::vector<std::vector<xdr_id_type>> recv_unique_ids (this->n_processors());
891 :
892 : #ifndef NDEBUG
893 : // Reset write counter
894 42 : n_written = 0;
895 : #endif
896 :
897 : // Return node iterator to the beginning
898 1467 : node_iter = mesh.local_nodes_begin();
899 :
900 2850 : for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
901 : {
902 1425 : const std::size_t first_node = blk*io_blksize;
903 1425 : last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));
904 :
905 1425 : const std::size_t tot_id_size = last_node - first_node;
906 :
907 : // Build up the xfer buffers on each processor
908 42 : xfer_ids.clear();
909 1425 : xfer_ids.reserve(tot_id_size);
910 42 : xfer_unique_ids.clear();
911 1425 : xfer_unique_ids.reserve(tot_id_size);
912 :
913 68624 : for (; node_iter != nodes_end; ++node_iter)
914 : {
915 67199 : const Node & node = **node_iter;
916 6152 : libmesh_assert_greater_equal(node.id(), first_node);
917 67199 : if (node.id() >= last_node)
918 0 : break;
919 :
920 67199 : xfer_ids.push_back(node.id());
921 67199 : xfer_unique_ids.push_back(node.unique_id());
922 : }
923 :
924 : //-------------------------------------
925 : // Send the xfer buffers to processor 0
926 84 : std::vector<std::size_t> ids_size;
927 :
928 1425 : const std::size_t my_ids_size = xfer_ids.size();
929 :
930 : // explicitly gather ids_size
931 1425 : this->comm().gather (0, my_ids_size, ids_size);
932 :
933 : // We will have lots of simultaneous receives if we are
934 : // processor 0, so let's use nonblocking receives.
935 : std::vector<Parallel::Request>
936 1551 : unique_id_request_handles(this->n_processors()-1),
937 1551 : id_request_handles(this->n_processors()-1);
938 :
939 : Parallel::MessageTag
940 1509 : unique_id_tag = mesh.comm().get_unique_tag(),
941 1509 : id_tag = mesh.comm().get_unique_tag();
942 :
943 : // Post the receives -- do this on processor 0 only.
944 1467 : if (this->processor_id() == 0)
945 : {
946 1668 : for (auto pid : make_range(this->n_processors()))
947 : {
948 1509 : recv_ids[pid].resize(ids_size[pid]);
949 1509 : recv_unique_ids[pid].resize(ids_size[pid]);
950 :
951 1425 : if (pid == 0)
952 : {
953 243 : recv_ids[0] = xfer_ids;
954 243 : recv_unique_ids[0] = xfer_unique_ids;
955 : }
956 : else
957 : {
958 1203 : this->comm().receive (pid, recv_ids[pid],
959 1182 : id_request_handles[pid-1],
960 : id_tag);
961 1203 : this->comm().receive (pid, recv_unique_ids[pid],
962 42 : unique_id_request_handles[pid-1],
963 : unique_id_tag);
964 : }
965 : }
966 : }
967 : else
968 : {
969 : // Send -- do this on all other processors.
970 1161 : this->comm().send(0, xfer_ids, id_tag);
971 1182 : this->comm().send(0, xfer_unique_ids, unique_id_tag);
972 : }
973 :
974 : // -------------------------------------------------------
975 : // Receive the messages and write the output on processor 0.
976 1467 : if (this->processor_id() == 0)
977 : {
978 : // Wait for all the receives to complete. We have no
979 : // need for the statuses since we already know the
980 : // buffer sizes.
981 243 : Parallel::wait (id_request_handles);
982 243 : Parallel::wait (unique_id_request_handles);
983 :
984 : #ifndef NDEBUG
985 63 : for (auto pid : make_range(this->n_processors()))
986 42 : libmesh_assert_equal_to
987 : (recv_ids[pid].size(), recv_unique_ids[pid].size());
988 : #endif
989 :
990 21 : libmesh_assert_less_equal
991 : (tot_id_size, std::min(io_blksize, std::size_t(max_node_id)));
992 :
993 : // Write the unique ids in this block.
994 21 : unique_ids.clear();
995 243 : unique_ids.resize(tot_id_size, unique_id_type(-1));
996 :
997 1668 : for (auto pid : make_range(this->n_processors()))
998 68624 : for (auto idx : index_range(recv_ids[pid]))
999 : {
1000 6152 : libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
1001 67199 : const std::size_t local_idx = recv_ids[pid][idx] - first_node;
1002 6152 : libmesh_assert_less (local_idx, unique_ids.size());
1003 :
1004 85655 : unique_ids[local_idx] = recv_unique_ids[pid][idx];
1005 :
1006 : #ifndef NDEBUG
1007 6152 : n_written++;
1008 : #endif
1009 : }
1010 :
1011 486 : io.data_stream (unique_ids.empty() ? nullptr : unique_ids.data(),
1012 : cast_int<unsigned int>(unique_ids.size()), /*line_break=*/1);
1013 : }
1014 1341 : } // end for (block-based unique_id writes)
1015 :
1016 42 : if (this->processor_id() == 0)
1017 21 : libmesh_assert_less_equal (n_written, max_node_id);
1018 1341 : }
1019 : #endif // LIBMESH_ENABLE_UNIQUE_ID
1020 :
1021 : // Next: do "block"-based I/O for the extra node integers (if necessary)
1022 1425 : if (n_node_integers)
1023 : {
1024 : #ifndef NDEBUG
1025 : // Reset write counter
1026 8 : n_written = 0;
1027 : #endif
1028 :
1029 : // Return node iterator to the beginning
1030 560 : node_iter = mesh.local_nodes_begin();
1031 :
1032 : // Data structures for writing "extra" node integers
1033 16 : std::vector<dof_id_type> xfer_node_integers;
1034 8 : std::vector<dof_id_type> & node_integers = xfer_node_integers;
1035 308 : std::vector<std::vector<dof_id_type>> recv_node_integers(this->n_processors());
1036 :
1037 568 : for (std::size_t blk=0, last_node=0; last_node<max_node_id; blk++)
1038 : {
1039 284 : const std::size_t first_node = blk*io_blksize;
1040 284 : last_node = std::min((blk+1)*io_blksize, std::size_t(max_node_id));
1041 :
1042 284 : const std::size_t tot_id_size = last_node - first_node;
1043 :
1044 : // Build up the xfer buffers on each processor
1045 8 : xfer_ids.clear();
1046 284 : xfer_ids.reserve(tot_id_size);
1047 8 : xfer_node_integers.clear();
1048 284 : xfer_node_integers.reserve(tot_id_size * n_node_integers);
1049 :
1050 3144 : for (; node_iter != nodes_end; ++node_iter)
1051 : {
1052 1560 : const Node & node = **node_iter;
1053 130 : libmesh_assert_greater_equal(node.id(), first_node);
1054 1560 : if (node.id() >= last_node)
1055 0 : break;
1056 :
1057 1560 : xfer_ids.push_back(node.id());
1058 :
1059 : // Append current node's node integers to xfer buffer
1060 8580 : for (unsigned int i=0; i != n_node_integers; ++i)
1061 7800 : xfer_node_integers.push_back(node.get_extra_integer(i));
1062 : }
1063 :
1064 : //-------------------------------------
1065 : // Send the xfer buffers to processor 0
1066 16 : std::vector<std::size_t> ids_size;
1067 :
1068 284 : const std::size_t my_ids_size = xfer_ids.size();
1069 :
1070 : // explicitly gather ids_size
1071 284 : this->comm().gather (0, my_ids_size, ids_size);
1072 :
1073 : // We will have lots of simultaneous receives if we are
1074 : // processor 0, so let's use nonblocking receives.
1075 : std::vector<Parallel::Request>
1076 308 : node_integers_request_handles(this->n_processors()-1),
1077 308 : id_request_handles(this->n_processors()-1);
1078 :
1079 : Parallel::MessageTag
1080 300 : node_integers_tag = mesh.comm().get_unique_tag(),
1081 300 : id_tag = mesh.comm().get_unique_tag();
1082 :
1083 : // Post the receives -- do this on processor 0 only.
1084 292 : if (this->processor_id() == 0)
1085 : {
1086 332 : for (auto pid : make_range(this->n_processors()))
1087 : {
1088 300 : recv_ids[pid].resize(ids_size[pid]);
1089 300 : recv_node_integers[pid].resize(n_node_integers * ids_size[pid]);
1090 :
1091 284 : if (pid == 0)
1092 : {
1093 48 : recv_ids[0] = xfer_ids;
1094 48 : recv_node_integers[0] = xfer_node_integers;
1095 : }
1096 : else
1097 : {
1098 240 : this->comm().receive (pid, recv_ids[pid],
1099 236 : id_request_handles[pid-1],
1100 : id_tag);
1101 240 : this->comm().receive (pid, recv_node_integers[pid],
1102 8 : node_integers_request_handles[pid-1],
1103 : node_integers_tag);
1104 : }
1105 : }
1106 : }
1107 : else
1108 : {
1109 : // Send -- do this on all other processors.
1110 232 : this->comm().send(0, xfer_ids, id_tag);
1111 236 : this->comm().send(0, xfer_node_integers, node_integers_tag);
1112 : }
1113 :
1114 : // -------------------------------------------------------
1115 : // Receive the messages and write the output on processor 0.
1116 292 : if (this->processor_id() == 0)
1117 : {
1118 : // Wait for all the receives to complete. We have no
1119 : // need for the statuses since we already know the
1120 : // buffer sizes.
1121 48 : Parallel::wait (id_request_handles);
1122 48 : Parallel::wait (node_integers_request_handles);
1123 :
1124 : #ifndef NDEBUG
1125 12 : for (auto pid : make_range(this->n_processors()))
1126 8 : libmesh_assert_equal_to
1127 : (recv_ids[pid].size(), recv_node_integers[pid].size() / n_node_integers);
1128 : #endif
1129 :
1130 4 : libmesh_assert_less_equal
1131 : (tot_id_size, std::min(io_blksize, std::size_t(max_node_id)));
1132 :
1133 : // Write the node integers in this block. We will write
1134 : // invalid node integers if no node exists, i.e. if we
1135 : // have a discontiguous node numbering!
1136 : //
1137 : // The purpose of communicating the xfer_ids/recv_ids buffer
1138 : // is so that we can handle discontiguous node numberings:
1139 : // we do not actually write the node ids themselves anywhere
1140 : // in the xdr file. We do write the unique ids to file, if
1141 : // enabled (see previous section).
1142 :
1143 : // Note: we initialize the node_integers array with invalid values,
1144 : // so any indices which don't get written to in the loop below will
1145 : // just contain invalid values.
1146 4 : node_integers.clear();
1147 48 : node_integers.resize (n_node_integers*tot_id_size, static_cast<dof_id_type>(-1));
1148 :
1149 332 : for (auto pid : make_range(this->n_processors()))
1150 1844 : for (auto idx : index_range(recv_ids[pid]))
1151 : {
1152 130 : libmesh_assert_less_equal(first_node, recv_ids[pid][idx]);
1153 1560 : const std::size_t local_idx = recv_ids[pid][idx] - first_node;
1154 :
1155 : // Assert that we won't index past the end of the node_integers array
1156 130 : libmesh_assert_less (local_idx, tot_id_size);
1157 :
1158 8580 : for (unsigned int i=0; i != n_node_integers; ++i)
1159 9360 : node_integers[n_node_integers*local_idx + i] = recv_node_integers[pid][n_node_integers*idx + i];
1160 :
1161 : #ifndef NDEBUG
1162 130 : n_written++;
1163 : #endif
1164 : }
1165 :
1166 96 : io.data_stream (node_integers.empty() ? nullptr : node_integers.data(),
1167 : cast_int<unsigned int>(node_integers.size()), /*line_break=*/n_node_integers);
1168 : }
1169 268 : } // end for (block-based unique_id writes)
1170 :
1171 8 : if (this->processor_id() == 0)
1172 4 : libmesh_assert_less_equal (n_written, max_node_id);
1173 :
1174 268 : } // end if (n_node_integers)
1175 2766 : }
1176 :
1177 :
1178 :
1179 4275 : void XdrIO::write_serialized_bcs_helper (Xdr & io, const new_header_id_type n_bcs, const std::string bc_type) const
1180 : {
1181 126 : libmesh_assert (io.writing());
1182 :
1183 : // convenient reference to our mesh
1184 252 : const MeshBase & mesh = MeshOutput<MeshBase>::mesh();
1185 :
1186 : // and our boundary info object
1187 126 : const BoundaryInfo & boundary_info = mesh.get_boundary_info();
1188 :
1189 : // Version 0.9.2+ introduces entity names
1190 4275 : write_serialized_bc_names(io, boundary_info, true); // sideset names
1191 :
1192 4275 : new_header_id_type n_bcs_out = n_bcs;
1193 4401 : if (this->processor_id() == 0)
1194 : {
1195 729 : std::stringstream comment_string;
1196 1332 : comment_string << "# number of " << bc_type << " boundary conditions";
1197 792 : io.data (n_bcs_out, comment_string.str());
1198 603 : }
1199 4275 : n_bcs_out = 0;
1200 :
1201 4275 : if (!n_bcs) return;
1202 :
1203 96 : std::vector<xdr_id_type> xfer_bcs, recv_bcs;
1204 1734 : std::vector<std::size_t> bc_sizes(this->n_processors());
1205 :
1206 : // Container to catch boundary IDs handed back by BoundaryInfo
1207 96 : std::vector<boundary_id_type> bc_ids;
1208 :
1209 : // Boundary conditions are only specified for level-0 elements
1210 1566 : dof_id_type n_local_level_0_elem=0;
1211 6905 : for (const auto & elem : as_range(mesh.local_level_elements_begin(0),
1212 50544 : mesh.local_level_elements_end(0)))
1213 : {
1214 44088 : if (bc_type == "side")
1215 : {
1216 117604 : for (auto s : elem->side_index_range())
1217 : {
1218 93096 : boundary_info.boundary_ids (elem, s, bc_ids);
1219 100176 : for (const auto & bc_id : bc_ids)
1220 7080 : if (bc_id != BoundaryInfo::invalid_id)
1221 : {
1222 7080 : xfer_bcs.push_back (n_local_level_0_elem);
1223 7080 : xfer_bcs.push_back (s) ;
1224 7080 : xfer_bcs.push_back (bc_id);
1225 : }
1226 : }
1227 : }
1228 21468 : else if (bc_type == "edge")
1229 : {
1230 19625 : for (auto e : elem->edge_index_range())
1231 : {
1232 18000 : boundary_info.edge_boundary_ids (elem, e, bc_ids);
1233 18060 : for (const auto & bc_id : bc_ids)
1234 60 : if (bc_id != BoundaryInfo::invalid_id)
1235 : {
1236 60 : xfer_bcs.push_back (n_local_level_0_elem);
1237 60 : xfer_bcs.push_back (e) ;
1238 60 : xfer_bcs.push_back (bc_id);
1239 : }
1240 : }
1241 : }
1242 19968 : else if (bc_type == "shellface")
1243 : {
1244 59904 : for (unsigned short sf=0; sf<2; sf++)
1245 : {
1246 39936 : boundary_info.shellface_boundary_ids (elem, sf, bc_ids);
1247 79872 : for (const auto & bc_id : bc_ids)
1248 39936 : if (bc_id != BoundaryInfo::invalid_id)
1249 : {
1250 39936 : xfer_bcs.push_back (n_local_level_0_elem);
1251 39936 : xfer_bcs.push_back (sf) ;
1252 39936 : xfer_bcs.push_back (bc_id);
1253 : }
1254 : }
1255 : }
1256 : else
1257 : {
1258 0 : libmesh_error_msg("bc_type not recognized: " + bc_type);
1259 : }
1260 :
1261 : // Increment the level-0 element counter.
1262 44088 : n_local_level_0_elem++;
1263 1542 : }
1264 :
1265 1638 : xfer_bcs.push_back(n_local_level_0_elem);
1266 1638 : std::size_t my_size = xfer_bcs.size();
1267 1638 : this->comm().gather (0, my_size, bc_sizes);
1268 :
1269 : // All processors send their xfer buffers to processor 0
1270 : // Processor 0 will receive all buffers and write out the bcs
1271 1686 : if (this->processor_id() == 0)
1272 : {
1273 24 : dof_id_type elem_offset = 0;
1274 1917 : for (auto pid : make_range(this->n_processors()))
1275 : {
1276 1686 : recv_bcs.resize(bc_sizes[pid]);
1277 1638 : if (pid == 0)
1278 279 : recv_bcs = xfer_bcs;
1279 : else
1280 1359 : this->comm().receive (pid, recv_bcs);
1281 :
1282 : const dof_id_type my_n_local_level_0_elem
1283 1638 : = cast_int<dof_id_type>(recv_bcs.back());
1284 48 : recv_bcs.pop_back();
1285 :
1286 48714 : for (std::size_t idx=0, rbs=recv_bcs.size(); idx<rbs; idx += 3, n_bcs_out++)
1287 47076 : recv_bcs[idx+0] += elem_offset;
1288 :
1289 2643 : io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
1290 : cast_int<unsigned int>(recv_bcs.size()), 3);
1291 1638 : elem_offset += my_n_local_level_0_elem;
1292 : }
1293 24 : libmesh_assert_equal_to (n_bcs, n_bcs_out);
1294 : }
1295 : else
1296 1335 : this->comm().send (0, xfer_bcs);
1297 : }
1298 :
1299 :
1300 :
// Write the "side" boundary conditions (element/side/boundary-id
// triples) by delegating to the generic block-based helper.
void XdrIO::write_serialized_side_bcs (Xdr & io, const new_header_id_type n_side_bcs) const
{
  write_serialized_bcs_helper(io, n_side_bcs, "side");
}
1305 :
1306 :
1307 :
// Write the "edge" boundary conditions (element/edge/boundary-id
// triples) by delegating to the generic block-based helper.
void XdrIO::write_serialized_edge_bcs (Xdr & io, const new_header_id_type n_edge_bcs) const
{
  write_serialized_bcs_helper(io, n_edge_bcs, "edge");
}
1312 :
1313 :
1314 :
// Write the "shellface" boundary conditions (element/shellface/boundary-id
// triples) by delegating to the generic block-based helper.
void XdrIO::write_serialized_shellface_bcs (Xdr & io, const new_header_id_type n_shellface_bcs) const
{
  write_serialized_bcs_helper(io, n_shellface_bcs, "shellface");
}
1319 :
1320 :
1321 :
// Write the nodesets in serialized form: each processor builds a flat
// buffer of (global node id, boundary id) pairs for its local nodes;
// processor 0 gathers all buffers and writes them to the XDR stream.
//
// \param io         XDR stream, open for writing (checked by assert)
// \param n_nodesets global number of (node, boundary id) pairs
void XdrIO::write_serialized_nodesets (Xdr & io, const new_header_id_type n_nodesets) const
{
  libmesh_assert (io.writing());

  // convenient reference to our mesh
  const MeshBase & mesh = MeshOutput<MeshBase>::mesh();

  // and our boundary info object
  const BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // Version 0.9.2+ introduces entity names
  write_serialized_bc_names(io, boundary_info, false); // nodeset names

  // Processor 0 writes the count header; n_nodesets_out is then
  // reused below as a running tally of pairs actually written.
  new_header_id_type n_nodesets_out = n_nodesets;
  if (this->processor_id() == 0)
    io.data (n_nodesets_out, "# number of nodesets");
  n_nodesets_out = 0;

  if (!n_nodesets) return;

  std::vector<xdr_id_type> xfer_bcs, recv_bcs;
  std::vector<std::size_t> bc_sizes(this->n_processors());

  // Container to catch boundary IDs handed back by BoundaryInfo
  std::vector<boundary_id_type> nodeset_ids;

  // NOTE(review): n_node is never incremented in the loop below, so
  // the trailing count and the node_offset arithmetic further down are
  // always zero.  The ids pushed are already global (node->id()), so a
  // zero offset is harmless -- confirm this vestigial counter is
  // intentional.
  dof_id_type n_node=0;
  for (const auto & node : mesh.local_node_ptr_range())
    {
      boundary_info.boundary_ids (node, nodeset_ids);
      for (const auto & bc_id : nodeset_ids)
        if (bc_id != BoundaryInfo::invalid_id)
          {
            // (global node id, boundary id) pair
            xfer_bcs.push_back (node->id());
            xfer_bcs.push_back (bc_id);
          }
    }

  // Append the (always-zero, see NOTE above) node count so processor 0
  // can pop it back off each received buffer.
  xfer_bcs.push_back(n_node);
  std::size_t my_size = xfer_bcs.size();
  this->comm().gather (0, my_size, bc_sizes);

  // All processors send their xfer buffers to processor 0
  // Processor 0 will receive all buffers and write out the bcs
  if (this->processor_id() == 0)
    {
      dof_id_type node_offset = 0;
      for (auto pid : make_range(this->n_processors()))
        {
          recv_bcs.resize(bc_sizes[pid]);
          if (pid == 0)
            recv_bcs = xfer_bcs;
          else
            this->comm().receive (pid, recv_bcs);

          // Strip the trailing count off the buffer.
          const dof_id_type my_n_node =
            cast_int<dof_id_type>(recv_bcs.back());
          recv_bcs.pop_back();

          // Apply the (zero) offset while tallying pairs written.
          for (std::size_t idx=0, rbs=recv_bcs.size(); idx<rbs; idx += 2, n_nodesets_out++)
            recv_bcs[idx+0] += node_offset;

          io.data_stream (recv_bcs.empty() ? nullptr : recv_bcs.data(),
                          cast_int<unsigned int>(recv_bcs.size()), 2);
          node_offset += my_n_node;
        }
      libmesh_assert_equal_to (n_nodesets, n_nodesets_out);
    }
  else
    this->comm().send (0, xfer_bcs);
}
1393 :
1394 :
1395 :
1396 5700 : void XdrIO::write_serialized_bc_names (Xdr & io, const BoundaryInfo & info, bool is_sideset) const
1397 : {
1398 5868 : if (this->processor_id() == 0)
1399 : {
1400 972 : const std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
1401 84 : info.get_sideset_name_map() : info.get_nodeset_name_map();
1402 :
1403 168 : std::vector<new_header_id_type> boundary_ids;
1404 972 : boundary_ids.reserve(boundary_map.size());
1405 :
1406 252 : std::vector<std::string> boundary_names;
1407 972 : boundary_names.reserve(boundary_map.size());
1408 :
1409 : // We need to loop over the map and make sure that there aren't any invalid entries. Since we
1410 : // return writable references in boundary_info, it's possible for the user to leave some entity names
1411 : // blank. We can't write those to the XDA file.
1412 972 : new_header_id_type n_boundary_names = 0;
1413 3324 : for (const auto & [bndry_id, name] : boundary_map)
1414 2352 : if (!name.empty())
1415 : {
1416 2352 : n_boundary_names++;
1417 2352 : boundary_ids.push_back(bndry_id);
1418 2352 : boundary_names.push_back(name);
1419 : }
1420 :
1421 972 : if (is_sideset)
1422 729 : io.data(n_boundary_names, "# sideset id to name map");
1423 : else
1424 243 : io.data(n_boundary_names, "# nodeset id to name map");
1425 : // Write out the ids and names in two vectors
1426 972 : if (n_boundary_names)
1427 : {
1428 588 : io.data(boundary_ids);
1429 588 : io.data(boundary_names);
1430 : }
1431 804 : }
1432 5700 : }
1433 :
1434 :
1435 :
// Read a mesh from the named XDR/XDA file.  Processor 0 opens and
// reads the file; each read_* helper broadcasts its data to the other
// processors as it goes.
//
// \param name  path of the file to read (only opened on processor 0)
void XdrIO::read (const std::string & name)
{
  LOG_SCOPE("read()","XdrIO");

  // Only open the file on processor 0 -- this is especially important because
  // there may be an underlying bzip/bunzip going on, and multiple simultaneous
  // calls will produce a race condition.
  Xdr io (this->processor_id() == 0 ? name : "", this->binary() ? DECODE : READ);

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // get the version string.
  if (this->processor_id() == 0)
    io.data (this->version());
  this->comm().broadcast (this->version());

  // note that for "legacy" files the first entry is an
  // integer -- not a string at all.
  this->legacy() = !(this->version().find("libMesh") < this->version().size());

  // Check for a legacy version format.
  libmesh_error_msg_if(this->legacy(), "We no longer support reading files in the legacy format.");

  // Read headers with the old id type if they're pre-1.3.0, or with
  // the new id type if they're post-1.3.0
  const unsigned int n_header_metadata_values = 13;
  std::vector<new_header_id_type> meta_data(n_header_metadata_values, sizeof(xdr_id_type));
  if (this->version_at_least_1_3_0())
    {
      this->read_header(io, meta_data);
    }
  else
    {
      // In pre-1.3.0 format there were only 10 metadata values in the header, but
      // in newer versions there are more. The unread values will be set to 0.
      std::vector<old_header_id_type> old_data(n_header_metadata_values, sizeof(xdr_id_type));

      this->read_header(io, old_data);

      // Widen the old-format values into the new header id type.
      meta_data.assign(old_data.begin(), old_data.end());
    }

  // Header slots 0 and 1 hold the global element and node counts.
  const new_header_id_type & n_elem = meta_data[0];
  const new_header_id_type & n_nodes = meta_data[1];

  /**
   * We are future proofing the layout of this file by adding in size information for all stored types.
   * TODO: All types are stored as the same size. Use the size information to pack things efficiently.
   * For now we will assume that "type size" is how the entire file will be encoded.
   */
  if (version_at_least_0_9_2())
    _field_width = cast_int<unsigned int>(meta_data[2]);

  // On systems where uint64_t==unsigned long, we were previously
  // writing 64-bit unsigned integers via xdr_u_long(), a function
  // which is literally less suited for that task than abort() would
  // have been, because at least abort() would have *known* it
  // couldn't write rather than truncating writes to 32 bits.
  //
  // If we have files with version < 1.3.0, then we'll continue to use
  // 32 bit field width, regardless of whether the file thinks we
  // should, whenever we're on a system where the problem would have
  // occurred.
  //
  // NOTE(review): if _field_width is neither 4 nor 8 (and the legacy
  // workaround below does not apply), neither branch runs and the mesh
  // body is silently skipped -- confirm that is the intended behavior.
  if ((_field_width == 4) ||
      (!version_at_least_1_3_0() &&
       libmesh_type_is_same<uint64_t, unsigned long>::value))
    {
      // 32-bit field width: type_size's type selects the uint32_t
      // instantiations of the templated read helpers.
      uint32_t type_size = 0;

      // read subdomain names
      this->read_serialized_subdomain_names(io);

      // read connectivity
      this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);

      // read the nodal locations
      this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes), meta_data);

      // read the side boundary conditions
      this->read_serialized_side_bcs (io, type_size);

      if (version_at_least_0_9_2())
        // read the nodesets
        this->read_serialized_nodesets (io, type_size);

      if (version_at_least_1_1_0())
        {
          // read the edge boundary conditions
          this->read_serialized_edge_bcs (io, type_size);

          // read the "shell face" boundary conditions
          this->read_serialized_shellface_bcs (io, type_size);
        }
    }
  else if (_field_width == 8)
    {
      // 64-bit field width: same sequence of reads, instantiated for
      // uint64_t.
      uint64_t type_size = 0;

      // read subdomain names
      this->read_serialized_subdomain_names(io);

      // read connectivity
      this->read_serialized_connectivity (io, cast_int<dof_id_type>(n_elem), meta_data, type_size);

      // read the nodal locations
      this->read_serialized_nodes (io, cast_int<dof_id_type>(n_nodes), meta_data);

      // read the boundary conditions
      this->read_serialized_side_bcs (io, type_size);

      if (version_at_least_0_9_2())
        // read the nodesets
        this->read_serialized_nodesets (io, type_size);

      if (version_at_least_1_1_0())
        {
          // read the edge boundary conditions
          this->read_serialized_edge_bcs (io, type_size);

          // read the "shell face" boundary conditions
          this->read_serialized_shellface_bcs (io, type_size);
        }
    }

  // set the node processor ids
  Partitioner::set_node_processor_ids(mesh);
}
1564 :
1565 :
1566 :
// Read the file header on processor 0 and broadcast its contents to
// all other processors, then size the mesh and register any extra
// node/element integers and elemset codes it declares.
//
// \tparam T          header id type: old_header_id_type for pre-1.3.0
//                    files, new_header_id_type afterwards
// \param io          XDR stream, positioned at the header
// \param meta_data   filled with the header values (counts and sizes)
template <typename T>
void XdrIO::read_header (Xdr & io, std::vector<T> & meta_data)
{
  LOG_SCOPE("read_header()","XdrIO");

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // Header information to be read on processor 0 and broadcast to
  // other procs.
  std::vector<std::string> node_integer_names;
  std::vector<std::string> elem_integer_names;
  std::vector<dof_id_type> elemset_codes;
  std::vector<std::vector<dof_id_type>> elemset_id_vecs;

  if (this->processor_id() == 0)
    {
      // meta_data[0]/[1]: global element and node counts.
      io.data (meta_data[0]);
      io.data (meta_data[1]);
      io.data (this->boundary_condition_file_name()); // libMesh::out << "bc_file=" << this->boundary_condition_file_name() << std::endl;
      io.data (this->subdomain_map_file_name()); // libMesh::out << "sid_file=" << this->subdomain_map_file_name() << std::endl;
      io.data (this->partition_map_file_name()); // libMesh::out << "pid_file=" << this->partition_map_file_name() << std::endl;
      io.data (this->polynomial_level_file_name()); // libMesh::out << "pl_file=" << this->polynomial_level_file_name() << std::endl;

      if (version_at_least_0_9_2())
        {
          // Make sure there's enough room in meta_data
          libmesh_assert_greater_equal(meta_data.size(), 10);

          io.data (meta_data[2], "# type size");
          io.data (meta_data[3], "# uid size");
          io.data (meta_data[4], "# pid size");
          io.data (meta_data[5], "# sid size");
          io.data (meta_data[6], "# p-level size");
          // Boundary Condition sizes
          io.data (meta_data[7], "# eid size"); // elem id
          io.data (meta_data[8], "# side size"); // side number
          io.data (meta_data[9], "# bid size"); // boundary id
        }

      if (version_at_least_1_8_0())
        {
          // Make sure there's enough room in meta_data
          libmesh_assert_greater_equal(meta_data.size(), 13);

          io.data (meta_data[10], "# extra integer size"); // extra integer size

          // Read in the node integer names and store the count in meta_data
          io.data(node_integer_names, "# node integer names");
          meta_data[11] = node_integer_names.size();

          // Read in the elem integer names and store the count in meta_data
          io.data(elem_integer_names, "# elem integer names");
          meta_data[12] = elem_integer_names.size();

          // Read in vector of elemset codes from file
          io.data(elemset_codes, "# elemset codes");

          // For each elemset code, read in the associated elemset ids from file
          elemset_id_vecs.resize(elemset_codes.size());
          for (auto i : index_range(elemset_codes))
            io.data(elemset_id_vecs[i]);
        }
    }

  // broadcast the n_elems, n_nodes, and size information
  this->comm().broadcast (meta_data);

  this->comm().broadcast (this->boundary_condition_file_name());
  this->comm().broadcast (this->subdomain_map_file_name());
  this->comm().broadcast (this->partition_map_file_name());
  this->comm().broadcast (this->polynomial_level_file_name());
  this->comm().broadcast(node_integer_names);
  this->comm().broadcast(elem_integer_names);
  this->comm().broadcast(elemset_codes);
  this->comm().broadcast(elemset_id_vecs);

  // Tell the mesh how many nodes/elements to expect. Depending on the mesh type,
  // this may allow for efficient adding of nodes/elements.
  const T & n_elem = meta_data[0];
  const T & n_nodes = meta_data[1];

  mesh.reserve_elem(cast_int<dof_id_type>(n_elem));
  mesh.reserve_nodes(cast_int<dof_id_type>(n_nodes));

  // Our mesh is pre-partitioned as it's created
  this->set_n_partitions(this->n_processors());

  /**
   * We are future proofing the layout of this file by adding in size information for all stored types.
   * TODO: All types are stored as the same size. Use the size information to pack things efficiently.
   * For now we will assume that "type size" is how the entire file will be encoded.
   */
  if (version_at_least_0_9_2())
    _field_width = cast_int<unsigned int>(meta_data[2]);

  // Add extra node and elem integers on all procs. Note that adding
  // an extra node/elem "datum" of type T is implemented via repeated
  // calls to MeshBase::add_elem_integer(), so we only need to call
  // MeshBase::add_node/elem_integers() here in order to restore the
  // the data that we had on the Mesh previously.
  mesh.add_node_integers(node_integer_names);
  mesh.add_elem_integers(elem_integer_names);

  // Store the elemset_code -> {elemset ids} mapping on the Mesh
  for (auto i : index_range(elemset_codes))
    mesh.add_elemset_code(elemset_codes[i],
                          MeshBase::elemset_type(elemset_id_vecs[i].begin(),
                                                 elemset_id_vecs[i].end()));
}
1677 :
1678 :
1679 :
// Read the subdomain id-to-name map (present in format 0.9.2+) on
// processor 0, broadcast it, and install it on the mesh.  Pre-1.3.0
// files store the ids with the old (narrower) header id type.
void XdrIO::read_serialized_subdomain_names(Xdr & io)
{
  const bool read_entity_info = version_at_least_0_9_2();
  const bool use_new_header_type (this->version_at_least_1_3_0());
  if (read_entity_info)
    {
      MeshBase & mesh = MeshInput<MeshBase>::mesh();

      new_header_id_type n_subdomain_names = 0;
      std::vector<new_header_id_type> subdomain_ids;
      std::vector<std::string> subdomain_names;

      // Read the subdomain names on processor 0 only.
      if (this->processor_id() == 0)
        {
          if (use_new_header_type)
            io.data(n_subdomain_names);
          else
            {
              // Older files store the count with the old header type.
              old_header_id_type temp;
              io.data(temp);
              n_subdomain_names = temp;
            }

          subdomain_ids.resize(n_subdomain_names);
          subdomain_names.resize(n_subdomain_names);

          // The id and name vectors are only present when non-empty.
          if (n_subdomain_names)
            {
              if (use_new_header_type)
                io.data(subdomain_ids);
              else
                {
                  std::vector<old_header_id_type> temp;
                  io.data(temp);
                  subdomain_ids.assign(temp.begin(), temp.end());
                }

              io.data(subdomain_names);
            }
        }

      // Broadcast the subdomain names to all processors
      this->comm().broadcast(n_subdomain_names);
      if (n_subdomain_names == 0)
        return;

      // Non-root processors size their buffers before the broadcasts.
      subdomain_ids.resize(n_subdomain_names);
      subdomain_names.resize(n_subdomain_names);
      this->comm().broadcast(subdomain_ids);
      this->comm().broadcast(subdomain_names);

      // Reassemble the named subdomain information
      std::map<subdomain_id_type, std::string> & subdomain_map = mesh.set_subdomain_name_map();

      for (unsigned int i=0; i<n_subdomain_names; ++i)
        subdomain_map.emplace(subdomain_ids[i], subdomain_names[i]);
    }
}
1740 :
// Read the element connectivity of a serialized (single-file) mesh and
// add the elements to our MeshBase.  The data is processed in blocks of
// at most io_blksize elements: processor 0 reads and packs each block
// from the Xdr stream and broadcasts it, then every processor unpacks
// the block and builds the corresponding Elems, creating placeholder
// nodes (positions filled in later by read_serialized_nodes()) as
// needed.
//
// \param io        Xdr stream, open for reading, positioned at the
//                  connectivity section.
// \param n_elem    total number of elements stored in the file.
// \param meta_data file header metadata; entry 3 flags the presence of
//                  unique ids (0.9.2+ files) and entry 12 holds the
//                  number of extra elem integers (1.8.0+ files).
// \param type_size dummy value fixing T, the on-disk integer width.
template <typename T>
void
XdrIO::read_serialized_connectivity (Xdr & io,
                                     const dof_id_type n_elem,
                                     const std::vector<new_header_id_type> & meta_data,
                                     T /*type_size*/)
{
  libmesh_assert (io.reading());

  // Nothing to do for an empty mesh.
  if (!n_elem) return;

  // A file name of "." means each optional per-element field is stored
  // inline in this same mesh file.
  const bool
    read_p_level = ("." == this->polynomial_level_file_name()),
    read_partitioning = ("." == this->partition_map_file_name()),
    read_subdomain_id = ("." == this->subdomain_map_file_name());

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // Keep track of what kinds of elements this file contains
  elems_of_dimension.clear();
  elems_of_dimension.resize(4, false);

  std::vector<T> conn, input_buffer(100 /* oversized ! */);

  // Refinement level currently being read; the file stores a count of
  // elements followed by those elements, level by level.
  int level=-1;

  // Version 0.9.2+ introduces unique ids
  const size_t unique_id_size_index = 3;

  const bool read_unique_id =
    (version_at_least_0_9_2()) &&
    meta_data[unique_id_size_index];

  // Version 1.8.0+ introduces elem integers
  const std::size_t n_elem_integers_index = 12;
  new_header_id_type n_elem_integers = 0;
  if (version_at_least_1_8_0())
    {
      libmesh_assert_greater_equal(meta_data.size(), 13);
      n_elem_integers = meta_data[n_elem_integers_index];
    }

  T n_elem_at_level=0, n_processed_at_level=0;
  for (dof_id_type blk=0, first_elem=0, last_elem=0;
       last_elem<n_elem; blk++)
    {
      first_elem = cast_int<dof_id_type>(blk*io_blksize);
      last_elem = cast_int<dof_id_type>(std::min(cast_int<std::size_t>((blk+1)*io_blksize),
                                                 cast_int<std::size_t>(n_elem)));

      conn.clear();

      // Processor 0 reads this block's elements from file and packs
      // them into "conn" for broadcast.
      if (this->processor_id() == 0)
        for (dof_id_type e=first_elem; e<last_elem; e++, n_processed_at_level++)
          {
            if (n_processed_at_level == n_elem_at_level)
              {
                // get the number of elements to read at this level
                io.data (n_elem_at_level);
                n_processed_at_level = 0;
                level++;
              }

            // "pos" is a cursor into input_buffer
            unsigned int pos = 0;

            // get the element type,
            io.data_stream (&input_buffer[pos++], 1);

            if (read_unique_id)
              io.data_stream (&input_buffer[pos++], 1);
            // Older versions won't have this field at all (no increment on pos)

            // maybe the parent; only level-0 elements have no parent stored
            if (level)
              io.data_stream (&input_buffer[pos++], 1);
            else
              // We can't always fit DofObject::invalid_id in an
              // xdr_id_type
              input_buffer[pos++] = static_cast<T>(-1);

            // maybe the processor id
            if (read_partitioning)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // maybe the subdomain id
            if (read_subdomain_id)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // maybe the p level
            if (read_p_level)
              io.data_stream (&input_buffer[pos++], 1);
            else
              input_buffer[pos++] = 0;

            // The element type (input_buffer[0]) determines how many
            // node ids follow.
            const unsigned int n_nodes0 = Elem::type_to_n_nodes_map[input_buffer[0]];
            if (n_nodes0 == invalid_uint)
              libmesh_not_implemented();

            // and all the nodes
            libmesh_assert_less (pos+n_nodes0, input_buffer.size());
            io.data_stream (&input_buffer[pos], n_nodes0);

            // Advance input_buffer cursor by number of nodes in this element
            pos += n_nodes0;

            // and all the elem "extra" integers
            libmesh_assert_less (pos + n_elem_integers, input_buffer.size());
            io.data_stream (&input_buffer[pos], n_elem_integers);

            // Advance input_buffer cursor by number of extra integers read
            pos += n_elem_integers;

            // Insert input_buffer at end of "conn"
            conn.insert (conn.end(), input_buffer.begin(), input_buffer.begin() + pos);
          }

      // Make the packed block available on every processor.
      std::size_t conn_size = conn.size();
      this->comm().broadcast(conn_size);
      conn.resize (conn_size);
      this->comm().broadcast (conn);

      // All processors now have the connectivity for this block.
      for (auto [e, it] = std::tuple{first_elem, conn.begin()}; e<last_elem; e++)
        {
          // Temporary variable for reading connectivity array
          // entries.
          T tmp;

          const ElemType elem_type = static_cast<ElemType>(*it++);
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          // We are on all processors here, so the mesh can easily
          // assign consistent unique ids if the file doesn't specify
          // them later. We'll use element ids for elements and start
          // past those for nodes.
          unique_id_type unique_id = e;
#endif
          if (read_unique_id)
            {
              tmp = *it++;

#ifdef LIBMESH_ENABLE_UNIQUE_ID
              unique_id = cast_int<unique_id_type>(tmp);
#endif
            }

          // The sentinel static_cast<T>(-1) written during packing maps
          // back to DofObject::invalid_id ("no parent").
          tmp = *it++;
          const dof_id_type parent_id =
            (tmp == static_cast<T>(-1)) ? DofObject::invalid_id : cast_int<dof_id_type>(tmp);

          const processor_id_type proc_id =
            cast_int<processor_id_type>(*it++);

          const subdomain_id_type subdomain_id =
            cast_int<subdomain_id_type>(*it++);

          tmp = *it++;
#ifdef LIBMESH_ENABLE_AMR
          const unsigned int p_level = cast_int<unsigned int>(tmp);
#endif

          // Coarser levels are stored first (see the level counter
          // above), so a parent should already have been added to the
          // mesh by the time its children are read.
          Elem * parent = (parent_id == DofObject::invalid_id) ?
            nullptr : mesh.elem_ptr(parent_id);

          auto elem = Elem::build(elem_type, parent);

          elem->set_id() = e;
#ifdef LIBMESH_ENABLE_UNIQUE_ID
          elem->set_unique_id(unique_id);
#endif
          elem->processor_id() = proc_id;
          elem->subdomain_id() = subdomain_id;
#ifdef LIBMESH_ENABLE_AMR
          elem->hack_p_level(p_level);

          if (parent)
            {
              parent->add_child(elem.get());
              parent->set_refinement_flag (Elem::INACTIVE);
              elem->set_refinement_flag (Elem::JUST_REFINED);
            }
#endif

          // Node ids for this Elem
          for (unsigned int n=0, n_n = elem->n_nodes(); n != n_n;
               n++, ++it)
            {
              const dof_id_type global_node_number =
                cast_int<dof_id_type>(*it);

              Node *node = mesh.query_node_ptr(global_node_number);

              if (node)
                elem->set_node(n, node);
              else
                {
                  // Placeholder node at the origin; the true (x,y,z)
                  // is filled in later by read_serialized_nodes().
                  std::unique_ptr<Node> new_node = Node::build(Point(), global_node_number);
#ifdef LIBMESH_ENABLE_UNIQUE_ID
                  // If we have to overwrite this it'll happen later
                  new_node->set_unique_id(n_elem + global_node_number);
#endif
                  elem->set_node(n, mesh.add_node(std::move(new_node)));
                }
            }

          elems_of_dimension[elem->dim()] = true;
          Elem * added_elem = mesh.add_elem(std::move(elem));

          // Make sure that the Elem we just added to the Mesh has
          // space for the required number of extra integers. Note
          // that we have to add the Elem to the Mesh prior to
          // accessing any of its extra integers, since otherwise the
          // Elem does not know that it should have extra integers...
          libmesh_assert_equal_to(n_elem_integers, added_elem->n_extra_integers());

          // Extra integers for this Elem
          for (unsigned int ei=0; ei<n_elem_integers; ++ei)
            {
              auto extra_int = cast_int<dof_id_type>(*it++);
              added_elem->set_extra_integer(ei, extra_int);
            }
        }
    }

  // Set the mesh dimension to the largest encountered for an element
  for (unsigned char i=0; i!=4; ++i)
    if (elems_of_dimension[i])
      mesh.set_mesh_dimension(i);

#if LIBMESH_DIM < 3
  libmesh_error_msg_if(mesh.mesh_dimension() > LIBMESH_DIM,
                       "Cannot open dimension "
                       << mesh.mesh_dimension()
                       << " mesh file when configured without "
                       << mesh.mesh_dimension()
                       << "D support.");
#endif
}
1984 :
1985 :
1986 :
// Read the node data of a serialized mesh: (x,y,z) coordinates, then
// (for 0.9.6+ files, when flagged) node unique ids, then (for 1.8.0+
// files) any extra node integers.  Each section is read block-wise by
// processor 0 and broadcast; every processor then picks out the values
// belonging to the placeholder nodes it created while reading the
// connectivity.
//
// \param io        Xdr stream, open for reading, positioned at the
//                  node section.
// \param n_nodes   total number of nodes stored in the file.
// \param meta_data file header metadata; entry 11 holds the number of
//                  extra node integers (1.8.0+ files).
void
XdrIO::read_serialized_nodes (Xdr & io,
                              const dof_id_type n_nodes,
                              const std::vector<new_header_id_type> & meta_data)
{
  libmesh_assert (io.reading());

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  if (!mesh.n_nodes()) return;

  // At this point the elements have been read from file and placeholder nodes
  // have been assigned. These nodes, however, do not have the proper (x,y,z)
  // locations or unique_id values. This method will read all the
  // nodes from disk, and each processor can then grab the individual
  // values it needs.

  // If the file includes unique ids for nodes (as indicated by a
  // flag in 0.9.6+ files), those will be read next.

  // build up a list of the nodes contained in our local mesh. These are the nodes
  // stored on the local processor whose (x,y,z) and unique_id values
  // need to be corrected.
  std::vector<dof_id_type> needed_nodes;
  needed_nodes.reserve (mesh.n_nodes());
  {
    for (auto & node : mesh.node_ptr_range())
      needed_nodes.push_back(node->id());

    // Sorted ids allow the equal_range() searches below to use an
    // ever-advancing lower bound.
    std::sort (needed_nodes.begin(), needed_nodes.end());

    // We should not have any duplicate node->id()s
    libmesh_assert (std::unique(needed_nodes.begin(), needed_nodes.end()) == needed_nodes.end());
  }

  // Version 1.8.0+ introduces node integers
  const std::size_t n_node_integers_index = 11;
  new_header_id_type n_node_integers = 0;
  if (version_at_least_1_8_0())
    {
      libmesh_assert_greater_equal(meta_data.size(), 13);
      n_node_integers = meta_data[n_node_integers_index];
    }

  // Get the nodes in blocks.
  std::vector<Real> coords;
  // pos.first doubles as a persistent lower bound across blocks.
  std::pair<std::vector<dof_id_type>::iterator,
            std::vector<dof_id_type>::iterator> pos;
  pos.first = needed_nodes.begin();

  // Broadcast node coordinates
  for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
    {
      first_node = blk*io_blksize;
      last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));

      // Three coordinates per node are always stored.
      coords.resize(3*(last_node - first_node));

      if (this->processor_id() == 0)
        io.data_stream (coords.empty() ? nullptr : coords.data(),
                        cast_int<unsigned int>(coords.size()));

      // For large numbers of processors the majority of processors at any given
      // block may not actually need these data. It may be worth profiling this,
      // although it is expected that disk IO will be the bottleneck
      this->comm().broadcast (coords);

      for (std::size_t n=first_node, idx=0; n<last_node; n++, idx+=3)
        {
          // first see if we need this node. use pos.first as a smart lower
          // bound, this will ensure that the size of the searched range
          // decreases as we match nodes.
          pos = std::equal_range (pos.first, needed_nodes.end(), n);

          if (pos.first != pos.second) // we need this node.
            {
              libmesh_assert_equal_to (*pos.first, n);
              libmesh_assert(!libmesh_isnan(coords[idx+0]));
              libmesh_assert(!libmesh_isnan(coords[idx+1]));
              libmesh_assert(!libmesh_isnan(coords[idx+2]));
              mesh.node_ref(cast_int<dof_id_type>(n)) =
                Point (coords[idx+0],
                       coords[idx+1],
                       coords[idx+2]);

            }
        }
    }

  // Check for node unique ids
  unsigned short read_unique_ids = false;

  if (version_at_least_0_9_6())
    {
      if (this->processor_id() == 0)
        io.data (read_unique_ids);

      this->comm().broadcast (read_unique_ids);
    }

  // If no node unique ids are in the file, well, we already
  // assigned our own ... *but*, our ids might conflict with the
  // file-assigned element unique ids. Let's check on that.
  // Should be easy since we're serialized.
  if (!read_unique_ids)
    {
#ifdef LIBMESH_ENABLE_UNIQUE_ID
      unique_id_type max_elem_unique_id = 0;
      for (auto & elem : mesh.element_ptr_range())
        max_elem_unique_id = std::max(max_elem_unique_id,
                                      elem->unique_id()+1);
      if (max_elem_unique_id > mesh.n_elem())
        {
          // Shift every node unique id past the element unique id range
          // to avoid collisions.
          for (auto & node : mesh.node_ptr_range())
            node->set_unique_id(max_elem_unique_id + node->id());
        }
      mesh.set_next_unique_id(max_elem_unique_id + mesh.n_nodes());
#endif
    }
  else
    {
      // Unique ids are stored at the width given by _field_width
      // (4 or 8 bytes).
      std::vector<uint32_t> unique_32;
      std::vector<uint64_t> unique_64;

      // We're starting over from node 0 again
      pos.first = needed_nodes.begin();

      for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
        {
          first_node = blk*io_blksize;
          last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));

          libmesh_assert((_field_width == 8) || (_field_width == 4));

          if (_field_width == 8)
            unique_64.resize(last_node - first_node);
          else
            unique_32.resize(last_node - first_node);

          // Note: the file read happens even without unique id support
          // enabled, to keep the stream position correct.
          if (this->processor_id() == 0)
            {
              if (_field_width == 8)
                io.data_stream (unique_64.empty() ? nullptr : unique_64.data(),
                                cast_int<unsigned int>(unique_64.size()));
              else
                io.data_stream (unique_32.empty() ? nullptr : unique_32.data(),
                                cast_int<unsigned int>(unique_32.size()));
            }

#ifdef LIBMESH_ENABLE_UNIQUE_ID
          if (_field_width == 8)
            this->comm().broadcast (unique_64);
          else
            this->comm().broadcast (unique_32);

          for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
            {
              // first see if we need this node. use pos.first as a smart lower
              // bound, this will ensure that the size of the searched range
              // decreases as we match nodes.
              pos = std::equal_range (pos.first, needed_nodes.end(), n);

              if (pos.first != pos.second) // we need this node.
                {
                  libmesh_assert_equal_to (*pos.first, n);
                  if (_field_width == 8)
                    mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id(unique_64[idx]);
                  else
                    mesh.node_ref(cast_int<dof_id_type>(n)).set_unique_id(unique_32[idx]);
                }
            }
#endif // LIBMESH_ENABLE_UNIQUE_ID
        }
    }

  // Read extra node integers (if present) in chunks on processor 0 and broadcast to
  // all other procs. For a ReplicatedMesh, all procs need to know about all Nodes'
  // extra integers. For a DistributedMesh, this broadcasting will transmit some
  // redundant information.
  if (n_node_integers)
    {
      std::vector<dof_id_type> extra_integers;

      // We're starting over from node 0 again
      pos.first = needed_nodes.begin();

      for (std::size_t blk=0, first_node=0, last_node=0; last_node<n_nodes; blk++)
        {
          first_node = blk*io_blksize;
          last_node = std::min((blk+1)*io_blksize, std::size_t(n_nodes));

          // n_node_integers values are stored per node.
          extra_integers.resize((last_node - first_node) * n_node_integers);

          // Read in block of node integers on proc 0 only...
          if (this->processor_id() == 0)
            io.data_stream (extra_integers.empty() ? nullptr : extra_integers.data(),
                            cast_int<unsigned int>(extra_integers.size()));

          // ... and broadcast it to all other procs.
          this->comm().broadcast (extra_integers);

          for (std::size_t n=first_node, idx=0; n<last_node; n++, idx++)
            {
              // first see if we need this node. use pos.first as a smart lower
              // bound, this will ensure that the size of the searched range
              // decreases as we match nodes.
              pos = std::equal_range (pos.first, needed_nodes.end(), n);

              if (pos.first != pos.second) // we need this node.
                {
                  libmesh_assert_equal_to (*pos.first, n);

                  Node & node_ref = mesh.node_ref(cast_int<dof_id_type>(n));
                  for (unsigned int i=0; i != n_node_integers; ++i)
                    node_ref.set_extra_integer(i, extra_integers[n_node_integers*idx + i]);
                }
            }
        } // end for (block-based extra integer reads)
    } // end if (n_node_integers)
}
2208 :
2209 :
2210 :
// Common implementation for reading side, edge, and shellface boundary
// conditions.  Each BC record is a triple (element id, side/edge/
// shellface index, boundary id).  Records are read block-wise on
// processor 0, broadcast, and applied to whichever elements each
// processor can query locally.
//
// \param io        Xdr stream, open for reading.
// \param type_size dummy value fixing T, the on-disk integer width.
// \param bc_type   one of "side", "edge", or "shellface"; selects which
//                  BoundaryInfo add_* call is made.
template <typename T>
void XdrIO::read_serialized_bcs_helper (Xdr & io, T /*type_size*/, const std::string bc_type)
{
  // "n/a" means no boundary condition data was written for this mesh.
  if (this->boundary_condition_file_name() == "n/a") return;

  libmesh_assert (io.reading());

  // convenient reference to our mesh
  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // and our boundary info object
  BoundaryInfo & boundary_info = mesh.get_boundary_info();

  // A boundary-name section (version 0.9.2+) precedes the BC records;
  // the call is a no-op for older files.
  read_serialized_bc_names(io, boundary_info, true); // sideset names

  std::vector<T> input_buffer;

  // Total number of BC records, read by rank 0 and broadcast.
  new_header_id_type n_bcs=0;
  if (this->processor_id() == 0)
    {
      if (this->version_at_least_1_3_0())
        io.data (n_bcs);
      else
        {
          // Older files stored the count with the narrower header type.
          old_header_id_type temp;
          io.data (temp);
          n_bcs = temp;
        }
    }
  this->comm().broadcast (n_bcs);

  for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_bcs; blk++)
    {
      first_bc = blk*io_blksize;
      last_bc = std::min((blk+1)*io_blksize, std::size_t(n_bcs));

      // Three integers per BC record.
      input_buffer.resize (3*(last_bc - first_bc));

      if (this->processor_id() == 0)
        io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
                        cast_int<unsigned int>(input_buffer.size()));

      this->comm().broadcast (input_buffer);

      // Look for BCs in this block for all the level-0 elements we have
      // (not just local ones). Do this by checking all entries for
      // IDs matching an element we can query.
      // We cannot rely on nullptr neighbors at this point since the neighbor
      // data structure has not been initialized.
      for (std::size_t idx=0, ibs=input_buffer.size(); idx<ibs; idx+=3)
        {
          const dof_id_type dof_id =
            cast_int<dof_id_type>(input_buffer[idx+0]);
          const unsigned short side =
            cast_int<unsigned short>(input_buffer[idx+1]);
          const boundary_id_type bc_id =
            cast_int<boundary_id_type>(input_buffer[idx+2]);

          const Elem * elem = mesh.query_elem_ptr(dof_id);
          if (!elem)
            continue;

          if (bc_type == "side")
            {
              libmesh_assert_less (side, elem->n_sides());
              boundary_info.add_side (elem, side, bc_id);
            }
          else if (bc_type == "edge")
            {
              libmesh_assert_less (side, elem->n_edges());
              boundary_info.add_edge (elem, side, bc_id);
            }
          else if (bc_type == "shellface")
            {
              // Shell face IDs can only be 0 or 1.
              libmesh_assert_less(side, 2);

              boundary_info.add_shellface (elem, side, bc_id);
            }
          else
            {
              libmesh_error_msg("bc_type not recognized: " + bc_type);
            }
        }
      input_buffer.clear();
    }
}
2299 :
2300 :
2301 :
// Read side boundary conditions by delegating to the generic BC helper
// with bc_type == "side".
template <typename T>
void XdrIO::read_serialized_side_bcs (Xdr & io, T type_size)
{
  read_serialized_bcs_helper(io, type_size, "side");
}
2307 :
2308 :
2309 :
// Read edge boundary conditions by delegating to the generic BC helper
// with bc_type == "edge".
template <typename T>
void XdrIO::read_serialized_edge_bcs (Xdr & io, T type_size)
{
  read_serialized_bcs_helper(io, type_size, "edge");
}
2315 :
2316 :
2317 :
// Read shell-face boundary conditions by delegating to the generic BC
// helper with bc_type == "shellface".
template <typename T>
void XdrIO::read_serialized_shellface_bcs (Xdr & io, T type_size)
{
  read_serialized_bcs_helper(io, type_size, "shellface");
}
2323 :
2324 :
2325 :
2326 : template <typename T>
2327 2866 : void XdrIO::read_serialized_nodesets (Xdr & io, T /*type_size*/)
2328 : {
2329 2866 : if (this->boundary_condition_file_name() == "n/a") return;
2330 :
2331 64 : libmesh_assert (io.reading());
2332 :
2333 : // convenient reference to our mesh
2334 2202 : MeshBase & mesh = MeshInput<MeshBase>::mesh();
2335 :
2336 : // and our boundary info object
2337 64 : BoundaryInfo & boundary_info = mesh.get_boundary_info();
2338 :
2339 : // Version 0.9.2+ introduces unique ids
2340 2202 : read_serialized_bc_names(io, boundary_info, false); // nodeset names
2341 :
2342 128 : std::vector<T> input_buffer;
2343 :
2344 2202 : new_header_id_type n_nodesets=0;
2345 2266 : if (this->processor_id() == 0)
2346 : {
2347 371 : if (this->version_at_least_1_3_0())
2348 227 : io.data (n_nodesets);
2349 : else
2350 : {
2351 : old_header_id_type temp;
2352 144 : io.data (temp);
2353 144 : n_nodesets = temp;
2354 : }
2355 : }
2356 2202 : this->comm().broadcast (n_nodesets);
2357 :
2358 3410 : for (std::size_t blk=0, first_bc=0, last_bc=0; last_bc<n_nodesets; blk++)
2359 : {
2360 1208 : first_bc = blk*io_blksize;
2361 1208 : last_bc = std::min((blk+1)*io_blksize, std::size_t(n_nodesets));
2362 :
2363 1208 : input_buffer.resize (2*(last_bc - first_bc));
2364 :
2365 1244 : if (this->processor_id() == 0)
2366 406 : io.data_stream (input_buffer.empty() ? nullptr : input_buffer.data(),
2367 : cast_int<unsigned int>(input_buffer.size()));
2368 :
2369 1208 : this->comm().broadcast (input_buffer);
2370 :
2371 : // Look for BCs in this block for all nodes we have (not just
2372 : // local ones). Do this by checking all entries for
2373 : // IDs matching a node we can query.
2374 71830 : for (std::size_t idx=0, ibs=input_buffer.size(); idx<ibs; idx+=2)
2375 : {
2376 : const dof_id_type dof_id =
2377 72642 : cast_int<dof_id_type>(input_buffer[idx+0]);
2378 : const boundary_id_type bc_id =
2379 72642 : cast_int<boundary_id_type>(input_buffer[idx+1]);
2380 :
2381 70622 : const Node * node = mesh.query_node_ptr(dof_id);
2382 70622 : if (node)
2383 70622 : boundary_info.add_node (node, bc_id);
2384 : }
2385 36 : input_buffer.clear();
2386 : }
2387 : }
2388 :
2389 :
2390 :
2391 7387 : void XdrIO::read_serialized_bc_names(Xdr & io, BoundaryInfo & info, bool is_sideset)
2392 : {
2393 7387 : const bool read_entity_info = version_at_least_0_9_2();
2394 7387 : const bool use_new_header_type (this->version_at_least_1_3_0());
2395 7387 : if (read_entity_info)
2396 : {
2397 7104 : new_header_id_type n_boundary_names = 0;
2398 208 : std::vector<new_header_id_type> boundary_ids;
2399 320 : std::vector<std::string> boundary_names;
2400 :
2401 : // Read the sideset names
2402 7312 : if (this->processor_id() == 0)
2403 : {
2404 1196 : if (use_new_header_type)
2405 908 : io.data(n_boundary_names);
2406 : else
2407 : {
2408 : old_header_id_type temp;
2409 288 : io.data(temp);
2410 288 : n_boundary_names = temp;
2411 : }
2412 :
2413 1196 : boundary_names.resize(n_boundary_names);
2414 :
2415 1196 : if (n_boundary_names)
2416 : {
2417 620 : if (use_new_header_type)
2418 620 : io.data(boundary_ids);
2419 : else
2420 : {
2421 0 : std::vector<old_header_id_type> temp(n_boundary_names);
2422 0 : io.data(temp);
2423 0 : boundary_ids.assign(temp.begin(), temp.end());
2424 : }
2425 620 : io.data(boundary_names);
2426 : }
2427 : }
2428 :
2429 : // Broadcast the boundary names to all processors
2430 7104 : this->comm().broadcast(n_boundary_names);
2431 7104 : if (n_boundary_names == 0)
2432 192 : return;
2433 :
2434 3696 : boundary_ids.resize(n_boundary_names);
2435 3696 : boundary_names.resize(n_boundary_names);
2436 3696 : this->comm().broadcast(boundary_ids);
2437 3696 : this->comm().broadcast(boundary_names);
2438 :
2439 : // Reassemble the named boundary information
2440 3696 : std::map<boundary_id_type, std::string> & boundary_map = is_sideset ?
2441 112 : info.set_sideset_name_map() : info.set_nodeset_name_map();
2442 :
2443 18480 : for (unsigned int i=0; i<n_boundary_names; ++i)
2444 16128 : boundary_map.emplace(cast_int<boundary_id_type>(boundary_ids[i]), boundary_names[i]);
2445 6688 : }
2446 : }
2447 :
2448 :
2449 :
2450 43172 : void XdrIO::pack_element (std::vector<xdr_id_type> & conn,
2451 : const Elem * elem,
2452 : const dof_id_type parent_id,
2453 : const dof_id_type parent_pid,
2454 : const new_header_id_type n_elem_integers) const
2455 : {
2456 4260 : libmesh_assert(elem);
2457 4260 : libmesh_assert_equal_to (elem->n_nodes(), Elem::type_to_n_nodes_map[elem->type()]);
2458 :
2459 43172 : conn.push_back(elem->n_nodes());
2460 :
2461 43172 : conn.push_back (elem->type());
2462 :
2463 : // In version 0.7.0+ "id" is stored but is not used. In version 0.9.2+
2464 : // we will store unique_id instead, therefore there is no need to
2465 : // check for the older version when writing the unique_id.
2466 43172 : conn.push_back (elem->unique_id());
2467 :
2468 43172 : if (parent_id != DofObject::invalid_id)
2469 : {
2470 20336 : conn.push_back (parent_id);
2471 2354 : libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_id);
2472 20336 : conn.push_back (parent_pid);
2473 : }
2474 :
2475 43172 : conn.push_back (elem->processor_id());
2476 43172 : conn.push_back (elem->subdomain_id());
2477 :
2478 : #ifdef LIBMESH_ENABLE_AMR
2479 43172 : conn.push_back (elem->p_level());
2480 : #endif
2481 :
2482 263912 : for (auto n : elem->node_index_range())
2483 241785 : conn.push_back (elem->node_id(n));
2484 :
2485 : // Write extra elem integers to connectivity array
2486 48548 : for (unsigned int i=0; i != n_elem_integers; ++i)
2487 5965 : conn.push_back(elem->get_extra_integer(i));
2488 43172 : }
2489 :
2490 25550 : bool XdrIO::version_at_least_0_9_2() const
2491 : {
2492 : return
2493 50257 : (this->version().find("0.9.2") != std::string::npos) ||
2494 41809 : (this->version().find("0.9.6") != std::string::npos) ||
2495 33662 : (this->version().find("1.1.0") != std::string::npos) ||
2496 57261 : (this->version().find("1.3.0") != std::string::npos) ||
2497 15994 : (this->version().find("1.8.0") != std::string::npos);
2498 : }
2499 :
2500 3512 : bool XdrIO::version_at_least_0_9_6() const
2501 : {
2502 : return
2503 5607 : (this->version().find("0.9.6") != std::string::npos) ||
2504 4350 : (this->version().find("1.1.0") != std::string::npos) ||
2505 7586 : (this->version().find("1.3.0") != std::string::npos) ||
2506 2093 : (this->version().find("1.8.0") != std::string::npos);
2507 : }
2508 :
2509 3512 : bool XdrIO::version_at_least_1_1_0() const
2510 : {
2511 : return
2512 6910 : (this->version().find("1.1.0") != std::string::npos) ||
2513 6748 : (this->version().find("1.3.0") != std::string::npos) ||
2514 3350 : (this->version().find("1.8.0") != std::string::npos);
2515 : }
2516 :
2517 18520 : bool XdrIO::version_at_least_1_3_0() const
2518 : {
2519 : return
2520 35192 : (this->version().find("1.3.0") != std::string::npos) ||
2521 17376 : (this->version().find("1.8.0") != std::string::npos);
2522 : }
2523 :
2524 7627 : bool XdrIO::version_at_least_1_8_0() const
2525 : {
2526 : return
2527 7627 : (this->version().find("1.8.0") != std::string::npos);
2528 : }
2529 :
2530 : } // namespace libMesh
|