libMesh
system_io.C
Go to the documentation of this file.
1 // The libMesh Finite Element Library.
2 // Copyright (C) 2002-2019 Benjamin S. Kirk, John W. Peterson, Roy H. Stogner
3 
4 // This library is free software; you can redistribute it and/or
5 // modify it under the terms of the GNU Lesser General Public
6 // License as published by the Free Software Foundation; either
7 // version 2.1 of the License, or (at your option) any later version.
8 
9 // This library is distributed in the hope that it will be useful,
10 // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 // Lesser General Public License for more details.
13 
14 // You should have received a copy of the GNU Lesser General Public
15 // License along with this library; if not, write to the Free Software
16 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 
18 
19 #include "libmesh/libmesh_common.h"
20 #include "libmesh/parallel.h"
21 
22 // C++ Includes
23 #include <cstdio> // for std::sprintf
24 #include <set>
25 #include <numeric> // for std::partial_sum
26 
27 // Local Include
28 #include "libmesh/libmesh_version.h"
29 #include "libmesh/system.h"
30 #include "libmesh/mesh_base.h"
31 #include "libmesh/elem.h"
32 #include "libmesh/xdr_cxx.h"
33 #include "libmesh/numeric_vector.h"
34 #include "libmesh/dof_map.h"
35 #include "libmesh/auto_ptr.h" // libmesh_make_unique
36 
37 
38 // Anonymous namespace for implementation details.
39 namespace {
40 
41 using libMesh::DofObject;
42 using libMesh::Number;
43 using libMesh::cast_int;
44 
// Comments:
// ---------
// - The max_io_blksize governs how many nodes or elements will be
//   treated as a single block when performing parallel IO on large
//   systems.
// - This parameter only loosely affects the size of the actual IO
//   buffer as this depends on the number of components a given
//   variable has for the nodes/elements in the block.
// - When reading/writing each processor uses an ID map which is
//   3*io_blksize*sizeof(dof_id_type) bytes long, so with unsigned int
//   and io_blksize=256000 we would expect that buffer alone to be
//   ~3Mb.
// - In general, an increase in max_io_blksize should increase the
//   efficiency of large parallel read/writes by reducing the number
//   of MPI messages at the expense of memory.
// - If the library exhausts memory during IO you might reduce this
//   parameter.

// constexpr: this is a compile-time constant; no runtime initialization.
constexpr std::size_t max_io_blksize = 256000;
64 
65 
71 struct CompareDofObjectsByID
72 {
73  bool operator()(const DofObject * a,
74  const DofObject * b) const
75  {
76  libmesh_assert (a);
77  libmesh_assert (b);
78 
79  return a->id() < b->id();
80  }
81 };
82 
86 template <typename InValType>
87 class ThreadedIO
88 {
89 private:
90  libMesh::Xdr & _io;
91  std::vector<InValType> & _data;
92 
93 public:
94  ThreadedIO (libMesh::Xdr & io, std::vector<InValType> & data) :
95  _io(io),
96  _data(data)
97  {}
98 
99  void operator()()
100  {
101  if (_data.empty()) return;
102  _io.data_stream (_data.data(), cast_int<unsigned int>(_data.size()));
103  }
104 };
105 }
106 
107 
108 namespace libMesh
109 {
110 
111 
112 // ------------------------------------------------------------
113 // System class implementation
115  const std::string & version,
116  const bool read_header_in,
117  const bool read_additional_data,
118  const bool read_legacy_format)
119 {
120  // This method implements the input of a
121  // System object, embedded in the output of
122  // an EquationSystems<T_sys>. This warrants some
123  // documentation. The output file essentially
124  // consists of 5 sections:
125  //
126  // for this system
127  //
128  // 5.) The number of variables in the system (unsigned int)
129  //
130  // for each variable in the system
131  //
132  // 6.) The name of the variable (string)
133  //
134  // 6.1.) Variable subdomains
135  //
136  // 7.) Combined in an FEType:
137  // - The approximation order(s) of the variable
138  // (Order Enum, cast to int/s)
139  // - The finite element family/ies of the variable
140  // (FEFamily Enum, cast to int/s)
141  //
142  // end variable loop
143  //
144  // 8.) The number of additional vectors (unsigned int),
145  //
146  // for each additional vector in the system object
147  //
148  // 9.) the name of the additional vector (string)
149  //
150  // end system
151  libmesh_assert (io.reading());
152 
153  // Possibly clear data structures and start from scratch.
154  if (read_header_in)
155  this->clear ();
156 
157  // Figure out if we need to read infinite element information.
158  // This will be true if the version string contains " with infinite elements"
159  const bool read_ifem_info =
160  (version.rfind(" with infinite elements") < version.size()) ||
161  libMesh::on_command_line ("--read-ifem-systems");
162 
163 
164  {
165  // 5.)
166  // Read the number of variables in the system
167  unsigned int nv=0;
168  if (this->processor_id() == 0)
169  io.data (nv);
170  this->comm().broadcast(nv);
171 
172  _written_var_indices.clear();
173  _written_var_indices.resize(nv, 0);
174 
175  for (unsigned int var=0; var<nv; var++)
176  {
177  // 6.)
178  // Read the name of the var-th variable
179  std::string var_name;
180  if (this->processor_id() == 0)
181  io.data (var_name);
182  this->comm().broadcast(var_name);
183 
184  // 6.1.)
185  std::set<subdomain_id_type> domains;
186  if (io.version() >= LIBMESH_VERSION_ID(0,7,2))
187  {
188  std::vector<subdomain_id_type> domain_array;
189  if (this->processor_id() == 0)
190  io.data (domain_array);
191  for (const auto & id : domain_array)
192  domains.insert(id);
193  }
194  this->comm().broadcast(domains);
195 
196  // 7.)
197  // Read the approximation order(s) of the var-th variable
198  int order=0;
199  if (this->processor_id() == 0)
200  io.data (order);
201  this->comm().broadcast(order);
202 
203 
204  // do the same for infinite element radial_order
205  int rad_order=0;
206  if (read_ifem_info)
207  {
208  if (this->processor_id() == 0)
209  io.data(rad_order);
210  this->comm().broadcast(rad_order);
211  }
212 
213  // Read the finite element type of the var-th variable
214  int fam=0;
215  if (this->processor_id() == 0)
216  io.data (fam);
217  this->comm().broadcast(fam);
218  FEType type;
219  type.order = static_cast<Order>(order);
220  type.family = static_cast<FEFamily>(fam);
221 
222  // Check for incompatibilities. The shape function indexing was
223  // changed for the monomial and xyz finite element families to
224  // simplify extension to arbitrary p. The consequence is that
225  // old restart files will not be read correctly. This is expected
226  // to be an unlikely occurence, but catch it anyway.
227  if (read_legacy_format)
228  if ((type.family == MONOMIAL || type.family == XYZ) &&
229  ((type.order.get_order() > 2 && this->get_mesh().mesh_dimension() == 2) ||
230  (type.order.get_order() > 1 && this->get_mesh().mesh_dimension() == 3)))
231  {
232  libmesh_here();
233  libMesh::out << "*****************************************************************\n"
234  << "* WARNING: reading a potentially incompatible restart file!!! *\n"
235  << "* contact [email protected] for more details *\n"
236  << "*****************************************************************"
237  << std::endl;
238  }
239 
240  // Read additional information for infinite elements
241  int radial_fam=0;
242  int i_map=0;
243  if (read_ifem_info)
244  {
245  if (this->processor_id() == 0)
246  io.data (radial_fam);
247  this->comm().broadcast(radial_fam);
248  if (this->processor_id() == 0)
249  io.data (i_map);
250  this->comm().broadcast(i_map);
251  }
252 
253 #ifdef LIBMESH_ENABLE_INFINITE_ELEMENTS
254 
255  type.radial_order = static_cast<Order>(rad_order);
256  type.radial_family = static_cast<FEFamily>(radial_fam);
257  type.inf_map = static_cast<InfMapType>(i_map);
258 
259 #endif
260 
261  if (read_header_in)
262  {
263  if (domains.empty())
264  _written_var_indices[var] = this->add_variable (var_name, type);
265  else
266  _written_var_indices[var] = this->add_variable (var_name, type, &domains);
267  }
268  else
269  _written_var_indices[var] = this->variable_number(var_name);
270  }
271  }
272 
273  // 8.)
274  // Read the number of additional vectors.
275  unsigned int nvecs=0;
276  if (this->processor_id() == 0)
277  io.data (nvecs);
278  this->comm().broadcast(nvecs);
279 
280  // If nvecs > 0, this means that write_additional_data
281  // was true when this file was written. We will need to
282  // make use of this fact later.
283  this->_additional_data_written = nvecs;
284 
285  for (unsigned int vec=0; vec<nvecs; vec++)
286  {
287  // 9.)
288  // Read the name of the vec-th additional vector
289  std::string vec_name;
290  if (this->processor_id() == 0)
291  io.data (vec_name);
292  this->comm().broadcast(vec_name);
293 
294  if (read_additional_data)
295  {
296  // Systems now can handle adding post-initialization vectors
297  // libmesh_assert(this->_can_add_vectors);
298  // Some systems may have added their own vectors already
299  // libmesh_assert_equal_to (this->_vectors.count(vec_name), 0);
300 
301  this->add_vector(vec_name);
302  }
303  }
304 }
305 
306 
307 
#ifdef LIBMESH_ENABLE_DEPRECATED
// Reads the solution vector and any additional vectors from a serialized
// legacy-format file.  Processor 0 reads each global (node-major) vector
// and broadcasts it; every processor then reorders it into the current
// DofMap ordering.
//
// NOTE(review): the signature line and the `DofObject::invalid_id)`
// continuation lines of the split asserts were lost in extraction and
// have been reconstructed from the upstream libMesh sources.
void System::read_legacy_data (Xdr & io,
                               const bool read_additional_data)
{
  libmesh_deprecated();

  // This method implements the output of the vectors
  // contained in this System object, embedded in the
  // output of an EquationSystems<T_sys>.
  //
  // 10.) The global solution vector, re-ordered to be node-major
  //      (More on this later.)
  //
  //   for each additional vector in the object
  //
  // 11.) The global additional vector, re-ordered to be
  //      node-major (More on this later.)
  libmesh_assert (io.reading());

  // read and reordering buffers
  std::vector<Number> global_vector;
  std::vector<Number> reordered_vector;

  // 10.)
  // Read and set the solution vector
  {
    if (this->processor_id() == 0)
      io.data (global_vector);
    this->comm().broadcast(global_vector);

    // Remember that the stored vector is node-major.
    // We need to put it into whatever application-specific
    // ordering we may have using the dof_map.
    reordered_vector.resize(global_vector.size());

    libmesh_assert_equal_to (global_vector.size(), this->n_dofs());

    dof_id_type cnt=0;

    const unsigned int sys = this->number();
    const unsigned int nv = cast_int<unsigned int>
      (this->_written_var_indices.size());
    libmesh_assert_less_equal (nv, this->n_vars());

    for (unsigned int data_var=0; data_var<nv; data_var++)
      {
        const unsigned int var = _written_var_indices[data_var];

        // First reorder the nodal DOF values
        for (auto & node : this->get_mesh().node_ptr_range())
          for (auto index : IntRange<unsigned int>(0, node->n_comp(sys,var)))
            {
              libmesh_assert_not_equal_to (node->dof_number(sys, var, index),
                                           DofObject::invalid_id);

              libmesh_assert_less (cnt, global_vector.size());

              reordered_vector[node->dof_number(sys, var, index)] =
                global_vector[cnt++];
            }

        // Then reorder the element DOF values
        for (auto & elem : this->get_mesh().active_element_ptr_range())
          for (auto index : IntRange<unsigned int>(0, elem->n_comp(sys,var)))
            {
              libmesh_assert_not_equal_to (elem->dof_number(sys, var, index),
                                           DofObject::invalid_id);

              libmesh_assert_less (cnt, global_vector.size());

              reordered_vector[elem->dof_number(sys, var, index)] =
                global_vector[cnt++];
            }
      }

    *(this->solution) = reordered_vector;
  }

  // For each additional vector, simply go through the list.
  // ONLY attempt to do this IF additional data was actually
  // written to the file for this system (controlled by the
  // _additional_data_written flag).
  if (this->_additional_data_written)
    {
      const std::size_t nvecs = this->_vectors.size();

      // If the number of additional vectors written is non-zero, and
      // the number of additional vectors we have is non-zero, and
      // they don't match, then something is wrong and we can't be
      // sure we're reading data into the correct places.
      if (read_additional_data && nvecs &&
          nvecs != this->_additional_data_written)
        libmesh_error_msg
          ("Additional vectors in file do not match system");

      std::map<std::string, NumericVector<Number> *>::iterator
        pos = this->_vectors.begin();

      for (std::size_t i = 0; i != this->_additional_data_written; ++i)
        {
          // 11.)
          // Read the values of the vec-th additional vector.
          // Prior do _not_ clear, but fill with zero, since the
          // additional vectors _have_ to have the same size
          // as the solution vector
          std::fill (global_vector.begin(), global_vector.end(), libMesh::zero);

          if (this->processor_id() == 0)
            io.data (global_vector);

          // If read_additional_data==true and we have additional vectors,
          // then we will keep this vector data; otherwise we are going to
          // throw it away.
          if (read_additional_data && nvecs)
            {
              this->comm().broadcast(global_vector);

              // Remember that the stored vector is node-major.
              // We need to put it into whatever application-specific
              // ordering we may have using the dof_map.
              std::fill (reordered_vector.begin(),
                         reordered_vector.end(),
                         libMesh::zero);

              reordered_vector.resize(global_vector.size());

              libmesh_assert_equal_to (global_vector.size(), this->n_dofs());

              dof_id_type cnt=0;

              const unsigned int sys = this->number();
              const unsigned int nv = cast_int<unsigned int>
                (this->_written_var_indices.size());
              libmesh_assert_less_equal (nv, this->n_vars());

              for (unsigned int data_var=0; data_var<nv; data_var++)
                {
                  const unsigned int var = _written_var_indices[data_var];
                  // First reorder the nodal DOF values
                  for (auto & node : this->get_mesh().node_ptr_range())
                    for (auto index : IntRange<unsigned int>(0, node->n_comp(sys,var)))
                      {
                        libmesh_assert_not_equal_to (node->dof_number(sys, var, index),
                                                     DofObject::invalid_id);

                        libmesh_assert_less (cnt, global_vector.size());

                        reordered_vector[node->dof_number(sys, var, index)] =
                          global_vector[cnt++];
                      }

                  // Then reorder the element DOF values
                  for (auto & elem : this->get_mesh().active_element_ptr_range())
                    for (auto index : IntRange<unsigned int>(0, elem->n_comp(sys,var)))
                      {
                        libmesh_assert_not_equal_to (elem->dof_number(sys, var, index),
                                                     DofObject::invalid_id);

                        libmesh_assert_less (cnt, global_vector.size());

                        reordered_vector[elem->dof_number(sys, var, index)] =
                          global_vector[cnt++];
                      }
                }

              // use the overloaded operator=(std::vector) to assign the values
              *(pos->second) = reordered_vector;
            }

          // If we've got vectors then we need to be iterating through
          // those too
          if (pos != this->_vectors.end())
            ++pos;
        }
    } // end if (_additional_data_written)
}
#endif
488 
489 
490 
491 template <typename InValType>
493  const bool read_additional_data)
494 {
514  // PerfLog pl("IO Performance",false);
515  // pl.push("read_parallel_data");
516  dof_id_type total_read_size = 0;
517 
518  libmesh_assert (io.reading());
519  libmesh_assert (io.is_open());
520 
521  // build the ordered nodes and element maps.
522  // when writing/reading parallel files we need to iterate
523  // over our nodes/elements in order of increasing global id().
524  // however, this is not guaranteed to be ordering we obtain
525  // by using the node_iterators/element_iterators directly.
526  // so build a set, sorted by id(), that provides the ordering.
527  // further, for memory economy build the set but then transfer
528  // its contents to vectors, which will be sorted.
529  std::vector<const DofObject *> ordered_nodes, ordered_elements;
530  {
531  std::set<const DofObject *, CompareDofObjectsByID>
532  ordered_nodes_set (this->get_mesh().local_nodes_begin(),
533  this->get_mesh().local_nodes_end());
534 
535  ordered_nodes.insert(ordered_nodes.end(),
536  ordered_nodes_set.begin(),
537  ordered_nodes_set.end());
538  }
539  {
540  std::set<const DofObject *, CompareDofObjectsByID>
541  ordered_elements_set (this->get_mesh().local_elements_begin(),
542  this->get_mesh().local_elements_end());
543 
544  ordered_elements.insert(ordered_elements.end(),
545  ordered_elements_set.begin(),
546  ordered_elements_set.end());
547  }
548 
549  // std::vector<Number> io_buffer;
550  std::vector<InValType> io_buffer;
551 
552  // 9.)
553  //
554  // Actually read the solution components
555  // for the ith system to disk
556  io.data(io_buffer);
557 
558  total_read_size += cast_int<dof_id_type>(io_buffer.size());
559 
560  const unsigned int sys_num = this->number();
561  const unsigned int nv = cast_int<unsigned int>
562  (this->_written_var_indices.size());
563  libmesh_assert_less_equal (nv, this->n_vars());
564 
565  dof_id_type cnt=0;
566 
567  // Loop over each non-SCALAR variable and each node, and read out the value.
568  for (unsigned int data_var=0; data_var<nv; data_var++)
569  {
570  const unsigned int var = _written_var_indices[data_var];
571  if (this->variable(var).type().family != SCALAR)
572  {
573  // First read the node DOF values
574  for (const auto & node : ordered_nodes)
575  for (auto comp : IntRange<unsigned int>(0, node->n_comp(sys_num,var)))
576  {
577  libmesh_assert_not_equal_to (node->dof_number(sys_num, var, comp),
579  libmesh_assert_less (cnt, io_buffer.size());
580  this->solution->set(node->dof_number(sys_num, var, comp), io_buffer[cnt++]);
581  }
582 
583  // Then read the element DOF values
584  for (const auto & elem : ordered_elements)
585  for (auto comp : IntRange<unsigned int>(0, elem->n_comp(sys_num,var)))
586  {
587  libmesh_assert_not_equal_to (elem->dof_number(sys_num, var, comp),
589  libmesh_assert_less (cnt, io_buffer.size());
590  this->solution->set(elem->dof_number(sys_num, var, comp), io_buffer[cnt++]);
591  }
592  }
593  }
594 
595  // Finally, read the SCALAR variables on the last processor
596  for (unsigned int data_var=0; data_var<nv; data_var++)
597  {
598  const unsigned int var = _written_var_indices[data_var];
599  if (this->variable(var).type().family == SCALAR)
600  {
601  if (this->processor_id() == (this->n_processors()-1))
602  {
603  const DofMap & dof_map = this->get_dof_map();
604  std::vector<dof_id_type> SCALAR_dofs;
605  dof_map.SCALAR_dof_indices(SCALAR_dofs, var);
606 
607  for (auto dof : SCALAR_dofs)
608  this->solution->set(dof, io_buffer[cnt++]);
609  }
610  }
611  }
612 
613  // And we're done setting solution entries
614  this->solution->close();
615 
616  // For each additional vector, simply go through the list.
617  // ONLY attempt to do this IF additional data was actually
618  // written to the file for this system (controlled by the
619  // _additional_data_written flag).
620  if (this->_additional_data_written)
621  {
622  const std::size_t nvecs = this->_vectors.size();
623 
624  // If the number of additional vectors written is non-zero, and
625  // the number of additional vectors we have is non-zero, and
626  // they don't match, then something is wrong and we can't be
627  // sure we're reading data into the correct places.
628  if (read_additional_data && nvecs &&
629  nvecs != this->_additional_data_written)
630  libmesh_error_msg
631  ("Additional vectors in file do not match system");
632 
633  std::map<std::string, NumericVector<Number> *>::const_iterator
634  pos = _vectors.begin();
635 
636  for (std::size_t i = 0; i != this->_additional_data_written; ++i)
637  {
638  cnt=0;
639  io_buffer.clear();
640 
641  // 10.)
642  //
643  // Actually read the additional vector components
644  // for the ith system from disk
645  io.data(io_buffer);
646 
647  total_read_size += cast_int<dof_id_type>(io_buffer.size());
648 
649  // If read_additional_data==true and we have additional vectors,
650  // then we will keep this vector data; otherwise we are going to
651  // throw it away.
652  if (read_additional_data && nvecs)
653  {
654  // Loop over each non-SCALAR variable and each node, and read out the value.
655  for (unsigned int data_var=0; data_var<nv; data_var++)
656  {
657  const unsigned int var = _written_var_indices[data_var];
658  if (this->variable(var).type().family != SCALAR)
659  {
660  // First read the node DOF values
661  for (const auto & node : ordered_nodes)
662  for (auto comp : IntRange<unsigned int>(0, node->n_comp(sys_num,var)))
663  {
664  libmesh_assert_not_equal_to (node->dof_number(sys_num, var, comp),
666  libmesh_assert_less (cnt, io_buffer.size());
667  pos->second->set(node->dof_number(sys_num, var, comp), io_buffer[cnt++]);
668  }
669 
670  // Then read the element DOF values
671  for (const auto & elem : ordered_elements)
672  for (auto comp : IntRange<unsigned int>(0, elem->n_comp(sys_num,var)))
673  {
674  libmesh_assert_not_equal_to (elem->dof_number(sys_num, var, comp),
676  libmesh_assert_less (cnt, io_buffer.size());
677  pos->second->set(elem->dof_number(sys_num, var, comp), io_buffer[cnt++]);
678  }
679  }
680  }
681 
682  // Finally, read the SCALAR variables on the last processor
683  for (unsigned int data_var=0; data_var<nv; data_var++)
684  {
685  const unsigned int var = _written_var_indices[data_var];
686  if (this->variable(var).type().family == SCALAR)
687  {
688  if (this->processor_id() == (this->n_processors()-1))
689  {
690  const DofMap & dof_map = this->get_dof_map();
691  std::vector<dof_id_type> SCALAR_dofs;
692  dof_map.SCALAR_dof_indices(SCALAR_dofs, var);
693 
694  for (auto dof : SCALAR_dofs)
695  pos->second->set(dof, io_buffer[cnt++]);
696  }
697  }
698  }
699 
700  // And we're done setting entries for this variable
701  pos->second->close();
702  }
703 
704  // If we've got vectors then we need to be iterating through
705  // those too
706  if (pos != this->_vectors.end())
707  ++pos;
708  }
709  }
710 
711  // const Real
712  // dt = pl.get_elapsed_time(),
713  // rate = total_read_size*sizeof(Number)/dt;
714 
715  // libMesh::err << "Read " << total_read_size << " \"Number\" values\n"
716  // << " Elapsed time = " << dt << '\n'
717  // << " Rate = " << rate/1.e6 << "(MB/sec)\n\n";
718 
719  // pl.pop("read_parallel_data");
720 }
721 
722 
723 template <typename InValType>
725  const bool read_additional_data)
726 {
727  // This method implements the input of the vectors
728  // contained in this System object, embedded in the
729  // output of an EquationSystems<T_sys>.
730  //
731  // 10.) The global solution vector, re-ordered to be node-major
732  // (More on this later.)
733  //
734  // for each additional vector in the object
735  //
736  // 11.) The global additional vector, re-ordered to be
737  // node-major (More on this later.)
738  parallel_object_only();
739  std::string comment;
740 
741  // PerfLog pl("IO Performance",false);
742  // pl.push("read_serialized_data");
743  // std::size_t total_read_size = 0;
744 
745  // 10.)
746  // Read the global solution vector
747  {
748  // total_read_size +=
749  this->read_serialized_vector<InValType>(io, this->solution.get());
750 
751  // get the comment
752  if (this->processor_id() == 0)
753  io.comment (comment);
754  }
755 
756  // 11.)
757  // Only read additional vectors if data is available, and only use
758  // that data to fill our vectors if the user requested it.
759  if (this->_additional_data_written)
760  {
761  const std::size_t nvecs = this->_vectors.size();
762 
763  // If the number of additional vectors written is non-zero, and
764  // the number of additional vectors we have is non-zero, and
765  // they don't match, then we can't read additional vectors
766  // and be sure we're reading data into the correct places.
767  if (read_additional_data && nvecs &&
768  nvecs != this->_additional_data_written)
769  libmesh_error_msg
770  ("Additional vectors in file do not match system");
771 
772  std::map<std::string, NumericVector<Number> *>::const_iterator
773  pos = _vectors.begin();
774 
775  for (std::size_t i = 0; i != this->_additional_data_written; ++i)
776  {
777  // Read data, but only put it into a vector if we've been
778  // asked to and if we have a corresponding vector to read.
779 
780  // total_read_size +=
781  this->read_serialized_vector<InValType>
782  (io, (read_additional_data && nvecs) ? pos->second : nullptr);
783 
784  // get the comment
785  if (this->processor_id() == 0)
786  io.comment (comment);
787 
788 
789  // If we've got vectors then we need to be iterating through
790  // those too
791  if (pos != this->_vectors.end())
792  ++pos;
793  }
794  }
795 
796  // const Real
797  // dt = pl.get_elapsed_time(),
798  // rate = total_read_size*sizeof(Number)/dt;
799 
800  // libMesh::out << "Read " << total_read_size << " \"Number\" values\n"
801  // << " Elapsed time = " << dt << '\n'
802  // << " Rate = " << rate/1.e6 << "(MB/sec)\n\n";
803 
804  // pl.pop("read_serialized_data");
805 }
806 
807 
808 
809 template <typename iterator_type, typename InValType>
811  const iterator_type begin,
812  const iterator_type end,
813  const InValType ,
814  Xdr & io,
815  const std::vector<NumericVector<Number> *> & vecs,
816  const unsigned int var_to_read) const
817 {
818  //-------------------------------------------------------
819  // General order: (IO format 0.7.4 & greater)
820  //
821  // for (objects ...)
822  // for (vecs ....)
823  // for (vars ....)
824  // for (comps ...)
825  //
826  // where objects are nodes or elements, sorted to be
827  // partition independent,
828  // vecs are one or more *identically distributed* solution
829  // coefficient vectors, vars are one or more variables
830  // to write, and comps are all the components for said
831  // vars on the object.
832 
833  // variables to read. Unless specified otherwise, defaults to _written_var_indices.
834  std::vector<unsigned int> vars_to_read (_written_var_indices);
835 
836  if (var_to_read != libMesh::invalid_uint)
837  {
838  vars_to_read.clear();
839  vars_to_read.push_back(var_to_read);
840  }
841 
842  const unsigned int
843  sys_num = this->number(),
844  num_vecs = cast_int<unsigned int>(vecs.size());
845  const dof_id_type
846  io_blksize = cast_int<dof_id_type>(std::min(max_io_blksize, static_cast<std::size_t>(n_objs))),
847  num_blks = cast_int<unsigned int>(std::ceil(static_cast<double>(n_objs)/
848  static_cast<double>(io_blksize)));
849 
850  libmesh_assert_less_equal (_written_var_indices.size(), this->n_vars());
851 
852  std::size_t n_read_values=0;
853 
854  std::vector<std::vector<dof_id_type>> xfer_ids(num_blks); // The global IDs and # of components for the local objects in all blocks
855  std::vector<std::vector<Number>> recv_vals(num_blks); // The raw values for the local objects in all blocks
856  std::vector<Parallel::Request>
857  id_requests(num_blks), val_requests(num_blks);
858  std::vector<Parallel::MessageTag>
859  id_tags(num_blks), val_tags(num_blks);
860 
861  // ------------------------------------------------------
862  // First pass - count the number of objects in each block
863  // traverse all the objects and figure out which block they
864  // will ultimately live in.
865  std::vector<std::size_t>
866  xfer_ids_size (num_blks,0),
867  recv_vals_size (num_blks,0);
868 
869 
870  for (iterator_type it=begin; it!=end; ++it)
871  {
872  const dof_id_type
873  id = (*it)->id(),
874  block = id/io_blksize;
875 
876  libmesh_assert_less (block, num_blks);
877 
878  xfer_ids_size[block] += 2; // for each object, we send its id, as well as the total number of components for all variables
879 
880  dof_id_type n_comp_tot=0;
881  for (const auto & var : vars_to_read)
882  n_comp_tot += (*it)->n_comp(sys_num, var); // for each variable, we will receive the nonzero components
883 
884  recv_vals_size[block] += n_comp_tot*num_vecs;
885  }
886 
887  // knowing the recv_vals_size[block] for each processor allows
888  // us to sum them and find the global size for each block.
889  std::vector<std::size_t> tot_vals_size(recv_vals_size);
890  this->comm().sum (tot_vals_size);
891 
892 
893  //------------------------------------------
894  // Collect the ids & number of values needed
895  // for all local objects, binning them into
896  // 'blocks' that will be sent to processor 0
897  for (dof_id_type blk=0; blk<num_blks; blk++)
898  {
899  // Each processor should build up its transfer buffers for its
900  // local objects in [first_object,last_object).
901  const dof_id_type
902  first_object = blk*io_blksize,
903  last_object = std::min(cast_int<dof_id_type>((blk+1)*io_blksize), n_objs);
904 
905  // convenience
906  std::vector<dof_id_type> & ids (xfer_ids[blk]);
907  std::vector<Number> & vals (recv_vals[blk]);
908 
909  // we now know the number of values we will store for each block,
910  // so we can do efficient preallocation
911  ids.clear(); ids.reserve (xfer_ids_size[blk]);
912  vals.resize(recv_vals_size[blk]);
913 
914  if (recv_vals_size[blk] != 0) // only if there are nonzero values to receive
915  for (iterator_type it=begin; it!=end; ++it)
916  if (((*it)->id() >= first_object) && // object in [first_object,last_object)
917  ((*it)->id() < last_object))
918  {
919  ids.push_back((*it)->id());
920 
921  unsigned int n_comp_tot=0;
922 
923  for (const auto & var : vars_to_read)
924  n_comp_tot += (*it)->n_comp(sys_num, var);
925 
926  ids.push_back (n_comp_tot*num_vecs);
927  }
928 
929 #ifdef LIBMESH_HAVE_MPI
930  id_tags[blk] = this->comm().get_unique_tag(100*num_blks + blk);
931  val_tags[blk] = this->comm().get_unique_tag(200*num_blks + blk);
932 
933  // nonblocking send the data for this block
934  this->comm().send (0, ids, id_requests[blk], id_tags[blk]);
935 
936  // Go ahead and post the receive too
937  this->comm().receive (0, vals, val_requests[blk], val_tags[blk]);
938 #endif
939  }
940 
941  //---------------------------------------------------
942  // Here processor 0 will read and distribute the data.
943  // We have to do this block-wise to ensure that we
944  // do not exhaust memory on processor 0.
945 
946  // give these variables scope outside the block to avoid reallocation
947  std::vector<std::vector<dof_id_type>> recv_ids (this->n_processors());
948  std::vector<std::vector<Number>> send_vals (this->n_processors());
949  std::vector<Parallel::Request> reply_requests (this->n_processors());
950  std::vector<unsigned int> obj_val_offsets; // map to traverse entry-wise rather than processor-wise
951  std::vector<Number> input_vals; // The input buffer for the current block
952  std::vector<InValType> input_vals_tmp; // The input buffer for the current block
953 
954  for (dof_id_type blk=0; blk<num_blks; blk++)
955  {
956  // Each processor should build up its transfer buffers for its
957  // local objects in [first_object,last_object).
958  const dof_id_type
959  first_object = blk*io_blksize,
960  last_object = std::min(cast_int<dof_id_type>((blk+1)*io_blksize), n_objs),
961  n_objects_blk = last_object - first_object;
962 
963  // Processor 0 has a special job. It needs to gather the requested indices
964  // in [first_object,last_object) from all processors, read the data from
965  // disk, and reply
966  if (this->processor_id() == 0)
967  {
968  // we know the input buffer size for this block and can begin reading it now
969  input_vals.resize(tot_vals_size[blk]);
970  input_vals_tmp.resize(tot_vals_size[blk]);
971 
972  // a ThreadedIO object to perform asynchronous file IO
973  ThreadedIO<InValType> threaded_io(io, input_vals_tmp);
974  Threads::Thread async_io(threaded_io);
975 
976  // offset array. this will define where each object's values
977  // map into the actual input_vals buffer. this must get
978  // 0-initialized because 0-component objects are not actually sent
979  obj_val_offsets.resize (n_objects_blk); std::fill (obj_val_offsets.begin(), obj_val_offsets.end(), 0);
980  recv_vals_size.resize(this->n_processors()); // reuse this to count how many values are going to each processor
981 
982 #ifndef NDEBUG
983  std::size_t n_vals_blk = 0;
984 #endif
985 
986  // loop over all processors and process their index request
987  for (processor_id_type comm_step=0, tnp=this->n_processors(); comm_step != tnp; ++comm_step)
988  {
989 #ifdef LIBMESH_HAVE_MPI
990  // blocking receive indices for this block, imposing no particular order on processor
991  Parallel::Status id_status (this->comm().probe (Parallel::any_source, id_tags[blk]));
992  std::vector<dof_id_type> & ids (recv_ids[id_status.source()]);
993  std::size_t & n_vals_proc (recv_vals_size[id_status.source()]);
994  this->comm().receive (id_status.source(), ids, id_tags[blk]);
995 #else
996  // straight copy without MPI
997  std::vector<dof_id_type> & ids (recv_ids[0]);
998  std::size_t & n_vals_proc (recv_vals_size[0]);
999  ids = xfer_ids[blk];
1000 #endif
1001 
1002  n_vals_proc = 0;
1003 
1004  // note its possible we didn't receive values for objects in
1005  // this block if they have no components allocated.
1006  for (std::size_t idx=0, sz=ids.size(); idx<sz; idx+=2)
1007  {
1008  const dof_id_type
1009  local_idx = ids[idx+0]-first_object,
1010  n_vals_tot_allvecs = ids[idx+1];
1011 
1012  libmesh_assert_less (local_idx, n_objects_blk);
1013 
1014  obj_val_offsets[local_idx] = n_vals_tot_allvecs;
1015  n_vals_proc += n_vals_tot_allvecs;
1016  }
1017 
1018 #ifndef NDEBUG
1019  n_vals_blk += n_vals_proc;
1020 #endif
1021  }
1022 
1023  // We need the offsets into the input_vals vector for each object.
1024  // fortunately, this is simply the partial sum of the total number
1025  // of components for each object
1026  std::partial_sum(obj_val_offsets.begin(), obj_val_offsets.end(),
1027  obj_val_offsets.begin());
1028 
1029  libmesh_assert_equal_to (n_vals_blk, obj_val_offsets.back());
1030  libmesh_assert_equal_to (n_vals_blk, tot_vals_size[blk]);
1031 
1032  // Wait for read completion
1033  async_io.join();
1034  // now copy the values back to the main vector for transfer
1035  for (auto i_val : index_range(input_vals))
1036  input_vals[i_val] = input_vals_tmp[i_val];
1037 
1038  n_read_values += input_vals.size();
1039 
1040  // pack data replies for each processor
1041  for (auto proc : IntRange<processor_id_type>(0, this->n_processors()))
1042  {
1043  const std::vector<dof_id_type> & ids (recv_ids[proc]);
1044  std::vector<Number> & vals (send_vals[proc]);
1045  const std::size_t & n_vals_proc (recv_vals_size[proc]);
1046 
1047  vals.clear(); vals.reserve(n_vals_proc);
1048 
1049  for (std::size_t idx=0, sz=ids.size(); idx<sz; idx+=2)
1050  {
1051  const dof_id_type
1052  local_idx = ids[idx+0]-first_object,
1053  n_vals_tot_allvecs = ids[idx+1];
1054 
1055  std::vector<Number>::const_iterator in_vals(input_vals.begin());
1056  if (local_idx != 0)
1057  std::advance (in_vals, obj_val_offsets[local_idx-1]);
1058 
1059  for (unsigned int val=0; val<n_vals_tot_allvecs; val++, ++in_vals)
1060  {
1061  libmesh_assert (in_vals != input_vals.end());
1062  //libMesh::out << "*in_vals=" << *in_vals << '\n';
1063  vals.push_back(*in_vals);
1064  }
1065  }
1066 
1067 #ifdef LIBMESH_HAVE_MPI
1068  // send the relevant values to this processor
1069  this->comm().send (proc, vals, reply_requests[proc], val_tags[blk]);
1070 #else
1071  recv_vals[blk] = vals;
1072 #endif
1073  }
1074  } // end processor 0 read/reply
1075 
1076  // all processors complete the (already posted) read for this block
1077  {
1078  Parallel::wait (val_requests[blk]);
1079 
1080  const std::vector<Number> & vals (recv_vals[blk]);
1081  std::vector<Number>::const_iterator val_it(vals.begin());
1082 
1083  if (!recv_vals[blk].empty()) // nonzero values to receive
1084  for (iterator_type it=begin; it!=end; ++it)
1085  if (((*it)->id() >= first_object) && // object in [first_object,last_object)
1086  ((*it)->id() < last_object))
1087  // unpack & set the values
1088  for (auto & vec : vecs)
1089  for (const auto & var : vars_to_read)
1090  {
1091  const unsigned int n_comp = (*it)->n_comp(sys_num, var);
1092 
1093  for (unsigned int comp=0; comp<n_comp; comp++, ++val_it)
1094  {
1095  const dof_id_type dof_index = (*it)->dof_number (sys_num, var, comp);
1096  libmesh_assert (val_it != vals.end());
1097  if (vec)
1098  {
1099  libmesh_assert_greater_equal (dof_index, vec->first_local_index());
1100  libmesh_assert_less (dof_index, vec->last_local_index());
1101  //libMesh::out << "dof_index, *val_it = \t" << dof_index << ", " << *val_it << '\n';
1102  vec->set (dof_index, *val_it);
1103  }
1104  }
1105  }
1106  }
1107 
1108  // processor 0 needs to make sure all replies have been handed off
1109  if (this->processor_id () == 0)
1110  Parallel::wait(reply_requests);
1111  }
1112 
1113  Parallel::wait(id_requests);
1114 
1115  return n_read_values;
1116 }
1117 
1118 
1119 
1120 unsigned int System::read_SCALAR_dofs (const unsigned int var,
1121  Xdr & io,
1122  NumericVector<Number> * vec) const
1123 {
1124  unsigned int n_assigned_vals = 0; // the number of values assigned, this will be returned.
1125 
1126  // Processor 0 will read the block from the buffer stream and send it to the last processor
1127  const unsigned int n_SCALAR_dofs = this->variable(var).type().order.get_order();
1128  std::vector<Number> input_buffer(n_SCALAR_dofs);
1129  if (this->processor_id() == 0)
1130  io.data_stream(input_buffer.data(), n_SCALAR_dofs);
1131 
1132 #ifdef LIBMESH_HAVE_MPI
1133  if (this->n_processors() > 1)
1134  {
1135  const Parallel::MessageTag val_tag = this->comm().get_unique_tag();
1136 
1137  // Post the receive on the last processor
1138  if (this->processor_id() == (this->n_processors()-1))
1139  this->comm().receive(0, input_buffer, val_tag);
1140 
1141  // Send the data to processor 0
1142  if (this->processor_id() == 0)
1143  this->comm().send(this->n_processors()-1, input_buffer, val_tag);
1144  }
1145 #endif
1146 
1147  // Finally, set the SCALAR values
1148  if (this->processor_id() == (this->n_processors()-1))
1149  {
1150  const DofMap & dof_map = this->get_dof_map();
1151  std::vector<dof_id_type> SCALAR_dofs;
1152  dof_map.SCALAR_dof_indices(SCALAR_dofs, var);
1153 
1154  for (auto i : index_range(SCALAR_dofs))
1155  {
1156  if (vec)
1157  vec->set (SCALAR_dofs[i], input_buffer[i]);
1158  ++n_assigned_vals;
1159  }
1160  }
1161 
1162  return n_assigned_vals;
1163 }
1164 
1165 
// Read a serialized vector from \p io, scattering the coefficients into
// the local portion of \p vec (when vec is non-null).  This is a
// collective call; every processor in the communicator must participate.
// Returns the vector length recorded in the file header.
template <typename InValType>
                                              NumericVector<Number> * vec)
{
  parallel_object_only();

#ifndef NDEBUG
  // In parallel we better be reading a parallel vector -- if not
  // we will not set all of its components below!!
  if (this->n_processors() > 1 && vec)
    {
      libmesh_assert (vec->type() == PARALLEL ||
                      vec->type() == GHOSTED);
    }
#endif

  libmesh_assert (io.reading());

  // vector length
  unsigned int vector_length=0; // FIXME? size_t would break binary compatibility...
#ifndef NDEBUG
  std::size_t n_assigned_vals=0;
#endif

  // Get the buffer size: only processor 0 reads the header entry, then
  // broadcasts the length to all other ranks.
  if (this->processor_id() == 0)
    io.data(vector_length, "# vector length");
  this->comm().broadcast(vector_length);

  // _written_var_indices maps the variable slot as stored in the file to
  // the variable number in this system.
  const unsigned int nv = cast_int<unsigned int>
    (this->_written_var_indices.size());
  const dof_id_type
    n_nodes = this->get_mesh().n_nodes(),
    n_elem  = this->get_mesh().n_elem();

  libmesh_assert_less_equal (nv, this->n_vars());

  // for newer versions, read variables node/elem major
  if (io.version() >= LIBMESH_VERSION_ID(0,7,4))
    {
      //---------------------------------
      // Collect the values for all nodes
#ifndef NDEBUG
      n_assigned_vals +=
#endif
        this->read_serialized_blocked_dof_objects (n_nodes,
                                                   this->get_mesh().local_nodes_begin(),
                                                   this->get_mesh().local_nodes_end(),
                                                   InValType(),
                                                   io,
                                                   std::vector<NumericVector<Number> *> (1,vec));


      //------------------------------------
      // Collect the values for all elements
#ifndef NDEBUG
      n_assigned_vals +=
#endif
                                                   this->get_mesh().local_elements_begin(),
                                                   this->get_mesh().local_elements_end(),
                                                   InValType(),
                                                   io,
                                                   std::vector<NumericVector<Number> *> (1,vec));
    }

  // for older versions, read variables var-major
  else
    {
      // Loop over each variable in the system, and then each node/element in the mesh.
      for (unsigned int data_var=0; data_var<nv; data_var++)
        {
          const unsigned int var = _written_var_indices[data_var];
          // SCALAR variables are handled separately below
          if (this->variable(var).type().family != SCALAR)
            {
              //---------------------------------
              // Collect the values for all nodes
#ifndef NDEBUG
              n_assigned_vals +=
#endif
                this->read_serialized_blocked_dof_objects (n_nodes,
                                                           this->get_mesh().local_nodes_begin(),
                                                           this->get_mesh().local_nodes_end(),
                                                           InValType(),
                                                           io,
                                                           std::vector<NumericVector<Number> *> (1,vec),
                                                           var);


              //------------------------------------
              // Collect the values for all elements
#ifndef NDEBUG
              n_assigned_vals +=
#endif
                                                           this->get_mesh().local_elements_begin(),
                                                           this->get_mesh().local_elements_end(),
                                                           InValType(),
                                                           io,
                                                           std::vector<NumericVector<Number> *> (1,vec),
                                                           var);
            } // end variable loop
        }
    }

  //-------------------------------------------
  // Finally loop over all the SCALAR variables
  for (unsigned int data_var=0; data_var<nv; data_var++)
    {
      const unsigned int var = _written_var_indices[data_var];
      if (this->variable(var).type().family == SCALAR)
        {
#ifndef NDEBUG
          n_assigned_vals +=
#endif
            this->read_SCALAR_dofs (var, io, vec);
        }
    }

  // Close the vector so that parallel/ghosted storage is left in a
  // consistent state for subsequent use.
  if (vec)
    vec->close();

#ifndef NDEBUG
  // Sanity check: collectively we should have assigned exactly
  // vector_length values.
  this->comm().sum (n_assigned_vals);
  libmesh_assert_equal_to (n_assigned_vals, vector_length);
#endif

  return vector_length;
}
1295 
1296 
1297 
// Write this system's header metadata to \p io: the number of variables,
// then for each variable its name, subdomain restriction, approximation
// order(s) and FE family/families, and finally the names of any
// additional vectors (when write_additional_data is true).  Only
// processor 0 performs the write; all other ranks return immediately.
                           const std::string & /* version is currently unused */,
                           const bool write_additional_data) const
{
  libmesh_assert (io.writing());


  // Only write the header information
  // if we are processor 0.
  if (this->get_mesh().processor_id() != 0)
    return;

  std::string comment;
  // Scratch buffer used to format numbers and names into comments.
  // NOTE(review): std::sprintf into a fixed 80-char buffer can overflow
  // if a variable name exceeds 79 characters -- confirm name lengths
  // are bounded upstream, or consider snprintf.
  char buf[80];

  // 5.)
  // Write the number of variables in the system

  {
    // set up the comment
    comment = "# No. of Variables in System \"";
    comment += this->name();
    comment += "\"";

    unsigned int nv = this->n_vars();
    io.data (nv, comment.c_str());
  }


  for (auto var : IntRange<unsigned int>(0, this->n_vars()))
    {
      // 6.)
      // Write the name of the var-th variable
      {
        // set up the comment
        comment = "# Name, Variable No. ";
        std::sprintf(buf, "%u", var);
        comment += buf;
        comment += ", System \"";
        comment += this->name();
        comment += "\"";

        std::string var_name = this->variable_name(var);
        io.data (var_name, comment.c_str());
      }

      // 6.1.) Variable subdomains
      {
        // set up the comment
        comment = "# Subdomains, Variable \"";
        std::sprintf(buf, "%s", this->variable_name(var).c_str());
        comment += buf;
        comment += "\", System \"";
        comment += this->name();
        comment += "\"";

        // flatten the (sorted) active-subdomain set into a vector for output
        const std::set<subdomain_id_type> & domains = this->variable(var).active_subdomains();
        std::vector<subdomain_id_type> domain_array;
        domain_array.assign(domains.begin(), domains.end());
        io.data (domain_array, comment.c_str());
      }

      // 7.)
      // Write the approximation order of the var-th variable
      // in this system
      {
        // set up the comment
        comment = "# Approximation Order, Variable \"";
        std::sprintf(buf, "%s", this->variable_name(var).c_str());
        comment += buf;
        comment += "\", System \"";
        comment += this->name();
        comment += "\"";

        // enum values are stored as ints in the file format
        int order = static_cast<int>(this->variable_type(var).order);
        io.data (order, comment.c_str());
      }


#ifdef LIBMESH_ENABLE_INFINITE_ELEMENTS

      // do the same for radial_order
      {
        comment = "# Radial Approximation Order, Variable \"";
        std::sprintf(buf, "%s", this->variable_name(var).c_str());
        comment += buf;
        comment += "\", System \"";
        comment += this->name();
        comment += "\"";

        int rad_order = static_cast<int>(this->variable_type(var).radial_order);
        io.data (rad_order, comment.c_str());
      }

#endif

      // Write the Finite Element type of the var-th variable
      // in this System
      {
        // set up the comment
        comment = "# FE Family, Variable \"";
        std::sprintf(buf, "%s", this->variable_name(var).c_str());
        comment += buf;
        comment += "\", System \"";
        comment += this->name();
        comment += "\"";

        const FEType & type = this->variable_type(var);
        int fam = static_cast<int>(type.family);
        io.data (fam, comment.c_str());

#ifdef LIBMESH_ENABLE_INFINITE_ELEMENTS

        comment = "# Radial FE Family, Variable \"";
        std::sprintf(buf, "%s", this->variable_name(var).c_str());
        comment += buf;
        comment += "\", System \"";
        comment += this->name();
        comment += "\"";

        int radial_fam = static_cast<int>(type.radial_family);
        io.data (radial_fam, comment.c_str());

        comment = "# Infinite Mapping Type, Variable \"";
        std::sprintf(buf, "%s", this->variable_name(var).c_str());
        comment += buf;
        comment += "\", System \"";
        comment += this->name();
        comment += "\"";

        int i_map = static_cast<int>(type.inf_map);
        io.data (i_map, comment.c_str());
#endif
      }
    } // end of the variable loop

  // 8.)
  // Write the number of additional vectors in the System.
  // If write_additional_data==false, then write zero for
  // the number of additional vectors.
  {
    {
      // set up the comment
      comment = "# No. of Additional Vectors, System \"";
      comment += this->name();
      comment += "\"";

      unsigned int nvecs = write_additional_data ? this->n_vectors () : 0;
      io.data (nvecs, comment.c_str());
    }

    if (write_additional_data)
      {
        unsigned int cnt=0;
        for (const auto & pr : _vectors)
          {
            // 9.)
            // write the name of the cnt-th additional vector
            comment = "# Name of ";
            std::sprintf(buf, "%d", cnt++);
            comment += buf;
            comment += "th vector";
            std::string vec_name = pr.first;

            io.data (vec_name, comment.c_str());
          }
      }
  }
}
1500 
1501 
1502 
// Write the local solution coefficients (and, when requested, those of
// every additional vector) to \p io in "parallel" format: each processor
// writes the values for its own dof objects, ordered by increasing
// global id so the on-disk ordering is partition independent.
                                 const bool write_additional_data) const
{
  // PerfLog pl("IO Performance",false);
  // pl.push("write_parallel_data");
  // std::size_t total_written_size = 0;

  std::string comment;

  libmesh_assert (io.writing());

  std::vector<Number> io_buffer; io_buffer.reserve(this->solution->local_size());

  // build the ordered nodes and element maps.
  // when writing/reading parallel files we need to iterate
  // over our nodes/elements in order of increasing global id().
  // however, this is not guaranteed to be ordering we obtain
  // by using the node_iterators/element_iterators directly.
  // so build a set, sorted by id(), that provides the ordering.
  // further, for memory economy build the set but then transfer
  // its contents to vectors, which will be sorted.
  std::vector<const DofObject *> ordered_nodes, ordered_elements;
  {
    // scope the set so its node-based storage is freed once the
    // contiguous vector has been filled
    std::set<const DofObject *, CompareDofObjectsByID>
      ordered_nodes_set (this->get_mesh().local_nodes_begin(),
                         this->get_mesh().local_nodes_end());

    ordered_nodes.insert(ordered_nodes.end(),
                         ordered_nodes_set.begin(),
                         ordered_nodes_set.end());
  }
  {
    std::set<const DofObject *, CompareDofObjectsByID>
      ordered_elements_set (this->get_mesh().local_elements_begin(),
                            this->get_mesh().local_elements_end());

    ordered_elements.insert(ordered_elements.end(),
                            ordered_elements_set.begin(),
                            ordered_elements_set.end());
  }

  const unsigned int sys_num = this->number();
  const unsigned int nv = this->n_vars();

  // Loop over each non-SCALAR variable and each node, and write out the value.
  for (unsigned int var=0; var<nv; var++)
    if (this->variable(var).type().family != SCALAR)
      {
        // First write the node DOF values
        for (const auto & node : ordered_nodes)
          for (auto comp : IntRange<unsigned int>(0, node->n_comp(sys_num,var)))
            {
              libmesh_assert_not_equal_to (node->dof_number(sys_num, var, comp),

              io_buffer.push_back((*this->solution)(node->dof_number(sys_num, var, comp)));
            }

        // Then write the element DOF values
        for (const auto & elem : ordered_elements)
          for (auto comp : IntRange<unsigned int>(0, elem->n_comp(sys_num,var)))
            {
              libmesh_assert_not_equal_to (elem->dof_number(sys_num, var, comp),

              io_buffer.push_back((*this->solution)(elem->dof_number(sys_num, var, comp)));
            }
      }

  // Finally, write the SCALAR data on the last processor
  for (auto var : IntRange<unsigned int>(0, this->n_vars()))
    if (this->variable(var).type().family == SCALAR)
      {
        if (this->processor_id() == (this->n_processors()-1))
          {
            const DofMap & dof_map = this->get_dof_map();
            std::vector<dof_id_type> SCALAR_dofs;
            dof_map.SCALAR_dof_indices(SCALAR_dofs, var);

            for (auto dof : SCALAR_dofs)
              io_buffer.push_back((*this->solution)(dof));
          }
      }

  // 9.)
  //
  // Actually write the reordered solution vector
  // for the ith system to disk

  // set up the comment
  {
    comment = "# System \"";
    comment += this->name();
    comment += "\" Solution Vector";
  }

  io.data (io_buffer, comment.c_str());

  // total_written_size += io_buffer.size();

  // Only write additional vectors if wanted
  if (write_additional_data)
    {
      for (auto & pr : _vectors)
        {
          // reuse io_buffer; clear() keeps the existing capacity
          io_buffer.clear();
          io_buffer.reserve(pr.second->local_size());

          // Loop over each non-SCALAR variable and each node, and write out the value.
          for (unsigned int var=0; var<nv; var++)
            if (this->variable(var).type().family != SCALAR)
              {
                // First write the node DOF values
                for (const auto & node : ordered_nodes)
                  for (auto comp : IntRange<unsigned int>(0, node->n_comp(sys_num,var)))
                    {
                      libmesh_assert_not_equal_to (node->dof_number(sys_num, var, comp),

                      io_buffer.push_back((*pr.second)(node->dof_number(sys_num, var, comp)));
                    }

                // Then write the element DOF values
                for (const auto & elem : ordered_elements)
                  for (auto comp : IntRange<unsigned int>(0, elem->n_comp(sys_num,var)))
                    {
                      libmesh_assert_not_equal_to (elem->dof_number(sys_num, var, comp),

                      io_buffer.push_back((*pr.second)(elem->dof_number(sys_num, var, comp)));
                    }
              }

          // Finally, write the SCALAR data on the last processor
          for (auto var : IntRange<unsigned int>(0, this->n_vars()))
            if (this->variable(var).type().family == SCALAR)
              {
                if (this->processor_id() == (this->n_processors()-1))
                  {
                    const DofMap & dof_map = this->get_dof_map();
                    std::vector<dof_id_type> SCALAR_dofs;
                    dof_map.SCALAR_dof_indices(SCALAR_dofs, var);

                    for (auto dof : SCALAR_dofs)
                      io_buffer.push_back((*pr.second)(dof));
                  }
              }

          // 10.)
          //
          // Actually write the reordered additional vector
          // for this system to disk

          // set up the comment
          {
            comment = "# System \"";
            comment += this->name();
            comment += "\" Additional Vector \"";
            comment += pr.first;
            comment += "\"";
          }

          io.data (io_buffer, comment.c_str());

          // total_written_size += io_buffer.size();
        }
    }

  // const Real
  //  dt = pl.get_elapsed_time(),
  //  rate = total_written_size*sizeof(Number)/dt;

  // libMesh::err << "Write " << total_written_size << " \"Number\" values\n"
  //              << " Elapsed time = " << dt << '\n'
  //              << " Rate = " << rate/1.e6 << "(MB/sec)\n\n";

  // pl.pop("write_parallel_data");
}
1700 
1701 
1702 
// Write the solution vector (and, when requested, every additional
// vector) to \p io in serialized format via write_serialized_vector(),
// with processor 0 appending a descriptive comment after each vector.
// Collective call: all processors must participate.
                                   const bool write_additional_data) const
{
  parallel_object_only();
  std::string comment;

  // PerfLog pl("IO Performance",false);
  // pl.push("write_serialized_data");
  // std::size_t total_written_size = 0;

  // total_written_size +=
  this->write_serialized_vector(io, *this->solution);

  // set up the comment; only processor 0 holds the output stream
  if (this->processor_id() == 0)
    {
      comment = "# System \"";
      comment += this->name();
      comment += "\" Solution Vector";

      io.comment (comment);
    }

  // Only write additional vectors if wanted
  if (write_additional_data)
    {
      for (auto & pair : this->_vectors)
        {
          // total_written_size +=
          this->write_serialized_vector(io, *pair.second);

          // set up the comment
          if (this->processor_id() == 0)
            {
              comment = "# System \"";
              comment += this->name();
              comment += "\" Additional Vector \"";
              comment += pair.first;
              comment += "\"";
              io.comment (comment);
            }
        }
    }

  // const Real
  //  dt = pl.get_elapsed_time(),
  //  rate = total_written_size*sizeof(Number)/dt;

  // libMesh::out << "Write " << total_written_size << " \"Number\" values\n"
  //              << " Elapsed time = " << dt << '\n'
  //              << " Rate = " << rate/1.e6 << "(MB/sec)\n\n";

  // pl.pop("write_serialized_data");




  // // test the new method
  // {
  //   std::vector<std::string> names;
  //   std::vector<NumericVector<Number> *> vectors_to_write;

  //   names.push_back("Solution Vector");
  //   vectors_to_write.push_back(this->solution.get());

  //   // Only write additional vectors if wanted
  //   if (write_additional_data)
  //     {
  //       std::map<std::string, NumericVector<Number> *>::const_iterator
  //         pos = _vectors.begin();

  //       for (; pos != this->_vectors.end(); ++pos)
  //         {
  //           names.push_back("Additional Vector " + pos->first);
  //           vectors_to_write.push_back(pos->second);
  //         }
  //     }

  //   total_written_size =
  //     this->write_serialized_vectors (io, names, vectors_to_write);

  //   const Real
  //     dt2 = pl.get_elapsed_time(),
  //     rate2 = total_written_size*sizeof(Number)/(dt2-dt);

  //   libMesh::out << "Write (new) " << total_written_size << " \"Number\" values\n"
  //                << " Elapsed time = " << (dt2-dt) << '\n'
  //                << " Rate = " << rate2/1.e6 << "(MB/sec)\n\n";

  // }
}
1807 
1808 
1809 
1810 template <typename iterator_type>
1811 std::size_t System::write_serialized_blocked_dof_objects (const std::vector<const NumericVector<Number> *> & vecs,
1812  const dof_id_type n_objs,
1813  const iterator_type begin,
1814  const iterator_type end,
1815  Xdr & io,
1816  const unsigned int var_to_write) const
1817 {
1818  parallel_object_only();
1819 
1820  //-------------------------------------------------------
1821  // General order: (IO format 0.7.4 & greater)
1822  //
1823  // for (objects ...)
1824  // for (vecs ....)
1825  // for (vars ....)
1826  // for (comps ...)
1827  //
1828  // where objects are nodes or elements, sorted to be
1829  // partition independent,
1830  // vecs are one or more *identically distributed* solution
1831  // coefficient vectors, vars are one or more variables
1832  // to write, and comps are all the components for said
1833  // vars on the object.
1834 
1835  // We will write all variables unless requested otherwise.
1836  std::vector<unsigned int> vars_to_write(1, var_to_write);
1837 
1838  if (var_to_write == libMesh::invalid_uint)
1839  {
1840  vars_to_write.clear(); vars_to_write.reserve(this->n_vars());
1841  for (auto var : IntRange<unsigned int>(0, this->n_vars()))
1842  vars_to_write.push_back(var);
1843  }
1844 
1845  const dof_id_type io_blksize = cast_int<dof_id_type>
1846  (std::min(max_io_blksize, static_cast<std::size_t>(n_objs)));
1847 
1848  const unsigned int
1849  sys_num = this->number(),
1850  num_vecs = cast_int<unsigned int>(vecs.size()),
1851  num_blks = cast_int<unsigned int>(std::ceil(static_cast<double>(n_objs)/
1852  static_cast<double>(io_blksize)));
1853 
1854  // libMesh::out << "io_blksize = " << io_blksize
1855  // << ", num_objects = " << n_objs
1856  // << ", num_blks = " << num_blks
1857  // << std::endl;
1858 
1859  std::size_t written_length=0; // The numer of values written. This will be returned
1860  std::vector<std::vector<dof_id_type>> xfer_ids(num_blks); // The global IDs and # of components for the local objects in all blocks
1861  std::vector<std::vector<Number>> send_vals(num_blks); // The raw values for the local objects in all blocks
1862  std::vector<Parallel::Request>
1863  id_requests(num_blks), val_requests(num_blks); // send request handle for each block
1864  std::vector<Parallel::MessageTag>
1865  id_tags(num_blks), val_tags(num_blks); // tag number for each block
1866 
1867  // ------------------------------------------------------
1868  // First pass - count the number of objects in each block
1869  // traverse all the objects and figure out which block they
1870  // will ultimately live in.
1871  std::vector<unsigned int>
1872  xfer_ids_size (num_blks,0),
1873  send_vals_size (num_blks,0);
1874 
1875  for (iterator_type it=begin; it!=end; ++it)
1876  {
1877  const dof_id_type
1878  id = (*it)->id(),
1879  block = id/io_blksize;
1880 
1881  libmesh_assert_less (block, num_blks);
1882 
1883  xfer_ids_size[block] += 2; // for each object, we store its id, as well as the total number of components for all variables
1884 
1885  unsigned int n_comp_tot=0;
1886 
1887  for (const auto & var : vars_to_write)
1888  n_comp_tot += (*it)->n_comp(sys_num, var); // for each variable, we will store the nonzero components
1889 
1890  send_vals_size[block] += n_comp_tot*num_vecs;
1891  }
1892 
1893  //-----------------------------------------
1894  // Collect the values for all local objects,
1895  // binning them into 'blocks' that will be
1896  // sent to processor 0
1897  for (unsigned int blk=0; blk<num_blks; blk++)
1898  {
1899  // libMesh::out << "Writing object block " << blk << std::endl;
1900 
1901  // Each processor should build up its transfer buffers for its
1902  // local objects in [first_object,last_object).
1903  const dof_id_type
1904  first_object = blk*io_blksize,
1905  last_object = std::min(cast_int<dof_id_type>((blk+1)*io_blksize), n_objs);
1906 
1907  // convenience
1908  std::vector<dof_id_type> & ids (xfer_ids[blk]);
1909  std::vector<Number> & vals (send_vals[blk]);
1910 
1911  // we now know the number of values we will store for each block,
1912  // so we can do efficient preallocation
1913  ids.clear(); ids.reserve (xfer_ids_size[blk]);
1914  vals.clear(); vals.reserve (send_vals_size[blk]);
1915 
1916  if (send_vals_size[blk] != 0) // only send if we have nonzero components to write
1917  for (iterator_type it=begin; it!=end; ++it)
1918  if (((*it)->id() >= first_object) && // object in [first_object,last_object)
1919  ((*it)->id() < last_object))
1920  {
1921  ids.push_back((*it)->id());
1922 
1923  // count the total number of nonzeros transferred for this object
1924  {
1925  unsigned int n_comp_tot=0;
1926 
1927  for (const auto & var : vars_to_write)
1928  n_comp_tot += (*it)->n_comp(sys_num, var);
1929 
1930  ids.push_back (n_comp_tot*num_vecs); // even if 0 - processor 0 has no way of knowing otherwise...
1931  }
1932 
1933  // pack the values to send
1934  for (const auto & vec : vecs)
1935  for (const auto & var : vars_to_write)
1936  {
1937  const unsigned int n_comp = (*it)->n_comp(sys_num, var);
1938 
1939  for (unsigned int comp=0; comp<n_comp; comp++)
1940  {
1941  libmesh_assert_greater_equal ((*it)->dof_number(sys_num, var, comp), vec->first_local_index());
1942  libmesh_assert_less ((*it)->dof_number(sys_num, var, comp), vec->last_local_index());
1943  vals.push_back((*vec)((*it)->dof_number(sys_num, var, comp)));
1944  }
1945  }
1946  }
1947 
1948 #ifdef LIBMESH_HAVE_MPI
1949  id_tags[blk] = this->comm().get_unique_tag(100*num_blks + blk);
1950  val_tags[blk] = this->comm().get_unique_tag(200*num_blks + blk);
1951 
1952  // nonblocking send the data for this block
1953  this->comm().send (0, ids, id_requests[blk], id_tags[blk]);
1954  this->comm().send (0, vals, val_requests[blk], val_tags[blk]);
1955 #endif
1956  }
1957 
1958 
1959  if (this->processor_id() == 0)
1960  {
1961  std::vector<std::vector<dof_id_type>> recv_ids (this->n_processors());
1962  std::vector<std::vector<Number>> recv_vals (this->n_processors());
1963  std::vector<unsigned int> obj_val_offsets; // map to traverse entry-wise rather than processor-wise
1964  std::vector<Number> output_vals; // The output buffer for the current block
1965 
1966  // a ThreadedIO object to perform asynchronous file IO
1967  ThreadedIO<Number> threaded_io(io, output_vals);
1968  std::unique_ptr<Threads::Thread> async_io;
1969 
1970  for (unsigned int blk=0; blk<num_blks; blk++)
1971  {
1972  // Each processor should build up its transfer buffers for its
1973  // local objects in [first_object,last_object).
1974  const dof_id_type
1975  first_object = cast_int<dof_id_type>(blk*io_blksize),
1976  last_object = std::min(cast_int<dof_id_type>((blk+1)*io_blksize), n_objs),
1977  n_objects_blk = last_object - first_object;
1978 
1979  // offset array. this will define where each object's values
1980  // map into the actual output_vals buffer. this must get
1981  // 0-initialized because 0-component objects are not actually sent
1982  obj_val_offsets.resize (n_objects_blk); std::fill (obj_val_offsets.begin(), obj_val_offsets.end(), 0);
1983 
1984  std::size_t n_val_recvd_blk=0;
1985 
1986  // receive this block of data from all processors.
1987  for (processor_id_type comm_step=0, tnp=this->n_processors(); comm_step != tnp; ++comm_step)
1988  {
1989 #ifdef LIBMESH_HAVE_MPI
1990  // blocking receive indices for this block, imposing no particular order on processor
1991  Parallel::Status id_status (this->comm().probe (Parallel::any_source, id_tags[blk]));
1992  std::vector<dof_id_type> & ids (recv_ids[id_status.source()]);
1993  this->comm().receive (id_status.source(), ids, id_tags[blk]);
1994 #else
1995  std::vector<dof_id_type> & ids (recv_ids[0]);
1996  ids = xfer_ids[blk];
1997 #endif
1998 
1999  // note its possible we didn't receive values for objects in
2000  // this block if they have no components allocated.
2001  for (std::size_t idx=0, sz=ids.size(); idx<sz; idx+=2)
2002  {
2003  const dof_id_type
2004  local_idx = ids[idx+0]-first_object,
2005  n_vals_tot_allvecs = ids[idx+1];
2006 
2007  libmesh_assert_less (local_idx, n_objects_blk);
2008  libmesh_assert_less (local_idx, obj_val_offsets.size());
2009 
2010  obj_val_offsets[local_idx] = n_vals_tot_allvecs;
2011  }
2012 
2013 #ifdef LIBMESH_HAVE_MPI
2014  // blocking receive values for this block, imposing no particular order on processor
2015  Parallel::Status val_status (this->comm().probe (Parallel::any_source, val_tags[blk]));
2016  std::vector<Number> & vals (recv_vals[val_status.source()]);
2017  this->comm().receive (val_status.source(), vals, val_tags[blk]);
2018 #else
2019  // straight copy without MPI
2020  std::vector<Number> & vals (recv_vals[0]);
2021  vals = send_vals[blk];
2022 #endif
2023 
2024  n_val_recvd_blk += vals.size();
2025  }
2026 
2027  // We need the offsets into the output_vals vector for each object.
2028  // fortunately, this is simply the partial sum of the total number
2029  // of components for each object
2030  std::partial_sum(obj_val_offsets.begin(), obj_val_offsets.end(),
2031  obj_val_offsets.begin());
2032 
2033  // wait on any previous asynchronous IO - this *must* complete before
2034  // we start messing with the output_vals buffer!
2035  if (async_io.get()) async_io->join();
2036 
2037  // this is the actual output buffer that will be written to disk.
2038  // at this point we finally know what size it will be.
2039  output_vals.resize(n_val_recvd_blk);
2040 
2041  // pack data from all processors into output values
2042  for (auto proc : IntRange<unsigned int>(0, this->n_processors()))
2043  {
2044  const std::vector<dof_id_type> & ids (recv_ids [proc]);
2045  const std::vector<Number> & vals(recv_vals[proc]);
2046  std::vector<Number>::const_iterator proc_vals(vals.begin());
2047 
2048  for (std::size_t idx=0, sz=ids.size(); idx<sz; idx+=2)
2049  {
2050  const dof_id_type
2051  local_idx = ids[idx+0]-first_object,
2052  n_vals_tot_allvecs = ids[idx+1];
2053 
2054  // put this object's data into the proper location
2055  // in the output buffer
2056  std::vector<Number>::iterator out_vals(output_vals.begin());
2057  if (local_idx != 0)
2058  std::advance (out_vals, obj_val_offsets[local_idx-1]);
2059 
2060  for (unsigned int val=0; val<n_vals_tot_allvecs; val++, ++out_vals, ++proc_vals)
2061  {
2062  libmesh_assert (out_vals != output_vals.end());
2063  libmesh_assert (proc_vals != vals.end());
2064  *out_vals = *proc_vals;
2065  }
2066  }
2067  }
2068 
2069  // output_vals buffer is now filled for this block.
2070  // write it to disk
2071  async_io = libmesh_make_unique<Threads::Thread>(threaded_io);
2072  written_length += output_vals.size();
2073  }
2074 
2075  // wait on any previous asynchronous IO - this *must* complete before
2076  // our stuff goes out of scope
2077  async_io->join();
2078  }
2079 
2080  Parallel::wait(id_requests);
2081  Parallel::wait(val_requests);
2082 
2083  // we need some synchronization here. Because this method
2084  // can be called for a range of nodes, then a range of elements,
2085  // we need some mechanism to prevent processors from racing past
2086  // to the next range and overtaking ongoing communication. one
2087  // approach would be to figure out unique tags for each range,
2088  // but for now we just impose a barrier here. And might as
2089  // well have it do some useful work.
2090  this->comm().broadcast(written_length);
2091 
2092  return written_length;
2093 }
2094 
2095 
2096 
2098  const unsigned int var,
2099  Xdr & io) const
2100 {
2101  unsigned int written_length=0;
2102  std::vector<Number> vals; // The raw values for the local objects in the current block
2103  // Collect the SCALARs for the current variable
2104  if (this->processor_id() == (this->n_processors()-1))
2105  {
2106  const DofMap & dof_map = this->get_dof_map();
2107  std::vector<dof_id_type> SCALAR_dofs;
2108  dof_map.SCALAR_dof_indices(SCALAR_dofs, var);
2109  const unsigned int n_scalar_dofs = cast_int<unsigned int>
2110  (SCALAR_dofs.size());
2111 
2112  for (unsigned int i=0; i<n_scalar_dofs; i++)
2113  {
2114  vals.push_back( vec(SCALAR_dofs[i]) );
2115  }
2116  }
2117 
2118 #ifdef LIBMESH_HAVE_MPI
2119  if (this->n_processors() > 1)
2120  {
2121  const Parallel::MessageTag val_tag(1);
2122 
2123  // Post the receive on processor 0
2124  if (this->processor_id() == 0)
2125  {
2126  this->comm().receive(this->n_processors()-1, vals, val_tag);
2127  }
2128 
2129  // Send the data to processor 0
2130  if (this->processor_id() == (this->n_processors()-1))
2131  {
2132  this->comm().send(0, vals, val_tag);
2133  }
2134  }
2135 #endif
2136 
2137  // -------------------------------------------------------
2138  // Write the output on processor 0.
2139  if (this->processor_id() == 0)
2140  {
2141  const unsigned int vals_size =
2142  cast_int<unsigned int>(vals.size());
2143  io.data_stream (vals.data(), vals_size);
2144  written_length += vals_size;
2145  }
2146 
2147  return written_length;
2148 }
2149 
2150 
2151 
2153  const NumericVector<Number> & vec) const
2154 {
2155  parallel_object_only();
2156 
2157  libmesh_assert (io.writing());
2158 
2159  dof_id_type vec_length = vec.size();
2160  if (this->processor_id() == 0) io.data (vec_length, "# vector length");
2161 
2162  dof_id_type written_length = 0;
2163 
2164  //---------------------------------
2165  // Collect the values for all nodes
2166  written_length += cast_int<dof_id_type>
2167  (this->write_serialized_blocked_dof_objects (std::vector<const NumericVector<Number> *>(1,&vec),
2168  this->get_mesh().n_nodes(),
2169  this->get_mesh().local_nodes_begin(),
2170  this->get_mesh().local_nodes_end(),
2171  io));
2172 
2173  //------------------------------------
2174  // Collect the values for all elements
2175  written_length += cast_int<dof_id_type>
2176  (this->write_serialized_blocked_dof_objects (std::vector<const NumericVector<Number> *>(1,&vec),
2177  this->get_mesh().n_elem(),
2178  this->get_mesh().local_elements_begin(),
2179  this->get_mesh().local_elements_end(),
2180  io));
2181 
2182  //-------------------------------------------
2183  // Finally loop over all the SCALAR variables
2184  for (auto var : IntRange<unsigned int>(0, this->n_vars()))
2185  if (this->variable(var).type().family == SCALAR)
2186  {
2187  written_length +=
2188  this->write_SCALAR_dofs (vec, var, io);
2189  }
2190 
2191  if (this->processor_id() == 0)
2192  libmesh_assert_equal_to (written_length, vec_length);
2193 
2194  return written_length;
2195 }
2196 
2197 
2198 template <typename InValType>
2200  const std::vector<NumericVector<Number> *> & vectors) const
2201 {
2202  parallel_object_only();
2203 
2204  // Error checking
2205  // #ifndef NDEBUG
2206  // // In parallel we better be reading a parallel vector -- if not
2207  // // we will not set all of its components below!!
2208  // if (this->n_processors() > 1)
2209  // {
2210  // libmesh_assert (vec.type() == PARALLEL ||
2211  // vec.type() == GHOSTED);
2212  // }
2213  // #endif
2214 
2215  libmesh_assert (io.reading());
2216 
2217  if (this->processor_id() == 0)
2218  {
2219  // sizes
2220  unsigned int num_vecs=0;
2221  dof_id_type vector_length=0;
2222 
2223  // Get the number of vectors
2224  io.data(num_vecs);
2225  // Get the buffer size
2226  io.data(vector_length);
2227 
2228  libmesh_assert_equal_to (num_vecs, vectors.size());
2229 
2230  if (num_vecs != 0)
2231  {
2232  libmesh_assert_not_equal_to (vectors[0], 0);
2233  libmesh_assert_equal_to (vectors[0]->size(), vector_length);
2234  }
2235  }
2236 
2237  // no need to actually communicate these.
2238  // this->comm().broadcast(num_vecs);
2239  // this->comm().broadcast(vector_length);
2240 
2241  // Cache these - they are not free!
2242  const dof_id_type
2243  n_nodes = this->get_mesh().n_nodes(),
2244  n_elem = this->get_mesh().n_elem();
2245 
2246  std::size_t read_length = 0;
2247 
2248  //---------------------------------
2249  // Collect the values for all nodes
2250  read_length +=
2251  this->read_serialized_blocked_dof_objects (n_nodes,
2252  this->get_mesh().local_nodes_begin(),
2253  this->get_mesh().local_nodes_end(),
2254  InValType(),
2255  io,
2256  vectors);
2257 
2258  //------------------------------------
2259  // Collect the values for all elements
2260  read_length +=
2262  this->get_mesh().local_elements_begin(),
2263  this->get_mesh().local_elements_end(),
2264  InValType(),
2265  io,
2266  vectors);
2267 
2268  //-------------------------------------------
2269  // Finally loop over all the SCALAR variables
2270  for (NumericVector<Number> * vec : vectors)
2271  for (auto var : IntRange<unsigned int>(0, this->n_vars()))
2272  if (this->variable(var).type().family == SCALAR)
2273  {
2274  libmesh_assert_not_equal_to (vec, 0);
2275 
2276  read_length +=
2277  this->read_SCALAR_dofs (var, io, vec);
2278  }
2279 
2280  //---------------------------------------
2281  // last step - must close all the vectors
2282  for (NumericVector<Number> * vec : vectors)
2283  {
2284  libmesh_assert_not_equal_to (vec, 0);
2285  vec->close();
2286  }
2287 
2288  return read_length;
2289 }
2290 
2291 
2292 
2294  const std::vector<const NumericVector<Number> *> & vectors) const
2295 {
2296  parallel_object_only();
2297 
2298  libmesh_assert (io.writing());
2299 
2300  // Cache these - they are not free!
2301  const dof_id_type
2302  n_nodes = this->get_mesh().n_nodes(),
2303  n_elem = this->get_mesh().n_elem();
2304 
2305  std::size_t written_length = 0;
2306 
2307  if (this->processor_id() == 0)
2308  {
2309  unsigned int
2310  n_vec = cast_int<unsigned int>(vectors.size());
2311  dof_id_type
2312  vec_size = vectors.empty() ? 0 : vectors[0]->size();
2313  // Set the number of vectors
2314  io.data(n_vec, "# number of vectors");
2315  // Set the buffer size
2316  io.data(vec_size, "# vector length");
2317  }
2318 
2319  //---------------------------------
2320  // Collect the values for all nodes
2321  written_length +=
2322  this->write_serialized_blocked_dof_objects (vectors,
2323  n_nodes,
2324  this->get_mesh().local_nodes_begin(),
2325  this->get_mesh().local_nodes_end(),
2326  io);
2327 
2328  //------------------------------------
2329  // Collect the values for all elements
2330  written_length +=
2331  this->write_serialized_blocked_dof_objects (vectors,
2332  n_elem,
2333  this->get_mesh().local_elements_begin(),
2334  this->get_mesh().local_elements_end(),
2335  io);
2336 
2337  //-------------------------------------------
2338  // Finally loop over all the SCALAR variables
2339  for (const NumericVector<Number> * vec : vectors)
2340  for (auto var : IntRange<unsigned int>(0, this->n_vars()))
2341  if (this->variable(var).type().family == SCALAR)
2342  {
2343  libmesh_assert_not_equal_to (vec, 0);
2344 
2345  written_length +=
2346  this->write_SCALAR_dofs (*vec, var, io);
2347  }
2348 
2349  return written_length;
2350 }
2351 
2352 
2353 
2354 
// Explicit instantiations of the templated read routines for the
// value types this build supports.
2355 template void System::read_parallel_data<Number> (Xdr & io, const bool read_additional_data);
2356 template void System::read_serialized_data<Number> (Xdr & io, const bool read_additional_data);
2357 template numeric_index_type System::read_serialized_vector<Number> (Xdr & io, NumericVector<Number> * vec);
2358 template std::size_t System::read_serialized_vectors<Number> (Xdr & io, const std::vector<NumericVector<Number> *> & vectors) const;
2359 #ifdef LIBMESH_USE_COMPLEX_NUMBERS
// In complex-valued builds, also instantiate readers whose file input
// type is Real — presumably so complex builds can restart from
// real-valued data files; confirm against the read implementations.
2360 template void System::read_parallel_data<Real> (Xdr & io, const bool read_additional_data);
2361 template void System::read_serialized_data<Real> (Xdr & io, const bool read_additional_data);
2362 template numeric_index_type System::read_serialized_vector<Real> (Xdr & io, NumericVector<Number> * vec);
2363 template std::size_t System::read_serialized_vectors<Real> (Xdr & io, const std::vector<NumericVector<Number> *> & vectors) const;
2364 #endif
2365 
2366 } // namespace libMesh
libMesh::FEType::inf_map
InfMapType inf_map
The coordinate mapping type of the infinite element.
Definition: fe_type.h:257
libMesh::System::write_serialized_vector
dof_id_type write_serialized_vector(Xdr &io, const NumericVector< Number > &vec) const
Writes a vector for this System.
Definition: system_io.C:2152
libMesh::System::read_serialized_vector
numeric_index_type read_serialized_vector(Xdr &io, NumericVector< Number > *vec)
Reads a vector for this System.
Definition: system_io.C:1167
libMesh::dof_id_type
uint8_t dof_id_type
Definition: id_types.h:67
libMesh::Number
Real Number
Definition: libmesh_common.h:195
libMesh::System::n_vars
unsigned int n_vars() const
Definition: system.h:2155
libMesh::FEType::radial_family
FEFamily radial_family
The type of approximation in radial direction.
Definition: fe_type.h:249
libMesh::OrderWrapper::get_order
int get_order() const
Explicitly request the order as an int.
Definition: fe_type.h:77
libMesh::invalid_uint
const unsigned int invalid_uint
A number which is used quite often to represent an invalid or uninitialized value.
Definition: libmesh.h:249
libMesh::System::write_parallel_data
void write_parallel_data(Xdr &io, const bool write_additional_data) const
Writes additional data, namely vectors, for this System.
Definition: system_io.C:1503
libMesh::MeshTools::n_elem
dof_id_type n_elem(const MeshBase::const_element_iterator &begin, const MeshBase::const_element_iterator &end)
Count up the number of elements of a specific type (as defined by an iterator range).
Definition: mesh_tools.C:705
libMesh::FEType::family
FEFamily family
The type of finite element.
Definition: fe_type.h:203
libMesh::PARALLEL
Definition: enum_parallel_type.h:36
libMesh::MeshBase::n_elem
virtual dof_id_type n_elem() const =0
libMesh::System::_vectors
std::map< std::string, NumericVector< Number > * > _vectors
Some systems need an arbitrary number of vectors.
Definition: system.h:1985
libMesh::System::variable_name
const std::string & variable_name(const unsigned int i) const
Definition: system.h:2203
libMesh::System::read_serialized_blocked_dof_objects
std::size_t read_serialized_blocked_dof_objects(const dof_id_type n_objects, const iterator_type begin, const iterator_type end, const InValType dummy, Xdr &io, const std::vector< NumericVector< Number > * > &vecs, const unsigned int var_to_read=libMesh::invalid_uint) const
Reads an input vector from the stream io and assigns the values to a set of DofObjects.
Definition: system_io.C:810
libMesh::index_range
IntRange< std::size_t > index_range(const std::vector< T > &vec)
Helper function that returns an IntRange<std::size_t> representing all the indices of the passed-in v...
Definition: int_range.h:106
libMesh
The libMesh namespace provides an interface to certain functionality in the library.
Definition: factoryfunction.C:55
libMesh::Xdr
This class implements a C++ interface to the XDR (eXternal Data Representation) format.
Definition: xdr_cxx.h:65
libMesh::System::n_vectors
unsigned int n_vectors() const
Definition: system.h:2283
libMesh::XYZ
Definition: enum_fe_family.h:46
libMesh::NumericVector::close
virtual void close()=0
Calls the NumericVector's internal assembly routines, ensuring that the values are consistent across ...
end
IterBase * end
Also have a polymorphic pointer to the end object, this prevents iterating past the end.
Definition: variant_filter_iterator.h:343
libMesh::ParallelObject::comm
const Parallel::Communicator & comm() const
Definition: parallel_object.h:94
libMesh::System::write_header
void write_header(Xdr &io, const std::string &version, const bool write_additional_data) const
Writes the basic data header for this System.
Definition: system_io.C:1298
libMesh::GHOSTED
Definition: enum_parallel_type.h:37
libMesh::Xdr::version
int version() const
Gets the version of the file that is being read.
Definition: xdr_cxx.h:163
libMesh::System::number
unsigned int number() const
Definition: system.h:2075
libMesh::Variable::type
const FEType & type() const
Definition: variable.h:119
libMesh::MeshBase::mesh_dimension
unsigned int mesh_dimension() const
Definition: mesh_base.C:135
libMesh::System::read_SCALAR_dofs
unsigned int read_SCALAR_dofs(const unsigned int var, Xdr &io, NumericVector< Number > *vec) const
Reads the SCALAR dofs from the stream io and assigns the values to the appropriate entries of vec.
Definition: system_io.C:1120
libMesh::System::variable
const Variable & variable(unsigned int var) const
Return a constant reference to Variable var.
Definition: system.h:2183
libMesh::Variable::active_subdomains
const std::set< subdomain_id_type > & active_subdomains() const
Definition: variable.h:150
libMesh::DofObject
The DofObject defines an abstract base class for objects that have degrees of freedom associated with...
Definition: dof_object.h:53
libMesh::NumericVector::size
virtual numeric_index_type size() const =0
libMesh::NumericVector< Number >
libMesh::zero
const Number zero
.
Definition: libmesh.h:243
libMesh::libmesh_assert
libmesh_assert(ctx)
libMesh::IntRange
The IntRange templated class is intended to make it easy to loop over integers which are indices of a...
Definition: int_range.h:53
libMesh::Xdr::writing
bool writing() const
Definition: xdr_cxx.h:116
libMesh::Xdr::reading
bool reading() const
Definition: xdr_cxx.h:110
libMesh::System::read_header
void read_header(Xdr &io, const std::string &version, const bool read_header=true, const bool read_additional_data=true, const bool read_legacy_format=false)
Reads the basic data header for this System.
Definition: system_io.C:114
libMesh::Xdr::data
void data(T &a, const char *comment="")
Inputs or outputs a single value.
Definition: xdr_cxx.C:766
libMesh::ParallelObject::n_processors
processor_id_type n_processors() const
Definition: parallel_object.h:100
libMesh::System::get_mesh
const MeshBase & get_mesh() const
Definition: system.h:2083
libMesh::ParallelObject::processor_id
processor_id_type processor_id() const
Definition: parallel_object.h:106
libMesh::DofObject::invalid_id
static const dof_id_type invalid_id
An invalid id to distinguish an uninitialized DofObject.
Definition: dof_object.h:421
libMesh::System::add_variable
unsigned int add_variable(const std::string &var, const FEType &type, const std::set< subdomain_id_type > *const active_subdomains=nullptr)
Adds the variable var to the list of variables for this system.
Definition: system.C:1069
libMesh::DofMap::SCALAR_dof_indices
void SCALAR_dof_indices(std::vector< dof_id_type > &di, const unsigned int vn, const bool old_dofs=false) const
Fills the vector di with the global degree of freedom indices corresponding to the SCALAR variable vn...
Definition: dof_map.C:2488
libMesh::processor_id_type
uint8_t processor_id_type
Definition: id_types.h:104
libMesh::System::read_serialized_data
void read_serialized_data(Xdr &io, const bool read_additional_data=true)
Reads additional data, namely vectors, for this System.
Definition: system_io.C:724
libMesh::cast_int
Tnew cast_int(Told oldvar)
Definition: libmesh_common.h:610
libMesh::System::read_serialized_vectors
std::size_t read_serialized_vectors(Xdr &io, const std::vector< NumericVector< Number > * > &vectors) const
Read a number of identically distributed vectors.
Definition: system_io.C:2199
libMesh::System::write_SCALAR_dofs
unsigned int write_SCALAR_dofs(const NumericVector< Number > &vec, const unsigned int var, Xdr &io) const
Writes the SCALAR dofs associated with var to the stream io.
Definition: system_io.C:2097
libMesh::MeshTools::Generation::Private::idx
unsigned int idx(const ElemType type, const unsigned int nx, const unsigned int i, const unsigned int j)
A useful inline function which replaces the macros used previously.
Definition: mesh_generation.C:72
libMesh::System::_written_var_indices
std::vector< unsigned int > _written_var_indices
This vector is used only when reading in a system from file.
Definition: system.h:2046
libMesh::System::read_legacy_data
void read_legacy_data(Xdr &io, const bool read_additional_data=true)
Reads additional data, namely vectors, for this System.
Definition: system_io.C:309
n_nodes
const dof_id_type n_nodes
Definition: tecplot_io.C:68
libMesh::Xdr::data_stream
void data_stream(T *val, const unsigned int len, const unsigned int line_break=libMesh::invalid_uint)
Inputs or outputs a raw data stream.
Definition: xdr_cxx.C:831
libMesh::System::_additional_data_written
unsigned int _additional_data_written
This flag is used only when reading in a system from file.
Definition: system.h:2034
libMesh::System::read_parallel_data
void read_parallel_data(Xdr &io, const bool read_additional_data)
Reads additional data, namely vectors, for this System.
Definition: system_io.C:492
libMesh::numeric_index_type
dof_id_type numeric_index_type
Definition: id_types.h:99
libMesh::MONOMIAL
Definition: enum_fe_family.h:39
libMesh::System::variable_type
const FEType & variable_type(const unsigned int i) const
Definition: system.h:2233
libMesh::MeshBase::n_nodes
virtual dof_id_type n_nodes() const =0
libMesh::System::write_serialized_data
void write_serialized_data(Xdr &io, const bool write_additional_data=true) const
Writes additional data, namely vectors, for this System.
Definition: system_io.C:1703
libMesh::Threads::NonConcurrentThread
Simple compatibility class for std::thread 'concurrent' execution.
Definition: threads.h:69
libMesh::System::solution
std::unique_ptr< NumericVector< Number > > solution
Data structure to hold solution values.
Definition: system.h:1539
libMesh::FEType
class FEType hides (possibly multiple) FEFamily and approximation orders, thereby enabling specialize...
Definition: fe_type.h:178
libMesh::FEType::order
OrderWrapper order
The approximation order of the element.
Definition: fe_type.h:197
libMesh::NumericVector::set
virtual void set(const numeric_index_type i, const T value)=0
Sets v(i) = value.
libMesh::DofMap
This class handles the numbering of degrees of freedom on a mesh.
Definition: dof_map.h:176
libMesh::System::name
const std::string & name() const
Definition: system.h:2067
libMesh::System::write_serialized_vectors
std::size_t write_serialized_vectors(Xdr &io, const std::vector< const NumericVector< Number > * > &vectors) const
Serialize & write a number of identically distributed vectors.
Definition: system_io.C:2293
libMesh::System::variable_number
unsigned short int variable_number(const std::string &var) const
Definition: system.C:1232
libMesh::System::write_serialized_blocked_dof_objects
std::size_t write_serialized_blocked_dof_objects(const std::vector< const NumericVector< Number > * > &vecs, const dof_id_type n_objects, const iterator_type begin, const iterator_type end, Xdr &io, const unsigned int var_to_write=libMesh::invalid_uint) const
Writes an output vector to the stream io for a set of DofObjects.
Definition: system_io.C:1811
libMesh::on_command_line
bool on_command_line(std::string arg)
Definition: libmesh.C:898
libMesh::Threads::NonConcurrentThread::join
void join()
Join is a no-op, since the constructor blocked until completion.
Definition: threads.h:83
libMesh::Xdr::is_open
bool is_open() const
Definition: xdr_cxx.C:341
data
IterBase * data
Ideally this private member data should have protected access.
Definition: variant_filter_iterator.h:337
libMesh::System::clear
virtual void clear()
Clear all the data structures associated with the system.
Definition: system.C:205
libMesh::System::add_vector
NumericVector< Number > & add_vector(const std::string &vec_name, const bool projections=true, const ParallelType type=PARALLEL)
Adds the additional vector vec_name to this system.
Definition: system.C:661
libMesh::System::get_dof_map
const DofMap & get_dof_map() const
Definition: system.h:2099
libMesh::System::n_dofs
dof_id_type n_dofs() const
Definition: system.C:150
libMesh::SCALAR
Definition: enum_fe_family.h:58
libMesh::out
OStreamProxy out
libMesh::Xdr::comment
void comment(std::string &)
Writes or reads (ignores) a comment line.
Definition: xdr_cxx.C:1661
libMesh::NumericVector::type
ParallelType type() const
Definition: numeric_vector.h:160
libMesh::FEType::radial_order
OrderWrapper radial_order
The approximation order in radial direction of the infinite element.
Definition: fe_type.h:236