rb_construction_base.C
// rbOOmit: An implementation of the Certified Reduced Basis method.
// Copyright (C) 2009, 2010 David J. Knezevic

// This file is part of rbOOmit.

// rbOOmit is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.

// rbOOmit is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

// C++ includes
#include <algorithm>
#include <cstddef>
#include <ctime>
#include <cstdlib> // *must* precede <cmath> for proper std::abs() on PGI, Sun Studio CC
#include <cmath>
#include <iterator>
#include <memory>
#include <numeric>

// rbOOmit includes
#include "libmesh/rb_construction_base.h"
#include "libmesh/rb_parameters.h"

// libMesh includes
#include "libmesh/id_types.h"
#include "libmesh/libmesh_common.h"
#include "libmesh/numeric_vector.h"
#include "libmesh/equation_systems.h"
#include "libmesh/parallel.h"
#include "libmesh/condensed_eigen_system.h"
#include "libmesh/linear_implicit_system.h"
#include "libmesh/int_range.h"
#include "libmesh/utility.h"

// Nvidia C++ whining about destroying incomplete unique_ptr<T> Base::foo types
#include "libmesh/dof_map.h"
#include "libmesh/shell_matrix.h"
#include "libmesh/sparse_matrix.h"
#include "timpi/communicator.h"

// Anonymous namespace
namespace
{

/*
 * Helper function to divide a vector of samples across processors.
 * Returns a pair with num_local_samples and first_local_index.
 */
std::pair<unsigned int, unsigned int> calculate_n_local_samples_and_index(
  const libMesh::Parallel::Communicator & communicator,
  const unsigned int n_global_samples,
  const bool serial)
{
  unsigned int n_local_samples = n_global_samples;
  unsigned int first_local_index = 0;

  if (serial || communicator.size() == 1)
    return {n_local_samples, first_local_index};

  // Calculate the number of training parameters local to this processor
  unsigned int quotient  = n_global_samples/communicator.size();
  unsigned int remainder = n_global_samples%communicator.size();
  if (communicator.rank() < remainder)
    {
      n_local_samples = (quotient + 1);
      first_local_index = communicator.rank()*(quotient+1);
    }
  else
    {
      n_local_samples = quotient;
      first_local_index = communicator.rank()*quotient + remainder;
    }
  return {n_local_samples, first_local_index};
}
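
// Worked example (illustration only, not part of the build): dividing
// n_global_samples = 10 across communicator.size() = 4 ranks gives
// quotient = 2 and remainder = 2, so
//   rank 0: n_local_samples = 3, first_local_index = 0
//   rank 1: n_local_samples = 3, first_local_index = 3
//   rank 2: n_local_samples = 2, first_local_index = 6
//   rank 3: n_local_samples = 2, first_local_index = 8
// i.e. the first `remainder` ranks each take one extra sample, and the
// local ranges tile [0, 10) without gaps or overlap.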
} // end anonymous namespace

namespace libMesh
{

// ------------------------------------------------------------
// RBConstructionBase implementation

template <class Base>
RBConstructionBase<Base>::RBConstructionBase (EquationSystems & es,
                                              const std::string & name_in,
                                              const unsigned int number_in)
  : Base(es, name_in, number_in),
    quiet_mode(true),
    serial_training_set(false),
    _normalize_solution_snapshots(false),
    _training_parameters_initialized(false),
    _first_local_index(0),
    _n_local_training_samples(0),
    _n_global_training_samples(0),
    _training_parameters_random_seed(-1) // by default, use std::time to seed RNG
{
}

template <class Base>
RBConstructionBase<Base>::~RBConstructionBase () = default;

template <class Base>
void RBConstructionBase<Base>::clear()
{
  // clear the parent data
  Base::clear();
  RBParametrized::clear();
  _training_parameters.clear();
}

template <class Base>
void RBConstructionBase<Base>::init_data()
{
  Base::init_data();

  // Initialize the inner product storage vector, which is useful for
  // storing intermediate results when evaluating inner products
  inner_product_storage_vector = NumericVector<Number>::build(this->comm());
  inner_product_storage_vector->init (this->n_dofs(), this->n_local_dofs(), false, PARALLEL);
}

template <class Base>
void RBConstructionBase<Base>::get_global_max_error_pair(const Parallel::Communicator & communicator,
                                                         std::pair<numeric_index_type, Real> & error_pair)
{
  // Set error_pair.second to the maximum global value and also
  // find which processor contains the maximum value
  unsigned int proc_ID_index;
  communicator.maxloc(error_pair.second, proc_ID_index);

  // Then broadcast error_pair.first from proc_ID_index
  communicator.broadcast(error_pair.first, proc_ID_index);
}
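
// Illustration (hypothetical values): if rank 0 enters with error_pair =
// {12, 0.1} and rank 1 enters with {47, 0.9}, maxloc() sets
// error_pair.second = 0.9 on every rank and identifies rank 1 as the owner
// of the maximum; the subsequent broadcast() then gives every rank
// error_pair = {47, 0.9}, i.e. the index and value of the globally
// largest error.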

template <class Base>
void RBConstructionBase<Base>::set_normalize_solution_snapshots(bool value)
{
  _normalize_solution_snapshots = value;
}

template <class Base>
numeric_index_type RBConstructionBase<Base>::get_n_training_samples() const
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  // First we check if there are no parameters here, and in that case we
  // return 1 since a single training sample is sufficient to generate an
  // RB approximation if there are no parameters. Note that in parallel,
  // when we don't have a serial training set, we return comm().size()
  // so that each processor is assigned a single (empty) training sample.
  if (_training_parameters.empty())
    {
      if (serial_training_set)
        return 1;
      else
        return this->comm().size();
    }

  return _n_global_training_samples;
}

template <class Base>
numeric_index_type RBConstructionBase<Base>::get_local_n_training_samples() const
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  // First we check if there are no parameters here, and in that case we
  // return 1 for both serial and parallel training sets. This is consistent
  // with get_n_training_samples(), and avoids accessing
  // training_parameters.begin() when training_parameters is empty.
  if (_training_parameters.empty())
    return 1;

  return _n_local_training_samples;
}

template <class Base>
numeric_index_type RBConstructionBase<Base>::get_first_local_training_index() const
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  // First we check if there are no parameters here, and in that case we
  // return 0 for a serial training set and comm().rank() for a parallel
  // training set. This is consistent with get_n_training_samples(), and
  // avoids accessing training_parameters.begin() when training_parameters
  // is empty.
  if (_training_parameters.empty())
    {
      if (serial_training_set)
        return 0;
      else
        return this->comm().rank();
    }

  return _first_local_index;
}

template <class Base>
numeric_index_type RBConstructionBase<Base>::get_last_local_training_index() const
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  if (_training_parameters.empty())
    return 0;

  return _first_local_index + _n_local_training_samples;
}

template <class Base>
void RBConstructionBase<Base>::set_params_from_training_set(unsigned int global_index)
{
  set_parameters(get_params_from_training_set(global_index));
}

template <class Base>
RBParameters RBConstructionBase<Base>::get_params_from_training_set(unsigned int global_index)
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  // If the _training_parameters are empty, return an empty RBParameters.
  // Otherwise, create a new RBParameters object from the single sample requested.
  RBParameters params;
  if (!_training_parameters.empty())
    {
      libmesh_error_msg_if((global_index < this->get_first_local_training_index()) ||
                           (global_index >= this->get_last_local_training_index()),
                           "Error: index "
                           << global_index
                           << " must be within range: "
                           << this->get_first_local_training_index()
                           << " - "
                           << this->get_last_local_training_index());

      const numeric_index_type local_index = global_index - get_first_local_training_index();
      for (const auto & [param_name, sample_vector] : _training_parameters)
        params.set_value(param_name, sample_vector[local_index]);

      // Copy all extra values into the new RBParameters.
      // We assume that the samples may be indexed differently for extra parameters,
      // so we don't just copy the local_index value.
      const auto & mine = get_parameters();
      for (const auto & [key, extra_sample_vector] :
           as_range(mine.extra_begin(), mine.extra_end()))
        {
          for (const auto idx : index_range(extra_sample_vector))
            params.set_extra_value(key, idx, extra_sample_vector[idx]);
        }
    }

  return params;
}

template <class Base>
void RBConstructionBase<Base>::set_params_from_training_set_and_broadcast(unsigned int global_index)
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: training parameters must first be initialized.");

  processor_id_type root_id = 0;
  if ((this->get_first_local_training_index() <= global_index) &&
      (global_index < this->get_last_local_training_index()))
    {
      // Set parameters on only one processor
      set_params_from_training_set(global_index);

      // set root_id, only non-zero on one processor
      root_id = this->processor_id();
    }

  // broadcast
  this->comm().max(root_id);
  broadcast_parameters(root_id);
}

template <class Base>
void RBConstructionBase<Base>::initialize_training_parameters(const RBParameters & mu_min,
                                                              const RBParameters & mu_max,
                                                              const unsigned int n_global_training_samples,
                                                              const std::map<std::string,bool> & log_param_scale,
                                                              const bool deterministic)
{
  if (!is_quiet())
    {
      // Print out some info about the training set initialization
      libMesh::out << "Initializing training parameters with "
                   << (deterministic ? "deterministic " : "random " )
                   << "training set..." << std::endl;

      for (const auto & pr : log_param_scale)
        libMesh::out << "Parameter "
                     << pr.first
                     << ": log scaling = "
                     << pr.second
                     << std::endl;

      libMesh::out << std::endl;
    }

  if (deterministic)
    {
      const auto [first_local_index, last_local_index] =
        generate_training_parameters_deterministic(this->comm(),
                                                   log_param_scale,
                                                   _training_parameters,
                                                   n_global_training_samples,
                                                   mu_min,
                                                   mu_max,
                                                   serial_training_set);
      _first_local_index = first_local_index;
      _n_local_training_samples = last_local_index-first_local_index;
    }
  else
    {
      // Generate random training samples for all parameters
      const auto [first_local_index, last_local_index] =
        generate_training_parameters_random(this->comm(),
                                            log_param_scale,
                                            _training_parameters,
                                            n_global_training_samples,
                                            mu_min,
                                            mu_max,
                                            this->_training_parameters_random_seed,
                                            serial_training_set);
      _first_local_index = first_local_index;
      _n_local_training_samples = last_local_index-first_local_index;
    }
  _n_global_training_samples = _n_local_training_samples;

  if (!serial_training_set)
    this->comm().sum(_n_global_training_samples);

  // For each parameter that only allows discrete values, we "snap" to the nearest
  // allowable discrete value
  if (get_n_discrete_params() > 0)
    {
      for (auto & [param_name, sample_vector] : _training_parameters)
        {
          if (is_discrete_parameter(param_name))
            {
              const std::vector<Real> & discrete_values =
                libmesh_map_find(get_discrete_parameter_values(), param_name);

              for (const auto sample_idx : index_range(sample_vector))
                {
                  // Round all values to the closest discrete value.
                  std::vector<Real> discretized_vector(sample_vector[sample_idx].size());
                  std::transform(sample_vector[sample_idx].cbegin(),
                                 sample_vector[sample_idx].cend(),
                                 discretized_vector.begin(),
                                 [&discrete_values](const Real & val) {
                                   return get_closest_value(val, discrete_values);
                                 });
                  sample_vector[sample_idx] = discretized_vector;
                }
            }
        }
    }

  _training_parameters_initialized = true;
}
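
// A minimal usage sketch (hypothetical names and values, not from this file):
// train on 100 random samples of a single parameter "nu" in [0.01, 1.0],
// sampled logarithmically:
//
//   RBParameters mu_min, mu_max;
//   mu_min.set_value("nu", 0.01);
//   mu_max.set_value("nu", 1.0);
//   std::map<std::string, bool> log_scale {{"nu", true}};
//   rb_system.initialize_training_parameters(mu_min, mu_max,
//                                            /*n_global_training_samples=*/100,
//                                            log_scale,
//                                            /*deterministic=*/false);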

template <class Base>
void RBConstructionBase<Base>::load_training_set(const std::map<std::string, std::vector<RBParameter>> & new_training_set)
{
  // Make sure we're running this on all processors at the same time
  libmesh_parallel_only(this->comm());

  // First, make sure that an initial training set has already been generated
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Error: load_training_set cannot be used to initialize parameters");

  // Make sure that the training set has the correct number of parameters
  const unsigned int n_params = get_n_params();
  libmesh_error_msg_if(new_training_set.size() > n_params,
                       "Error: new_training_set should not have more than get_n_params() parameters.");

  // Check that (new_training_set.size() == get_n_params()) is the same on all processes so that
  // we go into the same branch of the "if" statement below on all processes.
  const bool size_matches = (new_training_set.size() == n_params);
  libmesh_assert(this->comm().verify(size_matches));

  if (size_matches)
    {
      // If new_training_set stores values for all parameters, then we overwrite
      // _training_parameters with new_training_set.

      // Get the number of local and global training parameters
      _first_local_index = 0;
      _n_local_training_samples =
        cast_int<numeric_index_type>(new_training_set.begin()->second.size());
      _n_global_training_samples = _n_local_training_samples;

      if (!serial_training_set)
        {
          this->comm().sum(_n_global_training_samples);

          // Set the first/last indices.
          std::vector<numeric_index_type> local_sizes (this->n_processors(), 0);
          local_sizes[this->processor_id()] = _n_local_training_samples;
          this->comm().sum(local_sizes);

          // first_local_index is the sum of local_sizes
          // for all processor ids less than ours
          for (auto p : make_range(this->processor_id()))
            _first_local_index += local_sizes[p];
        }

      // Ensure that the parameters are the same.
      for (const auto & pr : _training_parameters)
        libmesh_error_msg_if(!new_training_set.count(pr.first),
                             "Parameters must be identical in order to overwrite dataset.");

      // Copy the values from the new_training_set to the internal training_parameters.
      _training_parameters = new_training_set;
    }
  else
    {
      // If new_training_set stores values for a subset of the parameters, then we keep the
      // length of training_parameters unchanged and overwrite the entries of the specified
      // parameters from new_training_set. Note that we repeatedly loop over new_training_set
      // to fill up the entire length of the sample_vector.
      for (auto & [param_name, sample_vector] : _training_parameters)
        {
          if (new_training_set.count(param_name))
            {
              for (const auto i : make_range(get_local_n_training_samples()))
                {
                  const unsigned int num_new_samples = libmesh_map_find(new_training_set,param_name).size();
                  libmesh_error_msg_if (num_new_samples==0, "new_training_set should not be empty");

                  const unsigned int new_training_set_index = i % num_new_samples;
                  sample_vector[i] = libmesh_map_find(new_training_set,param_name)[new_training_set_index];
                }
            }
        }
    }
}

template <class Base>
void RBConstructionBase<Base>::set_training_parameter_values(
  const std::string & param_name, const std::vector<RBParameter> & values)
{
  libmesh_error_msg_if(!_training_parameters_initialized,
                       "Training parameters must be initialized before calling set_training_parameter_values");
  libmesh_error_msg_if(values.size() != get_local_n_training_samples(),
                       "Inconsistent sizes");

  // Copy the new data, overwriting the old data.
  auto & training_vector = libmesh_map_find(_training_parameters, param_name);
  training_vector = values;
}


template <class Base>
std::pair<std::size_t, std::size_t>
RBConstructionBase<Base>::generate_training_parameters_random(const Parallel::Communicator & communicator,
                                                              const std::map<std::string, bool> & log_param_scale,
                                                              std::map<std::string, std::vector<RBParameter>> & local_training_parameters_in,
                                                              const unsigned int n_global_training_samples_in,
                                                              const RBParameters & min_parameters,
                                                              const RBParameters & max_parameters,
                                                              const int training_parameters_random_seed,
                                                              const bool serial_training_set)
{
  const unsigned int num_params = min_parameters.n_parameters();
  libmesh_error_msg_if(num_params!=max_parameters.n_parameters(),
                       "Number of parameters must be identical for min/max.");

  // Clear local_training_parameters_in
  local_training_parameters_in.clear();

  if (num_params == 0)
    return {0,0};

  if (training_parameters_random_seed < 0)
    {
      if (!serial_training_set)
        {
          // seed the random number generator with the system time
          // and the processor ID so that the seed is different
          // on different processors
          std::srand( static_cast<unsigned>( std::time(0)*(1+communicator.rank()) ));
        }
      else
        {
          // seed the random number generator with the system time
          // only so that the seed is the same on all processors
          //
          // Note that we broadcast the time on processor 0 to make
          // sure all processors agree.
          unsigned int current_time = static_cast<unsigned>( std::time(0) );
          communicator.broadcast(current_time, 0);
          std::srand(current_time);
        }
    }
  else
    {
      if (!serial_training_set)
        {
          // seed the random number generator with the provided value
          // and the processor ID so that the seed is different
          // on different processors
          std::srand( static_cast<unsigned>( training_parameters_random_seed*(1+communicator.rank()) ));
        }
      else
        {
          // seed the random number generator with the provided value
          // so that the seed is the same on all processors
          std::srand( static_cast<unsigned>( training_parameters_random_seed ));
        }
    }

  // TODO - we don't support vector-data here yet. This would only apply in the case where
  // min or max are vector-valued, and all the generated points need to stay within those ranges.
  // But typically we expect that if we're calling this function, we only have 1 min and 1 max,
  // so the generated values are single-valued as well. The .get_value() calls will throw an error
  // if this is not the case.

  // initialize local_training_parameters_in
  const auto & [n_local_training_samples, first_local_index] =
    calculate_n_local_samples_and_index(communicator, n_global_training_samples_in,
                                        serial_training_set);
  for (const auto & pr : min_parameters)
    local_training_parameters_in[pr.first] = std::vector<RBParameter>(n_local_training_samples);

  // finally, set the values
  for (auto & [param_name, sample_vector] : local_training_parameters_in)
    {
      for (auto i : make_range(n_local_training_samples))
        {
          Real random_number = static_cast<Real>(std::rand()) / RAND_MAX; // in range [0,1]

          // Generate log10 scaled training parameters
          if (libmesh_map_find(log_param_scale, param_name))
            {
              Real log_min   = std::log10(min_parameters.get_value(param_name));
              Real log_range = std::log10(max_parameters.get_value(param_name) / min_parameters.get_value(param_name));

              sample_vector[i] = {std::pow(Real(10.), log_min + random_number*log_range )};
            }
          // Generate linearly scaled training parameters
          else
            {
              sample_vector[i] = {
                random_number * (max_parameters.get_value(param_name) -
                                 min_parameters.get_value(param_name)) +
                min_parameters.get_value(param_name)};
            }
        }
    }
  return {first_local_index, first_local_index+n_local_training_samples};
}
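
// Worked example of the log10 scaling above (illustration only): with
// min = 0.01 and max = 1.0 we get log_min = -2 and log_range = 2, so a
// random_number of 0.5 maps to 10^(-2 + 0.5*2) = 10^-1 = 0.1. Half of the
// samples therefore land in [0.01, 0.1], which is the usual motivation for
// log scaling parameters that span several orders of magnitude.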

template <class Base>
std::pair<std::size_t, std::size_t>
RBConstructionBase<Base>::generate_training_parameters_deterministic(const Parallel::Communicator & communicator,
                                                                     const std::map<std::string, bool> & log_param_scale,
                                                                     std::map<std::string, std::vector<RBParameter>> & local_training_parameters_in,
                                                                     const unsigned int n_global_training_samples_in,
                                                                     const RBParameters & min_parameters,
                                                                     const RBParameters & max_parameters,
                                                                     const bool serial_training_set)
{
  libmesh_assert_equal_to ( min_parameters.n_parameters(), max_parameters.n_parameters() );
  const unsigned int num_params = min_parameters.n_parameters();

  if (num_params == 0)
    return {0,0};

  if (num_params > 3)
    libmesh_not_implemented_msg("ERROR: Deterministic training sample generation "
                                "not implemented for more than three parameters.");

  // TODO - we don't support vector-data here yet. This would only apply in the case where
  // min or max are vector-valued, and all the generated points need to stay within those ranges.
  // But typically we expect that if we're calling this function, we only have 1 min and 1 max,
  // so the generated values are single-valued as well. The .get_value() calls will throw an error
  // if this is not the case.

  // Reinitialize local_training_parameters_in (but don't remove existing keys!)
  const auto & [n_local_training_samples, first_local_index] =
    calculate_n_local_samples_and_index(communicator, n_global_training_samples_in,
                                        serial_training_set);
  const auto last_local_index = first_local_index + n_local_training_samples;
  for (const auto & pr : min_parameters)
    local_training_parameters_in[pr.first] = std::vector<RBParameter>(n_local_training_samples);

  // n_training_samples_per_param has 3 entries, but entries after "num_params"
  // are unused so we just set their value to 1. We need to set it to 1 (rather
  // than 0) so that we don't skip the inner part of the triply-nested loop over
  // n_training_samples_per_param below.
  std::vector<unsigned int> n_training_samples_per_param(3);
  for (unsigned int param=0; param<3; param++)
    {
      if (param < num_params)
        {
          n_training_samples_per_param[param] =
            static_cast<unsigned int>( std::round(std::pow(static_cast<Real>(n_global_training_samples_in), 1./num_params)) );
        }
      else
        {
          n_training_samples_per_param[param] = 1;
        }
    }

  {
    // The current implementation assumes that we have the same number of
    // samples in each parameter, so we check that n_global_training_samples_in
    // is consistent with this assumption.
    unsigned int total_samples_check = 1;
    for (unsigned int n_samples : n_training_samples_per_param)
      {
        total_samples_check *= n_samples;
      }

    libmesh_error_msg_if(total_samples_check != n_global_training_samples_in,
                         "Error: Number of training samples = "
                         << n_global_training_samples_in
                         << " does not enable a uniform grid of samples with "
                         << num_params << " parameters. Try "
                         << total_samples_check << " samples instead?");
  }

  // First we make a list of training samples associated with each parameter,
  // then we take a tensor product to obtain the final set of training samples.
  std::vector<std::vector<Real>> training_samples_per_param(num_params);
  {
    unsigned int i = 0;
    for (const auto & pr : min_parameters)
      {
        const std::string & param_name = pr.first;
        const bool use_log_scaling = libmesh_map_find(log_param_scale, param_name);
        Real min_param = min_parameters.get_value(param_name);
        Real max_param = max_parameters.get_value(param_name);

        training_samples_per_param[i].resize(n_training_samples_per_param[i]);

        for (unsigned int j=0; j<n_training_samples_per_param[i]; j++)
          {
            // Generate log10 scaled training parameters
            if (use_log_scaling)
              {
                Real epsilon = 1.e-6; // Prevent rounding errors triggering asserts
                Real log_min   = std::log10(min_param + epsilon);
                Real log_range = std::log10( (max_param-epsilon) / (min_param+epsilon) );
                Real step_size = log_range /
                  std::max((unsigned int)1,(n_training_samples_per_param[i]-1));

                if (j<(n_training_samples_per_param[i]-1))
                  {
                    training_samples_per_param[i][j] = std::pow(10., log_min + j*step_size );
                  }
                else
                  {
                    // due to rounding error, the last parameter can be slightly
                    // bigger than max_parameters, hence snap back to the max
                    training_samples_per_param[i][j] = max_param;
                  }
              }
            else
              {
                // Generate linearly scaled training parameters
                Real step_size = (max_param - min_param) /
                  std::max((unsigned int)1,(n_training_samples_per_param[i]-1));
                training_samples_per_param[i][j] = j*step_size + min_param;
              }

          }
        i++;
      }
  }

  // Now load into local_training_parameters_in
  {
    std::vector<unsigned int> indices(3);
    unsigned int index_count = 0;
    for (indices[0]=0; indices[0]<n_training_samples_per_param[0]; indices[0]++)
      {
        for (indices[1]=0; indices[1]<n_training_samples_per_param[1]; indices[1]++)
          {
            for (indices[2]=0; indices[2]<n_training_samples_per_param[2]; indices[2]++)
              {
                unsigned int param_count = 0;
                for (const auto & pr : min_parameters)
                  {
                    std::vector<RBParameter> & training_vector =
                      libmesh_map_find(local_training_parameters_in, pr.first);
                    if (first_local_index <= index_count && index_count < last_local_index)
                      training_vector[index_count - first_local_index] =
                        {training_samples_per_param[param_count][indices[param_count]]};

                    param_count++;
                  }
                index_count++;
              }
          }
      }
  }
  return {first_local_index, first_local_index+n_local_training_samples};
}
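
// Worked example of the tensor product above (illustration only): with
// num_params = 2 and n_global_training_samples_in = 9, each parameter gets
// round(9^(1/2)) = 3 evenly spaced values, and the triply-nested loop
// (with the unused third index having a single entry) emits the 3 x 3 grid
//   (p1_0,p2_0), (p1_0,p2_1), (p1_0,p2_2),
//   (p1_1,p2_0), ... , (p1_2,p2_2)
// in row-major order; each rank stores only the samples whose global
// index_count falls in its [first_local_index, last_local_index) range.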


template <class Base>
void RBConstructionBase<Base>::broadcast_parameters(const unsigned int proc_id)
{
  libmesh_assert_less (proc_id, this->n_processors());

  // create a copy of the current parameters
  RBParameters current_parameters = get_parameters();
  libmesh_error_msg_if(current_parameters.n_samples()!=1,
                       "Only single-sample RBParameter objects can be broadcast.");

  // Serialize the current_parameters to serialized_parameters in order to broadcast.
  // We handle multiple samples and vector values.
  // However, the vector values are assumed to remain the same size across samples.
  const std::size_t nparams = current_parameters.n_parameters();
  const std::size_t nsamples = current_parameters.n_samples();

  // First we get the sizes of all the parameter value vectors.
  std::vector<std::size_t> param_value_sizes;
  param_value_sizes.reserve(nparams);
  for (const auto & pr : current_parameters)
    param_value_sizes.push_back(pr.second[0].size());

  // Broadcast the sizes vector and reserve memory.
  this->comm().broadcast(param_value_sizes, proc_id);
  std::size_t buffsize = std::accumulate(param_value_sizes.cbegin(), param_value_sizes.cend(), 0ul);
  std::vector<Real> serialized_parameters;
  serialized_parameters.reserve(buffsize);

  // Then we serialize the parameters/sample/value vectors into a single vector.
  for (const auto & pr : current_parameters)
    {
      for (const auto sample_idx : make_range(nsamples))
        serialized_parameters.insert(serialized_parameters.end(),
                                     pr.second[sample_idx].cbegin(),
                                     pr.second[sample_idx].cend());
    }

  // Do the broadcasts.
  this->comm().broadcast(serialized_parameters, proc_id);

  // Deserialize into the copy of the RBParameters object.
  std::size_t param_idx = 0;
  auto val_idx = serialized_parameters.cbegin();
  for (const auto & pr : current_parameters)
    {
      const std::size_t param_value_size = param_value_sizes[param_idx];
      for (const auto sample_idx : make_range(nsamples))
        {
          auto end_val_idx = std::next(val_idx, param_value_size);
          RBParameter sample_val(val_idx, end_val_idx);
          current_parameters.set_value(pr.first, sample_idx, sample_val);
          val_idx = end_val_idx;
        }
      ++param_idx;
    }

  // Overwrite the parameters globally.
  set_parameters(current_parameters);
}
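
// Serialization layout sketch (hypothetical values): with the single-sample
// parameters { "a" : [1.0], "b" : [2.0, 3.0] }, iterated in the map's key
// order, param_value_sizes = {1, 2} and serialized_parameters =
// {1.0, 2.0, 3.0}; after the two broadcasts every rank rebuilds the same
// name -> value-vector association by walking the flat buffer with the
// per-parameter sizes.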

template <class Base>
void RBConstructionBase<Base>::set_training_random_seed(int seed)
{
  this->_training_parameters_random_seed = seed;
}

// Template specializations

// EigenSystem is only defined if we have SLEPc
#if defined(LIBMESH_HAVE_SLEPC)
template class LIBMESH_EXPORT RBConstructionBase<CondensedEigenSystem>;
#endif

template class LIBMESH_EXPORT RBConstructionBase<LinearImplicitSystem>;
template class LIBMESH_EXPORT RBConstructionBase<System>;

} // namespace libMesh