//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "PODReducedBasisTrainer.h"
#include "libmesh/replicated_mesh.h"
#include "libmesh/mesh_generation.h"
#include "SerializerGuard.h"
#include "libmesh/parallel_algebra.h"
#include "libmesh/parallel_sync.h"
#include "VectorPacker.h"
registerMooseObject("StochasticToolsApp", PODReducedBasisTrainer);

InputParameters
PODReducedBasisTrainer::validParams()
{
  InputParameters params = SurrogateTrainerBase::validParams();

  params.addClassDescription("Computes the reduced subspace plus the reduced operators for "
                             "the POD-RB surrogate.");
  params.addRequiredParam<std::vector<std::string>>("var_names",
                                                    "Names of the variables we want to "
                                                    "extract from the solution vectors.");
  params.addRequiredParam<std::vector<Real>>("error_res",
                                             "The errors allowed in the snapshot reconstruction.");
  params.addRequiredParam<std::vector<std::string>>("tag_names",
                                                    "Names of the tags for the reduced operators.");
  params.addParam<std::vector<std::string>>(
      "filenames", "Files where the eigenvalues are printed for each variable (if given).");

  params.addRequiredParam<std::vector<std::string>>(
      "tag_types",
      "List of keywords describing whether the tags"
      " correspond to independent operators or not. (op/op_dir/src/src_dir)");
  return params;
}
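
// A minimal sketch of how this trainer could appear in an input file. Only the
// parameters declared above come from this source file; the block layout, the
// object name, and the tag/variable values are illustrative assumptions.
//
//   [Trainers]
//     [pod_rb]
//       type = PODReducedBasisTrainer
//       var_names = 'u'
//       error_res = '1e-9'
//       tag_names = 'diff src'
//       tag_types = 'op src'
//     []
//   []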

PODReducedBasisTrainer::PODReducedBasisTrainer(const InputParameters & parameters)
  : SurrogateTrainerBase(parameters),
    _var_names(declareModelData<std::vector<std::string>>(
        "_var_names", getParam<std::vector<std::string>>("var_names"))),
    _error_res(getParam<std::vector<Real>>("error_res")),
    _tag_names(declareModelData<std::vector<std::string>>(
        "_tag_names", getParam<std::vector<std::string>>("tag_names"))),
    _tag_types(declareModelData<std::vector<std::string>>(
        "_tag_types", getParam<std::vector<std::string>>("tag_types"))),
    _base(declareModelData<std::vector<std::vector<DenseVector<Real>>>>("_base")),
    _red_operators(declareModelData<std::vector<DenseMatrix<Real>>>("_red_operators")),
    _base_completed(false),
    _empty_operators(true)
{
  if (_error_res.size() != _var_names.size())
    paramError("error_res",
               "The number of elements is not equal to the number"
               " of elements in 'var_names'!");

  if (_tag_names.size() != _tag_types.size())
    paramError("tag_types",
               "The number of elements is not equal to the number"
               " of elements in 'tag_names'!");

  std::vector<std::string> available_names{"op", "src", "op_dir", "src_dir"};
  for (const auto & tag_type : _tag_types)
  {
    auto it = std::find(available_names.begin(), available_names.end(), tag_type);
    if (it == available_names.end())
      paramError("tag_types",
                 "Tag type '",
                 tag_type,
                 "' is not valid, available names are:",
                 " op, op_dir, src, src_dir");
  }
}

void
PODReducedBasisTrainer::initialSetup()
{
  // Initializing the containers for the essential data to construct the
  // reduced operators.

  _snapshots.clear();
  _snapshots.resize(_var_names.size(), DistributedSnapshots(_communicator));

  _base.clear();
  _base.resize(_var_names.size());

  _corr_mx.clear();
  _corr_mx.resize(_var_names.size());

  _eigenvalues.clear();
  _eigenvalues.resize(_var_names.size());

  _eigenvectors.clear();
  _eigenvectors.resize(_var_names.size());
}

void
PODReducedBasisTrainer::execute()
{
  // If the base is not ready yet, create it by performing the POD on the
  // snapshot matrices. Also, initialize the containers for the reduced
  // operators.
  if (!_base_completed)
  {
    computeCorrelationMatrix();
    computeEigenDecomposition();
    computeBasisVectors();
    initReducedOperators();
    _base_completed = true;
    _empty_operators = true;
  }
}

void
PODReducedBasisTrainer::finalize()
{
  // If the operators are already filled on each processor, gather and sum them
  // together. This way every processor has access to every complete reduced
  // operator.
  if (!_empty_operators)
    for (unsigned int tag_i = 0; tag_i < _red_operators.size(); ++tag_i)
    {
      gatherSum(_red_operators[tag_i].get_values());
    }
}

void
PODReducedBasisTrainer::addSnapshot(unsigned int var_i,
                                    unsigned int glob_i,
                                    const std::shared_ptr<DenseVector<Real>> & snapshot)
{
  _snapshots[var_i].addNewEntry(glob_i, snapshot);
}

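/*
  Background sketch (informal notation): for each variable v, the matrix
  assembled here is the snapshot Gram (correlation) matrix

    C^(v)(i, j) = s_i^(v) . s_j^(v),

  where s_i^(v) is the i-th snapshot of variable v and '.' denotes the
  Euclidean dot product. The individual entries are evaluated in
  receiveObjects() as dot products of snapshot pairs.
*/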
void
PODReducedBasisTrainer::computeCorrelationMatrix()
{
  // Getting the number of snapshots. It is assumed that every variable has the
  // same number of snapshots. This assumption is used at multiple locations in
  // this source file.
  const auto no_snaps = getSnapsSize(0);

  // Initializing the correlation matrices for each variable.
  for (unsigned int var_i = 0; var_i < _snapshots.size(); ++var_i)
    _corr_mx[var_i] = DenseMatrix<Real>(no_snaps, no_snaps);

  /*
    Creating a simple 2D map that distributes the elements in the correlation
    matrices to each processor. A square mesh with elements corresponding to
    the entries in the correlation matrix is used, since optimal partitioning
    routines are already implemented for it. In this case, the centroids of the
    elements are equivalent to the indices of the correlation matrix. This method
    may seem convoluted, but it is necessary to ensure that the communication
    stays balanced and scales well for runs with a large number of processors.
  */
  ReplicatedMesh mesh(_communicator, 2);
  MeshTools::Generation::build_square(
      mesh, no_snaps, no_snaps, -0.5, no_snaps - 0.5, -0.5, no_snaps - 0.5);
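
  // With no_snaps x no_snaps elements spanning [-0.5, no_snaps - 0.5] in each
  // direction, every element is a unit square whose centroid falls exactly on
  // integer coordinates (i, j), i.e., on an index pair of the correlation matrix.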

  // A flag is added to each element indicating whether the corresponding matrix
  // entry has already been computed or not.
  mesh.add_elem_integer("filled");

  // Since the correlation matrix is symmetric, it is enough to compute the
  // entries on the diagonal in addition to the elements above the diagonal.
  // Therefore, the elements in the mesh below the diagonal are deleted.
  for (Elem * elem : mesh.active_element_ptr_range())
  {
    const auto centroid = elem->vertex_average();
    if (centroid(0) > centroid(1))
      mesh.delete_elem(elem);
  }

  // The mesh is distributed among the processors.
  mesh.prepare_for_use();

  /*
    This step restructures the snapshots into an unordered map to make it easier
    to do cross-products in the later stages. Since _snapshots only contains
    pointers to the data, this should not involve a considerable amount of copy
    operations.
  */
  std::unordered_map<unsigned int, std::vector<std::shared_ptr<DenseVector<Real>>>> local_vectors;
  for (unsigned int loc_vec_i = 0; loc_vec_i < _snapshots[0].getNumberOfLocalEntries(); ++loc_vec_i)
  {
    const unsigned int glob_vec_i = _snapshots[0].getGlobalIndex(loc_vec_i);
    auto & entry = local_vectors[glob_vec_i];

    for (unsigned int v_ind = 0; v_ind < _snapshots.size(); ++v_ind)
      entry.push_back(_snapshots[v_ind].getLocalEntry(loc_vec_i));
  }

  /*
    Initializing containers for the objects that will be sent to other processors.
    send_vectors is just a temporary object that ensures that the same snapshot is
    sent to other processors only once. send_map, on the other hand, will be used
    by the communicator and contains the required snapshots for each processor
    other than the current rank.
    std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>
    is a container that uses (global snapshot index, variable index, snapshot) to
    identify and send/receive snapshots during the communication.
  */

  std::unordered_map<processor_id_type, std::set<unsigned int>> send_vectors;
  std::unordered_map<
      processor_id_type,
      std::vector<std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>>>
      send_map;

  // Fill the send map with the snapshots we need to send to each processor.
  // First, we loop over the matrix entries (elements in the mesh).
  for (Elem * elem : mesh.active_element_ptr_range())
    if (elem->processor_id() != processor_id())
    {
      // The element centroids correspond to the 2D coordinates (i, j) of the
      // entries in the correlation matrix.
      const auto centroid = elem->vertex_average();
      const unsigned int i = centroid(0);
      const unsigned int j = centroid(1);

      /*
        Checking if the current processor has the required snapshot.
        We assume that every variable has the same number of snapshots with the
        same distribution among processors. Therefore, it is enough to test
        the first variable only.
      */
      if (_snapshots[0].hasGlobalEntry(i))
      {
        // Continue loop if the snapshot is already being sent.
        if (send_vectors[elem->processor_id()].count(i))
          continue;

        // Add another entry to the map if another processor needs the owned
        // snapshot.
        send_vectors[elem->processor_id()].insert(i);
        for (unsigned int v_ind = 0; v_ind < _snapshots.size(); ++v_ind)
          send_map[elem->processor_id()].emplace_back(
              i, v_ind, _snapshots[v_ind].getGlobalEntry(i));
      }
      else if (_snapshots[0].hasGlobalEntry(j))
      {
        // Continue loop if the snapshot is already being sent.
        if (send_vectors[elem->processor_id()].count(j))
          continue;

        // Add another entry to the map if another processor needs the owned
        // snapshot.
        send_vectors[elem->processor_id()].insert(j);
        for (unsigned int v_ind = 0; v_ind < _snapshots.size(); ++v_ind)
          send_map[elem->processor_id()].emplace_back(
              j, v_ind, _snapshots[v_ind].getGlobalEntry(j));
      }
    }
    else
      // Initializing the flag value with 0 (not computed) for each element.
      elem->set_extra_integer(0, 0);

  // Creating a container for the received data. In this case the map contains
  // the snapshots for each variable for each required global snapshot index.
  std::unordered_map<unsigned int, std::vector<std::shared_ptr<DenseVector<Real>>>>
      received_vectors;

  // Wrapping the receive function in a functor so it can be passed to
  // push_parallel_packed_range.
  auto functor =
      [this, &mesh, &received_vectors, &local_vectors](
          processor_id_type pid,
          const std::vector<
              std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>> & vectors)
  { PODReducedBasisTrainer::receiveObjects(mesh, received_vectors, local_vectors, pid, vectors); };

  Parallel::push_parallel_packed_range(_communicator, send_map, (void *)nullptr, functor);

  // This extra call is necessary in case a processor already has all the
  // elements it needs (and hence doesn't receive any).
  functor(
      processor_id(),
      std::vector<std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>>());

  // Now, the correlation matrices are gathered and summed so that every
  // processor sees the complete matrices.
  for (auto & corr_mx : _corr_mx)
    gatherSum(corr_mx.get_values());

  // The lower triangle of each matrix is then filled using symmetry.
  for (auto & corr_mx : _corr_mx)
    for (unsigned int row_i = 1; row_i < corr_mx.m(); ++row_i)
      for (unsigned int col_i = 0; col_i < row_i; ++col_i)
        corr_mx(row_i, col_i) = corr_mx(col_i, row_i);
}

void
PODReducedBasisTrainer::receiveObjects(
    ReplicatedMesh & mesh,
    std::unordered_map<unsigned int, std::vector<std::shared_ptr<DenseVector<Real>>>> &
        received_vectors,
    std::unordered_map<unsigned int, std::vector<std::shared_ptr<DenseVector<Real>>>> &
        local_vectors,
    processor_id_type /*pid*/,
    const std::vector<std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>> &
        vectors)
{
  for (auto & tuple : vectors)
  {
    const auto glob_vec_i = std::get<0>(tuple);
    const auto var_i = std::get<1>(tuple);
    const auto & vector = std::get<2>(tuple);

    // The entry that will be filled with all of the variable solutions for a vector.
    std::vector<std::shared_ptr<DenseVector<Real>>> & entry = received_vectors[glob_vec_i];

    // Size it to the number of variables in case we haven't already.
    entry.resize(_snapshots.size(), nullptr);

    // Add this variable's contribution - this is a shared_ptr, so we are
    // claiming partial ownership of this vector and do not have to make a copy.
    entry[var_i] = vector;
  }

  // Looping over the locally owned entries in the matrix and computing the
  // values.
  for (Elem * elem : mesh.active_local_element_ptr_range())
  {
    // If the matrix entry is already filled, the loop skips this element.
    if (elem->get_extra_integer(0))
      continue;

    // Getting pointers to the necessary snapshots for the matrix entry.
    // This points to a vector of size (number of variables).
    const unsigned int i = elem->vertex_average()(0);
    const unsigned int j = elem->vertex_average()(1);
    std::vector<std::shared_ptr<DenseVector<Real>>> * i_vec = nullptr;
    std::vector<std::shared_ptr<DenseVector<Real>>> * j_vec = nullptr;

    const auto find_i_local = local_vectors.find(i);
    if (find_i_local != local_vectors.end())
      i_vec = &find_i_local->second;
    else
    {
      const auto find_i_received = received_vectors.find(i);
      if (find_i_received != received_vectors.end())
      {
        i_vec = &find_i_received->second;
      }
      else
        continue;
    }

    const auto find_j_local = local_vectors.find(j);
    if (find_j_local != local_vectors.end())
      j_vec = &find_j_local->second;
    else
    {
      const auto find_j_received = received_vectors.find(j);
      if (find_j_received != received_vectors.end())
      {
        j_vec = &find_j_received->second;
      }
      else
        continue;
    }

    // Computing the available matrix entries for every variable.
    for (unsigned int v_ind = 0; v_ind < _snapshots.size(); ++v_ind)
      _corr_mx[v_ind](i, j) = (*i_vec)[v_ind]->dot(*(*j_vec)[v_ind]);

    // Set the 'filled' flag to 1 (true) to make sure it is not recomputed.
    elem->set_extra_integer(0, 1);
  }
}

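/*
  Background sketch: for each variable, the decomposition below solves

    C v_k = lambda_k v_k,

  where C is the snapshot correlation matrix. Because C is symmetric, its left
  and right eigenvectors coincide, so evd_left() suffices here. The eigenpairs
  are then sorted by decreasing eigenvalue and truncated with
  determineNumberOfModes().
*/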
void
PODReducedBasisTrainer::computeEigenDecomposition()
{
  for (unsigned int v_ind = 0; v_ind < _corr_mx.size(); ++v_ind)
  {
    unsigned int no_snaps = _corr_mx[v_ind].n();

    // Initializing temporary objects for the eigenvalues and eigenvectors since
    // evd_left() returns an unordered vector of eigenvalues.
    DenseVector<Real> eigenvalues(no_snaps);
    DenseMatrix<Real> eigenvectors(no_snaps, no_snaps);

    // Creating a temporary placeholder for the imaginary parts of the eigenvalues.
    DenseVector<Real> eigenvalues_imag(no_snaps);

    // Performing the eigenvalue decomposition.
    _corr_mx[v_ind].evd_left(eigenvalues, eigenvalues_imag, eigenvectors);

    // Sorting the eigenvectors and eigenvalues based on the magnitude of
    // the eigenvalues.
    std::vector<unsigned int> idx(eigenvalues.size());
    std::iota(idx.begin(), idx.end(), 0);
    std::vector<Real> & v = eigenvalues.get_values();

    // Getting the indices to be able to copy the corresponding eigenvectors too.
    std::stable_sort(
        idx.begin(), idx.end(), [&v](unsigned int i, unsigned int j) { return v[i] > v[j]; });

    // Getting a cutoff for the number of modes. The function requires a sorted
    // list, thus the temporary vector is sorted.
    std::stable_sort(v.begin(), v.end(), std::greater<Real>());
    unsigned int cutoff = determineNumberOfModes(_error_res[v_ind], v);

    // Initializing the actual containers for the eigenvectors and eigenvalues.
    _eigenvalues[v_ind] = DenseVector<Real>(cutoff);
    _eigenvectors[v_ind] = DenseMatrix<Real>(eigenvectors.m(), cutoff);

    // Copying the kept eigenvalues and eigenvectors into sorted containers.
    for (unsigned int j = 0; j < cutoff; ++j)
    {
      _eigenvalues[v_ind](j) = eigenvalues(j);
      for (unsigned int k = 0; k < _eigenvectors[v_ind].m(); ++k)
      {
        _eigenvectors[v_ind](k, j) = eigenvectors(k, idx[j]);
      }
    }

    printEigenvalues();
  }
}

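/*
  Background sketch: determineNumberOfModes() implements an energy-retention
  criterion. For eigenvalues sorted in decreasing order it returns the smallest
  r such that

    (lambda_1 + ... + lambda_r) / (lambda_1 + ... + lambda_N) > 1 - error.

  Worked example (hypothetical numbers): for eigenvalues
  {9.0, 0.9, 0.09, 0.009, 0.001} and error = 0.01, the partial fractions are
  0.9, 0.99, 0.999, ...; the first value strictly above 0.99 occurs at the
  third mode, so r = 3.
*/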
unsigned int
PODReducedBasisTrainer::determineNumberOfModes(Real error, const std::vector<Real> & inp_vec) const
{
  Real sum = std::accumulate(inp_vec.begin(), inp_vec.end(), 0.0);

  Real part_sum = 0.0;
  for (unsigned int i = 0; i < inp_vec.size(); ++i)
  {
    part_sum += inp_vec[i];
    if (part_sum / sum > 1 - error)
      return (i + 1);
  }
  return (inp_vec.size());
}

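/*
  Background sketch: this is the classical method of snapshots. With C = S^T S
  the Gram matrix of the snapshot matrix S and (lambda_k, v_k) its eigenpairs,
  the basis vectors computed below are

    phi_k = (1 / sqrt(lambda_k)) * sum_i v_k(i) * s_i,

  which are orthonormal provided the eigenvectors v_k are normalized.
*/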
void
PODReducedBasisTrainer::computeBasisVectors()
{
  // Looping over all the variables.
  for (unsigned int var_i = 0; var_i < _eigenvectors.size(); ++var_i)
  {
    unsigned int no_bases = _eigenvalues[var_i].size();
    unsigned int no_snaps = _snapshots[var_i].getNumberOfLocalEntries();

    _base[var_i].resize(no_bases);

    // Filling the containers using the local snapshots and the eigenvalues and
    // eigenvectors of the correlation matrices.
    for (unsigned int base_i = 0; base_i < no_bases; ++base_i)
    {
      _base[var_i][base_i].resize(_snapshots[var_i].getLocalEntry(0)->size());

      for (unsigned int loc_i = 0; loc_i < no_snaps; ++loc_i)
      {
        unsigned int glob_i = _snapshots[var_i].getGlobalIndex(loc_i);
        const DenseVector<Real> & snapshot = *_snapshots[var_i].getLocalEntry(loc_i);

        for (unsigned int i = 0; i < _base[var_i][base_i].size(); ++i)
        {
          _base[var_i][base_i](i) += _eigenvectors[var_i](glob_i, base_i) * snapshot(i);
        }
      }

      // Gathering and summing the local contributions over all of the processes.
      // This makes sure that every process sees all of the basis functions.
      gatherSum(_base[var_i][base_i].get_values());

      // Normalizing the basis functions to make sure they are orthonormal.
      _base[var_i][base_i].scale(1.0 / sqrt(_eigenvalues[var_i](base_i)));
    }
  }
}

void
PODReducedBasisTrainer::initReducedOperators()
{
  _red_operators.resize(_tag_names.size());

  // Getting the sum of the ranks of the different bases. Every reduced operator
  // is resized with this number so that the construction stays general.
  unsigned int base_num = getSumBaseSize();

  // Initializing each operator (each operator with a tag).
  for (unsigned int tag_i = 0; tag_i < _red_operators.size(); ++tag_i)
  {
    if (_tag_types[tag_i] == "src" || _tag_types[tag_i] == "src_dir")
      _red_operators[tag_i].resize(base_num, 1);
    else
      _red_operators[tag_i].resize(base_num, base_num);
  }
}

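/*
  Background sketch: addToReducedOperator() fills one column of a reduced
  operator by Galerkin projection. Assuming the incoming residual holds the
  full-order operator A applied to basis vector phi_{base_i}, the assignment
  below realizes

    A_r(j, base_i) = phi_j . (A phi_{base_i}),

  i.e., A_r = Phi^T A Phi, where the row index j runs over the basis vectors of
  all variables. Source-type tags ('src'/'src_dir') have a single column,
  b_r(j) = phi_j . b.
*/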
void
PODReducedBasisTrainer::addToReducedOperator(unsigned int base_i,
                                             unsigned int tag_i,
                                             std::vector<DenseVector<Real>> & residual)
{
  // Computing the elements of the reduced operator using Galerkin projection
  // on the residual. This is done in parallel and the local contributions are
  // gathered in finalize().
  unsigned int counter = 0;
  for (unsigned int var_i = 0; var_i < _var_names.size(); ++var_i)
  {
    for (unsigned int base_j = 0; base_j < _base[var_i].size(); ++base_j)
    {
      _red_operators[tag_i](counter, base_i) = residual[var_i].dot(_base[var_i][base_j]);
      counter++;
    }
  }

  _empty_operators = false;
}

unsigned int
PODReducedBasisTrainer::getSnapsSize(unsigned int var_i) const
{
  return _snapshots[var_i].getNumberOfGlobalEntries();
}

unsigned int
PODReducedBasisTrainer::getSumBaseSize() const
{
  unsigned int sum = 0;

  for (unsigned int i = 0; i < _base.size(); ++i)
    sum += _base[i].size();

  return sum;
}

const DenseVector<Real> &
PODReducedBasisTrainer::getBasisVector(unsigned int var_i, unsigned int base_i) const
{
  return _base[var_i][base_i];
}

const DenseVector<Real> &
PODReducedBasisTrainer::getBasisVector(unsigned int glob_i) const
{
  unsigned int counter = 0;

  for (unsigned int var_i = 0; var_i < _var_names.size(); ++var_i)
    for (unsigned int base_i = 0; base_i < _base[var_i].size(); ++base_i)
    {
      if (glob_i == counter)
        return _base[var_i][base_i];

      counter += 1;
    }

  mooseError("The basis vector with global index ", glob_i, " is not available!");
  return _base[0][0];
}

unsigned int
PODReducedBasisTrainer::getVariableIndex(unsigned int glob_i) const
{
  unsigned int counter = 0;

  for (unsigned int var_i = 0; var_i < _var_names.size(); ++var_i)
    for (unsigned int base_i = 0; base_i < _base[var_i].size(); ++base_i)
    {
      if (glob_i == counter)
        return var_i;

      counter += 1;
    }

  mooseError("Variable with global base index ", glob_i, " is not available!");
  return 0;
}

void
PODReducedBasisTrainer::printEigenvalues()
{
  if (processor_id() == 0 && _tid == 0 && isParamValid("filenames"))
  {
    std::vector<std::string> filenames = getParam<std::vector<std::string>>("filenames");

    if (filenames.size() != _var_names.size())
      paramError("filenames",
                 "The number of file names is not equal to the number of variable names!");

    for (unsigned int var_i = 0; var_i < _var_names.size(); ++var_i)
    {
      std::filebuf fb;
      fb.open(filenames[var_i], std::ios::out);
      std::ostream os(&fb);
      os << "evs" << std::endl;
      _eigenvalues[var_i].print_scientific(os);
    }
  }
}