//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "PODReducedBasisTrainer.h"
#include "libmesh/replicated_mesh.h"
#include "libmesh/mesh_generation.h"
#include "SerializerGuard.h"
#include "libmesh/parallel_algebra.h"
#include "libmesh/parallel_sync.h"
#include "VectorPacker.h"

registerMooseObject("StochasticToolsApp", PODReducedBasisTrainer);

InputParameters
PODReducedBasisTrainer::validParams()
{
  InputParameters params = SurrogateTrainerBase::validParams();

  params.addClassDescription("Computes the reduced subspace plus the reduced operators for a "
                             "POD-RB surrogate.");
  params.addRequiredParam<std::vector<std::string>>(
      "var_names", "Names of the variables to extract from the solution vectors.");
  params.addRequiredParam<std::vector<Real>>("error_res",
                                             "The errors allowed in the snapshot reconstruction.");
  params.addRequiredParam<std::vector<std::string>>("tag_names",
                                                    "Names of tags for the reduced operators.");
  params.addParam<std::vector<std::string>>(
      "filenames", "Files where the eigenvalues are printed for each variable (if given).");

  params.addRequiredParam<std::vector<std::string>>(
      "tag_types",
      "List of keywords describing whether the tags correspond to independent operators "
      "or not (op/op_dir/src/src_dir).");
  return params;
}
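
// An illustrative input-file block (not part of this source) showing how the
// parameters declared above might be set; the block name, tag names, and values
// below are hypothetical:
//
//   [Trainers]
//     [pod_rb]
//       type = PODReducedBasisTrainer
//       var_names = 'u'
//       error_res = '1e-9'
//       tag_names = 'diff src'
//       tag_types = 'op src'
//     []
//   []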

PODReducedBasisTrainer::PODReducedBasisTrainer(const InputParameters & parameters)
  : SurrogateTrainerBase(parameters),
    _var_names(declareModelData<std::vector<std::string>>(
        "_var_names", getParam<std::vector<std::string>>("var_names"))),
    _error_res(getParam<std::vector<Real>>("error_res")),
    _tag_names(declareModelData<std::vector<std::string>>(
        "_tag_names", getParam<std::vector<std::string>>("tag_names"))),
    _tag_types(declareModelData<std::vector<std::string>>(
        "_tag_types", getParam<std::vector<std::string>>("tag_types"))),
    _base(declareModelData<std::vector<std::vector<DenseVector<Real>>>>("_base")),
    _red_operators(declareModelData<std::vector<DenseMatrix<Real>>>("_red_operators")),
    _base_completed(false),
    _empty_operators(true)
{
  if (_error_res.size() != _var_names.size())
    paramError("error_res",
               "The number of elements is not equal to the number"
               " of elements in 'var_names'!");

  if (_tag_names.size() != _tag_types.size())
    paramError("tag_types",
               "The number of elements is not equal to the number"
               " of elements in 'tag_names'!");

  std::vector<std::string> available_names{"op", "src", "op_dir", "src_dir"};
  for (const auto & tag_type : _tag_types)
  {
    auto it = std::find(available_names.begin(), available_names.end(), tag_type);
    if (it == available_names.end())
      paramError("tag_types",
                 "Tag type '",
                 tag_type,
                 "' is not valid, available names are:",
                 " op, op_dir, src, src_dir");
  }
}

void
PODReducedBasisTrainer::initialSetup()
{
  // Initializing the containers for the essential data to construct the
  // reduced operators.

  _snapshots.clear();
  _snapshots.resize(_var_names.size(), DistributedSnapshots(_communicator));

  _base.clear();
  _base.resize(_var_names.size());

  _corr_mx.clear();
  _corr_mx.resize(_var_names.size());

  _eigenvalues.clear();
  _eigenvalues.resize(_var_names.size());

  _eigenvectors.clear();
  _eigenvectors.resize(_var_names.size());
}

void
PODReducedBasisTrainer::execute()
{
  // If the base is not ready yet, create it by performing the POD on the
  // snapshot matrices. Also, initialize the containers for the reduced
  // operators.
  if (!_base_completed)
  {
    computeCorrelationMatrix();
    computeEigenDecomposition();
    computeBasisVectors();
    initReducedOperators();
    _base_completed = true;
    _empty_operators = true;
  }
}

void
PODReducedBasisTrainer::finalize()
{
  // If the operators are already filled on each processor, gather and sum them
  // together. This way every processor has access to every complete reduced
  // operator.
  if (!_empty_operators)
    for (unsigned int tag_i = 0; tag_i < _red_operators.size(); ++tag_i)
      gatherSum(_red_operators[tag_i].get_values());
}

void
PODReducedBasisTrainer::addSnapshot(unsigned int var_i,
                                    unsigned int glob_i,
                                    const std::shared_ptr<DenseVector<Real>> & snapshot)
{
  _snapshots[var_i].addNewEntry(glob_i, snapshot);
}

void
PODReducedBasisTrainer::computeCorrelationMatrix()
{
  // Getting the number of snapshots. It is assumed that every variable has the
  // same number of snapshots. This assumption is used at multiple locations in
  // this source file.
  const auto no_snaps = getSnapsSize(0);

  // Initializing the correlation matrices for each variable.
  for (unsigned int var_i = 0; var_i < _snapshots.size(); ++var_i)
    _corr_mx[var_i] = DenseMatrix<Real>(no_snaps, no_snaps);
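
  // Each correlation matrix holds the pairwise inner products of one variable's
  // snapshots (the method of snapshots): C(i, j) = s_i . s_j, where s_i denotes
  // the i-th snapshot vector. The entries themselves are computed in
  // receiveObjects() below.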

  /*
    Creating a simple 2D map that distributes the elements in the correlation
    matrices to each processor. A square mesh with elements corresponding to
    the entries in the correlation matrix is used, since optimal partitioning
    routines are already implemented for it. In this case, the centroids of the
    elements are equivalent to the indices of the correlation matrix. This method
    may seem too convoluted, but is necessary to ensure that the communication
    stays balanced and scales well for runs with a large number of processors.
  */
  ReplicatedMesh mesh(_communicator, 2);
  MeshTools::Generation::build_square(
      mesh, no_snaps, no_snaps, -0.5, no_snaps - 0.5, -0.5, no_snaps - 0.5);
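
  // With this sizing every element has unit width and its centroid falls on
  // integer coordinates. For no_snaps = 4, for instance, the square spans
  // [-0.5, 3.5] in both directions, so the element centered at (1, 2)
  // represents the matrix entry C(1, 2).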

  // A flag is added to each element indicating whether the result has already
  // been computed or not.
  mesh.add_elem_integer("filled");

  // Since the correlation matrix is symmetric, it is enough to compute the
  // entries on the diagonal in addition to the elements above the diagonal.
  // Therefore, the elements in the mesh below the diagonal are deleted.
  for (Elem * elem : mesh.active_element_ptr_range())
  {
    const auto centroid = elem->vertex_average();
    if (centroid(0) > centroid(1))
      mesh.delete_elem(elem);
  }

  // The mesh is distributed among the processors.
  mesh.prepare_for_use();

  /*
    This step restructures the snapshots into an unordered map to make it easier
    to do cross-products in the later stages. Since _snapshots only contains
    pointers to the data, this should not include a considerable amount of copy
    operations.
  */
  std::unordered_map<unsigned int, std::vector<std::shared_ptr<DenseVector<Real>>>> local_vectors;
  for (unsigned int loc_vec_i = 0; loc_vec_i < _snapshots[0].getNumberOfLocalEntries(); ++loc_vec_i)
  {
    const unsigned int glob_vec_i = _snapshots[0].getGlobalIndex(loc_vec_i);
    auto & entry = local_vectors[glob_vec_i];

    for (unsigned int v_ind = 0; v_ind < _snapshots.size(); ++v_ind)
      entry.push_back(_snapshots[v_ind].getLocalEntry(loc_vec_i));
  }

  /*
    Initializing containers for the objects that will be sent to other processors.
    send_vectors is just a temporary object that ensures that the same snapshot is
    sent to other processors only once. send_map, on the other hand, will be used
    by the communicator and contains the required snapshots for each processor
    other than the current rank.
    The std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>> type
    is a container that uses (global snapshot index, variable index, snapshot) to
    identify and send/receive snapshots during the communication.
  */
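
  // As a hypothetical example: if this rank owns snapshot 3 while the matrix
  // entry (3, 5) is assigned to rank 1, then 3 is recorded in send_vectors[1]
  // and one (3, v_ind, snapshot) tuple per variable is appended to send_map[1].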

  std::unordered_map<processor_id_type, std::set<unsigned int>> send_vectors;
  std::unordered_map<
      processor_id_type,
      std::vector<std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>>>
      send_map;

  // Fill the send map with the snapshots we need to send to each processor. First,
  // we loop over the matrix entries (elements in the mesh).
  for (Elem * elem : mesh.active_element_ptr_range())
    if (elem->processor_id() != processor_id())
    {
      // The element centroids correspond to the 2D indices of the entries in
      // the correlation matrix.
      const auto centroid = elem->vertex_average();
      const unsigned int i = centroid(0);
      const unsigned int j = centroid(1);

      /*
        Checking if the current processor has the required snapshot.
        We assume that every variable has the same number of snapshots with the
        same distribution among processors. Therefore, it is enough to test
        the first variable only.
      */
      if (_snapshots[0].hasGlobalEntry(i))
      {
        // Continue loop if the snapshot is already being sent.
        if (send_vectors[elem->processor_id()].count(i))
          continue;

        // Add another entry to the map if another processor needs the owned
        // snapshot.
        send_vectors[elem->processor_id()].insert(i);
        for (unsigned int v_ind = 0; v_ind < _snapshots.size(); ++v_ind)
          send_map[elem->processor_id()].emplace_back(
              i, v_ind, _snapshots[v_ind].getGlobalEntry(i));
      }
      else if (_snapshots[0].hasGlobalEntry(j))
      {
        // Continue loop if the snapshot is already being sent.
        if (send_vectors[elem->processor_id()].count(j))
          continue;

        // Add another entry to the map if another processor needs the owned
        // snapshot.
        send_vectors[elem->processor_id()].insert(j);
        for (unsigned int v_ind = 0; v_ind < _snapshots.size(); ++v_ind)
          send_map[elem->processor_id()].emplace_back(
              j, v_ind, _snapshots[v_ind].getGlobalEntry(j));
      }
    }
    else
      // Initializing the flag value with 0 (not computed) for each element.
      elem->set_extra_integer(0, 0);

  // Creating a container for the received data. In this case, the map contains the
  // snapshots for each variable for each required global snapshot index.
  std::unordered_map<unsigned int, std::vector<std::shared_ptr<DenseVector<Real>>>>
      received_vectors;

  // Converting the member function into a functor so it can be passed to
  // push_parallel_packed_range.
  auto functor =
      [this, &mesh, &received_vectors, &local_vectors](
          processor_id_type pid,
          const std::vector<
              std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>> & vectors)
  { PODReducedBasisTrainer::receiveObjects(mesh, received_vectors, local_vectors, pid, vectors); };

  Parallel::push_parallel_packed_range(_communicator, send_map, (void *)nullptr, functor);

  // This extra call is necessary in case a processor already has all the elements
  // it needs (and hence doesn't receive any).
  functor(
      processor_id(),
      std::vector<std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>>());

  // Now the correlation matrices are gathered and summed so that every processor
  // sees all of them.
  for (auto & corr_mx : _corr_mx)
    gatherSum(corr_mx.get_values());

  // The lower triangles of the matrices are then filled using symmetry.
  for (auto & corr_mx : _corr_mx)
    for (unsigned int row_i = 1; row_i < corr_mx.m(); ++row_i)
      for (unsigned int col_i = 0; col_i < row_i; ++col_i)
        corr_mx(row_i, col_i) = corr_mx(col_i, row_i);
}

void
PODReducedBasisTrainer::receiveObjects(
    ReplicatedMesh & mesh,
    std::unordered_map<unsigned int, std::vector<std::shared_ptr<DenseVector<Real>>>> &
        received_vectors,
    std::unordered_map<unsigned int, std::vector<std::shared_ptr<DenseVector<Real>>>> &
        local_vectors,
    processor_id_type /*pid*/,
    const std::vector<std::tuple<unsigned int, unsigned int, std::shared_ptr<DenseVector<Real>>>> &
        vectors)
{
  for (auto & tuple : vectors)
  {
    const auto glob_vec_i = std::get<0>(tuple);
    const auto var_i = std::get<1>(tuple);
    const auto & vector = std::get<2>(tuple);

    // The entry that will be filled with all of the variable solutions for a vector.
    std::vector<std::shared_ptr<DenseVector<Real>>> & entry = received_vectors[glob_vec_i];

    // Size it to the number of variables in case we haven't already.
    entry.resize(_snapshots.size(), nullptr);

    // Add this variable's contribution - since this is a shared_ptr, we are
    // claiming partial ownership of the vector and do not have to make a copy.
    entry[var_i] = vector;
  }

  // Looping over the locally owned entries in the matrix and computing the
  // values.
  for (Elem * elem : mesh.active_local_element_ptr_range())
  {
    // If the matrix entry is already filled, the loop skips this element.
    if (elem->get_extra_integer(0))
      continue;

    // Getting pointers to the necessary snapshots for the matrix entry.
    // Each points to a vector of size (number of variables).
    const unsigned int i = elem->vertex_average()(0);
    const unsigned int j = elem->vertex_average()(1);
    std::vector<std::shared_ptr<DenseVector<Real>>> * i_vec = nullptr;
    std::vector<std::shared_ptr<DenseVector<Real>>> * j_vec = nullptr;

    const auto find_i_local = local_vectors.find(i);
    if (find_i_local != local_vectors.end())
      i_vec = &find_i_local->second;
    else
    {
      const auto find_i_received = received_vectors.find(i);
      if (find_i_received != received_vectors.end())
        i_vec = &find_i_received->second;
      else
        continue;
    }

    const auto find_j_local = local_vectors.find(j);
    if (find_j_local != local_vectors.end())
      j_vec = &find_j_local->second;
    else
    {
      const auto find_j_received = received_vectors.find(j);
      if (find_j_received != received_vectors.end())
        j_vec = &find_j_received->second;
      else
        continue;
    }

    // Computing the available matrix entries for every variable.
    for (unsigned int v_ind = 0; v_ind < _snapshots.size(); ++v_ind)
      _corr_mx[v_ind](i, j) = (*i_vec)[v_ind]->dot(*(*j_vec)[v_ind]);

    // Set the 'filled' flag to 1 (true) to make sure the entry is not recomputed.
    elem->set_extra_integer(0, 1);
  }
}

void
PODReducedBasisTrainer::computeEigenDecomposition()
{
  for (unsigned int v_ind = 0; v_ind < _corr_mx.size(); ++v_ind)
  {
    unsigned int no_snaps = _corr_mx[v_ind].n();

    // Initializing temporary objects for the eigenvalues and eigenvectors since
    // evd_left() returns an unordered vector of eigenvalues.
    DenseVector<Real> eigenvalues(no_snaps);
    DenseMatrix<Real> eigenvectors(no_snaps, no_snaps);

    // Creating a temporary placeholder for the imaginary parts of the eigenvalues.
    DenseVector<Real> eigenvalues_imag(no_snaps);

    // Performing the eigenvalue decomposition.
    _corr_mx[v_ind].evd_left(eigenvalues, eigenvalues_imag, eigenvectors);

    // Sorting the eigenvectors and eigenvalues based on the magnitude of
    // the eigenvalues.
    std::vector<unsigned int> idx(eigenvalues.size());
    std::iota(idx.begin(), idx.end(), 0);
    std::vector<Real> & v = eigenvalues.get_values();

    // Getting the indices to be able to copy the corresponding eigenvector too.
    std::stable_sort(
        idx.begin(), idx.end(), [&v](unsigned int i, unsigned int j) { return v[i] > v[j]; });
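
    // For example, eigenvalues (2, 5, 1) give idx = (1, 0, 2): idx[j] is the
    // original position of the j-th largest eigenvalue and is used below to
    // select the matching eigenvector column after v has been sorted in place.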

    // Getting a cutoff for the number of modes. The function requires a sorted list,
    // thus the temporary vector is sorted.
    std::stable_sort(v.begin(), v.end(), std::greater<Real>());
    unsigned int cutoff = determineNumberOfModes(_error_res[v_ind], v);

    // Initializing the actual containers for the eigenvectors and eigenvalues.
    _eigenvalues[v_ind] = DenseVector<Real>(cutoff);
    _eigenvectors[v_ind] = DenseMatrix<Real>(eigenvectors.m(), cutoff);

    // Copying the kept eigenvalues and eigenvectors into sorted containers.
    for (unsigned int j = 0; j < cutoff; ++j)
    {
      _eigenvalues[v_ind](j) = eigenvalues(j);
      for (unsigned int k = 0; k < _eigenvectors[v_ind].m(); ++k)
        _eigenvectors[v_ind](k, j) = eigenvectors(k, idx[j]);
    }

    printEigenvalues();
  }
}

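// Returns the smallest number of modes r for which the retained fraction of the
// (descending) eigenvalue list satisfies
//   (lambda_1 + ... + lambda_r) / (lambda_1 + ... + lambda_N) > 1 - error,
// falling back to all N modes if the threshold is never reached.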
unsigned int
PODReducedBasisTrainer::determineNumberOfModes(Real error, const std::vector<Real> & inp_vec) const
{
  Real sum = std::accumulate(inp_vec.begin(), inp_vec.end(), 0.0);

  Real part_sum = 0.0;
  for (unsigned int i = 0; i < inp_vec.size(); ++i)
  {
    part_sum += inp_vec[i];
    if (part_sum / sum > 1 - error)
      return i + 1;
  }
  return inp_vec.size();
}

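// Assembles the POD basis vectors as linear combinations of the snapshots,
//   phi_k = (1 / sqrt(lambda_k)) * sum_i e_k(i) * s_i,
// where e_k is the k-th (sorted) eigenvector of the correlation matrix. The
// 1/sqrt(lambda_k) factor normalizes the basis vectors.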
void
PODReducedBasisTrainer::computeBasisVectors()
{
  // Looping over all the variables.
  for (unsigned int var_i = 0; var_i < _eigenvectors.size(); ++var_i)
  {
    unsigned int no_bases = _eigenvalues[var_i].size();
    unsigned int no_snaps = _snapshots[var_i].getNumberOfLocalEntries();

    _base[var_i].resize(no_bases);

    // Filling the containers using the local snapshots and the eigenvalues and
    // eigenvectors of the correlation matrices.
    for (unsigned int base_i = 0; base_i < no_bases; ++base_i)
    {
      _base[var_i][base_i].resize(_snapshots[var_i].getLocalEntry(0)->size());

      for (unsigned int loc_i = 0; loc_i < no_snaps; ++loc_i)
      {
        unsigned int glob_i = _snapshots[var_i].getGlobalIndex(loc_i);
        const DenseVector<Real> & snapshot = *_snapshots[var_i].getLocalEntry(loc_i);

        for (unsigned int i = 0; i < _base[var_i][base_i].size(); ++i)
          _base[var_i][base_i](i) += _eigenvectors[var_i](glob_i, base_i) * snapshot(i);
      }

      // Gathering and summing the local contributions over all of the processes.
      // This makes sure that every process sees all of the basis functions.
      gatherSum(_base[var_i][base_i].get_values());

      // Normalizing the basis functions to make sure they are orthonormal.
      _base[var_i][base_i].scale(1.0 / sqrt(_eigenvalues[var_i](base_i)));
    }
  }
}

void
PODReducedBasisTrainer::initReducedOperators()
{
  _red_operators.resize(_tag_names.size());

  // Getting the sum of the ranks of the different bases. Every reduced operator
  // is resized with this number. This way the construction can be more general.
  unsigned int base_num = getSumBaseSize();

  // Initializing each operator (each operator with a tag).
  for (unsigned int tag_i = 0; tag_i < _red_operators.size(); ++tag_i)
  {
    if (_tag_types[tag_i] == "src" || _tag_types[tag_i] == "src_dir")
      _red_operators[tag_i].resize(base_num, 1);
    else
      _red_operators[tag_i].resize(base_num, base_num);
  }
}

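// Fills column base_i of a reduced operator with the Galerkin projection of the
// supplied residual (for 'op'-type tags, typically the full-order operator applied
// to basis vector base_i; for 'src'-type tags, a source vector), i.e.
//   red_op(row, base_i) = phi_row . residual,
// where row runs over the concatenated bases of all variables.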
void
PODReducedBasisTrainer::addToReducedOperator(unsigned int base_i,
                                             unsigned int tag_i,
                                             std::vector<DenseVector<Real>> & residual)
{
  // Computing the elements of the reduced operator using Galerkin projection
  // on the residual. This is done in parallel and the local contributions are
  // gathered in finalize().
  unsigned int counter = 0;
  for (unsigned int var_i = 0; var_i < _var_names.size(); ++var_i)
  {
    for (unsigned int base_j = 0; base_j < _base[var_i].size(); ++base_j)
    {
      _red_operators[tag_i](counter, base_i) = residual[var_i].dot(_base[var_i][base_j]);
      counter++;
    }
  }

  _empty_operators = false;
}

unsigned int
PODReducedBasisTrainer::getSnapsSize(unsigned int var_i) const
{
  return _snapshots[var_i].getNumberOfGlobalEntries();
}

unsigned int
PODReducedBasisTrainer::getSumBaseSize() const
{
  unsigned int sum = 0;

  for (unsigned int i = 0; i < _base.size(); ++i)
    sum += _base[i].size();

  return sum;
}

const DenseVector<Real> &
PODReducedBasisTrainer::getBasisVector(unsigned int var_i, unsigned int base_i) const
{
  return _base[var_i][base_i];
}

const DenseVector<Real> &
PODReducedBasisTrainer::getBasisVector(unsigned int glob_i) const
{
  unsigned int counter = 0;

  for (unsigned int var_i = 0; var_i < _var_names.size(); ++var_i)
    for (unsigned int base_i = 0; base_i < _base[var_i].size(); ++base_i)
    {
      if (glob_i == counter)
        return _base[var_i][base_i];

      counter += 1;
    }

  mooseError("The basis vector with global index ", glob_i, " is not available!");
  return _base[0][0];
}

unsigned int
PODReducedBasisTrainer::getVariableIndex(unsigned int glob_i) const
{
  unsigned int counter = 0;

  for (unsigned int var_i = 0; var_i < _var_names.size(); ++var_i)
    for (unsigned int base_i = 0; base_i < _base[var_i].size(); ++base_i)
    {
      if (glob_i == counter)
        return var_i;

      counter += 1;
    }

  mooseError("Variable with global base index ", glob_i, " is not available!");
  return 0;
}

void
PODReducedBasisTrainer::printEigenvalues()
{
  if (processor_id() == 0 && _tid == 0 && isParamValid("filenames"))
  {
    std::vector<std::string> filenames = getParam<std::vector<std::string>>("filenames");

    if (filenames.size() != _var_names.size())
      paramError("filenames",
                 "The number of file names is not equal to the number of variable names!");

    for (unsigned int var_i = 0; var_i < _var_names.size(); ++var_i)
    {
      std::filebuf fb;
      fb.open(filenames[var_i], std::ios::out);
      std::ostream os(&fb);
      os << "evs" << std::endl;
      _eigenvalues[var_i].print_scientific(os);
    }
  }
}