POD.C
//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "MooseError.h"
#include "POD.h"

using namespace libMesh;

namespace StochasticTools
{

#if PETSC_VERSION_LESS_THAN(3, 14, 0)

POD::POD(const ParallelSolutionStorage * const, const std::string &, const Parallel::Communicator &)
{
  mooseError("PETSc-3.14.0 or higher is required for using StochasticTools::POD.");
}

#else

POD::POD(const ParallelSolutionStorage * const parallel_storage,
         const std::string & extra_slepc_options,
         const Parallel::Communicator & comm)
  : _parallel_storage(parallel_storage),
    _extra_slepc_options(extra_slepc_options),
    _communicator(comm)
{
}

#endif

void
POD::computePOD(const VariableName & vname,
                std::vector<DenseVector<Real>> & left_basis_functions,
                std::vector<DenseVector<Real>> & right_basis_functions,
                std::vector<Real> & singular_values,
                const dof_id_type num_modes,
                const Real energy) const
{

#if !PETSC_VERSION_LESS_THAN(3, 14, 0)

  // Define the petsc matrix which needs an SVD; we will populate it using the snapshots
  Mat mat;

  // We make sure every rank knows how many global and local samples we have and how long the
  // snapshots are. At this point we assume that the snapshots are the same size so we don't
  // need to map them to a reference domain.
  dof_id_type local_rows = 0;
  dof_id_type snapshot_size = 0;
  dof_id_type global_rows = 0;
  if (_parallel_storage->getStorage().size())
  {
    for (const auto & row : _parallel_storage->getStorage(vname))
    {
      local_rows += row.second.size();
      if (row.second.size())
        snapshot_size = row.second[0].size();
    }
    global_rows = local_rows;
  }

  _communicator.sum(global_rows);
  _communicator.max(snapshot_size);

  // Generally snapshot matrices are dense.
  LibmeshPetscCallA(
      _communicator.get(),
      MatCreateDense(
          _communicator.get(), local_rows, PETSC_DECIDE, global_rows, snapshot_size, NULL, &mat));

  // Check where the local rows begin in the matrix; we use these to convert from local to
  // global indices
  dof_id_type local_beg = 0;
  dof_id_type local_end = 0;
  LibmeshPetscCallA(
      _communicator.get(),
      MatGetOwnershipRange(mat, numeric_petsc_cast(&local_beg), numeric_petsc_cast(&local_end)));

  unsigned int counter = 0;
  if (local_rows)
    for (const auto & row : _parallel_storage->getStorage(vname))
    {
      // Add each snapshot individually; a problem may produce multiple snapshots per run.
      for (const auto & snap : row.second)
      {
        std::vector<PetscInt> rows(snapshot_size, (counter++) + local_beg);

        // Fill the column indices with 0,1,...,snapshot_size-1
        std::vector<PetscInt> columns(snapshot_size);
        std::iota(std::begin(columns), std::end(columns), 0);

        // Set the row of the dense matrix corresponding to this snapshot
        LibmeshPetscCallA(_communicator.get(),
                          MatSetValues(mat,
                                       1,
                                       rows.data(),
                                       snapshot_size,
                                       columns.data(),
                                       snap.get_values().data(),
                                       INSERT_VALUES));
      }
    }

  // Assemble the matrix
  LibmeshPetscCallA(_communicator.get(), MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY));
  LibmeshPetscCallA(_communicator.get(), MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY));

  SVD svd;
  LibmeshPetscCallA(_communicator.get(), SVDCreate(_communicator.get(), &svd));
  // Now we set the operators for our SVD object
  LibmeshPetscCallA(_communicator.get(), SVDSetOperators(svd, mat, NULL));

  // Set the parallel operation mode to "DISTRIBUTED", default is "REDUNDANT"
  DS ds;
  LibmeshPetscCallA(_communicator.get(), SVDGetDS(svd, &ds));
  LibmeshPetscCallA(_communicator.get(), DSSetParallel(ds, DS_PARALLEL_DISTRIBUTED));

  // We use the Lanczos method; the choice might be exposed to the user
  // at some point.
  LibmeshPetscCallA(_communicator.get(), SVDSetType(svd, SVDTRLANCZOS));

  // By default the transpose is created explicitly. The implicit transpose is less efficient
  // computationally but requires less storage.
  LibmeshPetscCallA(_communicator.get(), SVDSetImplicitTranspose(svd, PETSC_TRUE));

  LibmeshPetscCallA(_communicator.get(),
                    PetscOptionsInsertString(NULL, _extra_slepc_options.c_str()));

  // Set the subspace size for the Lanczos method; we take twice as many basis vectors as the
  // requested number of POD modes. In most cases this guarantees the convergence of the
  // singular triplets.
  LibmeshPetscCallA(_communicator.get(),
                    SVDSetDimensions(svd,
                                     num_modes,
                                     std::min(2 * num_modes, global_rows),
                                     std::min(2 * num_modes, global_rows)));

  // Gives the user the ability to override any option set before the solve.
  LibmeshPetscCallA(_communicator.get(), SVDSetFromOptions(svd));

  // Compute the singular value triplets
  LibmeshPetscCallA(_communicator.get(), SVDSolve(svd));

  // Check how many singular triplets converged
  PetscInt nconv;
  LibmeshPetscCallA(_communicator.get(), SVDGetConverged(svd, &nconv));

  // We start extracting the basis functions and the singular values.

  // Find the local size needed for u
  dof_id_type local_snapsize = 0;
  LibmeshPetscCallA(_communicator.get(),
                    MatGetLocalSize(mat, NULL, numeric_petsc_cast(&local_snapsize)));

  // Vectors which temporarily hold the singular vectors fetched from SLEPc
  PetscVector<Real> u(_communicator);
  u.init(snapshot_size, local_snapsize, false, PARALLEL);

  PetscVector<Real> v(_communicator);
  v.init(global_rows, local_rows, false, PARALLEL);

  left_basis_functions.clear();
  right_basis_functions.clear();
  singular_values.clear();

  singular_values.resize(nconv);
  // Fetch the singular value triplet and immediately save the singular value
  for (PetscInt j = 0; j < nconv; ++j)
    LibmeshPetscCallA(_communicator.get(),
                      SVDGetSingularTriplet(svd, j, &singular_values[j], NULL, NULL));

  // Determine how many modes we need
  unsigned int num_requested_modes = determineNumberOfModes(singular_values, num_modes, energy);
  // Only save the basis functions which are needed. We serialize the modes
  // on every processor so all of them have access to every mode.
  left_basis_functions.resize(num_requested_modes);
  right_basis_functions.resize(num_requested_modes);
  for (PetscInt j = 0; j < cast_int<PetscInt>(num_requested_modes); ++j)
  {
    LibmeshPetscCallA(_communicator.get(), SVDGetSingularTriplet(svd, j, NULL, v.vec(), u.vec()));
    u.localize(left_basis_functions[j].get_values());
    v.localize(right_basis_functions[j].get_values());
  }
  LibmeshPetscCallA(_communicator.get(), MatDestroy(&mat));
  LibmeshPetscCallA(_communicator.get(), SVDDestroy(&svd));
#else
  // These variables would otherwise be unused
  libmesh_ignore(vname);
  libmesh_ignore(left_basis_functions);
  libmesh_ignore(right_basis_functions);
  libmesh_ignore(singular_values);
  libmesh_ignore(num_modes);
  libmesh_ignore(energy);
#endif
}

dof_id_type
POD::determineNumberOfModes(const std::vector<Real> & singular_values,
                            const dof_id_type num_modes_compute,
                            const Real energy) const
{
  dof_id_type num_modes = 0;
  // We either use the number of modes defined by the user or the maximum number of converged
  // modes. We don't want to use modes which are unconverged.
  std::size_t num_requested_modes =
      std::min((std::size_t)num_modes_compute, singular_values.size());
  // Build a cumulative sum of the squared singular values
  std::vector<Real> ev_sum(singular_values.begin(), singular_values.begin() + num_requested_modes);
  std::partial_sum(ev_sum.cbegin(),
                   ev_sum.cend(),
                   ev_sum.begin(),
                   [](Real sum, Real ev) { return sum + ev * ev; });

  // Find the first element that satisfies the threshold
  const Real threshold = energy;
  for (num_modes = 0; num_modes < ev_sum.size(); ++num_modes)
    if (ev_sum[num_modes] / ev_sum.back() > 1 - threshold)
      break;

  return num_modes + 1;
}
}
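
For reference, a minimal usage sketch follows; it is not part of POD.C. It assumes a ParallelSolutionStorage object that already holds snapshots for a variable named "u" and a libMesh communicator; the function name, the empty SLEPc option string, the requested mode count, and the energy tolerance are illustrative only.

// Usage sketch (illustrative only, not part of the MOOSE source tree).
#include "POD.h" // assumed to pull in the declarations used below

using namespace libMesh;

void
examplePOD(const ParallelSolutionStorage & storage, const Parallel::Communicator & comm)
{
  // Keep the option string alive for the lifetime of the POD object, since the
  // constructor stores a reference to it.
  const std::string extra_slepc_options = "";
  StochasticTools::POD pod(&storage, extra_slepc_options, comm);

  std::vector<DenseVector<Real>> left_basis;  // one vector per mode, of snapshot (DOF) length
  std::vector<DenseVector<Real>> right_basis; // one vector per mode, of length = number of snapshots
  std::vector<Real> singular_values;

  // Request at most 10 modes for variable "u"; the basis is truncated once the cumulative
  // energy fraction exceeds 1 - energy (see determineNumberOfModes above).
  pod.computePOD("u", left_basis, right_basis, singular_values, 10, 1e-6);
}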