// NOTE(review): garbled extraction fragment. The integers embedded at the
// start of each line (14, 16, 17, 28, ...) are the ORIGINAL file's line
// numbers; interior lines of the original are missing from this chunk.
// Below: PETSc/libMesh includes fused onto one line, followed by the tail of
// an optimizer-options constructor signature and its member-initializer list
// (Adam hyperparameter-tuning options: iteration count, minibatch size,
// learning rate -- presumably; confirm against the full original file).
14 #include <petscdmda.h> 16 #include "libmesh/petsc_vector.h" 17 #include "libmesh/petsc_matrix.h" 28 const unsigned int num_iter,
29 const unsigned int batch_size,
30 const Real learning_rate,
// Member-initializer list: each stored option simply copies its same-named
// constructor parameter (lines 31-34 and 36 of the original are missing here).
35 : show_every_nth_iteration(show_every_nth_iteration),
37 batch_size(batch_size),
38 learning_rate(learning_rate),
// Fragment of a setup/initialize routine: parameter tail (names of the
// hyperparameters to tune plus optional per-parameter lower/upper bounds),
// then covariance-matrix sizing and response flattening.
50 const std::vector<std::string> & params_to_tune,
51 const std::vector<Real> & min,
52 const std::vector<Real> & max)
// _K is square with side (num samples x num outputs): the multi-output GP
// covariance over the flattened training responses.
81 _K.resize(training_params.rows() * training_data.cols(),
82 training_params.rows() * training_data.cols());
// Flatten the (samples x outputs) response matrix into a single column
// vector so it conforms with _K above.
86 training_data.reshaped(training_params.rows() * training_data.cols(), 1);
// Fragment: builds the tuning map for the named hyperparameters, applying
// user-supplied lower/upper bounds when provided.
103 const std::vector<Real> & min_vector,
104 const std::vector<Real> & max_vector)
// BUG(review): these two flags look swapped relative to their names --
// `upper_bounds_specified` is derived from min_vector and
// `lower_bounds_specified` from max_vector. Downstream (original line 126),
// min_vector[param_i] is read whenever `lower_bounds_specified` is true,
// i.e. whenever max_vector is NON-empty -- an out-of-bounds read if only
// upper bounds were supplied. The conventional form is
//   upper_bounds_specified = max_vector.size();
//   lower_bounds_specified = min_vector.size();
// Cannot be fixed in this fragment (surrounding lines are missing) -- verify
// against the full original file.
108 const bool upper_bounds_specified = min_vector.size();
109 const bool lower_bounds_specified = max_vector.size();
111 for (
const auto param_i :
index_range(params_to_tune))
113 const auto & hp = params_to_tune[param_i];
// Hard error when a requested hyperparameter is unknown to the covariance.
123 ::mooseError(
"The covariance parameter ", hp,
" could not be found!");
// Override the covariance's default bounds only when the user gave vectors.
126 min = lower_bounds_specified ? min_vector[param_i] :
min;
127 max = upper_bounds_specified ? max_vector[param_i] :
max;
// Fragment: Adam optimization of the GP hyperparameters (standard Adam with
// bias-corrected first/second moments m_hat, v_hat -- Kingma & Ba 2014).
// Many interior lines (loop bodies, declarations of b1/b2/eps/m0/v0/theta,
// inputs/outputs minibatch matrices) are missing from this chunk.
172 Real store_loss = 0.0;
173 std::vector<Real> grad1;
// v_sequence = [0, 1, ..., num_samples-1]; shuffled each epoch to draw
// random minibatches.
176 std::vector<unsigned int> v_sequence(training_params.rows());
177 std::iota(std::begin(v_sequence), std::end(v_sequence), 0);
181 Moose::out <<
"OPTIMIZING GP HYPER-PARAMETERS USING Adam" << std::endl;
182 for (
unsigned int ss = 0; ss < opts.
num_iter; ++ss)
// NOTE(review): the generator is reseeded with the SAME fixed seed inside
// every iteration of the epoch loop, so the shuffle below yields the same
// permutation each epoch -- the minibatch is effectively fixed across
// iterations. If stochastic minibatching is intended, seed once before the
// loop instead. TODO confirm intent against the full original file.
186 generator.
seed(0, 1980);
188 MooseUtils::shuffle<unsigned int>(v_sequence, generator, 0);
// Gather the shuffled minibatch rows into inputs/outputs (the enclosing
// ii loop header, original lines 189-190, is missing from this chunk).
191 for (
unsigned int jj = 0; jj < training_params.cols(); ++jj)
192 inputs(ii, jj) = training_params(v_sequence[ii], jj);
194 for (
unsigned int jj = 0; jj < training_data.cols(); ++jj)
195 outputs(ii, jj) = training_data(v_sequence[ii], jj);
// Loss (negative log-likelihood) on the current minibatch; also refreshes
// internal state used by the gradient (lines in between are missing).
198 store_loss =
getLoss(inputs, outputs);
200 Moose::out <<
"Iteration: " << ss + 1 <<
" LOSS: " << store_loss << std::endl;
// Per-hyperparameter Adam update: `iter` maps a parameter name to a tuple of
// (first flat index, entry count, min bound, max bound).
204 const auto first_index = std::get<0>(iter->second);
205 const auto num_entries = std::get<1>(iter->second);
206 for (
unsigned int ii = 0; ii < num_entries; ++ii)
208 const auto global_index = first_index + ii;
// First-moment EMA; the second-moment EMA's left-hand side (original line
// 210, `v0[global_index] =`) is missing from this chunk.
209 m0[global_index] = b1 * m0[global_index] + (1 - b1) * grad1[global_index];
211 b2 * v0[global_index] + (1 - b2) * grad1[global_index] * grad1[global_index];
// Bias correction: divide by (1 - beta^t) with t = ss + 1.
212 m_hat = m0[global_index] / (1 -
std::pow(b1, (ss + 1)));
213 v_hat = v0[global_index] / (1 -
std::pow(b2, (ss + 1)));
214 new_val = theta[global_index] - opts.
learning_rate * m_hat / (std::sqrt(v_hat) +
eps);
216 const auto min_value = std::get<2>(iter->second);
217 const auto max_value = std::get<3>(iter->second);
// Projected update: clamp the new value into [min_value, max_value].
219 theta[global_index] = std::min(std::max(new_val, min_value), max_value);
227 Moose::out <<
"OPTIMIZED GP HYPER-PARAMETERS:" << std::endl;
229 Moose::out <<
"FINAL LOSS: " << store_loss << std::endl;
// Fragment: GP log-marginal-likelihood loss. Accumulates (up to sign) the
// data-fit term (original line ~243, missing here), the complexity term
// -log|K|, and the normalization constant, then returns -ll/2 as the loss.
238 RealEigenMatrix flattened_data = outputs.reshaped(outputs.rows() * outputs.cols(), 1);
242 Real log_likelihood = 0;
// NOTE(review): log(det K) via determinant() is numerically fragile -- the
// determinant over/underflows for moderately sized K. The stable form is
// 2 * sum(log(diag(L))) from the Cholesky factor (which is presumably
// already computed elsewhere for the solve). Worth confirming/fixing in the
// full file.
244 log_likelihood += -std::log(
_K.determinant());
// Normalization: n * log(2*pi), with n = minibatch size.
245 log_likelihood -=
_batch_size * std::log(2 * M_PI);
// Convert accumulated log-likelihood terms into the minimized loss.
246 log_likelihood = -log_likelihood / 2;
247 return log_likelihood;
// Fragment: gradient of the loss w.r.t. each tunable hyperparameter,
// flattened into grad_vec (its resize, original lines 256-258, is missing
// from this chunk -- presumably sized to the total tuning-vector length).
255 std::vector<Real> grad_vec;
// `iter` walks the tuning map: name -> (first flat index, entry count, ...).
259 std::string hyper_param_name = iter->first;
260 const auto first_index = std::get<0>(iter->second);
261 const auto num_entries = std::get<1>(iter->second);
262 for (
unsigned int ii = 0; ii < num_entries; ++ii)
264 const auto global_index = first_index + ii;
// Trace term of d(-ll/2)/d(theta); `tmp` is built on missing lines
// (presumably from K^-1 and dK/dtheta via computedKdhyper) -- verify.
267 grad_vec[global_index] = -tmp.trace() / 2.0;
// Fragment: scatter named hyperparameters (scalar and vector) from the two
// maps into the flat tuning vector `vec`, using the tuning map's
// (first index, entry count) layout per parameter name.
275 const std::unordered_map<std::string, std::tuple<unsigned int, unsigned int, Real, Real>> &
277 const std::unordered_map<std::string, Real> & scalar_map,
278 const std::unordered_map<std::string, std::vector<Real>> & vector_map,
279 std::vector<Real> & vec)
// NOTE(review): `auto iter : tuning_data` copies a pair<string, tuple> per
// iteration; `const auto & iter` would avoid the string copy.
const 281 for (
auto iter : tuning_data)
283 const std::string & param_name = iter.first;
// Scalar hyperparameter: single slot at its first index.
284 const auto scalar_it = scalar_map.find(param_name);
285 if (scalar_it != scalar_map.end())
286 vec[std::get<0>(iter.second)] = scalar_it->second;
// Vector hyperparameter: contiguous run of std::get<1> entries.
289 const auto vector_it = vector_map.find(param_name);
290 if (vector_it != vector_map.end())
291 for (
unsigned int ii = 0; ii < std::get<1>(iter.second); ++ii)
292 vec[std::get<0>(iter.second) + ii] = (vector_it->second)[ii];
// Fragment: inverse of mapToVec -- gather values from the flat tuning vector
// `vec` back into the scalar and vector hyperparameter maps.
299 const std::unordered_map<std::string, std::tuple<unsigned int, unsigned int, Real, Real>> &
301 std::unordered_map<std::string, Real> & scalar_map,
302 std::unordered_map<std::string, std::vector<Real>> & vector_map,
303 const std::vector<Real> & vec)
// NOTE(review): `auto iter : tuning_data` copies a pair per iteration, and
// find() followed by operator[] does two lookups; an iterator from find()
// could be reused. Behavior is correct, just wasteful.
const 305 for (
auto iter : tuning_data)
307 const std::string & param_name = iter.first;
308 if (scalar_map.find(param_name) != scalar_map.end())
309 scalar_map[param_name] = vec[std::get<0>(iter.second)];
310 else if (vector_map.find(param_name) != vector_map.end())
311 for (
unsigned int ii = 0; ii < std::get<1>(iter.second); ++ii)
312 vector_map[param_name][ii] = vec[std::get<0>(iter.second) + ii];
// Fragment: checkpoint/restart serialization of an Eigen Cholesky (LLT)
// decomposition -- only the signature survives in this chunk; the body
// (original lines 321-329) presumably stores the lower-triangular factor L
// to `stream`. `context` follows the MOOSE dataStore convention.
320 dataStore(std::ostream & stream, Eigen::LLT<RealEigenMatrix> & decomp,
void * context)
// Fragment: deserialize an Eigen LLT decomposition. Missing lines
// (331-333) presumably read the factor matrix L from `stream`; the
// decomposition is then rebuilt by re-factorizing L * L^T.
// NOTE(review): recomputing the factorization of L*L^T is O(n^3) work to
// recover a factor that was already known -- acceptable for checkpoint
// loading, but worth noting; Eigen's LLT offers no public way to inject L
// directly, which is presumably why this roundtrip exists.
330 dataLoad(std::istream & stream, Eigen::LLT<RealEigenMatrix> & decomp,
void * context)
334 decomp.compute(L * L.transpose());
void loadHyperParamMap(const std::unordered_map< std::string, Real > &map, const std::unordered_map< std::string, std::vector< Real >> &vec_map)
Load some hyperparameters into the local maps contained in this object.
void seed(std::size_t i, unsigned int seed)
Base class for covariance functions that are used in Gaussian Processes.
virtual const std::string & name() const
auto max(const L &left, const R &right)
void dataLoad(std::istream &stream, Eigen::LLT< RealEigenMatrix > &decomp, void *context)
void dataStore(std::ostream &stream, Eigen::LLT< RealEigenMatrix > &decomp, void *context)
const std::string & type() const
const std::vector< UserObjectName > & dependentCovarianceNames() const
Get the names of the dependent covariances.
void buildHyperParamMap(std::unordered_map< std::string, Real > &map, std::unordered_map< std::string, std::vector< Real >> &vec_map) const
Populates the input maps with the owned hyperparameters.
Eigen::Matrix< Real, Eigen::Dynamic, Eigen::Dynamic > RealEigenMatrix
std::string stringify(const T &t)
void dependentCovarianceTypes(std::map< UserObjectName, std::string > &name_type_map) const
Populate a map with the names and types of the dependent covariance functions.
DIE A HORRIBLE DEATH HERE typedef LIBMESH_DEFAULT_SCALAR_TYPE Real
virtual void computeCovarianceMatrix(RealEigenMatrix &K, const RealEigenMatrix &x, const RealEigenMatrix &xp, const bool is_self_covariance) const =0
Generates the Covariance Matrix given two sets of points in the parameter space.
static const std::string alpha
unsigned int numOutputs() const
Return the number of outputs assumed for this covariance function.
auto min(const L &left, const R &right)
virtual bool computedKdhyper(RealEigenMatrix &dKdhp, const RealEigenMatrix &x, const std::string &hyper_param_name, unsigned int ind) const
Redirect dK/dhp for hyperparameter "hp".
MooseUnits pow(const MooseUnits &, int)
virtual bool getTuningData(const std::string &name, unsigned int &size, Real &min, Real &max) const
Get the default minimum and maximum and size of a hyperparameter.
auto index_range(const T &sizable)
virtual bool isTunable(const std::string &name) const
Check if a given parameter is tunable.