GaussianProcessHandler
Overview
The GaussianProcessHandler collects the data structures and functions commonly needed by every object that uses or modifies Gaussian Processes, such as GaussianProcessTrainer, GaussianProcess, or Active Learning objects. It provides access to the covariance function and stores the covariance matrix together with its Cholesky decomposition and the corresponding inverse action.
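For orientation, these stored quantities map directly onto the standard Gaussian Process prediction formulas. In the usual notation (a sketch only; the symbols are not variable names from the code), for a training covariance matrix K, training outputs y, and a test point with cross-covariance vector k_* and self-covariance k_**,

\mu_* = k_*^\top K^{-1} y, \qquad \sigma_*^2 = k_{**} - k_*^\top K^{-1} k_*,

where the inverse action K^{-1} y is exactly what the handler stores (computed through the Cholesky decomposition of K) and later reuses in GaussianProcess::evaluate.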
Initializing
An object that requires access to Gaussian Process-related functionality should hold the handler as a member variable, which is also important for enabling restart capabilities:
StochasticTools::GaussianProcessHandler & _gp_handler;
(modules/stochastic_tools/include/surrogates/GaussianProcessTrainer.h)
An important step is the initialization of the covariance function, which can be done either with the initialize
function as in GaussianProcessTrainer:
_gp_handler.initialize(
getCovarianceFunctionByName(parameters.get<UserObjectName>("covariance_function")),
tune_parameters,
lower_bounds,
upper_bounds);
(modules/stochastic_tools/src/surrogates/GaussianProcessTrainer.C)
Or by linking it directly, as in GaussianProcess:
_gp_handler.linkCovarianceFunction(getCovarianceFunctionByName(covar_name));
(modules/stochastic_tools/src/surrogates/GaussianProcess.C)
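Putting these pieces together, a trainer-like object typically declares the handler as restartable model data and initializes it in its constructor. The sketch below is illustrative only: the class name and the surrounding parameter handling are assumptions, while the handler calls mirror GaussianProcessTrainer.

// Minimal sketch of wiring up the handler in a hypothetical trainer (not an actual MOOSE class)
MyGPTrainer::MyGPTrainer(const InputParameters & parameters)
  : SurrogateTrainer(parameters),
    CovarianceInterface(parameters),
    // Declaring the handler as model data is what makes it restartable
    _gp_handler(declareModelData<StochasticTools::GaussianProcessHandler>("_gp_handler"))
{
  // Hyperparameter names and bounds would normally be read from the input file
  const std::vector<std::string> tune_parameters;
  const std::vector<Real> lower_bounds, upper_bounds;
  _gp_handler.initialize(
      getCovarianceFunctionByName(parameters.get<UserObjectName>("covariance_function")),
      tune_parameters,
      lower_bounds,
      upper_bounds);
}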
Creating a covariance matrix
Once the covariance function has been added to the handler, it can be used to create a covariance matrix, either through setupCovarianceMatrix
as in GaussianProcessTrainer:
_gp_handler.setupCovarianceMatrix(_training_params, _training_data, _optimization_opts);
(modules/stochastic_tools/src/surrogates/GaussianProcessTrainer.C)
Or by calling the covariance matrix builder directly, as in GaussianProcess:
_gp_handler.getCovarFunction().computeCovarianceMatrix(K_test, test_points, test_points, true);
(modules/stochastic_tools/src/surrogates/GaussianProcess.C)
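These matrices are what a surrogate needs at evaluation time. The following is a minimal sketch of a single-point prediction, assuming a handler gp that has already been set up by a trainer; it mirrors GaussianProcess::evaluate, but the free-standing function and its name are hypothetical.

#include "GaussianProcessHandler.h"

// Predictive mean and standard deviation at one input point x.
// `gp` is assumed to have been filled by a trainer; `training_params` are the
// standardized training inputs stored alongside it.
Real
predictGP(StochasticTools::GaussianProcessHandler & gp,
          const RealEigenMatrix & training_params,
          const std::vector<Real> & x,
          Real & std_dev)
{
  RealEigenMatrix test_point(1, x.size());
  for (unsigned int ii = 0; ii < x.size(); ++ii)
    test_point(0, ii) = x[ii];
  // Map the test point into the standardized space used during training
  gp.getParamStandardizer().getStandardized(test_point);
  // Cross-covariance between training points and the test point, plus test self-covariance
  RealEigenMatrix K_train_test(training_params.rows(), 1);
  gp.getCovarFunction().computeCovarianceMatrix(K_train_test, training_params, test_point, false);
  RealEigenMatrix K_test(1, 1);
  gp.getCovarFunction().computeCovarianceMatrix(K_test, test_point, test_point, true);
  // Mean: k_*^T K^{-1} y, using the stored inverse action, then de-standardize
  RealEigenMatrix mean = K_train_test.transpose() * gp.getKResultsSolve();
  gp.getDataStandardizer().getDestandardized(mean);
  // Variance: k_** - k_*^T K^{-1} k_*, using the stored Cholesky decomposition
  RealEigenMatrix var =
      K_test - K_train_test.transpose() * gp.getKCholeskyDecomp().solve(K_train_test);
  RealEigenMatrix std_mat = var.array().sqrt();
  gp.getDataStandardizer().getDescaled(std_mat);
  std_dev = std_mat(0, 0);
  return mean(0, 0);
}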
Optimizing hyperparameters
As described in GaussianProcessTrainer, the covariance function may have hyperparameters that need to be optimized before the corresponding Gaussian Process can give accurate predictions; the objective used for this tuning and its gradient are summarized at the end of this section. The hyperparameters can be optimized during the generation of the covariance matrix:
_gp_handler.setupCovarianceMatrix(_training_params, _training_data, _optimization_opts);
(modules/stochastic_tools/src/surrogates/GaussianProcessTrainer.C)
Or by calling the optimizer directly, either with TAO (deterministic algorithms):
if (tuneHyperParamsTAO(
training_params, training_data, opts.tao_options, opts.show_optimization_details))
::mooseError("PETSc/TAO error in hyperparameter tuning.");
(modules/stochastic_tools/src/utils/GaussianProcessHandler.C)
or with Adam (stochastic algorithm):
tuneHyperParamsAdam(training_params,
training_data,
opts.iter_adam,
batch_size,
opts.learning_rate_adam,
opts.show_optimization_details);
(modules/stochastic_tools/src/utils/GaussianProcessHandler.C)
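Both code paths minimize the negative log marginal likelihood of the (standardized) training data. Written out in standard form, the objective assembled in formFunctionGradient and getLossAdam and its gradient with respect to a hyperparameter \theta_j are

-\log \mathcal{L}(\theta) = \frac{1}{2}\left( y^\top K^{-1} y + \log\det K + N \log 2\pi \right),

\frac{\partial \left(-\log \mathcal{L}\right)}{\partial \theta_j} = -\frac{1}{2}\,\mathrm{tr}\!\left( \left( \alpha \alpha^\top - K^{-1} \right) \frac{\partial K}{\partial \theta_j} \right), \qquad \alpha = K^{-1} y,

where N is the number of training (or mini-batch) samples and K depends on \theta through the covariance function. TAO consumes this objective and gradient through the callback in formFunctionGradient, while the Adam loop evaluates them on shuffled mini-batches via getLossAdam and getGradientAdam.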
(modules/stochastic_tools/include/surrogates/GaussianProcessTrainer.h)
// This file is part of the MOOSE framework
// https://www.mooseframework.org
//
// All rights reserved, see COPYRIGHT for full restrictions
// https://github.com/idaholab/moose/blob/master/COPYRIGHT
//
// Licensed under LGPL 2.1, please see LICENSE for details
// https://www.gnu.org/licenses/lgpl-2.1.html
#pragma once
#include "SurrogateTrainer.h"
#include "Standardizer.h"
#include <Eigen/Dense>
#include "Distribution.h"
#include "CovarianceFunctionBase.h"
#include "CovarianceInterface.h"
#include "GaussianProcessHandler.h"
class GaussianProcessTrainer : public SurrogateTrainer, public CovarianceInterface
{
public:
static InputParameters validParams();
GaussianProcessTrainer(const InputParameters & parameters);
virtual void preTrain() override;
virtual void train() override;
virtual void postTrain() override;
StochasticTools::GaussianProcessHandler & gpHandler() { return _gp_handler; }
const StochasticTools::GaussianProcessHandler & getGPHandler() const { return _gp_handler; }
private:
/// Data from the current predictor row
const std::vector<Real> & _predictor_row;
/// Gaussian process handler responsible for managing training related tasks
StochasticTools::GaussianProcessHandler & _gp_handler;
/// Parameters (x) used for training -- we'll allgather these in postTrain().
std::vector<std::vector<Real>> _params_buffer;
/// Data (y) used for training.
std::vector<Real> _data_buffer;
/// Parameters (x) used for training, along with statistics
RealEigenMatrix & _training_params;
/// Data (y) used for training
RealEigenMatrix _training_data;
/// Switch for training param (x) standardization
bool _standardize_params;
/// Switch for training data(y) standardization
bool _standardize_data;
/// Flag to toggle hyperparameter tuning/optimization
bool _do_tuning;
/// Struct holding parameters necessary for parameter tuning
const StochasticTools::GaussianProcessHandler::GPOptimizerOptions _optimization_opts;
/// Data from the current sampler row
const std::vector<Real> & _sampler_row;
/// Predictor values from reporters
std::vector<const Real *> _pvals;
/// Columns from sampler for predictors
std::vector<unsigned int> _pcols;
/// Total number of parameters/dimensions
unsigned int _n_params;
};
(modules/stochastic_tools/src/surrogates/GaussianProcessTrainer.C)
// This file is part of the MOOSE framework
// https://www.mooseframework.org
//
// All rights reserved, see COPYRIGHT for full restrictions
// https://github.com/idaholab/moose/blob/master/COPYRIGHT
//
// Licensed under LGPL 2.1, please see LICENSE for details
// https://www.gnu.org/licenses/lgpl-2.1.html
#include "GaussianProcessTrainer.h"
#include "Sampler.h"
#include "CartesianProduct.h"
#include <petsctao.h>
#include <petscdmda.h>
#include "libmesh/petsc_vector.h"
#include "libmesh/petsc_matrix.h"
#include <cmath>
registerMooseObject("StochasticToolsApp", GaussianProcessTrainer);
InputParameters
GaussianProcessTrainer::validParams()
{
InputParameters params = SurrogateTrainer::validParams();
params.addClassDescription(
"Provides data preperation and training for a Gaussian Process surrogate model.");
params.addRequiredParam<UserObjectName>("covariance_function", "Name of covariance function.");
params.addParam<bool>(
"standardize_params", true, "Standardize (center and scale) training parameters (x values)");
params.addParam<bool>(
"standardize_data", true, "Standardize (center and scale) training data (y values)");
// Already preparing to use Adam here
MooseEnum tuning_type("tao adam none", "none");
params.addParam<MooseEnum>(
"tuning_algorithm", tuning_type, "Hyper parameter optimizaton algorithm");
params.addParam<unsigned int>("iter_adam", 1000, "Tolerance value for Adam optimization");
params.addParam<unsigned int>("batch_size", 0, "The batch size for Adam optimization");
params.addParam<Real>("learning_rate_adam", 0.001, "The learning rate for Adam optimization");
params.addParam<std::string>(
"tao_options", "", "Command line options for PETSc/TAO hyperparameter optimization");
params.addParam<bool>(
"show_optimization_details", false, "Switch to show TAO or Adam solver results");
params.addParam<std::vector<std::string>>("tune_parameters",
"Select hyperparameters to be tuned");
params.addParam<std::vector<Real>>("tuning_min", "Minimum allowable tuning value");
params.addParam<std::vector<Real>>("tuning_max", "Maximum allowable tuning value");
params.suppressParameter<MooseEnum>("response_type");
return params;
}
GaussianProcessTrainer::GaussianProcessTrainer(const InputParameters & parameters)
: SurrogateTrainer(parameters),
CovarianceInterface(parameters),
_predictor_row(getPredictorData()),
_gp_handler(declareModelData<StochasticTools::GaussianProcessHandler>("_gp_handler")),
_training_params(declareModelData<RealEigenMatrix>("_training_params")),
_standardize_params(getParam<bool>("standardize_params")),
_standardize_data(getParam<bool>("standardize_data")),
_do_tuning(isParamValid("tune_parameters")),
_optimization_opts(StochasticTools::GaussianProcessHandler::GPOptimizerOptions(
getParam<MooseEnum>("tuning_algorithm"),
getParam<std::string>("tao_options"),
getParam<bool>("show_optimization_details"),
getParam<unsigned int>("iter_adam"),
getParam<unsigned int>("batch_size"),
getParam<Real>("learning_rate_adam"))),
_sampler_row(getSamplerData()),
_pvals(getParam<std::vector<ReporterName>>("predictors").size()),
_pcols(getParam<std::vector<unsigned int>>("predictor_cols")),
_n_params((_pvals.empty() && _pcols.empty()) ? _sampler.getNumberOfCols()
: (_pvals.size() + _pcols.size()))
{
const auto & pnames = getParam<std::vector<ReporterName>>("predictors");
for (unsigned int i = 0; i < pnames.size(); ++i)
_pvals[i] = &getTrainingData<Real>(pnames[i]);
// If predictors and predictor_cols are empty, use all sampler columns
if (_pvals.empty() && _pcols.empty())
{
_pcols.resize(_sampler.getNumberOfCols());
std::iota(_pcols.begin(), _pcols.end(), 0);
}
// Error Checking
if (_do_tuning && _optimization_opts.opt_type == "none")
paramError("tuning_algorithm",
"No tuning algorithm is selected for the hyper parameter optimization!");
if (parameters.isParamSetByUser("batch_size") && _optimization_opts.opt_type == "tao")
paramError("batch_size",
"Mini-batch sampling is not compatible with the TAO optimization library. Please "
"use Adam optimization.");
if (parameters.isParamSetByUser("batch_size"))
if (_sampler.getNumberOfRows() < _optimization_opts.batch_size)
paramError("batch_size", "Batch size cannot be greater than the training data set size.");
std::vector<std::string> tune_parameters(getParam<std::vector<std::string>>("tune_parameters"));
if (isParamValid("tuning_min") &&
(getParam<std::vector<Real>>("tuning_min").size() != tune_parameters.size()))
mooseError("tuning_min size does not match tune_parameters");
if (isParamValid("tuning_max") &&
(getParam<std::vector<Real>>("tuning_max").size() != tune_parameters.size()))
mooseError("tuning_max size does not match tune_parameters");
std::vector<Real> lower_bounds, upper_bounds;
if (isParamValid("tuning_min"))
lower_bounds = getParam<std::vector<Real>>("tuning_min");
if (isParamValid("tuning_max"))
upper_bounds = getParam<std::vector<Real>>("tuning_max");
_gp_handler.initialize(
getCovarianceFunctionByName(parameters.get<UserObjectName>("covariance_function")),
tune_parameters,
lower_bounds,
upper_bounds);
}
void
GaussianProcessTrainer::preTrain()
{
_params_buffer.clear();
_data_buffer.clear();
_params_buffer.reserve(getLocalSampleSize());
_data_buffer.reserve(getLocalSampleSize());
}
void
GaussianProcessTrainer::train()
{
_params_buffer.push_back(_predictor_row);
_data_buffer.push_back(*_rval);
}
void
GaussianProcessTrainer::postTrain()
{
// Instead of gatherSum, we have to allgather.
_communicator.allgather(_params_buffer);
_communicator.allgather(_data_buffer);
_training_params.resize(_params_buffer.size(), _n_dims);
_training_data.resize(_data_buffer.size(), 1);
for (auto ii : make_range(_training_params.rows()))
{
for (auto jj : make_range(_training_params.cols()))
_training_params(ii, jj) = _params_buffer[ii][jj];
_training_data(ii, 0) = _data_buffer[ii];
}
// Standardize (center and scale) training params
if (_standardize_params)
_gp_handler.standardizeParameters(_training_params);
// if not standardizing data set mean=0, std=1 for use in surrogate
else
_gp_handler.paramStandardizer().set(0, 1, _n_dims);
// Standardize (center and scale) training data
if (_standardize_data)
_gp_handler.standardizeData(_training_data);
// if not standardizing data set mean=0, std=1 for use in surrogate
else
_gp_handler.dataStandardizer().set(0, 1, _n_dims);
// Setup the covariance
_gp_handler.setupCovarianceMatrix(_training_params, _training_data, _optimization_opts);
}
(modules/stochastic_tools/src/surrogates/GaussianProcess.C)
// This file is part of the MOOSE framework
// https://www.mooseframework.org
//
// All rights reserved, see COPYRIGHT for full restrictions
// https://github.com/idaholab/moose/blob/master/COPYRIGHT
//
// Licensed under LGPL 2.1, please see LICENSE for details
// https://www.gnu.org/licenses/lgpl-2.1.html
#include "GaussianProcess.h"
#include "Sampler.h"
#include "CovarianceFunctionBase.h"
registerMooseObject("StochasticToolsApp", GaussianProcess);
InputParameters
GaussianProcess::validParams()
{
InputParameters params = SurrogateModel::validParams();
params.addClassDescription("Computes and evaluates Gaussian Process surrogate model.");
return params;
}
GaussianProcess::GaussianProcess(const InputParameters & parameters)
: SurrogateModel(parameters),
CovarianceInterface(parameters),
_gp_handler(declareModelData<StochasticTools::GaussianProcessHandler>("_gp_handler")),
_training_params(getModelData<RealEigenMatrix>("_training_params"))
{
}
void
GaussianProcess::setupCovariance(UserObjectName covar_name)
{
if (_gp_handler.getCovarFunctionPtr() != nullptr)
::mooseError("Attempting to redefine covariance function using setupCovariance.");
_gp_handler.linkCovarianceFunction(getCovarianceFunctionByName(covar_name));
}
Real
GaussianProcess::evaluate(const std::vector<Real> & x) const
{
// Overload for evaluate to maintain general compatibility. Only returns mean
Real dummy = 0;
return this->evaluate(x, dummy);
}
Real
GaussianProcess::evaluate(const std::vector<Real> & x, Real & std_dev) const
{
unsigned int _n_params = _training_params.cols();
unsigned int _num_tests = 1;
mooseAssert(x.size() == _n_params,
"Number of parameters provided for evaluation does not match number of parameters "
"used for training.");
RealEigenMatrix test_points(_num_tests, _n_params);
for (unsigned int ii = 0; ii < _n_params; ++ii)
test_points(0, ii) = x[ii];
_gp_handler.getParamStandardizer().getStandardized(test_points);
RealEigenMatrix K_train_test(_training_params.rows(), test_points.rows());
_gp_handler.getCovarFunction().computeCovarianceMatrix(
K_train_test, _training_params, test_points, false);
RealEigenMatrix K_test(test_points.rows(), test_points.rows());
_gp_handler.getCovarFunction().computeCovarianceMatrix(K_test, test_points, test_points, true);
// Compute the predicted mean value (centered)
RealEigenMatrix pred_value = (K_train_test.transpose() * _gp_handler.getKResultsSolve());
// De-center/scale the value and store for return
_gp_handler.getDataStandardizer().getDestandardized(pred_value);
RealEigenMatrix pred_var =
K_test - (K_train_test.transpose() * _gp_handler.getKCholeskyDecomp().solve(K_train_test));
// Variance computed, take sqrt for standard deviation, scale up by training data std and store
RealEigenMatrix std_dev_mat = pred_var.array().sqrt();
_gp_handler.getDataStandardizer().getDescaled(std_dev_mat);
std_dev = std_dev_mat(0, 0);
return pred_value(0, 0);
}
(modules/stochastic_tools/src/utils/GaussianProcessHandler.C)
// This file is part of the MOOSE framework
// https://www.mooseframework.org
//
// All rights reserved, see COPYRIGHT for full restrictions
// https://github.com/idaholab/moose/blob/master/COPYRIGHT
//
// Licensed under LGPL 2.1, please see LICENSE for details
// https://www.gnu.org/licenses/lgpl-2.1.html
#include "GaussianProcessHandler.h"
#include "FEProblemBase.h"
#include <petsctao.h>
#include <petscdmda.h>
#include "libmesh/petsc_vector.h"
#include "libmesh/petsc_matrix.h"
#include <cmath>
#include "MooseRandom.h"
#include "Shuffle.h"
namespace StochasticTools
{
GaussianProcessHandler::GPOptimizerOptions::GPOptimizerOptions(
const MooseEnum & inp_opt_type,
const std::string & inp_tao_options,
const bool inp_show_optimization_details,
const unsigned int inp_iter_adam,
const unsigned int inp_batch_size,
const Real inp_learning_rate_adam)
: opt_type(inp_opt_type),
tao_options(inp_tao_options),
show_optimization_details(inp_show_optimization_details),
iter_adam(inp_iter_adam),
batch_size(inp_batch_size),
learning_rate_adam(inp_learning_rate_adam)
{
}
GaussianProcessHandler::GaussianProcessHandler() : _tao_comm(MPI_COMM_SELF) {}
void
GaussianProcessHandler::initialize(CovarianceFunctionBase * covariance_function,
const std::vector<std::string> params_to_tune,
std::vector<Real> min,
std::vector<Real> max)
{
linkCovarianceFunction(covariance_function);
generateTuningMap(params_to_tune, min, max);
}
void
GaussianProcessHandler::linkCovarianceFunction(CovarianceFunctionBase * covariance_function)
{
_covariance_function = covariance_function;
_covar_type = _covariance_function->type();
}
void
GaussianProcessHandler::setupCovarianceMatrix(const RealEigenMatrix & training_params,
const RealEigenMatrix & training_data,
const GPOptimizerOptions & opts)
{
const unsigned int batch_size = opts.batch_size > 0 ? opts.batch_size : training_params.rows();
_K.resize(batch_size, batch_size);
if (opts.opt_type == "tao")
{
if (tuneHyperParamsTAO(
training_params, training_data, opts.tao_options, opts.show_optimization_details))
::mooseError("PETSc/TAO error in hyperparameter tuning.");
}
else if (opts.opt_type == "adam")
tuneHyperParamsAdam(training_params,
training_data,
opts.iter_adam,
batch_size,
opts.learning_rate_adam,
opts.show_optimization_details);
_K.resize(training_params.rows(), training_params.rows());
_covariance_function->computeCovarianceMatrix(_K, training_params, training_params, true);
// Compute the Cholesky decomposition and inverse action of the covariance matrix
setupStoredMatrices(training_data);
_covariance_function->buildHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
}
void
GaussianProcessHandler::setupStoredMatrices(const RealEigenMatrix & input)
{
_K_cho_decomp = _K.llt();
_K_results_solve = _K_cho_decomp.solve(input);
}
void
GaussianProcessHandler::generateTuningMap(const std::vector<std::string> params_to_tune,
std::vector<Real> min_vector,
std::vector<Real> max_vector)
{
_num_tunable = 0;
bool upper_bounds_specified = false;
bool lower_bounds_specified = false;
if (min_vector.size())
lower_bounds_specified = true;
if (max_vector.size())
upper_bounds_specified = true;
for (unsigned int param_i = 0; param_i < params_to_tune.size(); ++param_i)
{
const auto & hp = params_to_tune[param_i];
if (_covariance_function->isTunable(hp))
{
unsigned int size;
Real min;
Real max;
// Get size and default min/max
_covariance_function->getTuningData(hp, size, min, max);
// Check for overridden min/max
min = lower_bounds_specified ? min_vector[param_i] : min;
max = upper_bounds_specified ? max_vector[param_i] : max;
// Save data in tuple
_tuning_data[hp] = std::make_tuple(_num_tunable, size, min, max);
_num_tunable += size;
}
}
}
void
GaussianProcessHandler::standardizeParameters(RealEigenMatrix & data, bool keep_moments)
{
if (!keep_moments)
_param_standardizer.computeSet(data);
_param_standardizer.getStandardized(data);
}
void
GaussianProcessHandler::standardizeData(RealEigenMatrix & data, bool keep_moments)
{
if (!keep_moments)
_data_standardizer.computeSet(data);
_data_standardizer.getStandardized(data);
}
PetscErrorCode
GaussianProcessHandler::tuneHyperParamsTAO(const RealEigenMatrix & training_params,
const RealEigenMatrix & training_data,
std::string tao_options,
bool show_optimization_details)
{
PetscErrorCode ierr;
Tao tao;
_training_params = &training_params;
_training_data = &training_data;
// Setup Tao optimization problem
ierr = TaoCreate(_tao_comm.get(), &tao);
CHKERRQ(ierr);
ierr = PetscOptionsSetValue(NULL, "-tao_type", "bncg");
CHKERRQ(ierr);
ierr = PetscOptionsInsertString(NULL, tao_options.c_str());
CHKERRQ(ierr);
ierr = TaoSetFromOptions(tao);
CHKERRQ(ierr);
// Define petsc vector to hold tunable hyper-params
libMesh::PetscVector<Number> theta(_tao_comm, _num_tunable);
ierr = formInitialGuessTAO(theta.vec());
CHKERRQ(ierr);
#if !PETSC_VERSION_LESS_THAN(3, 17, 0)
ierr = TaoSetSolution(tao, theta.vec());
#else
ierr = TaoSetInitialVector(tao, theta.vec());
#endif
CHKERRQ(ierr);
// Get Hyperparameter bounds.
libMesh::PetscVector<Number> lower(_tao_comm, _num_tunable);
libMesh::PetscVector<Number> upper(_tao_comm, _num_tunable);
buildHyperParamBoundsTAO(lower, upper);
CHKERRQ(ierr);
ierr = TaoSetVariableBounds(tao, lower.vec(), upper.vec());
CHKERRQ(ierr);
// Set Objective and Gradient Callback
#if !PETSC_VERSION_LESS_THAN(3, 17, 0)
ierr = TaoSetObjectiveAndGradient(tao, NULL, formFunctionGradientWrapper, (void *)this);
#else
ierr = TaoSetObjectiveAndGradientRoutine(tao, formFunctionGradientWrapper, (void *)this);
#endif
CHKERRQ(ierr);
// Solve
ierr = TaoSolve(tao);
CHKERRQ(ierr);
//
if (show_optimization_details)
{
ierr = TaoView(tao, PETSC_VIEWER_STDOUT_WORLD);
theta.print();
}
_covariance_function->loadHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
ierr = TaoDestroy(&tao);
CHKERRQ(ierr);
return 0;
}
PetscErrorCode
GaussianProcessHandler::formInitialGuessTAO(Vec theta_vec)
{
libMesh::PetscVector<Number> theta(theta_vec, _tao_comm);
_covariance_function->buildHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
mapToPetscVec(_tuning_data, _hyperparam_map, _hyperparam_vec_map, theta);
return 0;
}
void
GaussianProcessHandler::buildHyperParamBoundsTAO(libMesh::PetscVector<Number> & theta_l,
libMesh::PetscVector<Number> & theta_u) const
{
for (auto iter = _tuning_data.begin(); iter != _tuning_data.end(); ++iter)
{
for (unsigned int ii = 0; ii < std::get<1>(iter->second); ++ii)
{
theta_l.set(std::get<0>(iter->second) + ii, std::get<2>(iter->second));
theta_u.set(std::get<0>(iter->second) + ii, std::get<3>(iter->second));
}
}
}
PetscErrorCode
GaussianProcessHandler::formFunctionGradientWrapper(
Tao tao, Vec theta_vec, PetscReal * f, Vec grad_vec, void * ptr)
{
GaussianProcessHandler * GP_ptr = (GaussianProcessHandler *)ptr;
GP_ptr->formFunctionGradient(tao, theta_vec, f, grad_vec);
return 0;
}
void
GaussianProcessHandler::formFunctionGradient(Tao /*tao*/,
Vec theta_vec,
PetscReal * f,
Vec grad_vec)
{
libMesh::PetscVector<Number> theta(theta_vec, _tao_comm);
libMesh::PetscVector<Number> grad(grad_vec, _tao_comm);
petscVecToMap(_tuning_data, _hyperparam_map, _hyperparam_vec_map, theta);
_covariance_function->loadHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
_covariance_function->computeCovarianceMatrix(_K, *_training_params, *_training_params, true);
setupStoredMatrices(*_training_data);
// testing auto tuning
RealEigenMatrix dKdhp(_training_params->rows(), _training_params->rows());
RealEigenMatrix alpha = _K_results_solve * _K_results_solve.transpose();
for (auto iter = _tuning_data.begin(); iter != _tuning_data.end(); ++iter)
{
std::string hyper_param_name = iter->first;
for (unsigned int ii = 0; ii < std::get<1>(iter->second); ++ii)
{
_covariance_function->computedKdhyper(dKdhp, *_training_params, hyper_param_name, ii);
RealEigenMatrix tmp = alpha * dKdhp - _K_cho_decomp.solve(dKdhp);
grad.set(std::get<0>(iter->second) + ii, -tmp.trace() / 2.0);
}
}
Real log_likelihood = 0;
log_likelihood += -(_training_data->transpose() * _K_results_solve)(0, 0);
log_likelihood += -std::log(_K.determinant());
log_likelihood += -_training_data->rows() * std::log(2 * M_PI);
log_likelihood = -log_likelihood / 2;
*f = log_likelihood;
}
void
GaussianProcessHandler::tuneHyperParamsAdam(const RealEigenMatrix & training_params,
const RealEigenMatrix & training_data,
unsigned int iter,
const unsigned int & batch_size,
const Real & learning_rate,
const bool & show_optimization_details)
{
libMesh::PetscVector<Number> theta(_tao_comm, _num_tunable);
_batch_size = batch_size;
_covariance_function->buildHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
mapToPetscVec(_tuning_data, _hyperparam_map, _hyperparam_vec_map, theta);
Real b1;
Real b2;
Real eps;
// Internal params for Adam; set to the recommended values in the paper
b1 = 0.9;
b2 = 0.999;
eps = 1e-7;
std::vector<Real> m0(_num_tunable, 0.0);
std::vector<Real> v0(_num_tunable, 0.0);
Real new_val;
Real m_hat;
Real v_hat;
Real store_loss;
std::vector<Real> grad1;
// Initialize randomizer
std::vector<unsigned int> v_sequence(training_params.rows());
std::iota(std::begin(v_sequence), std::end(v_sequence), 0);
RealEigenMatrix inputs(_batch_size, training_params.cols());
RealEigenMatrix outputs(_batch_size, 1);
if (show_optimization_details)
Moose::out << "OPTIMIZING GP HYPER-PARAMETERS USING Adam" << std::endl;
for (unsigned int ss = 0; ss < iter; ++ss)
{
// Shuffle data
MooseRandom generator;
generator.seed(0, 1980);
generator.saveState();
MooseUtils::shuffle<unsigned int>(v_sequence, generator, 0);
for (unsigned int ii = 0; ii < _batch_size; ++ii)
{
for (unsigned int jj = 0; jj < training_params.cols(); ++jj)
inputs(ii, jj) = training_params(v_sequence[ii], jj);
outputs(ii, 0) = training_data(v_sequence[ii], 0);
}
store_loss = getLossAdam(inputs, outputs);
if (show_optimization_details && ss == 0)
Moose::out << "INITIAL LOSS: " << store_loss << std::endl;
grad1 = getGradientAdam(inputs);
for (unsigned int ii = 0; ii < _num_tunable; ++ii)
{
m0[ii] = b1 * m0[ii] + (1 - b1) * grad1[ii];
v0[ii] = b2 * v0[ii] + (1 - b2) * grad1[ii] * grad1[ii];
m_hat = m0[ii] / (1 - std::pow(b1, (ss + 1)));
v_hat = v0[ii] / (1 - std::pow(b2, (ss + 1)));
new_val = theta(ii) - learning_rate * m_hat / (std::sqrt(v_hat) + eps);
if (new_val < 0.01) // constrain params on the lower side
new_val = 0.01;
theta.set(ii, new_val);
}
petscVecToMap(_tuning_data, _hyperparam_map, _hyperparam_vec_map, theta);
_covariance_function->loadHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
}
if (show_optimization_details)
{
Moose::out << "OPTIMIZED GP HYPER-PARAMETERS:" << std::endl;
theta.print();
Moose::out << "FINAL LOSS: " << store_loss << std::endl;
}
}
Real
GaussianProcessHandler::getLossAdam(RealEigenMatrix & inputs, RealEigenMatrix & outputs)
{
_covariance_function->computeCovarianceMatrix(_K, inputs, inputs, true);
setupStoredMatrices(outputs);
Real log_likelihood = 0;
log_likelihood += -(outputs.transpose() * _K_results_solve)(0, 0);
log_likelihood += -std::log(_K.determinant());
log_likelihood -= _batch_size * std::log(2 * M_PI);
log_likelihood = -log_likelihood / 2;
return log_likelihood;
}
std::vector<Real>
GaussianProcessHandler::getGradientAdam(RealEigenMatrix & inputs)
{
RealEigenMatrix dKdhp(_batch_size, _batch_size);
RealEigenMatrix alpha = _K_results_solve * _K_results_solve.transpose();
std::vector<Real> grad_vec;
grad_vec.resize(_num_tunable);
int count;
count = 1;
for (auto iter = _tuning_data.begin(); iter != _tuning_data.end(); ++iter)
{
std::string hyper_param_name = iter->first;
for (unsigned int ii = 0; ii < std::get<1>(iter->second); ++ii)
{
_covariance_function->computedKdhyper(dKdhp, inputs, hyper_param_name, ii);
RealEigenMatrix tmp = alpha * dKdhp - _K_cho_decomp.solve(dKdhp);
Real grad1 = -tmp.trace() / 2.0;
if (hyper_param_name.compare("length_factor") == 0)
{
grad_vec[count] = grad1;
++count;
}
else
grad_vec[0] = grad1;
}
}
return grad_vec;
}
void
GaussianProcessHandler::mapToPetscVec(
const std::unordered_map<std::string, std::tuple<unsigned int, unsigned int, Real, Real>> &
tuning_data,
const std::unordered_map<std::string, Real> & scalar_map,
const std::unordered_map<std::string, std::vector<Real>> & vector_map,
libMesh::PetscVector<Number> & petsc_vec)
{
for (auto iter = tuning_data.begin(); iter != tuning_data.end(); ++iter)
{
std::string param_name = iter->first;
const auto scalar_it = scalar_map.find(param_name);
if (scalar_it != scalar_map.end())
petsc_vec.set(std::get<0>(iter->second), scalar_it->second);
else
{
const auto vector_it = vector_map.find(param_name);
if (vector_it != vector_map.end())
for (unsigned int ii = 0; ii < std::get<1>(iter->second); ++ii)
petsc_vec.set(std::get<0>(iter->second) + ii, (vector_it->second)[ii]);
}
}
}
void
GaussianProcessHandler::petscVecToMap(
const std::unordered_map<std::string, std::tuple<unsigned int, unsigned int, Real, Real>> &
tuning_data,
std::unordered_map<std::string, Real> & scalar_map,
std::unordered_map<std::string, std::vector<Real>> & vector_map,
const libMesh::PetscVector<Number> & petsc_vec)
{
for (auto iter = tuning_data.begin(); iter != tuning_data.end(); ++iter)
{
std::string param_name = iter->first;
if (scalar_map.find(param_name) != scalar_map.end())
scalar_map[param_name] = petsc_vec(std::get<0>(iter->second));
else if (vector_map.find(param_name) != vector_map.end())
for (unsigned int ii = 0; ii < std::get<1>(iter->second); ++ii)
vector_map[param_name][ii] = petsc_vec(std::get<0>(iter->second) + ii);
}
}
} // StochasticTools namespace
template <>
void
dataStore(std::ostream & stream, Eigen::LLT<RealEigenMatrix> & decomp, void * context)
{
// Store the L matrix as opposed to the full matrix to avoid compounding
// roundoff error and decomposition error
RealEigenMatrix L(decomp.matrixL());
dataStore(stream, L, context);
}
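// Rebuild the decomposition on load by re-factorizing L * L^T, which recovers the stored L factor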
template <>
void
dataLoad(std::istream & stream, Eigen::LLT<RealEigenMatrix> & decomp, void * context)
{
RealEigenMatrix L;
dataLoad(stream, L, context);
decomp.compute(L * L.transpose());
}
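// Serialize the full handler state (hyperparameter maps, covariance type, stored covariance
// matrix with its decomposition and inverse action, and both standardizers) so a trained
// surrogate can be written to and restored from restart/checkpoint files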
template <>
void
dataStore(std::ostream & stream, StochasticTools::GaussianProcessHandler & gp_utils, void * context)
{
dataStore(stream, gp_utils.hyperparamMap(), context);
dataStore(stream, gp_utils.hyperparamVectorMap(), context);
dataStore(stream, gp_utils.covarType(), context);
dataStore(stream, gp_utils.K(), context);
dataStore(stream, gp_utils.KResultsSolve(), context);
dataStore(stream, gp_utils.KCholeskyDecomp(), context);
dataStore(stream, gp_utils.paramStandardizer(), context);
dataStore(stream, gp_utils.dataStandardizer(), context);
}
template <>
void
dataLoad(std::istream & stream, StochasticTools::GaussianProcessHandler & gp_utils, void * context)
{
dataLoad(stream, gp_utils.hyperparamMap(), context);
dataLoad(stream, gp_utils.hyperparamVectorMap(), context);
dataLoad(stream, gp_utils.covarType(), context);
dataLoad(stream, gp_utils.K(), context);
dataLoad(stream, gp_utils.KResultsSolve(), context);
dataLoad(stream, gp_utils.KCholeskyDecomp(), context);
dataLoad(stream, gp_utils.paramStandardizer(), context);
dataLoad(stream, gp_utils.dataStandardizer(), context);
}