LCOV - code coverage report
Current view: top level - src/surrogates - ActiveLearningGaussianProcess.C (source / functions)
Test: idaholab/moose stochastic_tools: f45d79
Date: 2025-07-25 05:00:46

             Hit   Total   Coverage
Lines:        44      51     86.3 %
Functions:     3       3    100.0 %

          Line data    Source code
       1             : //* This file is part of the MOOSE framework
       2             : //* https://mooseframework.inl.gov
       3             : //*
       4             : //* All rights reserved, see COPYRIGHT for full restrictions
       5             : //* https://github.com/idaholab/moose/blob/master/COPYRIGHT
       6             : //*
       7             : //* Licensed under LGPL 2.1, please see LICENSE for details
       8             : //* https://www.gnu.org/licenses/lgpl-2.1.html
       9             : 
      10             : #include "ActiveLearningGaussianProcess.h"
      11             : 
      12             : #include <petsctao.h>
      13             : #include <petscdmda.h>
      14             : 
      15             : #include "libmesh/petsc_vector.h"
      16             : #include "libmesh/petsc_matrix.h"
      17             : 
      18             : #include <cmath>
      19             : 
      20             : registerMooseObject("StochasticToolsApp", ActiveLearningGaussianProcess);
      21             : 
      22             : InputParameters
      23         228 : ActiveLearningGaussianProcess::validParams()
      24             : {
      25         228 :   InputParameters params = SurrogateTrainerBase::validParams();
      26         228 :   params.addClassDescription(
      27             :       "Permit re-training Gaussian Process surrogate model for active learning.");
      28         456 :   params.addRequiredParam<UserObjectName>("covariance_function", "Name of covariance function.");
      29         456 :   params.addParam<bool>(
      30         456 :       "standardize_params", true, "Standardize (center and scale) training parameters (x values)");
      31         456 :   params.addParam<bool>(
      32         456 :       "standardize_data", true, "Standardize (center and scale) training data (y values)");
      33         456 :   params.addParam<unsigned int>("num_iters", 1000, "Tolerance value for Adam optimization");
      34         456 :   params.addParam<unsigned int>("batch_size", 0, "The batch size for Adam optimization");
      35         456 :   params.addParam<Real>("learning_rate", 0.001, "The learning rate for Adam optimization");
      36         456 :   params.addParam<unsigned int>(
      37             :       "show_every_nth_iteration",
      38         456 :       0,
      39             :       "Switch to show Adam optimization loss values at every nth step. If 0, nothing is showed.");
      40         456 :   params.addParam<std::vector<std::string>>(
      41             :       "tune_parameters", {}, "Select hyperparameters to be tuned");
      42         456 :   params.addParam<std::vector<Real>>("tuning_min", {}, "Minimum allowable tuning value");
      43         456 :   params.addParam<std::vector<Real>>("tuning_max", {}, "Maximum allowable tuning value");
      44         228 :   return params;
      45           0 : }
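
// The block above only declares the interface; a minimal sketch of exercising
// it programmatically follows (for illustration only: normally these values
// arrive via the input file, and "covar" is a placeholder object name, not
// anything defined in MOOSE). set() and get() are the standard MOOSE
// InputParameters accessors:
//
//   InputParameters params = ActiveLearningGaussianProcess::validParams();
//   params.set<UserObjectName>("covariance_function") = "covar";
//   const auto n_iters = params.get<unsigned int>("num_iters"); // default 1000
//   const auto lr = params.get<Real>("learning_rate");          // default 0.001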
      46             : 
      47         114 : ActiveLearningGaussianProcess::ActiveLearningGaussianProcess(const InputParameters & parameters)
      48             :   : SurrogateTrainerBase(parameters),
      49             :     CovarianceInterface(parameters),
      50             :     SurrogateModelInterface(this),
      51         228 :     _gp(declareModelData<StochasticTools::GaussianProcess>("_gp")),
      52         228 :     _training_params(declareModelData<RealEigenMatrix>("_training_params")),
      53         228 :     _standardize_params(getParam<bool>("standardize_params")),
      54         228 :     _standardize_data(getParam<bool>("standardize_data")),
      55         570 :     _optimization_opts(StochasticTools::GaussianProcess::GPOptimizerOptions(
      56         342 :         getParam<unsigned int>("show_every_nth_iteration"),
      57         228 :         getParam<unsigned int>("num_iters"),
      58         228 :         getParam<unsigned int>("batch_size"),
      59         228 :         getParam<Real>("learning_rate")))
      60             : {
      61         684 :   _gp.initialize(getCovarianceFunctionByName(getParam<UserObjectName>("covariance_function")),
      62             :                  getParam<std::vector<std::string>>("tune_parameters"),
      63             :                  getParam<std::vector<Real>>("tuning_min"),
      64             :                  getParam<std::vector<Real>>("tuning_max"));
      65         114 : }
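
// The options bundled above drive Adam-based hyperparameter tuning inside
// StochasticTools::GaussianProcess. For orientation, one textbook Adam step on
// a scalar hyperparameter theta with gradient g at step t is sketched below;
// b1, b2, and eps are the usual Adam constants, and this is the generic
// algorithm, not a copy of the MOOSE implementation:
//
//   m = b1 * m + (1.0 - b1) * g;            // first-moment estimate
//   v = b2 * v + (1.0 - b2) * g * g;        // second-moment estimate
//   const Real m_hat = m / (1.0 - std::pow(b1, t));
//   const Real v_hat = v / (1.0 - std::pow(b2, t));
//   theta -= learning_rate * m_hat / (std::sqrt(v_hat) + eps);
//
// "num_iters" caps how many such steps are taken, and "batch_size" sets how
// many training points enter each gradient evaluation.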
      66             : 
      67             : void
      68         286 : ActiveLearningGaussianProcess::reTrain(const std::vector<std::vector<Real>> & inputs,
      69             :                                        const std::vector<Real> & outputs) const
      70             : {
      71             : 
      72             :   // Additional error check for each re-train call of the GP surrogate
      73         286 :   if (inputs.size() != outputs.size())
      74           0 :     mooseError("Number of inputs (",
      75           0 :                inputs.size(),
      76             :                ") does not match number of outputs (",
      77           0 :                outputs.size(),
      78             :                ").");
      79         286 :   if (inputs.empty())
      80           0 :     mooseError("There is no data for retraining.");
      81             : 
      82             :   RealEigenMatrix training_data;
      83         286 :   _training_params.setZero(outputs.size(), inputs[0].size());
      84         286 :   training_data.setZero(outputs.size(), 1);
      85             : 
      86        3252 :   for (unsigned int i = 0; i < outputs.size(); ++i)
      87             :   {
      88        2966 :     training_data(i, 0) = outputs[i];
      89       11688 :     for (unsigned int j = 0; j < inputs[i].size(); ++j)
      90        8722 :       _training_params(i, j) = inputs[i][j];
      91             :   }
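
// The element-wise copy is unavoidable for the nested inputs, since the rows
// of a std::vector<std::vector<Real>> are not contiguous in memory. The flat
// outputs vector could be wrapped rather than copied; a sketch, assuming Real
// is double so that RealEigenMatrix interoperates with Eigen::VectorXd:
//
//   training_data =
//       Eigen::Map<const Eigen::VectorXd>(outputs.data(), outputs.size());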
      92             : 
      93             :   // Standardize (center and scale) training params
      94         286 :   if (_standardize_params)
      95         286 :     _gp.standardizeParameters(_training_params);
      96             :   // if not standardizing parameters, set mean=0, std=1 for use in surrogate
      97             :   else
      98           0 :     _gp.paramStandardizer().set(0, 1, inputs[0].size());
      99             : 
     100             :   // Standardize (center and scale) training data
     101         286 :   if (_standardize_data)
     102         286 :     _gp.standardizeData(training_data);
     103             :   // if not standardizing data, set mean=0, std=1 for use in surrogate
     104             :   else
     105           0 :     _gp.dataStandardizer().set(0, 1, inputs[0].size());
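
// Whichever branch runs, downstream code can apply the transform
// unconditionally: standardization maps each column as z = (x - mean) / std,
// so the set(0, 1, n) fallback of mean = 0 and std = 1 reduces it to the
// identity when standardization is disabled.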
     106             : 
     107             :   // Setup the covariance
     108         286 :   _gp.setupCovarianceMatrix(_training_params, training_data, _optimization_opts);
     109         286 : }
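
The data handling in reTrain() can be reproduced without a MOOSE build. The
standalone sketch below packs nested vectors into Eigen matrices and
standardizes them column-wise. It assumes only Eigen; the helper names
(packRows, standardizeColumns) are illustrative rather than MOOSE API, and the
population-form standard deviation used here may differ from the divisor the
MOOSE standardizer uses internally.

#include <Eigen/Dense>
#include <cmath>
#include <iostream>
#include <vector>

// Pack rows of nested vectors into a dense matrix, mirroring the copy loop in
// reTrain(); illustrative helper, not part of MOOSE.
Eigen::MatrixXd packRows(const std::vector<std::vector<double>> & rows)
{
  Eigen::MatrixXd m(rows.size(), rows.empty() ? 0 : rows[0].size());
  for (std::size_t i = 0; i < rows.size(); ++i)
    for (std::size_t j = 0; j < rows[i].size(); ++j)
      m(i, j) = rows[i][j];
  return m;
}

// Center and scale each column to zero mean and unit standard deviation, the
// same kind of transform standardizeParameters()/standardizeData() apply.
void standardizeColumns(Eigen::MatrixXd & m)
{
  for (Eigen::Index j = 0; j < m.cols(); ++j)
  {
    const double mean = m.col(j).mean();
    const double sd =
        std::sqrt((m.col(j).array() - mean).square().sum() / m.rows());
    m.col(j) = ((m.col(j).array() - mean) / (sd > 0.0 ? sd : 1.0)).matrix();
  }
}

int main()
{
  const std::vector<std::vector<double>> inputs = {
      {1.0, 10.0}, {2.0, 20.0}, {3.0, 30.0}};
  Eigen::MatrixXd params = packRows(inputs);
  standardizeColumns(params);
  std::cout << params << "\n"; // each column now has mean 0 and std 1
  return 0;
}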

Generated by: LCOV version 1.14