//* This file is part of the MOOSE framework
//* https://mooseframework.inl.gov
//*
//* All rights reserved, see COPYRIGHT for full restrictions
//* https://github.com/idaholab/moose/blob/master/COPYRIGHT
//*
//* Licensed under LGPL 2.1, please see LICENSE for details
//* https://www.gnu.org/licenses/lgpl-2.1.html

#include "LMC.h"
#include "MooseRandom.h"
#include "MathUtils.h"

registerMooseObject("StochasticToolsApp", LMC);

InputParameters
LMC::validParams()
{
  InputParameters params = CovarianceFunctionBase::validParams();
  params.addClassDescription("Covariance function for multioutput Gaussian Processes based on the "
                             "Linear Model of Coregionalization (LMC).");
  params.addParam<unsigned int>(
      "num_latent_funcs", 1, "The number of latent functions for the expansion of the outputs.");
  params.makeParamRequired<unsigned int>("num_outputs");
  params.makeParamRequired<std::vector<UserObjectName>>("covariance_functions");
  return params;
}

LMC::LMC(const InputParameters & parameters)
  : CovarianceFunctionBase(parameters),
    _num_expansion_terms(getParam<unsigned int>("num_latent_funcs"))
{
  // We use a random number generator to obtain the initial guess for the
  // hyperparameters
  MooseRandom generator_latent;
  generator_latent.seed(0, 1980);

  // First, add and initialize the a coefficients in the (aa^T + lambda*I) matrix
  for (const auto exp_i : make_range(_num_expansion_terms))
  {
    const std::string a_coeff_name = "acoeff_" + std::to_string(exp_i);
    const std::string a_coeff_name_with_prefix = _name + ":" + a_coeff_name;
    _a_coeffs.push_back(
        &addVectorRealHyperParameter(a_coeff_name, std::vector(_num_outputs, 1.0), true));

    auto & acoeff_vector = _hp_map_vector_real[a_coeff_name_with_prefix];
    for (const auto out_i : make_range(_num_outputs))
      acoeff_vector[out_i] = 3.0 * generator_latent.rand(0) + 1.0;
  }

  // Then, add and initialize the lambda coefficients in the (aa^T + lambda*I) matrix
  for (const auto exp_i : make_range(_num_expansion_terms))
  {
    const std::string lambda_name = "lambda_" + std::to_string(exp_i);
    const std::string lambda_name_with_prefix = _name + ":" + lambda_name;
    _lambdas.push_back(
        &addVectorRealHyperParameter(lambda_name, std::vector(_num_outputs, 1.0), true));

    auto & lambda_vector = _hp_map_vector_real[lambda_name_with_prefix];
    for (const auto out_i : make_range(_num_outputs))
      lambda_vector[out_i] = 3.0 * generator_latent.rand(0) + 1.0;
  }
}

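// Structure of the covariance assembled below: each expansion term e contributes an
// output-covariance matrix B_e = a_e * a_e^T + diag(lambda_e), and the full multioutput
// covariance is the sum of Kronecker products
//   K(x, x') = sum_e kron(B_e, k_e(x, x')),
// where k_e is the e-th dependent covariance function.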
void
LMC::computeCovarianceMatrix(RealEigenMatrix & K,
                             const RealEigenMatrix & x,
                             const RealEigenMatrix & xp,
                             const bool is_self_covariance) const
{
  // Create temporary matrices for constructing the covariance matrix
  RealEigenMatrix K_params = RealEigenMatrix::Zero(x.rows(), xp.rows());
  RealEigenMatrix B = RealEigenMatrix::Zero(_num_outputs, _num_outputs);
  K = RealEigenMatrix::Zero(x.rows() * _num_outputs, xp.rows() * _num_outputs);
  RealEigenMatrix K_working =
      RealEigenMatrix::Zero(x.rows() * _num_outputs, xp.rows() * _num_outputs);

  // For every expansion term we add the contribution to the covariance matrix
  for (const auto exp_i : make_range(_num_expansion_terms))
  {
    _covariance_functions[exp_i]->computeCovarianceMatrix(K_params, x, xp, is_self_covariance);
    computeBMatrix(B, exp_i);
    MathUtils::kron(K_working, B, K_params);
    K += K_working;
  }
}

bool
LMC::computedKdhyper(RealEigenMatrix & dKdhp,
                     const RealEigenMatrix & x,
                     const std::string & hyper_param_name,
                     unsigned int ind) const
{
  // We prefix the hyperparameter names with the name of this covariance function, so
  // return early if the given name is too short to contain the "<name>:" prefix.
  if (name().length() + 1 > hyper_param_name.length())
    return false;

  // Strip the prefix from the given parameter name
  const std::string name_without_prefix = hyper_param_name.substr(name().length() + 1);

  // Check if the parameter is tunable
  if (_tunable_hp.find(hyper_param_name) != _tunable_hp.end())
  {
    const std::string acoeff_prefix = "acoeff_";
    const std::string lambda_prefix = "lambda_";

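    // For the tunable a/lambda coefficients of this object, the derivative factorizes as
    //   dK/dtheta = kron(dB_e/dtheta, k_e(x, x)),
    // where e is the expansion term the hyperparameter belongs to (see computeAGradient
    // and computeLambdaGradient below for dB_e/dtheta).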
    // Allocate storage for the factors of the total gradient matrix
    RealEigenMatrix dBdhp = RealEigenMatrix::Zero(_num_outputs, _num_outputs);
    RealEigenMatrix K_params = RealEigenMatrix::Zero(x.rows(), x.rows());

    if (name_without_prefix.find(acoeff_prefix) != std::string::npos)
    {
      // Automatically grab the expansion index
      const int number = std::stoi(name_without_prefix.substr(acoeff_prefix.length()));
      computeAGradient(dBdhp, number, ind);
      _covariance_functions[number]->computeCovarianceMatrix(K_params, x, x, true);
    }
    else if (name_without_prefix.find(lambda_prefix) != std::string::npos)
    {
      // Automatically grab the expansion index
      const int number = std::stoi(name_without_prefix.substr(lambda_prefix.length()));
      computeLambdaGradient(dBdhp, number, ind);
      _covariance_functions[number]->computeCovarianceMatrix(K_params, x, x, true);
    }
    MathUtils::kron(dKdhp, dBdhp, K_params);
    return true;
  }
  else
  {
    // Allocate storage for the matrix factors
    RealEigenMatrix B_tmp = RealEigenMatrix::Zero(_num_outputs, _num_outputs);
    RealEigenMatrix B = RealEigenMatrix::Zero(_num_outputs, _num_outputs);
    RealEigenMatrix dKdhp_sub = RealEigenMatrix::Zero(x.rows(), x.rows());

    // First, check whether the hyperparameter belongs to one of the dependent covariance
    // functions and let that function compute its own gradient contribution
    bool found = false;
    for (const auto dependent_covar : _covariance_functions)
      if (!found)
        found = dependent_covar->computedKdhyper(dKdhp_sub, x, hyper_param_name, ind);

    if (!found)
      mooseError("Hyperparameter ", hyper_param_name, " not found!");

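    // For hyperparameters owned by the dependent covariance functions, the derivative is
    //   dK/dtheta = kron(sum_e B_e, dk/dtheta),
    // with dk/dtheta (dKdhp_sub) computed by the dependent covariance above.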
    // Then we compute the total output covariance by summing the B matrices of all
    // expansion terms
    for (const auto exp_i : make_range(_num_expansion_terms))
    {
      computeBMatrix(B_tmp, exp_i);
      B += B_tmp;
    }

    MathUtils::kron(dKdhp, B, dKdhp_sub);

    return true;
  }

  return false;
}

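// Assembles B_e = a_e * a_e^T + diag(lambda_e), the output covariance contributed by
// expansion term exp_i.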
void
LMC::computeBMatrix(RealEigenMatrix & Bmat, const unsigned int exp_i) const
{
  const auto & a_coeffs = *_a_coeffs[exp_i];
  const auto & lambda_coeffs = *_lambdas[exp_i];

  for (const auto row_i : make_range(_num_outputs))
    for (const auto col_i : make_range(_num_outputs))
    {
      Bmat(row_i, col_i) = a_coeffs[row_i] * a_coeffs[col_i];
      if (row_i == col_i)
        Bmat(row_i, col_i) += lambda_coeffs[col_i];
    }
}

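// Gradient of B_e with respect to the `index`-th entry of a_e:
//   dB_e/da_e[index] = e_index * a_e^T + a_e * e_index^T,
// i.e. a matrix whose `index`-th row and column hold a_e (with the diagonal entry doubled).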
void
LMC::computeAGradient(RealEigenMatrix & grad,
                      const unsigned int exp_i,
                      const unsigned int index) const
{
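  // Bounds checks on the expansion term and output indices (a minimal sketch;
  // mooseAssert is only active in debug builds)
  mooseAssert(exp_i < _a_coeffs.size(), "Invalid expansion term index!");
  mooseAssert(index < _num_outputs, "Invalid output index!");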
  const auto & a_coeffs = *_a_coeffs[exp_i];
  grad.setZero();
  for (const auto col_i : make_range(_num_outputs))
    grad(index, col_i) = a_coeffs[col_i];
  const RealEigenMatrix transpose = grad.transpose();
  grad = grad + transpose;
}

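// Gradient of B_e with respect to the `index`-th entry of lambda_e: lambda only appears
// on the diagonal of B_e, so the derivative is the single-entry matrix
//   dB_e/dlambda_e[index] = e_index * e_index^T.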
void
LMC::computeLambdaGradient(RealEigenMatrix & grad,
                           const unsigned int /*exp_i*/,
                           const unsigned int index) const
{
  grad.setZero();
  grad(index, index) = 1.0;
}