https://mooseframework.inl.gov
LibtorchArtificialNeuralNetTrainer.h
Go to the documentation of this file.
1 //* This file is part of the MOOSE framework
2 //* https://mooseframework.inl.gov
3 //*
4 //* All rights reserved, see COPYRIGHT for full restrictions
5 //* https://github.com/idaholab/moose/blob/master/COPYRIGHT
6 //*
7 //* Licensed under LGPL 2.1, please see LICENSE for details
8 //* https://www.gnu.org/licenses/lgpl-2.1.html
9 
10 #ifdef LIBTORCH_ENABLED
11 
12 #pragma once
13 
15 #include "LibtorchDataset.h"
16 #include "DataIO.h"
17 #include "MooseEnum.h"
18 
19 namespace Moose
20 {
21 
// NOTE(review): this is a Doxygen source-listing fragment. The declaring line
// ("struct LibtorchTrainingOptions", original lines 22-27) and the
// `Real learning_rate` member (attested by this page's tooltip text:
// "The learning rate for the optimizers", original lines ~29-30) are missing
// from this scrape -- confirm against the upstream header before relying on it.
//
// A struct bundling the knobs needed to train a neural network (per the
// page's own brief: "A struct containing necessary information for training
// neural networks").
28 {
// Which torch optimizer to build; adam is the default (the page notes this is
// "due to its robustness and fast..." convergence -- text truncated in source).
31  MooseEnum optimizer_type = MooseEnum("adam=0 adagrad=1 rmsprop=2 sgd=3", "adam");
// Number of full passes ("epochs") over the whole dataset.
33  unsigned int num_epochs = 1;
// Number of batches the dataset is split into.
35  unsigned int num_batches = 1;
// Whether to print the training loss to the console during training.
37  bool print_loss = false;
// How often (in epochs) the training loss is printed when print_loss is on.
39  unsigned int print_epoch_loss = 1;
// Relative loss tolerance at which training stops early.
41  Real rel_loss_tol = 1e-12;
// Number of parallel processes allowed to participate in training.
47  unsigned int parallel_processes = 1;
// presumably controls whether samples may be duplicated across ranks when the
// dataset does not divide evenly -- not documented on this page; TODO confirm.
51  bool allow_duplicates = false;
52 };
53 
// Templated trainer for LibtorchArtificialNeuralNets (per this page's brief:
// "Templated class which is responsible for training
// LibtorchArtificialNeuralNets"). The sampler type defaults to torch's
// distributed sequential sampler, suggesting MPI-rank-aware batch sampling.
//
// NOTE(review): Doxygen source-listing fragment -- several original lines are
// missing from this scrape: the class-declaration line (original 60), the
// constructor `LibtorchArtificialNeuralNetTrainer(LibtorchArtificialNeuralNet &
// nn, const Parallel::Communicator & comm)` (original 63-69, attested by the
// tooltip text), the parameter line of createOptimizer (original 104), and the
// protected member `LibtorchArtificialNeuralNet & _nn` ("Reference to the
// neural network which is trained", original ~107-108). Confirm against the
// upstream header.
59 template <typename SamplerType = torch::data::samplers::DistributedSequentialSampler>
61 {
62 public:
70
// Train the neural network on the given dataset using the supplied options
// (optimizer choice, epoch/batch counts, loss printing, stop tolerance).
76  virtual void train(LibtorchDataset & dataset, const LibtorchTrainingOptions & options);
77
// Compute the number of samples per batch from the total sample count and the
// requested number of batches.
85  static unsigned int computeBatchSize(const unsigned int num_samples,
86  const unsigned int num_batches);
// Compute the number of samples a single rank processes from the global batch
// size and the number of participating ranks.
95  static unsigned int computeLocalBatchSize(const unsigned int batch_size,
96  const unsigned int num_ranks);
97
// Factory for the torch optimizer selected by options.optimizer_type, bound to
// the given network's parameters (signature line with the parameters,
// original 104, is missing from this scrape).
103  static std::unique_ptr<torch::optim::Optimizer>
105
106 protected:
109 };
110 } // end Moose namespace
111 
112 #endif
LibtorchArtificialNeuralNetTrainer(LibtorchArtificialNeuralNet &nn, const Parallel::Communicator &comm)
Construct using the neural network and a parallel communicator.
unsigned int num_batches
Number of batches we want to split the dataset into.
bool print_loss
If we want to print additional information during training.
const Parallel::Communicator & comm() const
Templated class which is responsible for training LibtorchArtificialNeuralNets.
static unsigned int computeLocalBatchSize(const unsigned int batch_size, const unsigned int num_ranks)
Computes the number of local samples.
This class is a wrapper around a libtorch dataset which can be used by the data loaders in the neural...
unsigned int parallel_processes
The number of allowed parallel processes.
This is a "smart" enum class intended to replace many of the shortcomings in the C++ enum type. It should...
Definition: MooseEnum.h:33
Real learning_rate
The learning rate for the optimizers.
MooseEnum optimizer_type
The type of optimizer we want to use for training, adam is the default due to its robustness and fast...
LibtorchArtificialNeuralNet & _nn
Reference to the neural network which is trained.
Real rel_loss_tol
The relative loss tolerance where the training shall stop.
DIE A HORRIBLE DEATH HERE typedef LIBMESH_DEFAULT_SCALAR_TYPE Real
virtual void train(LibtorchDataset &dataset, const LibtorchTrainingOptions &options)
Train the neural network using a given (serialized) data and options for the training process...
MOOSE now contains C++17 code, so give a reasonable error message stating what the user can do to add...
unsigned int num_epochs
Number of iterations we want to perform on the whole dataset.
static unsigned int computeBatchSize(const unsigned int num_samples, const unsigned int num_batches)
Computes the number of samples used for each batch.
A struct containing necessary information for training neural networks.
unsigned int print_epoch_loss
The frequency of training loss print to console.
static std::unique_ptr< torch::optim::Optimizer > createOptimizer(const LibtorchArtificialNeuralNet &nn, const LibtorchTrainingOptions &options)
Setup the optimizer based on the provided options.