GradientDescent.h
// n2p2 - A neural network potential package
// Copyright (C) 2018 Andreas Singraber (University of Vienna)
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

#ifndef GRADIENTDESCENT_H
#define GRADIENTDESCENT_H

#include "Updater.h"
#include <cstddef> // std::size_t
#include <string>  // std::string
#include <vector>  // std::vector

namespace nnp
{

30{
31public:
34 {
39 };

    /// GradientDescent class constructor.
    GradientDescent(std::size_t const sizeState, DescentType const type);
    /// Destructor.
    virtual ~GradientDescent() {};
    /// Set pointer to current state.
    void setState(double* state);
    /// Set pointer to current error vector.
    void setError(double const* const error,
                  std::size_t const size = 1);
    /// Set pointer to current Jacobi matrix.
    void setJacobian(double const* const jacobian,
                     std::size_t const columns = 1);
    /// Perform connection update.
    void update();
    /// Set parameters for fixed step gradient descent algorithm.
    void setParametersFixed(double const eta);
    /// Set parameters for Adam algorithm.
    void setParametersAdam(double const eta,
                           double const beta1,
                           double const beta2,
                           double const epsilon);
    /// Status report.
    std::string status(std::size_t epoch) const;
    /// Header for status report file.
    std::vector<std::string> statusHeader() const;
    /// Information about gradient descent settings.
    std::vector<std::string> info() const;

private:
    /// Learning rate.
    double eta;
    /// Decay rate 1 (Adam).
    double beta1;
    /// Decay rate 2 (Adam).
    double beta2;
    /// Small scalar.
    double epsilon;
    /// Initial learning rate.
    double eta0;
    /// Decay rate 1 to the power of t (Adam).
    double beta1t;
    /// Decay rate 2 to the power of t (Adam).
    double beta2t;
    /// State vector pointer.
    double* state;
    /// Error pointer (single double value).
    double const* error;
    /// Gradient vector pointer.
    double const* gradient;
    /// First moment estimate (Adam).
    std::vector<double> m;
    /// Second moment estimate (Adam).
    std::vector<double> v;
};

}

#endif
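
For context, a minimal usage sketch of the interface declared above. The buffer sizes, numeric values, and exact call order here are illustrative assumptions (the Adam parameters are the commonly cited defaults), not taken from n2p2's actual training code:

// Hypothetical driver for the GradientDescent interface above; the state
// and gradient buffers and all numeric values are illustrative only.
#include "GradientDescent.h"
#include <cstddef>
#include <vector>

int main()
{
    std::size_t const n = 8;             // example number of weights
    std::vector<double> weights(n, 0.1); // state vector owned by the caller
    std::vector<double> gradient(n, 0.0);
    double error = 0.0;

    nnp::GradientDescent gd(n, nnp::GradientDescent::DT_ADAM);
    gd.setParametersAdam(1.0E-3, 0.9, 0.999, 1.0E-8); // common Adam defaults
    gd.setState(weights.data());      // update() modifies weights in place
    gd.setError(&error);
    gd.setJacobian(gradient.data());

    // In a training loop the caller would refresh error/gradient from the
    // current network state, then apply one step:
    gd.update();

    return 0;
}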
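The private members m, v, beta1t, and beta2t match the bookkeeping of the standard bias-corrected Adam update. As a reading aid, here is a sketch of that textbook update rule; it is an assumption about what update() computes in DT_ADAM mode, inferred from the member names, not a copy of n2p2's implementation:

// Textbook bias-corrected Adam step (sketch; assumed, not n2p2's code).
// w: state, g: gradient, m/v: moment estimates; beta1t/beta2t accumulate
// beta1^t and beta2^t across calls (initialize both to 1.0).
#include <cmath>
#include <cstddef>
#include <vector>

void adamStep(std::vector<double>& w, std::vector<double> const& g,
              std::vector<double>& m, std::vector<double>& v,
              double& beta1t, double& beta2t,
              double const eta, double const beta1,
              double const beta2, double const epsilon)
{
    beta1t *= beta1; // equals beta1^t after t calls
    beta2t *= beta2; // equals beta2^t after t calls
    for (std::size_t i = 0; i < w.size(); ++i)
    {
        m[i] = beta1 * m[i] + (1.0 - beta1) * g[i];        // first moment
        v[i] = beta2 * v[i] + (1.0 - beta2) * g[i] * g[i]; // second moment
        double const mhat = m[i] / (1.0 - beta1t);         // bias correction
        double const vhat = v[i] / (1.0 - beta2t);
        w[i] -= eta * mhat / (std::sqrt(vhat) + epsilon);  // descent step
    }
}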