mlpack 3.4.2
layer_types.hpp
Go to the documentation of this file.
1
12#ifndef MLPACK_METHODS_ANN_LAYER_LAYER_TYPES_HPP
13#define MLPACK_METHODS_ANN_LAYER_LAYER_TYPES_HPP
14
15#include <boost/variant.hpp>
16
17// Layer modules.
57
58// Convolution modules.
62
63// Regularizers.
65
66// Loss function modules.
68
69namespace mlpack {
70namespace ann {
71
72template<typename InputDataType, typename OutputDataType> class BatchNorm;
73template<typename InputDataType, typename OutputDataType> class DropConnect;
74template<typename InputDataType, typename OutputDataType> class Glimpse;
75template<typename InputDataType, typename OutputDataType> class LayerNorm;
76template<typename InputDataType, typename OutputDataType> class LSTM;
77template<typename InputDataType, typename OutputDataType> class GRU;
78template<typename InputDataType, typename OutputDataType> class FastLSTM;
79template<typename InputDataType, typename OutputDataType> class VRClassReward;
80template<typename InputDataType, typename OutputDataType> class Concatenate;
81template<typename InputDataType, typename OutputDataType> class Padding;
82
83template<typename InputDataType,
84 typename OutputDataType,
85 typename RegularizerType>
86class Linear;
87
88template<typename InputDataType,
89 typename OutputDataType,
90 typename Activation>
91class RBF;
92
93template<typename InputDataType,
94 typename OutputDataType,
95 typename RegularizerType>
96class LinearNoBias;
97
98template<typename InputDataType,
99 typename OutputDataType>
100class NoisyLinear;
101
102template<typename InputDataType,
103 typename OutputDataType,
104 typename RegularizerType>
105class Linear3D;
106
107template<typename InputDataType,
108 typename OutputDataType
109>
110class VirtualBatchNorm;
111
112template<typename InputDataType,
113 typename OutputDataType
114>
115class MiniBatchDiscrimination;
116
117template <typename InputDataType,
118 typename OutputDataType,
119 typename RegularizerType>
120class MultiheadAttention;
121
122template<typename InputDataType,
123 typename OutputDataType
124>
125class Reparametrization;
126
127template<typename InputDataType,
128 typename OutputDataType,
129 typename... CustomLayers
130>
131class AddMerge;
132
133template<typename InputDataType,
134 typename OutputDataType,
135 bool residual,
136 typename... CustomLayers
137>
138class Sequential;
139
140template<typename InputDataType,
141 typename OutputDataType,
142 typename... CustomLayers
143>
144class Highway;
145
146template<typename InputDataType,
147 typename OutputDataType,
148 typename... CustomLayers
149>
150class Recurrent;
151
152template<typename InputDataType,
153 typename OutputDataType,
154 typename... CustomLayers
155>
156class Concat;
157
158template<
159 typename OutputLayerType,
160 typename InputDataType,
161 typename OutputDataType
162>
163class ConcatPerformance;
164
165template<
166 typename ForwardConvolutionRule,
167 typename BackwardConvolutionRule,
168 typename GradientConvolutionRule,
169 typename InputDataType,
170 typename OutputDataType
171>
172class Convolution;
173
174template<
175 typename ForwardConvolutionRule,
176 typename BackwardConvolutionRule,
177 typename GradientConvolutionRule,
178 typename InputDataType,
179 typename OutputDataType
180>
181class TransposedConvolution;
182
183template<
184 typename ForwardConvolutionRule,
185 typename BackwardConvolutionRule,
186 typename GradientConvolutionRule,
187 typename InputDataType,
188 typename OutputDataType
189>
190class AtrousConvolution;
191
192template<
193 typename InputDataType,
194 typename OutputDataType
195>
196class RecurrentAttention;
197
198template<typename InputDataType,
199 typename OutputDataType,
200 typename... CustomLayers
201>
202class MultiplyMerge;
203
204template <typename InputDataType,
205 typename OutputDataType,
206 typename... CustomLayers
207>
208class WeightNorm;
209
210template <typename InputDataType,
211 typename OutputDataType
212>
213class AdaptiveMaxPooling;
214
215template <typename InputDataType,
216 typename OutputDataType
217>
218class AdaptiveMeanPooling;
219
220using MoreTypes = boost::variant<
238>;
239
240template <typename... CustomLayers>
241using LayerTypes = boost::variant<
250 arma::mat, arma::mat>*,
262 arma::mat, arma::mat>*,
266 NaiveConvolution<ValidConvolution>, arma::mat, arma::mat>*,
296 NaiveConvolution<ValidConvolution>, arma::mat, arma::mat>*,
298 MoreTypes,
299 CustomLayers*...
300>;
301
302} // namespace ann
303} // namespace mlpack
304
305#endif
Implementation of the AdaptiveMaxPooling layer.
Implementation of the AdaptiveMeanPooling layer.
Implementation of the AddMerge module class.
Definition: add_merge.hpp:43
Implementation of the Add module class.
Definition: add.hpp:35
The alpha-dropout layer is a regularizer that randomly with probability 'ratio' sets input values t...
Implementation of the Atrous Convolution class.
Implementation of the base layer.
Definition: base_layer.hpp:66
Declaration of the Batch Normalization layer class.
Definition: batch_norm.hpp:57
Definition and Implementation of the Bilinear Interpolation Layer.
The CELU activation function, defined by.
Definition: celu.hpp:61
A concatenated ReLU has two outputs, one ReLU and one negative ReLU, concatenated together.
Definition: c_relu.hpp:51
Implementation of the concat performance class.
Implementation of the Concat class.
Definition: concat.hpp:46
Implementation of the Concatenate module class.
Definition: concatenate.hpp:37
Implementation of the constant layer.
Definition: constant.hpp:35
Implementation of the Convolution class.
Definition: convolution.hpp:49
The DropConnect layer is a regularizer that randomly with probability ratio sets the connection value...
Definition: dropconnect.hpp:64
The dropout layer is a regularizer that randomly with probability 'ratio' sets input values to zero a...
Definition: dropout.hpp:54
The ELU activation function, defined by.
Definition: elu.hpp:112
An implementation of a faster version of the Fast LSTM network layer.
Definition: fast_lstm.hpp:67
The FlexibleReLU activation function, defined by.
An implementation of a gru network layer.
Definition: gru.hpp:59
The glimpse layer returns a retina-like representation (down-scaled cropped images) of increasing sca...
Definition: glimpse.hpp:89
The Hard Tanh activation function, defined by.
Definition: hard_tanh.hpp:50
Implementation of the Highway layer.
Definition: highway.hpp:61
Implementation of the Join module class.
Definition: join.hpp:34
Implementation of the LSTM module class.
Definition: lstm.hpp:63
Declaration of the Layer Normalization class.
Definition: layer_norm.hpp:66
The LeakyReLU activation function, defined by.
Definition: leaky_relu.hpp:45
Implementation of the Linear3D layer class.
Definition: linear3d.hpp:41
Implementation of the LinearNoBias class.
Implementation of the Linear layer class.
Definition: linear.hpp:39
Implementation of the log softmax layer.
Definition: log_softmax.hpp:37
The Lookup class stores word embeddings and retrieves them using tokens.
Definition: lookup.hpp:42
Implementation of the MaxPooling layer.
Definition: max_pooling.hpp:53
Implementation of the MeanPooling layer.
Implementation of the MiniBatchDiscrimination layer.
Multihead Attention allows the model to jointly attend to information from different representation s...
Implementation of the multiply constant layer.
Implementation of the MultiplyMerge module class.
Computes the two-dimensional convolution.
Implementation of the negative log likelihood layer.
Implementation of the NoisyLinear layer class.
Definition: noisylinear.hpp:34
The PReLU activation function, defined by (where alpha is trainable)
Implementation of the Padding module class.
Definition: padding.hpp:35
Positional Encoding injects some information about the relative or absolute position of the tokens in...
Implementation of the Radial Basis Function layer.
This class implements the Recurrent Model for Visual Attention, using a variety of possible layer imp...
Implementation of the RecurrentLayer class.
Definition: recurrent.hpp:45
Implementation of the reinforce normal layer.
Implementation of the Reparametrization layer class.
The select module selects the specified column from a given input matrix.
Definition: select.hpp:33
Implementation of the Sequential class.
Definition: sequential.hpp:73
Implementation of the Softmax layer.
Definition: softmax.hpp:39
Implementation of the SpatialDropout layer.
Implementation of the subview layer.
Definition: subview.hpp:35
Implementation of the Transposed Convolution class.
Implementation of the variance reduced classification reinforcement layer.
Declaration of the VirtualBatchNorm layer class.
Declaration of the WeightNorm layer class.
Definition: weight_norm.hpp:62
boost::variant< AdaptiveMaxPooling< arma::mat, arma::mat > *, AdaptiveMeanPooling< arma::mat, arma::mat > *, Add< arma::mat, arma::mat > *, AddMerge< arma::mat, arma::mat > *, AlphaDropout< arma::mat, arma::mat > *, AtrousConvolution< NaiveConvolution< ValidConvolution >, NaiveConvolution< FullConvolution >, NaiveConvolution< ValidConvolution >, arma::mat, arma::mat > *, BaseLayer< LogisticFunction, arma::mat, arma::mat > *, BaseLayer< IdentityFunction, arma::mat, arma::mat > *, BaseLayer< TanhFunction, arma::mat, arma::mat > *, BaseLayer< SoftplusFunction, arma::mat, arma::mat > *, BaseLayer< RectifierFunction, arma::mat, arma::mat > *, BatchNorm< arma::mat, arma::mat > *, BilinearInterpolation< arma::mat, arma::mat > *, CELU< arma::mat, arma::mat > *, Concat< arma::mat, arma::mat > *, Concatenate< arma::mat, arma::mat > *, ConcatPerformance< NegativeLogLikelihood< arma::mat, arma::mat >, arma::mat, arma::mat > *, Constant< arma::mat, arma::mat > *, Convolution< NaiveConvolution< ValidConvolution >, NaiveConvolution< FullConvolution >, NaiveConvolution< ValidConvolution >, arma::mat, arma::mat > *, CReLU< arma::mat, arma::mat > *, DropConnect< arma::mat, arma::mat > *, Dropout< arma::mat, arma::mat > *, ELU< arma::mat, arma::mat > *, FastLSTM< arma::mat, arma::mat > *, FlexibleReLU< arma::mat, arma::mat > *, GRU< arma::mat, arma::mat > *, HardTanH< arma::mat, arma::mat > *, Join< arma::mat, arma::mat > *, LayerNorm< arma::mat, arma::mat > *, LeakyReLU< arma::mat, arma::mat > *, Linear< arma::mat, arma::mat, NoRegularizer > *, LinearNoBias< arma::mat, arma::mat, NoRegularizer > *, LogSoftMax< arma::mat, arma::mat > *, Lookup< arma::mat, arma::mat > *, LSTM< arma::mat, arma::mat > *, MaxPooling< arma::mat, arma::mat > *, MeanPooling< arma::mat, arma::mat > *, MiniBatchDiscrimination< arma::mat, arma::mat > *, MultiplyConstant< arma::mat, arma::mat > *, MultiplyMerge< arma::mat, arma::mat > *, NegativeLogLikelihood< arma::mat, arma::mat > *, NoisyLinear< arma::mat, 
arma::mat > *, Padding< arma::mat, arma::mat > *, PReLU< arma::mat, arma::mat > *, Softmax< arma::mat, arma::mat > *, SpatialDropout< arma::mat, arma::mat > *, TransposedConvolution< NaiveConvolution< ValidConvolution >, NaiveConvolution< ValidConvolution >, NaiveConvolution< ValidConvolution >, arma::mat, arma::mat > *, WeightNorm< arma::mat, arma::mat > *, MoreTypes, CustomLayers *... > LayerTypes
boost::variant< Linear3D< arma::mat, arma::mat, NoRegularizer > *, Glimpse< arma::mat, arma::mat > *, Highway< arma::mat, arma::mat > *, MultiheadAttention< arma::mat, arma::mat, NoRegularizer > *, Recurrent< arma::mat, arma::mat > *, RecurrentAttention< arma::mat, arma::mat > *, ReinforceNormal< arma::mat, arma::mat > *, Reparametrization< arma::mat, arma::mat > *, Select< arma::mat, arma::mat > *, Sequential< arma::mat, arma::mat, false > *, Sequential< arma::mat, arma::mat, true > *, Subview< arma::mat, arma::mat > *, VRClassReward< arma::mat, arma::mat > *, VirtualBatchNorm< arma::mat, arma::mat > *, RBF< arma::mat, arma::mat, GaussianFunction > *, BaseLayer< GaussianFunction, arma::mat, arma::mat > *, PositionalEncoding< arma::mat, arma::mat > * > MoreTypes
Linear algebra utility functions, generally performed on matrices or vectors.
Definition: cv.hpp:1