#ifndef MLPACK_METHODS_ANN_LAYER_MAX_POOLING_HPP
#define MLPACK_METHODS_ANN_LAYER_MAX_POOLING_HPP

#include <mlpack/prereqs.hpp>
/*
 * The max pooling rule: return the (column-major) linear index of the maximum
 * value within the receptive block.
 */
class MaxPoolingRule
{
 public:
  template<typename MatType>
  size_t Pooling(const MatType& input)
  {
    return arma::as_scalar(arma::find(input.max() == input, 1));
  }
};
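For intuition, the rule returns the column-major linear index of the first maximum inside a window rather than the value itself; the caller then looks the value up with that index. A standalone sketch using plain Armadillo (illustrative, not part of the header):

#include <armadillo>

int main()
{
  arma::mat window = { { 1.0, 3.0 },
                       { 4.0, 2.0 } };

  // Armadillo stores column-major (1, 4, 3, 2), so the maximum 4.0 sits at
  // linear index 1; find(window.max() == window, 1) returns that position.
  const arma::uword idx =
      arma::as_scalar(arma::find(window.max() == window, 1));

  // Here idx == 1 and window(idx) == 4.0.
  return static_cast<int>(idx);
}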
//! Implementation of the MaxPooling layer.
template<
    typename InputDataType = arma::mat,
    typename OutputDataType = arma::mat
>
class MaxPooling
{
 public:
  //! Create the MaxPooling object using the specified kernel and stride
  //! sizes; `floor` selects how the pooled output size is rounded.
  MaxPooling(const size_t kernelWidth,
             const size_t kernelHeight,
             const size_t strideWidth = 1,
             const size_t strideHeight = 1,
             const bool floor = true);
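  // Illustrative note (not in the original header): the pooled size along one
  // dimension follows the usual arithmetic,
  //
  //     out = floor((in - kernel) / stride) + 1   when floor == true
  //     out = ceil((in - kernel) / stride) + 1    when floor == false
  //
  // so a 5x5 input with a 2x2 kernel and stride 2 pools to 2x2 with floor
  // rounding and to 3x3 with ceil rounding.  The authoritative expressions
  // live in max_pooling_impl.hpp.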
  //! Ordinary feed forward pass of a neural network, evaluating the function
  //! f(x) by propagating the activity forward through f.
  template<typename eT>
  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output);
  //! Ordinary feed backward pass of a neural network, using 3rd-order tensors
  //! as input, calculating f(x) by propagating x backwards through f.
  template<typename eT>
  void Backward(const arma::Mat<eT>& /* input */,
                const arma::Mat<eT>& gy,
                arma::Mat<eT>& g);
  //! Get the delta.
  const OutputDataType& Delta() const { return delta; }
  //! Modify the delta.
  OutputDataType& Delta() { return delta; }
  //! Get the value of the rounding operation (floor or ceil).
  bool Floor() const { return floor; }
  //! Serialize the layer.
  template<typename Archive>
  void serialize(Archive& ar, const unsigned int /* version */);
 private:
  /**
   * Apply the pooling rule to every window of the input and store the pooled
   * value (and, when training, the position of the winning element).
   */
  template<typename eT>
  void PoolingOperation(const arma::Mat<eT>& input,
                        arma::Mat<eT>& output,
                        arma::Mat<eT>& poolingIndices)
  {
    for (size_t j = 0, colidx = 0; j < output.n_cols;
         ++j, colidx += strideHeight)
    {
      for (size_t i = 0, rowidx = 0; i < output.n_rows;
           ++i, rowidx += strideWidth)
      {
        // Extract the current pooling window; `offset` is set by the
        // implementation and trims the window extent.
        arma::mat subInput = input(
            arma::span(rowidx, rowidx + kernelWidth - 1 - offset),
            arma::span(colidx, colidx + kernelHeight - 1 - offset));

        // The pooling rule returns the window-local linear index of the
        // maximum; the pooled output is the value at that index.
        const size_t idx = pooling.Pooling(subInput);
        output(i, j) = subInput(idx);

        if (!deterministic)
        {
          // Remember where this maximum sits in the full input so that the
          // backward pass can route the error back to it.
          arma::Mat<size_t> subIndices = indices(arma::span(rowidx,
              rowidx + kernelWidth - 1 - offset),
              arma::span(colidx, colidx + kernelHeight - 1 - offset));

          poolingIndices(i, j) = subIndices(idx);
        }
      }
    }
  }
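  // Illustrative note (not in the original header): `indices`, built in
  // max_pooling_impl.hpp, is assumed to hold each input element's own
  // column-major linear position, e.g. for a 4x4 input
  //
  //      0  4  8 12
  //      1  5  9 13
  //      2  6 10 14
  //      3  7 11 15
  //
  // so subIndices(idx) converts the window-local index idx into a position in
  // the full input, which Unpooling() uses below.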
  /**
   * Unpooling: add each backpropagated error value back onto the input
   * position that produced the corresponding maximum.
   */
  template<typename eT>
  void Unpooling(const arma::Mat<eT>& error,
                 arma::Mat<eT>& output,
                 arma::Mat<eT>& poolingIndices)
  {
    for (size_t i = 0; i < poolingIndices.n_elem; ++i)
      output(poolingIndices(i)) += error(i);
  }
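  // Illustrative note (not in the original header): this scatter is the whole
  // gradient of max pooling.  If the forward pass recorded winning positions
  // {5, 7, 13, 15} for a 2x2 output and the backward error is
  // {0.1, 0.2, 0.3, 0.4} (column-major order), then output is zero everywhere
  // except
  //
  //     output(5) += 0.1,  output(7) += 0.2,
  //     output(13) += 0.3, output(15) += 0.4,
  //
  // with "+=" mattering only where pooling windows overlap.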
  //! Locally-stored cube form of the output, used in the forward pass.
  arma::cube outputTemp;
  //! Locally-stored cube form of the input, used in the forward pass.
  arma::cube inputTemp;
  //! The pooling rule applied to each window.
  MaxPoolingRule pooling;
  //! Locally-stored delta object.
  OutputDataType delta;
  //! Locally-stored gradient object.
  OutputDataType gradient;
  //! Locally-stored output parameter object.
  OutputDataType outputParameter;
  //! Index bookkeeping matrix over the input elements.
  arma::Mat<size_t> indices;
  //! Column form of the index bookkeeping matrix.
  arma::Col<size_t> indicesCol;
  //! The recorded positions of the pooled maxima.
  std::vector<arma::cube> poolingIndices;
}; // class MaxPooling

// Include implementation.
#include "max_pooling_impl.hpp"

#endif
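A minimal usage sketch (illustrative, not from the mlpack documentation), assuming the older mlpack::ann layer API where a layer's input dimensions are set through InputWidth() and InputHeight() before the forward pass:

#include <mlpack/core.hpp>
#include <mlpack/methods/ann/layer/max_pooling.hpp>

using namespace mlpack::ann;

int main()
{
  // One 4x4 "image", stored column-wise in a single column.
  arma::mat input(16, 1, arma::fill::randu);
  arma::mat output, g;

  // 2x2 kernel, stride 2, floor rounding: 4x4 pools down to 2x2.
  MaxPooling<> layer(2, 2, 2, 2, true);
  layer.InputWidth() = 4;
  layer.InputHeight() = 4;

  layer.Forward(input, output);   // four pooled maxima

  // Send a dummy error of all ones back through the layer; g ends up the
  // same size as the input, nonzero only at the positions of the maxima.
  arma::mat gy = arma::ones<arma::mat>(output.n_rows, output.n_cols);
  layer.Backward(input, gy, g);

  return 0;
}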