#ifndef MLPACK_METHODS_ANN_LAYER_ADAPTIVE_MAX_POOLING_HPP
#define MLPACK_METHODS_ANN_LAYER_ADAPTIVE_MAX_POOLING_HPP

#include <mlpack/prereqs.hpp>
#include "max_pooling.hpp"

namespace mlpack {
namespace ann {
/**
 * Implementation of the AdaptiveMaxPooling layer.
 */
template <
    typename InputDataType = arma::mat,
    typename OutputDataType = arma::mat
>
class AdaptiveMaxPooling
{
 public:
  //! Create the AdaptiveMaxPooling object.
  AdaptiveMaxPooling();

  //! Create the AdaptiveMaxPooling object, given the width and height of the
  //! desired output.
  AdaptiveMaxPooling(const size_t outputWidth, const size_t outputHeight);

  //! Create the AdaptiveMaxPooling object, given the output shape as a
  //! (width, height) tuple.
  AdaptiveMaxPooling(const std::tuple<size_t, size_t>& outputShape);
  //! Ordinary feed-forward pass of a neural network: evaluate f(x) by
  //! propagating the activity forward through the wrapped pooling layer.
  template<typename eT>
  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output);

  //! Ordinary feed-backward pass of a neural network: propagate the
  //! backpropagated error gy through the layer, writing the result to g.
  template<typename eT>
  void Backward(const arma::Mat<eT>& input,
                const arma::Mat<eT>& gy,
                arma::Mat<eT>& g);
  //! Get the output parameter.
  const OutputDataType& OutputParameter() const
  { return poolingLayer.OutputParameter(); }
  //! Modify the output parameter.
  OutputDataType& OutputParameter() { return poolingLayer.OutputParameter(); }

  //! Get the delta.
  const OutputDataType& Delta() const { return poolingLayer.Delta(); }
  //! Modify the delta.
  OutputDataType& Delta() { return poolingLayer.Delta(); }

  //! Get the input width.
  size_t InputWidth() const { return poolingLayer.InputWidth(); }
  //! Modify the input width.
  size_t& InputWidth() { return poolingLayer.InputWidth(); }

  //! Get the input height.
  size_t InputHeight() const { return poolingLayer.InputHeight(); }
  //! Modify the input height.
  size_t& InputHeight() { return poolingLayer.InputHeight(); }

  //! Get the input size.
  size_t InputSize() const { return poolingLayer.InputSize(); }

  //! Get the output size.
  size_t OutputSize() const { return poolingLayer.OutputSize(); }
  //! Serialize the layer.
  template<typename Archive>
  void serialize(Archive& ar, const unsigned int version);
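  /*
   * Serialization sketch (illustrative only; it assumes the
   * boost::serialization archive model implied by the serialize() signature
   * above, and the output file name is hypothetical):
   *
   *   #include <boost/archive/text_oarchive.hpp>
   *   #include <fstream>
   *
   *   const AdaptiveMaxPooling<> layer(4, 4);
   *   std::ofstream file("layer.txt");
   *   boost::archive::text_oarchive archive(file);
   *   archive << layer;  // Invokes the member serialize() declared above.
   */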
 private:
  //! Initialize the kernel size and stride of the wrapped pooling layer so
  //! that it produces the requested output shape for the current input shape.
  void IntializeAdaptivePadding()
  {
    poolingLayer.StrideWidth() = std::floor(poolingLayer.InputWidth() /
        outputWidth);
    poolingLayer.StrideHeight() = std::floor(poolingLayer.InputHeight() /
        outputHeight);

    poolingLayer.KernelWidth() = poolingLayer.InputWidth() -
        (outputWidth - 1) * poolingLayer.StrideWidth();
    poolingLayer.KernelHeight() = poolingLayer.InputHeight() -
        (outputHeight - 1) * poolingLayer.StrideHeight();

    if (poolingLayer.KernelHeight() <= 0 || poolingLayer.KernelWidth() <= 0 ||
        poolingLayer.StrideWidth() <= 0 || poolingLayer.StrideHeight() <= 0)
    {
      Log::Fatal << "Given output shape (" << outputWidth << ", "
          << outputHeight << ") is not possible for given input shape ("
          << poolingLayer.InputWidth() << ", " << poolingLayer.InputHeight()
          << ")." << std::endl;
    }
  }
  //! The internally held MaxPooling layer whose kernel size and stride are
  //! set adaptively.
  MaxPooling<InputDataType, OutputDataType> poolingLayer;

  //! Locally stored width of the output.
  size_t outputWidth;

  //! Locally stored height of the output.
  size_t outputHeight;
}; // class AdaptiveMaxPooling

} // namespace ann
} // namespace mlpack

// Include implementation.
#include "adaptive_max_pooling_impl.hpp"

#endif
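// A minimal standalone usage sketch (illustrative, not part of the header; it
// assumes the direct-layer workflow used by mlpack's ann layer tests: set the
// input shape through the InputWidth()/InputHeight() modifiers, then run a
// forward pass on a flattened column vector):
//
//   #include <mlpack/methods/ann/layer/adaptive_max_pooling.hpp>
//
//   int main()
//   {
//     // Pool an arbitrary 28 x 28 input down to a fixed 4 x 4 output.
//     mlpack::ann::AdaptiveMaxPooling<> layer(4, 4);
//     layer.InputWidth() = 28;
//     layer.InputHeight() = 28;
//
//     arma::mat input(28 * 28, 1, arma::fill::randu);  // One flattened image.
//     arma::mat output;
//     layer.Forward(input, output);  // output should hold 4 * 4 = 16 maxima.
//   }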