feature_dropout — PyTorch Architecture
Architecture documentation for the feature_dropout variant in PyTorchOperatorHacks.cpp from the PyTorch codebase. feature_dropout is a boolean template parameter of the _dropout_impl function template, not a class: it selects channel-wise (feature) noise instead of element-wise noise.
Entity Profile
Source Code
aten/src/ATen/functorch/PyTorchOperatorHacks.cpp lines 162–200
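The listing relies on helpers defined earlier in the file: Ctype<inplace> (the return type: Tensor& for in-place variants, Tensor otherwise), multiply<inplace> (in-place or out-of-place multiplication), and make_feature_noise (which, in the reference implementation, allocates a noise tensor shaped [N, C, 1, 1, ...] so feature dropout draws one sample per channel). A minimal sketch of the first two, assuming they mirror the definitions in ATen's native Dropout.cpp:

template <bool inplace>
using Ctype = typename std::conditional<inplace, at::Tensor&, at::Tensor>::type;

template <bool inplace>
Ctype<inplace> multiply(at::Tensor& input, const at::Tensor& noise) {
  if constexpr (inplace) {
    return input.mul_(noise); // mutate and return the same tensor
  } else {
    return input * noise;     // allocate a new result tensor
  }
}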
template<bool feature_dropout, bool alpha_dropout, bool inplace, typename T>
Ctype<inplace> _dropout_impl(T& input, double p, bool train) {
  TORCH_CHECK(p >= 0 && p <= 1, "dropout probability has to be between 0 and 1, but got ", p);
  // No-op cases: nothing to drop, eval mode, or an empty input.
  if (p == 0 || !train || input.numel() == 0) {
    return input;
  }
  // p == 1 zeroes everything; multiplying by a zero scalar keeps autograd semantics.
  if (p == 1) {
    return multiply<inplace>(input, at::zeros({}, input.options()));
  }
  at::Tensor b; // used for alpha_dropout only
  // NB: THIS WAS CHANGED FROM THE ORIGINAL
  Tensor noise;
  if (feature_dropout) {
    // Feature (channel-wise) dropout: one Bernoulli draw per channel,
    // broadcast across the remaining dimensions.
    auto empty = make_feature_noise(input);
    noise = at::bernoulli(empty, 1 - p);
  } else {
    // NB: it is important that this is at::empty and not at::empty_like
    auto empty = at::empty({}, input.options()).expand(input.sizes());
    noise = at::bernoulli(empty, 1 - p);
  }
  if (alpha_dropout) {
    // Alpha dropout (for SELU networks): alpha is the product of the SELU
    // alpha and scale constants; a and b restore zero mean and unit variance.
    constexpr double alpha = 1.7580993408473766;
    double a = 1. / std::sqrt((alpha * alpha * p + 1) * (1 - p));
    b = noise.add(-1).mul_(alpha * a).add_(alpha * a * p);
    noise.mul_(a);
  } else {
    // Standard (inverted) dropout: rescale kept units by 1 / (1 - p).
    noise.div_(1 - p);
  }
  if (!alpha_dropout) {
    return multiply<inplace>(input, noise);
  } else {
    return multiply<inplace>(input, noise).add_(b);
  }
}
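The constants in the alpha_dropout branch come from the self-normalizing network (SELU) derivation. The following worked sketch (our notation, not from the source file) assumes the input x has zero mean and unit variance; with keep probability q = 1 - p and mask m ~ Bernoulli(q), dropped activations are pushed to the SELU saturation value \(\alpha' = -\alpha\):

\[
y = m \odot x + (1-m)\,\alpha', \qquad \mathbb{E}[y] = (1-q)\,\alpha', \qquad \operatorname{Var}[y] = q + \alpha'^2\,q\,(1-q).
\]

Restoring zero mean and unit variance with the affine map \(a\,y + b\) gives

\[
a = \frac{1}{\sqrt{q + \alpha'^2\,q\,(1-q)}} = \frac{1}{\sqrt{(\alpha^2 p + 1)(1-p)}}, \qquad
b = -a\,(1-q)\,\alpha' = a\,\alpha\,p,
\]

which match the scalar a and the constant term of the tensor b computed above; the code folds the mask into b so the result is produced by a single multiply followed by an add. In ATen's reference Dropout.cpp, the public _dropout, _feature_dropout, _alpha_dropout, and _feature_alpha_dropout entry points are generated as thin aliases that instantiate this template with the matching boolean flags; the functorch copy presumably follows the same pattern.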