FullLayer Class — PyTorch Architecture
Architecture documentation for the FullLayer class in RNN.cpp from the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/RNN.cpp lines 835–877
// FullLayer: unrolls a single RNN cell over every time step of a sequence,
// threading the hidden state from one step to the next.
template<typename hidden_type, typename cell_params>
struct FullLayer : Layer<Tensor, hidden_type, cell_params> {
  using output_type =
      typename Layer<Tensor, hidden_type, cell_params>::output_type;
  using unstacked_output_type = LayerOutput<std::vector<Tensor>, hidden_type>;

  FullLayer(Cell<hidden_type, cell_params>& cell)
    : cell_(cell) {}

  // Run the cell over each per-step input tensor in order.
  //
  // step_inputs:       one tensor per time step.
  // input_hidden:      initial hidden state.
  // params:            cell parameters (weights/biases).
  // pre_compute_input: when true, step_inputs already contain the
  //                    input-to-hidden projection (see the CPU fast path
  //                    below) — presumably the cell then skips its own
  //                    input projection; flag is forwarded verbatim.
  //
  // Returns the per-step outputs (unstacked) plus the final hidden state.
  unstacked_output_type operator()(
      const std::vector<Tensor>& step_inputs,
      const hidden_type& input_hidden,
      const cell_params& params,
      bool pre_compute_input = false) const {
    std::vector<Tensor> step_outputs;
    // Output count equals step count — reserve once to avoid reallocations.
    step_outputs.reserve(step_inputs.size());
    auto hidden = input_hidden;
    for (const auto& input : step_inputs) {
      hidden = cell_(input, hidden, params, pre_compute_input);
      step_outputs.emplace_back(hidden_as_output(hidden));
    }
    // Move the vector: avoids copying a vector of Tensors on return.
    return {std::move(step_outputs), hidden};
  }

  // Run the layer on a stacked (seq_len-first) input tensor.
  output_type operator()(
      const Tensor& inputs,
      const hidden_type& input_hidden,
      const cell_params& params) const override {
    if (inputs.device().is_cpu()) {
      // CPU fast path: batch the input-to-hidden projection for all steps
      // into a single matmul up front, then tell the cell the input is
      // pre-computed.
      const auto inputs_w = params.linear_ih(inputs);
      return stack_outputs_(
          (*this)(inputs_w.unbind(0), input_hidden, params, true));
    }
    return stack_outputs_((*this)(inputs.unbind(0), input_hidden, params));
  }

 private:
  // Validate the sequence is non-empty and re-stack per-step outputs into a
  // single seq_len-first tensor, paired with the final hidden state.
  // (Shared by both branches of the stacked-input operator() above.)
  output_type stack_outputs_(unstacked_output_type&& unstacked) const {
    TORCH_CHECK(!unstacked.outputs.empty(),
                "Expected sequence length to be larger than 0 in RNN");
    return {at::stack(unstacked.outputs, 0),
            std::move(unstacked.final_hidden)};
  }

  Cell<hidden_type, cell_params>& cell_;
};
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free