cpu_padding_channels_last Function — PyTorch Architecture
Architecture documentation for the cpu_padding_channels_last function template in PaddingKernel.cpp from the PyTorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/cpu/PaddingKernel.cpp lines 233–309
template <typename scalar_t, typename PaddingType>
void cpu_padding_channels_last(
    const Tensor& output_,
    const Tensor& input_,
    PaddingParams& p) {
  // Channels-last padding kernel: C is the innermost, contiguous dimension,
  // so every spatial output position owns one contiguous channel vector.
  // We therefore parallelize over the spatial positions and copy `channels`
  // contiguous elements per position via copy_stub.
  auto memory_format = p.ndim == 2
      ? at::MemoryFormat::ChannelsLast
      : at::MemoryFormat::ChannelsLast3d;
  auto input = input_.contiguous(memory_format);
  auto output = output_.contiguous(memory_format);

  const scalar_t* src = input.const_data_ptr<scalar_t>();
  scalar_t* dst = output.data_ptr<scalar_t>();

  int64_t nbatch = p.nbatch;
  int64_t channels = p.channels;
  int ndim = p.ndim;

  // p.ishape / p.oshape / p.pads / p.offsets store dims innermost-last;
  // an absent leading dim degenerates to extent 1 and pad/offset 0.
  int64_t input_depth   = ndim == 3 ? p.ishape[ndim - 3] : 1;
  int64_t input_height  = ndim >= 2 ? p.ishape[ndim - 2] : 1;
  int64_t input_width   = p.ishape[ndim - 1];
  int64_t output_depth  = ndim == 3 ? p.oshape[ndim - 3] : 1;
  int64_t output_height = ndim >= 2 ? p.oshape[ndim - 2] : 1;
  int64_t output_width  = p.oshape[ndim - 1];
  int64_t pad_d = ndim == 3 ? p.pads[ndim - 3] : 0;
  int64_t pad_h = ndim >= 2 ? p.pads[ndim - 2] : 0;
  int64_t pad_w = p.pads[ndim - 1];
  int64_t offset_d = ndim == 3 ? p.offsets[ndim - 3] : 0;
  int64_t offset_h = ndim >= 2 ? p.offsets[ndim - 2] : 0;
  int64_t offset_w = p.offsets[ndim - 1];

  if (ndim == 2) {
    // Two spatial dims: parallel on N,H,W; vectorized copy along C.
    at::parallel_for(0, nbatch * output_height * output_width, 1,
        [&](int64_t begin, int64_t end) {
      int64_t n{0}, oh{0}, ow{0};
      data_index_init(begin, n, nbatch, oh, output_height, ow, output_width);
      for (const auto i : c10::irange(begin, end)) {
        // PaddingType::index maps the output coordinate back to the source
        // coordinate according to the padding mode.
        int64_t ih = PaddingType::index(oh, input_height, pad_h, offset_h);
        int64_t iw = PaddingType::index(ow, input_width, pad_w, offset_w);
        const scalar_t* from =
            src + ((n * input_height + ih) * input_width + iw) * channels;
        scalar_t* to = dst + i * channels;
        copy_stub(to, from, channels);
        data_index_step(n, nbatch, oh, output_height, ow, output_width);
      }
    });
  } else if (ndim == 3) {
    // Three spatial dims: parallel on N,D,H,W; vectorized copy along C.
    at::parallel_for(0, nbatch * output_depth * output_height * output_width, 1,
        [&](int64_t begin, int64_t end) {
      int64_t n{0}, od{0}, oh{0}, ow{0};
      data_index_init(begin, n, nbatch, od, output_depth, oh, output_height, ow, output_width);
      for (const auto i : c10::irange(begin, end)) {
        int64_t id = PaddingType::index(od, input_depth, pad_d, offset_d);
        int64_t ih = PaddingType::index(oh, input_height, pad_h, offset_h);
        int64_t iw = PaddingType::index(ow, input_width, pad_w, offset_w);
        // Horner-form flattening of (n, id, ih, iw) into the NDHW index.
        const scalar_t* from =
            src + (((n * input_depth + id) * input_height + ih) * input_width + iw) * channels;
        scalar_t* to = dst + i * channels;
        copy_stub(to, from, channels);
        data_index_step(n, nbatch, od, output_depth, oh, output_height, ow, output_width);
      }
    });
  } else {
    TORCH_INTERNAL_ASSERT(false, "expect input dim to be 2d or 3d.");
  }

  // `output` may be a freshly allocated contiguous buffer; if the caller's
  // tensor was not already channels-last contiguous, write the result back.
  if (!output_.is_contiguous(memory_format)) {
    output_.copy_(output);
  }
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free