
cpu_padding_backward Function — pytorch Architecture

Architecture documentation for the cpu_padding_backward function template in PaddingKernel.cpp from the pytorch codebase. The kernel implements the CPU backward pass for 1d, 2d, and 3d padding operators: each output-gradient element is accumulated back into the input-gradient element it was padded from, with the batch and channel dimensions folded into a single parallel loop.

Entity Profile

Source Code

aten/src/ATen/native/cpu/PaddingKernel.cpp lines 311–393

template <typename scalar_t, typename PaddingType>
void cpu_padding_backward(
    const Tensor& grad_input_,
    const Tensor& grad_output_,
    PaddingParams& p) {

  auto grad_output = grad_output_.contiguous();
  auto grad_input = grad_input_.contiguous();

  auto grad_output_data = grad_output.const_data_ptr<scalar_t>();
  auto grad_input_data = grad_input.data_ptr<scalar_t>();

  // fold nbatch and channels into single dimension for channels first.
  int64_t channels = p.nbatch * p.channels;

  int ndim = p.ndim;
  int64_t input_depth = ndim == 3 ? p.ishape[ndim - 3] : 1;
  int64_t input_height = ndim >= 2 ? p.ishape[ndim - 2] : 1;
  int64_t input_width = p.ishape[ndim - 1];
  int64_t output_depth = ndim == 3 ? p.oshape[ndim - 3] : 1;
  int64_t output_height = ndim >= 2 ? p.oshape[ndim - 2] : 1;
  int64_t output_width = p.oshape[ndim - 1];
  int64_t pad_d = ndim == 3 ? p.pads[ndim - 3] : 0;
  int64_t pad_h = ndim >= 2 ? p.pads[ndim - 2] : 0;
  int64_t pad_w = p.pads[ndim - 1];
  int64_t offset_d = ndim == 3 ? p.offsets[ndim - 3] : 0;
  int64_t offset_h = ndim >= 2 ? p.offsets[ndim - 2] : 0;
  int64_t offset_w = p.offsets[ndim - 1];

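  // PaddingType::index maps each output coordinate back to the input
  // coordinate it was padded from; the += accumulation below is needed
  // because several output elements may map to the same input element.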
  if (ndim == 1) {
    // parallel on N,C, sequential on W
    at::parallel_for(0, channels, 1, [&](int64_t begin, int64_t end) {
      for (const auto c : c10::irange(begin, end)) {
        for (const auto ow : c10::irange(output_width)) {
          int64_t iw = PaddingType::index(ow, input_width, pad_w, offset_w);
          grad_input_data[c * input_width + iw] += grad_output_data[c * output_width + ow];
        }
      }
    });
  } else if (ndim == 2) {
    // parallel on N,C, sequential on H,W
    at::parallel_for(0, channels, 1, [&](int64_t begin, int64_t end) {
      for (const auto c : c10::irange(begin, end)) {
        const scalar_t* grad_output_ptr = grad_output_data + c * output_height * output_width;
        scalar_t* grad_input_ptr = grad_input_data + c * input_height * input_width;

        for (const auto oh : c10::irange(output_height)) {
          int64_t ih = PaddingType::index(oh, input_height, pad_h, offset_h);
          for (const auto ow : c10::irange(output_width)) {
            int64_t iw = PaddingType::index(ow, input_width, pad_w, offset_w);
            grad_input_ptr[ih * input_width + iw] += grad_output_ptr[oh * output_width + ow];
          }
        }
      }
    });
  } else if (ndim == 3) {
    // parallel on N,C, sequential on D,H,W
    at::parallel_for(0, channels, 1, [&](int64_t begin, int64_t end) {
      for (const auto c : c10::irange(begin, end)) {
        const scalar_t* grad_output_ptr = grad_output_data + c * output_depth * output_height * output_width;
        scalar_t* grad_input_ptr = grad_input_data + c * input_depth * input_height * input_width;

        for (const auto od : c10::irange(output_depth)) {
          int64_t id = PaddingType::index(od, input_depth, pad_d, offset_d);
          for (const auto oh : c10::irange(output_height)) {
            int64_t ih = PaddingType::index(oh, input_height, pad_h, offset_h);
            for (const auto ow : c10::irange(output_width)) {
              int64_t iw = PaddingType::index(ow, input_width, pad_w, offset_w);
              grad_input_ptr[id * input_height * input_width + ih * input_width + iw] +=
                  grad_output_ptr[od * output_height * output_width + oh * output_width + ow];
            }
          }
        }
      }
    });
  } else {
    TORCH_INTERNAL_ASSERT(false, "expect input dim to be 1d, 2d, or 3d.");
  }

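  // If grad_input_ was not contiguous, the loops above wrote into a
  // contiguous copy; move the accumulated result back into the caller's tensor.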
  if (!grad_input_.is_contiguous()) {
    grad_input_.copy_(grad_input);
  }
}
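
The PaddingParams argument carries all of the geometry the kernel needs. Its definition lives elsewhere in PaddingKernel.cpp; the sketch below is reconstructed purely from the member accesses in the function above, so the field types and container choice are assumptions, not the real declaration.

// Assumed layout of PaddingParams, inferred from the accesses in
// cpu_padding_backward; the actual struct in PaddingKernel.cpp may differ.
struct PaddingParamsSketch {
  int64_t nbatch;    // N, folded with channels into one parallel range
  int64_t channels;  // C
  int ndim;          // number of padded spatial dimensions: 1, 2, or 3
  c10::SmallVector<int64_t, 3> ishape;   // input spatial sizes, innermost last
  c10::SmallVector<int64_t, 3> oshape;   // output spatial sizes
  c10::SmallVector<int64_t, 3> pads;     // leading pad per spatial dimension
  c10::SmallVector<int64_t, 3> offsets;  // start offset into the input per dimension
};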
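PaddingType is a compile-time policy that supplies the static index function used in every loop: given an output coordinate, the input size, the pad amount, and an offset, it returns the input coordinate that produced that output element. As a minimal sketch, assuming a replication-style pad that clamps out-of-range coordinates (the name ReplicationPadSketch and the exact signature are illustrative, matched to the call sites above rather than quoted from the file):

#include <algorithm>
#include <cstdint>

// Hypothetical replication-padding policy: clamp the unpadded output
// coordinate into [0, size) and apply the per-dimension slice offset.
struct ReplicationPadSketch {
  static int64_t index(int64_t j, int64_t size, int64_t pad, int64_t offset) {
    return std::min(std::max(j - pad, int64_t(0)), size - 1) + offset;
  }
};

Because such a mapping is many-to-one near the borders, the kernel accumulates with += rather than assigning, and it parallelizes only over the folded N*C dimension so that the sequential spatial loops keep each read-modify-write on grad_input race-free.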
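To see the accumulation in isolation, here is a tiny self-contained driver that replays the 1d loop structure with the clamping mapping sketched above (pure illustration; it does not call into ATen):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  // 1d replication pad: input width 4 with pad_w = 2 on each side,
  // so output width is 8 and the offset is 0.
  const int64_t input_width = 4, pad_w = 2, output_width = 8;
  std::vector<float> grad_output(output_width, 1.0f);  // all-ones upstream grad
  std::vector<float> grad_input(input_width, 0.0f);

  for (int64_t ow = 0; ow < output_width; ++ow) {
    // Same clamping mapping as the sketch policy above (offset = 0).
    int64_t iw = std::min(std::max(ow - pad_w, int64_t(0)), input_width - 1);
    grad_input[iw] += grad_output[ow];  // border elements collect the padded copies
  }

  for (float g : grad_input) std::printf("%g ", g);  // prints: 3 1 1 3
  std::printf("\n");
  return 0;
}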
