Home / Class / _compute_indices_min_size_weights_aa Class — pytorch Architecture

_compute_indices_min_size_weights_aa Class — pytorch Architecture

Architecture documentation for the _compute_indices_min_size_weights_aa class in UpSampleKernel.cpp from the pytorch codebase.

Entity Profile

Source Code

aten/src/ATen/native/cpu/UpSampleKernel.cpp lines 746–783

  // Computes the anti-aliasing interpolation window and normalized filter
  // weights for output index `i`.
  //
  // On return, [xmin, xmin + xsize) is the range of contributing input
  // indices, wt_ptr[0..xsize) holds the weights normalized to sum to 1
  // (left unnormalized only in the degenerate all-zero case), and
  // wt_ptr[xsize..max_interp_size) is zero-filled so consumers may safely
  // read a fixed max_interp_size-wide row. Returns the maximum normalized
  // weight (0.0 when the weight sum is zero).
  template <typename scalar_t, typename aa_filter_fn_t>
  static inline scalar_t _compute_indices_min_size_weights_aa(
    const int64_t i, const int64_t input_size, const scalar_t scale, const scalar_t support,
    scalar_t* wt_ptr, const int64_t max_interp_size, aa_filter_fn_t filter_fn,
    int64_t& xmin, int64_t& xsize
  ) {

    // Center of output pixel i mapped back into input coordinates.
    const scalar_t center = scale * (i + 0.5);
    // When downscaling (scale >= 1) the filter is stretched; the argument
    // passed to filter_fn is shrunk by 1/scale to compensate.
    const scalar_t invscale = (scale >= 1.0) ? 1.0 / scale : 1.0;

    xmin = std::max(
        static_cast<int64_t>(center - support + 0.5), static_cast<int64_t>(0));
    xsize = std::min(
        static_cast<int64_t>(center + support + 0.5), input_size) - xmin;
    // There are rare cases when due to precision xsize can be larger than max_interp_size by one.
    // We have to clip the value
    xsize = std::clamp(xsize, static_cast<int64_t>(0), max_interp_size);

    // First pass: evaluate the filter at each tap and accumulate the sum.
    scalar_t total_w = 0.0;
    for (int64_t k = 0; k < xsize; k++) {
      const scalar_t w = filter_fn((k + xmin - center + 0.5) * invscale);
      wt_ptr[k] = w;
      total_w += w;
    }

    // Second pass: normalize so the weights sum to 1, tracking the maximum.
    // Skipped entirely when the sum is zero (weights stay as computed).
    scalar_t wt_max = 0.0;
    if (total_w != 0.0) {
      for (int64_t k = 0; k < xsize; k++) {
        wt_ptr[k] /= total_w;
        wt_max = std::max(wt_max, wt_ptr[k]);
      }
    }

    // Zero-fill the unused tail of the weight row.
    for (int64_t k = xsize; k < max_interp_size; k++) {
      wt_ptr[k] = static_cast<scalar_t>(0.0);
    }
    return wt_max;
  }

Analyze Your Own Codebase

Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.

Try Supermodel Free