_fft_fill_with_conjugate_symmetry_slice — pytorch Architecture
Architecture documentation for the _fft_fill_with_conjugate_symmetry_slice function (its return type is void) in SpectralOps.cpp from the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/mkl/SpectralOps.cpp lines 31–126
// Fills part of an FFT output tensor by conjugate (Hermitian) symmetry:
// for each input element, writes std::conj(input) to the mirrored output
// position. Works on the flat element range [range.begin, range.end) of the
// half-sized signal, so callers can split the work across threads by slice.
//
// Parameters:
//   range             - flat (row-major over signal_half_sizes) element range
//                       this call is responsible for
//   is_mirrored_dim   - per-dimension flag: true if writes along that dim go
//                       to the mirrored index (size - i) instead of i
//   signal_half_sizes - logical sizes of the half signal, one per dimension
//   in_strides, in_ptr   - element strides and base pointer of the input
//   out_strides, out_ptr - element strides and base pointer of the output
//
// NOTE(review): in/out strides are assumed to be in units of scalar_t
// elements (not bytes), matching the pointer arithmetic below — confirm
// against the caller.
template <typename scalar_t>
static __ubsan_ignore_undefined__ // UBSAN gives false positives on using negative indexes with a pointer
void _fft_fill_with_conjugate_symmetry_slice(
Range range, at::ArrayRef<bool> is_mirrored_dim, IntArrayRef signal_half_sizes,
IntArrayRef in_strides, const scalar_t * in_ptr,
IntArrayRef out_strides, scalar_t * out_ptr) {
const auto ndim = signal_half_sizes.size();
// Multi-dimensional index into the half signal; dim 0 is the innermost,
// explicitly-looped dimension below.
DimVector iter_index(ndim, 0);
// We explicitly loop over one row, then use this lambda to iterate over
// n-dimensions. This advances iter_index by one row, while updating in_ptr
// and out_ptr to point to the new row of data.
// Works like an odometer: increment the lowest dimension (>= 1) that has
// room, resetting lower ones; pointers are adjusted incrementally rather
// than recomputed from scratch.
auto advance_index = [&] () __ubsan_ignore_undefined__ {
for (const auto i : c10::irange(1, iter_index.size())) {
if (iter_index[i] + 1 < signal_half_sizes[i]) {
++iter_index[i];
in_ptr += in_strides[i];
if (is_mirrored_dim[i]) {
// Mirrored dims write to index (size - i): stepping the input index
// from 0 to 1 jumps the output to the last position; afterwards each
// input step moves the output one stride backwards.
if (iter_index[i] == 1) {
out_ptr += (signal_half_sizes[i] - 1) * out_strides[i];
} else {
out_ptr -= out_strides[i];
}
} else {
out_ptr += out_strides[i];
}
return;
}
// This dimension overflowed: rewind its pointer contribution to zero and
// carry into the next dimension.
in_ptr -= in_strides[i] * iter_index[i];
if (is_mirrored_dim[i]) {
// Output index was (size - iter_index[i]); stepping back one stride
// returns it to index 0 via the wraparound used above.
out_ptr -= out_strides[i];
} else {
out_ptr -= out_strides[i] * iter_index[i];
}
iter_index[i] = 0;
}
};
// The data slice we operate on may start part-way into the data
// Update iter_index and pointers to reference the start of the slice
if (range.begin > 0) {
// Decompose the flat range.begin into per-dimension indices (dim 0 first,
// then successively higher dims) and offset both pointers accordingly.
iter_index[0] = range.begin % signal_half_sizes[0];
auto linear_idx = range.begin / signal_half_sizes[0];
for (size_t i = 1; i < ndim && linear_idx > 0; ++i) {
iter_index[i] = linear_idx % signal_half_sizes[i];
linear_idx = linear_idx / signal_half_sizes[i];
if (iter_index[i] > 0) {
in_ptr += in_strides[i] * iter_index[i];
if (is_mirrored_dim[i]) {
// Mirrored dims map input index k to output index (size - k).
out_ptr += out_strides[i] * (signal_half_sizes[i] - iter_index[i]);
} else {
out_ptr += out_strides[i] * iter_index[i];
}
}
}
}
auto numel_remaining = range.end - range.begin;
if (is_mirrored_dim[0]) {
// Explicitly loop over a Hermitian mirrored dimension
if (iter_index[0] > 0) {
// Partial first row: start mid-row at iter_index[0]. Input index i maps
// to output index (size - i), conjugated.
auto end = std::min(signal_half_sizes[0], iter_index[0] + numel_remaining);
for (const auto i : c10::irange(iter_index[0], end)) {
out_ptr[(signal_half_sizes[0] - i) * out_strides[0]] = std::conj(in_ptr[i * in_strides[0]]);
}
numel_remaining -= (end - iter_index[0]);
iter_index[0] = 0;
advance_index();
}
while (numel_remaining > 0) {
auto end = std::min(signal_half_sizes[0], numel_remaining);
// Index 0 maps to itself (the self-conjugate element); handle it
// separately so the mirrored indexing below starts from 1.
out_ptr[0] = std::conj(in_ptr[0]);
for (const auto i : c10::irange(1, end)) {
out_ptr[(signal_half_sizes[0] - i) * out_strides[0]] = std::conj(in_ptr[i * in_strides[0]]);
}
numel_remaining -= end;
advance_index();
}
} else {
// Explicit loop over a non-mirrored dimension, so just a simple conjugated copy
while (numel_remaining > 0) {
auto end = std::min(signal_half_sizes[0], iter_index[0] + numel_remaining);
for (int64_t i = iter_index[0]; i != end; ++i) {
out_ptr[i * out_strides[0]] = std::conj(in_ptr[i * in_strides[0]]);
}
numel_remaining -= (end - iter_index[0]);
iter_index[0] = 0;
advance_index();
}
}
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free