`_resize_` Method — PyTorch Architecture
Architecture documentation for the `_resize_` method template in SparseTensorImpl.h from the PyTorch codebase.
Entity Profile
Source Code
aten/src/ATen/SparseTensorImpl.h lines 127–231
template <typename T>
void _resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<T> size) {
TORCH_CHECK(
allow_tensor_metadata_change(),
"resize_ ",
err_msg_tensor_metadata_change_not_allowed);
TORCH_CHECK(
!has_symbolic_sizes_strides_,
"resize_ called on tensor with symbolic shape")
TORCH_CHECK(
sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
"'len(size) == sparse_dim + dense_dim' is not satisfied: len(size) = ",
size.size(),
", sparse_dim = ",
sparse_dim,
", dense_dim = ",
dense_dim);
if (nnz() > 0) {
[[maybe_unused]] auto constexpr alt_options_msg =
"You could try the following options:\n\
1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\
2. If you need to resize this tensor, you have the following options:\n\
1. For both sparse and dense dimensions, keep the number of them constant and the size of them non-shrinking, and then try the same call again.\n\
2. Or, create a new sparse tensor with the correct indices and values from this sparse tensor.";
TORCH_CHECK(
sparse_dim == sparse_dim_,
"changing the number of sparse dimensions (from ",
sparse_dim_,
" to ",
sparse_dim,
") on a non-empty sparse tensor is not supported.\n",
alt_options_msg);
TORCH_CHECK(
dense_dim == dense_dim_,
"changing the number of dense dimensions (from ",
dense_dim_,
" to ",
dense_dim,
") on a non-empty sparse tensor is not supported.\n",
alt_options_msg);
bool shrinking_sparse_dims = false;
bool shrinking_dense_dim = false;
auto sparse_size_original = generic_sizes<T>().slice(0, sparse_dim);
auto sparse_size_new = size.slice(0, sparse_dim);
for (const auto i : c10::irange(sparse_dim)) {
if (sparse_size_new[i] < sparse_size_original[i]) {
shrinking_sparse_dims = true;
break;
}
}
auto dense_size_original = generic_sizes<T>().slice(sparse_dim);
auto dense_size_new = size.slice(sparse_dim);
for (const auto i : c10::irange(dense_dim)) {
if (dense_size_new[i] < dense_size_original[i]) {
shrinking_dense_dim = true;
break;
}
}
TORCH_CHECK(
!shrinking_sparse_dims,
"shrinking the size of sparse dimensions (from ",
sparse_size_original,
" to ",
sparse_size_new,
") on a non-empty sparse tensor is not supported.\n",
alt_options_msg);
TORCH_CHECK(
!shrinking_dense_dim,
"shrinking the size of dense dimensions (from ",
dense_size_original,
" to ",
dense_size_new,
") on a non-empty sparse tensor is not supported.\n",
alt_options_msg);
}
auto sizes_and_strides = generic_sizes<T>();
const bool size_equals_sizes = std::equal(
size.begin(),
size.end(),
sizes_and_strides.begin(),
sizes_and_strides.end());
if ((!size_equals_sizes) || (sparse_dim != sparse_dim_) ||
(dense_dim != dense_dim_)) {
auto nnz = at::symint::sizes<T>(values())[0];
std::vector<T> values_size = {nnz};
auto dense_size = size.slice(sparse_dim);
values_size.insert(
values_size.end(), dense_size.begin(), dense_size.end());
at::symint::resize_<T>(values_, values_size);
at::symint::resize_<T>(indices_, {T(sparse_dim), nnz});
}
if (!size_equals_sizes) {
set_sizes_and_strides(size, std::vector<T>(size.size()));
}
sparse_dim_ = sparse_dim;
dense_dim_ = dense_dim;
refresh_numel();
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free