dense_to_sparse_compressed Function Template — pytorch Architecture
Architecture documentation for the dense_to_sparse_compressed function template (parameterized by the target_layout Layout value) in TensorConversions.cpp from the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/TensorConversions.cpp lines 1262–1348
// Convert a strided (dense) tensor `self` into a sparse compressed tensor
// with the compile-time `target_layout` (CSR, CSC, BSR, or BSC).
//
// `self_mask` marks the elements of `self` to keep; `blocksize` gives the
// 2-D block shape for the blocked layouts (ignored for CSR/CSC);
// `dense_dim_opt` is the number of trailing dense dimensions (default 0).
// Returns the compressed sparse tensor built via
// at::_sparse_compressed_tensor_unsafe with `self`'s sizes and options.
template <Layout target_layout>
static Tensor dense_to_sparse_compressed(
    const Tensor& self,
    const Tensor& self_mask,
    IntArrayRef blocksize,
    std::optional<int64_t> dense_dim_opt) {
  static_assert(
      target_layout == Layout::SparseCsr ||
          target_layout == Layout::SparseCsc ||
          target_layout == Layout::SparseBsr ||
          target_layout == Layout::SparseBsc,
      "invalid layout template parameter for dense_to_sparse_compressed");
  // Row-compressed layouts store compressed row indices plus plain columns.
  constexpr bool compresses_rows =
      target_layout == Layout::SparseCsr || target_layout == Layout::SparseBsr;
  // Blocked layouts carry an extra pair of explicit block dimensions.
  constexpr bool is_blocked =
      target_layout == Layout::SparseBsr || target_layout == Layout::SparseBsc;

  const int64_t dense_dim = dense_dim_opt.value_or(0);
  const auto n_batch_dim = self.dim() - 2 - dense_dim;
  const bool is_batched = n_batch_dim > 0;

  // For blocked layouts, retile so the block dims become explicit axes.
  Tensor values =
      is_blocked ? _batch_tile_tensor(self, blocksize, dense_dim) : self;
  Tensor nz_mask = is_blocked
      ? _batch_tile_tensor(self_mask, blocksize, dense_dim)
      : self_mask;

  // Collapse block and dense dims out of the mask: a (batch, sparse) entry
  // is true iff any element over its block/dense dims is non-zero.
  if (is_blocked || dense_dim > 0) {
    std::vector<int64_t> reduce_dims((is_blocked ? 2 : 0) + dense_dim);
    std::iota(reduce_dims.begin(), reduce_dims.end(), n_batch_dim + 2);
    nz_mask = nz_mask.sum(reduce_dims) != 0;
  }

  if (is_batched) {
    // Fold the batch dims together with the compressed dim into one dim
    // so the 2-D conversion below applies; undone after conversion.
    dense_to_sparse_compressed_prepare_check_mask_values_batched(
        target_layout, values, nz_mask, n_batch_dim);
  }

  // Derive row/col indices from the mask, convert the outer (compressed)
  // coordinate COO->CSR, and gather the surviving values.
  Tensor row_indices;
  Tensor col_indices;
  Tensor compressed_indices;
  if constexpr (compresses_rows) {
    std::tie(col_indices, row_indices) =
        _not_zero_mask_to_col_row_indices(nz_mask);
    compressed_indices = at::_convert_indices_from_coo_to_csr(
        row_indices, nz_mask.size(0), false /*out_int32*/);
    const auto picked = _mask_to_indices(nz_mask.flatten());
    values = values.flatten(0, 1).index_select(0, picked);
  } else {
    // Column-compressed: operate on the transposed mask/values so the
    // column coordinate becomes the outer (compressed) one.
    std::tie(row_indices, col_indices) =
        _not_zero_mask_to_col_row_indices(nz_mask.transpose(1, 0));
    compressed_indices = at::_convert_indices_from_coo_to_csr(
        col_indices, nz_mask.size(-1), false /*out_int32*/);
    const auto picked = _mask_to_indices(nz_mask.transpose(0, 1).flatten());
    values = values.transpose(0, 1).flatten(0, 1).index_select(0, picked);
  }
  Tensor& plain_indices = compresses_rows ? col_indices : row_indices;

  if (is_batched) {
    // Re-expand the folded batch + compressed dim back to N-D batched form.
    reshape_2d_sparse_compressed_members_to_nd_batched(
        self.sizes(), n_batch_dim, compressed_indices, plain_indices, values);
  }

  // Assemble the compressed sparse tensor in the requested layout.
  return at::_sparse_compressed_tensor_unsafe(
      compressed_indices,
      plain_indices,
      values,
      self.sizes(),
      self.options().layout(target_layout));
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free