add_dense_sparse_worker_non_coalesced_cpu Function — pytorch Architecture
Architecture documentation for the add_dense_sparse_worker_non_coalesced_cpu function template in SparseTensorMath.cpp from the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/sparse/SparseTensorMath.cpp lines 649–708
template <typename scalar_t>
static inline void add_dense_sparse_worker_non_coalesced_cpu(Tensor& r, const Scalar& value,
  const SparseTensor& sparse, const Tensor& indices, const Tensor& values) {
  // Accumulates `value * values[n]` into the dense tensor `r` at the position
  // given by `indices[:, n]`, for every stored entry n of a (possibly
  // non-coalesced) hybrid sparse tensor.
  //
  // Parallelization scheme: rows of `r` along the first sparse dimension are
  // partitioned into disjoint, near-equal chunks, one per thread. Each thread
  // scans ALL nnz entries and processes only those whose leading sparse index
  // falls inside its own chunk. Because duplicate indices (the non-coalesced
  // case) share the same leading index, they are always handled by the same
  // thread, so the `+=` accumulation never races.

  // Number of dense elements per sparse entry of the hybrid tensor; for a
  // contiguous `values` tensor, stride(0) is the product of the dense dims.
  auto values_dense_size = values.stride(0);
  TORCH_CHECK(values.is_contiguous());
  scalar_t* v_ptr = values.data_ptr<scalar_t>();
  TORCH_CHECK(v_ptr != nullptr);
  scalar_t* r_ptr = r.data_ptr<scalar_t>();
  TORCH_CHECK(r_ptr != nullptr);
  scalar_t cast_value = value.to<scalar_t>();
  auto sparse_dim = sparse.sparse_dim();

  auto indices_accessor = indices.accessor<int64_t, 2>();
  int64_t result_length = r.size(0);
  auto sparse_nnz = sparse._nnz();

  // Early out: nothing to accumulate when there are no stored entries, and
  // the chunking below would otherwise divide by a zero thread count when
  // result_length == 0.
  if (result_length == 0 || sparse_nnz == 0) {
    return;
  }

  std::vector<int64_t> result_stride(sparse_dim);
  for (auto d : c10::irange(sparse_dim)) {
    result_stride[d] = r.stride(d);
  }

  int max_threads = at::get_num_threads();
  // Never use more threads than there are rows to split.
  max_threads = (result_length < max_threads) ? result_length : max_threads;
  int64_t avg_chunk_down = result_length / max_threads;
  std::vector<int64_t> chunk_size(max_threads, avg_chunk_down);
  // Spread the remainder one row at a time so chunk sizes differ by at most
  // one (e.g. 7 rows over 3 threads -> 3/2/2).
  for (int64_t i = 0; i < result_length % max_threads; i++) {
    chunk_size[i] += 1;
  }
  // Exclusive prefix sums: thread k owns the half-open row range
  // [chunk_sum_size[k], chunk_sum_size[k + 1]).
  std::vector<int64_t> chunk_sum_size(max_threads + 1);
  chunk_sum_size[0] = 0;
  for (const auto i : c10::irange(1, max_threads)) {
    chunk_sum_size[i] = chunk_sum_size[i - 1] + chunk_size[i - 1];
  }
  chunk_sum_size[max_threads] = result_length;

  at::parallel_for(0, max_threads, 0, [&](int64_t start, int64_t end) {
    for (auto k : c10::irange(start, end)) {
      int64_t chunk_begin = chunk_sum_size[k];
      int64_t chunk_end = chunk_sum_size[k + 1];
      // Scan every stored entry; keep only those whose leading sparse index
      // lies inside this thread's row range.
      for (const auto n : c10::irange(sparse_nnz)) {
        int64_t chunk_offset = indices_accessor[0][n];
        if (chunk_offset >= chunk_begin && chunk_offset < chunk_end) {
          // Flatten the multi-dimensional sparse index into an offset in r.
          int64_t r_offset = result_stride[0] * chunk_offset;
          for (const auto d : c10::irange(1, sparse_dim)) {
            r_offset += result_stride[d] * indices_accessor[d][n];
          }
          scalar_t* v_index = v_ptr + n * values_dense_size;
          auto r_index = r_ptr + r_offset;
          // r[offset : offset+dense] += cast_value * values[n] (BLAS axpy).
          at::native::cpublas::axpy<scalar_t>(values_dense_size, cast_value, v_index, 1, r_index, 1);
        }
      }
    }
  });
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free