Home / Class / addmm_out_sparse_eigen Class — pytorch Architecture

addmm_out_sparse_eigen Class — pytorch Architecture

Architecture documentation for the addmm_out_sparse_eigen function template (a free function, not a class) in SparseBlasImpl.cpp from the pytorch codebase.

Entity Profile

Source Code

aten/src/ATen/native/sparse/eigen/SparseBlasImpl.cpp lines 142–249

template <typename scalar_t>
void addmm_out_sparse_eigen(
    const at::Tensor& mat1,
    const at::Tensor& mat2,
    const at::Tensor& result,
    const at::Scalar& alpha,
    const at::Scalar& beta) {
  // Sparse-sparse matmul-add via Eigen:
  //   result = beta * result + alpha * (mat1 @ mat2)
  // mat1/mat2/result are sparse compressed tensors (CSR or CSC); every
  // CSR/CSC combination of the three operands is handled in the dispatch
  // below. Products whose output layout differs from Eigen's natural result
  // layout incur an extra format conversion (noted inline).

  // When beta == 0, NaN and Inf already present in `result` must not leak
  // into the output (0 * NaN == NaN), so the existing values are discarded
  // with zero_() rather than scaled. beta == 0 also enables a fast path:
  // the product is materialized directly into `result` and scaled by alpha
  // in place, skipping the temporary tensor and the final add_.
  bool is_beta_zero = false;
  if (beta.toComplexDouble() == 0.) {
    is_beta_zero = true;
    result.values().zero_();
  } else {
    result.values().mul_(beta);
  }

  // Empty operands: alpha * (mat1 @ mat2) contributes nothing, and `result`
  // already holds beta * result (or zeros), so we are done.
  // NOTE(review): the beta handling above must run BEFORE this early return.
  // The original code returned first, leaving `result` unscaled (and
  // possibly carrying NaN/Inf with beta == 0) whenever either operand had
  // no non-zero entries.
  if (mat1._nnz() == 0 || mat2._nnz() == 0) {
    return;
  }

  // Eigen needs a single index type for all operands; promote mat1/mat2
  // indices in place to match result's index dtype before dispatch.
  c10::ScalarType result_index_dtype = at::sparse_csr::getIndexDtype(result);

  sparse_indices_to_result_dtype_inplace(result_index_dtype, mat1);
  sparse_indices_to_result_dtype_inplace(result_index_dtype, mat2);

  AT_DISPATCH_INDEX_TYPES(
      result_index_dtype, "eigen_sparse_mm", [&]() {
        using EigenCsrMatrix =
            Eigen::SparseMatrix<scalar_t, Eigen::RowMajor, index_t>;
        using EigenCscMatrix =
            Eigen::SparseMatrix<scalar_t, Eigen::ColMajor, index_t>;

        // Destination for the raw product: with beta == 0 we write straight
        // into `result`; otherwise into scratch storage that is added below.
        at::Tensor mat1_mat2;
        if (is_beta_zero) {
          mat1_mat2 = result;
        } else {
          mat1_mat2 = at::empty_like(result, result.options());
        }

        // Dispatch on the layouts of (output, mat1, mat2). Eigen maps
        // CSR <-> RowMajor and CSC <-> ColMajor.
        if (mat1_mat2.layout() == kSparseCsr) {
          if (mat1.layout() == kSparseCsr) {
            const auto mat1_eigen = Tensor_to_Eigen<scalar_t, Eigen::RowMajor, index_t>(mat1);
            if (mat2.layout() == kSparseCsr) {
              // Out_csr = M1_csr * M2_csr
              const auto mat2_eigen = Tensor_to_Eigen<scalar_t, Eigen::RowMajor, index_t>(mat2);
              const EigenCsrMatrix mat1_mat2_eigen = (mat1_eigen * mat2_eigen);
              Eigen_to_Tensor<scalar_t, Eigen::RowMajor, index_t>(mat1_mat2, mat1_mat2_eigen);
            } else {
              // Out_csr = M1_csr * M2_csc
              const auto mat2_eigen = Tensor_to_Eigen<scalar_t, Eigen::ColMajor, index_t>(mat2);
              const EigenCsrMatrix mat1_mat2_eigen = (mat1_eigen * mat2_eigen);
              Eigen_to_Tensor<scalar_t, Eigen::RowMajor, index_t>(mat1_mat2, mat1_mat2_eigen);
            }
          } else {
            const auto mat1_eigen = Tensor_to_Eigen<scalar_t, Eigen::ColMajor, index_t>(mat1);
            if (mat2.layout() == kSparseCsr) {
              // Out_csr = M1_csc * M2_csr
              const auto mat2_eigen = Tensor_to_Eigen<scalar_t, Eigen::RowMajor, index_t>(mat2);
              const EigenCsrMatrix mat1_mat2_eigen = (mat1_eigen * mat2_eigen);
              Eigen_to_Tensor<scalar_t, Eigen::RowMajor, index_t>(mat1_mat2, mat1_mat2_eigen);
            } else {
              // Out_csr = M1_csc * M2_csc
              // This multiplication will be computationally inefficient, as it
              // will require an additional conversion of the output matrix
              // from CSC to CSR format.
              const auto mat2_eigen = Tensor_to_Eigen<scalar_t, Eigen::ColMajor, index_t>(mat2);
              const EigenCsrMatrix mat1_mat2_eigen = (mat1_eigen * mat2_eigen);
              Eigen_to_Tensor<scalar_t, Eigen::RowMajor, index_t>(mat1_mat2, mat1_mat2_eigen);
            }
          }
        } else {
          if (mat1.layout() == kSparseCsr) {
            const auto mat1_eigen = Tensor_to_Eigen<scalar_t, Eigen::RowMajor, index_t>(mat1);
            if (mat2.layout() == kSparseCsr) {
              // Out_csc = M1_csr * M2_csr
              // This multiplication will be computationally inefficient, as it
              // will require an additional conversion of the output matrix
              // from CSR to CSC format.
              const auto mat2_eigen = Tensor_to_Eigen<scalar_t, Eigen::RowMajor, index_t>(mat2);
              const EigenCscMatrix mat1_mat2_eigen = (mat1_eigen * mat2_eigen);
              Eigen_to_Tensor<scalar_t, Eigen::ColMajor, index_t>(mat1_mat2, mat1_mat2_eigen);
            } else {
              // Out_csc = M1_csr * M2_csc
              const auto mat2_eigen = Tensor_to_Eigen<scalar_t, Eigen::ColMajor, index_t>(mat2);
              const EigenCscMatrix mat1_mat2_eigen = (mat1_eigen * mat2_eigen);
              Eigen_to_Tensor<scalar_t, Eigen::ColMajor, index_t>(mat1_mat2, mat1_mat2_eigen);
            }
          } else {
            const auto mat1_eigen = Tensor_to_Eigen<scalar_t, Eigen::ColMajor, index_t>(mat1);
            if (mat2.layout() == kSparseCsr) {
              // Out_csc = M1_csc * M2_csr
              const auto mat2_eigen = Tensor_to_Eigen<scalar_t, Eigen::RowMajor, index_t>(mat2);
              const EigenCscMatrix mat1_mat2_eigen = (mat1_eigen * mat2_eigen);
              Eigen_to_Tensor<scalar_t, Eigen::ColMajor, index_t>(mat1_mat2, mat1_mat2_eigen);
            } else {
              // Out_csc = M1_csc * M2_csc
              const auto mat2_eigen = Tensor_to_Eigen<scalar_t, Eigen::ColMajor, index_t>(mat2);
              const EigenCscMatrix mat1_mat2_eigen = (mat1_eigen * mat2_eigen);
              Eigen_to_Tensor<scalar_t, Eigen::ColMajor, index_t>(mat1_mat2, mat1_mat2_eigen);
            }
          }
        }

        if (is_beta_zero) {
          // Fast path: product was written into `result`; apply alpha in place.
          result.mul_(alpha.to<scalar_t>());
        } else {
          // result = beta * result (applied earlier) + alpha * (mat1 @ mat2)
          result.add_(mat1_mat2, alpha.to<scalar_t>());
        }
      });
}

Analyze Your Own Codebase

Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.

Try Supermodel Free