Q8GEMM Class — pytorch Architecture
Architecture documentation for the Q8GEMM class, a Google Benchmark fixture defined in q8gemm_sparse.cc in the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/quantized/cpu/qnnpack/bench/q8gemm_sparse.cc lines 63–201
class Q8GEMM : public benchmark::Fixture {
 public:
  inline Q8GEMM(uint32_t mr, uint32_t nr, uint32_t np, uint32_t kr)
      : mr_(mr), nr_(nr), np_(np), kr_(kr), mc_(mr), nc_(nr), kc_(kr) {}

  void SetUp(const benchmark::State&) override {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto s32rng =
        std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);

    // Fill the input matrix A, the kernel K, and the bias B with random data.
    a_.resize(mc() * kc());
    std::generate(a_.begin(), a_.end(), std::ref(u8rng));
    k_.resize(nc() * kc());
    std::generate(k_.begin(), k_.end(), std::ref(u8rng));
    b_.resize(nc());
    std::generate(b_.begin(), b_.end(), std::ref(s32rng));

    // Packed-weight buffer: kcStride() * ncStride() weight bytes plus
    // ncStride() int32 bias entries.
    w_.resize(
        kcStride() * ncStride() +
        ncStride() * sizeof(int32_t) / sizeof(uint8_t));
    std::fill(w_.begin(), w_.end(), 127);

    // Round nc_ up to a multiple of nr_; this bitmask form assumes nr_ is a
    // power of two.
    size_t num_zero_points_kernel = (nc_ + (nr_ - 1)) & -nr_;
    std::vector<uint8_t> kernel_zero_points(num_zero_points_kernel, 127);
    std::vector<float> requantization_scales(num_zero_points_kernel, 0.75f);

    pytorch_pack_q8gemm_w(
        nc(),
        kc(),
        nr(),
        np(),
        kr(),
#if !PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
        127,
        127,
#endif
        k(),
        b(),
#if PYTORCH_QNNPACK_RUNTIME_QUANTIZATION
        kernel_zero_points.data(),
#endif
        w());

    // Fill the output with a sentinel value so stale bytes are detectable.
    c_.resize(mc() * nc());
    std::fill(c_.begin(), c_.end(), 0xA5);

    quantizationParams_ = pytorch_qnnp_compute_conv_quantization_params(
        127, kernel_zero_points.data(),
        requantization_scales.data(), 127, 1, 254);
  }

  void TearDown(benchmark::State& state) override {
    // 2 * M * N * K multiply-accumulate operations per iteration.
    state.SetItemsProcessed(
        uint64_t(state.iterations()) * 2 * mc() * nc() * kc());
    a_.clear();
    k_.clear();
    b_.clear();
    w_.clear();
    c_.clear();
  }

  inline const uint8_t* a() const {
    return a_.data();
  }
  inline const uint8_t* k() const {
    return k_.data();
  }
  inline const int32_t* b() const {
    return b_.data();
  }
  inline uint8_t* w() {
    return w_.data();
  }
  inline const uint8_t* w() const {
    return w_.data();
  }
  inline uint8_t* c() {
    return c_.data();
  }
  inline uint32_t mr() const {
    return mr_;
  }
  inline uint32_t mc() const {
    return mc_;
  }
  inline uint32_t nr() const {
    return nr_;
  }
  inline uint32_t np() const {
    return np_;
  }
  inline uint32_t nc() const {
    return nc_;
  }
  inline uint32_t ncStride() const {
    return roundUp(nc(), nr());
  }
  inline uint32_t kr() const {
    return kr_;
  }
  inline uint32_t kc() const {
    return kc_;
  }
  inline uint32_t kcStride() const {
    return roundUp(kc(), kr());
  }
  inline const pytorch_qnnp_conv_quantization_params* quantizationParams()
      const {
    return &quantizationParams_;
  }

 protected:
  std::vector<uint8_t> a_;
  std::vector<uint8_t> k_;
  std::vector<int32_t> b_;
  std::vector<uint8_t, AlignedAllocator<uint8_t, 32>> w_;
  std::vector<uint8_t> c_;
  uint32_t mr_{0};
  uint32_t nr_{0};
  uint32_t np_{0};
  uint32_t kr_{0};
  uint32_t mc_{mr_};
  uint32_t nc_{nr_};
  uint32_t kc_{kr_};
  pytorch_qnnp_conv_quantization_params quantizationParams_;
};
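Usage
The fixture is not instantiated directly: its constructor takes the micro-kernel tile sizes (mr, nr, np, kr), so the benchmark file registers thin subclasses that pin those values. Below is a minimal sketch of that pattern using the standard Google Benchmark fixture macros; the subclass name, tile sizes, and the body of the timed loop are illustrative stand-ins, not the actual definitions from q8gemm_sparse.cc.

#include <benchmark/benchmark.h>

// Hypothetical wrapper that pins the tile sizes; the real benchmark file
// defines one such subclass per micro-kernel variant.
class Q8GEMM_4x8 : public Q8GEMM {
 public:
  Q8GEMM_4x8() : Q8GEMM(4, 8, 8, 1) {}
};

BENCHMARK_DEFINE_F(Q8GEMM_4x8, gemm)(benchmark::State& state) {
  for (auto _ : state) {
    // Invoke the micro-kernel under test here with the buffers prepared by
    // SetUp(): a() for the input, w() for the packed weights and bias, c()
    // for the output, and quantizationParams() for requantization.
  }
}
BENCHMARK_REGISTER_F(Q8GEMM_4x8, gemm);

Google Benchmark calls SetUp() before and TearDown() after each registered benchmark run, so every variant sees freshly randomized inputs and a freshly packed weight buffer.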
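Padding arithmetic
Two round-up idioms appear in the class: ncStride() and kcStride() call a roundUp helper, while num_zero_points_kernel uses the bitmask (nc_ + (nr_ - 1)) & -nr_, which is only correct when nr_ is a power of two. The following self-contained sketch shows both forms producing the same padded size; the roundUp definition here is a stand-in for the helper the benchmark includes from its utility headers.

#include <cstdint>
#include <cstdio>

// Stand-in for the roundUp helper used by ncStride()/kcStride().
static uint32_t roundUp(uint32_t n, uint32_t q) {
  return q * ((n + q - 1) / q);  // works for any q >= 1
}

int main() {
  const uint32_t nc = 13, nr = 8;
  // Division-based round-up, as used for ncStride()/kcStride().
  std::printf("roundUp(%u, %u) = %u\n", nc, nr, roundUp(nc, nr));  // 16
  // Bitmask round-up, as used for num_zero_points_kernel; this form
  // requires nr to be a power of two.
  std::printf("mask round-up   = %u\n", (nc + (nr - 1)) & -nr);    // 16
  return 0;
}

These padded strides are what size the packed buffer w_: kcStride() * ncStride() weight bytes plus ncStride() int32 bias entries.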