QConv1dUnpackWeightsInt8 Class — PyTorch Architecture
Architecture documentation for the QConv1dUnpackWeightsInt8 class, defined in qconv_unpack.cpp in the PyTorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/quantized/qconv_unpack.cpp lines 80–119
class QConv1dUnpackWeightsInt8 final {
 public:
  // Unpack a packed quantized conv1d weight (and optional bias).
  //
  // Conv1d packed params are stored internally in conv2d form (hence
  // ConvPackedParamsBase<2>), with a dummy spatial dimension inserted at
  // quant_utils::kConv1dSqueezeDim + 2. Each backend branch squeezes that
  // dimension back out before returning the (weight, bias) pair.
  // Dispatches on the globally configured quantized engine; raises via
  // TORCH_CHECK if no compiled-in backend matches.
  static std::tuple<at::Tensor, std::optional<at::Tensor>> run(
      const c10::intrusive_ptr<ConvPackedParamsBase<2>>& packed_weight) {
    auto& ctx = at::globalContext();
#ifdef USE_FBGEMM
    if (ctx.qEngine() == at::QEngine::FBGEMM ||
        ctx.qEngine() == at::QEngine::X86) {
      auto [weight, bias] = packed_weight->unpack();
      // NOTE(review): unlike the QNNPACK/ONEDNN branches below, no clone is
      // taken before the in-place squeeze — presumably FBGEMM's unpack()
      // returns a fresh tensor; verify against the backend implementation.
      weight.squeeze_(quant_utils::kConv1dSqueezeDim + 2);
      return {weight, bias};
    }
#endif
#ifdef USE_PYTORCH_QNNPACK
    if (ctx.qEngine() == at::QEngine::QNNPACK) {
      auto [unpacked, bias] = packed_weight->unpack();
      // Clone so the in-place squeeze does not touch the tensor
      // handed back by unpack().
      auto weight = unpacked.clone();
      weight.squeeze_(quant_utils::kConv1dSqueezeDim + 2);
      return {weight, bias};
    }
#endif
#if AT_MKLDNN_ENABLED()
    if (ctx.qEngine() == at::QEngine::ONEDNN) {
      auto [unpacked, bias] = packed_weight->unpack();
      // Same defensive copy as the QNNPACK path above.
      auto weight = unpacked.clone();
      weight.squeeze_(quant_utils::kConv1dSqueezeDim + 2);
      return {weight, bias};
    }
#endif
    TORCH_CHECK(
        false,
        "Didn't find engine for operation quantized::conv1d_unpack ",
        toString(ctx.qEngine()));
  }
};
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free