QConv1dPackWeightInt8 Class — PyTorch Architecture
Architecture documentation for the QConv1dPackWeightInt8 class in qconv_prepack.cpp from the PyTorch codebase.
Entity Profile
QConv1dPackWeightInt8 is a stateless dispatcher for quantized 1D convolution weight prepacking. Its public entry points, run_conv and run_deconv, promote Conv1d weights and arguments to their Conv2d equivalents and hand packing off to the backend implied by the active quantization engine (X86, FBGEMM, QNNPACK, or ONEDNN).
Source Code
aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp lines 730–822
class QConv1dPackWeightInt8 final {
 public:
  // Entry point for 1D convolution prepacking. Conv1d has no output padding,
  // so a zero-filled list is supplied before delegating to _run.
  static c10::intrusive_ptr<ConvPackedParamsBase<2>> run_conv(
      Tensor weight,
      std::optional<Tensor> bias,
      torch::List<int64_t> stride,
      torch::List<int64_t> padding,
      torch::List<int64_t> dilation,
      int64_t groups) {
    const torch::List<int64_t> output_padding({0});
    return _run(std::move(weight), std::move(bias), stride, padding,
                output_padding, dilation, groups,
                /*transpose=*/false);
  }

  // Entry point for 1D transposed-convolution prepacking; here the caller
  // supplies output_padding explicitly.
  static c10::intrusive_ptr<ConvPackedParamsBase<2>> run_deconv(
      Tensor weight,
      std::optional<Tensor> bias,
      torch::List<int64_t> stride,
      torch::List<int64_t> padding,
      torch::List<int64_t> output_padding,
      torch::List<int64_t> dilation,
      int64_t groups) {
    return _run(std::move(weight), std::move(bias), stride, padding,
                output_padding, dilation, groups,
                /*transpose=*/true);
  }

 private:
  static c10::intrusive_ptr<ConvPackedParamsBase<2>> _run(
      Tensor weight,
      std::optional<Tensor> bias,
      torch::List<int64_t> stride,
      torch::List<int64_t> padding,
      torch::List<int64_t> output_padding,
      torch::List<int64_t> dilation,
      int64_t groups,
      bool transpose) {
    auto& ctx = at::globalContext();
    // Promote the 3-D Conv1d weight to 4-D by inserting a size-1 spatial
    // dimension, so the Conv2d packers below can handle it.
    if (weight.dim() == 3) {
      weight = weight.unsqueeze(quant_utils::kConv1dSqueezeDim + 2);
    }
    // Widen the 1-D stride/padding/output_padding/dilation lists to their 2-D
    // equivalents, using neutral values (stride/dilation 1, padding 0) for
    // the dummy dimension.
    stride = quant_utils::MakeArgForConv1d(stride, 1);
    padding = quant_utils::MakeArgForConv1d(padding, 0);
    output_padding = quant_utils::MakeArgForConv1d(output_padding, 0);
    dilation = quant_utils::MakeArgForConv1d(dilation, 1);

    // Dispatch on the active quantization engine.
#ifdef USE_FBGEMM
    // The X86 engine prefers oneDNN when the heuristic favors it and falls
    // back to FBGEMM packing otherwise.
    if (ctx.qEngine() == at::QEngine::X86) {
#if AT_MKLDNN_ENABLED()
      bool use_onednn = onednn_utils::should_use_onednn_quant(
          weight, transpose, groups, output_padding);
      if (use_onednn) {
        return PackedConvWeightsOnednn<2>::prepack(
            weight, bias, stride, padding, output_padding, dilation, groups,
            transpose);
      }
#endif
      return PackedConvWeight<2>::prepack(
          std::move(weight), std::move(bias), stride, padding, output_padding,
          dilation, groups, transpose);
    } // x86
#endif

#ifdef USE_FBGEMM
    if (ctx.qEngine() == at::QEngine::FBGEMM) {
      return PackedConvWeight<2>::prepack(
          std::move(weight), std::move(bias), stride, padding, output_padding,
          dilation, groups, transpose);
    }
#endif

#ifdef USE_PYTORCH_QNNPACK
    if (ctx.qEngine() == at::QEngine::QNNPACK) {
      return PackedConvWeightsQnnp<2>::prepack(
          std::move(weight), std::move(bias), stride, padding, output_padding,
          dilation, groups, transpose);
    }
#endif

#if AT_MKLDNN_ENABLED()
    if (ctx.qEngine() == at::QEngine::ONEDNN) {
      return PackedConvWeightsOnednn<2>::prepack(
          weight, bias, stride, padding, output_padding, dilation, groups,
          transpose);
    }
#endif

    // No compiled-in backend matches the selected engine.
    TORCH_CHECK(
        false,
        "Didn't find engine for operation quantized::conv1d_prepack ",
        toString(ctx.qEngine()));
  }
};
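The 1D-to-2D promotion is the central trick: every Conv1d is packed as a Conv2d whose extra spatial dimension has size 1. Below is a minimal standalone sketch of the shape arithmetic, assuming quant_utils::kConv1dSqueezeDim == 0 (its value is defined in ATen's QuantUtils.h, not in this file):

#include <ATen/ATen.h>
#include <iostream>

int main() {
  // Illustration of the promotion performed in _run. With
  // kConv1dSqueezeDim == 0 (an assumption here), the call
  // weight.unsqueeze(kConv1dSqueezeDim + 2) is unsqueeze(2).
  at::Tensor w1d = at::randn({16, 8, 5});  // Conv1d weight: (C_out, C_in/g, L)
  at::Tensor w2d = w1d.unsqueeze(2);       // Conv2d weight: (C_out, C_in/g, 1, L)
  std::cout << w2d.sizes() << "\n";        // prints [16, 8, 1, 5]
  // MakeArgForConv1d widens the scalar args to match: stride {s} -> {1, s},
  // padding {p} -> {0, p}, dilation {d} -> {1, d}, so the dummy height
  // dimension is a no-op during the eventual 2D convolution.
  return 0;
}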
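run_conv and run_deconv are not called directly; qconv_prepack.cpp exposes them through the quantized op registry. A sketch of that binding, based on the TORCH_LIBRARY_IMPL registration pattern used elsewhere in the file (the exact registration block is outside the quoted line range):

// Sketch: binding the class's static methods to the quantized 1D prepack ops
// for the QuantizedCPU dispatch key.
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
  m.impl(
      TORCH_SELECTIVE_NAME("quantized::conv1d_prepack"),
      TORCH_FN(QConv1dPackWeightInt8::run_conv));
  m.impl(
      TORCH_SELECTIVE_NAME("quantized::conv_transpose1d_prepack"),
      TORCH_FN(QConv1dPackWeightInt8::run_deconv));
}

Registering through the dispatcher is what makes the engine switch in _run meaningful: callers invoke the same op regardless of backend, and the prepack path resolves FBGEMM vs. QNNPACK vs. oneDNN at runtime from at::globalContext().qEngine().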