Requantization Class — PyTorch Architecture
Architecture documentation for the Requantization class in requantization.cc from the PyTorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/quantized/cpu/qnnpack/bench/requantization.cc lines 36–83
// Benchmark fixture that sizes its working set to fit in the L1 data cache,
// so requantization kernels are measured without memory-bandwidth effects.
class Requantization : public benchmark::Fixture {
 public:
  inline Requantization() {
    cpuinfo_initialize();
    // cpuinfo_get_l1d_cache() returns nullptr when cache topology is
    // unavailable (e.g. some ARM SoCs or emulated environments); fall back
    // to a conservative 32 KB L1d size instead of dereferencing null.
    const cpuinfo_cache* const l1d = cpuinfo_get_l1d_cache(0);
    const size_t l1d_size = (l1d != nullptr) ? l1d->size : 32 * 1024;
    // Leave a little headroom in L1d for stack/locals so the benchmark's
    // input + output arrays don't evict each other.
    const size_t l1d_reserve = 1024;
    // Each element costs sizeof(int32_t) input + sizeof(uint8_t) output.
    // Guard against pathological l1d_size <= l1d_reserve (would underflow).
    n_ = (l1d_size > l1d_reserve)
        ? (l1d_size - l1d_reserve) / (sizeof(int32_t) + sizeof(uint8_t))
        : 0;
    // Round down to a multiple of 16 so vectorized kernels see full tiles.
    n_ = n_ / 16 * 16;
  }

  // Fills input_ with random int32 values and output_ with a 0xA5 canary
  // before every benchmark run.
  void SetUp(const benchmark::State&) override {
    const uint_fast32_t seed =
        std::chrono::steady_clock::now().time_since_epoch().count();
    auto rng =
        std::bind(std::uniform_int_distribution<int32_t>(), std::mt19937(seed));
    input_.resize(n());
    std::generate(input_.begin(), input_.end(), std::ref(rng));
    output_.resize(n());
    std::fill(output_.begin(), output_.end(), 0xA5);
  }

  // Records items/bytes processed and releases the buffers after each run.
  void TearDown(benchmark::State& state) override {
    state.SetItemsProcessed(uint64_t(state.iterations()) * n());
    state.SetBytesProcessed(
        uint64_t(state.iterations()) * n() *
        (sizeof(int32_t) + sizeof(uint8_t)));
    input_.clear();
    output_.clear();
  }

  // 32-byte-aligned input buffer (required by the SIMD kernels under test).
  inline const int32_t* input() const {
    return input_.data();
  }

  inline uint8_t* output() {
    return output_.data();
  }

  // Number of elements per iteration; always a multiple of 16.
  inline size_t n() const {
    return n_;
  }

 protected:
  std::vector<int32_t, AlignedAllocator<int32_t, 32>> input_;
  std::vector<uint8_t> output_;
  size_t n_;
};
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free