Vectorized8 Class — PyTorch Architecture
Architecture documentation for the Vectorized8 class in vec256_int.h from the PyTorch codebase. Vectorized8 is the shared AVX2 base class for the 256-bit int8_t and uint8_t vector specializations: it packs 32 byte lanes into a single __m256i register and provides construction, blend, arange, set, load, and store operations.
Entity Profile
Source Code
aten/src/ATen/cpu/vec/vec256/vec256_int.h lines 631–953
template <typename T>
class Vectorized8 : public Vectorizedi {
static_assert(
std::is_same_v<T, int8_t> || std::is_same_v<T, uint8_t>,
"Only int8_t/uint8_t are supported");
protected:
static const Vectorized<T> ones;
public:
using value_type = T;
static constexpr int size() {
return 32;
}
using Vectorizedi::Vectorizedi;
Vectorized8() {}
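// Broadcast constructor: replicates a single scalar into all 32 byte lanes.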
Vectorized8(T v) {
values = _mm256_set1_epi8(v);
}
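// Lane-wise constructor: val1 through val32 fill lanes 0 through 31 in memory
// order (_mm256_setr_epi8 takes its arguments lowest lane first).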
Vectorized8(
T val1,
T val2,
T val3,
T val4,
T val5,
T val6,
T val7,
T val8,
T val9,
T val10,
T val11,
T val12,
T val13,
T val14,
T val15,
T val16,
T val17,
T val18,
T val19,
T val20,
T val21,
T val22,
T val23,
T val24,
T val25,
T val26,
T val27,
T val28,
T val29,
T val30,
T val31,
T val32) {
values = _mm256_setr_epi8(
val1,
val2,
val3,
val4,
val5,
val6,
val7,
val8,
val9,
val10,
val11,
val12,
val13,
val14,
val15,
val16,
val17,
val18,
val19,
val20,
val21,
val22,
val23,
val24,
val25,
val26,
val27,
val28,
val29,
val30,
val31,
val32);
}
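// Compile-time blend: bit i of `mask` selects byte lane i from `b` when set,
// otherwise the lane is kept from `a`. AVX2 has no immediate byte-granular
// blend across 32 lanes, so the selected lanes are patched into a copy of `a`
// through a temporary buffer and reloaded.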
template <int64_t mask>
static Vectorized<T> blend(Vectorized<T> a, Vectorized<T> b) {
__at_align__ T tmp_values[size()];
a.store(tmp_values);
if (mask & 0x01)
tmp_values[0] = _mm256_extract_epi8(b.values, 0);
if (mask & 0x02)
tmp_values[1] = _mm256_extract_epi8(b.values, 1);
if (mask & 0x04)
tmp_values[2] = _mm256_extract_epi8(b.values, 2);
if (mask & 0x08)
tmp_values[3] = _mm256_extract_epi8(b.values, 3);
if (mask & 0x10)
tmp_values[4] = _mm256_extract_epi8(b.values, 4);
if (mask & 0x20)
tmp_values[5] = _mm256_extract_epi8(b.values, 5);
if (mask & 0x40)
tmp_values[6] = _mm256_extract_epi8(b.values, 6);
if (mask & 0x80)
tmp_values[7] = _mm256_extract_epi8(b.values, 7);
if (mask & 0x100)
tmp_values[8] = _mm256_extract_epi8(b.values, 8);
if (mask & 0x200)
tmp_values[9] = _mm256_extract_epi8(b.values, 9);
if (mask & 0x400)
tmp_values[10] = _mm256_extract_epi8(b.values, 10);
if (mask & 0x800)
tmp_values[11] = _mm256_extract_epi8(b.values, 11);
if (mask & 0x1000)
tmp_values[12] = _mm256_extract_epi8(b.values, 12);
if (mask & 0x2000)
tmp_values[13] = _mm256_extract_epi8(b.values, 13);
if (mask & 0x4000)
tmp_values[14] = _mm256_extract_epi8(b.values, 14);
if (mask & 0x8000)
tmp_values[15] = _mm256_extract_epi8(b.values, 15);
if (mask & 0x010000)
tmp_values[16] = _mm256_extract_epi8(b.values, 16);
if (mask & 0x020000)
tmp_values[17] = _mm256_extract_epi8(b.values, 17);
if (mask & 0x040000)
tmp_values[18] = _mm256_extract_epi8(b.values, 18);
if (mask & 0x080000)
tmp_values[19] = _mm256_extract_epi8(b.values, 19);
if (mask & 0x100000)
tmp_values[20] = _mm256_extract_epi8(b.values, 20);
if (mask & 0x200000)
tmp_values[21] = _mm256_extract_epi8(b.values, 21);
if (mask & 0x400000)
tmp_values[22] = _mm256_extract_epi8(b.values, 22);
if (mask & 0x800000)
tmp_values[23] = _mm256_extract_epi8(b.values, 23);
if (mask & 0x1000000)
tmp_values[24] = _mm256_extract_epi8(b.values, 24);
if (mask & 0x2000000)
tmp_values[25] = _mm256_extract_epi8(b.values, 25);
if (mask & 0x4000000)
tmp_values[26] = _mm256_extract_epi8(b.values, 26);
if (mask & 0x8000000)
tmp_values[27] = _mm256_extract_epi8(b.values, 27);
if (mask & 0x10000000)
tmp_values[28] = _mm256_extract_epi8(b.values, 28);
if (mask & 0x20000000)
tmp_values[29] = _mm256_extract_epi8(b.values, 29);
if (mask & 0x40000000)
tmp_values[30] = _mm256_extract_epi8(b.values, 30);
if (mask & 0x80000000)
tmp_values[31] = _mm256_extract_epi8(b.values, 31);
return loadu(tmp_values);
}
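// Run-time blend: the high bit of each byte in `mask` selects the lane from
// `b`, matching the semantics of _mm256_blendv_epi8.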
static Vectorized<T> blendv(
const Vectorized<T>& a,
const Vectorized<T>& b,
const Vectorized<T>& mask) {
return _mm256_blendv_epi8(a.values, b.values, mask.values);
}
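// Returns {base, base + step, base + 2 * step, ..., base + 31 * step}.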
template <typename step_t>
static Vectorized<T> arange(
T base = 0,
step_t step = static_cast<step_t>(1)) {
return Vectorized<T>(
base,
base + step,
base + 2 * step,
base + 3 * step,
base + 4 * step,
base + 5 * step,
base + 6 * step,
base + 7 * step,
base + 8 * step,
base + 9 * step,
base + 10 * step,
base + 11 * step,
base + 12 * step,
base + 13 * step,
base + 14 * step,
base + 15 * step,
base + 16 * step,
base + 17 * step,
base + 18 * step,
base + 19 * step,
base + 20 * step,
base + 21 * step,
base + 22 * step,
base + 23 * step,
base + 24 * step,
base + 25 * step,
base + 26 * step,
base + 27 * step,
base + 28 * step,
base + 29 * step,
base + 30 * step,
base + 31 * step);
}
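// Returns a vector whose first `count` lanes come from `b` and whose
// remaining lanes come from `a`; count == 0 returns `a`, and the default
// count == size() returns `b` unchanged.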
static Vectorized<T> set(Vectorized<T> a, Vectorized<T> b, T count = size()) {
switch (count) {
case 0:
return a;
case 1:
return blend<0x1>(a, b);
case 2:
return blend<0x3>(a, b);
case 3:
return blend<0x7>(a, b);
case 4:
return blend<0xF>(a, b);
case 5:
return blend<0x1F>(a, b);
case 6:
return blend<0x3F>(a, b);
case 7:
return blend<0x7F>(a, b);
case 8:
return blend<0xFF>(a, b);
case 9:
return blend<0x1FF>(a, b);
case 10:
return blend<0x3FF>(a, b);
case 11:
return blend<0x7FF>(a, b);
case 12:
return blend<0xFFF>(a, b);
case 13:
return blend<0x1FFF>(a, b);
case 14:
return blend<0x3FFF>(a, b);
case 15:
return blend<0x7FFF>(a, b);
case 16:
return blend<0xFFFF>(a, b);
case 17:
return blend<0x1FFFF>(a, b);
case 18:
return blend<0x3FFFF>(a, b);
case 19:
return blend<0x7FFFF>(a, b);
case 20:
return blend<0xFFFFF>(a, b);
case 21:
return blend<0x1FFFFF>(a, b);
case 22:
return blend<0x3FFFFF>(a, b);
case 23:
return blend<0x7FFFFF>(a, b);
case 24:
return blend<0xFFFFFF>(a, b);
case 25:
return blend<0x1FFFFFF>(a, b);
case 26:
return blend<0x3FFFFFF>(a, b);
case 27:
return blend<0x7FFFFFF>(a, b);
case 28:
return blend<0xFFFFFFF>(a, b);
case 29:
return blend<0x1FFFFFFF>(a, b);
case 30:
return blend<0x3FFFFFFF>(a, b);
case 31:
return blend<0x7FFFFFFF>(a, b);
}
return b;
}
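// Full-width unaligned load; `ptr` does not need to be 32-byte aligned.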
static Vectorized<T> loadu(const void* ptr) {
return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
}
static Vectorized<T> loadu_one_fourth(const void* ptr) {
// Fast path if only load element number of 8.
// Note: We didn't merge it as fast path of loadu(const void* ptr, T count),
// Because loadu(const void* ptr, T count) requires zero initialization for
// upper 128 bits. However, by using _mm256_castsi128_si256, the upper 128
// bits of the result are undefined.
// TODO<leslie> We can use _mm256_zextsi128_si256 in the future,
// since gcc 9.3 doesn't support it now.
__m128i input_128 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr));
return _mm256_castsi128_si256(input_128);
}
static Vectorized<T> loadu(const void* ptr, T count) {
__at_align__ T tmp_values[size()];
// Ensure uninitialized memory does not change the output value See
// https://github.com/pytorch/pytorch/issues/32502 for more details. We do
// not initialize arrays to one using "={1}" because gcc would compile it
// to two instructions while a loop would be compiled to one instruction.
for (const auto i : c10::irange(size())) {
tmp_values[i] = 1;
}
std::memcpy(tmp_values, ptr, count * sizeof(T));
return loadu(tmp_values);
}
void store(void* ptr, int count = size()) const {
if (count == size()) {
// ptr need not to be aligned here. See
// https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
_mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
} else if (count > 0) {
if (count == 8) {
// Fast path if only store element number of 8
_mm_storel_epi64(
reinterpret_cast<__m128i*>(ptr), _mm256_castsi256_si128(values));
} else {
__at_align__ T tmp_values[size()];
_mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
std::memcpy(ptr, tmp_values, count * sizeof(T));
}
}
}
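// Per-lane subscript access is intentionally disabled for this class; copy
// lanes out with store() to inspect individual elements.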
const T& operator[](int idx) const = delete;
T& operator[](int idx) = delete;
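// Integer lanes are treated as purely real values: real() and conj() return
// *this unchanged, and imag() returns a vector of zeros.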
Vectorized<T> real() const {
return *this;
}
Vectorized<T> imag() const {
return _mm256_set1_epi8(0);
}
Vectorized<T> conj() const {
return *this;
}
};
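Usage Sketch
The sketch below shows how the interface above is typically consumed. It is a minimal example under stated assumptions rather than code from the documented file: it assumes an AVX2 build in which Vectorized<int8_t> inherits Vectorized8<int8_t>, that the class is reachable through the at::vec namespace, and that ATen/cpu/vec/vec.h is the right umbrella header for your checkout.
#include <ATen/cpu/vec/vec.h> // assumed umbrella header exposing at::vec::Vectorized
#include <cstdint>
int main() {
  // Assumed alias: Vectorized<int8_t> derives from Vectorized8<int8_t>.
  using Vec = at::vec::Vectorized<int8_t>;
  int8_t src[Vec::size()];
  for (int i = 0; i < Vec::size(); ++i) {
    src[i] = static_cast<int8_t>(i); // 0, 1, ..., 31
  }
  Vec a = Vec::loadu(src);              // unaligned 32-byte load
  Vec b(42);                            // broadcast 42 into all 32 lanes
  Vec c = Vec::set(a, b, /*count=*/8);  // lanes 0..7 from b, lanes 8..31 from a
  int8_t out[Vec::size()];
  c.store(out);                         // write all 32 lanes back to memory
  return 0;
}
The store(ptr, count) overload writes only the first count lanes, which is how tail elements are handled when a buffer is not a multiple of 32 elements.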