should_prefer_converting_through_float_v Trait — pytorch Architecture
Architecture documentation for the should_prefer_converting_through_float_v variable-template trait (and the `map` overload gated on it) in functional_bfloat16.h from the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/cpu/vec/functional_bfloat16.h lines 431–462
// Elementwise map for reduced-precision types that go through float:
// each Vectorized<scalar_t> chunk is converted into two Vectorized<float>
// halves, vec_fun is applied to each half, and the results are converted
// back and stored.
//
// SFINAE gate: this overload is selected unless the functor can be invoked
// directly on Vectorized<scalar_t> AND scalar_t does not prefer converting
// through float (in which case a direct overload elsewhere applies).
//
// vec_fun:      functor invocable on Vectorized<float>.
// output_data:  destination buffer, at least `size` elements.
// input_data:   source buffer, at least `size` elements.
// size:         element count; a partial tail is handled with masked
//               loadu/store taking an explicit count.
template <
    typename scalar_t,
    typename Op,
    typename std::enable_if_t<
        !(!detail::should_prefer_converting_through_float_v<scalar_t> &&
          std::is_invocable_v<Op, vec::Vectorized<scalar_t>>),
        int> = 0>
inline void map(
    const Op& vec_fun,
    scalar_t* output_data,
    const scalar_t* input_data,
    int64_t size) {
  using bVec = vec::Vectorized<scalar_t>;
  using fVec = vec::Vectorized<float>;
  // Largest multiple of the vector width not exceeding `size`.
  const int64_t aligned = size - (size % bVec::size());
  int64_t i = 0;
  for (; i < aligned; i += bVec::size()) {
    bVec in_bvec = bVec::loadu(input_data + i);
    auto [in_lo, in_hi] = convert_to_float<scalar_t>(in_bvec);
    // Apply to each float half in sequence (matches original evaluation order).
    fVec out_lo = vec_fun(in_lo);
    fVec out_hi = vec_fun(in_hi);
    bVec out_bvec = convert_from_float<scalar_t>(out_lo, out_hi);
    out_bvec.store(output_data + i);
  }
  // Tail: fewer than bVec::size() elements remain; use count-limited
  // load/store so no out-of-bounds access occurs.
  if (i < size) {
    bVec in_bvec = bVec::loadu(input_data + i, size - i);
    auto [in_lo, in_hi] = convert_to_float<scalar_t>(in_bvec);
    fVec out_lo = vec_fun(in_lo);
    fVec out_hi = vec_fun(in_hi);
    bVec out_bvec = convert_from_float<scalar_t>(out_lo, out_hi);
    out_bvec.store(output_data + i, size - i);
  }
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free