zeta Function Template — pytorch Architecture
Architecture documentation for the zeta function template (with its is_cuda bool template parameter) in Math.h from the pytorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/Math.h lines 235–309
/*
 * Hurwitz zeta function: zeta(x, q) = sum_{k=0..inf} (k + q)^(-x).
 * Ported from the Cephes math library: a direct partial summation of the
 * leading terms followed by an Euler-Maclaurin tail correction.
 *
 * Special cases (evaluated in order):
 *   x == 1           -> +inf (pole of the zeta function)
 *   x <  1           -> NaN  (series undefined)
 *   q <= 0, integral -> +inf (pole at non-positive integer q)
 *   q <= 0, x non-integral -> NaN
 *
 * The is_cuda template flag selects the accumulation type via at::acc_type,
 * so low-precision inputs accumulate in a wider type on device.
 */
template <typename scalar_t, bool is_cuda=false>
C10_HOST_DEVICE inline scalar_t zeta(scalar_t x, scalar_t q) __ubsan_ignore_float_divide_by_zero__ {
  using acc_t = at::acc_type<scalar_t, is_cuda>;
  // Machine epsilon used as the relative convergence tolerance (Cephes MACHEP).
  const acc_t MACHEP = acc_t{1.11022302462515654042E-16};
  constexpr acc_t zero = acc_t{0.0};
  constexpr acc_t half = acc_t{0.5};
  constexpr acc_t one = acc_t{1.0};
  // Euler-Maclaurin expansion coefficients (Cephes): A[i] = related to
  // Bernoulli-number ratios; exact rationals noted where the value is rounded.
  static const acc_t A[] = {
      12.0,
      -720.0,
      30240.0,
      -1209600.0,
      47900160.0,
      -1.8924375803183791606e9, /*1.307674368e12/691*/
      7.47242496e10,
      -2.950130727918164224e12, /*1.067062284288e16/3617*/
      1.1646782814350067249e14, /*5.109094217170944e18/43867*/
      -4.5979787224074726105e15, /*8.028576626982912e20/174611*/
      1.8152105401943546773e17, /*1.5511210043330985984e23/854513*/
      -7.1661652561756670113e18 /*1.6938241367317436694528e27/236364091*/
  };

  // Guard clauses for the poles / undefined regions, in the same order as the
  // reference implementation.
  if (x == one) {
    return std::numeric_limits<scalar_t>::infinity();
  }
  if (x < one) {
    return std::numeric_limits<scalar_t>::quiet_NaN();
  }
  if (q <= zero) {
    if (q == std::floor(q)) {
      return std::numeric_limits<scalar_t>::infinity();
    }
    if (x != std::floor(x)) {
      // q^-x with q < 0 and non-integral x is not real-valued.
      return std::numeric_limits<scalar_t>::quiet_NaN();
    }
  }

  // Direct summation of the leading terms: accum = sum (q + k)^(-x).
  acc_t accum = std::pow(q, -x);
  acc_t base = q;
  acc_t term = zero;
  int iter = 0;
  // Take at least 9 terms and continue until the shifted base exceeds 9,
  // bailing out early once the latest term is negligible relative to the sum.
  while ((iter < 9) || (base <= acc_t{9.0})) {
    iter += 1;
    base += one;
    term = ::pow(base, -x);
    accum += term;
    if ((-MACHEP * accum < term) && (term < MACHEP * accum)) {
      return static_cast<scalar_t>(accum);
    }
  }

  // Euler-Maclaurin tail: integral term plus half the last summed term.
  const acc_t tail = base;
  accum += term * tail / (x - one);
  accum -= half * term;

  // Correction series: poly tracks the rising product x(x+1)...(x+2i+1),
  // term absorbs successive tail^-2 factors; stop once the relative
  // contribution drops below MACHEP.
  acc_t poly = one;
  acc_t shift = zero;
  for (int idx = 0; idx < 12; idx++) {
    poly *= x + shift;
    term /= tail;
    acc_t corr = poly * term / A[idx];
    accum = accum + corr;
    corr = ::fabs(corr / accum);
    if (corr < MACHEP) {
      return static_cast<scalar_t>(accum);
    }
    shift += one;
    poly *= x + shift;
    term /= tail;
    shift += one;
  }
  return static_cast<scalar_t>(accum);
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free