multilabel_margin_loss_forward_out_frame — PyTorch Architecture
Architecture documentation for the multilabel_margin_loss_forward_out_frame function template in LossMultiLabelMargin.cpp from the PyTorch codebase.
Entity Profile
Source Code
aten/src/ATen/native/LossMultiLabelMargin.cpp lines 60–109
template <typename scalar_t>
void multilabel_margin_loss_forward_out_frame(
const Tensor& input_contiguous,
const Tensor& target_contiguous,
Tensor& output,
Tensor& is_target,
int64_t reduction,
int64_t nframe,
int64_t dim) {
using accscalar_t = at::acc_type<scalar_t, false>;
const scalar_t* input_data = input_contiguous.const_data_ptr<scalar_t>();
const int64_t* target_data = target_contiguous.const_data_ptr<int64_t>();
scalar_t* is_target_data = is_target.data_ptr<scalar_t>();
if (reduction != Reduction::None || output.dim() == 0) {
scalar_t* output_data = output.data_ptr<scalar_t>();
accscalar_t sum = 0;
for ([[maybe_unused]] const auto t : c10::irange(nframe)) {
sum += multilabel_margin_loss_forward_inner_sum_cpu(
input_data, target_data, is_target_data, dim);
input_data += dim;
target_data += dim;
is_target_data += dim;
}
sum /= dim;
if (reduction == Reduction::Mean) {
sum /= nframe;
}
*output_data = sum; // write scalar output value
} else {
auto output_acc = output.accessor<scalar_t, 1>();
for (const auto t : c10::irange(nframe)) {
scalar_t sum = multilabel_margin_loss_forward_inner_sum_cpu(
input_data, target_data, is_target_data, dim);
sum /= dim;
output_acc[t] = sum;
input_data += dim;
target_data += dim;
is_target_data += dim;
}
}
}
Source
Analyze Your Own Codebase
Get architecture documentation, dependency graphs, and domain analysis for your codebase in minutes.
Try Supermodel Free