
The AI CUDA Engineer 👷

95_CrossEntropyLoss • ce_loss_unroll_optimized_base

Level 1 • Task 95
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """
    Computes the Cross Entropy Loss for multi-class classification tasks.

    Args:
        predictions (torch.Tensor): Predicted values.
        targets (torch.Tensor): Target values.

    Returns:
        torch.Tensor: Cross Entropy Loss.
    """
    return F.cross_entropy(predictions, targets)


class Model(nn.Module):
    """
    A model that computes Cross Entropy Loss for multi-class classification tasks.

    Parameters:
        None
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets, fn=module_fn):
        return fn(predictions, targets)


batch_size = 4096
num_classes = 10
input_shape = (num_classes,)  # Output for each class
dim = 1


def get_inputs():
    return [
        torch.randn(batch_size, *input_shape),
        torch.randint(0, num_classes, (batch_size,)),
    ]


def get_init_inputs():
    return []
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    A model that computes Cross Entropy Loss for multi-class classification tasks.

    Parameters:
        None
    """
    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets):
        return torch.nn.functional.cross_entropy(predictions, targets)

batch_size = 4096
num_classes = 10
input_shape = (num_classes, )  # Output for each class
dim = 1

def get_inputs():
    return [torch.randn(batch_size, *input_shape), torch.randint(0, num_classes, (batch_size,))]

def get_init_inputs():
    return []
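
As a quick illustration (a sketch, not part of the original page), the task's helpers can drive the baseline directly; for uniform random 10-class logits the expected cross-entropy is about ln(10) ≈ 2.3, which makes a handy sanity check:

# Hypothetical harness sketch: run the PyTorch baseline on the generated inputs.
model = Model()
predictions, targets = get_inputs()
loss = model(predictions, targets)   # scalar cross-entropy loss
print(loss.item())                   # ≈ 2.3 (= ln(10)) for random 10-class logits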

Kernel Information

Related Kernels (Level 1, Task 95 • 95_CrossEntropyLoss)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 95_CrossEntropyLoss 0.01 8.97 2.45
🥇 memory_coalescing_base 0.01 8.97 2.45
🥇 block_size_experimentation_base 0.01 8.97 2.45
🥇 stride_loop_boundary_optimization_base 0.01 8.97 2.45
🥇 optimized_thread_block_mapping_base_base 0.01 8.97 2.45
🥇 optimal_blocksize_experiment_base 0.01 8.97 2.45
🥇 modular_crossentropy_base 0.01 8.97 2.45
🥇 warp_aligned_base_base 0.01 8.97 2.45
🥇 warp_divergence_minimization_base_base 0.01 8.97 2.45
🥇 modularized_device_functions_base 0.01 8.97 2.45
🥇 ce_loss_unroll_optimized_base 0.01 8.97 2.45
🥇 ce_loss_ldg_aligned_base 0.01 8.97 2.45
🥇 ce_loss_optimized_blocksize_512_base 0.01 8.97 2.45
🥇 ce_loss_grid_stride_unroll_edit_1 0.01 8.97 2.45
🥇 ce_loss_ldg_aligned_edit_1 0.01 8.97 2.45
🥇 stride_loop_optimization_base_base 0.01 8.97 2.45
🥇 ldg_aligned_access_base 0.01 8.97 2.45
🥇 modular_device_ce_loss_base 0.01 8.97 2.45
🥇 ce_loss_stride_base 0.01 8.97 2.45
🥇 atomic_optimized_crossentropy_edit_1 0.01 8.97 2.45
#include <torch/extension.h>

__global__ void cross_entropy_loss_kernel(
    const float* __restrict__ logits,
    const int64_t* __restrict__ targets,
    float* __restrict__ losses,
    int batch_size,
    int num_classes)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int total_threads = blockDim.x * gridDim.x;
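    // Grid-stride loop below: each thread handles rows idx, idx + total_threads, ...,
    // so one fixed-size launch covers any batch size.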
    
    for (int i = idx; i < batch_size; i += total_threads) {
        const float* row = logits + i * num_classes;
        const int target = targets[i];
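        // Numerically stable cross entropy:
        //   loss = -(x[t] - m - log(sum_j exp(x[j] - m))),  m = max_j x[j]
        // Subtracting the row max keeps expf() from overflowing for large logits.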

        // Max logit computation with unrolling
        float max_val = row[0];
        #pragma unroll 4
        for (int j = 1; j < num_classes; j++) {
            max_val = fmaxf(max_val, row[j]);
        }

        // Sum exp computation with unrolling
        float sum_exp = 0.0f;
        #pragma unroll 4
        for (int j = 0; j < num_classes; j++) {
            sum_exp += expf(row[j] - max_val);
        }

        float log_sum_exp = logf(sum_exp);
        losses[i] = -(row[target] - max_val - log_sum_exp);
    }
}

torch::Tensor forward(torch::Tensor predictions, torch::Tensor targets) {
    TORCH_CHECK(predictions.is_cuda() && targets.is_cuda(), "Inputs must be CUDA tensors");
    TORCH_CHECK(predictions.dim() == 2, "Predictions must be 2D tensor");
    TORCH_CHECK(targets.dim() == 1, "Targets must be 1D tensor");

    const int batch_size = predictions.size(0);
    const int num_classes = predictions.size(1);
    auto losses = torch::empty({batch_size}, predictions.options());

    const int threads = 256;
    const int blocks = (batch_size + threads - 1) / threads;
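    // For batch_size = 4096 this yields 16 blocks; the kernel's grid-stride
    // loop would also tolerate a smaller grid.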

    cross_entropy_loss_kernel<<<blocks, threads>>>(
        predictions.data_ptr<float>(),
        targets.data_ptr<int64_t>(),
        losses.data_ptr<float>(),
        batch_size,
        num_classes
    );

    cudaError_t err = cudaGetLastError();
    TORCH_CHECK(err == cudaSuccess, "CUDA Error: ", cudaGetErrorString(err));

    return losses.mean();
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Optimized CrossEntropyLoss with loop unrolling");
}
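
To try the extension outside the benchmark harness, one option (a sketch under assumptions: the file name ce_loss.cu is hypothetical, and the kernel expects float32 CUDA tensors) is PyTorch's JIT extension loader, then a check against the reference op:

# Sketch: JIT-compile the CUDA source above (assumed saved as ce_loss.cu)
# and verify it against PyTorch's built-in cross entropy.
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

ext = load(name="ce_loss_unroll", sources=["ce_loss.cu"])

preds = torch.randn(4096, 10, device="cuda")
targets = torch.randint(0, 10, (4096,), device="cuda")

torch.testing.assert_close(ext.forward(preds, targets),
                           F.cross_entropy(preds, targets),
                           rtol=1e-4, atol=1e-5)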
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.508 inst/cycle 0.000 5
Executed Ipc Elapsed 0.030 inst/cycle 0.000 5
Issue Slots Busy 13.056 % 0.076 5
Issued Ipc Active 0.522 inst/cycle 0.000 5
SM Busy 13.056 % 0.076 5
Memory Throughput 52703144383.272 byte/second 896374610186402432.000 5
Mem Busy 8.906 % 0.017 5
Max Bandwidth 4.812 % 0.007 5
L1/TEX Hit Rate 92.360 % 0.000 5
L2 Hit Rate 87.506 % 0.112 5
Mem Pipes Busy 0.394 % 0.000 5
Warp Cycles Per Issued Instruction 14.228 cycle 0.020 5
Warp Cycles Per Executed Instruction 14.638 cycle 0.020 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 29.230 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 11.836 % 0.009 5
Achieved Active Warps Per SM 7.576 warp 0.004 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (11.8%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
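
The occupancy finding follows directly from the launch configuration: 4096 rows at 256 threads per block gives only 16 blocks of 8 warps each, so even if every block lands on its own SM, each active SM fills 8 of its 64 warp slots (12.5%, close to the measured 11.8%). A back-of-the-envelope check (a sketch; the warp-slot limit is taken from the metrics table above):

# Arithmetic behind the occupancy warning (values from this section).
batch_size, threads, warp_size = 4096, 256, 32
blocks = (batch_size + threads - 1) // threads     # 16 blocks launched
warps_per_block = threads // warp_size             # 8 warps per block
max_warps_per_sm = 64                              # "Theoretical Active Warps per SM"
print(blocks, warps_per_block / max_warps_per_sm)  # 16 blocks, 0.125 ≈ measured 11.8%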
Operation / Metric Value Unit
aten::to
CPU Time 599727.61 μs
Device Time 11.20 μs
Self CPU Time 54.02 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 599673.59 μs
Device Time 11.20 μs
Self CPU Time 105.92 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 599429.57 μs
Device Time 0.00 μs
Self CPU Time 113.04 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 590396.41 μs
Device Time 0.00 μs
Self CPU Time 590396.41 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 483324.61 μs
Device Time 6351.03 μs
Self CPU Time 483324.61 μs
Self Device Time 6351.03 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::mean
CPU Time 173826.97 μs
Device Time 41899.49 μs
Self CPU Time 99678.24 μs
Self Device Time 41899.49 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::reduce_kernel<512, 1, at::native::ReduceOp<float, at::native::MeanOps<float, float, float, float>, unsigned int, float, 4> >(at::native::ReduceOp<float, at::native::MeanOps<float, float, float, float>, unsigned int, float, 4>)
CPU Time 0.00 μs
Device Time 41899.49 μs
Self CPU Time 0.00 μs
Self Device Time 41899.49 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 95832.04 μs
Device Time 776428.81 μs
Self CPU Time 21360.61 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 74472.81 μs
Device Time 776428.81 μs
Self CPU Time 27282.07 μs
Self Device Time 776428.81 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 776428.81 μs
Self CPU Time 0.00 μs
Self Device Time 776428.81 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45284 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:7:5: warning: 2 adjacent parameters of 'cross_entropy_loss_kernel' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
7 | int batch_size,
| ^~~~~~~~~~~~~~~
8 | int num_classes)
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:7:9: note: the first parameter in the range is 'batch_size'
7 | int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:8:9: note: the last parameter in the range is 'num_classes'
8 | int num_classes)
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:10:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
10 | int idx = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:11:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
11 | int total_threads = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:14:28: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
14 | const float* row = logits + i * num_classes;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:14:37: note: make conversion explicit to silence this warning
2 |
3 | __global__ void cross_entropy_loss_kernel(
4 | const float* __restrict__ logits,
5 | const int64_t* __restrict__ targets,
6 | float* __restrict__ losses,
7 | int batch_size,
8 | int num_classes)
9 | {
10 | int idx = blockIdx.x * blockDim.x + threadIdx.x;
11 | int total_threads = blockDim.x * gridDim.x;
12 |
13 | for (int i = idx; i < batch_size; i += total_threads) {
14 | const float* row = logits + i * num_classes;
| ^~~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:14:37: note: perform multiplication in a wider type
14 | const float* row = logits + i * num_classes;
| ^
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:15:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
15 | const int target = targets[i];
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:36:37: warning: the parameter 'predictions' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
36 | torch::Tensor forward(torch::Tensor predictions, torch::Tensor targets) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:36:64: warning: the parameter 'targets' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
36 | torch::Tensor forward(torch::Tensor predictions, torch::Tensor targets) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:41:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
41 | const int batch_size = predictions.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_95/b4_s2_ce_loss_unroll_optimized/base/base.cu:42:29: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
42 | const int num_classes = predictions.size(1);
| ^