
The AI CUDA Engineer 👷

98_KLDivLoss • fast_strided_kl_base

Level 1 • Task 98
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """
    Computes the Kullback-Leibler Divergence for comparing two distributions.

    Args:
        predictions (torch.Tensor): Predicted values.
        targets (torch.Tensor): Target values.

    Returns:
        torch.Tensor: Kullback-Leibler Divergence.
    """
    return F.kl_div(torch.log(predictions), targets, reduction="batchmean")


class Model(nn.Module):
    """
    A model that computes Kullback-Leibler Divergence for comparing two distributions.

    Parameters:
        None
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets, fn=module_fn):
        return fn(predictions, targets)


batch_size = 128
input_shape = (4096,)
dim = 1


def get_inputs():
    return [
        torch.randn(batch_size, *input_shape).softmax(dim=-1),
        torch.randn(batch_size, *input_shape).softmax(dim=-1),
    ]


def get_init_inputs():
    return []
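
As a quick sanity check (not part of the benchmark harness; the tolerance is my choice), the snippet below verifies that module_fn agrees with the closed-form batchmean KL divergence on the generated inputs. It assumes the definitions above are in scope.

import torch

preds, tgts = get_inputs()
kl_ref = module_fn(preds, tgts)
# Closed form: sum over elements of t * (log t - log p), divided by batch size.
kl_manual = (tgts * (torch.log(tgts) - torch.log(preds))).sum() / batch_size
print(torch.allclose(kl_ref, kl_manual, atol=1e-5))  # expected: True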

Kernel Information

Related Kernels (Level 1, Task 98 • 98_KLDivLoss)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 optimized_kl_div_cuda_base 0.01 2.83 3.20
🥈 kl_div_sync_optimized_base 0.01 2.59 2.93
🥈 optimized_kl_div_kernel_base 0.01 2.59 2.93
🥈 kl_div_balanced_workload_base 0.01 2.59 2.93
🥈 kl_div_warp_reduce_base_base 0.01 2.59 2.93
🥈 optimized_kl_div_base 0.01 2.59 2.93
🥈 kl_div_modular_reduce_base_base 0.01 2.59 2.93
🥈 kldiv_optimized_stride_base_base_base 0.01 2.59 2.93
🥈 vectorized_aligned_kl_base 0.01 2.59 2.93
🥈 98_KLDivLoss_optimal_reduce_edit_1 0.01 2.59 2.93
🥈 strided_warp_kl_base_base 0.01 2.59 2.93
🥈 fast_strided_kl_base 0.01 2.59 2.93
🥈 coalesced_chunked_kl_base 0.01 2.59 2.93
🥈 kldiv_modular_per_thread_base_base 0.01 2.59 2.93
🥈 kldiv_unrolled_reduction_base_base 0.01 2.59 2.93
🥈 kl_div_unrolled_reduce_base_base 0.01 2.59 2.93
🥈 warp_block_vec4_opt_base 0.01 2.59 2.93
🥈 vectorized_kldiv_base_base 0.01 2.59 2.93
🥈 kl_div_even_workload_distribution_base 0.01 2.59 2.93
🥈 adaptive_kl_div_cuda_base 0.01 2.59 2.93
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Define constants
constexpr int WARP_SIZE = 32;
constexpr int ELEMENTS_PER_THREAD = 8;

// Optimized CUDA kernel for KL divergence using grid-stride loop, loop unrolling, and warp-level reduction
__global__ void fast_strided_kl_kernel(
    const float* __restrict__ log_predictions,
    const float* __restrict__ targets,
    float* __restrict__ output,
    const int n) {

    const int total_threads = gridDim.x * blockDim.x;
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float sum = 0.f;

    // Each thread processes multiple elements using a grid-stride loop and unrolling
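    // Layout note: each outer iteration covers total_threads * ELEMENTS_PER_THREAD
    // elements, and within the inner loop consecutive threads read consecutive
    // addresses (idx differs by 1 between neighboring threads), so the global
    // loads stay coalesced.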
    for (int stride = 0;; stride++) {
        // Compute base index for this iteration
        int base = tid + stride * total_threads * ELEMENTS_PER_THREAD;
        if (base >= n) break;
        #pragma unroll
        for (int i = 0; i < ELEMENTS_PER_THREAD; i++) {
            int idx = base + i * total_threads;
            if (idx < n) {
                // Use __ldg for read-only cache load
                float lp = __ldg(log_predictions + idx);
                float t = __ldg(targets + idx);
                sum += expf(lp) - t * lp;
            }
        }
    }

    // Intra-warp reduction using shuffle
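    // Each shuffle step halves the number of lanes holding live partial sums;
    // after log2(WARP_SIZE) steps, lane 0 holds the full warp total.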
    for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
        sum += __shfl_down_sync(0xffffffff, sum, offset);
    }

    // Allocate shared memory for each warp's result
    extern __shared__ float warp_sums[];
    int warp_id = threadIdx.x / WARP_SIZE;
    int lane = threadIdx.x % WARP_SIZE;

    // First thread in each warp writes its result
    if (lane == 0) {
        warp_sums[warp_id] = sum;
    }

    __syncthreads();

    // Final reduction: let the first warp reduce the per-warp sums
    int num_warps = (blockDim.x + WARP_SIZE - 1) / WARP_SIZE;
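    // Since blockDim.x <= 1024, there are at most 32 warps per block, so the
    // per-warp partial sums always fit in a single warp for this final pass.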
    sum = (threadIdx.x < num_warps) ? warp_sums[threadIdx.x] : 0.f;
    for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
        sum += __shfl_down_sync(0xffffffff, sum, offset);
    }

    // The first thread of the block atomically adds the block's sum to the global output
    if (threadIdx.x == 0) {
        atomicAdd(output, sum);
    }
}

// Host function to launch the optimized kernel
torch::Tensor fast_strided_kl_forward(
    torch::Tensor log_predictions,
    torch::Tensor targets) {
    const int n = log_predictions.numel();
    auto output = torch::zeros({1}, log_predictions.options());

    // Define kernel launch parameters
    const int threads = 256;
    int blocks = (n + threads * ELEMENTS_PER_THREAD - 1) / (threads * ELEMENTS_PER_THREAD);
    // Optionally limit the number of blocks to ensure sufficient work per block
    const int max_blocks = 256;
    blocks = (blocks < max_blocks) ? blocks : max_blocks;
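    // Worked example: for the benchmark inputs n = 128 * 4096 = 524288, so
    // blocks = ceil(524288 / (256 * 8)) = 256, which lands exactly at the cap.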

    // Calculate shared memory size: one float per warp
    int shared_mem = ((threads + WARP_SIZE - 1) / WARP_SIZE) * sizeof(float);

    fast_strided_kl_kernel<<<blocks, threads, shared_mem>>>(
        log_predictions.data_ptr<float>(),
        targets.data_ptr<float>(),
        output.data_ptr<float>(),
        n
    );

    return output / static_cast<float>(n);
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &fast_strided_kl_forward, "Optimized KL divergence (CUDA)");
}
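
A minimal sketch of how this extension might be built and called from Python, assuming the listing above is saved as fast_strided_kl.cu (the file name, device setup, and inputs are my assumptions, not part of the benchmark):

import torch
from torch.utils.cpp_extension import load

# Hypothetical build step; requires a CUDA toolchain on the host.
ext = load(name="fast_strided_kl", sources=["fast_strided_kl.cu"])

predictions = torch.randn(128, 4096, device="cuda").softmax(dim=-1)
targets = torch.randn(128, 4096, device="cuda").softmax(dim=-1)

# The kernel consumes log-probabilities, mirroring torch.log(predictions)
# in the reference module_fn above.
out = ext.forward(torch.log(predictions), targets)
print(out)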
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.572 inst/cycle 0.000 5
Executed Ipc Elapsed 0.306 inst/cycle 0.000 5
Issue Slots Busy 15.142 % 0.014 5
Issued Ipc Active 0.606 inst/cycle 0.000 5
SM Busy 15.142 % 0.014 5
Memory Throughput 844536043915.252 byte/second 65376945529851543552.000 5
Mem Busy 14.610 % 0.023 5
Max Bandwidth 25.308 % 0.073 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 18.480 % 0.001 5
Mem Pipes Busy 9.664 % 0.010 5
Warp Cycles Per Issued Instruction 24.718 cycle 0.003 5
Warp Cycles Per Executed Instruction 26.146 cycle 0.004 5
Avg. Active Threads Per Warp 31.900 0.000 5
Avg. Not Predicated Off Threads Per Warp 29.100 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 28.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 23.508 % 0.002 5
Achieved Active Warps Per SM 15.046 warp 0.001 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (23.5%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::zeros
CPU Time 5147720.82 μs
Device Time 227599.52 μs
Self CPU Time 135404.98 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 5468448.81 μs
Device Time 7528904.81 μs
Self CPU Time 268501.32 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 5199949.47 μs
Device Time 7528904.81 μs
Self CPU Time 381673.46 μs
Self Device Time 7528902.22 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 5544420.75 μs
Device Time 24648.81 μs
Self CPU Time 5544420.75 μs
Self Device Time 24648.81 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
fast_strided_kl_kernel(float const*, float const*, float*, int)
CPU Time 0.00 μs
Device Time 452684.92 μs
Self CPU Time 0.00 μs
Self Device Time 452684.92 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::div
CPU Time 951330.81 μs
Device Time 249758.90 μs
Self CPU Time 573920.58 μs
Self Device Time 249678.84 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 260645.12 μs
Device Time 680526.63 μs
Self CPU Time 260645.12 μs
Self Device Time 680526.63 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 7301305.28 μs
Self CPU Time 0.00 μs
Self Device Time 7301305.28 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45284 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:11:5: warning: 2 adjacent parameters of 'fast_strided_kl_kernel' of similar type ('const float *__restrict') are easily swappable [bugprone-easily-swappable-parameters]
11 | const float* __restrict__ log_predictions,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
12 | const float* __restrict__ targets,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:11:31: note: the first parameter in the range is 'log_predictions'
11 | const float* __restrict__ log_predictions,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:12:31: note: the last parameter in the range is 'targets'
12 | const float* __restrict__ targets,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:16:31: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
16 | const int total_threads = gridDim.x * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:17:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
17 | const int tid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:44:19: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
44 | int warp_id = threadIdx.x / WARP_SIZE;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:45:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
45 | int lane = threadIdx.x % WARP_SIZE;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:55:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
55 | int num_warps = (blockDim.x + WARP_SIZE - 1) / WARP_SIZE;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:69:19: warning: the parameter 'log_predictions' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
69 | torch::Tensor log_predictions,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:70:19: warning: the parameter 'targets' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
70 | torch::Tensor targets) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_1/task_98/b4_s2_fast_strided_kl/base/base.cu:71:19: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
71 | const int n = log_predictions.numel();
| ^