The AI CUDA Engineer 👷

96_HuberLoss • vectorized_ldg_block_reduce_base

Level 1 • Task 96
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """
    Computes the Smooth L1 (Huber) Loss for regression tasks.

    Args:
        predictions (torch.Tensor): Predicted values.
        targets (torch.Tensor): Target values.

    Returns:
        torch.Tensor: Smooth L1 (Huber) Loss.
    """
    return F.smooth_l1_loss(predictions, targets)


class Model(nn.Module):
    """
    A model that computes Smooth L1 (Huber) Loss for regression tasks.

    Parameters:
        None
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets, fn=module_fn):
        return fn(predictions, targets)


batch_size = 128
input_shape = (4096,)
dim = 1


def get_inputs():
    return [
        torch.randn(batch_size, *input_shape),
        torch.randn(batch_size, *input_shape),
    ]


def get_init_inputs():
    return []
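
For reference, Smooth L1 with PyTorch's default beta = 1.0 computes 0.5*d^2 per element where |d| < 1 and |d| - 0.5 otherwise, then averages over all elements; this is exactly the piecewise expression the CUDA kernel below evaluates. A minimal sketch checking that reading against the built-in (shapes match get_inputs above):

import torch
import torch.nn.functional as F

# Manual Smooth L1 (beta = 1.0): 0.5*d^2 where |d| < 1, |d| - 0.5 elsewhere, then the mean.
p, t = torch.randn(128, 4096), torch.randn(128, 4096)
d = (p - t).abs()
manual = torch.where(d < 1.0, 0.5 * d * d, d - 0.5).mean()
assert torch.allclose(manual, F.smooth_l1_loss(p, t))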
Kernel Information

Related Kernels (Level 1, Task 96 • 96_HuberLoss)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 const_mem_vectorized_base_base 0.01 1.62 5.37
🥇 coalesced_vectorized_opt_base 0.01 1.62 5.37
🥇 vectorized_warp_base 0.01 1.62 5.37
🥇 warp_optimized_reduction_base_base 0.01 1.62 5.37
🥇 vectorized_ldg_block_reduce_base 0.01 1.62 5.37
🥇 combined_unrolled_reduction_edit_1 0.01 1.62 5.37
🥇 combined_unrolled_reduction_base 0.01 1.62 5.37
🥇 optimized_sync_reduction_base 0.01 1.62 5.37
🥇 sync_optimized_unrolled_reduction_edit_1 0.01 1.62 5.37
🥇 tuned_blocksize_huber_base_edit_1 0.01 1.62 5.37
🥇 grid_stride_huber_base_edit_1 0.01 1.62 5.37
🥇 unrolled_huber_loss_optimized_base 0.01 1.62 5.37
🥇 96_huberloss_warp_reduction_edit_1 0.01 1.62 5.37
14 96_huber_dyn_block_edit_1 0.01 1.47 4.89
14 96_huber_unrolled_edit_1 0.01 1.47 4.89
14 96_huber_unrolled_base 0.01 1.47 4.89
14 vectorized_const_optimized_base 0.01 1.47 4.89
14 smooth_l1_loss_combined_base 0.01 1.47 4.89
14 strided_vectorized_base_base_base 0.01 1.47 4.89
14 aligned_ldg_vectorized_base 0.01 1.47 4.89
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

__global__ void smooth_l1_loss_optimized_kernel(
    const float* __restrict__ predictions,
    const float* __restrict__ targets,
    float* output,
    int n_elements
) {
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + tid;
    int stride = gridDim.x * blockDim.x;
    float thread_sum = 0.0f;

    // Vectorized processing: read 4 floats per load as float4 via the read-only cache (__ldg)
    int vec_count = n_elements / 4;
    const float4* pred4 = reinterpret_cast<const float4*>(predictions);
    const float4* targ4 = reinterpret_cast<const float4*>(targets);

    for (int i = idx; i < vec_count; i += stride) {
        float4 p = __ldg(pred4 + i);
        float4 t = __ldg(targ4 + i);

        // Smooth L1 per element: 0.5*d^2 if |d| < 1, else |d| - 0.5
        float diff = p.x - t.x;
        thread_sum += (fabsf(diff) < 1.0f) ? 0.5f*diff*diff : fabsf(diff)-0.5f;
        
        diff = p.y - t.y;
        thread_sum += (fabsf(diff) < 1.0f) ? 0.5f*diff*diff : fabsf(diff)-0.5f;
        
        diff = p.z - t.z;
        thread_sum += (fabsf(diff) < 1.0f) ? 0.5f*diff*diff : fabsf(diff)-0.5f;
        
        diff = p.w - t.w;
        thread_sum += (fabsf(diff) < 1.0f) ? 0.5f*diff*diff : fabsf(diff)-0.5f;
    }

    // Scalar processing for the remainder elements not covered by the float4 loads
    int scalar_base = vec_count * 4;
    for (int i = scalar_base + idx; i < n_elements; i += stride) {
        float diff = __ldg(predictions + i) - __ldg(targets + i);
        thread_sum += (fabsf(diff) < 1.0f) ? 0.5f*diff*diff : fabsf(diff)-0.5f;
    }

    // Block-level tree reduction in shared memory, followed by one atomicAdd per block
    __shared__ float shared_mem[256];  // size must match the launch block_size
    shared_mem[tid] = thread_sum;
    __syncthreads();

    for (int s = blockDim.x/2; s > 0; s >>= 1) {
        if (tid < s) {
            shared_mem[tid] += shared_mem[tid + s];
        }
        __syncthreads();
    }

    if (tid == 0) {
        // Each block adds its partial sum scaled by 1/n_elements, so the
        // accumulated scalar ends up as the mean loss over all elements.
        atomicAdd(output, shared_mem[0] / n_elements);
    }
}

torch::Tensor smooth_l1_loss_optimized(
    torch::Tensor predictions,
    torch::Tensor targets
) {
    TORCH_CHECK(predictions.sizes() == targets.sizes(), "Input shape mismatch");
    TORCH_CHECK(predictions.is_contiguous() && targets.is_contiguous(), "Non-contiguous inputs");
    TORCH_CHECK(predictions.is_cuda() && targets.is_cuda(), "Inputs must be CUDA tensors");
    TORCH_CHECK(predictions.scalar_type() == torch::kFloat32, "Only float32 is supported");

    int n = predictions.numel();
    auto output = torch::zeros({1}, predictions.options());

    const int block_size = 256;
    // One thread per float4 element, with at least one block
    // (e.g. n = 128*4096 = 524288 -> 131072 float4 loads -> 512 blocks of 256 threads).
    int grid_size = (n / 4 + block_size - 1) / block_size;
    grid_size = grid_size > 0 ? grid_size : 1;

    smooth_l1_loss_optimized_kernel<<<grid_size, block_size>>>(
        predictions.data_ptr<float>(),
        targets.data_ptr<float>(),
        output.data_ptr<float>(),
        n
    );

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &smooth_l1_loss_optimized, "Optimized Smooth L1 Loss");
}
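
A minimal sketch of JIT-compiling and sanity-checking the extension above, assuming the source is saved as base.cu (the file name and module name here are illustrative, not part of the original build):

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Compile the CUDA source above; "huber_opt" and "base.cu" are placeholder names.
ext = load(name="huber_opt", sources=["base.cu"])

p = torch.randn(128, 4096, device="cuda")
t = torch.randn(128, 4096, device="cuda")
# forward returns a 1-element tensor holding the mean loss
torch.testing.assert_close(ext.forward(p, t).squeeze(), F.smooth_l1_loss(p, t))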
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.998 inst/cycle 0.000 5
Executed Ipc Elapsed 0.562 inst/cycle 0.000 5
Issue Slots Busy 25.942 % 0.047 5
Issued Ipc Active 1.038 inst/cycle 0.000 5
SM Busy 25.942 % 0.047 5
Memory Throughput 806924368945.290 byte/second 45032122791160659968.000 5
Mem Busy 14.036 % 0.013 5
Max Bandwidth 24.296 % 0.034 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 18.544 % 0.000 5
Mem Pipes Busy 12.588 % 0.013 5
Warp Cycles Per Issued Instruction 27.518 cycle 0.524 5
Warp Cycles Per Executed Instruction 28.644 cycle 0.564 5
Avg. Active Threads Per Warp 31.710 0.000 5
Avg. Not Predicated Off Threads Per Warp 22.850 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 10.000 block 0.000 5
Block Limit Shared Mem 16.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 44.314 % 0.024 5
Achieved Active Warps Per SM 28.364 warp 0.010 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN ThreadDivergence Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 31.7 threads being active per cycle. This is further reduced to 22.8 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp().
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (44.5%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
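The occupancy gap flagged above follows directly from the warp counts in the metrics table: achieved occupancy = achieved active warps per SM / theoretical active warps per SM = 28.364 / 64 ≈ 44.3%, consistent with the reported 44.314% (the rule text rounds it to 44.5%).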
Operation / Metric Value Unit
aten::to
CPU Time 513676.52 μs
Device Time 305.02 μs
Self CPU Time 43.21 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zeros
CPU Time 5850304.50 μs
Device Time 226864.21 μs
Self CPU Time 159622.89 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 6188126.39 μs
Device Time 7703732.67 μs
Self CPU Time 318690.45 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 5869437.15 μs
Device Time 7703732.67 μs
Self CPU Time 406886.13 μs
Self Device Time 7703730.46 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 5843986.56 μs
Device Time 2922.50 μs
Self CPU Time 5843986.56 μs
Self Device Time 2922.50 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
smooth_l1_loss_optimized_kernel(float const*, float const*, float*, int)
CPU Time 0.00 μs
Device Time 468322.35 μs
Self CPU Time 0.00 μs
Self Device Time 468322.35 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 264240.91 μs
Device Time 1240025.63 μs
Self CPU Time 264240.91 μs
Self Device Time 1240025.63 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 7476868.47 μs
Self CPU Time 0.00 μs
Self Device Time 7476868.47 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45283 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_96/b4_s3_vectorized_ldg_block_reduce/base/base.cu:12:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
12 | int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_96/b4_s3_vectorized_ldg_block_reduce/base/base.cu:13:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
13 | int idx = blockIdx.x * blockDim.x + tid;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_96/b4_s3_vectorized_ldg_block_reduce/base/base.cu:14:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
14 | int stride = gridDim.x * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_96/b4_s3_vectorized_ldg_block_reduce/base/base.cu:51:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
51 | for (int s = blockDim.x/2; s > 0; s >>= 1) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_96/b4_s3_vectorized_ldg_block_reduce/base/base.cu:59:43: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
59 | atomicAdd(output, shared_mem[0] / n_elements);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_96/b4_s3_vectorized_ldg_block_reduce/base/base.cu:64:19: warning: the parameter 'predictions' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
64 | torch::Tensor predictions,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_96/b4_s3_vectorized_ldg_block_reduce/base/base.cu:65:19: warning: the parameter 'targets' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
65 | torch::Tensor targets
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_96/b4_s3_vectorized_ldg_block_reduce/base/base.cu:70:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
70 | int n = predictions.numel();
| ^