
The AI CUDA Engineer 👷

31_ELU • elu_unroll_kernel_base

Level 1 • Task 31
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, alpha: float) -> torch.Tensor:
    """
    Applies ELU activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.
        alpha (float): The alpha parameter for the ELU function.

    Returns:
        torch.Tensor: Output tensor with ELU applied, same shape as input.
    """
    return F.elu(x, alpha=alpha)


class Model(nn.Module):
    """
    Simple model that performs an ELU activation.
    """

    def __init__(self, alpha):
        """
        Initializes the ELU model.

        Args:
            alpha (float): The alpha parameter for the ELU function.
        """
        super(Model, self).__init__()
        self.alpha = alpha

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies ELU activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with ELU applied, same shape as input.
        """
        return fn(x, self.alpha)


batch_size = 16
dim = 16384
alpha = 1.0


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return [alpha]

import torch
import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    """
    Simple model that performs an ELU activation.
    """
    def __init__(self, alpha: float = 1.0):
        """
        Initializes the ELU model.

        Args:
            alpha (float, optional): The alpha parameter for the ELU function. Defaults to 1.0.
        """
        super(Model, self).__init__()
        self.alpha = alpha
    
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies ELU activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with ELU applied, same shape as input.
        """
        return F.elu(x, alpha=self.alpha)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return [1.0]  # Provide alpha value for initialization

Kernel Information

Related Kernels (Level 1, Task 31 • 31_ELU)

Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile)
🥇 31_elu_shared_base 0.01 1.14 4.80
🥇 hybrid_elu_optimized_base 0.01 1.14 4.80
🥇 31_elu_vectorized_base 0.01 1.14 4.80
🥇 vec_shared_elu_base 0.01 1.14 4.80
🥇 31_elu_grid_stride_base_base 0.01 1.14 4.80
🥇 31_elu_vectorized_edit_1 0.01 1.14 4.80
🥇 elu_unroll_kernel_base 0.01 1.14 4.80
🥇 ldg_elu_128_base 0.01 1.14 4.80
9 31_ELU 0.01 0.97 4.12
9 31_elu_aligned_coalesced_base 0.01 0.97 4.12
9 hybrid_elu_base 0.01 0.97 4.12
9 31_elu_optimized_indexing_base 0.01 0.97 4.12
9 31_elu_reduced_divergence_base 0.01 0.97 4.12
9 elu_hybrid_base 0.01 0.97 4.12
9 31_elu_coalesced_base 0.01 0.97 4.12
9 31_elu_shared_mem_base 0.01 0.97 4.12
9 modular_elu_base 0.01 0.97 4.12
9 elu_vec4_shared_base 0.01 0.97 4.12
9 elu_tuned_blocksize_base 0.01 0.97 4.12
9 branchless_elu_vectorized_base 0.01 0.97 4.12
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// This kernel uses vectorized loads (float4) and manually unrolls the computation
// for each of the four elements, reducing loop overhead on the critical path.
// It also unrolls the tail loop using a pragma unroll to minimize branch overhead.
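// For reference, each element is transformed as ELU(x) = x for x > 0,
// and ELU(x) = alpha * (exp(x) - 1) otherwise, matching F.elu in the
// PyTorch reference code above.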

template <typename scalar_t>
__global__ void elu_unroll_kernel(const scalar_t* __restrict__ x,
                                    scalar_t* __restrict__ out,
                                    float alpha,
                                    int n) {
    // Each thread processes 4 elements at a time
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Base index for this thread (processing groups of 4 elements)
    int base = tid * 4;
    // Total stride in terms of elements
    int stride = gridDim.x * blockDim.x * 4;

    // Loop over the input with manual unrolling
    #pragma unroll
    for (int i = base; i < n; i += stride) {
        int remaining = n - i;
        if (remaining >= 4) {
            // Use vectorized load
            float4 vec = *reinterpret_cast<const float4*>(x + i);
            // Manually unroll the ELU computation for each element
            float r0 = (vec.x > 0.f) ? vec.x : alpha * (expf(vec.x) - 1.f);
            float r1 = (vec.y > 0.f) ? vec.y : alpha * (expf(vec.y) - 1.f);
            float r2 = (vec.z > 0.f) ? vec.z : alpha * (expf(vec.z) - 1.f);
            float r3 = (vec.w > 0.f) ? vec.w : alpha * (expf(vec.w) - 1.f);
            
            float4 res = make_float4(r0, r1, r2, r3);
            *reinterpret_cast<float4*>(out + i) = res;
        } else {
            // Handle the tail elements with a manually unrolled loop
            #pragma unroll
            for (int j = 0; j < 4; j++) {
                if (j < remaining) {
                    float val = x[i + j];
                    out[i + j] = (val > 0.f) ? val : alpha * (expf(val) - 1.f);
                }
            }
        }
    }
}

// CUDA wrapper function
torch::Tensor elu_unroll_cuda(torch::Tensor x, float alpha) {
    CHECK_INPUT(x);
    auto out = torch::empty_like(x);
    const int n = x.numel();

    // Calculate number of vectorized (float4) operations
    int vec_ops = (n + 3) / 4;
    const int threads = 512;
    const int blocks = (vec_ops + threads - 1) / threads;
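    // For the benchmark input above (batch_size 16 x dim 16384 = 262,144 floats),
    // this yields vec_ops = 65,536 and blocks = 128.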

    elu_unroll_kernel<float><<<blocks, threads>>>(x.data_ptr<float>(), out.data_ptr<float>(), alpha, n);
    return out;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &elu_unroll_cuda, "Vectorized and unrolled ELU activation (CUDA)");
}
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.462 inst/cycle 0.000 5
Executed Ipc Elapsed 0.180 inst/cycle 0.000 5
Issue Slots Busy 12.644 % 0.009 5
Issued Ipc Active 0.506 inst/cycle 0.000 5
SM Busy 12.644 % 0.009 5
Memory Throughput 276035665462.630 byte/second 13374225478973550592.000 5
Mem Busy 13.134 % 0.032 5
Max Bandwidth 12.054 % 0.035 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 67.304 % 0.028 5
Mem Pipes Busy 6.256 % 0.009 5
Warp Cycles Per Issued Instruction 28.494 cycle 2.884 5
Warp Cycles Per Executed Instruction 31.186 cycle 3.453 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 26.520 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 4.000 block 0.000 5
Block Limit Shared Mem 16.000 block 0.000 5
Block Limit Warps 4.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 21.938 % 0.006 5
Achieved Active Warps Per SM 14.042 warp 0.002 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (22.0%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
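The occupancy warning above can also be checked programmatically. Below is a minimal sketch, not part of the benchmarked extension, that assumes it is compiled in the same translation unit as elu_unroll_kernel; the helper name check_occupancy is illustrative only. It asks the CUDA occupancy API how many blocks of this kernel fit on one SM for the 512-thread launch configuration used by elu_unroll_cuda.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical helper: queries resident blocks per SM for the
// elu_unroll_kernel<float> launch and derives the theoretical
// active-warp count the profiler reports.
void check_occupancy() {
    int max_blocks_per_sm = 0;
    const int threads = 512;  // matches the launch in elu_unroll_cuda
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(
        &max_blocks_per_sm, elu_unroll_kernel<float>, threads, /*dynamicSMemSize=*/0);
    const int warps_per_block = threads / 32;
    printf("blocks/SM: %d, theoretical active warps/SM: %d\n",
           max_blocks_per_sm, max_blocks_per_sm * warps_per_block);
}

On the profiled GPU this should agree with the table above: the register and warp limits allow 4 resident blocks per SM, giving 4 x 16 = 64 theoretical active warps per SM, while the achieved occupancy stays near 22% because the kernel is too small to keep the schedulers busy.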
Operation / Metric Value Unit
aten::to
CPU Time 468406.02 μs
Device Time 41.57 μs
Self CPU Time 46.02 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 468360.00 μs
Device Time 41.57 μs
Self CPU Time 108.13 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 487211.75 μs
Device Time 0.00 μs
Self CPU Time 19342.23 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 467672.80 μs
Device Time 0.00 μs
Self CPU Time 467672.80 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 464852.93 μs
Device Time 21392.84 μs
Self CPU Time 464852.93 μs
Self Device Time 21392.84 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void elu_unroll_kernel<float>(float const*, float*, float, int)
CPU Time 0.00 μs
Device Time 30896.71 μs
Self CPU Time 0.00 μs
Self Device Time 30896.71 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 21438.22 μs
Device Time 41147.37 μs
Self CPU Time 21438.22 μs
Self Device Time 41147.37 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 64191.57 μs
Device Time 609333.58 μs
Self CPU Time 12015.70 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 52181.60 μs
Device Time 609333.58 μs
Self CPU Time 17150.38 μs
Self Device Time 609333.58 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 609333.58 μs
Self CPU Time 0.00 μs
Self Device Time 609333.58 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45282 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:6:35: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
6 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:7:41: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
7 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:17:37: warning: 2 adjacent parameters of 'elu_unroll_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
17 | float alpha,
| ^~~~~~~~~~~~
18 | int n) {
| ~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:17:43: note: the first parameter in the range is 'alpha'
17 | float alpha,
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:18:41: note: the last parameter in the range is 'n'
18 | int n) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:18:37: note: 'float' and 'int' may be implicitly converted
18 | int n) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:20:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
20 | int tid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:24:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
24 | int stride = gridDim.x * blockDim.x * 4;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:55:45: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
55 | torch::Tensor elu_unroll_cuda(torch::Tensor x, float alpha) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_31/b4_s2_elu_unroll_kernel/base/base.cu:58:19: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
58 | const int n = x.numel();
| ^
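Taken together, the clang-tidy findings above suggest straightforward local fixes: parenthesize the macro arguments, take the input tensor by const reference, and keep numel() in an int64_t instead of narrowing it to int. The sketch below shows what the flagged lines could look like after those changes; it is illustrative only and relies on the includes and the elu_unroll_kernel definition from the listing above, not the code that was actually benchmarked.

#define CHECK_CUDA(x) TORCH_CHECK((x).is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK((x).is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Take the tensor by const reference and avoid narrowing numel() to int
// until the kernel launch, where the element count is known to fit.
torch::Tensor elu_unroll_cuda(const torch::Tensor& x, float alpha) {
    CHECK_INPUT(x);
    auto out = torch::empty_like(x);
    const int64_t n = x.numel();

    // Number of vectorized (float4) groups and the resulting grid size.
    const int64_t vec_ops = (n + 3) / 4;
    const int threads = 512;
    const int blocks = static_cast<int>((vec_ops + threads - 1) / threads);

    elu_unroll_kernel<float><<<blocks, threads>>>(
        x.data_ptr<float>(), out.data_ptr<float>(), alpha, static_cast<int>(n));
    return out;
}

The unsigned-to-int narrowing warnings on tid and stride inside the kernel, and the easily-swapped (alpha, n) parameter pair, would need similar small adjustments in the kernel signature itself if one wanted a fully warning-free build.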