The AI CUDA Engineer 👷

88_MinGPTNewGelu • gelu_uniform_flow_base

Level 1 • Task 88

PyTorch Reference (functional variant)
import torch
import torch.nn as nn
import torch.nn.functional as F
import math


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Implementation of the Gaussian Error Linear Units (GELU) activation function currently in Google BERT repo (identical to OpenAI GPT).

    Args:
        x (torch.Tensor): Input tensor.

    Returns:
        torch.Tensor: Output tensor.
    """
    return (
        0.5
        * x
        * (
            1.0
            + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))
        )
    )


class Model(nn.Module):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).
    Reference: Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x, fn=module_fn):
        return fn(x)


batch_size = 2000
dim = 2000


def get_inputs():
    return [torch.randn(batch_size, dim)]


def get_init_inputs():
    return []
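In closed form, module_fn above evaluates the tanh approximation of GELU:

$$\mathrm{GELU}(x) \approx 0.5\,x\left(1 + \tanh\!\left(\sqrt{\tfrac{2}{\pi}}\,\bigl(x + 0.044715\,x^{3}\bigr)\right)\right)$$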

Original Reference Implementation (minGPT)

import torch
import torch.nn as nn
import torch.nn.functional as F
import math

# From https://github.com/karpathy/minGPT/blob/master/mingpt/model.py


class Model(nn.Module):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).
    Reference: Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x):
        return (
            0.5
            * x
            * (
                1.0
                + torch.tanh(
                    math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))
                )
            )
        )


batch_size = 2000
dim = 2000


def get_inputs():
    return [torch.randn(batch_size, dim)]


def get_init_inputs():
    return []
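Since PyTorch 1.12, torch.nn.functional.gelu exposes this same approximation through its approximate="tanh" argument. A minimal sanity check, assuming a recent PyTorch build, that the hand-written formula matches the built-in:

import math

import torch
import torch.nn.functional as F

x = torch.randn(2000, 2000)
# Hand-written tanh approximation, as in the Model above
manual = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))
# PyTorch's built-in tanh-approximate GELU
builtin = F.gelu(x, approximate="tanh")
print(torch.allclose(manual, builtin, atol=1e-6))  # expected: True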

Kernel Information

Related Kernels (Level 1, Task 88 • 88_MinGPTNewGelu)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 88_MinGPTNewGelu_shared_base_base 0.02 5.72 2.99
🥇 gelu_uniform_flow_base 0.02 5.72 2.99
🥇 gelu_kernel_optimized_indexing_base 0.02 5.72 2.99
🥇 gelu_tile_inline_base 0.02 5.72 2.99
🥇 optimized_gelu_kernel_base 0.02 5.72 2.99
🥇 gelu_kernel_optimized_base 0.02 5.72 2.99
🥇 gelu_kernel_optimized_base 0.02 5.72 2.99
🥇 88_mingptnewgelu_shared_tile_base 0.02 5.72 2.99
🥇 gelu_vectorized_base 0.02 5.72 2.99
🥇 gelu_modular_base_base 0.02 5.72 2.99
🥇 optimized_gelu_manual_unroll_base 0.02 5.72 2.99
🥇 modular_gelu_device_base 0.02 5.72 2.99
🥇 optimized_gelu_combined_edit_1 0.02 5.72 2.99
🥇 gelu_optimized_block_size_base 0.02 5.72 2.99
🥇 combined_gelu_modular_vectorized_edit_1 0.02 5.72 2.99
🥇 optimized_gelu_combined_base 0.02 5.72 2.99
🥇 gelu_vectorized_tuned_edit_1 0.02 5.72 2.99
🥇 modular_gelu_kernel_base 0.02 5.72 2.99
🥇 gelu_vectorized_tuned_base 0.02 5.72 2.99
🥇 gelu_vectorized_base 0.02 5.72 2.99

CUDA Kernel (gelu_uniform_flow_base)

#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

// Inline device function for GELU activation computation
__device__ __forceinline__ float compute_gelu(float x) {
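    // Tanh approximation of GELU: 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))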
    const float sqrt_2_over_pi = 0.7978845608f;
    const float coeff = 0.044715f;
    float x_cubed = x * x * x;
    float inner = (x + coeff * x_cubed) * sqrt_2_over_pi;
    return 0.5f * x * (1.0f + tanhf(inner));
}

// Optimized kernel with uniform control flow to minimize warp divergence
__global__ void gelu_kernel_uniform(const float* __restrict__ x, float* __restrict__ y, int n) {
    extern __shared__ float shared_x[];
    
    const int unroll = 4;
    int tid = threadIdx.x;
    int base = blockIdx.x * blockDim.x * unroll;
    
    // Check if the current block has a full tile of valid elements
    bool full_tile = (base + blockDim.x * unroll <= n);

    if (full_tile) {
        // All accesses are valid; no branch divergence inside the loop
        #pragma unroll
        for (int i = 0; i < unroll; i++) {
            int idx = base + tid + i * blockDim.x;
            shared_x[tid + i * blockDim.x] = x[idx];
        }
        __syncthreads();
        
        #pragma unroll
        for (int i = 0; i < unroll; i++) {
            int idx = base + tid + i * blockDim.x;
            float xi = shared_x[tid + i * blockDim.x];
            y[idx] = compute_gelu(xi);
        }
    } else {
        // For the tail block, use conditional code to guard against out-of-bound accesses
        #pragma unroll
        for (int i = 0; i < unroll; i++) {
            int idx = base + tid + i * blockDim.x;
            if (idx < n) {
                shared_x[tid + i * blockDim.x] = x[idx];
            }
        }
        __syncthreads();
        
        #pragma unroll
        for (int i = 0; i < unroll; i++) {
            int idx = base + tid + i * blockDim.x;
            if (idx < n) {
                float xi = shared_x[tid + i * blockDim.x];
                y[idx] = compute_gelu(xi);
            }
        }
    }
}

// Host function to launch the kernel
torch::Tensor gelu_forward(torch::Tensor x) {
    TORCH_CHECK(x.is_cuda(), "Input tensor must be on CUDA");
    TORCH_CHECK(x.is_contiguous(), "Input tensor must be contiguous");
    
    auto y = torch::empty_like(x);
    int n = x.numel();
    
    const int threads = 256;
    const int unroll = 4;
    int blocks = (n + threads * unroll - 1) / (threads * unroll);
    size_t shared_mem_size = threads * unroll * sizeof(float);
    
    gelu_kernel_uniform<<<blocks, threads, shared_mem_size>>>(
        x.data_ptr<float>(),
        y.data_ptr<float>(),
        n
    );
    
    return y;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &gelu_forward, "GELU forward CUDA kernel with uniform control flow to minimize warp divergence");
}
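Because the extension exports a single forward function through pybind11, it can be JIT-compiled and exercised with torch.utils.cpp_extension.load. A minimal sketch, assuming the source above is saved as gelu_uniform_flow.cu (the filename and module name here are illustrative) and a CUDA toolchain is available:

import math

import torch
from torch.utils.cpp_extension import load

# JIT-compile and load the CUDA source above (file path and name are assumptions)
ext = load(name="gelu_uniform_flow", sources=["gelu_uniform_flow.cu"])

x = torch.randn(2000, 2000, device="cuda")
y = ext.forward(x)

# Cross-check against the reference tanh approximation
ref = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))
print(torch.allclose(y, ref, atol=1e-5))  # expected: True

For this input, n = 4,000,000 elements; with 256 threads per block and 4-way unrolling, each block covers 1024 elements, so the host code launches ceil(4,000,000 / 1024) = 3907 blocks, each with 4 KB of dynamic shared memory.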

Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 2.650 inst/cycle 0.001 5
Executed Ipc Elapsed 2.096 inst/cycle 0.001 5
Issue Slots Busy 66.448 % 0.497 5
Issued Ipc Active 2.656 inst/cycle 0.001 5
SM Busy 66.448 % 0.497 5
Memory Throughput 1427149930633.488 byte/second 61167284831592775680.000 5
Mem Busy 35.286 % 0.070 5
Max Bandwidth 42.654 % 0.068 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 50.580 % 0.027 5
Mem Pipes Busy 21.710 % 0.048 5
Warp Cycles Per Issued Instruction 20.456 cycle 0.005 5
Warp Cycles Per Executed Instruction 20.524 cycle 0.005 5
Avg. Active Threads Per Warp 24.500 0.000 5
Avg. Not Predicated Off Threads Per Warp 23.940 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 20.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 85.416 % 0.061 5
Achieved Active Warps Per SM 54.664 warp 0.025 5
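As a rough cross-check on the reported memory throughput: the kernel reads and writes 2000 x 2000 float32 values, about 32 MB of traffic per launch, which at the measured throughput implies a kernel time consistent with the ~0.02 ms runtime in the leaderboard table above:

$$\frac{2 \times 4 \times 10^{6} \times 4\ \text{B}}{1.427 \times 10^{12}\ \text{B/s}} \approx 22\ \mu\text{s}$$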
Analysis Rules
Rule Description
INF HighPipeUtilization FMA is the highest-utilized pipeline (31.6%) based on active cycles, taking into account the rates of its different instructions. It executes 32-bit floating point (FADD, FMUL, FMAD, ...) and integer (IMUL, IMAD) operations. It is well-utilized, but should not be a bottleneck.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN ThreadDivergence Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 24.5 threads being active per cycle. This is further reduced to 23.9 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp().
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (85.8%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 350868.86 μs
Device Time 2223.79 μs
Self CPU Time 42.31 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 350826.55 μs
Device Time 2223.79 μs
Self CPU Time 116.89 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 373444.70 μs
Device Time 0.00 μs
Self CPU Time 17313.01 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 347733.22 μs
Device Time 0.00 μs
Self CPU Time 347733.22 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 532764.52 μs
Device Time 21038.13 μs
Self CPU Time 532764.52 μs
Self Device Time 21038.13 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
gelu_kernel_uniform(float const*, float*, int)
CPU Time 0.00 μs
Device Time 103352.34 μs
Self CPU Time 0.00 μs
Self Device Time 103352.34 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 22618.37 μs
Device Time 40530.16 μs
Self CPU Time 22618.37 μs
Self Device Time 40530.16 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 79964.17 μs
Device Time 600415.60 μs
Self CPU Time 12164.23 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 67801.68 μs
Device Time 600415.60 μs
Self CPU Time 15093.48 μs
Self Device Time 600415.60 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 600415.60 μs
Self CPU Time 0.00 μs
Self Device Time 600415.60 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45283 warnings generated when compiling for host.
Suppressed 45321 warnings (45274 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:20:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
20 | int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:21:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
21 | int base = blockIdx.x * blockDim.x * unroll;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:30:23: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
30 | int idx = base + tid + i * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:37:23: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
37 | int idx = base + tid + i * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:45:23: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
45 | int idx = base + tid + i * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:54:23: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
54 | int idx = base + tid + i * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:64:42: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
64 | torch::Tensor gelu_forward(torch::Tensor x) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:69:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
69 | int n = x.numel();
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:74:30: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
74 | size_t shared_mem_size = threads * unroll * sizeof(float);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:74:30: note: make conversion explicit to silence this warning
74 | size_t shared_mem_size = threads * unroll * sizeof(float);
| ^~~~~~~~~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_88/b5_s2_gelu_uniform_flow/base/base.cu:74:30: note: perform multiplication in a wider type
74 | size_t shared_mem_size = threads * unroll * sizeof(float);
| ^~~~~~~
| static_cast<long>( )