The AI CUDA Engineer 👷

36_RMSNorm_unrolled_rms_norm_base

Level 1 • Task 36
import torch
import torch.nn as nn


def module_fn(x: torch.Tensor, eps: float) -> torch.Tensor:
    """
    Applies RMS Normalization to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, num_features, *)
        eps (float): Small value added to denominator for numerical stability

    Returns:
        torch.Tensor: Output tensor with RMS Normalization applied
    """
    rms = torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + eps)
    return x / rms


class Model(nn.Module):
    """
    Simple model that performs RMS Normalization.
    """

    def __init__(self, num_features: int, eps: float):
        """
        Initializes the RMSNorm layer.

        Args:
            num_features (int): Number of features in the input tensor
            eps (float): Small value added to denominator for numerical stability
        """
        super(Model, self).__init__()
        self.eps = eps

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Forward pass that calls module_fn.

        Args:
            x (torch.Tensor): Input tensor
            fn: Function to call, defaults to module_fn

        Returns:
            torch.Tensor: Output of module_fn
        """
        return fn(x, self.eps)


batch_size = 16
features = 64
dim1 = 256
dim2 = 256
eps = 1e-5


def get_inputs():
    x = torch.randn(batch_size, features, dim1, dim2)
    return [x]


def get_init_inputs():
    return [features, eps]

import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs RMS Normalization.
    """

    def __init__(self, num_features: int, eps: float = 1e-5):
        """
        Initializes the RMSNorm layer.

        Args:
            num_features (int): Number of features in the input tensor.
            eps (float, optional): A small value added to the denominator to avoid division by zero. Defaults to 1e-5.
        """
        super(Model, self).__init__()
        self.num_features = num_features
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies RMS Normalization to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, num_features, *).

        Returns:
            torch.Tensor: Output tensor with RMS Normalization applied, same shape as input.
        """
        # Calculate the RMS along the feature dimension
        rms = torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + self.eps)

        # Normalize the input by dividing by the RMS
        return x / rms


batch_size = 16
features = 64
dim1 = 256
dim2 = 256
eps = 1e-5


def get_inputs():
    x = torch.randn(batch_size, features, dim1, dim2)
    return [x]


def get_init_inputs():
    return [features, eps]
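
The two listings above differ only in packaging: the first threads eps through a functional module_fn, while the second stores it on a self-contained Model. A quick numeric sanity check (a sketch, assuming the Model class and the harness constants above are in scope) confirms the defining property of RMSNorm: after normalization, the root-mean-square over the feature dimension is ~1 at every spatial location.

import torch

# Sanity-check sketch (not part of the benchmark harness)
torch.manual_seed(0)
x = torch.randn(batch_size, features, dim1, dim2)
y = Model(features, eps)(x)

# RMS over the feature dimension at every (batch, dim1, dim2) location
out_rms = torch.sqrt(torch.mean(y**2, dim=1))
# eps shifts the result by only ~eps / mean(x**2), so a loose tolerance suffices
assert torch.allclose(out_rms, torch.ones_like(out_rms), atol=1e-4)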

Kernel Information

Related Kernels (Level 1, Task 36 • 36_RMSNorm_)

Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile)
🥇 | 36_rmsnorm_even_workload_base | 0.19 | 2.81 | 2.68
🥇 | 36_rmsnorm_optimized_indexing_base_base | 0.19 | 2.81 | 2.68
🥉 | 36_rmsnorm_optimized_indexing_base_base | 0.19 | 2.79 | 2.67
4 | modular_rms_norm_edit_1 | 0.27 | 1.95 | 1.86
4 | modular_rms_norm_base | 0.27 | 1.95 | 1.86
4 | combined_grid_unroll_base | 0.27 | 1.95 | 1.86
7 | variable_block_size_rms_norm_edit_1 | 0.27 | 1.94 | 1.85
7 | 36_rmsnorm_ldg_aligned_opt_base | 0.27 | 1.94 | 1.85
7 | 36_rmsnorm_modular_funcs_base | 0.27 | 1.94 | 1.85
7 | 36_RMSNorm_uniform_control_base_base | 0.27 | 1.94 | 1.85
7 | balanced_workload_rms_base | 0.27 | 1.94 | 1.85
7 | 36_rmsnorm_unroll_opt_base | 0.27 | 1.94 | 1.85
7 | unrolled_rms_norm_edit_1 | 0.27 | 1.94 | 1.85
7 | unrolled_rms_norm_base | 0.27 | 1.94 | 1.85
7 | variable_block_size_rms_norm_base | 0.27 | 1.94 | 1.85
7 | 36_rmsnorm_modular_inlined_base | 0.27 | 1.94 | 1.85
7 | 36_RMSNorm_optimized_block_size_base | 0.27 | 1.94 | 1.85
18 | 36_RMSNorm_stride_loop_base | 0.28 | 1.93 | 1.84
18 | efficient_rms_norm_kernel_base | 0.28 | 1.93 | 1.84
18 | 36_RMSNorm_no_divergence_base | 0.28 | 1.93 | 1.84
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Accumulates the sum of squares over the feature dimension for a single
// (batch, spatial-offset) position; features are strided by numel_per_batch.
template <typename scalar_t>
__device__ scalar_t calculate_sumsq(
    const scalar_t* __restrict__ input,
    const int batch_offset,
    const int numel_per_batch,
    const int offset_in_batch,
    const int num_features
) {
    scalar_t sumsq = 0.0f;
    // Ask the compiler to unroll the feature loop in chunks of 4
    #pragma unroll 4
    for (int feat = 0; feat < num_features; feat++) {
        const scalar_t val = input[batch_offset + feat * numel_per_batch + offset_in_batch];
        sumsq += val * val;
    }
    return sumsq;
}

// Divides every feature at a (batch, spatial-offset) position by the RMS,
// reusing the precomputed reciprocal so no per-element division is needed.
template <typename scalar_t>
__device__ void normalize_features(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int batch_offset,
    const int numel_per_batch,
    const int offset_in_batch,
    const int num_features,
    const scalar_t inv_rms
) {
    #pragma unroll 4
    for (int feat = 0; feat < num_features; feat++) {
        const int idx = batch_offset + feat * numel_per_batch + offset_in_batch;
        output[idx] = input[idx] * inv_rms;
    }
}

// One thread per (batch, spatial-offset) pair: each thread reduces over its
// num_features elements, then normalizes them in a second pass.
template <typename scalar_t>
__global__ void rms_norm_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int batch_size,
    const int num_features,
    const int numel_per_batch,
    const float eps
) {
    // Map the flat thread id to a batch index and an offset within that batch
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int batch_id = tid / numel_per_batch;
    
    if (batch_id >= batch_size) return;
    
    const int offset_in_batch = tid % numel_per_batch;
    const int batch_offset = batch_id * num_features * numel_per_batch;

    const scalar_t sumsq = calculate_sumsq(
        input, batch_offset, numel_per_batch, 
        offset_in_batch, num_features
    );
    
    // Reciprocal RMS: rsqrt(mean(x^2) + eps)
    const scalar_t inv_rms = rsqrt(sumsq / num_features + eps);
    
    normalize_features(
        input, output, batch_offset, numel_per_batch,
        offset_in_batch, num_features, inv_rms
    );
}

torch::Tensor rms_norm_cuda_forward(torch::Tensor input, float eps) {
    auto output = torch::empty_like(input);
    
    const int batch_size = input.size(0);
    const int num_features = input.size(1);
    
    // Collapse every dimension after the feature dim; the kernel then views
    // the input as [batch_size, num_features, numel_per_batch]
    int numel_per_batch = 1;
    for (int i = 2; i < input.dim(); i++) {
        numel_per_batch *= input.size(i);
    }

    const int total_threads = batch_size * numel_per_batch;
    const int threads_per_block = 256;
    const int blocks = (total_threads + threads_per_block - 1) / threads_per_block;

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "rms_norm_cuda", ([&] {
        rms_norm_kernel<scalar_t><<<blocks, threads_per_block>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            batch_size,
            num_features,
            numel_per_batch,
            eps
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &rms_norm_cuda_forward, "RMS normalization forward (CUDA) with loop unrolling");
}
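
To exercise the kernel, it can be JIT-compiled as a PyTorch extension and checked against the eager implementation. A minimal sketch, assuming the CUDA source above is saved as rms_norm_unrolled.cu (an illustrative file name) and the input is contiguous, which the kernel's flat index arithmetic requires:

import torch
from torch.utils.cpp_extension import load

# Compile the CUDA source above; the first call invokes nvcc
rms_ext = load(name="rms_norm_unrolled", sources=["rms_norm_unrolled.cu"])

x = torch.randn(16, 64, 256, 256, device="cuda")  # contiguous, as the kernel assumes
eps = 1e-5

# Eager reference, identical to module_fn above
ref = x / torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + eps)

out = rms_ext.forward(x, eps)
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-5)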
Performance Metrics
Metric | Value | Unit | Variance | Samples
Executed Ipc Active | 0.434 | inst/cycle | 0.000 | 5
Executed Ipc Elapsed | 0.420 | inst/cycle | 0.000 | 5
Issue Slots Busy | 10.872 | % | 0.001 | 5
Issued Ipc Active | 0.434 | inst/cycle | 0.000 | 5
SM Busy | 12.276 | % | 0.001 | 5
Memory Throughput | 2963473568207.854 | byte/second | 20562456394527162368.000 | 5
Mem Busy | 48.696 | % | 0.005 | 5
Max Bandwidth | 88.412 | % | 0.018 | 5
L1/TEX Hit Rate | 0.294 | % | 0.000 | 5
L2 Hit Rate | 34.202 | % | 0.000 | 5
Mem Pipes Busy | 11.424 | % | 0.000 | 5
Warp Cycles Per Issued Instruction | 137.164 | cycle | 0.265 | 5
Warp Cycles Per Executed Instruction | 137.202 | cycle | 0.265 | 5
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5
Avg. Not Predicated Off Threads Per Warp | 31.460 | | 0.000 | 5
Max Active Clusters | 0.000 | cluster | 0.000 | 5
Max Cluster Size | 8.000 | block | 0.000 | 5
Overall GPU Occupancy | 0.000 | % | 0.000 | 5
Cluster Occupancy | 0.000 | % | 0.000 | 5
Block Limit SM | 32.000 | block | 0.000 | 5
Block Limit Registers | 8.000 | block | 0.000 | 5
Block Limit Shared Mem | 32.000 | block | 0.000 | 5
Block Limit Warps | 8.000 | block | 0.000 | 5
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5
Theoretical Occupancy | 100.000 | % | 0.000 | 5
Achieved Occupancy | 93.516 | % | 0.010 | 5
Achieved Active Warps Per SM | 59.850 | warp | 0.004 | 5
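
The ~2.96 TB/s Memory Throughput is consistent with a simple traffic model for this kernel: each element is read twice (once in the sum-of-squares pass, once in the normalize pass) and written once. A back-of-envelope check, using the ~0.27 ms runtime from the related-kernels table above and ignoring cache reuse:

# Rough traffic model for the float32 benchmark shape
numel = 16 * 64 * 256 * 256      # batch * features * dim1 * dim2
bytes_moved = numel * 4 * 3      # 2 reads + 1 write, 4 bytes each
runtime_s = 0.27e-3              # runtime from the related-kernels table
print(bytes_moved / runtime_s)   # ~2.98e12 byte/second, close to the profiled value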
Analysis Rules
Rule | Description
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
INF Occupancy | This kernel's theoretical occupancy is not impacted by any block limit.
Operation / Metric Value Unit
aten::to
CPU Time 496339.61 μs
Device Time 27671.96 μs
Self CPU Time 47.85 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 496291.76 μs
Device Time 27671.96 μs
Self CPU Time 100.82 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 1884495.46 μs
Device Time 16421.72 μs
Self CPU Time 1884495.46 μs
Self Device Time 16421.72 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void rms_norm_kernel<float>(float const*, float*, int, int, int, float)
CPU Time 0.00 μs
Device Time 1634284.91 μs
Self CPU Time 0.00 μs
Self Device Time 1634284.91 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 16939.03 μs
Device Time 31677.07 μs
Self CPU Time 16939.03 μs
Self Device Time 31677.07 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 1531083.53 μs
Device Time 471180.95 μs
Self CPU Time 12127.30 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 1518957.23 μs
Device Time 471180.95 μs
Self CPU Time 14594.23 μs
Self Device Time 471180.95 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 471180.95 μs
Self CPU Time 0.00 μs
Self Device Time 471180.95 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45287 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:10:5: warning: 2 adjacent parameters of 'calculate_sumsq' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
10 | const int offset_in_batch,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
11 | const int num_features
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:10:15: note: the first parameter in the range is 'offset_in_batch'
10 | const int offset_in_batch,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:11:15: note: the last parameter in the range is 'num_features'
11 | const int num_features
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:28:5: warning: 2 adjacent parameters of 'normalize_features' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
28 | const int offset_in_batch,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
29 | const int num_features,
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:28:15: note: the first parameter in the range is 'offset_in_batch'
28 | const int offset_in_batch,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:29:15: note: the last parameter in the range is 'num_features'
29 | const int num_features,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:43:5: warning: 2 adjacent parameters of 'rms_norm_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
43 | const int batch_size,
| ^~~~~~~~~~~~~~~~~~~~~
44 | const int num_features,
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:43:15: note: the first parameter in the range is 'batch_size'
43 | const int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:44:15: note: the last parameter in the range is 'num_features'
44 | const int num_features,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:45:5: warning: 2 adjacent parameters of 'rms_norm_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
45 | const int numel_per_batch,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
46 | const float eps
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:45:15: note: the first parameter in the range is 'numel_per_batch'
45 | const int numel_per_batch,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:46:17: note: the last parameter in the range is 'eps'
46 | const float eps
| ^~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:46:5: note: 'const int' and 'const float' may be implicitly converted: 'const int' (as 'int') -> 'const float' (as 'float'), 'const float' (as 'float') -> 'const int' (as 'int')
46 | const float eps
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:48:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
48 | const int tid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:72:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
72 | const int batch_size = input.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:73:30: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
73 | const int num_features = input.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:77:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
77 | numel_per_batch *= input.size(i);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_36/b4_s3_unrolled_rms_norm/base/base.cu:84:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
84 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "rms_norm_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:246:19: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES_AND_HALF'
246 | TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:240:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES_AND_HALF'
240 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^