The AI CUDA Engineer 👷

39_L2Norm_l2norm_even_workload_base

Level 1 • Task 39
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies L2 normalization to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of shape (*, dim, *).

    Returns:
        torch.Tensor: Output tensor with L2 normalization applied, same shape as input.
    """
    return F.normalize(x, p=2, dim=1)


class Model(nn.Module):
    """
    Simple model that performs L2 normalization.
    """

    def __init__(self):
        """
        Initializes the L2Norm layer.
        """
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies L2 normalization to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of shape (*, dim, *).

        Returns:
            torch.Tensor: Output tensor with L2 normalization applied, same shape as input.
        """
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []
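
# Baseline PyTorch implementation of the same task (explicit norm division):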
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs L2 normalization.
    """
    def __init__(self):
        """
        Initializes the L2Norm layer.
        """
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies L2 normalization to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of shape (*, dim, *).

        Returns:
            torch.Tensor: Output tensor with L2 normalization applied, same shape as input.
        """
        return x / torch.norm(x, p=2, dim=1, keepdim=True)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return []
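The two baselines above should agree numerically: F.normalize(x, p=2, dim=1) clamps the denominator at eps=1e-12, while the explicit division uses no epsilon, so the results can differ only for rows whose norm is close to zero. A minimal sanity check (illustrative, not part of the benchmark harness):

import torch
import torch.nn.functional as F

x = torch.randn(16, 16384)
a = F.normalize(x, p=2, dim=1)                   # denominator clamped at eps=1e-12
b = x / torch.norm(x, p=2, dim=1, keepdim=True)  # no epsilon guard
print(torch.allclose(a, b, atol=1e-6))           # expected: True for random inputs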

Kernel Information

Related Kernels (Level 1, Task 39 • 39_L2Norm_)

| Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile) |
|------|-------------|--------------|------------------|-------------------|
| 🥇 | l2norm_strided_optimized_base_base | 0.01 | 1.89 | 5.60 |
| 🥈 | l2_norm_combined_base | 0.01 | 1.55 | 4.58 |
| 🥉 | l2_norm_unroll_optimized_base | 0.01 | 1.42 | 4.20 |
| 🥉 | 39_l2norm_modular_edit_1 | 0.01 | 1.42 | 4.20 |
| 🥉 | 39_l2norm_blocksize_experiment_base | 0.01 | 1.42 | 4.20 |
| 6 | 39_l2norm_atomic_opt_base | 0.01 | 1.31 | 3.87 |
| 6 | l2norm_block_tuned_base | 0.01 | 1.31 | 3.87 |
| 6 | l2_norm_block_size_tuning_base | 0.01 | 1.31 | 3.87 |
| 6 | l2_norm_atomic_minimized_base_base | 0.01 | 1.31 | 3.87 |
| 6 | l2norm_stride_optimized_base | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_coalesced_base | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_memory_coalescing_base | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_modular_refactored_edit_1 | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_optimized_indexing_edit_1 | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_sync_optimized_edit_1 | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_blocksize_experiment_edit_1 | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_memory_coalescing_edit_1 | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_modular_refactored_base | 0.01 | 1.31 | 3.87 |
| 6 | l2norm_even_workload_base | 0.01 | 1.31 | 3.87 |
| 6 | 39_l2norm_atomic_edit_1 | 0.01 | 1.31 | 3.87 |
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

// Define the segment size so that each block processes a contiguous, evenly distributed chunk of the vector
#define SEGMENT_SIZE 512
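
// For the benchmark shape (batch_size = 16, dim = 16384), this yields
// 16384 / 512 = 32 segments (and blocks) per vector, i.e. 16 * 32 = 512 blocks in total.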

// Stage 1: Each block computes the sum-of-squares for its segment and atomically accumulates into the per-vector partial sum
template <typename scalar_t>
__global__ void l2norm_stage1_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ partial_sums,
    const int C,
    const int total_vectors,
    const int stride_C,
    const int outer_stride,
    const int blocks_per_vector) {

    // Determine which vector and which segment this block is processing
    int vector_idx = blockIdx.x / blocks_per_vector;
    int block_segment = blockIdx.x % blocks_per_vector;
    if (vector_idx >= total_vectors) return;

    int base_offset = vector_idx * outer_stride;
    int seg_start = block_segment * SEGMENT_SIZE;
    int seg_end = seg_start + SEGMENT_SIZE;
    if (seg_end > C) seg_end = C;

    scalar_t sum = 0;
    // Each thread processes a subset of the segment via a stride-loop
    for (int i = seg_start + threadIdx.x; i < seg_end; i += blockDim.x) {
        scalar_t val = input[base_offset + i * stride_C];
        sum += val * val;
    }

    // Perform warp-level reduction using shuffle operations
    for (int offset = warpSize/2; offset > 0; offset /= 2) {
        sum += __shfl_down_sync(0xffffffff, sum, offset);
    }
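    // With warpSize == 32 the offsets run 16, 8, 4, 2, 1; after the loop,
    // lane 0 of each warp holds that warp's partial sum.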

    // Use shared memory to reduce across warps within the block
    __shared__ scalar_t sdata[32];  // enough for up to 32 warps
    int lane = threadIdx.x % warpSize;
    int warp_id = threadIdx.x / warpSize;
    if (lane == 0) {
        sdata[warp_id] = sum;
    }
    __syncthreads();

    // First warp reduces the values stored in shared memory
    if (warp_id == 0) {
        sum = (threadIdx.x < (blockDim.x + warpSize - 1) / warpSize) ? sdata[lane] : 0;
        for (int offset = warpSize/2; offset > 0; offset /= 2) {
            sum += __shfl_down_sync(0xffffffff, sum, offset);
        }
        if (threadIdx.x == 0) {
            // Atomically add the block's result to the global partial sum for this vector
            atomicAdd(&partial_sums[vector_idx], sum);
        }
    }
}
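
// Each block contributes exactly one atomicAdd per segment, so global atomic
// traffic is limited to blocks_per_vector additions per vector.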

// Stage 2: Each block normalizes the corresponding segment using the computed norm
template <typename scalar_t>
__global__ void l2norm_stage2_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const scalar_t* __restrict__ partial_sums,
    const int C,
    const int total_vectors,
    const int stride_C,
    const int outer_stride,
    const int blocks_per_vector) {

    int vector_idx = blockIdx.x / blocks_per_vector;
    int block_segment = blockIdx.x % blocks_per_vector;
    if (vector_idx >= total_vectors) return;

    int base_offset = vector_idx * outer_stride;
    int seg_start = block_segment * SEGMENT_SIZE;
    int seg_end = seg_start + SEGMENT_SIZE;
    if (seg_end > C) seg_end = C;

    // partial_sums[vector_idx] holds this vector's accumulated sum of squares;
    // its square root is the L2 norm. The small epsilon guards against
    // division by zero for all-zero vectors.
    scalar_t norm = partial_sums[vector_idx];
    scalar_t inv_norm = 1.0 / (sqrt(norm) + 1e-12);

    // Each thread normalizes its assigned elements using a stride loop
    for (int i = seg_start + threadIdx.x; i < seg_end; i += blockDim.x) {
        output[base_offset + i * stride_C] = input[base_offset + i * stride_C] * inv_norm;
    }
}
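
// Note: stage 2 rereads the input rather than caching it, trading extra global
// memory reads for the simplicity of two small kernels (no grid-wide sync needed).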

// The forward function interfacing with PyTorch
torch::Tensor forward(torch::Tensor input) {
    TORCH_CHECK(input.is_cuda(), "Input must be a CUDA tensor");
    TORCH_CHECK(input.dim() >= 1, "Input must have at least 1 dimension");

    const int C = input.size(1);
    const int total_vectors = input.numel() / C;
    const int stride_C = input.stride(1);
    const int outer_stride = input.stride(0);

    auto output = torch::empty_like(input);
    auto partial_sums = torch::zeros({total_vectors}, input.options());

    // Calculate the number of blocks to evenly distribute each vector's workload
    int blocks_per_vector = (C + SEGMENT_SIZE - 1) / SEGMENT_SIZE;
    int total_blocks = total_vectors * blocks_per_vector;
    const int threads = 256;
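    // With 256 threads per block and 512-element segments, each thread handles
    // two elements of its segment in the kernels' stride loops.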

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "l2norm_even_workload", ([&] {
        l2norm_stage1_kernel<scalar_t><<<total_blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            partial_sums.data_ptr<scalar_t>(),
            C,
            total_vectors,
            stride_C,
            outer_stride,
            blocks_per_vector
        );

        l2norm_stage2_kernel<scalar_t><<<total_blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            partial_sums.data_ptr<scalar_t>(),
            C,
            total_vectors,
            stride_C,
            outer_stride,
            blocks_per_vector
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "L2 normalization along dim=1 with even workload distribution");
}
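
To exercise the extension end to end, one option is PyTorch's JIT extension loader. A minimal sketch, assuming the CUDA source above is saved as l2norm_even_workload.cu (the filename is illustrative):

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source; `name` supplies TORCH_EXTENSION_NAME.
ext = load(name="l2norm_even_workload",
           sources=["l2norm_even_workload.cu"],
           verbose=True)

x = torch.randn(16, 16384, device="cuda")
out = ext.forward(x)
ref = F.normalize(x, p=2, dim=1)
print(torch.allclose(out, ref, atol=1e-5))  # expect True up to float tolerance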
Performance Metrics
| Metric | Value | Unit | Variance | Samples |
|--------|-------|------|----------|---------|
| Executed Ipc Active | 0.744 | inst/cycle | 0.000 | 5 |
| Executed Ipc Elapsed | 0.364 | inst/cycle | 0.000 | 5 |
| Issue Slots Busy | 19.020 | % | 0.159 | 5 |
| Issued Ipc Active | 0.762 | inst/cycle | 0.000 | 5 |
| SM Busy | 19.020 | % | 0.159 | 5 |
| Memory Throughput | 234077498393.660 | byte/second | 14552936527429742592.000 | 5 |
| Mem Busy | 11.478 | % | 0.056 | 5 |
| Max Bandwidth | 10.364 | % | 0.036 | 5 |
| L1/TEX Hit Rate | 5.500 | % | 0.000 | 5 |
| L2 Hit Rate | 66.142 | % | 0.010 | 5 |
| Mem Pipes Busy | 6.504 | % | 0.019 | 5 |
| Warp Cycles Per Issued Instruction | 37.466 | cycle | 3.008 | 5 |
| Warp Cycles Per Executed Instruction | 38.262 | cycle | 3.138 | 5 |
| Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
| Avg. Not Predicated Off Threads Per Warp | 29.320 | | 0.000 | 5 |
| Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
| Max Cluster Size | 8.000 | block | 0.000 | 5 |
| Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
| Cluster Occupancy | 0.000 | % | 0.000 | 5 |
| Block Limit SM | 32.000 | block | 0.000 | 5 |
| Block Limit Registers | 16.000 | block | 0.000 | 5 |
| Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
| Block Limit Warps | 8.000 | block | 0.000 | 5 |
| Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
| Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
| Achieved Occupancy | 42.926 | % | 0.031 | 5 |
| Achieved Active Warps Per SM | 27.474 | warp | 0.013 | 5 |
Analysis Rules
| Rule | Description |
|------|-------------|
| WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
| INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
| WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (42.7%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
| Operation | CPU Time (μs) | Device Time (μs) | Self CPU Time (μs) | Self Device Time (μs) |
|-----------|---------------|------------------|--------------------|-----------------------|
| aten::empty_strided | 541438.07 | 0.00 | 221893.13 | 0.00 |
| aten::zeros | 4703236.07 | 216880.00 | 114946.72 | 0.00 |
| aten::zero_ | 5044807.22 | 7043698.21 | 273284.03 | 0.00 |
| aten::fill_ | 4771527.82 | 7043698.21 | 389098.05 | 7043620.42 |
| cudaLaunchKernel | 5046465.29 | 337432.44 | 5046465.29 | 337432.44 |
| void l2norm_stage1_kernel<float>(float const*, float*, int, int, int, int, int) | 0.00 | 399495.56 | 0.00 | 399495.56 |
| void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | 0.00 | 6827055.54 | 0.00 | 6827055.54 |

All CPU and device memory-usage counters (self and total) are 0 B for every operation above.
Status: Completed
45293 warnings generated when compiling for host.
Suppressed 45321 warnings (45274 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:14:5: warning: 5 adjacent parameters of 'l2norm_stage1_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
14 | const int C,
| ^~~~~~~~~~~~
15 | const int total_vectors,
| ~~~~~~~~~~~~~~~~~~~~~~~~
16 | const int stride_C,
| ~~~~~~~~~~~~~~~~~~~
17 | const int outer_stride,
| ~~~~~~~~~~~~~~~~~~~~~~~
18 | const int blocks_per_vector) {
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:14:15: note: the first parameter in the range is 'C'
14 | const int C,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:18:15: note: the last parameter in the range is 'blocks_per_vector'
18 | const int blocks_per_vector) {
| ^~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:21:22: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
21 | int vector_idx = blockIdx.x / blocks_per_vector;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:22:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
22 | int block_segment = blockIdx.x % blocks_per_vector;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:32:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
32 | for (int i = seg_start + threadIdx.x; i < seg_end; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:32:61: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
32 | for (int i = seg_start + threadIdx.x; i < seg_end; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:44:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
44 | int lane = threadIdx.x % warpSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:45:19: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
45 | int warp_id = threadIdx.x / warpSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:70:5: warning: 5 adjacent parameters of 'l2norm_stage2_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
70 | const int C,
| ^~~~~~~~~~~~
71 | const int total_vectors,
| ~~~~~~~~~~~~~~~~~~~~~~~~
72 | const int stride_C,
| ~~~~~~~~~~~~~~~~~~~
73 | const int outer_stride,
| ~~~~~~~~~~~~~~~~~~~~~~~
74 | const int blocks_per_vector) {
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:70:15: note: the first parameter in the range is 'C'
70 | const int C,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:74:15: note: the last parameter in the range is 'blocks_per_vector'
74 | const int blocks_per_vector) {
| ^~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:76:22: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
76 | int vector_idx = blockIdx.x / blocks_per_vector;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:77:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
77 | int block_segment = blockIdx.x % blocks_per_vector;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:90:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
90 | for (int i = seg_start + threadIdx.x; i < seg_end; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:90:61: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
90 | for (int i = seg_start + threadIdx.x; i < seg_end; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:100:19: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
100 | const int C = input.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:101:31: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
101 | const int total_vectors = input.numel() / C;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:102:26: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
102 | const int stride_C = input.stride(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:103:30: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
103 | const int outer_stride = input.stride(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_39/b5_s3_l2norm_even_workload/base/base.cu:113:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
113 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "l2norm_even_workload", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^