
The AI CUDA Engineer 👷

27_SELU_selu_even_load_balance_base

Level 1 • Task 27
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies SELU activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with SELU applied, same shape as input.
    """
    return F.selu(x)


class Model(nn.Module):
    """
    Simple model that performs a SELU activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a SELU activation.
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies SELU activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with SELU applied, same shape as input.
        """
        return torch.selu(x)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return []  # No special initialization inputs needed
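
For reference, SELU is defined as scale * x for x > 0 and scale * alpha * (exp(x) - 1) otherwise, with alpha ≈ 1.6732632 and scale ≈ 1.0507010 — the same constants hard-coded in the CUDA kernel below. A minimal sketch comparing the explicit formula against F.selu (the helper name selu_reference is only for illustration):

import torch
import torch.nn.functional as F

ALPHA = 1.6732632423543772  # same alpha constant used in the CUDA kernel
SCALE = 1.0507009873554805  # same scale constant used in the CUDA kernel

def selu_reference(x: torch.Tensor) -> torch.Tensor:
    # scale * (x if x > 0 else alpha * (exp(x) - 1))
    return SCALE * torch.where(x > 0, x, ALPHA * (torch.exp(x) - 1))

x = torch.randn(16, 16384)  # (batch_size, dim) from the benchmark setup
assert torch.allclose(selu_reference(x), F.selu(x), atol=1e-6)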

Kernel Information

Related Kernels (Level 1, Task 27 • 27_SELU_)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 selu_vectorized_base_base 0.01 1.10 4.96
🥇 selu_shared_opt_base 0.01 1.10 4.96
🥇 27_selu_aligned_ldg_base 0.01 1.10 4.96
🥇 27_selu_aligned_ldg_edit_1 0.01 1.10 4.96
5 27_selu_unroll_optimized_base 0.01 0.94 4.25
5 27_SELU_ 0.01 0.94 4.25
5 selu_kernel_combined_optimized_base 0.01 0.94 4.25
5 selu_atomic_optimized_base 0.01 0.94 4.25
5 27_selu_manual_unroll_base 0.01 0.94 4.25
5 modular_selu_optimized_base 0.01 0.94 4.25
5 selu_2d_indexing_base 0.01 0.94 4.25
5 selu_shared_mem_optimized_base 0.01 0.94 4.25
5 selu_kernel_combined_base 0.01 0.94 4.25
5 evenly_distributed_selu_base 0.01 0.94 4.25
5 selu_memory_coalesced_base_base 0.01 0.94 4.25
5 evenly_partitioned_selu_base 0.01 0.94 4.25
5 selu_even_load_balance_base 0.01 0.94 4.25
5 selu_atomic_minimal_base 0.01 0.94 4.25
5 selu_coalesced_access_base_base 0.01 0.94 4.25
5 27_selu_vectorized_base 0.01 0.94 4.25
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

// Device helper: define an inline exponential function for float and double.
template <typename scalar_t>
__device__ inline scalar_t my_exp(scalar_t x);

template <>
__device__ inline float my_exp<float>(float x) {
    return expf(x);
}

template <>
__device__ inline double my_exp<double>(double x) {
    return exp(x);
}

// CUDA kernel that evenly distributes the workload among threads.
// Each thread computes a contiguous segment of the data based on its global thread ID.
// The split keeps per-thread work within one element of every other thread
// (the index math is sketched in Python after this listing).

template <typename scalar_t>
__global__ void selu_kernel_even_load_balance(const scalar_t* __restrict__ input,
                                               scalar_t* __restrict__ output,
                                               size_t numel) {
    // Compute a unique thread ID
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int total_threads = gridDim.x * blockDim.x;
    
    // Calculate the number of elements per thread and the residue
    size_t base = numel / total_threads;
    size_t residue = numel % total_threads;
    
    // Each thread processes base elements, plus one extra if its ID is less than the residue
    size_t start = tid * base + (tid < residue ? tid : residue);
    size_t count = base + (tid < residue ? 1 : 0);
    size_t end = start + count;

    for (size_t i = start; i < end; i++) {
        // Load input via __ldg so the read goes through the read-only data cache
        scalar_t x = __ldg(&input[i]);
        scalar_t res = (x > static_cast<scalar_t>(0))
                           ? x
                           : static_cast<scalar_t>(1.67326324235437728481) *
                                 (my_exp(x) - static_cast<scalar_t>(1));
        output[i] = static_cast<scalar_t>(1.05070098735548049342) * res;
    }
}

// Host function launching the kernel

torch::Tensor selu_forward(torch::Tensor input) {
    TORCH_CHECK(input.is_cuda(), "Input tensor must be a CUDA tensor");

    auto output = torch::empty_like(input);
    size_t numel = input.numel();

    // Launch configuration: using 1024 threads per block.
    const int threads = 1024;
    int blocks = (numel + threads - 1) / threads;  // Ensures enough threads are launched

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "selu_forward_even_load_balance_cuda", ([&] {
        const scalar_t* input_ptr = input.data_ptr<scalar_t>();
        scalar_t* output_ptr = output.data_ptr<scalar_t>();
        selu_kernel_even_load_balance<scalar_t><<<blocks, threads>>>(input_ptr, output_ptr, numel);
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &selu_forward, "SELU Activation Forward with Even Load Balancing (CUDA)");
}
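
The kernel gives each of the total_threads launched threads a contiguous slice of base = numel / total_threads elements, with the first numel % total_threads threads taking one extra. Below is a minimal Python sketch of that same index math (the helper name thread_slice is hypothetical, not part of the extension) which checks that the slices are contiguous, disjoint, and cover every element exactly once:

def thread_slice(tid: int, total_threads: int, numel: int) -> tuple[int, int]:
    # Mirrors the index math in selu_kernel_even_load_balance.
    base = numel // total_threads
    residue = numel % total_threads
    start = tid * base + min(tid, residue)
    count = base + (1 if tid < residue else 0)
    return start, start + count

# Benchmark shape: 16 * 16384 = 262144 elements; 1024 threads/block and
# blocks = ceil(262144 / 1024) = 256 give 262144 threads in total.
numel, total_threads = 16 * 16384, 256 * 1024
prev_end = 0
for tid in range(total_threads):
    start, end = thread_slice(tid, total_threads, numel)
    assert start == prev_end  # contiguous: no gaps, no overlaps
    prev_end = end
assert prev_end == numel      # every element covered exactly once

Because blocks is computed as ceil(numel / threads), the launch always supplies at least as many threads as elements, so with this configuration each thread processes at most one element; the even-split loop would only distribute multiple elements per thread if the grid were capped below numel.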
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 1.660 inst/cycle 0.000 5
Executed Ipc Elapsed 0.778 inst/cycle 0.000 5
Issue Slots Busy 43.848 % 0.059 5
Issued Ipc Active 1.756 inst/cycle 0.000 5
SM Busy 43.848 % 0.059 5
Memory Throughput 236459105348.264 byte/second 17333745306692784128.000 5
Mem Busy 11.180 % 0.044 5
Max Bandwidth 10.430 % 0.037 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 67.524 % 0.128 5
Mem Pipes Busy 6.628 % 0.017 5
Warp Cycles Per Issued Instruction 30.316 cycle 0.616 5
Warp Cycles Per Executed Instruction 31.986 cycle 0.682 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 29.460 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 2.000 block 0.000 5
Block Limit Shared Mem 8.000 block 0.000 5
Block Limit Warps 2.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 82.626 % 0.135 5
Achieved Active Warps Per SM 52.878 warp 0.055 5
Analysis Rules
Rule Description
INF HighPipeUtilization ALU is the highest-utilized pipeline (36.9%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (82.4%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
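
As a sanity check on the occupancy figures, the achieved occupancy is simply the ratio of achieved active warps per SM to the theoretical maximum (illustrative arithmetic only, using the values from the metrics table above):

theoretical_warps = 64.0   # Theoretical Active Warps per SM (metrics table)
achieved_warps = 52.878    # Achieved Active Warps Per SM (metrics table)
print(f"achieved occupancy ≈ {achieved_warps / theoretical_warps:.1%}")  # ~82.6%, matching the Achieved Occupancy row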
Operation / Metric Value Unit
aten::to
CPU Time 342781.39 μs
Device Time 39.90 μs
Self CPU Time 30.74 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 342750.65 μs
Device Time 39.90 μs
Self CPU Time 79.71 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 357416.67 μs
Device Time 0.00 μs
Self CPU Time 15053.69 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 342171.66 μs
Device Time 0.00 μs
Self CPU Time 342171.66 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 426440.94 μs
Device Time 18846.26 μs
Self CPU Time 426440.94 μs
Self Device Time 18846.26 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void selu_kernel_even_load_balance<float>(float const*, float*, unsigned long)
CPU Time 0.00 μs
Device Time 24911.91 μs
Self CPU Time 0.00 μs
Self Device Time 24911.91 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 15041.88 μs
Device Time 36367.53 μs
Self CPU Time 15041.88 μs
Self Device Time 36367.53 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 55908.52 μs
Device Time 539443.27 μs
Self CPU Time 10225.34 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 45684.22 μs
Device Time 539443.27 μs
Self CPU Time 13412.20 μs
Self Device Time 539443.27 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 539443.27 μs
Self CPU Time 0.00 μs
Self Device Time 539443.27 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45280 warnings generated when compiling for host.
Suppressed 45321 warnings (45274 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_27/b5_s2_selu_even_load_balance/base/base.cu:29:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
29 | const int tid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_27/b5_s2_selu_even_load_balance/base/base.cu:30:31: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
30 | const int total_threads = gridDim.x * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_27/b5_s2_selu_even_load_balance/base/base.cu:62:18: warning: narrowing conversion from 'size_t' (aka 'unsigned long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
62 | int blocks = (numel + threads - 1) / threads; // Ensures enough threads are launched
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_27/b5_s2_selu_even_load_balance/base/base.cu:64:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
64 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "selu_forward_even_load_balance_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^