The AI CUDA Engineer 👷

27_SELU_selu_shared_mem_optimized_base

Level 1 • Task 27
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies SELU activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with SELU applied, same shape as input.
    """
    return F.selu(x)


class Model(nn.Module):
    """
    Simple model that performs a SELU activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a SELU activation.
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies SELU activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with SELU applied, same shape as input.
        """
        return torch.selu(x)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return []  # No special initialization inputs needed
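For reference, both implementations above compute the same elementwise function; the closed form below uses the exact constants hard-coded in the CUDA kernel further down:

$$\mathrm{SELU}(x) = \lambda \cdot \begin{cases} x & \text{if } x > 0 \\ \alpha\,(e^{x} - 1) & \text{if } x \le 0 \end{cases}, \qquad \alpha \approx 1.6732632,\quad \lambda \approx 1.0507010$$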

Kernel Information

Related Kernels (Level 1, Task 27 • 27_SELU_)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 selu_vectorized_base_base 0.01 1.10 4.96
🥇 selu_shared_opt_base 0.01 1.10 4.96
🥇 27_selu_aligned_ldg_base 0.01 1.10 4.96
🥇 27_selu_aligned_ldg_edit_1 0.01 1.10 4.96
5 27_selu_unroll_optimized_base 0.01 0.94 4.25
5 27_SELU_ 0.01 0.94 4.25
5 selu_kernel_combined_optimized_base 0.01 0.94 4.25
5 selu_atomic_optimized_base 0.01 0.94 4.25
5 27_selu_manual_unroll_base 0.01 0.94 4.25
5 modular_selu_optimized_base 0.01 0.94 4.25
5 selu_2d_indexing_base 0.01 0.94 4.25
5 selu_shared_mem_optimized_base 0.01 0.94 4.25
5 selu_kernel_combined_base 0.01 0.94 4.25
5 evenly_distributed_selu_base 0.01 0.94 4.25
5 selu_memory_coalesced_base_base 0.01 0.94 4.25
5 evenly_partitioned_selu_base 0.01 0.94 4.25
5 selu_even_load_balance_base 0.01 0.94 4.25
5 selu_atomic_minimal_base 0.01 0.94 4.25
5 selu_coalesced_access_base_base 0.01 0.94 4.25
5 27_selu_vectorized_base 0.01 0.94 4.25
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <algorithm>

// Type-dispatched exponential so that float uses expf and double uses exp
template <typename scalar_t>
__device__ __forceinline__ scalar_t my_exp(scalar_t x);

template <>
__device__ __forceinline__ float my_exp<float>(float x) {
    return expf(x);
}

template <>
__device__ __forceinline__ double my_exp<double>(double x) {
    return exp(x);
}

template <typename scalar_t>
__global__ void selu_kernel_shared(const scalar_t* __restrict__ input,
                                   scalar_t* __restrict__ output,
                                   const size_t numel) {
    __shared__ scalar_t shared_data[256];
    
    const int tid = threadIdx.x;
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    
    // SELU constants from Klambauer et al., "Self-Normalizing Neural Networks" (2017)
    const scalar_t alpha = 1.67326324235437728481;
    const scalar_t lambda = 1.05070098735548049342;
    
    for (int idx = gid; idx < numel; idx += stride) {
        // Stage this thread's element in shared memory via the read-only cache;
        // note that each thread only ever reads back its own slot
        shared_data[tid] = __ldg(&input[idx]);
        __syncthreads();
        
        // Process data from shared memory
        const scalar_t x = shared_data[tid];
        const scalar_t result = (x > static_cast<scalar_t>(0))
            ? x
            : alpha * (my_exp(x) - static_cast<scalar_t>(1));
        output[idx] = lambda * result;

        // Barrier so the next grid-stride iteration cannot overwrite shared_data
        // while other threads in the block are still reading it
        __syncthreads();
    }
}

torch::Tensor selu_forward(torch::Tensor input) {
    TORCH_CHECK(input.is_cuda(), "Input tensor must be a CUDA tensor");
    
    auto output = torch::empty_like(input);
    const size_t numel = input.numel();
    
    const int threads = 256;
    // One element per thread, capped at 65535 blocks; the grid-stride loop covers any remainder
    const int blocks = std::min(65535, (int)((numel + threads - 1) / threads));
    
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "selu_forward_cuda", ([&] {
        const scalar_t* input_ptr = input.data_ptr<scalar_t>();
        scalar_t* output_ptr = output.data_ptr<scalar_t>();
        selu_kernel_shared<scalar_t><<<blocks, threads>>>(input_ptr, output_ptr, numel);
    }));
    
    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &selu_forward, "SELU Activation Forward with Shared Memory (CUDA)");
}
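As a minimal sketch of how this extension could be built and sanity-checked (it assumes the CUDA source above is saved as selu_kernel.cu; the module name and file path are illustrative, not part of this leaderboard entry):

import torch
from torch.utils.cpp_extension import load

# JIT-compile the extension; "forward" is the function bound via PYBIND11_MODULE above.
selu_ext = load(name="selu_shared_mem_optimized", sources=["selu_kernel.cu"], verbose=True)

# Same input shape the benchmark harness uses (batch_size=16, dim=16384).
x = torch.randn(16, 16384, device="cuda")
y = selu_ext.forward(x)

# Verify against PyTorch's reference SELU.
torch.testing.assert_close(y, torch.selu(x))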
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 1.212 inst/cycle 0.000 5
Executed Ipc Elapsed 0.498 inst/cycle 0.000 5
Issue Slots Busy 32.636 % 0.007 5
Issued Ipc Active 1.304 inst/cycle 0.000 5
SM Busy 32.636 % 0.007 5
Memory Throughput 277601673709.032 byte/second 5716683369323156480.000 5
Mem Busy 13.150 % 0.015 5
Max Bandwidth 12.212 % 0.012 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 66.016 % 0.150 5
Mem Pipes Busy 11.490 % 0.011 5
Warp Cycles Per Issued Instruction 39.326 cycle 0.153 5
Warp Cycles Per Executed Instruction 42.374 cycle 0.178 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 29.330 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 16.000 block 0.000 5
Block Limit Shared Mem 16.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 80.604 % 0.068 5
Achieved Active Warps Per SM 51.588 warp 0.029 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (80.3%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
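As a cross-check of the numbers above: with 64 theoretical active warps per SM, the measured 51.588 achieved active warps per SM corresponds to $51.588 / 64 \approx 80.6\%$, which matches the achieved-occupancy row within rounding.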
Operation / Metric Value Unit
aten::to
CPU Time 342396.22 μs
Device Time 39.97 μs
Self CPU Time 33.57 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 342362.65 μs
Device Time 39.97 μs
Self CPU Time 70.77 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 357213.80 μs
Device Time 0.00 μs
Self CPU Time 15253.31 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 341781.29 μs
Device Time 0.00 μs
Self CPU Time 341781.29 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 412287.67 μs
Device Time 19112.85 μs
Self CPU Time 412287.67 μs
Self Device Time 19112.85 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void selu_kernel_shared<float>(float const*, float*, unsigned long)
CPU Time 0.00 μs
Device Time 27491.45 μs
Self CPU Time 0.00 μs
Self Device Time 27491.45 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 19622.33 μs
Device Time 36719.20 μs
Self CPU Time 19622.33 μs
Self Device Time 36719.20 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 57851.92 μs
Device Time 545273.72 μs
Self CPU Time 10413.30 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 47440.21 μs
Device Time 545273.72 μs
Self CPU Time 13258.03 μs
Self Device Time 545273.72 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 545273.72 μs
Self CPU Time 0.00 μs
Self Device Time 545273.72 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45282 warnings generated when compiling for host.
Suppressed 45323 warnings (45276 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_27/b3_s2_selu_shared_mem_optimized/base/base.cu:25:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
25 | const int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_27/b3_s2_selu_shared_mem_optimized/base/base.cu:26:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
26 | const int gid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_27/b3_s2_selu_shared_mem_optimized/base/base.cu:27:24: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
27 | const int stride = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_27/b3_s2_selu_shared_mem_optimized/base/base.cu:57:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
57 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "selu_forward_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^