
The AI CUDA Engineer 👷

29_Softplus • softplus_unrolled_base

Level 1 • Task 29
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies Softplus activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with Softplus applied, same shape as input.
    """
    return F.softplus(x)


class Model(nn.Module):
    """
    Simple model that performs a Softplus activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a Softplus activation.
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies Softplus activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with Softplus applied, same shape as input.
        """
        return torch.nn.functional.softplus(x)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return []  # No special initialization inputs needed
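A minimal sketch of how the harness above might be exercised, using the Model and get_inputs definitions from the listing (the driver itself is an assumption and not part of the original code):

model = Model()
out = model(*get_inputs())
print(out.shape)  # torch.Size([16, 16384]) -- Softplus preserves the input shape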

Kernel Information

Related Kernels (Level 1, Task 29 • 29_Softplus)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 softplus_modular_base_base 0.01 1.16 4.88
🥇 warp_and_alignment_optimized_softplus_edit_1 0.01 1.16 4.88
🥇 branchless_softplus_edit_1 0.01 1.16 4.88
🥇 warp_optimized_softplus_base 0.01 1.16 4.88
5 softplus_unrolled_base_base 0.01 0.99 4.18
5 softplus_coalesced_base 0.01 0.99 4.18
5 softplus_2d_block_thread_base 0.01 0.99 4.18
5 optimized_softplus_cuda_base 0.01 0.99 4.18
5 softplus_coalesced_memory_access_base 0.01 0.99 4.18
5 softplus_tuned_indexing_base_base 0.01 0.99 4.18
5 softplus_blockstride_base 0.01 0.99 4.18
5 softplus_loop_unroll_base_base 0.01 0.99 4.18
5 softplus_branchless_base 0.01 0.99 4.18
5 softplus_blocksize_experiment_base 0.01 0.99 4.18
5 softplus_unrolled_base 0.01 0.99 4.18
5 softplus_constant_memory_base_base 0.01 0.99 4.18
5 optimized_softplus_cuda_base 0.01 0.99 4.18
5 29_Softplus 0.01 0.99 4.18
5 softplus_constant_memory_base 0.01 0.99 4.18
5 optimized_softplus_base 0.01 0.99 4.18
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Device function to compute the Softplus activation
template <typename scalar_t>
__device__ __forceinline__ scalar_t compute_softplus(const scalar_t x) {
    if (x > static_cast<scalar_t>(20.0)) {
        return x;
    } else if (x < static_cast<scalar_t>(-20.0)) {
        return exp(x);
    }
    return log1p(exp(x));
}

// CUDA kernel with manual loop unrolling (factor of 4)
template <typename scalar_t>
__global__ void softplus_kernel_unrolled(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int size) {

    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    
    int i = tid;
    // Manually unrolled grid-stride loop: process 4 elements per iteration.
    // The unrolling is written out by hand, so no #pragma unroll is needed
    // (the pragma must be followed by a loop, not a bare block).
    for (; i + 3 * stride < size; i += 4 * stride) {
        scalar_t in0 = input[i];
        scalar_t in1 = input[i + stride];
        scalar_t in2 = input[i + 2 * stride];
        scalar_t in3 = input[i + 3 * stride];

        output[i]              = compute_softplus(in0);
        output[i + stride]     = compute_softplus(in1);
        output[i + 2 * stride] = compute_softplus(in2);
        output[i + 3 * stride] = compute_softplus(in3);
    }
    // Process any remaining elements
    for (; i < size; i += stride) {
        output[i] = compute_softplus(input[i]);
    }
}

// CUDA forward function
torch::Tensor softplus_cuda_forward(torch::Tensor input) {
    auto output = torch::empty_like(input);
    const int size = input.numel();
    const int threads = 256;
    const int blocks = (size + threads - 1) / threads;

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "softplus_forward_cuda", ([&] {
        softplus_kernel_unrolled<scalar_t><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            size);
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &softplus_cuda_forward, "Softplus forward (CUDA)");
}
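The extension above exposes a single forward binding. A minimal sketch of compiling and checking it from Python, assuming the CUDA source is saved as softplus_unrolled.cu and a CUDA toolchain plus GPU are available (both assumptions; the build step is not part of the original listing):

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source above into a loadable extension module.
ext = load(name="softplus_unrolled", sources=["softplus_unrolled.cu"])

x = torch.randn(16, 16384, device="cuda")
torch.testing.assert_close(ext.forward(x), F.softplus(x))  # agrees with the PyTorch reference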
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 1.906 inst/cycle 0.000 5
Executed Ipc Elapsed 0.884 inst/cycle 0.000 5
Issue Slots Busy 51.406 % 0.106 5
Issued Ipc Active 2.056 inst/cycle 0.000 5
SM Busy 51.406 % 0.106 5
Memory Throughput 254029531171.830 byte/second 5144178277050267648.000 5
Mem Busy 12.004 % 0.011 5
Max Bandwidth 11.206 % 0.008 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 66.926 % 0.215 5
Mem Pipes Busy 12.736 % 0.014 5
Warp Cycles Per Issued Instruction 25.726 cycle 0.988 5
Warp Cycles Per Executed Instruction 27.732 cycle 1.150 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 28.900 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 81.318 % 0.001 5
Achieved Active Warps Per SM 52.044 warp 0.001 5
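As a rough sanity check, the reported memory throughput is consistent with the ~0.01 ms runtime in the related-kernels table, assuming float32 tensors of the harness size (an assumption about how the throughput metric maps onto the elementwise read-plus-write traffic):

elements = 16 * 16384                    # batch_size * dim
bytes_moved = elements * 4 * 2           # float32 input read + output write
throughput = 254_029_531_171.83          # byte/second, from the table above
print(bytes_moved / throughput * 1e3)    # ~0.008 ms of pure memory traffic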
Analysis Rules
Rule Description
INF HighPipeUtilization ALU is the highest-utilized pipeline (31.0%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (81.3%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
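As a quick check of the Occupancy warning above, the achieved figure follows directly from the warp counts in the metrics table:

achieved_warps = 52.044      # Achieved Active Warps Per SM
theoretical_warps = 64.0     # Theoretical Active Warps per SM
print(achieved_warps / theoretical_warps * 100)  # ≈ 81.3 %, matching Achieved Occupancy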
Operation / Metric Value Unit
aten::to
CPU Time 623195.06 μs
Device Time 40.13 μs
Self CPU Time 34.79 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 623160.27 μs
Device Time 40.13 μs
Self CPU Time 71.69 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 640940.92 μs
Device Time 0.00 μs
Self CPU Time 18160.38 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 622582.56 μs
Device Time 0.00 μs
Self CPU Time 622582.56 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 451320.54 μs
Device Time 20437.66 μs
Self CPU Time 451320.54 μs
Self Device Time 20437.66 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void softplus_kernel_unrolled<float>(float const*, float*, int)
CPU Time 0.00 μs
Device Time 25884.42 μs
Self CPU Time 0.00 μs
Self Device Time 25884.42 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 20620.14 μs
Device Time 39394.26 μs
Self CPU Time 20620.14 μs
Self Device Time 39394.26 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 60983.62 μs
Device Time 584510.29 μs
Self CPU Time 11080.42 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 49904.08 μs
Device Time 584510.29 μs
Self CPU Time 14157.99 μs
Self Device Time 584510.29 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 584588.98 μs
Self CPU Time 0.00 μs
Self Device Time 584588.98 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Failed
45245 warnings and 1 error generated when compiling for host.
Error while processing /home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_29/b7_s3_softplus_unrolled/base/base.cu.
Suppressed 45286 warnings (45239 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
Found compiler error(s).
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_29/b7_s3_softplus_unrolled/base/base.cu:23:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
23 | const int tid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_29/b7_s3_softplus_unrolled/base/base.cu:24:24: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
24 | const int stride = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_29/b7_s3_softplus_unrolled/base/base.cu:30:9: error: expected a for, while, or do-while loop to follow '#pragma unroll' [clang-diagnostic-error]
30 | {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_29/b7_s3_softplus_unrolled/base/base.cu:51:22: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
51 | const int size = input.numel();
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_29/b7_s3_softplus_unrolled/base/base.cu:55:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
55 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "softplus_forward_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^