
The AI CUDA Engineer 👷

28_HardSigmoid • evenly_distributed_hardsigmoid_base

Level 1 • Task 28

PyTorch reference implementation (functional form):
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies HardSigmoid activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
    """
    return F.hardsigmoid(x)


class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed

Equivalent standalone PyTorch module for the same task:

import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return torch.nn.functional.hardsigmoid(x)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return []  # No special initialization inputs needed
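
HardSigmoid is the piecewise-linear sigmoid approximation y = clamp((x + 3) / 6, 0, 1), which is exactly the elementwise formula the CUDA kernel below implements. A minimal sketch (a hypothetical check, not part of the listing) that verifies the closed form against F.hardsigmoid:

import torch
import torch.nn.functional as F

def hardsigmoid_reference(x: torch.Tensor) -> torch.Tensor:
    # HardSigmoid: clamp((x + 3) / 6, 0, 1)
    return torch.clamp((x + 3.0) / 6.0, min=0.0, max=1.0)

x = torch.randn(16, 16384)  # batch_size x dim, same shape as get_inputs()
assert torch.allclose(hardsigmoid_reference(x), F.hardsigmoid(x))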

Kernel Information

Related Kernels (Level 1, Task 28 • 28_HardSigmoid)

Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile)
🥇 | hardsigmoid_warp_vectorized_base | 0.01 | 1.12 | 4.96
🥇 | hardsigmoid_shared_optimized_edit_1 | 0.01 | 1.12 | 4.96
🥇 | hardsigmoid_unrolled_optimized_edit_1 | 0.01 | 1.12 | 4.96
🥇 | hardsigmoid_unrolled_optimized_base | 0.01 | 1.12 | 4.96
🥇 | evenly_distributed_hardsigmoid_base | 0.01 | 1.12 | 4.96
6 | divergence_reduced_hardsigmoid_base_base | 0.01 | 0.96 | 4.25
6 | constant_mem_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | warp_hardsigmoid_opt_base | 0.01 | 0.96 | 4.25
6 | 28_HardSigmoid | 0.01 | 0.96 | 4.25
6 | modular_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | modular_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | branchless_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | warp_optimized_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | optimized_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | warp_broadcast_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | vectorized_coalesced_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | vectorized_coalesced_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | shared_memory_hardsigmoid_base_base | 0.01 | 0.96 | 4.25
6 | warp_optimized_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | even_chunk_hardsigmoid_base | 0.01 | 0.96 | 4.25

Kernel source (evenly_distributed_hardsigmoid_base):

#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <type_traits>

// This kernel evenly distributes workloads by precomputing the number of complete vectorized chunks
// and then assigning them uniformly among threads. It processes remaining tail elements separately.

template <typename scalar_t, int VEC_SIZE>
__global__ void hardsigmoid_kernel(const scalar_t* __restrict__ input,
                                   scalar_t* __restrict__ output,
                                   size_t numel) {
    // Each chunk holds VEC_SIZE elements
    size_t num_chunks = numel / VEC_SIZE;
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int total_threads = blockDim.x * gridDim.x;

    constexpr scalar_t three = static_cast<scalar_t>(3.0);
    constexpr scalar_t sixth = static_cast<scalar_t>(1.0/6.0);

    // Select vector type based on precision: for float use float4, for double use double2
    using vec_t = typename std::conditional<
        std::is_same<scalar_t, float>::value,
        float4,
        double2
    >::type;

    // Evenly process full vectorized chunks
    for (size_t idx = tid; idx < num_chunks; idx += total_threads) {
        size_t base = idx * VEC_SIZE;
        vec_t chunk = *reinterpret_cast<const vec_t*>(&input[base]);
        scalar_t elems[VEC_SIZE];
        *reinterpret_cast<vec_t*>(elems) = chunk;

        #pragma unroll
        for (int i = 0; i < VEC_SIZE; i++) {
            scalar_t x = elems[i];
            x = (x + three) * sixth;  // computes (x + 3) / 6
            x = (x < static_cast<scalar_t>(0)) ? static_cast<scalar_t>(0) :
                (x > static_cast<scalar_t>(1) ? static_cast<scalar_t>(1) : x);
            elems[i] = x;
        }

        *reinterpret_cast<vec_t*>(&output[base]) = *reinterpret_cast<vec_t*>(elems);
    }

    // Process any remaining tail elements
    size_t tail_start = num_chunks * VEC_SIZE;
    for (size_t i = tail_start + tid; i < numel; i += total_threads) {
        scalar_t x = input[i];
        x = (x + three) * sixth;
        x = (x < static_cast<scalar_t>(0)) ? static_cast<scalar_t>(0) :
            (x > static_cast<scalar_t>(1) ? static_cast<scalar_t>(1) : x);
        output[i] = x;
    }
}

torch::Tensor forward(torch::Tensor input) {
    TORCH_CHECK(input.is_cuda(), "Input tensor must be on CUDA");
    auto output = torch::empty_like(input);
    size_t numel = input.numel();

    const int threads = 256;

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "evenly_distributed_hardsigmoid_cuda", ([&] {
        // Choose vector size depending on type: 4 for float (float4), 2 for double (double2)
        constexpr int vec_size = std::is_same<scalar_t, float>::value ? 4 : 2;
        size_t num_chunks = numel / vec_size;
        int blocks = (num_chunks + threads - 1) / threads;
        if (blocks == 0) {
            blocks = 1;
        }
        hardsigmoid_kernel<scalar_t, vec_size><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            numel);
    }));

    cudaError_t err = cudaGetLastError();
    TORCH_CHECK(err == cudaSuccess, "CUDA kernel failed: ", cudaGetErrorString(err));
    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &forward, "Evenly Distributed HardSigmoid activation forward (CUDA)");
}
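
For completeness, a hedged sketch of how the listing above could be JIT-compiled and exercised from Python with torch.utils.cpp_extension.load. The file name base.cu comes from the compile log further below; the rest of the harness is an assumption, not part of this page:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Assumes the CUDA listing above is saved verbatim as "base.cu" next to this script;
# the PYBIND11_MODULE block it contains exposes ext.forward().
ext = load(name="evenly_distributed_hardsigmoid", sources=["base.cu"], verbose=False)

x = torch.randn(16, 16384, device="cuda")   # matches get_inputs()
y = ext.forward(x)
assert torch.allclose(y, F.hardsigmoid(x))  # same clamp((x + 3) / 6, 0, 1) formula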
Performance Metrics
Metric | Value | Unit | Variance | Samples
Executed Ipc Active | 0.458 | inst/cycle | 0.000 | 5
Executed Ipc Elapsed | 0.180 | inst/cycle | 0.000 | 5
Issue Slots Busy | 12.988 | % | 0.304 | 5
Issued Ipc Active | 0.520 | inst/cycle | 0.000 | 5
SM Busy | 12.988 | % | 0.304 | 5
Memory Throughput | 283972097352.134 | byte/second | 5846095965073282048.000 | 5
Mem Busy | 13.498 | % | 0.015 | 5
Max Bandwidth | 12.422 | % | 0.010 | 5
L1/TEX Hit Rate | 0.000 | % | 0.000 | 5
L2 Hit Rate | 67.364 | % | 0.038 | 5
Mem Pipes Busy | 2.134 | % | 0.000 | 5
Warp Cycles Per Issued Instruction | 25.922 | cycle | 0.083 | 5
Warp Cycles Per Executed Instruction | 29.390 | cycle | 0.107 | 5
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5
Avg. Not Predicated Off Threads Per Warp | 31.040 | | 0.000 | 5
Max Active Clusters | 0.000 | cluster | 0.000 | 5
Max Cluster Size | 8.000 | block | 0.000 | 5
Overall GPU Occupancy | 0.000 | % | 0.000 | 5
Cluster Occupancy | 0.000 | % | 0.000 | 5
Block Limit SM | 32.000 | block | 0.000 | 5
Block Limit Registers | 10.000 | block | 0.000 | 5
Block Limit Shared Mem | 32.000 | block | 0.000 | 5
Block Limit Warps | 8.000 | block | 0.000 | 5
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5
Theoretical Occupancy | 100.000 | % | 0.000 | 5
Achieved Occupancy | 21.476 | % | 0.014 | 5
Achieved Active Warps Per SM | 13.744 | warp | 0.005 | 5
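
As a sanity check on these numbers: the kernel is memory-bound, reading and writing each of the 16 × 16384 float32 elements exactly once (about 2.1 MB of traffic), so at the reported ~284 GB/s the memory traffic alone accounts for most of the ~0.01 ms runtime. A back-of-the-envelope sketch:

# Rough traffic estimate for the benchmark input (batch_size=16, dim=16384, float32).
numel = 16 * 16384                       # 262,144 elements
bytes_moved = 2 * numel * 4              # one read + one write per element ~= 2.10 MB
throughput = 283_972_097_352             # byte/second, from the profile above
print(bytes_moved / throughput * 1e6)    # ~7.4 microseconds of pure memory traffic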
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (21.6%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
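
The gap between theoretical and achieved occupancy is unsurprising given how small this launch is: with one thread per float4 chunk, the host code issues only 256 blocks of 256 threads (2,048 warps in total) and the kernel completes in a few microseconds, leaving little room for the scheduler to approach the theoretical warp count. A quick sketch of the launch arithmetic implied by forward() and get_inputs():

# Launch configuration for the float32 benchmark input.
numel = 16 * 16384                        # 262,144 elements
vec_size = 4                              # float4 path chosen for float32
threads = 256
num_chunks = numel // vec_size            # 65,536 vectorized chunks
blocks = (num_chunks + threads - 1) // threads
print(blocks, blocks * threads // 32)     # 256 blocks, 2,048 warps launched overall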
Operation | CPU Time (μs) | Device Time (μs) | Self CPU Time (μs) | Self Device Time (μs)
aten::to | 281519.12 | 40.10 | 38.16 | 0.00
aten::_to_copy | 281480.96 | 40.10 | 83.72 | 0.00
aten::empty_strided | 300901.83 | 0.00 | 19834.96 | 0.00
cudaDeviceGetStreamPriorityRange | 280891.64 | 0.00 | 280891.64 | 0.00
cudaLaunchKernel | 496508.91 | 634.72 | 496508.91 | 634.72
void hardsigmoid_kernel<float, 4>(float const*, float*, unsigned long) | 0.00 | 31531.64 | 0.00 | 31531.64
cudaEventRecord | 17981.85 | 41697.74 | 17981.85 | 41697.74
aten::zero_ | 65510.02 | 638412.87 | 11981.29 | 0.00
aten::fill_ | 53529.55 | 638412.87 | 15958.51 | 638412.87
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | 0.00 | 638491.78 | 0.00 | 638491.78
(CPU, Device, Self CPU, and Self Device memory usage were 0 B for every operation.)
Status: Completed
45279 warnings generated when compiling for host.
Suppressed 45321 warnings (45274 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_28/b3_s2_evenly_distributed_hardsigmoid/base/base.cu:15:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
15 | int tid = threadIdx.x + blockIdx.x * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_28/b3_s2_evenly_distributed_hardsigmoid/base/base.cu:16:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
16 | int total_threads = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_28/b3_s2_evenly_distributed_hardsigmoid/base/base.cu:65:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
65 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "evenly_distributed_hardsigmoid_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^