
The AI CUDA Engineer 👷

28_HardSigmoid • warp_hardsigmoid_opt_base

Level 1 • Task 28
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies HardSigmoid activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
    """
    return F.hardsigmoid(x)


class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return torch.nn.functional.hardsigmoid(x)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return []  # No special initialization inputs needed
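
Both reference implementations reduce to the same closed-form definition, HardSigmoid(x) = clamp((x + 3) / 6, 0, 1), which is exactly the formula the CUDA kernel below evaluates. A minimal sanity check of that equivalence (illustrative only, not part of the benchmark harness):

import torch
import torch.nn.functional as F

# HardSigmoid is defined as y = clamp((x + 3) / 6, 0, 1); check that the
# built-in op matches the explicit formula on random inputs.
x = torch.randn(16, 16384)
manual = torch.clamp((x + 3.0) / 6.0, min=0.0, max=1.0)
assert torch.allclose(F.hardsigmoid(x), manual)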

Kernel Information

Related Kernels (Level 1, Task 28 • 28_HardSigmoid)

Rank  Kernel Name  Runtime (ms)  Speedup (Native)  Speedup (Compile)
🥇 hardsigmoid_warp_vectorized_base 0.01 1.12 4.96
🥇 hardsigmoid_shared_optimized_edit_1 0.01 1.12 4.96
🥇 hardsigmoid_unrolled_optimized_edit_1 0.01 1.12 4.96
🥇 hardsigmoid_unrolled_optimized_base 0.01 1.12 4.96
🥇 evenly_distributed_hardsigmoid_base 0.01 1.12 4.96
6 divergence_reduced_hardsigmoid_base_base 0.01 0.96 4.25
6 constant_mem_hardsigmoid_base 0.01 0.96 4.25
6 warp_hardsigmoid_opt_base 0.01 0.96 4.25
6 28_HardSigmoid 0.01 0.96 4.25
6 modular_hardsigmoid_base 0.01 0.96 4.25
6 modular_hardsigmoid_base 0.01 0.96 4.25
6 branchless_hardsigmoid_base 0.01 0.96 4.25
6 warp_optimized_hardsigmoid_base 0.01 0.96 4.25
6 optimized_hardsigmoid_base 0.01 0.96 4.25
6 warp_broadcast_hardsigmoid_base 0.01 0.96 4.25
6 vectorized_coalesced_hardsigmoid_base 0.01 0.96 4.25
6 vectorized_coalesced_hardsigmoid_base 0.01 0.96 4.25
6 shared_memory_hardsigmoid_base_base 0.01 0.96 4.25
6 warp_optimized_hardsigmoid_base 0.01 0.96 4.25
6 even_chunk_hardsigmoid_base 0.01 0.96 4.25
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Warp-level reduction for minimum for float
__device__ inline float warp_reduce_min(float val) {
  for (int offset = warpSize/2; offset > 0; offset /= 2) {
    val = fminf(val, __shfl_down_sync(0xffffffff, val, offset));
  }
  return val;
}

// Warp-level reduction for maximum for float
__device__ inline float warp_reduce_max(float val) {
  for (int offset = warpSize/2; offset > 0; offset /= 2) {
    val = fmaxf(val, __shfl_down_sync(0xffffffff, val, offset));
  }
  return val;
}

// Warp-level reduction for minimum for double
__device__ inline double warp_reduce_min(double val) {
  for (int offset = warpSize/2; offset > 0; offset /= 2) {
    val = fmin(val, __shfl_down_sync(0xffffffff, val, offset));
  }
  return val;
}

// Warp-level reduction for maximum for double
__device__ inline double warp_reduce_max(double val) {
  for (int offset = warpSize/2; offset > 0; offset /= 2) {
    val = fmax(val, __shfl_down_sync(0xffffffff, val, offset));
  }
  return val;
}

// CUDA kernel applying HardSigmoid activation: y = clamp((x + 3) / 6, 0, 1).
// It uses warp-level primitives to detect if an entire warp's inputs are saturated.
// If all values in a warp are >= 3, then y = 1; if all <= -3, then y = 0.
// This avoids redundant per-thread arithmetic when the condition holds uniformly in the warp.

template <typename scalar_t>
__global__ void warp_hardsigmoid_kernel(const scalar_t* __restrict__ input,
                                          scalar_t* __restrict__ output,
                                          size_t numel) {
  size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  size_t stride = blockDim.x * gridDim.x;
  
  for (size_t i = idx; i < numel; i += stride) {
    scalar_t x = input[i];
    // Determine lane id within the warp
    int lane = threadIdx.x & (warpSize - 1);
    
    // Compute warp-level min and max of the input values within the warp
    scalar_t warp_min = warp_reduce_min(x);
    scalar_t warp_max = warp_reduce_max(x);
    
    // Use a sentinel value (-1) which is outside the valid [0,1] output range
    // to decide if the entire warp falls in a saturated region.
    scalar_t warp_result = static_cast<scalar_t>(-1);
    if (lane == 0) {
      if (warp_min >= static_cast<scalar_t>(3)) {
        warp_result = static_cast<scalar_t>(1);
      } else if (warp_max <= static_cast<scalar_t>(-3)) {
        warp_result = static_cast<scalar_t>(0);
      }
    }
    // Broadcast the warp decision to all lanes
    warp_result = __shfl_sync(0xffffffff, warp_result, 0);
    
    scalar_t result;
    if (warp_result != static_cast<scalar_t>(-1)) {
      result = warp_result; // Uniform saturation in the warp
    } else {
      // Compute HardSigmoid normally: y = clamp((x+3)/6, 0, 1)
      result = (x + static_cast<scalar_t>(3)) / static_cast<scalar_t>(6);
      result = (result < static_cast<scalar_t>(0)) ? static_cast<scalar_t>(0) :
               (result > static_cast<scalar_t>(1)) ? static_cast<scalar_t>(1) : result;
    }
    output[i] = result;
  }
}

// Host function that dispatches the kernel
torch::Tensor forward(torch::Tensor input) {
  TORCH_CHECK(input.is_cuda(), "Input tensor must be on CUDA");
  auto output = torch::empty_like(input);
  const size_t numel = input.numel();
  const int threads = 1024;
  const int blocks = (numel + threads - 1) / threads;

  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "warp_hardsigmoid_cuda", ([&] {
    warp_hardsigmoid_kernel<scalar_t><<<blocks, threads>>>(
      input.data_ptr<scalar_t>(),
      output.data_ptr<scalar_t>(),
      numel);
  }));

  cudaError_t err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess, "CUDA kernel failed: ", cudaGetErrorString(err));

  return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &forward, "HardSigmoid activation forward (CUDA) with warp-level optimization");
}
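
A minimal sketch of how the extension above could be built, checked against the native op, and roughly timed. The file, module, and variable names here are assumptions for illustration; the leaderboard's own build and timing harness is not shown on this page.

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Assumes the CUDA source above is saved as warp_hardsigmoid.cu (hypothetical path).
ext = load(name="warp_hardsigmoid_opt", sources=["warp_hardsigmoid.cu"])

x = torch.randn(16, 16384, device="cuda")
assert torch.allclose(ext.forward(x), F.hardsigmoid(x))

# Rough timing with CUDA events (not the leaderboard's methodology).
ext.forward(x)  # warm-up / JIT compile
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for _ in range(100):
    ext.forward(x)
end.record()
torch.cuda.synchronize()
print(start.elapsed_time(end) / 100, "ms per call")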
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 2.300 inst/cycle 0.000 5
Executed Ipc Elapsed 1.196 inst/cycle 0.000 5
Issue Slots Busy 59.426 % 0.025 5
Issued Ipc Active 2.378 inst/cycle 0.000 5
SM Busy 59.426 % 0.025 5
Memory Throughput 226778208156.194 byte/second 1922962412906271488.000 5
Mem Busy 11.960 % 0.006 5
Max Bandwidth 11.960 % 0.006 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 67.192 % 0.025 5
Mem Pipes Busy 11.960 % 0.006 5
Warp Cycles Per Issued Instruction 21.862 cycle 0.002 5
Warp Cycles Per Executed Instruction 22.614 cycle 0.002 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 31.090 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 4.000 block 0.000 5
Block Limit Shared Mem 8.000 block 0.000 5
Block Limit Warps 2.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 81.862 % 0.006 5
Achieved Active Warps Per SM 52.390 warp 0.002 5
Analysis Rules
Rule Description
INF HighPipeUtilization ALU is the highest-utilized pipeline (50.8%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (81.9%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
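The occupancy note above can be sanity-checked from the block limits in the metrics table: with 1024 threads per block (32 warps) and a limit of 2 resident blocks per SM, the theoretical limit is 64 warps, and 52.39 achieved active warps correspond to roughly 81.9% occupancy. A small arithmetic sketch, assuming the 64-warp-per-SM capacity the table reports:

# Consistency check of the occupancy figures above (assumes a 64-warp SM,
# as "Theoretical Active Warps per SM = 64" implies).
threads_per_block = 1024
warps_per_block = threads_per_block // 32                  # 32 warps per block
block_limit_warps = 2                                      # "Block Limit Warps" above
theoretical_warps = warps_per_block * block_limit_warps    # 64 -> 100% theoretical occupancy
achieved_warps = 52.39                                     # "Achieved Active Warps Per SM" above
print(achieved_warps / theoretical_warps)                  # ~0.819, i.e. the 81.9% achieved occupancy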
Operation / Metric Value Unit
aten::to
CPU Time 216024.12 μs
Device Time 40.35 μs
Self CPU Time 41.14 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 215982.98 μs
Device Time 40.35 μs
Self CPU Time 84.63 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 241354.07 μs
Device Time 0.00 μs
Self CPU Time 25798.99 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 215351.89 μs
Device Time 0.00 μs
Self CPU Time 215351.89 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 390017.27 μs
Device Time 40111.22 μs
Self CPU Time 390017.27 μs
Self Device Time 40111.22 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 27047.53 μs
Device Time 39648.04 μs
Self CPU Time 27047.53 μs
Self Device Time 39648.04 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 79441.09 μs
Device Time 607911.27 μs
Self CPU Time 17713.62 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 61728.92 μs
Device Time 607911.27 μs
Self CPU Time 21289.63 μs
Self Device Time 607911.27 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 607911.27 μs
Self CPU Time 0.00 μs
Self Device Time 607911.27 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45280 warnings generated when compiling for host.
Suppressed 45321 warnings (45274 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b2_s3_warp_hardsigmoid_opt/base/base.cu:47:19: warning: performing an implicit widening conversion to type 'size_t' (aka 'unsigned long') of a multiplication performed in type 'unsigned int' [bugprone-implicit-widening-of-multiplication-result]
47 | size_t stride = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b2_s3_warp_hardsigmoid_opt/base/base.cu:47:19: note: make conversion explicit to silence this warning
47 | size_t stride = blockDim.x * gridDim.x;
| ^~~~~~~~~~~~~~~~~~~~~~
| static_cast<size_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b2_s3_warp_hardsigmoid_opt/base/base.cu:47:19: note: perform multiplication in a wider type
47 | size_t stride = blockDim.x * gridDim.x;
| ^~~~~~~~~~
| static_cast<size_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b2_s3_warp_hardsigmoid_opt/base/base.cu:52:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
52 | int lane = threadIdx.x & (warpSize - 1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b2_s3_warp_hardsigmoid_opt/base/base.cu:90:22: warning: narrowing conversion from 'size_t' (aka 'unsigned long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
90 | const int blocks = (numel + threads - 1) / threads;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b2_s3_warp_hardsigmoid_opt/base/base.cu:92:3: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
92 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "warp_hardsigmoid_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^