
The AI CUDA Engineer 👷

28_HardSigmoid • vectorized_coalesced_hardsigmoid_base

Level 1 • Task 28
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies HardSigmoid activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
    """
    return F.hardsigmoid(x)


class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return torch.nn.functional.hardsigmoid(x)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return []  # No special initialization inputs needed
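
For reference, HardSigmoid is the piecewise-linear approximation y = clamp((x + 3) / 6, 0, 1), which is exactly the formula the CUDA kernel below implements. The following short sketch (not part of the benchmark harness) checks the manual formula against F.hardsigmoid on the benchmark input shape:

import torch
import torch.nn.functional as F

# Hedged sketch: confirm the clamp-based formula matches F.hardsigmoid
# on the benchmark input shape (16 x 16384).
x = torch.randn(16, 16384)
manual = torch.clamp((x + 3.0) / 6.0, min=0.0, max=1.0)
assert torch.allclose(manual, F.hardsigmoid(x))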

Kernel Information

Related Kernels (Level 1, Task 28 • 28_HardSigmoid)

Rank  Kernel Name  Runtime (ms)  Speedup (Native)  Speedup (Compile)
🥇 hardsigmoid_warp_vectorized_base 0.01 1.12 4.96
🥇 hardsigmoid_shared_optimized_edit_1 0.01 1.12 4.96
🥇 hardsigmoid_unrolled_optimized_edit_1 0.01 1.12 4.96
🥇 hardsigmoid_unrolled_optimized_base 0.01 1.12 4.96
🥇 evenly_distributed_hardsigmoid_base 0.01 1.12 4.96
6 divergence_reduced_hardsigmoid_base_base 0.01 0.96 4.25
6 constant_mem_hardsigmoid_base 0.01 0.96 4.25
6 warp_hardsigmoid_opt_base 0.01 0.96 4.25
6 28_HardSigmoid 0.01 0.96 4.25
6 modular_hardsigmoid_base 0.01 0.96 4.25
6 modular_hardsigmoid_base 0.01 0.96 4.25
6 branchless_hardsigmoid_base 0.01 0.96 4.25
6 warp_optimized_hardsigmoid_base 0.01 0.96 4.25
6 optimized_hardsigmoid_base 0.01 0.96 4.25
6 warp_broadcast_hardsigmoid_base 0.01 0.96 4.25
6 vectorized_coalesced_hardsigmoid_base 0.01 0.96 4.25
6 vectorized_coalesced_hardsigmoid_base 0.01 0.96 4.25
6 shared_memory_hardsigmoid_base_base 0.01 0.96 4.25
6 warp_optimized_hardsigmoid_base 0.01 0.96 4.25
6 even_chunk_hardsigmoid_base 0.01 0.96 4.25
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <type_traits>

// Vectorized and coalesced HardSigmoid kernel
// Computes y = clamp((x + 3) / 6, 0, 1) using vectorized global memory accesses

template <typename scalar_t>
__global__ void vectorized_coalesced_hardsigmoid_kernel(const scalar_t* __restrict__ input,
                                                           scalar_t* __restrict__ output,
                                                           size_t numel) {
  // Calculate global thread index and stride
  size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  size_t stride = blockDim.x * gridDim.x;

  // Use vectorized loads/stores for memory coalescing based on precision
  if constexpr (std::is_same<scalar_t, float>::value) {
    // For float, use float4 (4 floats at a time)
    constexpr int vecSize = 4;
    using vec_t = float4;
    size_t num_vec = numel / vecSize;  // number of vectorized elements

    // Process main vectorized portion
    for (size_t i = idx; i < num_vec; i += stride) {
      vec_t in_vec = reinterpret_cast<const vec_t*>(input)[i];
      vec_t out_vec;
      out_vec.x = fminf(fmaxf((in_vec.x + 3.0f) / 6.0f, 0.0f), 1.0f);
      out_vec.y = fminf(fmaxf((in_vec.y + 3.0f) / 6.0f, 0.0f), 1.0f);
      out_vec.z = fminf(fmaxf((in_vec.z + 3.0f) / 6.0f, 0.0f), 1.0f);
      out_vec.w = fminf(fmaxf((in_vec.w + 3.0f) / 6.0f, 0.0f), 1.0f);
      reinterpret_cast<vec_t*>(output)[i] = out_vec;
    }

    // Process any remaining elements
    size_t tail_start = num_vec * vecSize;
    for (size_t i = idx; i < (numel - tail_start); i += stride) {
      size_t index = tail_start + i;
      float x = input[index];
      float y = (x + 3.0f) / 6.0f;
      y = fminf(fmaxf(y, 0.0f), 1.0f);
      output[index] = y;
    }
  } else {
    // For double, use double2 (2 doubles at a time)
    constexpr int vecSize = 2;
    using vec_t = double2;
    size_t num_vec = numel / vecSize;

    for (size_t i = idx; i < num_vec; i += stride) {
      vec_t in_vec = reinterpret_cast<const vec_t*>(input)[i];
      vec_t out_vec;
      out_vec.x = fmin(fmax((in_vec.x + 3.0) / 6.0, 0.0), 1.0);
      out_vec.y = fmin(fmax((in_vec.y + 3.0) / 6.0, 0.0), 1.0);
      reinterpret_cast<vec_t*>(output)[i] = out_vec;
    }

    // Handle tail elements for double
    size_t tail_start = num_vec * vecSize;
    for (size_t i = idx; i < (numel - tail_start); i += stride) {
      size_t index = tail_start + i;
      double x = input[index];
      double y = (x + 3.0) / 6.0;
      y = fmin(fmax(y, 0.0), 1.0);
      output[index] = y;
    }
  }
}

// Host function to launch the vectorized kernel

torch::Tensor forward(torch::Tensor input) {
  TORCH_CHECK(input.is_cuda(), "Input tensor must be on CUDA");
  auto output = torch::empty_like(input);
  size_t numel = input.numel();
  const int threads = 1024;
  const int blocks = (numel + threads - 1) / threads;

  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "vectorized_coalesced_hardsigmoid_cuda", ([&] {
    vectorized_coalesced_hardsigmoid_kernel<scalar_t><<<blocks, threads>>>(
      input.data_ptr<scalar_t>(),
      output.data_ptr<scalar_t>(),
      numel
    );
  }));

  cudaError_t err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess, "CUDA kernel failed: ", cudaGetErrorString(err));
  return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &forward, "Vectorized and coalesced HardSigmoid activation (CUDA)");
}
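
One way to try the kernel end to end is to JIT-compile it with torch.utils.cpp_extension.load and compare it against the PyTorch reference. The sketch below makes assumptions not stated above: the CUDA source is saved as hardsigmoid.cu, a CUDA toolchain is available, and the module name is arbitrary.

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Hedged usage sketch: the file name "hardsigmoid.cu" and the module name
# are assumptions; forward() is the function exposed via PYBIND11 above.
ext = load(name="vectorized_coalesced_hardsigmoid", sources=["hardsigmoid.cu"])

x = torch.randn(16, 16384, device="cuda")
torch.testing.assert_close(ext.forward(x), F.hardsigmoid(x))
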
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 1.086 inst/cycle 0.000 5
Executed Ipc Elapsed 0.364 inst/cycle 0.000 5
Issue Slots Busy 32.194 % 0.278 5
Issued Ipc Active 1.286 inst/cycle 0.000 5
SM Busy 32.194 % 0.278 5
Memory Throughput 280053929605.488 byte/second 15184891815191379968.000 5
Mem Busy 13.298 % 0.056 5
Max Bandwidth 12.184 % 0.048 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 67.164 % 0.003 5
Mem Pipes Busy 5.292 % 0.008 5
Warp Cycles Per Issued Instruction 37.946 cycle 8.649 5
Warp Cycles Per Executed Instruction 44.994 cycle 12.198 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 31.540 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 2.000 block 0.000 5
Block Limit Shared Mem 8.000 block 0.000 5
Block Limit Warps 2.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 74.236 % 0.749 5
Achieved Active Warps Per SM 47.512 warp 0.310 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (73.5%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
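
The occupancy figures above are consistent with the launch configuration: 1024 threads per block is 32 warps per block, and the Block Limit Warps of 2 blocks yields the 64 theoretical active warps per SM reported in the table (100% theoretical occupancy), while the achieved 47.5 warps per SM corresponds to roughly 74% achieved occupancy. A quick back-of-the-envelope check (assuming 64 warps per SM is the hardware maximum, as the table implies):

# Back-of-the-envelope occupancy check from the launch config and the rows above.
threads_per_block = 1024
warps_per_block = threads_per_block // 32                  # 32 warps per block
block_limit_warps = 2                                      # "Block Limit Warps" row
theoretical_warps = block_limit_warps * warps_per_block    # 64, matches the table
achieved_warps = 47.512                                    # "Achieved Active Warps Per SM" row
print(achieved_warps / theoretical_warps)                  # ~0.74, i.e. ~74% achieved occupancy
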
Operation / Metric Value Unit
aten::to
CPU Time 433959.03 μs
Device Time 40.10 μs
Self CPU Time 31.35 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 433927.68 μs
Device Time 40.10 μs
Self CPU Time 83.07 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 453543.00 μs
Device Time 0.00 μs
Self CPU Time 20071.38 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 433290.78 μs
Device Time 0.00 μs
Self CPU Time 433290.78 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 502948.35 μs
Device Time 627.16 μs
Self CPU Time 502948.35 μs
Self Device Time 627.16 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void vectorized_coalesced_hardsigmoid_kernel<float>(float const*, float*, unsigned long)
CPU Time 0.00 μs
Device Time 26209.38 μs
Self CPU Time 0.00 μs
Self Device Time 26209.38 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 22881.47 μs
Device Time 42460.51 μs
Self CPU Time 22881.47 μs
Self Device Time 42460.51 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 69153.78 μs
Device Time 648904.18 μs
Self CPU Time 12517.36 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 56638.10 μs
Device Time 648904.18 μs
Self CPU Time 16024.52 μs
Self Device Time 648904.18 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 648981.65 μs
Self CPU Time 0.00 μs
Self Device Time 648981.65 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45279 warnings generated when compiling for host.
Suppressed 45321 warnings (45274 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b7_s2_vectorized_coalesced_hardsigmoid/base/base.cu:15:19: warning: performing an implicit widening conversion to type 'size_t' (aka 'unsigned long') of a multiplication performed in type 'unsigned int' [bugprone-implicit-widening-of-multiplication-result]
15 | size_t stride = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b7_s2_vectorized_coalesced_hardsigmoid/base/base.cu:15:19: note: make conversion explicit to silence this warning
4 | #include <type_traits>
5 |
6 | // Vectorized and coalesced HardSigmoid kernel
7 | // Computes y = clamp((x + 3) / 6, 0, 1) using vectorized global memory accesses
8 |
9 | template <typename scalar_t>
10 | __global__ void vectorized_coalesced_hardsigmoid_kernel(const scalar_t* __restrict__ input,
11 | scalar_t* __restrict__ output,
12 | size_t numel) {
13 | // Calculate global thread index and stride
14 | size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
15 | size_t stride = blockDim.x * gridDim.x;
| ^~~~~~~~~~~~~~~~~~~~~~
| static_cast<size_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b7_s2_vectorized_coalesced_hardsigmoid/base/base.cu:15:19: note: perform multiplication in a wider type
15 | size_t stride = blockDim.x * gridDim.x;
| ^~~~~~~~~~
| static_cast<size_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b7_s2_vectorized_coalesced_hardsigmoid/base/base.cu:77:22: warning: narrowing conversion from 'size_t' (aka 'unsigned long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
77 | const int blocks = (numel + threads - 1) / threads;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b7_s2_vectorized_coalesced_hardsigmoid/base/base.cu:79:3: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
79 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "vectorized_coalesced_hardsigmoid_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
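
As the clang-tidy notes suggest, the widening and narrowing warnings could be silenced with explicit casts. The sketch below is illustrative only and is not applied to the kernel above; it shows the cast-based fixes for the two flagged lines in isolation.

#include <cuda_runtime.h>
#include <cstddef>

// Illustrative only: the two cast fixes suggested by the clang-tidy notes above.
__global__ void index_math_example(std::size_t numel) {
  // line 15: perform the index multiplication in a wider type
  std::size_t idx    = static_cast<std::size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  std::size_t stride = static_cast<std::size_t>(blockDim.x) * gridDim.x;
  (void)idx; (void)stride; (void)numel;
}

void grid_size_example(std::size_t numel) {
  const int threads = 1024;
  // line 77: make the size_t -> int narrowing explicit
  const int blocks = static_cast<int>((numel + threads - 1) / threads);
  (void)blocks;
}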