
The AI CUDA Engineer 👷

28_HardSigmoid • even_chunk_hardsigmoid_base

Level 1 • Task 28
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies HardSigmoid activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
    """
    return F.hardsigmoid(x)


class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return torch.nn.functional.hardsigmoid(x)

batch_size = 16
dim = 16384

def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]

def get_init_inputs():
    return []  # No special initialization inputs needed
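
For reference, HardSigmoid is the piecewise-linear approximation clamp((x + 3) / 6, 0, 1). As a quick sanity check (a minimal sketch, not part of the benchmark harness above), the formula can be verified against PyTorch's built-in directly:

import torch
import torch.nn.functional as F

x = torch.randn(16, 16384)
# HardSigmoid: clamp((x + 3) / 6, 0, 1)
reference = torch.clamp((x + 3.0) / 6.0, min=0.0, max=1.0)
assert torch.allclose(F.hardsigmoid(x), reference)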

Kernel Information

Related Kernels (Level 1, Task 28 • 28_HardSigmoid)

Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile)
🥇 | hardsigmoid_warp_vectorized_base | 0.01 | 1.12 | 4.96
🥇 | hardsigmoid_shared_optimized_edit_1 | 0.01 | 1.12 | 4.96
🥇 | hardsigmoid_unrolled_optimized_edit_1 | 0.01 | 1.12 | 4.96
🥇 | hardsigmoid_unrolled_optimized_base | 0.01 | 1.12 | 4.96
🥇 | evenly_distributed_hardsigmoid_base | 0.01 | 1.12 | 4.96
6 | divergence_reduced_hardsigmoid_base_base | 0.01 | 0.96 | 4.25
6 | constant_mem_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | warp_hardsigmoid_opt_base | 0.01 | 0.96 | 4.25
6 | 28_HardSigmoid | 0.01 | 0.96 | 4.25
6 | modular_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | modular_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | branchless_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | warp_optimized_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | optimized_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | warp_broadcast_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | vectorized_coalesced_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | vectorized_coalesced_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | shared_memory_hardsigmoid_base_base | 0.01 | 0.96 | 4.25
6 | warp_optimized_hardsigmoid_base | 0.01 | 0.96 | 4.25
6 | even_chunk_hardsigmoid_base | 0.01 | 0.96 | 4.25

#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// CUDA kernel that distributes workloads evenly by assigning each thread a contiguous chunk of data
template <typename scalar_t>
__global__ void even_chunk_hardsigmoid_kernel(const scalar_t* __restrict__ input,
                                               scalar_t* __restrict__ output,
                                               size_t numel) {
  // Compute global thread id and total number of threads
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int total_threads = gridDim.x * blockDim.x;

  // Calculate the number of elements each thread should process (ceiling division)
  size_t items_per_thread = (numel + total_threads - 1) / total_threads;

  // Determine the contiguous block of indices this thread will handle
  size_t start = tid * items_per_thread;
  size_t end = start + items_per_thread;
  if (end > numel) end = numel;
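
  // Worked example: numel = 10, total_threads = 4 gives items_per_thread = 3,
  // so thread 0 covers [0, 3), thread 1 [3, 6), thread 2 [6, 9), and
  // thread 3 [9, 10) after the clamp above.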

  // Process each element in the assigned contiguous chunk
  for (size_t i = start; i < end; i++) {
    scalar_t x = input[i];
    scalar_t y = (x + static_cast<scalar_t>(3)) / static_cast<scalar_t>(6);
    // Clamp y to the range [0, 1]
    if (y < static_cast<scalar_t>(0))
      y = static_cast<scalar_t>(0);
    else if (y > static_cast<scalar_t>(1))
      y = static_cast<scalar_t>(1);
    output[i] = y;
  }
}

// Host function that dispatches the kernel
torch::Tensor forward(torch::Tensor input) {
  TORCH_CHECK(input.is_cuda(), "Input tensor must be on CUDA");
  auto output = torch::empty_like(input);
  size_t numel = input.numel();

  // Configure kernel launch parameters
  // Using 1024 threads per block; blocks is computed to cover all elements
  const int threads = 1024;
  const int blocks = (numel + threads - 1) / threads;
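  // Note: with this configuration total_threads >= numel, so items_per_thread
  // works out to 1 and each thread handles at most one element; the chunked
  // loop in the kernel only processes longer runs if a smaller grid is launched.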

  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "even_chunk_hardsigmoid_cuda", ([&] {
    even_chunk_hardsigmoid_kernel<scalar_t><<<blocks, threads>>>(
      input.data_ptr<scalar_t>(),
      output.data_ptr<scalar_t>(),
      numel);
  }));

  cudaError_t err = cudaGetLastError();
  TORCH_CHECK(err == cudaSuccess, "CUDA kernel failed: ", cudaGetErrorString(err));

  return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &forward, "HardSigmoid activation forward (CUDA) with even workload chunk distribution");
}
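
Assuming the source above is saved as even_chunk_hardsigmoid.cu (the file name here is illustrative), the extension can be built with torch.utils.cpp_extension.load and checked against the PyTorch reference. A minimal sketch:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source above (file name is an assumption).
ext = load(name="even_chunk_hardsigmoid", sources=["even_chunk_hardsigmoid.cu"])

x = torch.randn(16, 16384, device="cuda")
y = ext.forward(x)
assert torch.allclose(y, F.hardsigmoid(x))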
Performance Metrics
Metric | Value | Unit | Variance | Samples
Executed Ipc Active | 1.678 | inst/cycle | 0.004 | 5
Executed Ipc Elapsed | 0.814 | inst/cycle | 0.000 | 5
Issue Slots Busy | 42.702 | % | 2.845 | 5
Issued Ipc Active | 1.706 | inst/cycle | 0.005 | 5
SM Busy | 42.702 | % | 2.845 | 5
Memory Throughput | 242270239803.094 | byte/second | 10870226859294586880.000 | 5
Mem Busy | 11.440 | % | 0.033 | 5
Max Bandwidth | 10.696 | % | 0.027 | 5
L1/TEX Hit Rate | 0.000 | % | 0.000 | 5
L2 Hit Rate | 68.230 | % | 0.090 | 5
Mem Pipes Busy | 9.106 | % | 0.018 | 5
Warp Cycles Per Issued Instruction | 29.932 | cycle | 0.005 | 5
Warp Cycles Per Executed Instruction | 30.474 | cycle | 0.005 | 5
Avg. Active Threads Per Warp | 32.000 | — | 0.000 | 5
Avg. Not Predicated Off Threads Per Warp | 29.510 | — | 0.000 | 5
Max Active Clusters | 0.000 | cluster | 0.000 | 5
Max Cluster Size | 8.000 | block | 0.000 | 5
Overall GPU Occupancy | 0.000 | % | 0.000 | 5
Cluster Occupancy | 0.000 | % | 0.000 | 5
Block Limit SM | 32.000 | block | 0.000 | 5
Block Limit Registers | 2.000 | block | 0.000 | 5
Block Limit Shared Mem | 8.000 | block | 0.000 | 5
Block Limit Warps | 2.000 | block | 0.000 | 5
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5
Theoretical Occupancy | 100.000 | % | 0.000 | 5
Achieved Occupancy | 82.204 | % | 0.223 | 5
Achieved Active Warps Per SM | 52.608 | warp | 0.090 | 5
Analysis Rules
Rule | Description
INF HighPipeUtilization | ALU is the highest-utilized pipeline (41.8%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck.
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (81.9%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation | CPU Time | Device Time | Self CPU Time | Self Device Time
aten::to | 600157.56 μs | 40.38 μs | 35.12 μs | 0.00 μs
aten::_to_copy | 600122.45 μs | 40.38 μs | 85.08 μs | 0.00 μs
aten::empty_strided | 619962.98 μs | 0.00 μs | 20286.43 μs | 0.00 μs
cudaDeviceGetStreamPriorityRange | 592692.57 μs | 0.00 μs | 592692.57 μs | 0.00 μs
cudaLaunchKernel | 514815.51 μs | 627.71 μs | 514815.51 μs | 627.71 μs
void even_chunk_hardsigmoid_kernel<float>(float const*, float*, unsigned long) | 0.00 μs | 30274.26 μs | 0.00 μs | 30274.26 μs
cudaEventRecord | 23761.33 μs | 43162.96 μs | 23761.33 μs | 43162.96 μs
aten::zero_ | 68836.84 μs | 661182.26 μs | 12354.60 μs | 0.00 μs
aten::fill_ | 56484.04 μs | 661182.26 μs | 15853.88 μs | 661182.26 μs
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | 0.00 μs | 661261.78 μs | 0.00 μs | 661261.78 μs

All operations report 0 B for CPU, device, self CPU, and self device memory usage.
Status: Completed
45280 warnings generated when compiling for host.
Suppressed 45321 warnings (45274 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b6_s2_even_chunk_hardsigmoid/base/base.cu:11:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
11 | int tid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b6_s2_even_chunk_hardsigmoid/base/base.cu:12:23: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
12 | int total_threads = gridDim.x * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b6_s2_even_chunk_hardsigmoid/base/base.cu:44:22: warning: narrowing conversion from 'size_t' (aka 'unsigned long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
44 | const int blocks = (numel + threads - 1) / threads;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_28/b6_s2_even_chunk_hardsigmoid/base/base.cu:46:3: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
46 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "even_chunk_hardsigmoid_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^