
The AI CUDA Engineer 👷

89_cumsum • hybrid_adaptive_cumsum_base

Level 1 • Task 89
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Performs a cumulative sum operation.

    Args:
        x (torch.Tensor): Input tensor.
        dim (int): The dimension along which to perform the cumulative sum.

    Returns:
        torch.Tensor: Output tensor.
    """
    return torch.cumsum(x, dim=dim)


class Model(nn.Module):
    """
    A simple model that performs a cumulative sum (prefix sum) operation along a specified dimension.
    """

    def __init__(self, dim):
        """
        Initialize the Scan model.

        Args:
            dim (int): The dimension along which to perform the cumulative sum.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x, fn=module_fn):
        """
        Forward pass for the Scan model, computing the cumulative sum along the specified dimension.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, *input_shape)
            fn (callable): Function to compute the output, defaults to module_fn

        Returns:
            torch.Tensor: Output tensor of the same shape as `x`.
        """
        return fn(x, self.dim)


# Define input dimensions and parameters
batch_size = 128
input_shape = (4000,)  # Example shape (arbitrary)
dim = 1


def get_inputs():
    """
    Generates random inputs for testing the Scan model.

    Returns:
        list: A list containing a single randomly generated tensor with shape
              (batch_size, *input_shape).
    """
    return [torch.randn(batch_size, *input_shape)]


def get_init_inputs():
    """
    Returns the initialization parameters for the Scan model.

    Returns:
        list: A list containing the `dim` parameter for model initialization.
    """
    return [dim]
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    A simple model that performs a cumulative sum (prefix sum) operation along a specified dimension.

    Parameters:
        dim (int): The dimension along which to perform the scan operation.
    """

    def __init__(self, dim):
        """
        Initialize the Scan model.

        Args:
            dim (int): The dimension along which to perform the cumulative sum.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x):
        """
        Forward pass for the Scan model, computing the cumulative sum along the specified dimension.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, *input_shape), where `*input_shape` 
                              can vary depending on the use case.

        Returns:
            torch.Tensor: Tensor of the same shape as `x` after applying cumulative sum along `dim`.
        """
        return torch.cumsum(x, dim=self.dim)

# Define input dimensions and parameters
batch_size = 128
input_shape = (4000,)  # Example shape (arbitrary)
dim = 1

def get_inputs():
    """
    Generates random inputs for testing the Scan model.

    Returns:
        list: A list containing a single randomly generated tensor with shape 
              (batch_size, *input_shape).
    """
    return [torch.randn(batch_size, *input_shape)]

def get_init_inputs():
    """
    Returns the initialization parameters for the Scan model.

    Returns:
        list: A list containing the `dim` parameter for model initialization.
    """
    return [dim]
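
A minimal driver sketch (not part of the original task files) showing how a benchmark harness would presumably wire these entry points together: the model is constructed from get_init_inputs() and evaluated on get_inputs().

# Hypothetical driver, shown only to illustrate how the task entry points fit
# together; the actual benchmark harness is not included in this listing.
if __name__ == "__main__":
    model = Model(*get_init_inputs())   # dim = 1
    (x,) = get_inputs()                 # random (128, 4000) tensor
    y = model(x)                        # cumulative sum along dim 1
    assert y.shape == x.shape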

Kernel Information

Related Kernels (Level 1, Task 89 • 89_cumsum)

Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile)
🥇 hybrid_aligned_cumsum_edit_1 0.01 2.21 2.14
🥇 tile_scan_cumsum_base 0.01 2.21 2.14
🥇 aligned_cumsum_ldg_edit_1 0.01 2.21 2.14
🥇 hybrid_aligned_cumsum_base 0.01 2.21 2.14
🥇 aligned_cumsum_ldg_base 0.01 2.21 2.14
🥇 tile_scan_cumsum_edit_1 0.01 2.21 2.14
🥇 shared_memory_cumsum_base 0.01 2.21 2.14
8 cumsum_even_dist_edit_1 0.01 2.07 2.00
8 hybrid_cumsum_edit_1 0.01 2.07 2.00
8 parallel_cumsum_base 0.01 2.07 2.00
8 cumsum_even_dist_base 0.01 2.07 2.00
8 parallel_cumsum_unroll_base 0.01 2.07 2.00
8 hybrid_cumsum_base 0.01 2.07 2.00
8 modular_cumsum_base 0.01 2.07 2.00
15 parallel_cumsum_stride_base 0.02 1.48 1.43
16 parallel_cumsum_stride_edit_1 0.02 1.29 1.25
17 cumsum_warp_atomic_base_base 0.04 0.82 0.79
18 cumsum_optimized_sync_base 0.04 0.79 0.77
18 cumsum_optimized_sync_base 0.04 0.79 0.77
20 hybrid_adaptive_cumsum_base 0.04 0.77 0.75
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <algorithm>  // std::min

#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Thresholds for switching between the two kernels below: the sequential
// per-thread kernel is used when the scan length (stride) is at most
// STRIDE_THRESHOLD or the inner size is at least INNER_SIZE_THRESHOLD;
// otherwise the warp-scan kernel is used.
#define STRIDE_THRESHOLD 128
#define INNER_SIZE_THRESHOLD 64

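// Sequential-scan kernel: one block per outer index. Each thread owns one or
// more inner indices and walks the full scan dimension for each, keeping a
// running sum in a register. Reads go through __ldg to use the read-only cache.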
__global__ void cumsum_kernel_small_stride(const float* __restrict__ input, 
                                         float* output,
                                         int outer_size, 
                                         int inner_size, 
                                         int stride) {
    int outer_idx = blockIdx.x;
    if (outer_idx >= outer_size) return;

    for (int inner_idx = threadIdx.x; inner_idx < inner_size; inner_idx += blockDim.x) {
        float sum = 0.0f;
        int base = outer_idx * stride * inner_size + inner_idx;
        
        #pragma unroll 8
        for (int s = 0; s < stride; s++) {
            int idx = base + s * inner_size;
            sum += __ldg(&input[idx]);
            output[idx] = sum;
        }
    }
}

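// Warp-scan kernel: one 32-thread block (a single warp) per (outer, inner)
// pair. The scan dimension is processed in 32-element segments; each segment
// is scanned with __shfl_up_sync, and the segment total is carried into the
// next segment via warp_total.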
__global__ void cumsum_kernel_large_stride(const float* __restrict__ input, 
                                         float* output,
                                         int inner_size, 
                                         int stride) {
    int idx = blockIdx.x;
    int outer_idx = idx / inner_size;
    int inner_idx = idx % inner_size;
    
    const int warp_size = 32;
    int total_segments = (stride + warp_size - 1) / warp_size;
    int lane = threadIdx.x;
    float warp_total = 0.0f;
    unsigned mask = 0xffffffff;

    for (int seg = 0; seg < total_segments; seg++) {
        int s = seg * warp_size + lane;
        float val = 0.0f;
        if (s < stride) {
            int global_idx = outer_idx * (stride * inner_size) + s * inner_size + inner_idx;
            val = input[global_idx];
        }
        
        for (int offset = 1; offset < warp_size; offset *= 2) {
            float n = __shfl_up_sync(mask, val, offset);
            if (lane >= offset) val += n;
        }
        
        val += warp_total;
        
        if (s < stride) {
            int global_idx = outer_idx * (stride * inner_size) + s * inner_size + inner_idx;
            output[global_idx] = val;
        }
        
        int valid_count = min(warp_size, stride - seg * warp_size);
        warp_total = __shfl_sync(mask, val, valid_count - 1);
    }
}

torch::Tensor forward(torch::Tensor x, int dim) {
    CHECK_INPUT(x);
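    // Note: both kernels assume contiguous float32 input (data_ptr<float>()
    // below); other dtypes are not checked or handled here.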
    
    auto output = torch::empty_like(x);
    int ndim = x.dim();
    dim = (dim + ndim) % ndim;
    
    int outer_size = 1;
    for (int i = 0; i < dim; i++) outer_size *= x.size(i);
    
    int inner_size = 1;
    for (int i = dim + 1; i < ndim; i++) inner_size *= x.size(i);
    
    int stride = x.size(dim);
    
    // Choose algorithm based on tensor dimensions: short scans or wide inner
    // dimensions favor the per-thread sequential kernel; long scans with few
    // inner elements favor the warp-level scan.
    if (stride <= STRIDE_THRESHOLD || inner_size >= INNER_SIZE_THRESHOLD) {
        int threads = std::min(256, inner_size);
        cumsum_kernel_small_stride<<<outer_size, threads>>>(
            x.data_ptr<float>(), output.data_ptr<float>(), 
            outer_size, inner_size, stride
        );
    } else {
        int total_blocks = outer_size * inner_size;
        cumsum_kernel_large_stride<<<total_blocks, 32>>>(
            x.data_ptr<float>(), output.data_ptr<float>(), 
            inner_size, stride
        );
    }
    
    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Hybrid adaptive CUDA cumulative sum");
}
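
A minimal usage sketch, assuming the CUDA source above is saved as hybrid_adaptive_cumsum.cu (the file and module names are illustrative, not taken from the original listing). It builds the extension with torch.utils.cpp_extension.load and checks the result against torch.cumsum; the kernel only handles contiguous float32 CUDA tensors.

import torch
from torch.utils.cpp_extension import load

# Build the extension from the source file above (name/path are assumptions).
ext = load(name="hybrid_adaptive_cumsum",
           sources=["hybrid_adaptive_cumsum.cu"],
           verbose=True)

x = torch.randn(128, 4000, device="cuda", dtype=torch.float32).contiguous()
out = ext.forward(x, 1)        # custom hybrid kernel
ref = torch.cumsum(x, dim=1)   # PyTorch reference
print(torch.allclose(out, ref, rtol=1e-4, atol=1e-4))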
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.080 inst/cycle 0.000 5
Executed Ipc Elapsed 0.070 inst/cycle 0.000 5
Issue Slots Busy 1.904 % 0.000 5
Issued Ipc Active 0.080 inst/cycle 0.000 5
SM Busy 1.904 % 0.000 5
Memory Throughput 50803249019.492 byte/second 30390039137126936.000 5
Mem Busy 1.646 % 0.000 5
Max Bandwidth 1.846 % 0.000 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 59.974 % 0.002 5
Mem Pipes Busy 1.530 % 0.000 5
Warp Cycles Per Issued Instruction 13.148 cycle 0.001 5
Warp Cycles Per Executed Instruction 13.150 cycle 0.001 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 31.070 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 64.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 64.000 block 0.000 5
Theoretical Active Warps per SM 32.000 warp 0.000 5
Theoretical Occupancy 50.000 % 0.000 5
Achieved Occupancy 1.560 % 0.000 5
Achieved Active Warps Per SM 1.000 warp 0.000 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy (50.0%) is limited by the number of blocks that can fit on the SM. This kernel's theoretical occupancy (50.0%) is limited by the required amount of shared memory. The difference between calculated theoretical (50.0%) and measured achieved occupancy (1.6%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 221015.39 μs
Device Time 159.20 μs
Self CPU Time 34.62 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 240281.52 μs
Device Time 0.00 μs
Self CPU Time 19896.65 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 736234.40 μs
Device Time 21856.64 μs
Self CPU Time 736234.40 μs
Self Device Time 21856.64 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cumsum_kernel_large_stride(float const*, float*, int, int)
CPU Time 0.00 μs
Device Time 292650.10 μs
Self CPU Time 0.00 μs
Self Device Time 292650.10 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 17852.79 μs
Device Time 42152.65 μs
Self CPU Time 17852.79 μs
Self Device Time 42152.65 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 251228.06 μs
Device Time 623519.61 μs
Self CPU Time 12192.16 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 239037.42 μs
Device Time 623519.61 μs
Self CPU Time 17214.22 μs
Self Device Time 623519.61 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 623519.61 μs
Self CPU Time 0.00 μs
Self Device Time 623519.61 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45290 warnings generated when compiling for host.
Suppressed 45324 warnings (45277 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:5:35: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
5 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:6:41: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
6 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:15:42: warning: 2 adjacent parameters of 'cumsum_kernel_small_stride' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
15 | int outer_size,
| ^~~~~~~~~~~~~~~
16 | int inner_size,
| ~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:15:46: note: the first parameter in the range is 'outer_size'
15 | int outer_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:16:46: note: the last parameter in the range is 'inner_size'
16 | int inner_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:18:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
18 | int outer_idx = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:21:26: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
21 | for (int inner_idx = threadIdx.x; inner_idx < inner_size; inner_idx += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:21:76: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
21 | for (int inner_idx = threadIdx.x; inner_idx < inner_size; inner_idx += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:38:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
38 | int idx = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:44:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
44 | int lane = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:73:37: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
73 | torch::Tensor forward(torch::Tensor x, int dim) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:77:16: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
77 | int ndim = x.dim();
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:81:49: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
81 | for (int i = 0; i < dim; i++) outer_size *= x.size(i);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:84:56: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
84 | for (int i = dim + 1; i < ndim; i++) inner_size *= x.size(i);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b8_s2_hybrid_adaptive_cumsum/base/base.cu:86:18: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
86 | int stride = x.size(dim);
| ^