
The AI CUDA Engineer 👷

89_cumsum • cumsum_optimized_sync_base

Level 1 • Task 89
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Performs a cumulative sum operation.

    Args:
        x (torch.Tensor): Input tensor.
        dim (int): The dimension along which to perform the cumulative sum.

    Returns:
        torch.Tensor: Output tensor.
    """
    return torch.cumsum(x, dim=dim)


class Model(nn.Module):
    """
    A simple model that performs a cumulative sum (prefix sum) operation along a specified dimension.
    """

    def __init__(self, dim):
        """
        Initialize the Scan model.

        Args:
            dim (int): The dimension along which to perform the cumulative sum.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x, fn=module_fn):
        """
        Forward pass for the Scan model, computing the cumulative sum along the specified dimension.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, *input_shape)
            fn (callable): Function to compute the output, defaults to module_fn

        Returns:
            torch.Tensor: Output tensor of the same shape as x.
        """
        return fn(x, self.dim)


# Define input dimensions and parameters
batch_size = 128
input_shape = (4000,)  # Example shape (arbitrary)
dim = 1


def get_inputs():
    """
    Generates random inputs for testing the Scan model.

    Returns:
        list: A list containing a single randomly generated tensor with shape
              (batch_size, *input_shape).
    """
    return [torch.randn(batch_size, *input_shape)]


def get_init_inputs():
    """
    Returns the initialization parameters for the Scan model.

    Returns:
        list: A list containing the `dim` parameter for model initialization.
    """
    return [dim]
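
A minimal sketch (assuming the definitions above are in scope) that runs the harness end to end and checks the output against an explicit accumulation loop:

import torch

# Build the model the same way the harness does: Model(*get_init_inputs())
model = Model(*get_init_inputs())
(x,) = get_inputs()
out = model(x)

# Reference result via an explicit running sum along dim=1 (slow but unambiguous)
ref = torch.empty_like(x)
running = torch.zeros(x.size(0))
for i in range(x.size(1)):
    running = running + x[:, i]
    ref[:, i] = running

torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-3)
print("cumsum harness OK:", tuple(out.shape))
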
Kernel Information

Related Kernels (Level 1, Task 89 • 89_cumsum)

Rank  Kernel Name  Runtime (ms)  Speedup (vs Native)  Speedup (vs Compile)
🥇 hybrid_aligned_cumsum_edit_1 0.01 2.21 2.14
🥇 tile_scan_cumsum_base 0.01 2.21 2.14
🥇 aligned_cumsum_ldg_edit_1 0.01 2.21 2.14
🥇 hybrid_aligned_cumsum_base 0.01 2.21 2.14
🥇 aligned_cumsum_ldg_base 0.01 2.21 2.14
🥇 tile_scan_cumsum_edit_1 0.01 2.21 2.14
🥇 shared_memory_cumsum_base 0.01 2.21 2.14
8 cumsum_even_dist_edit_1 0.01 2.07 2.00
8 hybrid_cumsum_edit_1 0.01 2.07 2.00
8 parallel_cumsum_base 0.01 2.07 2.00
8 cumsum_even_dist_base 0.01 2.07 2.00
8 parallel_cumsum_unroll_base 0.01 2.07 2.00
8 hybrid_cumsum_base 0.01 2.07 2.00
8 modular_cumsum_base 0.01 2.07 2.00
15 parallel_cumsum_stride_base 0.02 1.48 1.43
16 parallel_cumsum_stride_edit_1 0.02 1.29 1.25
17 cumsum_warp_atomic_base_base 0.04 0.82 0.79
18 cumsum_optimized_sync_base 0.04 0.79 0.77
18 cumsum_optimized_sync_base 0.04 0.79 0.77
20 hybrid_adaptive_cumsum_base 0.04 0.77 0.75
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Warp-level inclusive scan (Hillis-Steele): after step k, each lane holds the
// sum of its own value and up to 2^k - 1 preceding lanes in the warp.
__device__ __forceinline__ float warp_scan(float val, const unsigned mask = 0xffffffff) {
    #pragma unroll
    for (int offset = 1; offset < 32; offset *= 2) {
        float n = __shfl_up_sync(mask, val, offset);
        if (threadIdx.x % 32 >= offset) val += n;
    }
    return val;
}

__global__ void cumsum_kernel_optimized(const float* __restrict__ input,
                            float* __restrict__ output,
                            float* __restrict__ warp_sums,
                            const int inner_size,
                            const int stride) {
    const int idx = blockIdx.x;
    const int outer_idx = idx / inner_size;
    const int inner_idx = idx % inner_size;
    const int lane_id = threadIdx.x % 32;
    const int warp_id = threadIdx.x / 32;
    const int warps_per_block = blockDim.x / 32;
    
    // Base index for this thread's work
    const int base_idx = outer_idx * stride * inner_size + inner_idx;
    
    // Process elements in chunks of 32 (warp size)
    for (int warp_start = warp_id * 32; warp_start < stride; warp_start += warps_per_block * 32) {
        float val = 0.0f;
        const int pos = warp_start + lane_id;
        
        if (pos < stride) {
            val = input[base_idx + pos * inner_size];
        }
        
        // Perform warp-level scan
        val = warp_scan(val);
        
        // Lane 31 publishes this chunk's total so later chunks can add it
        if (lane_id == 31 && pos < stride) {
            warp_sums[outer_idx * ((stride + 31)/32) + warp_start/32] = val;
        }
        
        // "Selective" synchronization: the barrier is skipped for the final chunk.
        // Note that warp_start depends on warp_id, so warps can disagree on this
        // condition; the kernel accepts that divergence as its design trade-off.
        if (warp_start + 32 < stride) {
            __syncthreads();
        }
        
        // Serially accumulate the published totals of all earlier chunks
        if (pos < stride && warp_start > 0) {
            float prev_sum = 0.0f;
            const int warp_idx = warp_start/32;
            #pragma unroll 4
            for (int w = 0; w < warp_idx; w++) {
                prev_sum += warp_sums[outer_idx * ((stride + 31)/32) + w];
            }
            val += prev_sum;
        }
        
        // Store result
        if (pos < stride) {
            output[base_idx + pos * inner_size] = val;
        }
    }
}

torch::Tensor forward(torch::Tensor x, int dim) {
    CHECK_INPUT(x);
    
    auto output = torch::empty_like(x);
    int ndim = x.dim();
    dim = (dim + ndim) % ndim;
    
    int outer_size = 1;
    for (int i = 0; i < dim; i++) {
        outer_size *= x.size(i);
    }
    
    int inner_size = 1;
    for (int i = dim + 1; i < ndim; i++) {
        inner_size *= x.size(i);
    }
    
    int stride = x.size(dim);
    
    // Allocate temporary storage for warp sums
    auto warp_sums = torch::empty({outer_size * ((stride + 31)/32)}, x.options());
    
    // One block per independent scan line, i.e. one (outer, inner) pair each
    const int total_blocks = outer_size * inner_size;
    const int threads_per_block = 256;  // 8 warps per block
    
    cumsum_kernel_optimized<<<total_blocks, threads_per_block>>>(
        x.data_ptr<float>(),
        output.data_ptr<float>(),
        warp_sums.data_ptr<float>(),
        inner_size,
        stride
    );
    
    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Optimized CUDA cumulative sum with selective synchronization");
}
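
For the benchmark configuration above (a 128×4000 float32 tensor scanned along dim=1), the host code computes outer_size = 128, inner_size = 1, and stride = 4000, so the launch is 128 blocks of 256 threads, each block scanning one 4000-element row in 32-element chunks.

A hedged sketch of compiling and checking the extension against torch.cumsum; the source file name cumsum_optimized_sync.cu is an assumption (any path holding the listing above works), and a CUDA toolchain plus a GPU are required:

import torch
from torch.utils.cpp_extension import load

# JIT-compile the CUDA listing above; the file name is arbitrary.
ext = load(name="cumsum_optimized_sync", sources=["cumsum_optimized_sync.cu"])

x = torch.randn(128, 4000, device="cuda")  # float32 and contiguous, as CHECK_INPUT requires
out = ext.forward(x, 1)

# Loose tolerance: the kernel reorders the float32 accumulation.
torch.testing.assert_close(out, torch.cumsum(x, dim=1), rtol=1e-4, atol=1e-3)

The core primitive is warp_scan, a Hillis-Steele inclusive scan: at each doubling offset a lane adds the value held by the lane offset positions below it, so after five steps lane i holds the sum of lanes 0..i. A pure-Python emulation of the 32-lane pattern (illustrative only; the GPU does this with register shuffles):

def warp_scan_emulated(vals):
    """Emulate the shuffle-based inclusive scan across one 32-lane warp."""
    assert len(vals) == 32
    lanes = list(vals)
    offset = 1
    while offset < 32:
        # __shfl_up_sync: lane i reads lane i - offset; lanes below offset keep their value
        lanes = [lanes[i] + (lanes[i - offset] if i >= offset else 0.0)
                 for i in range(32)]
        offset *= 2
    return lanes

# With all-ones input, lane i ends up holding its inclusive prefix sum i + 1
assert warp_scan_emulated([1.0] * 32) == [float(i + 1) for i in range(32)]
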
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.692 inst/cycle 0.000 5
Executed Ipc Elapsed 0.534 inst/cycle 0.000 5
Issue Slots Busy 17.336 % 0.005 5
Issued Ipc Active 0.692 inst/cycle 0.000 5
SM Busy 17.336 % 0.005 5
Memory Throughput 50922493530.508 byte/second 3686907656487795712.000 5
Mem Busy 12.910 % 0.014 5
Max Bandwidth 12.852 % 0.014 5
L1/TEX Hit Rate 88.136 % 0.000 5
L2 Hit Rate 64.180 % 1.628 5
Mem Pipes Busy 12.852 % 0.014 5
Warp Cycles Per Issued Instruction 10.974 cycle 0.002 5
Warp Cycles Per Executed Instruction 10.984 cycle 0.002 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.710 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 11.898 % 0.000 5
Achieved Active Warps Per SM 7.614 warp 0.000 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (11.9%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
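
The occupancy warning can be cross-checked directly from the metrics table above: achieved occupancy is achieved active warps per SM divided by the theoretical maximum. A one-line check using the reported values:

theoretical_warps = 64.0   # "Theoretical Active Warps per SM"
achieved_warps = 7.614     # "Achieved Active Warps Per SM"
print(f"{achieved_warps / theoretical_warps:.1%}")  # 11.9%, matching the rule
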
Operation / Metric Value Unit
aten::to
CPU Time 617701.87 μs
Device Time 144.51 μs
Self CPU Time 38.00 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 617663.87 μs
Device Time 144.51 μs
Self CPU Time 86.55 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 631870.02 μs
Device Time 0.00 μs
Self CPU Time 14744.70 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 601296.38 μs
Device Time 0.00 μs
Self CPU Time 601296.38 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 545299.17 μs
Device Time 14337.01 μs
Self CPU Time 545299.17 μs
Self Device Time 14337.01 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cumsum_kernel_optimized(float const*, float*, float*, int, int)
CPU Time 0.00 μs
Device Time 222427.15 μs
Self CPU Time 0.00 μs
Self Device Time 222427.15 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 13550.56 μs
Device Time 28064.41 μs
Self CPU Time 13550.56 μs
Self Device Time 28064.41 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 189023.92 μs
Device Time 489395.12 μs
Self CPU Time 10238.63 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 178788.73 μs
Device Time 489395.12 μs
Self CPU Time 12453.87 μs
Self Device Time 489395.12 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 489395.12 μs
Self CPU Time 0.00 μs
Self Device Time 489395.12 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45288 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:5:35: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
5 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:6:41: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
6 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:19:29: warning: 2 adjacent parameters of 'cumsum_kernel_optimized' of similar type ('float *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
19 | float* __restrict__ output,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
20 | float* __restrict__ warp_sums,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:19:49: note: the first parameter in the range is 'output'
19 | float* __restrict__ output,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:20:49: note: the last parameter in the range is 'warp_sums'
20 | float* __restrict__ warp_sums,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:23:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
23 | const int idx = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:26:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
26 | const int lane_id = threadIdx.x % 32;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:27:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
27 | const int warp_id = threadIdx.x / 32;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:28:33: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
28 | const int warps_per_block = blockDim.x / 32;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:73:37: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
73 | torch::Tensor forward(torch::Tensor x, int dim) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:77:16: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
77 | int ndim = x.dim();
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:82:23: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
82 | outer_size *= x.size(i);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:87:23: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
87 | inner_size *= x.size(i);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:90:18: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
90 | int stride = x.size(dim);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:93:36: warning: performing an implicit widening conversion to type 'const long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
93 | auto warp_sums = torch::empty({outer_size * ((stride + 31)/32)}, x.options());
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:93:36: note: make conversion explicit to silence this warning
93 | auto warp_sums = torch::empty({outer_size * ((stride + 31)/32)}, x.options());
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<const long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_89/b10_s2_cumsum_optimized_sync/base/base.cu:93:36: note: perform multiplication in a wider type
93 | auto warp_sums = torch::empty({outer_size * ((stride + 31)/32)}, x.options());
| ^~~~~~~~~~
| static_cast<const long>( )