
The AI CUDA Engineer 👷

89_cumsum • shared_memory_cumsum_base

Level 1 • Task 89
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Performs a cumulative sum operation.

    Args:
        x (torch.Tensor): Input tensor.
        dim (int): The dimension along which to perform the cumulative sum.

    Returns:
        torch.Tensor: Output tensor.
    """
    return torch.cumsum(x, dim=dim)


class Model(nn.Module):
    """
    A simple model that performs a cumulative sum (prefix sum) operation along a specified dimension.
    """

    def __init__(self, dim):
        """
        Initialize the Scan model.

        Args:
            dim (int): The dimension along which to perform the cumulative sum.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x, fn=module_fn):
        """
        Forward pass for the Scan model, computing the cumulative sum along the specified dimension.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, *input_shape)
            fn (callable): Function to compute the output, defaults to module_fn
        """
        return fn(x, self.dim)


# Define input dimensions and parameters
batch_size = 128
input_shape = (4000,)  # Example shape (arbitrary)
dim = 1


def get_inputs():
    """
    Generates random inputs for testing the Scan model.

    Returns:
        list: A list containing a single randomly generated tensor with shape
              (batch_size, *input_shape).
    """
    return [torch.randn(batch_size, *input_shape)]


def get_init_inputs():
    """
    Returns the initialization parameters for the Scan model.

    Returns:
        list: A list containing the `dim` parameter for model initialization.
    """
    return [dim]
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    A simple model that performs a cumulative sum (prefix sum) operation along a specified dimension.

    Parameters:
        dim (int): The dimension along which to perform the scan operation.
    """

    def __init__(self, dim):
        """
        Initialize the Scan model.

        Args:
            dim (int): The dimension along which to perform the cumulative sum.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x):
        """
        Forward pass for the Scan model, computing the cumulative sum along the specified dimension.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, *input_shape), where `*input_shape` 
                              can vary depending on the use case.

        Returns:
            torch.Tensor: Tensor of the same shape as `x` after applying cumulative sum along `dim`.
        """
        return torch.cumsum(x, dim=self.dim)

# Define input dimensions and parameters
batch_size = 128
input_shape = (4000,)  # Example shape (arbitrary)
dim = 1

def get_inputs():
    """
    Generates random inputs for testing the Scan model.

    Returns:
        list: A list containing a single randomly generated tensor with shape 
              (batch_size, *input_shape).
    """
    return [torch.randn(batch_size, *input_shape)]

def get_init_inputs():
    """
    Returns the initialization parameters for the Scan model.

    Returns:
        list: A list containing the `dim` parameter for model initialization.
    """
    return [dim]
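
For reference, the listings above can be exercised end-to-end without any custom kernel. The following is a minimal sketch that wires get_init_inputs/get_inputs into the Model and checks one row against an explicit 1-D cumulative sum; it assumes the definitions above are in scope.

import torch

# Minimal sketch: run the reference implementation above (plain PyTorch, CPU).
# Assumes Model, get_inputs and get_init_inputs from the listings are in scope.
model = Model(*get_init_inputs())   # Model(dim=1)
(x,) = get_inputs()                 # tensor of shape (128, 4000)
y = model(x)
assert y.shape == x.shape
# Cross-check one row against a 1-D cumulative sum.
assert torch.allclose(y[0], torch.cumsum(x[0], dim=0))
print("reference cumsum OK:", tuple(y.shape))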

Kernel Information

Related Kernels (Level 1, Task 89 • 89_cumsum)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 hybrid_aligned_cumsum_edit_1 0.01 2.21 2.14
🥇 tile_scan_cumsum_base 0.01 2.21 2.14
🥇 aligned_cumsum_ldg_edit_1 0.01 2.21 2.14
🥇 hybrid_aligned_cumsum_base 0.01 2.21 2.14
🥇 aligned_cumsum_ldg_base 0.01 2.21 2.14
🥇 tile_scan_cumsum_edit_1 0.01 2.21 2.14
🥇 shared_memory_cumsum_base 0.01 2.21 2.14
8 cumsum_even_dist_edit_1 0.01 2.07 2.00
8 hybrid_cumsum_edit_1 0.01 2.07 2.00
8 parallel_cumsum_base 0.01 2.07 2.00
8 cumsum_even_dist_base 0.01 2.07 2.00
8 parallel_cumsum_unroll_base 0.01 2.07 2.00
8 hybrid_cumsum_base 0.01 2.07 2.00
8 modular_cumsum_base 0.01 2.07 2.00
15 parallel_cumsum_stride_base 0.02 1.48 1.43
16 parallel_cumsum_stride_edit_1 0.02 1.29 1.25
17 cumsum_warp_atomic_base_base 0.04 0.82 0.79
18 cumsum_optimized_sync_base 0.04 0.79 0.77
18 cumsum_optimized_sync_base 0.04 0.79 0.77
20 hybrid_adaptive_cumsum_base 0.04 0.77 0.75
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// This kernel uses shared memory to store intermediate results of the cumulative sum
// to reduce global memory accesses and improve performance.

__global__ void shared_memory_cumsum_kernel(const float* __restrict__ input, float* __restrict__ output, int stride, int inner_size) {
    extern __shared__ float sdata[];

    // Each block processes one independent "line": a 1-D slice of length `stride`
    // along the cumsum dimension, identified by its outer and inner indices.
    int line_index = blockIdx.x;
    int outer_idx = line_index / inner_size;
    int inner_idx = line_index % inner_size;

    const float* in_line = input + outer_idx * stride * inner_size + inner_idx;
    float* out_line = output + outer_idx * stride * inner_size + inner_idx;

    int tid = threadIdx.x;
    int block_threads = blockDim.x;

    // Partition the line into contiguous chunks, one chunk per thread.
    int chunk_size = (stride + block_threads - 1) / block_threads;
    int start = tid * chunk_size;
    int end = min(start + chunk_size, stride);

    // Phase 1: each thread sums its own chunk.
    float thread_sum = 0.0f;
    for (int i = start; i < end; i++) {
        thread_sum += __ldg(&in_line[i * inner_size]);
    }

    sdata[tid] = thread_sum;
    __syncthreads();

    // Phase 2: inclusive (Hillis-Steele) scan of the per-thread chunk sums in shared memory.
    for (int offset = 1; offset < block_threads; offset *= 2) {
        float temp = 0.0f;
        if (tid >= offset) {
            temp = sdata[tid - offset];
        }
        __syncthreads();
        sdata[tid] += temp;
        __syncthreads();
    }

    // Exclusive offset for this thread: the total of all chunks owned by lower-numbered threads.
    float add_offset = (tid == 0) ? 0.0f : sdata[tid - 1];

    // Phase 3: re-scan the local chunk and add the exclusive offset to produce the final cumsum.
    float local_running = 0.0f;
    for (int i = start; i < end; i++) {
        local_running += __ldg(&in_line[i * inner_size]);
        out_line[i * inner_size] = local_running + add_offset;
    }
}

// The forward function sets up the grid to cover each "line" of the tensor along the cumsum dimension
// and launches the kernel with a fixed number of threads per block.

torch::Tensor forward(torch::Tensor x, int dim) {
    CHECK_INPUT(x);

    auto output = torch::empty_like(x);
    int ndim = x.dim();
    dim = (dim + ndim) % ndim;

    int outer_size = 1;
    for (int i = 0; i < dim; ++i) {
        outer_size *= x.size(i);
    }

    int inner_size = 1;
    for (int i = dim + 1; i < ndim; ++i) {
        inner_size *= x.size(i);
    }

    int stride = x.size(dim);

    int total_lines = outer_size * inner_size;

    int threads = 256;
    shared_memory_cumsum_kernel<<<total_lines, threads, threads * sizeof(float)>>> (
        x.data_ptr<float>(), output.data_ptr<float>(), stride, inner_size
    );

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "CUDA cumulative sum with shared memory optimization");
}
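
To make the kernel's data flow easier to follow, here is a small Python sketch that mirrors the same three-phase strategy on a single line of length stride: per-"thread" chunk sums, an inclusive scan over those sums, then a local re-scan with an exclusive offset. It illustrates the algorithm only; the helper name emulate_block_cumsum is hypothetical and nothing here calls the compiled extension.

import torch

def emulate_block_cumsum(line: torch.Tensor, block_threads: int = 256) -> torch.Tensor:
    # CPU emulation of shared_memory_cumsum_kernel for one contiguous line.
    stride = line.numel()
    chunk_size = (stride + block_threads - 1) // block_threads

    # Phase 1: per-"thread" chunk sums (the values each thread writes to sdata).
    chunk_sums = [line[t * chunk_size : min((t + 1) * chunk_size, stride)].sum()
                  for t in range(block_threads)]

    # Phase 2: inclusive scan over the chunk sums; entry t-1 is thread t's exclusive offset.
    inclusive = torch.cumsum(torch.stack(chunk_sums), dim=0)

    # Phase 3: each "thread" re-scans its chunk and adds its exclusive offset.
    out = torch.empty_like(line)
    for t in range(block_threads):
        start, end = t * chunk_size, min((t + 1) * chunk_size, stride)
        if start >= end:
            continue
        offset = inclusive[t - 1] if t > 0 else line.new_zeros(())
        out[start:end] = torch.cumsum(line[start:end], dim=0) + offset
    return out

# Quick check against torch.cumsum on one line of the benchmark size.
x = torch.randn(4000, dtype=torch.float64)
assert torch.allclose(emulate_block_cumsum(x), torch.cumsum(x, dim=0))

The launch in forward mirrors this structure: one block per (outer_idx, inner_idx) line, 256 threads per block, and threads * sizeof(float) bytes of dynamic shared memory holding the chunk sums.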
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.270 inst/cycle 0.000 5
Executed Ipc Elapsed 0.180 inst/cycle 0.000 5
Issue Slots Busy 6.900 % 0.000 5
Issued Ipc Active 0.280 inst/cycle 0.000 5
SM Busy 6.900 % 0.000 5
Memory Throughput 172171689619.212 byte/second 12675135068979195904.000 5
Mem Busy 32.080 % 0.204 5
Max Bandwidth 44.278 % 0.682 5
L1/TEX Hit Rate 86.512 % 0.001 5
L2 Hit Rate 99.558 % 8.021 5
Mem Pipes Busy 3.060 % 0.002 5
Warp Cycles Per Issued Instruction 27.664 cycle 0.002 5
Warp Cycles Per Executed Instruction 27.980 cycle 0.002 5
Avg. Active Threads Per Warp 31.560 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.130 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 16.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 11.968 % 0.000 5
Achieved Active Warps Per SM 7.660 warp 0.000 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (12.0%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
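
The occupancy figures above are internally consistent: the launch uses 256 threads per block, i.e. 8 warps per block, and the achieved 7.66 active warps per SM out of a theoretical 64 gives the reported ~12% achieved occupancy. A quick sanity check of that arithmetic (plain Python, values copied from the tables above):

threads_per_block = 256                     # launch configuration in forward()
warps_per_block = threads_per_block // 32   # 8 warps per block
theoretical_warps_per_sm = 64.0             # "Theoretical Active Warps per SM"
achieved_warps_per_sm = 7.66                # "Achieved Active Warps Per SM"

achieved_occupancy = achieved_warps_per_sm / theoretical_warps_per_sm
print(warps_per_block, f"{achieved_occupancy:.1%}")   # 8 12.0%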
Operation / Metric Value Unit
aten::to
CPU Time 561395.55 μs
Device Time 165.25 μs
Self CPU Time 34.52 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 561361.03 μs
Device Time 165.25 μs
Self CPU Time 88.04 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 571751.96 μs
Device Time 0.00 μs
Self CPU Time 11003.56 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 558007.63 μs
Device Time 0.00 μs
Self CPU Time 558007.63 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 218361.30 μs
Device Time 9982.61 μs
Self CPU Time 218361.30 μs
Self Device Time 9982.61 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
shared_memory_cumsum_kernel(float const*, float*, int, int)
CPU Time 0.00 μs
Device Time 38744.07 μs
Self CPU Time 0.00 μs
Self Device Time 38744.07 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 10391.12 μs
Device Time 19166.30 μs
Self CPU Time 10391.12 μs
Self Device Time 19166.30 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 41436.11 μs
Device Time 289988.37 μs
Self CPU Time 6204.77 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 35232.27 μs
Device Time 289988.37 μs
Self CPU Time 7501.61 μs
Self Device Time 289988.37 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 289988.37 μs
Self CPU Time 0.00 μs
Self Device Time 289988.37 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45290 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:5:35: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
5 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:6:41: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
6 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:15:22: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
15 | int line_index = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:19:28: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
19 | const float* in_line = input + outer_idx * stride * inner_size + inner_idx;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:19:36: note: make conversion explicit to silence this warning
4 |
5 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
6 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
7 | #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
8 |
9 | // This kernel uses shared memory to store intermediate results of the cumulative sum
10 | // to reduce global memory accesses and improve performance.
11 |
12 | __global__ void shared_memory_cumsum_kernel(const float* __restrict__ input, float* __restrict__ output, int stride, int inner_size) {
13 | extern __shared__ float sdata[];
14 |
15 | int line_index = blockIdx.x;
16 | int outer_idx = line_index / inner_size;
17 | int inner_idx = line_index % inner_size;
18 |
19 | const float* in_line = input + outer_idx * stride * inner_size + inner_idx;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:19:36: note: perform multiplication in a wider type
19 | const float* in_line = input + outer_idx * stride * inner_size + inner_idx;
| ^~~~~~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:20:23: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
20 | float* out_line = output + outer_idx * stride * inner_size + inner_idx;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:20:32: note: make conversion explicit to silence this warning
20 | float* out_line = output + outer_idx * stride * inner_size + inner_idx;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:20:32: note: perform multiplication in a wider type
20 | float* out_line = output + outer_idx * stride * inner_size + inner_idx;
| ^~~~~~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:22:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
22 | int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:23:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
23 | int block_threads = blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:31:30: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
31 | thread_sum += __ldg(&in_line[i * inner_size]);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:31:38: note: make conversion explicit to silence this warning
31 | thread_sum += __ldg(&in_line[i * inner_size]);
| ^~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:31:38: note: perform multiplication in a wider type
31 | thread_sum += __ldg(&in_line[i * inner_size]);
| ^
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:51:33: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
51 | local_running += __ldg(&in_line[i * inner_size]);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:51:41: note: make conversion explicit to silence this warning
51 | local_running += __ldg(&in_line[i * inner_size]);
| ^~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:51:41: note: perform multiplication in a wider type
51 | local_running += __ldg(&in_line[i * inner_size]);
| ^
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:52:9: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
52 | out_line[i * inner_size] = local_running + add_offset;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:52:18: note: make conversion explicit to silence this warning
52 | out_line[i * inner_size] = local_running + add_offset;
| ^~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:52:18: note: perform multiplication in a wider type
52 | out_line[i * inner_size] = local_running + add_offset;
| ^
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:59:37: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
59 | torch::Tensor forward(torch::Tensor x, int dim) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:63:16: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
63 | int ndim = x.dim();
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:68:23: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
68 | outer_size *= x.size(i);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:73:23: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
73 | inner_size *= x.size(i);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_1/task_89/b5_s2_shared_memory_cumsum/base/base.cu:76:18: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
76 | int stride = x.size(dim);
| ^