
The AI CUDA Engineer 👷

44_ConvTranspose2d_Multiply_GlobalAvgPool_GlobalAvgPool_Mean • unrolled_vectorized_mean_kernel_base

Level 2 • Task 44
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    stride: int,
    padding: int,
    output_padding: int,
    conv_transpose: torch.Tensor,
    conv_transpose_bias: torch.Tensor,
    multiplier: float,
) -> torch.Tensor:
    """
    Applies transposed convolution, scalar multiplication, and multiple global average pooling operations.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_channels, height, width)
        stride (int): Stride of the transposed convolution
        padding (int): Padding of the transposed convolution
        output_padding (int): Additional size added to output shape
        conv_transpose (torch.Tensor): Transposed convolution weight tensor
        conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
        multiplier (float): Scalar multiplier value

    Returns:
        torch.Tensor: Scalar output after applying operations
    """
    x = F.conv_transpose2d(
        x,
        conv_transpose,
        bias=conv_transpose_bias,
        stride=stride,
        padding=padding,
        output_padding=output_padding,
    )
    x = x * multiplier
    x = torch.mean(x, dim=[2, 3], keepdim=True)
    x = torch.mean(x, dim=[2, 3], keepdim=True)
    x = torch.mean(x)
    return x
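
# Note: because every (batch, channel) slice has the same H*W size, the two
# keepdim means over dims [2, 3] followed by the final mean reduce to a single
# x.mean() over all elements; the second pooled mean is a no-op on the already
# pooled (N, C, 1, 1) tensor. The explicit chain mirrors the task definition.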


class Model(nn.Module):
    """
    Model that performs a transposed convolution, multiplies by a scalar, applies global average pooling,
    another global average pooling, and then calculates the mean.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        multiplier,
    ):
        super(Model, self).__init__()
        conv = nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
        )
        self.conv_transpose_parameter = nn.Parameter(conv.weight)
        self.conv_transpose_bias = nn.Parameter(
            conv.bias
            + torch.randn(
                conv.bias.shape, device=conv.bias.device, dtype=conv.bias.dtype
            )
            * 0.02
        )
        self.multiplier = multiplier

    def forward(self, x, stride, padding, output_padding, fn=module_fn):
        return fn(
            x,
            stride,
            padding,
            output_padding,
            self.conv_transpose_parameter,
            self.conv_transpose_bias,
            self.multiplier,
        )


batch_size = 128
in_channels = 3
out_channels = 16
height, width = 32, 32
kernel_size = 3
stride = 2
padding = 1
output_padding = 1
multiplier = 0.5


def get_inputs():
    return [
        torch.randn(batch_size, in_channels, height, width),
        stride,
        padding,
        output_padding,
    ]


def get_init_inputs():
    return [
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        multiplier,
    ]
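
Since every (batch, channel) slice has the same number of elements, the cascade of means in module_fn collapses to one global mean, which is the property the CUDA kernels below exploit. A minimal sanity check, assuming the definitions above are in scope (weight and bias here are freshly sampled stand-ins, not trained parameters):

import torch
import torch.nn.functional as F

weight = torch.randn(in_channels, out_channels, kernel_size, kernel_size)
bias = torch.randn(out_channels)
x = torch.randn(batch_size, in_channels, height, width)

out = module_fn(x, stride, padding, output_padding, weight, bias, multiplier)
# One global mean over the scaled transposed-convolution output.
ref = (
    F.conv_transpose2d(
        x, weight, bias, stride=stride, padding=padding, output_padding=output_padding
    )
    * multiplier
).mean()
assert torch.allclose(out, ref, atol=1e-6)
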
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs a transposed convolution, multiplies by a scalar, applies global average pooling, 
    another global average pooling, and then calculates the mean.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, output_padding, multiplier):
        super(Model, self).__init__()
        self.conv_transpose = nn.ConvTranspose2d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, output_padding=output_padding,
        )
        self.conv_transpose.bias = nn.Parameter(
            self.conv_transpose.bias
            + torch.randn(
                self.conv_transpose.bias.shape,
                device=self.conv_transpose.bias.device,
                dtype=self.conv_transpose.bias.dtype,
            )
            * 0.02
        )
        self.multiplier = multiplier

    def forward(self, x):
        x = self.conv_transpose(x)
        x = x * self.multiplier
        x = torch.mean(x, dim=[2, 3], keepdim=True)  # First global average pooling
        x = torch.mean(x, dim=[2, 3], keepdim=True)  # Second global average pooling
        x = torch.mean(x)
        return x

batch_size = 128
in_channels = 3
out_channels = 16
height, width = 32, 32
kernel_size = 3
stride = 2
padding = 1
output_padding = 1
multiplier = 0.5

def get_inputs():
    return [torch.randn(batch_size, in_channels, height, width)]

def get_init_inputs():
    return [in_channels, out_channels, kernel_size, stride, padding, output_padding, multiplier]
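
A short driver sketch (hypothetical, not part of the benchmark harness) showing how the helpers above wire together:

model = Model(*get_init_inputs())
(x,) = get_inputs()
out = model(x)
print(out.shape, out.item())  # torch.Size([]) and a single float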

Kernel Information

Related Kernels (Level 2, Task 44 • 44_ConvTranspose2d_Multiply_GlobalAvgPool_GlobalAvgPool_Mean)

Rank  Kernel Name  Runtime (ms)  Speedup (Native)  Speedup (Compile)
🥇 optimized_spatial_reduction_base 0.18 1.21 0.72
🥇 optimized_spatial_reduction_edit_1 0.18 1.21 0.72
🥉 minimal_sync_reduction_edit_1 0.19 1.19 0.71
4 shared_memory_tiled_reduction_base 0.19 1.17 0.70
5 fused_global_avg_base 0.20 1.13 0.67
6 block_size_experimentation_base 0.20 1.11 0.66
7 optimized_strided_avg_pooling_edit_1 0.20 1.10 0.66
7 aligned_ldg_optimized_kernel_base 0.20 1.10 0.66
9 combined_optimized_mean_kernel_base 0.20 1.10 0.65
9 vectorized_ldg_mean_kernel_base 0.20 1.10 0.65
9 optimized_mean_kernel_base 0.20 1.10 0.65
9 warp_uniform_mean_kernel_base_base 0.20 1.10 0.65
9 unrolled_vectorized_mean_kernel_base 0.20 1.10 0.65
9 atomic_final_reduction_base 0.20 1.10 0.65
15 optimized_sync_reduction_base 0.20 1.09 0.65
15 shared_mem_reduction_optimized_base 0.20 1.09 0.65
15 modular_shared_warp_mean_base_base 0.20 1.09 0.65
15 coalesced_vectorized_mean_kernel_base 0.20 1.09 0.65
15 reduced_sync_shared_memory_base 0.20 1.09 0.65
15 fused_atomic_reduction_base 0.20 1.09 0.65
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Kernel to compute the mean of each (batch, channel) slice using vectorized loads,
// and manual loop unrolling via #pragma unroll to reduce loop overhead in both global memory
// accesses and shared memory reduction.
// Each block processes one (batch, channel) slice and atomically accumulates its slice mean
// into a global accumulator.

template <unsigned int blockSize>
__global__ void unrolled_vectorized_mean_kernel(
    const float* __restrict__ input,
    float* __restrict__ global_accum,
    int H,
    int W,
    int C
) {
    extern __shared__ float shared[];  // Shared memory for reduction
    int num_elements = H * W;
    int batch = blockIdx.x / C;
    int channel = blockIdx.x % C;
    int input_offset = (batch * C + channel) * num_elements;
    float sum = 0.0f;

    // Use vectorized loads if the number of elements is divisible by 4 (ensuring 128-bit alignment)
    if ((num_elements & 3) == 0) {
        int num_vec = num_elements >> 2;  // equivalent to num_elements / 4
        const float4* in_vec = reinterpret_cast<const float4*>(input + input_offset);
        // Partially unroll the grid-stride accumulation loop
        #pragma unroll 4
        for (int i = threadIdx.x; i < num_vec; i += blockDim.x) {
            float4 v = __ldg(&in_vec[i]);
            sum += v.x + v.y + v.z + v.w;
        }
    } else {
        #pragma unroll 4
        for (int i = threadIdx.x; i < num_elements; i += blockDim.x) {
            sum += __ldg(&input[input_offset + i]);
        }
    }

    // Store the partial sum in shared memory
    shared[threadIdx.x] = sum;
    __syncthreads();

    // Intra-block reduction with manual unrolling
    if (blockSize >= 256) {
        if (threadIdx.x < 128)
            shared[threadIdx.x] += shared[threadIdx.x + 128];
        __syncthreads();
    }
    if (blockSize >= 128) {
        if (threadIdx.x < 64)
            shared[threadIdx.x] += shared[threadIdx.x + 64];
        __syncthreads();
    }

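    // Final warp: threads 0-31 fold in the partial sums held by threads 32-63 and
    // finish the reduction warp-synchronously via the volatile pointer; on Volta+
    // hardware, __syncwarp() or __shfl_down_sync-based reductions are the safer
    // equivalents.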
    if (threadIdx.x < 32) {
        volatile float* vsmem = shared;
        #pragma unroll
        for (int offset = 32; offset > 0; offset /= 2) {
            vsmem[threadIdx.x] += vsmem[threadIdx.x + offset];
        }
    }

    // Thread 0 computes the mean for this slice and atomically adds it to the global accumulator
    if (threadIdx.x == 0) {
        float slice_mean = shared[0] / static_cast<float>(num_elements);
        atomicAdd(global_accum, slice_mean);
    }
}
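
// Host wrapper: every (batch, channel) slice holds the same H*W elements, so the
// average of the per-slice means (accumulated via atomicAdd above and divided by
// N*C below) equals the mean over the entire tensor.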

at::Tensor module_fn(
    at::Tensor x,
    int64_t stride,
    int64_t padding,
    int64_t output_padding,
    at::Tensor conv_transpose,
    at::Tensor conv_transpose_bias,
    double multiplier
) {
    // Perform transposed convolution using PyTorch's native function
    at::Tensor y = at::conv_transpose2d(
        x,
        conv_transpose,
        conv_transpose_bias,
        {stride, stride},
        {padding, padding},
        {output_padding, output_padding},
        1,
        {1, 1}
    );

    // Scale the output
    y = y * multiplier;

    // Get dimensions (N, C, H, W)
    auto dims = y.sizes();
    int N = dims[0];
    int C = dims[1];
    int H = dims[2];
    int W = dims[3];

    // Allocate a scalar accumulator on the device and initialize to zero
    auto options = torch::TensorOptions().device(y.device()).dtype(y.dtype());
    at::Tensor accum = torch::zeros({1}, options);

    // Launch one block per (batch, channel) slice
    constexpr int blockSize = 256;
    int numBlocks = N * C;
    size_t sharedMemSize = blockSize * sizeof(float);

    unrolled_vectorized_mean_kernel<blockSize><<<numBlocks, blockSize, sharedMemSize>>>(
        y.data_ptr<float>(),
        accum.data_ptr<float>(),
        H, W, C
    );

    // Compute the final overall mean: average the means of all (batch, channel) slices
    accum = accum / static_cast<float>(N * C);
    return accum;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn, "Module function");
}
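
The wrapper above can be JIT-compiled with PyTorch's C++ extension loader. A minimal sketch, assuming the source is saved as unrolled_vectorized_mean_kernel.cu (the file and module names are illustrative, not from the original harness):

import torch
from torch.utils.cpp_extension import load

ext = load(
    name="unrolled_vectorized_mean",  # illustrative module name
    sources=["unrolled_vectorized_mean_kernel.cu"],
    verbose=True,
)

# float32 only: the kernel reads raw float pointers.
x = torch.randn(128, 3, 32, 32, device="cuda")
weight = torch.randn(3, 16, 3, 3, device="cuda")  # (in_channels, out_channels, kH, kW)
bias = torch.randn(16, device="cuda")

# Same argument order as module_fn: x, stride, padding, output_padding, weight, bias, multiplier.
out = ext.forward(x, 2, 1, 1, weight, bias, 0.5)  # 1-element tensor holding the mean
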
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.538 inst/cycle 0.000 5
Executed Ipc Elapsed 0.426 inst/cycle 0.000 5
Issue Slots Busy 13.554 % 0.008 5
Issued Ipc Active 0.542 inst/cycle 0.000 5
SM Busy 13.554 % 0.008 5
Memory Throughput 2252222273009.038 byte/second 1856769220760494931968.000 5
Mem Busy 37.896 % 0.522 5
Max Bandwidth 67.310 % 1.663 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 2.924 % 0.000 5
Mem Pipes Busy 9.596 % 0.031 5
Warp Cycles Per Issued Instruction 103.960 cycle 0.135 5
Warp Cycles Per Executed Instruction 104.644 cycle 0.138 5
Avg. Active Threads Per Warp 31.680 0.000 5
Avg. Not Predicated Off Threads Per Warp 28.960 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 16.000 block 0.000 5
Block Limit Shared Mem 16.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 88.452 % 0.056 5
Achieved Active Warps Per SM 56.608 warp 0.023 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (88.3%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::conv_transpose2d
CPU Time 5055406.88 μs
Device Time 4959714.75 μs
Self CPU Time 54880.48 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::convolution
CPU Time 5000526.39 μs
Device Time 4959714.75 μs
Self CPU Time 71565.33 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_convolution
CPU Time 4928961.07 μs
Device Time 4959714.75 μs
Self CPU Time 150283.30 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::cudnn_convolution_transpose
CPU Time 3894559.69 μs
Device Time 4000010.66 μs
Self CPU Time 841420.76 μs
Self Device Time 4000010.66 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 5483132.51 μs
Device Time 50834.96 μs
Self CPU Time 5483132.51 μs
Self Device Time 50834.96 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 2172917.30 μs
Device Time 2511173.39 μs
Self CPU Time 105670.04 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Failed
45258 warnings and 2 errors generated when compiling for host.
Error while processing /home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu.
Suppressed 45292 warnings (45245 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
Found compiler error(s).
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:17:5 bugprone-easily-swappable-parameters
17 | int W,
| ^~~~~~
18 | int C
| ~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:17:9: note: the first parameter in the range is 'W'
17 | int W,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:18:9: note: the last parameter in the range is 'C'
18 | int C
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:22:17: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
22 | int batch = blockIdx.x / C;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:23:19: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
23 | int channel = blockIdx.x % C;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:31:22: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
31 | for (int i = threadIdx.x; i < num_vec; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:31:53: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
31 | for (int i = threadIdx.x; i < num_vec; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:33:13: error: expected a for, while, or do-while loop to follow '#pragma unroll' [clang-diagnostic-error]
33 | {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:39:22: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
39 | for (int i = threadIdx.x; i < num_elements; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:39:58: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
39 | for (int i = threadIdx.x; i < num_elements; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:41:13: error: expected a for, while, or do-while loop to follow '#pragma unroll' [clang-diagnostic-error]
41 | {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:79:16: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
79 | at::Tensor x,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:83:16: warning: the parameter 'conv_transpose' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
83 | at::Tensor conv_transpose,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:104:13: warning: narrowing conversion from 'long' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
104 | int N = dims[0];
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:105:13: warning: narrowing conversion from 'long' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
105 | int C = dims[1];
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:106:13: warning: narrowing conversion from 'long' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
106 | int H = dims[2];
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_44/b9_s2_unrolled_vectorized_mean_kernel/base/base.cu:107:13: warning: narrowing conversion from 'long' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
107 | int W = dims[3];
| ^