
The AI CUDA Engineer 👷

100_ConvTranspose3d_Clamp_Min_Divide • vectorized_ldg_kernel_128_base

Level 2 • Task 100
Reference implementation (functional variant):

import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    stride: int,
    padding: int,
    min_value: float,
    divisor: float,
    conv_transpose: torch.Tensor,
    conv_transpose_bias: torch.Tensor,
) -> torch.Tensor:
    """
    Applies a transposed 3D convolution, clamps the output to a minimum value, and divides it by a constant.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
        stride (int): Stride of the transposed convolution
        padding (int): Padding of the transposed convolution
        min_value (float): Minimum value for clamping
        divisor (float): Value to divide output by
        conv_transpose (torch.Tensor): Transposed convolution weight tensor
        conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution

    Returns:
        torch.Tensor: Output tensor after applying transposed convolution, clamping and division
    """
    x = F.conv_transpose3d(
        x, conv_transpose, bias=conv_transpose_bias, stride=stride, padding=padding
    )
    x = torch.clamp(x, min=min_value)
    x = x / divisor
    return x


class Model(nn.Module):
    """
    A model that performs a transposed 3D convolution, clamps the output to a minimum value,
    and then divides the result by a constant.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        min_value,
        divisor,
    ):
        super(Model, self).__init__()
        conv_transpose = nn.ConvTranspose3d(
            in_channels, out_channels, kernel_size, stride, padding
        )
        self.conv_transpose_parameter = conv_transpose.weight
        self.conv_transpose_bias = conv_transpose.bias

    def forward(self, x, stride, padding, min_value, divisor, fn=module_fn):
        return fn(
            x,
            stride,
            padding,
            min_value,
            divisor,
            self.conv_transpose_parameter,
            self.conv_transpose_bias,
        )


batch_size = 16
in_channels = 32
out_channels = 16
depth, height, width = 16, 32, 32
kernel_size = 3
stride = 2
padding = 1
min_value = -1.0
divisor = 2.0


def get_inputs():
    return [
        torch.randn(batch_size, in_channels, depth, height, width),
        stride,
        padding,
        min_value,
        divisor,
    ]


def get_init_inputs():
    return [in_channels, out_channels, kernel_size, stride, padding, min_value, divisor]
Reference implementation (original nn.Module):

import torch
import torch.nn as nn

class Model(nn.Module):
    """
    A model that performs a transposed 3D convolution, clamps the output to a minimum value, 
    and then divides the result by a constant.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, min_value, divisor):
        super(Model, self).__init__()
        self.conv_transpose = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding)
        self.min_value = min_value
        self.divisor = divisor

    def forward(self, x):
        x = self.conv_transpose(x)
        x = torch.clamp(x, min=self.min_value)
        x = x / self.divisor
        return x

batch_size = 16
in_channels = 32
out_channels = 16
depth, height, width = 16, 32, 32
kernel_size = 3
stride = 2
padding = 1
min_value = -1.0
divisor = 2.0

def get_inputs():
    return [torch.randn(batch_size, in_channels, depth, height, width)]

def get_init_inputs():
    return [in_channels, out_channels, kernel_size, stride, padding, min_value, divisor]

Kernel Information

Related Kernels (Level 2, Task 100 • 100_ConvTranspose3d_Clamp_Min_Divide)

Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile)
🥇 | optimized_clamp_divide_base | 0.57 | 1.13 | 0.84
🥇 | stride_loop_implementation_edit_1 | 0.57 | 1.13 | 0.84
🥇 | modular_device_funcs3_base | 0.57 | 1.13 | 0.84
🥇 | balanced_workload_distribution_base | 0.57 | 1.13 | 0.84
🥇 | memory_coalescing_optimization_base | 0.57 | 1.13 | 0.84
6 | unrolled_convtranspose3d_edit_1 | 0.57 | 1.13 | 0.84
6 | minimize_warp_divergence_edit_base | 0.57 | 1.13 | 0.84
6 | branchless_clamp_divide_opt_edit_1 | 0.57 | 1.13 | 0.84
6 | modular_device_funcs2_edit_1 | 0.57 | 1.13 | 0.84
10 | modular_device_funcs2_base | 0.57 | 1.13 | 0.84
10 | optimized_thread_block_indexing_base | 0.57 | 1.13 | 0.84
10 | optimized_thread_block_indexing_edit_1 | 0.57 | 1.13 | 0.84
13 | vectorized_ldg_kernel_128_base | 0.58 | 1.12 | 0.83
13 | vectorized_ldg_kernel_128_edit_1 | 0.58 | 1.12 | 0.83
13 | vectorized_modular_kernel_edit_1 | 0.58 | 1.12 | 0.83
13 | vectorized_stride_loop_edit_1 | 0.58 | 1.12 | 0.83
13 | vectorized_stride_loop_base | 0.58 | 1.12 | 0.83
13 | vectorized_modular_kernel_base | 0.58 | 1.12 | 0.83
19 | atomic_free_stride_loop_edit_1 | 0.58 | 1.12 | 0.83
19 | modular_conv3d_clamp_divide_base | 0.58 | 1.12 | 0.83
CUDA Source (vectorized_ldg_kernel_128_base):

#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <algorithm>

// Template specialization for applying clamp and divide operation
template <typename scalar_t>
__device__ __forceinline__ scalar_t apply_op(scalar_t val, const float min_value, const float divisor);

template <>
__device__ __forceinline__ float apply_op<float>(float val, const float min_value, const float divisor) {
    return fmaxf(val, min_value) / divisor;
}

template <>
__device__ __forceinline__ double apply_op<double>(double val, const float min_value, const float divisor) {
    return fmax(val, static_cast<double>(min_value)) / static_cast<double>(divisor);
}
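// Note: fmaxf/fmax give a branchless clamp. The divisor is a runtime value,
// so the compiler must emit a true division here; it could only replace it
// with a reciprocal multiply under fast-math.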

// Define vectorized types for 128-bit memory transactions
template <typename scalar_t>
struct VectorizedType;

// For float, use float4 (4 * 32-bit = 128 bits)
template <>
struct VectorizedType<float> {
    using Vec = float4;
    static constexpr int kElements = 4;
};

// For double, use double2 (2 * 64-bit = 128 bits)
template <>
struct VectorizedType<double> {
    using Vec = double2;
    static constexpr int kElements = 2;
};
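// Illustrative sanity check (not part of the original submission): both
// vector types are exactly 16 bytes, so each vectorized load/store below is
// a single 128-bit transaction.
static_assert(sizeof(float4) == 16, "float4 expected to be 128 bits");
static_assert(sizeof(double2) == 16, "double2 expected to be 128 bits");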


// Kernel that uses __ldg() for read-only global memory accesses and vectorized loads/stores
template <typename scalar_t>
__global__ void clamp_and_divide_kernel(
    scalar_t* __restrict__ output,
    const int64_t numel,
    const float min_value,
    const float divisor) {
    
    using Vec = typename VectorizedType<scalar_t>::Vec;
    constexpr int ElementsPerVec = VectorizedType<scalar_t>::kElements;

    // Number of complete 128-bit vectors in the buffer. Index arithmetic is
    // done in 64-bit so that very large tensors cannot overflow an int.
    const int64_t vec_num = numel / ElementsPerVec;

    const int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int64_t stride = static_cast<int64_t>(gridDim.x) * blockDim.x;

    // Process vectorized portion using 128-bit aligned loads and stores
    Vec* output_vec = reinterpret_cast<Vec*>(output);
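    // Two assumptions made explicit here: (1) the cast above requires the
    // base pointer to be 16-byte aligned, which holds because CUDA device
    // allocations (and hence fresh ATen tensors) are aligned to at least
    // 256 bytes; (2) __ldg on memory this same kernel writes is safe only
    // because every element is read and then written by exactly one thread.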
    for (int64_t idx = tid; idx < vec_num; idx += stride) {
        // Use __ldg() to load from global memory via the read-only cache
        Vec vec_val = __ldg(&output_vec[idx]);

        // Use union trick to access individual elements of the vector
        union {
            Vec v;
            scalar_t arr[ElementsPerVec];
        } u;
        u.v = vec_val;

        #pragma unroll
        for (int i = 0; i < ElementsPerVec; i++) {
            u.arr[i] = apply_op<scalar_t>(u.arr[i], min_value, divisor);
        }
        output_vec[idx] = u.v;
    }

    // Handle remaining elements if numel is not divisible by ElementsPerVec
    const int64_t offset = vec_num * ElementsPerVec;
    for (int64_t idx = offset + tid; idx < numel; idx += stride) {
        scalar_t val = __ldg(&output[idx]);
        output[idx] = apply_op<scalar_t>(val, min_value, divisor);
    }
}

// The forward function wraps conv_transpose3d and applies the custom kernel
torch::Tensor forward(
    torch::Tensor input,
    int stride,
    int padding,
    float min_value,
    float divisor,
    torch::Tensor weight,
    torch::Tensor bias) {
    
    // Perform the 3D transposed convolution using PyTorch's native function
    auto output = torch::conv_transpose3d(input, weight, bias, stride, padding);
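    // Clamp and divide are fused in place on the convolution output: no
    // extra tensor is allocated and the data makes only one more trip
    // through global memory.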

    const int threads = 256;
    // Compute the block count in 64-bit, then clamp it; the grid-stride loop
    // in the kernel covers whatever a capped grid does not reach directly.
    const int64_t desired_blocks = (output.numel() + threads - 1) / threads;
    const int blocks = static_cast<int>(std::min<int64_t>(desired_blocks, 65535));

    AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "clamp_and_divide_ldg", ([&] {
        clamp_and_divide_kernel<scalar_t><<<blocks, threads>>>(
            output.data_ptr<scalar_t>(),
            output.numel(),
            min_value,
            divisor
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "3D Transposed convolution with clamp and divide using __ldg and 128-bit alignment");
}
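
To make the kernel's vector/tail split concrete, here is a minimal host-side
sketch (illustrative only, not part of the submission) that mirrors the index
math for float data and this task's shapes; the output size follows the
standard transposed-conv formula out = (in - 1) * stride - 2 * padding +
kernel_size:

#include <cstdint>
#include <cstdio>

int main() {
    // Output shape for this task: depth (16-1)*2 - 2 + 3 = 31,
    // height/width (32-1)*2 - 2 + 3 = 63, with batch 16 and 16 channels.
    const int64_t numel = 16LL * 16 * 31 * 63 * 63;

    const int64_t elements_per_vec = 4;                       // float4 lanes
    const int64_t vec_num = numel / elements_per_vec;         // full 128-bit vectors
    const int64_t tail = numel - vec_num * elements_per_vec;  // leftover scalars

    // Prints: numel=31497984 vectors=7874496 tail=0; this output size is
    // evenly divisible by 4, so the scalar tail loop does no work here.
    std::printf("numel=%lld vectors=%lld tail=%lld\n",
                (long long)numel, (long long)vec_num, (long long)tail);
    return 0;
}
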
Performance Metrics
Metric | Value | Unit | Variance | Samples
Executed Ipc Active | 0.826 | inst/cycle | 0.000 | 5
Executed Ipc Elapsed | 0.784 | inst/cycle | 0.000 | 5
Issue Slots Busy | 20.660 | % | 0.017 | 5
Issued Ipc Active | 0.826 | inst/cycle | 0.000 | 5
SM Busy | 20.660 | % | 0.017 | 5
Memory Throughput | 2326825097643.950 | byte/second | 169304328797474357248.000 | 5
Mem Busy | 38.760 | % | 0.066 | 5
Max Bandwidth | 69.420 | % | 0.150 | 5
L1/TEX Hit Rate | 44.924 | % | 0.006 | 5
L2 Hit Rate | 50.290 | % | 0.006 | 5
Mem Pipes Busy | 17.608 | % | 0.012 | 5
Warp Cycles Per Issued Instruction | 52.270 | cycle | 0.164 | 5
Warp Cycles Per Executed Instruction | 52.368 | cycle | 0.166 | 5
Avg. Active Threads Per Warp | 32.000 | - | 0.000 | 5
Avg. Not Predicated Off Threads Per Warp | 31.030 | - | 0.000 | 5
Max Active Clusters | 0.000 | cluster | 0.000 | 5
Max Cluster Size | 8.000 | block | 0.000 | 5
Overall GPU Occupancy | 0.000 | % | 0.000 | 5
Cluster Occupancy | 0.000 | % | 0.000 | 5
Block Limit SM | 32.000 | block | 0.000 | 5
Block Limit Registers | 10.000 | block | 0.000 | 5
Block Limit Shared Mem | 32.000 | block | 0.000 | 5
Block Limit Warps | 8.000 | block | 0.000 | 5
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5
Theoretical Occupancy | 100.000 | % | 0.000 | 5
Achieved Occupancy | 68.242 | % | 0.009 | 5
Achieved Active Warps Per SM | 43.674 | warp | 0.004 | 5
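As a quick consistency check (not part of the profiler output itself): 64 theoretical warps per SM × 68.242% achieved occupancy ≈ 43.7 warps, which matches the reported Achieved Active Warps Per SM.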
Analysis Rules
Rule | Description
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (68.4%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation | CPU Time | Device Time | Self CPU Time | Self Device Time
aten::conv_transpose3d | 2444530.16 μs | 3439260.50 μs | 13031.68 μs | 0.00 μs
aten::convolution | 2431498.48 μs | 3439260.50 μs | 17846.89 μs | 0.00 μs
aten::_convolution | 2413651.59 μs | 3439260.50 μs | 37387.81 μs | 0.00 μs
aten::cudnn_convolution_transpose | 860455.94 μs | 2641134.99 μs | 189162.64 μs | 2641134.99 μs
cudaLaunchKernel | 3426437.85 μs | 60222.23 μs | 3426437.85 μs | 60222.23 μs
sm90_xmma_dgrad_implicit_gemm_indexed_f32f32_tf32f32_f32_nhwckrsc_nhwc_tilesize256x64x32_warpgroupsize1x1x1_g1_strided_execute_kernel__5x_cudnn | 0.00 μs | 1583622.48 μs | 0.00 μs | 1583622.48 μs
aten::add_ | 1504662.93 μs | 798125.51 μs | 29491.97 μs | 798125.51 μs

All operations report 0 B for CPU, device, self CPU, and self device memory usage.
Status: Completed
45290 warnings generated when compiling for host.
Suppressed 45327 warnings (45280 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:43:5: warning: 2 adjacent parameters of 'clamp_and_divide_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
43 | const int64_t numel,
| ^~~~~~~~~~~~~~~~~~~~
44 | const float min_value,
| ~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:43:19: note: the first parameter in the range is 'numel'
43 | const int64_t numel,
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:44:17: note: the last parameter in the range is 'min_value'
44 | const float min_value,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:43:5: note:
43 | const int64_t numel,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:44:5: note: 'const int64_t' and 'const float' may be implicitly converted: 'const int64_t' (as 'long') -> 'const float' (as 'float'), 'const float' (as 'float') -> 'const int64_t' (as 'long')
44 | const float min_value,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:53:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
53 | int tid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:54:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
54 | int stride = gridDim.x * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:86:19: warning: the parameter 'input' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
86 | torch::Tensor input,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:88:5: warning: 2 adjacent parameters of 'forward' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
88 | int padding,
| ^~~~~~~~~~~~
89 | float min_value,
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:88:9: note: the first parameter in the range is 'padding'
88 | int padding,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:89:11: note: the last parameter in the range is 'min_value'
89 | float min_value,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:89:5: note: 'int' and 'float' may be implicitly converted
89 | float min_value,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:91:19: warning: the parameter 'weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
91 | torch::Tensor weight,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:98:18: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
98 | int blocks = (output.numel() + threads - 1) / threads;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250207_optimize_b5_s4_e1_sweep/level_2/task_100/b3_s0_vectorized_ldg_kernel_128/base/base.cu:103:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
103 | AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "clamp_and_divide_ldg", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^