
The AI CUDA Engineer 👷

9_Matmul_Subtract_Multiply_ReLU • efficient_indexing_tile_kernel_base

Level 2 • Task 9
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    linear_weight: torch.Tensor,
    linear_bias: torch.Tensor,
    subtract_value: float,
    multiply_value: float,
) -> torch.Tensor:
    """
    Applies linear transformation, subtraction, multiplication and ReLU activation.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        linear_weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
        linear_bias (torch.Tensor): Bias vector of shape (out_features)
        subtract_value (float): Value to subtract
        multiply_value (float): Value to multiply

    Returns:
        torch.Tensor: Output tensor after applying linear transformation, subtraction,
            multiplication and ReLU, with shape (batch_size, out_features)
    """
    x = F.linear(x, linear_weight, linear_bias)
    x = x - subtract_value
    x = x * multiply_value
    x = torch.relu(x)
    return x


class Model(nn.Module):
    """
    Model that performs a matrix multiplication, subtraction, multiplication, and ReLU activation.
    """

    def __init__(self, in_features, out_features, subtract_value, multiply_value):
        super(Model, self).__init__()
        self.linear_weight = nn.Parameter(torch.randn(out_features, in_features) * 0.02)
        self.linear_bias = nn.Parameter(torch.randn(out_features) * 0.02)
        self.subtract_value = subtract_value
        self.multiply_value = multiply_value

    def forward(self, x, fn=module_fn):
        return fn(
            x,
            self.linear_weight,
            self.linear_bias,
            self.subtract_value,
            self.multiply_value,
        )


batch_size = 128
in_features = 10
out_features = 5
subtract_value = 2.0
multiply_value = 1.5


def get_inputs():
    return [torch.randn(batch_size, in_features)]


def get_init_inputs():
    return [in_features, out_features, subtract_value, multiply_value]

import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs a matrix multiplication, subtraction, multiplication, and ReLU activation.
    """
    def __init__(self, in_features, out_features, subtract_value, multiply_value):
        super(Model, self).__init__()
        self.linear = nn.Linear(in_features, out_features)
        self.subtract_value = subtract_value
        self.multiply_value = multiply_value

    def forward(self, x):
        x = self.linear(x)
        x = x - self.subtract_value
        x = x * self.multiply_value
        x = torch.relu(x)
        return x

batch_size = 128
in_features = 10
out_features = 5
subtract_value = 2.0
multiply_value = 1.5

def get_inputs():
    return [torch.randn(batch_size, in_features)]

def get_init_inputs():
    return [in_features, out_features, subtract_value, multiply_value]
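
A quick equivalence check for the two listings above (a minimal sketch of my own, assuming the nn.Linear-based Model and the constants above are in scope):

# Sanity check: the nn.Linear model should match the fused expression
# relu((linear(x) - subtract_value) * multiply_value) computed directly.
import torch
import torch.nn.functional as F

model = Model(in_features, out_features, subtract_value, multiply_value)
x = get_inputs()[0]
ref = torch.relu(
    (F.linear(x, model.linear.weight, model.linear.bias) - subtract_value)
    * multiply_value
)
assert torch.allclose(model(x), ref)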

Kernel Information

Related Kernels (Level 2, Task 9 • 9_Matmul_Subtract_Multiply_ReLU)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 unrolled_loop_kernel_base 0.01 4.05 2.63
🥇 9_Matmul_Subtract_Multiply_ReLU 0.01 4.05 2.63
🥇 9_matmul_subtract_multiply_relu_unroll_base 0.01 4.05 2.63
🥇 modular_matmul_subtract_multiply_relu_base 0.01 4.05 2.63
🥇 efficient_indexing_tile_kernel_base 0.01 4.05 2.63
🥇 efficient_thread_block_mapping_base 0.01 4.05 2.63
🥇 warp_divergence_optimized_base 0.01 4.05 2.63
🥇 warp_level_fused_kernel_base 0.01 4.05 2.63
🥇 shared_mem_tiled_base 0.01 4.05 2.63
🥇 tiled_sharedmem_optimized_base 0.01 4.05 2.63
🥇 warp_level_reduction_kernel_base 0.01 4.05 2.63
🥇 strided_thread_blocks_base_base 0.01 4.05 2.63
🥇 optimized_block_size_base 0.01 4.05 2.63
🥇 double_buffered_tiled_kernel_base 0.01 4.05 2.63
🥇 coalesced_memory_matmul_base_base 0.01 4.05 2.63
🥇 tiled_matmul_shared_mem_base 0.01 4.05 2.63
🥇 optimized_tiled_2d_base 0.01 4.05 2.63
🥇 matmul_1d_thread_mapping_base 0.01 4.05 2.63
🥇 modularized_matmul_ops_base 0.01 4.05 2.63

#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define TILE_WIDTH 16

// This kernel uses 2D grid and 2D block indexing to map the output matrix (batch_size x out_features)
// into tiles of size TILE_WIDTH x TILE_WIDTH. Each block computes one tile of the output using shared memory
// to load tiles of the input and weight matrices. The thread indices are mapped so that threadIdx.x corresponds
// to the row within a tile (from the batch dimension) and threadIdx.y corresponds to the column within a tile
// (from the output feature dimension). This ensures efficient and correct mapping of threads to problem domains,
// minimizing divergence and promoting coalesced memory accesses.
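//
// Worked example of the mapping (with TILE_WIDTH = 16 as defined above): the
// thread with threadIdx = (3, 5) in block blockIdx = (2, 1) computes
// row = 2 * 16 + 3 = 35 and col = 1 * 16 + 5 = 21, i.e. output[35][21].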

template <typename scalar_t>
__global__ void efficient_indexing_tile_kernel(
    const scalar_t* __restrict__ input,   // [batch_size, in_features]
    const scalar_t* __restrict__ weight,  // [out_features, in_features]
    const scalar_t* __restrict__ bias,    // [out_features]
    scalar_t* __restrict__ output,
    int batch_size,
    int in_features,
    int out_features,
    float subtract_value,
    float multiply_value) {

    // Compute global row and column indices
    int row = blockIdx.x * TILE_WIDTH + threadIdx.x; // batch dimension
    int col = blockIdx.y * TILE_WIDTH + threadIdx.y; // output feature dimension

    scalar_t sum = 0;

    // Allocate shared memory for a tile of input and weight
    __shared__ scalar_t shared_input[TILE_WIDTH][TILE_WIDTH];
    __shared__ scalar_t shared_weight[TILE_WIDTH][TILE_WIDTH];

    // Number of tiles to loop over the K-dimension
    int numTiles = (in_features + TILE_WIDTH - 1) / TILE_WIDTH;
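    // For the shapes in this task (in_features = 10, TILE_WIDTH = 16) this is
    // a single tile: numTiles = (10 + 16 - 1) / 16 = 1.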

    for (int t = 0; t < numTiles; t++) {
        // Compute the column index for input tile and corresponding column index for weight
        int input_col = t * TILE_WIDTH + threadIdx.y;
        int weight_col = t * TILE_WIDTH + threadIdx.x;  

        // Load one element of the input tile, if within bounds
        if (row < batch_size && input_col < in_features) {
            shared_input[threadIdx.x][threadIdx.y] = input[row * in_features + input_col];
        } else {
            shared_input[threadIdx.x][threadIdx.y] = 0;
        }
        
        // Load one element of the weight tile, if within bounds
        if (col < out_features && weight_col < in_features) {
            shared_weight[threadIdx.x][threadIdx.y] = weight[col * in_features + weight_col];
        } else {
            shared_weight[threadIdx.x][threadIdx.y] = 0;
        }

        __syncthreads();

        // Compute partial sum for the tile
        #pragma unroll
        for (int k = 0; k < TILE_WIDTH; k++) {
            sum += shared_input[threadIdx.x][k] * shared_weight[k][threadIdx.y];
        }

        __syncthreads();
    }

    // Write back the result if within output bounds, applying bias, subtract, multiply, and ReLU activation.
    if (row < batch_size && col < out_features) {
        sum += bias[col];
        sum = (sum - subtract_value) * multiply_value;
        output[row * out_features + col] = (sum > 0) ? sum : static_cast<scalar_t>(0);
    }
}

// PyTorch forward interface

torch::Tensor forward(
    torch::Tensor input,
    torch::Tensor weight,
    torch::Tensor bias,
    float subtract_value,
    float multiply_value) {

    int batch_size = input.size(0);
    int in_features = input.size(1);
    int out_features = weight.size(0);

    auto output = torch::empty({batch_size, out_features}, input.options());

    // Configure a 2D grid where x-dimension covers the batch and y-dimension covers output features
    dim3 threads(TILE_WIDTH, TILE_WIDTH);
    dim3 blocks(
        (batch_size + TILE_WIDTH - 1) / TILE_WIDTH,
        (out_features + TILE_WIDTH - 1) / TILE_WIDTH
    );
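    // With the sizes used in this task (batch_size = 128, out_features = 5)
    // this yields an 8 x 1 grid of 16 x 16 thread blocks.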

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "efficient_indexing_tile_kernel", ([&] {
        efficient_indexing_tile_kernel<scalar_t><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            weight.data_ptr<scalar_t>(),
            bias.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            batch_size,
            in_features,
            out_features,
            subtract_value,
            multiply_value
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Efficient 2D tile kernel with optimized thread-block mapping");
}
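
One way to build and exercise this extension is PyTorch's JIT loader. A minimal sketch, assuming the source above is saved as efficient_indexing_tile_kernel.cu (the file name is my assumption):

import torch
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source above into a loadable module.
ext = load(
    name="efficient_indexing_tile_kernel",
    sources=["efficient_indexing_tile_kernel.cu"],
)

# Same shapes and constants as the task definition.
x = torch.randn(128, 10, device="cuda")
w = torch.randn(5, 10, device="cuda")
b = torch.randn(5, device="cuda")

out = ext.forward(x, w, b, 2.0, 1.5)
ref = torch.relu((torch.nn.functional.linear(x, w, b) - 2.0) * 1.5)
assert torch.allclose(out, ref, atol=1e-5)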
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.308 inst/cycle 0.000 5
Executed Ipc Elapsed 0.010 inst/cycle 0.000 5
Issue Slots Busy 8.108 % 0.102 5
Issued Ipc Active 0.324 inst/cycle 0.000 5
SM Busy 8.108 % 0.102 5
Memory Throughput 2572725742.488 byte/second 1816434813389615.250 5
Mem Busy 8.378 % 0.017 5
Max Bandwidth 4.318 % 0.005 5
L1/TEX Hit Rate 68.600 % 0.000 5
L2 Hit Rate 100.784 % 0.111 5
Mem Pipes Busy 0.216 % 0.000 5
Warp Cycles Per Issued Instruction 21.786 cycle 0.936 5
Warp Cycles Per Executed Instruction 22.976 cycle 1.043 5
Avg. Active Threads Per Warp 31.730 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.440 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 21.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 10.996 % 0.034 5
Achieved Active Warps Per SM 7.038 warp 0.013 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (11.1%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 385239.26 μs
Device Time 5.57 μs
Self CPU Time 67.75 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 385171.51 μs
Device Time 5.57 μs
Self CPU Time 104.87 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 384913.56 μs
Device Time 0.00 μs
Self CPU Time 107.97 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 384592.56 μs
Device Time 0.00 μs
Self CPU Time 384592.56 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 366232.42 μs
Device Time 16180.79 μs
Self CPU Time 366232.42 μs
Self Device Time 16180.79 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void efficient_indexing_tile_kernel<float>(float const*, float const*, float const*, float*, int, int, int, float, float)
CPU Time 0.00 μs
Device Time 22167.51 μs
Self CPU Time 0.00 μs
Self Device Time 22167.51 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 13897.33 μs
Device Time 32001.74 μs
Self CPU Time 13897.33 μs
Self Device Time 32001.74 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 51426.73 μs
Device Time 482250.95 μs
Self CPU Time 9912.74 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 41515.68 μs
Device Time 482250.95 μs
Self CPU Time 12006.90 μs
Self Device Time 482250.95 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 482250.95 μs
Self CPU Time 0.00 μs
Self Device Time 482250.95 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45290 warnings generated when compiling for host.
Suppressed 45325 warnings (45278 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:16:5: warning: 3 adjacent parameters of 'efficient_indexing_tile_kernel' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
16 | const scalar_t* __restrict__ input, // [batch_size, in_features]
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
17 | const scalar_t* __restrict__ weight, // [out_features, in_features]
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 | const scalar_t* __restrict__ bias, // [out_features]
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:16:34: note: the first parameter in the range is 'input'
16 | const scalar_t* __restrict__ input, // [batch_size, in_features]
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:18:34: note: the last parameter in the range is 'bias'
18 | const scalar_t* __restrict__ bias, // [out_features]
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:22:5: warning: 2 adjacent parameters of 'efficient_indexing_tile_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
22 | int out_features,
| ^~~~~~~~~~~~~~~~~
23 | float subtract_value,
| ~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:22:9: note: the first parameter in the range is 'out_features'
22 | int out_features,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:23:11: note: the last parameter in the range is 'subtract_value'
23 | float subtract_value,
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:23:5: note: 'int' and 'float' may be implicitly converted
23 | float subtract_value,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:27:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
27 | int row = blockIdx.x * TILE_WIDTH + threadIdx.x; // batch dimension
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:28:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
28 | int col = blockIdx.y * TILE_WIDTH + threadIdx.y; // output feature dimension
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:41:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
41 | int input_col = t * TILE_WIDTH + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:42:26: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
42 | int weight_col = t * TILE_WIDTH + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:86:22: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
86 | int batch_size = input.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:87:23: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
87 | int in_features = input.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:88:24: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
88 | int out_features = weight.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b6_s0_efficient_indexing_tile_kernel/base/base.cu:99:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
99 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "efficient_indexing_tile_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^