The AI CUDA Engineer 👷

3_Batched_matrix_multiplication • optimized_bmm_kernel_base

Level 1 • Task 3
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(A: torch.Tensor, B: torch.Tensor):
    """
    Performs batched matrix multiplication (C = A * B) where A, B, and C have the same batch dimension.

    Args:
        A: Input tensor of shape (batch_size, m, k).
        B: Input tensor of shape (batch_size, k, n).

    Returns:
        C: Output tensor of shape (batch_size, m, n).
    """
    return torch.bmm(A, B)


class Model(nn.Module):
    """
    Performs batched matrix multiplication (C = A * B) where A, B, and C have the same batch dimension.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, A: torch.Tensor, B: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(A, B)


batch_size = 128
m = 128
k = 256
n = 512


def get_inputs():
    A = torch.randn(batch_size, m, k)
    B = torch.randn(batch_size, k, n)
    return [A, B]


def get_init_inputs():
    return []  # No special initialization inputs needed
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Performs batched matrix multiplication (C = A * B) where A, B, and C have the same batch dimension.
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
        """
        Performs batched matrix multiplication.

        Args:
            A: Input tensor of shape (batch_size, m, k).
            B: Input tensor of shape (batch_size, k, n).

        Returns:
            C: Output tensor of shape (batch_size, m, n).
        """
        return torch.bmm(A, B)

batch_size = 128
m = 128
k = 256
n = 512

def get_inputs():
    A = torch.randn(batch_size, m, k)
    B = torch.randn(batch_size, k, n)
    return [A, B]

def get_init_inputs():
    return []  # No special initialization inputs needed
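
A minimal sketch (not part of the original listing, and assuming a CUDA device is available) of how the reference model and input helpers above might be exercised directly:

if __name__ == "__main__":
    # Build the reference model and move the generated inputs to the GPU.
    model = Model().cuda()
    A, B = [t.cuda() for t in get_inputs()]
    C = model(A, B)
    print(C.shape)  # expected: torch.Size([128, 128, 512])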

Kernel Information

Related Kernels (Level 1, Task 3 • 3_Batched_matrix_multiplication)

Rank  Kernel Name  Runtime (ms)  Speedup (vs. Native)  Speedup (vs. Compile)
🥇 bmm_tiled_shared_memory_optimized_edit_1 0.51 0.25 0.35
🥈 bmm_optimized_sync_reduction_base 0.51 0.25 0.35
🥈 optimized_bmm_kernel_base 0.51 0.25 0.35
🥈 optimized_bmm_kernel_base 0.51 0.25 0.35
🥈 bmm_warp_divergence_reduction_base 0.51 0.25 0.35
🥈 bmm_manual_unroll_base 0.51 0.25 0.35
🥈 bmm_unroll_pragma_optimized_base 0.51 0.25 0.35
8 bmm_optimized_tiling_base 0.51 0.25 0.35
8 bmm_thread_block_optimization_base 0.51 0.25 0.35
10 bmm_double_buffer_min_sync_base 0.52 0.25 0.35
11 bmm_cuda_streams_pipelining_base 0.52 0.25 0.35
12 aligned_ldg_bmm_opt_base 0.52 0.25 0.35
12 bmm_ldg_aligned_base 0.52 0.25 0.35
14 bmm_warp_uniform_base_base 0.52 0.25 0.35
14 bmm_shared_memory_optimized_base 0.52 0.25 0.35
16 bmm_thread_block_optimization_base 0.52 0.25 0.35
16 tiled_bmm_optimal_config_edit_1 0.52 0.25 0.35
16 bmm_warp_uniform_base 0.52 0.25 0.35
16 tiled_batchptr_unroll_base 0.52 0.25 0.35
16 warp_divergence_reduction_edit_1 0.52 0.25 0.35
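
Reading the speedup columns: if speedup is taken as baseline runtime divided by kernel runtime (the usual convention), a Native speedup of 0.25 at 0.51 ms implies a torch.bmm baseline of roughly 0.25 × 0.51 ≈ 0.13 ms, i.e. these kernels run about 4× slower than the native cuBLAS path.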
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// 32x32 tile per thread block: one thread per output element; the two shared-memory tiles below total 8 KB per block
#define TILE_SIZE 32

__global__ void optimized_bmm_kernel(
    const float* __restrict__ A,
    const float* __restrict__ B,
    float* __restrict__ C,
    int batch_size,
    int M,
    int K,
    int N
) {
    int b = blockIdx.z;
    int row = blockIdx.y * TILE_SIZE + threadIdx.y;
    int col = blockIdx.x * TILE_SIZE + threadIdx.x;
    
    __shared__ float As[TILE_SIZE][TILE_SIZE];
    __shared__ float Bs[TILE_SIZE][TILE_SIZE];
    
    float sum = 0.0f;
    
    // Base pointers for this batch's M x K and K x N matrices
    const float* batch_A = A + b * M * K;
    const float* batch_B = B + b * K * N;
    
    int numTiles = (K + TILE_SIZE - 1) / TILE_SIZE;
    for (int t = 0; t < numTiles; t++) {
        int tiledCol = t * TILE_SIZE + threadIdx.x;
        int tiledRow = t * TILE_SIZE + threadIdx.y;

        // Load tiles using ternary operators to reduce warp divergence
        As[threadIdx.y][threadIdx.x] = (row < M && tiledCol < K) ? batch_A[row * K + tiledCol] : 0.0f;
        Bs[threadIdx.y][threadIdx.x] = (tiledRow < K && col < N) ? batch_B[tiledRow * N + col] : 0.0f;
        
        __syncthreads();

        // Unroll the loop for better performance
        #pragma unroll
        for (int k = 0; k < TILE_SIZE; k++) {
            sum += As[threadIdx.y][k] * Bs[k][threadIdx.x];
        }

        // The trailing sync can be skipped on the last tile: no further shared-memory loads follow.
        if (t < numTiles - 1) {
            __syncthreads();
        }
    }
    
    if (row < M && col < N) {
        C[b * M * N + row * N + col] = sum;
    }
}

torch::Tensor forward_optimized_bmm(torch::Tensor A, torch::Tensor B) {
    TORCH_CHECK(A.is_cuda(), "A must be a CUDA tensor");
    TORCH_CHECK(B.is_cuda(), "B must be a CUDA tensor");
    TORCH_CHECK(A.dim() == 3, "A must be 3D");
    TORCH_CHECK(B.dim() == 3, "B must be 3D");
    // The kernel indexes A and B as dense row-major arrays of float, so require contiguous float32 inputs.
    TORCH_CHECK(A.is_contiguous() && B.is_contiguous(), "A and B must be contiguous");
    TORCH_CHECK(A.scalar_type() == torch::kFloat32 && B.scalar_type() == torch::kFloat32, "A and B must be float32");
    TORCH_CHECK(A.size(0) == B.size(0), "Batch sizes must match");
    TORCH_CHECK(A.size(2) == B.size(1), "Inner dimensions (K) must match");

    int batch_size = A.size(0);
    int M = A.size(1);
    int K = A.size(2);
    int N = B.size(2);

    auto options = torch::TensorOptions().dtype(A.dtype()).device(A.device());
    auto C = torch::zeros({batch_size, M, N}, options);

    dim3 block(TILE_SIZE, TILE_SIZE);
    dim3 grid((N + TILE_SIZE - 1) / TILE_SIZE, 
              (M + TILE_SIZE - 1) / TILE_SIZE, 
              batch_size);

    optimized_bmm_kernel<<<grid, block>>>(
        A.data_ptr<float>(),
        B.data_ptr<float>(),
        C.data_ptr<float>(),
        batch_size, M, K, N
    );

    return C;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward_optimized_bmm, "Optimized batched matrix multiplication (CUDA)");
}
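
A minimal sketch (assuming the listing above is saved as optimized_bmm.cu and a CUDA toolchain and device are available) of how the extension could be JIT-compiled with torch.utils.cpp_extension.load and checked against torch.bmm:

import torch
from torch.utils.cpp_extension import load

# Compile the CUDA source above into a loadable extension (hypothetical file name).
ext = load(name="optimized_bmm", sources=["optimized_bmm.cu"], verbose=True)

A = torch.randn(128, 128, 256, device="cuda")
B = torch.randn(128, 256, 512, device="cuda")

C_ref = torch.bmm(A, B)
C_ker = ext.forward(A, B)

# Accumulation order differs from cuBLAS, so compare with a small tolerance.
print(torch.allclose(C_ref, C_ker, atol=1e-3, rtol=1e-3))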
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 1.770 inst/cycle 0.000 5
Executed Ipc Elapsed 1.742 inst/cycle 0.000 5
Issue Slots Busy 44.162 % 0.000 5
Issued Ipc Active 1.770 inst/cycle 0.000 5
SM Busy 44.162 % 0.000 5
Memory Throughput 184439342546.174 byte/second 110433926180996192.000 5
Mem Busy 87.102 % 0.001 5
Max Bandwidth 82.586 % 0.001 5
L1/TEX Hit Rate 0.016 % 0.000 5
L2 Hit Rate 73.256 % 0.066 5
Mem Pipes Busy 72.932 % 0.001 5
Warp Cycles Per Issued Instruction 35.542 cycle 0.000 5
Warp Cycles Per Executed Instruction 35.544 cycle 0.000 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 31.850 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 2.000 block 0.000 5
Block Limit Shared Mem 3.000 block 0.000 5
Block Limit Warps 2.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 98.154 % 0.000 5
Achieved Active Warps Per SM 62.820 warp 0.000 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
INF Occupancy This kernel's theoretical occupancy is not impacted by any block limit.
Operation / Metric Value Unit
aten::to
CPU Time 424147.45 μs
Device Time 8713.70 μs
Self CPU Time 44.76 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zeros
CPU Time 1140050.93 μs
Device Time 192168.76 μs
Self CPU Time 30268.21 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 9174986.09 μs
Device Time 1460832.98 μs
Self CPU Time 58983.83 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 9116004.30 μs
Device Time 1460832.98 μs
Self CPU Time 82159.51 μs
Self Device Time 1460832.98 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 9097099.83 μs
Device Time 209890.17 μs
Self CPU Time 9097099.83 μs
Self Device Time 209890.17 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
optimized_bmm_kernel(float const*, float const*, float*, int, int, int, int)
CPU Time 0.00 μs
Device Time 8206281.84 μs
Self CPU Time 0.00 μs
Self Device Time 8206281.84 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 1268664.22 μs
Self CPU Time 0.00 μs
Self Device Time 1268664.22 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45290 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:8:5: warning: 2 adjacent parameters of 'optimized_bmm_kernel' of similar type are easily swapped by mistake [bugprone-easily-swappable-parameters]
8 | const float* __restrict__ A,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 | const float* __restrict__ B,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:8:31: note: the first parameter in the range is 'A'
8 | const float* __restrict__ A,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:9:31: note: the last parameter in the range is 'B'
9 | const float* __restrict__ B,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:11:5: warning: 2 adjacent parameters of 'optimized_bmm_kernel' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
11 | int batch_size,
| ^~~~~~~~~~~~~~~
12 | int M,
| ~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:11:9: note: the first parameter in the range is 'batch_size'
11 | int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:12:9: note: the last parameter in the range is 'M'
12 | int M,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:16:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
16 | int b = blockIdx.z;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:17:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
17 | int row = blockIdx.y * TILE_SIZE + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:18:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
18 | int col = blockIdx.x * TILE_SIZE + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:25:28: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
25 | const float* batch_A = A + b * M * K;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:25:32: note: make conversion explicit to silence this warning
25 | const float* batch_A = A + b * M * K;
| ^~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:25:32: note: perform multiplication in a wider type
25 | const float* batch_A = A + b * M * K;
| ^~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:26:28: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
26 | const float* batch_B = B + b * K * N;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:26:32: note: make conversion explicit to silence this warning
26 | const float* batch_B = B + b * K * N;
| ^~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:26:32: note: perform multiplication in a wider type
26 | const float* batch_B = B + b * K * N;
| ^~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:30:24: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
30 | int tiledCol = t * TILE_SIZE + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:31:24: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
31 | int tiledRow = t * TILE_SIZE + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:55:51: warning: the parameter 'A' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
55 | torch::Tensor forward_optimized_bmm(torch::Tensor A, torch::Tensor B) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:55:68: warning: the parameter 'B' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
55 | torch::Tensor forward_optimized_bmm(torch::Tensor A, torch::Tensor B) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:63:22: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
63 | int batch_size = A.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:64:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
64 | int M = A.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:65:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
65 | int K = A.size(2);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_3/b8_s2_optimized_bmm_kernel/base/base.cu:66:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
66 | int N = B.size(2);
| ^