
The AI CUDA Engineer 👷

6_Matmul_with_large_K_dimension_6_matmul_no_divergence_base

Level 1 • Task 6

Task Definition (PyTorch)

import torch
import torch.nn as nn


def module_fn(A, B):
    """
    Performs a single matrix multiplication (C = A * B) with a large K dimension.

    Args:
        A: Input tensor of shape (M, K)
        B: Input tensor of shape (K, N)

    Returns:
        Output tensor of shape (M, N)
    """
    return torch.matmul(A, B)


class Model(nn.Module):
    """
    Simple model that performs a single matrix multiplication (C = A * B) with a large K dimension
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, A: torch.Tensor, B: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(A, B)


M = 256
N = 256
K = 131072


def get_inputs():
    A = torch.randn(M, K)
    B = torch.randn(K, N)
    return [A, B]


def get_init_inputs():
    return []  # No special initialization inputs needed
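
As a quick sanity check, the task definition above can be exercised end to end. The following is a minimal sketch meant to be appended to the task file; the device fallback and the float64 cross-check are illustrative assumptions, not part of the benchmark harness:

if __name__ == "__main__":
    # Smoke test for the task definition above (hedged sketch; the device
    # choice and the float64 reference are assumptions, not harness behavior).
    device = "cuda" if torch.cuda.is_available() else "cpu"
    A, B = [t.to(device) for t in get_inputs()]
    model = Model().to(device)
    C = model(A, B)
    assert C.shape == (M, N)
    # Compare against a float64 reference to bound fp32 accumulation error
    # over the very large K dimension (131072 summands per output element).
    ref = torch.matmul(A.double(), B.double()).to(C.dtype)
    print("max abs err:", (C - ref).abs().max().item())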

Kernel Information

Related Kernels (Level 1, Task 6 • 6_Matmul_with_large_K_dimension_)

| Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile) |
|------|-------------|--------------|------------------|-------------------|
| 🥇 | double_buffered_matmul_base | 5.11 | 0.07 | 0.11 |
| 🥈 | 6_matmul_multi_stream_base | 5.26 | 0.07 | 0.11 |
| 🥉 | fewer_sync_matmul_edit_1_base | 5.27 | 0.07 | 0.11 |
| 4 | atomic_operations_matmul_edit_1 | 5.27 | 0.07 | 0.11 |
| 5 | 6_matmul_modular_refactored_base | 5.30 | 0.06 | 0.11 |
| 6 | modular_matmul_device_fn_edit_1 | 5.30 | 0.06 | 0.11 |
| 7 | matmul_stream_ldg_base | 5.31 | 0.06 | 0.11 |
| 8 | 6_matmul_modular_device_func_base | 5.31 | 0.06 | 0.11 |
| 9 | 6_matmul_modular_device_base | 5.32 | 0.06 | 0.11 |
| 10 | 6_matmul_no_divergence_base (this page) | 5.32 | 0.06 | 0.11 |
| 11 | 6_matmul_ldg_base | 5.33 | 0.06 | 0.11 |
| 12 | optimized_streamed_tiled_matmul_base | 5.33 | 0.06 | 0.11 |
| 12 | 6_matmul_even_workload_distribution_base | 5.33 | 0.06 | 0.11 |
| 14 | optimized_matmul_kernel_base | 5.34 | 0.06 | 0.11 |
| 15 | grid_stride_matmul_edit_1 | 5.34 | 0.06 | 0.11 |
| 16 | 6_matmul_stride_loops_base | 5.34 | 0.06 | 0.11 |
| 17 | 6_matmul_ldg_128bit_aligned_base | 5.35 | 0.06 | 0.11 |
| 18 | optimized_matmul_kernel_base | 5.35 | 0.06 | 0.11 |
| 19 | unroll_loop_matmul_base | 5.36 | 0.06 | 0.11 |
| 20 | warp_divergence_optimized_matmul_base | 5.37 | 0.06 | 0.11 |

Kernel Source (base.cu)
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define TILE_WIDTH 16

// CUDA kernel for matrix multiplication with minimized warp divergence
// The kernel distinguishes between interior blocks (fully within matrix bounds) and boundary blocks.
// For interior blocks, bounds checks are omitted to ensure uniform control flow and avoid divergent branching.
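// Example: for this task (M = N = 256, TILE_WIDTH = 16) the 16x16 grid divides
// both output dimensions exactly, and K = 131072 is a multiple of TILE_WIDTH,
// so every block takes the unguarded fast path on every tile.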

template <typename scalar_t>
__global__ void matmul_cuda_kernel(const scalar_t* __restrict__ A,
                                     const scalar_t* __restrict__ B,
                                     scalar_t* __restrict__ C,
                                     int M, int K, int N) {
    // Compute global row and column indices
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    scalar_t value = 0;

    // Determine if this block is completely interior (i.e. no boundary checks needed).
    // If (blockIdx.x+1)*TILE_WIDTH <= N and (blockIdx.y+1)*TILE_WIDTH <= M, the entire tile is within bounds.
    bool interior_block = ((blockIdx.x + 1) * TILE_WIDTH <= N) && ((blockIdx.y + 1) * TILE_WIDTH <= M);

    __shared__ scalar_t sA[TILE_WIDTH][TILE_WIDTH];
    __shared__ scalar_t sB[TILE_WIDTH][TILE_WIDTH];

    int num_tiles = (K + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int t = 0; t < num_tiles; ++t) {
        int tiledA_col = t * TILE_WIDTH + threadIdx.x;
        int tiledB_row = t * TILE_WIDTH + threadIdx.y;

        // The unguarded fast path is only safe when this K-tile is also full;
        // if K were not a multiple of TILE_WIDTH, the final tile would read
        // past the end of A and B, so it must take the guarded path below.
        bool full_k_tile = (t + 1) * TILE_WIDTH <= K;

        // For interior blocks with a full K-tile, all accesses are within
        // bounds, so the conditional checks are omitted entirely.
        if (interior_block && full_k_tile) {
            sA[threadIdx.y][threadIdx.x] = __ldg(&A[row * K + tiledA_col]);
            sB[threadIdx.y][threadIdx.x] = __ldg(&B[tiledB_row * N + col]);
        } else {
            // For boundary tiles, use a ternary select for a branchless-like
            // selection that keeps control flow uniform across the warp.
            sA[threadIdx.y][threadIdx.x] = (row < M && tiledA_col < K) ? __ldg(&A[row * K + tiledA_col]) : static_cast<scalar_t>(0);
            sB[threadIdx.y][threadIdx.x] = (col < N && tiledB_row < K) ? __ldg(&B[tiledB_row * N + col]) : static_cast<scalar_t>(0);
        }

        __syncthreads();

        #pragma unroll
        for (int i = 0; i < TILE_WIDTH; ++i) {
            value += sA[threadIdx.y][i] * sB[i][threadIdx.x];
        }
        __syncthreads();
    }

    // Write the computed result if within output bounds
    if (row < M && col < N) {
        C[row * N + col] = value;
    }
}

// Forward function called from Python
torch::Tensor module_fn(torch::Tensor A, torch::Tensor B) {
    TORCH_CHECK(A.is_cuda(), "Input tensor A must be a CUDA tensor");
    TORCH_CHECK(B.is_cuda(), "Input tensor B must be a CUDA tensor");
    // The kernel indexes A and B as dense row-major buffers, so require
    // contiguous inputs rather than silently reading the wrong elements.
    TORCH_CHECK(A.is_contiguous(), "Input tensor A must be contiguous");
    TORCH_CHECK(B.is_contiguous(), "Input tensor B must be contiguous");

    int M = A.size(0);
    int K = A.size(1);
    int N = B.size(1);
    TORCH_CHECK(K == B.size(0), "Inner dimensions of A and B must match");

    auto C = torch::empty({M, N}, A.options());

    dim3 threads(TILE_WIDTH, TILE_WIDTH);
    dim3 blocks((N + TILE_WIDTH - 1) / TILE_WIDTH, (M + TILE_WIDTH - 1) / TILE_WIDTH);

    AT_DISPATCH_FLOATING_TYPES(A.scalar_type(), "matmul_cuda_kernel", ([&] {
        matmul_cuda_kernel<scalar_t><<<blocks, threads>>>(
            A.data_ptr<scalar_t>(),
            B.data_ptr<scalar_t>(),
            C.data_ptr<scalar_t>(),
            M, K, N);
    }));

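    // Note: this full-device synchronization blocks the host until the kernel
    // finishes (visible as the large cudaDeviceSynchronize entry in the
    // profile below); an async-friendly variant would instead check
    // cudaGetLastError() and leave synchronization to the caller.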
    cudaDeviceSynchronize();
    return C;
}

// Pybind11 module binding
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn, "Matrix multiplication with minimized warp divergence");
}
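
For reference, a kernel file like the one above can be compiled and exercised from Python with torch's inline extension loader. This is a minimal sketch, assuming the source is saved as matmul_no_divergence.cu; the file name, module name, and comparison script are illustrative and not part of the benchmark harness:

# Hedged build-and-run sketch; "matmul_no_divergence.cu" is an assumed file name.
import torch
from torch.utils.cpp_extension import load

ext = load(name="matmul_no_divergence",
           sources=["matmul_no_divergence.cu"],
           verbose=True)

A = torch.randn(256, 131072, device="cuda")
B = torch.randn(131072, 256, device="cuda")
C = ext.forward(A, B)  # the pybind binding above exposes "forward"
diff = (C - torch.matmul(A, B)).abs().max()
print(f"max abs diff vs torch.matmul: {diff.item():.3e}")
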
Performance Metrics
| Metric | Value | Unit | Variance | Samples |
|--------|-------|------|----------|---------|
| Executed Ipc Active | 0.760 | inst/cycle | 0.000 | 5 |
| Executed Ipc Elapsed | 0.740 | inst/cycle | 0.000 | 5 |
| Issue Slots Busy | 18.884 | % | 0.000 | 5 |
| Issued Ipc Active | 0.760 | inst/cycle | 0.000 | 5 |
| SM Busy | 18.884 | % | 0.000 | 5 |
| Memory Throughput | 46664206040.824 | byte/second | 1142340236746573.500 | 5 |
| Mem Busy | 42.086 | % | 0.001 | 5 |
| Max Bandwidth | 35.802 | % | 0.001 | 5 |
| L1/TEX Hit Rate | 0.308 | % | 0.001 | 5 |
| L2 Hit Rate | 83.106 | % | 0.023 | 5 |
| Mem Pipes Busy | 33.044 | % | 0.001 | 5 |
| Warp Cycles Per Issued Instruction | 20.428 | cycle | 0.000 | 5 |
| Warp Cycles Per Executed Instruction | 20.428 | cycle | 0.000 | 5 |
| Avg. Active Threads Per Warp | 32.000 | — | 0.000 | 5 |
| Avg. Not Predicated Off Threads Per Warp | 32.000 | — | 0.000 | 5 |
| Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
| Max Cluster Size | 8.000 | block | 0.000 | 5 |
| Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
| Cluster Occupancy | 0.000 | % | 0.000 | 5 |
| Block Limit SM | 32.000 | block | 0.000 | 5 |
| Block Limit Registers | 8.000 | block | 0.000 | 5 |
| Block Limit Shared Mem | 21.000 | block | 0.000 | 5 |
| Block Limit Warps | 8.000 | block | 0.000 | 5 |
| Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
| Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
| Achieved Occupancy | 24.104 | % | 0.000 | 5 |
| Achieved Active Warps Per SM | 15.426 | warp | 0.000 | 5 |
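
For scale, a back-of-the-envelope estimate of arithmetic throughput from the leaderboard runtime (a rough calculation only; the GPU model is not stated on this page, so no comparison against peak is attempted):

# Rough FLOP-rate estimate from the reported 5.32 ms runtime.
M, N, K = 256, 256, 131072
flops = 2 * M * N * K            # one multiply and one add per inner-product term
runtime_s = 5.32e-3              # leaderboard runtime for this kernel
print(f"{flops / runtime_s / 1e12:.2f} TFLOP/s")  # ~3.23 TFLOP/s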
Analysis Rules
| Severity | Rule | Description |
|----------|------|-------------|
| WRN | HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
| INF | CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
| WRN | Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (24.1%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
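
The occupancy gap flagged above is consistent with how small this launch is. A quick launch-geometry check (the grid arithmetic follows from the host code above; the SM count of 108 is an assumption for an A100-class part, not stated on this page):

# Launch-geometry sanity check for the occupancy warning.
import math

M, N, TILE = 256, 256, 16
blocks = math.ceil(M / TILE) * math.ceil(N / TILE)  # 16 x 16 = 256 blocks
warps_per_block = (TILE * TILE) // 32               # 256 threads -> 8 warps
num_sms = 108                                       # assumption: A100-class GPU
print(blocks, warps_per_block)                      # 256 blocks, 8 warps each
print(f"{blocks / num_sms:.1f} blocks/SM")          # ~2.4 resident blocks per SM,
                                                    # far short of the 8 blocks
                                                    # needed for 64 warps/SM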
Operation Timings

(All CPU and device memory-usage counters were reported as 0 B for every operation and are omitted below.)

| Operation | CPU Time (μs) | Device Time (μs) | Self CPU Time (μs) | Self Device Time (μs) |
|-----------|---------------|------------------|--------------------|-----------------------|
| aten::to | 358802.59 | 28271.38 | 55.39 | 0.00 |
| aten::_to_copy | 358747.19 | 28271.38 | 116.86 | 0.00 |
| aten::empty_strided | 329993.74 | 0.00 | 97.88 | 0.00 |
| cudaDeviceGetStreamPriorityRange | 329416.51 | 0.00 | 329416.51 | 0.00 |
| cudaDeviceSynchronize | 8096521.66 | 10157.28 | 8096521.66 | 10157.28 |
| void matmul_cuda_kernel<float>(float const*, float const*, float*, int, int, int) | 0.00 | 8003557.13 | 0.00 | 8003557.13 |
| aten::zero_ | 32175.87 | 116703.42 | 5253.13 | 0.00 |
| aten::fill_ | 26924.75 | 116703.42 | 8375.00 | 116703.42 |
| void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | 0.00 | 116703.42 | 0.00 | 116703.42 |
Status: Completed
45286 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:12:36: warning: 2 adjacent parameters of 'matmul_cuda_kernel' of similar type ('const scalar_t *__restrict') are easily swappable by mistake [bugprone-easily-swappable-parameters]
12 | __global__ void matmul_cuda_kernel(const scalar_t* __restrict__ A,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13 | const scalar_t* __restrict__ B,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:12:65: note: the first parameter in the range is 'A'
12 | __global__ void matmul_cuda_kernel(const scalar_t* __restrict__ A,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:13:67: note: the last parameter in the range is 'B'
13 | const scalar_t* __restrict__ B,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:17:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
17 | int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:18:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
18 | int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:30:26: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
30 | int tiledA_col = t * TILE_WIDTH + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:31:26: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
31 | int tiledB_row = t * TILE_WIDTH + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:63:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
63 | int M = A.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:64:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
64 | int K = A.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:65:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
65 | int N = B.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b5_s3_6_matmul_no_divergence/base/base.cu:73:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
73 | AT_DISPATCH_FLOATING_TYPES(A.scalar_type(), "matmul_cuda_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
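
These diagnostics are mechanical rather than functional: the bugprone-narrowing-conversions warnings, for instance, could be silenced by making the unsigned-to-int conversions explicit (e.g. `int row = static_cast<int>(blockIdx.y * TILE_WIDTH + threadIdx.y);`, a hypothetical fix not present in the ranked kernel), and the easily-swappable-parameters warning merely flags that A and B share the same pointer type in the kernel signature.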