
The AI CUDA Engineer 👷

16_Matmul_with_transposed_A • tiled_shared_unroll_base_base

Level 1 • Task 16
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
    """
    Performs a single matrix multiplication with transposed A (C = A.T * B).

    Args:
        A: Input tensor of shape (K, M).
        B: Input tensor of shape (K, N).

    Returns:
        Output tensor of shape (M, N).
    """
    return torch.matmul(A.T, B)


class Model(nn.Module):
    """
    Simple model that performs a single matrix multiplication with transposed A (C = A.T * B)
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, A: torch.Tensor, B: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(A, B)


M = 1024
K = 4096
N = 2048


def get_inputs():
    A = torch.randn(K, M)
    B = torch.randn(K, N)
    return [A, B]


def get_init_inputs():
    return []  # No special initialization inputs needed
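
As a quick sanity check, the harness above can be exercised directly; the following minimal sketch (illustrative, not part of the benchmark harness) confirms that the model returns an (M, N) result identical to an explicit transpose-and-matmul:

# Sanity check for the harness above (illustrative only).
model = Model()
A, B = get_inputs()                      # A: (K, M), B: (K, N)
C = model(A, B)                          # computes A.T @ B via module_fn
assert C.shape == (M, N)                 # (1024, 2048)
torch.testing.assert_close(C, A.T @ B)   # matches the explicit reference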
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a single matrix multiplication with transposed A (C = A.T * B)
    """
    def __init__(self):
        super(Model, self).__init__()
    
    def forward(self, A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
        """
        Performs a single matrix multiplication with transposed A (C = A.T * B).

        Args:
            A: Input tensor of shape (K, M).
            B: Input tensor of shape (K, N).

        Returns:
            Output tensor of shape (M, N).
        """
        return torch.matmul(A.T, B)

M = 1024
K = 4096
N = 2048

def get_inputs():
    A = torch.randn(K, M)
    B = torch.randn(K, N)
    return [A, B]

def get_init_inputs():
    return []  # No special initialization inputs needed

Kernel Information

Related Kernels (Level 1, Task 16 • 16_Matmul_with_transposed_A)

Rank  Kernel Name  Runtime (ms)  Speedup (Native)  Speedup (Compile)
🥇 tiled_double_output_base 2.29 0.15 0.17
🥈 pipelined_tiled_matmul_base_base 2.70 0.13 0.15
🥉 hybrid_tiled_linear_matmul_base 2.76 0.13 0.14
4 modular_tiled_matmul_base_base 2.77 0.13 0.14
5 unrolled_tiled_matmul_base_base 2.81 0.13 0.14
6 optimized_tiled_matmul_base 2.81 0.13 0.14
6 tiled_shared_ldg_aligned_base 2.81 0.13 0.14
6 optimized_tiled_matmul_base 2.81 0.13 0.14
6 hybrid_tiling_grid_stride_base 2.81 0.13 0.14
10 syncthreads_optimized_tiling_edit_1 3.00 0.12 0.13
10 atomic_operations_optimized_tiling_base 3.00 0.12 0.13
12 streams_partitioned_matmul_edit_1 3.02 0.12 0.13
13 tiled_shared_unroll_base_base 3.02 0.12 0.13
14 streams_partitioned_matmul_base 3.03 0.12 0.13
15 modular_device_functions_tiling_2_base 3.04 0.12 0.13
15 modular_tiled_kernel_edit_1 3.04 0.12 0.13
15 modular_tiled_kernel_base 3.04 0.12 0.13
18 optimized_matmul_combined_kernel_edit_1 3.04 0.12 0.13
18 tiled_shared_const_memory_base 3.04 0.12 0.13
18 optimized_matmul_combined_kernel_base 3.04 0.12 0.13
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdexcept>

// Tile dimensions and unroll factor
#define TILE_M 16
#define TILE_N 16
#define BLOCK_K 32

// Kernel to compute C = A.T * B using tiled shared memory with loop unrolling
// A: shape (K, M), B: shape (K, N), C: shape (M, N) computed as C[i, j] = sum_{k=0}^{K-1} A[k*M + i] * B[k*N + j]
__global__ void tiledSharedUnrollKernel(const float* __restrict__ A,
                                         const float* __restrict__ B,
                                         float* __restrict__ C,
                                         int K, int M, int N) {
    // Global row and column indices for C
    int row = blockIdx.x * TILE_M + threadIdx.x;  // corresponds to index i
    int col = blockIdx.y * TILE_N + threadIdx.y;  // corresponds to index j

    float sum = 0.0f;

    // Allocate shared memory for a tile of A and B
    // As: stores a tile of A for current k-chunk. Dimensions: BLOCK_K x TILE_M
    // Bs: stores a tile of B for current k-chunk. Dimensions: BLOCK_K x TILE_N
    __shared__ float As[BLOCK_K][TILE_M];
    __shared__ float Bs[BLOCK_K][TILE_N];

    // Loop over k dimension in increments of BLOCK_K
    for (int k0 = 0; k0 < K; k0 += BLOCK_K) {
        // Each block needs to load BLOCK_K * TILE_M elements for A and BLOCK_K * TILE_N elements for B
        int tid = threadIdx.y * blockDim.x + threadIdx.x;  // Unique thread index within the block
        int totalThreads = blockDim.x * blockDim.y;         // should be TILE_M * TILE_N

        // Load tile of A into shared memory
        // A is stored in row-major order with shape (K, M): element A[k, i] is at A[k * M + i]
        for (int index = tid; index < BLOCK_K * TILE_M; index += totalThreads) {
            int t = index / TILE_M;       // local k index within the tile
            int i = index % TILE_M;         // local i index within the tile
            int global_i = blockIdx.x * TILE_M + i;
            int global_k = k0 + t;
            if (global_i < M && global_k < K)
                As[t][i] = A[global_k * M + global_i];
            else
                As[t][i] = 0.0f;
        }

        // Load tile of B into shared memory
        // B is stored in row-major order with shape (K, N): element B[k, j] is at B[k * N + j]
        for (int index = tid; index < BLOCK_K * TILE_N; index += totalThreads) {
            int t = index / TILE_N;       // local k index within the tile
            int j = index % TILE_N;         // local j index within the tile
            int global_j = blockIdx.y * TILE_N + j;
            int global_k = k0 + t;
            if (global_j < N && global_k < K)
                Bs[t][j] = B[global_k * N + global_j];
            else
                Bs[t][j] = 0.0f;
        }

        __syncthreads();  // Ensure the shared memory tiles are loaded before computation

        // Compute the partial dot product for this k-chunk using loop unrolling
        #pragma unroll
        for (int t = 0; t < BLOCK_K; t += 4) {
            sum += As[t][threadIdx.x] * Bs[t][threadIdx.y];
            if (t + 1 < BLOCK_K) sum += As[t + 1][threadIdx.x] * Bs[t + 1][threadIdx.y];
            if (t + 2 < BLOCK_K) sum += As[t + 2][threadIdx.x] * Bs[t + 2][threadIdx.y];
            if (t + 3 < BLOCK_K) sum += As[t + 3][threadIdx.x] * Bs[t + 3][threadIdx.y];
        }

        __syncthreads();  // Synchronize before loading the next tile
    }

    // Write the result to global memory if within bounds
    if (row < M && col < N) {
        C[row * N + col] = sum;
    }
}
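
// Resource summary (assuming the tile macros above; not measured values):
// - each block computes one TILE_M x TILE_N = 16x16 tile of C using 256 threads;
// - shared memory per block: (BLOCK_K*TILE_M + BLOCK_K*TILE_N) floats
//   = (32*16 + 32*16) * 4 bytes = 4 KiB;
// - for this task (K = 4096) the k-loop runs K / BLOCK_K = 128 iterations.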

// PyBind11 forward function exposed to Python
// A: Tensor of shape (K, M) [CUDA, float32]
// B: Tensor of shape (K, N) [CUDA, float32]
// Returns: C, Tensor of shape (M, N) computed as C = A.T * B

torch::Tensor forward(torch::Tensor A, torch::Tensor B) {
    TORCH_CHECK(A.is_cuda(), "Input A must be a CUDA tensor");
    TORCH_CHECK(B.is_cuda(), "Input B must be a CUDA tensor");
    TORCH_CHECK(A.dtype() == torch::kFloat32, "Input A must be float32");
    TORCH_CHECK(B.dtype() == torch::kFloat32, "Input B must be float32");
    // The kernel indexes A and B as dense row-major arrays, so require contiguity.
    TORCH_CHECK(A.is_contiguous(), "Input A must be contiguous");
    TORCH_CHECK(B.is_contiguous(), "Input B must be contiguous");

    int K = A.size(0);
    int M = A.size(1);
    TORCH_CHECK(B.size(0) == K, "Dimension mismatch: A and B must have the same first dimension (K)");
    int N = B.size(1);

    auto C = torch::zeros({M, N}, torch::device(A.device()).dtype(A.dtype()));

    // Define block and grid dimensions
    dim3 block(TILE_M, TILE_N);  // 16x16 threads per block
    dim3 grid((M + TILE_M - 1) / TILE_M,
              (N + TILE_N - 1) / TILE_N);

    const float* A_ptr = A.data_ptr<float>();
    const float* B_ptr = B.data_ptr<float>();
    float* C_ptr = C.data_ptr<float>();

    // Launch the kernel
    tiledSharedUnrollKernel<<<grid, block>>>(A_ptr, B_ptr, C_ptr, K, M, N);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        throw std::runtime_error(cudaGetErrorString(err));
    }

    return C;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Compute C = A.T * B using tiled shared memory with unrolling (CUDA)");
}
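
To run the kernel end to end, the source can be JIT-compiled with PyTorch's C++ extension loader. A minimal sketch follows, assuming the CUDA code above is saved as tiled_matmul.cu (the file and module names here are illustrative):

import torch
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source above into a loadable extension.
ext = load(name="tiled_matmul_ext", sources=["tiled_matmul.cu"])

K, M, N = 4096, 1024, 2048
A = torch.randn(K, M, device="cuda", dtype=torch.float32)
B = torch.randn(K, N, device="cuda", dtype=torch.float32)

C = ext.forward(A, B)  # custom kernel: C = A.T @ B
# Loose tolerances: fp32 accumulation order differs from cuBLAS.
torch.testing.assert_close(C, A.T @ B, rtol=1e-3, atol=1e-3)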
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 2.250 inst/cycle 0.000 5
Executed Ipc Elapsed 2.236 inst/cycle 0.000 5
Issue Slots Busy 56.204 % 0.000 5
Issued Ipc Active 2.250 inst/cycle 0.000 5
SM Busy 56.204 % 0.000 5
Memory Throughput 16382770515.292 byte/second 4007460519725013.500 5
Mem Busy 83.980 % 0.006 5
Max Bandwidth 78.662 % 0.005 5
L1/TEX Hit Rate 0.744 % 0.000 5
L2 Hit Rate 97.220 % 0.063 5
Mem Pipes Busy 78.662 % 0.005 5
Warp Cycles Per Issued Instruction 20.602 cycle 0.000 5
Warp Cycles Per Executed Instruction 20.602 cycle 0.000 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.430 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 6.000 block 0.000 5
Block Limit Shared Mem 12.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 48.000 warp 0.000 5
Theoretical Occupancy 75.000 % 0.000 5
Achieved Occupancy 72.372 % 0.000 5
Achieved Active Warps Per SM 46.316 warp 0.000 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
WRN Occupancy This kernel's theoretical occupancy (75.0%) is limited by the number of required registers. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
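The occupancy warning matches the launch-configuration arithmetic: a 16x16 block holds 8 warps, and the register-imposed limit of 6 blocks per SM caps residency at 48 warps. Assuming a 64-warp-per-SM hardware limit (typical of recent NVIDIA architectures; an assumption here, not profiler output), that yields the reported 75% theoretical occupancy:

warps_per_block = (16 * 16) // 32        # 8 warps in a 256-thread block
blocks_per_sm   = 6                      # "Block Limit Registers" above
max_warps_sm    = 64                     # assumed per-SM hardware limit
occupancy = warps_per_block * blocks_per_sm / max_warps_sm
print(occupancy)                         # 0.75 -> 75% theoretical occupancy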
Operation / Metric Value Unit
aten::to
CPU Time 358647.27 μs
Device Time 5003.68 μs
Self CPU Time 43.85 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 9241925.35 μs
Device Time 261803.42 μs
Self CPU Time 12046.86 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 9229880.07 μs
Device Time 261803.42 μs
Self CPU Time 15994.66 μs
Self Device Time 261803.42 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 9230128.21 μs
Device Time 6758.47 μs
Self CPU Time 9230128.21 μs
Self Device Time 6758.47 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
tiledSharedUnrollKernel(float const*, float const*, float*, int, int, int)
CPU Time 0.00 μs
Device Time 9721154.79 μs
Self CPU Time 0.00 μs
Self Device Time 9721154.79 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceSynchronize
CPU Time 655355.11 μs
Device Time 78.14 μs
Self CPU Time 655355.11 μs
Self Device Time 78.14 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 17462.30 μs
Device Time 13602.13 μs
Self CPU Time 17462.30 μs
Self Device Time 13602.13 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 248717.13 μs
Self CPU Time 0.00 μs
Self Device Time 248717.13 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45287 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:13:41: warning: 2 adjacent parameters of 'tiledSharedUnrollKernel' of similar type ('const float *__restrict') are easily swappable by mistake [bugprone-easily-swappable-parameters]
13 | __global__ void tiledSharedUnrollKernel(const float* __restrict__ A,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
14 | const float* __restrict__ B,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:13:67: note: the first parameter in the range is 'A'
13 | __global__ void tiledSharedUnrollKernel(const float* __restrict__ A,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:14:68: note: the last parameter in the range is 'B'
14 | const float* __restrict__ B,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:18:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
18 | int row = blockIdx.x * TILE_M + threadIdx.x; // corresponds to index i
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:19:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
19 | int col = blockIdx.y * TILE_N + threadIdx.y; // corresponds to index j
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:32:19: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
32 | int tid = threadIdx.y * blockDim.x + threadIdx.x; // Unique thread index within the block
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:33:28: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
33 | int totalThreads = blockDim.x * blockDim.y; // should be TILE_M * TILE_N
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:40:28: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
40 | int global_i = blockIdx.x * TILE_M + i;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:53:28: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
53 | int global_j = blockIdx.y * TILE_N + j;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:86:37: warning: the parameter 'A' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
86 | torch::Tensor forward(torch::Tensor A, torch::Tensor B) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:86:54: warning: the parameter 'B' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
86 | torch::Tensor forward(torch::Tensor A, torch::Tensor B) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:92:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
92 | int K = A.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:93:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
93 | int M = A.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_16/b5_s3_tiled_shared_unroll_base/base/base.cu:95:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
95 | int N = B.size(1);
| ^