12_Matmul_with_diagonal_matrices_
• diag_matmul_readonly_base
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(A, B):
    """
    Performs a matrix multiplication of a diagonal matrix with another matrix.

    Args:
        A (torch.Tensor): A 1D tensor representing the diagonal of the diagonal matrix. Shape: (N,).
        B (torch.Tensor): A 2D tensor representing the second matrix. Shape: (N, M).

    Returns:
        torch.Tensor: The result of the matrix multiplication. Shape: (N, M).
    """
    return torch.diag(A) @ B


class Model(nn.Module):
    """
    Simple model that performs a matrix multiplication of a diagonal matrix with another matrix.
    C = diag(A) * B
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, A, B, fn=module_fn):
        return fn(A, B)


M = 4096
N = 4096


def get_inputs():
    A = torch.randn(N)
    B = torch.randn(N, M)
    return [A, B]


def get_init_inputs():
    return []  # No special initialization inputs needed
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs a matrix multiplication of a diagonal matrix with another matrix.
    C = diag(A) * B
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, A, B):
        """
        Performs the matrix multiplication.

        Args:
            A (torch.Tensor): A 1D tensor representing the diagonal of the diagonal matrix. Shape: (N,).
            B (torch.Tensor): A 2D tensor representing the second matrix. Shape: (N, M).

        Returns:
            torch.Tensor: The result of the matrix multiplication. Shape: (N, M).
        """
        return torch.diag(A) @ B


M = 4096
N = 4096


def get_inputs():
    A = torch.randn(N)
    B = torch.randn(N, M)
    return [A, B]


def get_init_inputs():
    return []  # No special initialization inputs needed
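Since C = diag(A) @ B only scales row i of B by A[i], the same result can be obtained with a broadcasted elementwise multiply that never materializes the N x N diagonal matrix; this row-scaling view is what the CUDA kernel below implements. A minimal sketch of the equivalence in plain PyTorch (variable names are illustrative):

import torch

N, M = 4096, 4096
A = torch.randn(N)
B = torch.randn(N, M)

# Broadcasting A over the columns of B scales row i of B by A[i],
# which matches diag(A) @ B while touching only O(N*M) memory instead
# of building the dense N x N diagonal matrix first.
C_broadcast = A.unsqueeze(1) * B
C_reference = torch.diag(A) @ B
print(torch.allclose(C_broadcast, C_reference))  # expected: True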
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Kernel: Each block processes one row. Instead of using shared memory and synchronization,
// each thread reads the diagonal element directly from global memory using __ldg to leverage
// the read-only cache. This avoids unnecessary atomics since no race conditions exist, as each
// thread writes to a unique output element.
__global__ void diag_matmul_readonly_kernel(
    const float* __restrict__ A,
    const float* __restrict__ B,
    float* __restrict__ C,
    const int64_t N,
    const int64_t M
) {
    int row = blockIdx.x;

    // Each thread loads the diagonal element independently using the read-only cache
    float a = __ldg(&A[row]);

    int thread_id = threadIdx.x;
    int stride = blockDim.x;

    // Process four elements at a time when possible
    int vec_limit = M / 4;
    const float4* B_vec = reinterpret_cast<const float4*>(B + row * M);
    float4* C_vec = reinterpret_cast<float4*>(C + row * M);
    for (int col = thread_id; col < vec_limit; col += stride) {
        float4 b4 = B_vec[col];
        float4 c4;
        c4.x = a * b4.x;
        c4.y = a * b4.y;
        c4.z = a * b4.z;
        c4.w = a * b4.w;
        C_vec[col] = c4;
    }

    // Process any remaining elements
    int offset = vec_limit * 4;
    for (int col = thread_id; col < (M - offset); col += stride) {
        int index = row * M + offset + col;
        C[index] = a * B[index];
    }
}

// Forward function that wraps our CUDA kernel
at::Tensor forward(at::Tensor A, at::Tensor B) {
    TORCH_CHECK(A.dim() == 1, "A must be a 1D tensor");
    TORCH_CHECK(B.dim() == 2, "B must be a 2D tensor");
    TORCH_CHECK(A.size(0) == B.size(0),
                "Dimension mismatch: A.size(0) must match B.size(0)");

    // Ensure inputs are contiguous
    A = A.contiguous();
    B = B.contiguous();

    int64_t N = A.size(0);
    int64_t M = B.size(1);

    // Create an output tensor with the same type and device as B
    auto C = torch::empty({N, M}, B.options());

    // Launch kernel with one block per row, using 256 threads per block
    const int threads = 256;
    diag_matmul_readonly_kernel<<<N, threads>>>(
        A.data_ptr<float>(),
        B.data_ptr<float>(),
        C.data_ptr<float>(),
        N,
        M
    );

    return C;
}

// Pybind11 module definition
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Diagonal matrix multiplication using direct read-only memory access");
}
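One way to exercise this extension end to end is to JIT-compile it with torch.utils.cpp_extension.load and compare against the PyTorch reference. This is a hypothetical sketch: the file name diag_matmul_readonly.cu and the extension name are assumptions, not part of the listing above. Note that the wrapper passes data_ptr<float> straight to the kernel, so float32 CUDA inputs are assumed here.

import torch
from torch.utils.cpp_extension import load

# Build the extension from the CUDA source above (assumed saved as
# diag_matmul_readonly.cu); the PYBIND11_MODULE block exposes `forward`.
ext = load(name="diag_matmul_readonly", sources=["diag_matmul_readonly.cu"], verbose=False)

N, M = 4096, 4096
A = torch.randn(N, device="cuda", dtype=torch.float32)
B = torch.randn(N, M, device="cuda", dtype=torch.float32)

C_kernel = ext.forward(A, B)
C_reference = torch.diag(A) @ B  # same reference as the Python model above
print(torch.allclose(C_kernel, C_reference))  # expected: True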
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.404 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.376 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 10.106 | % | 0.007 | 5 |
Issued Ipc Active | 0.404 | inst/cycle | 0.000 | 5 |
SM Busy | 10.106 | % | 0.007 | 5 |
Memory Throughput | 2652283997707.720 | byte/second | 535470542620967895040.000 | 5 |
Mem Busy | 46.560 | % | 0.179 | 5 |
Max Bandwidth | 79.192 | % | 0.503 | 5 |
L1/TEX Hit Rate | 0.680 | % | 0.000 | 5 |
L2 Hit Rate | 49.858 | % | 0.001 | 5 |
Mem Pipes Busy | 5.854 | % | 0.003 | 5 |
Warp Cycles Per Issued Instruction | 131.482 | cycle | 0.324 | 5 |
Warp Cycles Per Executed Instruction | 131.644 | cycle | 0.325 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 29.810 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 83.540 | % | 0.016 | 5 |
Achieved Active Warps Per SM | 53.464 | warp | 0.007 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (83.5%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 375671.50 | μs |
Device Time | 6957.17 | μs |
Self CPU Time | 44.13 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 375627.38 | μs |
Device Time | 6957.17 | μs |
Self CPU Time | 127.53 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 368269.64 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 85.21 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 366224.98 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 366224.98 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 619244.99 | μs |
Device Time | 16612.42 | μs |
Self CPU Time | 619244.99 | μs |
Self Device Time | 16612.42 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
diag_matmul_readonly_kernel(float const*, float const*, float*, long, long) | ||
CPU Time | 0.00 | μs |
Device Time | 303108.71 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 303108.71 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 20072.53 | μs |
Device Time | 33140.27 | μs |
Self CPU Time | 20072.53 | μs |
Self Device Time | 33140.27 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 260689.55 | μs |
Device Time | 497080.52 | μs |
Self CPU Time | 16516.51 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 244174.57 | μs |
Device Time | 497080.52 | μs |
Self CPU Time | 16060.52 | μs |
Self Device Time | 497080.52 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 497158.15 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 497158.15 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |