45_Gemm_Sigmoid_Sum_LogSumExp
• optimized_sync_fusion_kernel_base
import torch
import torch.nn as nn
import torch.nn.functional as F

def module_fn(
    x: torch.Tensor,
    linear1_weight: torch.Tensor,
    linear1_bias: torch.Tensor,
) -> torch.Tensor:
    """
    Performs a matrix multiplication, applies Sigmoid, sums the result, and computes LogSumExp.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, input_size)
        linear1_weight (torch.Tensor): Weight matrix of the linear layer, shape (hidden_size, input_size)
        linear1_bias (torch.Tensor): Bias vector of the linear layer, shape (hidden_size)

    Returns:
        torch.Tensor: Scalar output after the linear layer, sigmoid, row-wise sum, and logsumexp
    """
    x = F.linear(x, linear1_weight, linear1_bias)  # (batch_size, hidden_size)
    x = torch.sigmoid(x)
    x = torch.sum(x, dim=1)        # (batch_size,)
    x = torch.logsumexp(x, dim=0)  # scalar
    return x

class Model(nn.Module):
    """
    Model that performs a matrix multiplication (Gemm), applies Sigmoid, sums the result,
    and computes the LogSumExp.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Model, self).__init__()
        lin1 = nn.Linear(input_size, hidden_size)
        self.linear1_weight = nn.Parameter(lin1.weight)
        self.linear1_bias = nn.Parameter(
            lin1.bias
            + torch.randn(
                lin1.bias.shape, device=lin1.bias.device, dtype=lin1.bias.dtype
            )
            * 0.02
        )

    def forward(self, x, fn=module_fn):
        return fn(x, self.linear1_weight, self.linear1_bias)

batch_size = 128
input_size = 10
hidden_size = 20
output_size = 5

def get_inputs():
    return [torch.randn(batch_size, input_size)]

def get_init_inputs():
    return [input_size, hidden_size, output_size]
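For orientation, the whole pipeline can be traced in plain PyTorch before looking at the fused CUDA version below; a minimal sketch with illustrative tensor names:

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(128, 10)  # (batch_size, input_size)
w = torch.randn(20, 10)   # (hidden_size, input_size)
b = torch.randn(20)       # (hidden_size,)

# Per-row quantity the fused CUDA kernel produces in one launch:
# sigmoid(x @ w.T + b), summed over the hidden dimension
row_sum = torch.sigmoid(F.linear(x, w, b)).sum(dim=1)  # shape (128,)

# Final scalar: logsumexp over the batch dimension
out = torch.logsumexp(row_sum, dim=0)
print(out.shape, out.item())  # torch.Size([]) and a scalar value
```

Fusing the GEMM, sigmoid, and row sum into one kernel avoids materializing the intermediate (batch_size, hidden_size) activation tensor in global memory.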
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs a matrix multiplication (Gemm), applies Sigmoid, sums the result,
    and computes the LogSumExp.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Model, self).__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear1.bias = nn.Parameter(
            self.linear1.bias
            + torch.randn(
                self.linear1.bias.shape,
                device=self.linear1.bias.device,
                dtype=self.linear1.bias.dtype,
            )
            * 0.02
        )

    def forward(self, x):
        x = self.linear1(x)
        x = torch.sigmoid(x)
        x = torch.sum(x, dim=1)
        x = torch.logsumexp(x, dim=0)
        return x

batch_size = 128
input_size = 10
hidden_size = 20
output_size = 5

def get_inputs():
    return [torch.randn(batch_size, input_size)]

def get_init_inputs():
    return [input_size, hidden_size, output_size]
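The `logsumexp_kernel` in the CUDA source below uses the standard max-shift formulation, `logsumexp(v) = max(v) + log(sum(exp(v - max(v))))`, so that every `expf` argument is at most zero and cannot overflow. A small sketch of the identity (plain Python, names illustrative):

```python
import math

def logsumexp(values):
    # Max-shift trick: subtract the maximum before exponentiating so that
    # every exp() argument is <= 0, preventing overflow.
    m = max(values)
    return m + math.log(sum(math.exp(v - m) for v in values))

# A naive sum(exp(v)) would overflow for values near 1000;
# the shifted form computes the result safely (~1001.10 here).
print(logsumexp([1000.0, 1000.5, 999.0]))
```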
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>

// Fused kernel: matrix multiplication with sigmoid activation and row-wise sum reduction.
// Each block processes one row of the output.
__global__ void fused_gemm_sigmoid_sum_kernel(
    const float* __restrict__ input,
    const float* __restrict__ weight,
    const float* __restrict__ bias,
    float* __restrict__ row_sum,
    const int batch_size,
    const int input_size,
    const int hidden_size
) {
    int row = blockIdx.x;  // one block per row
    if (row >= batch_size) return;

    extern __shared__ float sdata[];  // shared memory for the block-wide reduction
    int tid = threadIdx.x;
    int blockSize = blockDim.x;
    float local_sum = 0.0f;

    // Each thread processes a subset of the hidden-dimension columns
    for (int col = tid; col < hidden_size; col += blockSize) {
        float dot = 0.0f;
        // Dot product of the input row with weight row `col`
        // (weight is stored row-major as [hidden_size, input_size])
        for (int k = 0; k < input_size; ++k) {
            dot += input[row * input_size + k] * weight[col * input_size + k];
        }
        dot += bias[col];
        // Apply sigmoid activation
        float sig = 1.0f / (1.0f + expf(-dot));
        local_sum += sig;
    }

    sdata[tid] = local_sum;
    __syncthreads();

    // Tree reduction of partial sums in shared memory (blockDim.x must be a power of two)
    for (unsigned int s = blockSize / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();  // every thread must reach the barrier, even those not reducing
    }

    if (tid == 0) {
        row_sum[row] = sdata[0];
    }
}
// LogSumExp kernel: reduces the array of row sums to a single scalar using the
// numerically stable max-shift formulation and shared-memory reductions.
__global__ void logsumexp_kernel(
    const float* __restrict__ input,
    float* __restrict__ output,
    const int size
) {
    extern __shared__ float sdata[];
    int tid = threadIdx.x;

    // Pass 1: each thread finds the maximum over its strided slice of the input
    float max_val = -INFINITY;
    for (int i = tid; i < size; i += blockDim.x) {
        float val = input[i];
        max_val = (val > max_val ? val : max_val);
    }
    sdata[tid] = max_val;
    __syncthreads();

    // Tree reduction for the maximum
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] = (sdata[tid] > sdata[tid + s] ? sdata[tid] : sdata[tid + s]);
        }
        __syncthreads();
    }
    float shared_max = sdata[0];
    __syncthreads();  // ensure every thread has read the max before sdata is reused

    // Pass 2: sum exp(value - max) in parallel
    float exp_sum = 0.0f;
    for (int i = tid; i < size; i += blockDim.x) {
        exp_sum += expf(input[i] - shared_max);
    }
    sdata[tid] = exp_sum;
    __syncthreads();

    // Tree reduction for the sum of exponentials
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }

    if (tid == 0) {
        output[0] = logf(sdata[0]) + shared_max;
    }
}
// The forward function launches the fused kernel and then the logsumexp kernel
torch::Tensor forward(
    torch::Tensor input,
    torch::Tensor weight,
    torch::Tensor bias
) {
    // The kernels index raw float pointers, so guard against non-CUDA or
    // non-contiguous inputs that would silently produce wrong results
    TORCH_CHECK(input.is_cuda() && weight.is_cuda() && bias.is_cuda(),
                "all tensors must be CUDA tensors");
    TORCH_CHECK(input.is_contiguous() && weight.is_contiguous() && bias.is_contiguous(),
                "all tensors must be contiguous");

    const int batch_size = input.size(0);
    const int input_size = input.size(1);
    const int hidden_size = weight.size(0);

    auto options = torch::TensorOptions()
        .dtype(input.dtype())
        .device(input.device());

    // Per-row sums and the final output (returned as a 1-element tensor)
    auto row_sum = torch::empty({batch_size}, options);
    auto final_output = torch::empty({1}, options);

    // Launch the fused kernel: one block per row, 128 threads per block
    const int threadsPerBlock = 128;
    dim3 grid(batch_size);
    fused_gemm_sigmoid_sum_kernel<<<grid, threadsPerBlock, threadsPerBlock * sizeof(float)>>>(
        input.data_ptr<float>(),
        weight.data_ptr<float>(),
        bias.data_ptr<float>(),
        row_sum.data_ptr<float>(),
        batch_size,
        input_size,
        hidden_size
    );

    // Launch the logsumexp kernel: a single block of 256 threads reduces the row sums
    const int logThreads = 256;
    logsumexp_kernel<<<1, logThreads, logThreads * sizeof(float)>>>(
        row_sum.data_ptr<float>(),
        final_output.data_ptr<float>(),
        batch_size
    );

    return final_output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Fused GEMM + Sigmoid + row-sum + LogSumExp forward pass");
}
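A hedged usage sketch: the extension can be JIT-compiled with `torch.utils.cpp_extension.load` and checked against the PyTorch reference. The file name `fused_ext.cu` and module name are assumptions for illustration:

```python
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Hypothetical names; the CUDA source above would be saved as fused_ext.cu
ext = load(name="fused_ext", sources=["fused_ext.cu"], verbose=True)

x = torch.randn(128, 10, device="cuda")
w = torch.randn(20, 10, device="cuda")
b = torch.randn(20, device="cuda")

ref = torch.logsumexp(torch.sigmoid(F.linear(x, w, b)).sum(dim=1), dim=0)
out = ext.forward(x, w, b)  # returns a 1-element tensor, not a 0-dim scalar

print(torch.allclose(out.squeeze(), ref, atol=1e-5))
```

Note the squeeze: the extension returns a tensor of shape (1,), while `torch.logsumexp(..., dim=0)` on a 1-D tensor returns a 0-dim scalar.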
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.416 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.000 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 10.642 | % | 0.041 | 5 |
Issued Ipc Active | 0.426 | inst/cycle | 0.000 | 5 |
SM Busy | 10.642 | % | 0.041 | 5 |
Memory Throughput | 979937434.830 | byte/second | 108601134539789.344 | 5 |
Mem Busy | 7.166 | % | 0.009 | 5 |
Max Bandwidth | 3.638 | % | 0.002 | 5 |
L1/TEX Hit Rate | 48.480 | % | 0.000 | 5 |
L2 Hit Rate | 101.776 | % | 0.027 | 5 |
Mem Pipes Busy | 0.040 | % | 0.000 | 5 |
Warp Cycles Per Issued Instruction | 18.446 | cycle | 0.061 | 5 |
Warp Cycles Per Executed Instruction | 18.918 | cycle | 0.064 | 5 |
Avg. Active Threads Per Warp | 30.900 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 24.180 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 16.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 16.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 12.144 | % | 0.000 | 5 |
Achieved Active Warps Per SM | 7.774 | warp | 0.000 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (12.1%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
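The occupancy numbers above are internally consistent: with 64 theoretical active warps per SM, the reported 12.144% achieved occupancy corresponds to the reported ~7.774 achieved active warps. A quick arithmetic check (values taken from the table above):

```python
theoretical_warps = 64        # "Theoretical Active Warps per SM"
achieved_occupancy = 0.12144  # "Achieved Occupancy" = 12.144 %

# ~7.77, matching "Achieved Active Warps Per SM" (7.774)
print(theoretical_warps * achieved_occupancy)
```

The low achieved occupancy is unsurprising for this workload: the batch is only 128 rows, so only 128 small blocks are in flight, and the single-block logsumexp launch occupies one SM.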
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 175600.71 | μs |
Device Time | 2.78 | μs |
Self CPU Time | 63.08 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 175537.63 | μs |
Device Time | 2.78 | μs |
Self CPU Time | 139.85 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 175213.23 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 142.50 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 174888.83 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 174888.83 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 481538.80 | μs |
Device Time | 28986.67 | μs |
Self CPU Time | 481538.80 | μs |
Self Device Time | 28986.67 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 21334.06 | μs |
Device Time | 27356.80 | μs |
Self CPU Time | 21334.06 | μs |
Self Device Time | 27356.80 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 65204.51 | μs |
Device Time | 623263.52 | μs |
Self CPU Time | 12952.28 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 52253.98 | μs |
Device Time | 623263.52 | μs |
Self CPU Time | 16190.54 | μs |
Self Device Time | 623263.52 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 623341.02 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 623341.02 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
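Operator-level timings like those in the table above can be collected with `torch.profiler`; a minimal sketch, reusing the hypothetical `fused_ext` module from the usage example earlier:

```python
import torch
from torch.profiler import profile, ProfilerActivity
from torch.utils.cpp_extension import load

ext = load(name="fused_ext", sources=["fused_ext.cu"])  # hypothetical names, as above

x = torch.randn(128, 10, device="cuda")
w = torch.randn(20, 10, device="cuda")
b = torch.randn(20, device="cuda")

# Record CPU- and CUDA-side activity for one forward call
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
    ext.forward(x, w, b)

print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
```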