import torch
import torch.nn as nn
import torch.nn.functional as F
def module_fn(
x: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
) -> torch.Tensor:
"""
Applies linear transformation followed by two Mish activations.
Args:
x (torch.Tensor): Input tensor of shape (batch_size, in_features)
weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
bias (torch.Tensor): Bias vector of shape (out_features)
Returns:
torch.Tensor: Output tensor after linear transformation and two Mish activations,
with shape (batch_size, out_features)
"""
x = F.linear(x, weight, bias)
x = F.mish(x)
x = F.mish(x)
return x
class Model(nn.Module):
"""
Simple model that performs a matrix multiplication, applies Mish, and applies Mish again.
"""
def __init__(self, in_features, out_features):
super(Model, self).__init__()
linear = nn.Linear(in_features, out_features)
self.weight = nn.Parameter(linear.weight)
self.bias = nn.Parameter(linear.bias + torch.ones_like(linear.bias) * 0.02)
def forward(self, x, fn=module_fn):
return fn(x, self.weight, self.bias)
batch_size = 128
in_features = 10
out_features = 20
def get_inputs():
return [torch.randn(batch_size, in_features)]
def get_init_inputs():
return [in_features, out_features]
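A quick sanity check of the functional reference above, using only the helpers already defined (shape check only):

```python
# Minimal smoke test: build the model and check the output shape.
model = Model(*get_init_inputs())               # Model(10, 20)
out = model(*get_inputs())                      # forwards through module_fn
assert out.shape == (batch_size, out_features)  # (128, 20)
```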
import torch
import torch.nn as nn
class Model(nn.Module):
"""
Simple model that performs a matrix multiplication, applies Mish, and applies Mish again.
"""
def __init__(self, in_features, out_features):
super(Model, self).__init__()
self.linear = nn.Linear(in_features, out_features)
self.linear.bias = nn.Parameter(self.linear.bias + torch.ones_like(self.linear.bias) * 0.02)
def forward(self, x):
x = self.linear(x)
x = torch.nn.functional.mish(x)
x = torch.nn.functional.mish(x)
return x
batch_size = 128
in_features = 10
out_features = 20
def get_inputs():
return [torch.randn(batch_size, in_features)]
def get_init_inputs():
return [in_features, out_features]
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cmath>
#define TILE_DIM 16
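// Numerically stable softplus: log(1 + e^x) == max(x, 0) + log1p(e^(-|x|)),
// which avoids overflow in expf for large positive x.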
__device__ __forceinline__ float softplus_func(float x) {
float abs_x = fabsf(x);
float z = expf(-abs_x);
return fmaxf(x, 0.0f) + log1pf(z);
}
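// Mish activation: mish(x) = x * tanh(softplus(x)).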
__device__ __forceinline__ float mish_func(float x) {
float sp = softplus_func(x);
return x * tanhf(sp);
}
__global__ void forward_kernel(
const float* __restrict__ x,
const float* __restrict__ weight,
const float* __restrict__ bias,
float* __restrict__ output,
int M,
int K,
int N
) {
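    // Tiled GEMM with fused epilogue: output[M, N] = x[M, K] * weight[N, K]^T + bias,
    // followed by two Mish activations. Each block computes one TILE_DIM x TILE_DIM
    // output tile, staging x and weight tiles through shared memory.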
int row = blockIdx.y * TILE_DIM + threadIdx.y;
int col = blockIdx.x * TILE_DIM + threadIdx.x;
// Align shared memory to 128-bit boundary
__shared__ __align__(16) float As[TILE_DIM][TILE_DIM];
__shared__ __align__(16) float Bs[TILE_DIM][TILE_DIM];
float sum = 0.0f;
    // Number of K tiles, rounded up (same ceiling division as the launch grid)
    int numTiles = (K + TILE_DIM - 1) / TILE_DIM;
for (int t = 0; t < numTiles; t++) {
int tiledCol = t * TILE_DIM + threadIdx.x;
if (row < M && tiledCol < K) {
// Use __ldg for read-only global memory access
As[threadIdx.y][threadIdx.x] = __ldg(&x[row * K + tiledCol]);
} else {
As[threadIdx.y][threadIdx.x] = 0.0f;
}
int tiledRow = t * TILE_DIM + threadIdx.y;
if (col < N && tiledRow < K) {
// Use __ldg for read-only global memory access
Bs[threadIdx.y][threadIdx.x] = __ldg(&weight[col * K + tiledRow]);
} else {
Bs[threadIdx.y][threadIdx.x] = 0.0f;
}
__syncthreads();
#pragma unroll
for (int i = 0; i < TILE_DIM; i++) {
sum += As[threadIdx.y][i] * Bs[i][threadIdx.x];
}
__syncthreads();
}
if (row < M && col < N) {
// Use __ldg for read-only bias access
float val = sum + __ldg(&bias[col]);
// Cache intermediate computations for the first mish calculation
float abs_x = fabsf(val);
float z = expf(-abs_x);
float sp = fmaxf(val, 0.0f) + log1pf(z);
float tanh_sp = tanhf(sp);
float mish1 = val * tanh_sp;
// Reuse the same pattern for the second mish, operating on mish1
abs_x = fabsf(mish1);
z = expf(-abs_x);
sp = fmaxf(mish1, 0.0f) + log1pf(z);
tanh_sp = tanhf(sp);
float mish2 = mish1 * tanh_sp;
        // Coalesced store of the final activated value
output[row * N + col] = mish2;
}
}
torch::Tensor forward(
torch::Tensor x,
torch::Tensor weight,
torch::Tensor bias
) {
    TORCH_CHECK(x.is_cuda() && weight.is_cuda() && bias.is_cuda(), "all tensors must be CUDA");
    TORCH_CHECK(x.scalar_type() == at::kFloat && weight.scalar_type() == at::kFloat && bias.scalar_type() == at::kFloat, "only float32 is supported");
    TORCH_CHECK(x.is_contiguous() && weight.is_contiguous() && bias.is_contiguous(), "all tensors must be contiguous");
    TORCH_CHECK(x.dim() == 2, "x must be 2D");
    TORCH_CHECK(weight.dim() == 2, "weight must be 2D");
    TORCH_CHECK(bias.dim() == 1, "bias must be 1D");
int M = x.size(0);
int K = x.size(1);
int N = weight.size(0);
TORCH_CHECK(weight.size(1) == K, "weight shape mismatch");
TORCH_CHECK(bias.size(0) == N, "bias shape mismatch");
auto output = torch::empty({M, N}, x.options());
dim3 blockDim(TILE_DIM, TILE_DIM);
dim3 gridDim(
(N + TILE_DIM - 1) / TILE_DIM,
(M + TILE_DIM - 1) / TILE_DIM
);
forward_kernel<<<gridDim, blockDim, 0, at::cuda::getCurrentCUDAStream()>>>(
x.data_ptr<float>(),
weight.data_ptr<float>(),
bias.data_ptr<float>(),
output.data_ptr<float>(),
M, K, N
);
return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &forward, "Linear double Mish forward (CUDA)");
}
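One way to exercise the extension is to JIT-compile it with `torch.utils.cpp_extension.load` and compare against the PyTorch reference above. A minimal sketch, assuming the CUDA source is saved as `double_mish.cu` (the file and module names are illustrative, and `Model`, `batch_size`, etc. are the definitions from the first listing):

```python
import torch
from torch.utils.cpp_extension import load

# JIT-build the extension from the source file above (name is a placeholder).
ext = load(name="linear_double_mish", sources=["double_mish.cu"], verbose=True)

x = torch.randn(batch_size, in_features, device="cuda")
model = Model(in_features, out_features).cuda()
ref = model(x)                                  # PyTorch reference path
out = ext.forward(x, model.weight, model.bias)  # custom fused kernel
print(torch.allclose(ref, out, atol=1e-5))
```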
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.612 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.030 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 15.698 | % | 0.193 | 5 |
Issued Ipc Active | 0.630 | inst/cycle | 0.000 | 5 |
SM Busy | 15.698 | % | 0.193 | 5 |
Memory Throughput | 3102427549.170 | byte/second | 16455911545938218.000 | 5 |
Mem Busy | 8.326 | % | 0.029 | 5 |
Max Bandwidth | 4.374 | % | 0.010 | 5 |
L1/TEX Hit Rate | 50.714 | % | 0.092 | 5 |
L2 Hit Rate | 101.344 | % | 0.081 | 5 |
Mem Pipes Busy | 0.440 | % | 0.000 | 5 |
Warp Cycles Per Issued Instruction | 12.960 | cycle | 0.169 | 5 |
Warp Cycles Per Executed Instruction | 13.266 | cycle | 0.176 | 5 |
Avg. Active Threads Per Warp | 24.170 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 23.380 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 21.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 12.408 | % | 0.000 | 5 |
Achieved Active Warps Per SM | 7.940 | warp | 0.000 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN ThreadDivergence | Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 24.2 threads being active per cycle. This is further reduced to 23.4 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp(). |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (12.4%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
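The low achieved occupancy follows directly from the launch configuration: with M = 128, N = 20, and TILE_DIM = 16, the grid is ceil(20/16) × ceil(128/16) = 2 × 8 = 16 blocks of 256 threads (8 warps each), i.e. 128 warps for the entire launch. That is far too few warps to fill the device, and 7.94 achieved warps per SM against the 64-warp theoretical limit gives 7.94 / 64 ≈ 12.4%, matching the reported achieved occupancy.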
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 172118.08 | μs |
Device Time | 2.85 | μs |
Self CPU Time | 61.37 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 172056.71 | μs |
Device Time | 2.85 | μs |
Self CPU Time | 97.35 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 171820.63 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 118.59 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 171515.91 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 171515.91 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 51838.62 | μs |
Device Time | 584847.36 | μs |
Self CPU Time | 16485.30 | μs |
Self Device Time | 584847.36 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 436873.74 | μs |
Device Time | 15564.98 | μs |
Self CPU Time | 436873.74 | μs |
Self Device Time | 15564.98 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
forward_kernel(float const*, float const*, float const*, float*, int, int, int) | ||
CPU Time | 0.00 | μs |
Device Time | 19037.98 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 19037.98 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 19984.03 | μs |
Device Time | 30348.46 | μs |
Self CPU Time | 19984.03 | μs |
Self Device Time | 30348.46 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 65146.53 | μs |
Device Time | 584847.36 | μs |
Self CPU Time | 13320.99 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 584847.36 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 584847.36 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |