import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
) -> torch.Tensor:
    """
    Applies a linear transformation followed by two Mish activations.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
        bias (torch.Tensor): Bias vector of shape (out_features)

    Returns:
        torch.Tensor: Output tensor after the linear transformation and two Mish
            activations, with shape (batch_size, out_features)
    """
    x = F.linear(x, weight, bias)
    x = F.mish(x)
    x = F.mish(x)
    return x
class Model(nn.Module):
    """
    Simple model that performs a matrix multiplication, applies Mish, and applies
    Mish again.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        # Initialize from nn.Linear so the weight gets the standard nn.Linear
        # initialization, then shift the bias by a small constant offset.
        linear = nn.Linear(in_features, out_features)
        self.weight = nn.Parameter(linear.weight)
        self.bias = nn.Parameter(linear.bias + torch.ones_like(linear.bias) * 0.02)

    def forward(self, x, fn=module_fn):
        return fn(x, self.weight, self.bias)
batch_size = 128
in_features = 10
out_features = 20


def get_inputs():
    return [torch.randn(batch_size, in_features)]


def get_init_inputs():
    return [in_features, out_features]
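The two helper functions drive a typical benchmark harness. A minimal sketch of how the pieces fit together (the seed is illustrative):

```python
# Illustrative driver: build the model from get_init_inputs() and run it on
# get_inputs(), mirroring how a harness would exercise this file.
torch.manual_seed(0)  # arbitrary seed for reproducibility
model = Model(*get_init_inputs())
(x,) = get_inputs()
out = model(x)
print(out.shape)  # torch.Size([128, 20])
```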
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    """
    Simple model that performs a matrix multiplication, applies Mish, and applies
    Mish again.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)
        # Apply the same small constant bias offset as the functional variant above.
        self.linear.bias = nn.Parameter(self.linear.bias + torch.ones_like(self.linear.bias) * 0.02)

    def forward(self, x):
        x = self.linear(x)
        x = F.mish(x)
        x = F.mish(x)
        return x
batch_size = 128
in_features = 10
out_features = 20


def get_inputs():
    return [torch.randn(batch_size, in_features)]


def get_init_inputs():
    return [in_features, out_features]
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
// Numerically stable softplus: log(1 + exp(x)) rewritten as
// log1p(exp(-|x|)) + max(x, 0) to avoid overflow for large positive x.
__device__ float softplus(float x) {
    float abs_x = fabsf(x);
    return log1pf(expf(-abs_x)) + fmaxf(x, 0.0f);
}

// Mish activation: x * tanh(softplus(x)).
__device__ float mish(float x) {
    float sp = softplus(x);
    return x * tanhf(sp);
}
__global__ void forward_kernel(
    const float* __restrict__ x,
    const float* __restrict__ weight,
    const float* __restrict__ bias,
    float* __restrict__ output,
    int batch_size,
    int in_features,
    int out_features
) {
    const int total_elements = batch_size * out_features;
    const int stride = blockDim.x * gridDim.x;
    // Grid-stride loop: each thread computes one or more output elements.
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < total_elements;
         idx += stride) {
        const int i = idx / out_features;  // batch row
        const int j = idx % out_features;  // output feature
        // Dot product of input row i with weight row j; weight is stored
        // [out_features, in_features], matching nn.Linear.
        float sum = 0.0f;
        for (int k = 0; k < in_features; ++k) {
            sum += x[i * in_features + k] * weight[j * in_features + k];
        }
        sum += bias[j];
        // Fused double Mish.
        float y = mish(sum);
        output[idx] = mish(y);
    }
}
torch::Tensor forward(
    torch::Tensor x,
    torch::Tensor weight,
    torch::Tensor bias
) {
    // The kernel assumes contiguous float32 CUDA tensors; check rather than
    // silently reading garbage through raw pointers.
    TORCH_CHECK(x.is_cuda() && weight.is_cuda() && bias.is_cuda(), "all tensors must be CUDA");
    TORCH_CHECK(x.scalar_type() == torch::kFloat32, "only float32 is supported");
    TORCH_CHECK(x.is_contiguous() && weight.is_contiguous() && bias.is_contiguous(),
                "all tensors must be contiguous");
    TORCH_CHECK(x.dim() == 2, "x must be 2D");
    TORCH_CHECK(weight.dim() == 2, "weight must be 2D");
    TORCH_CHECK(bias.dim() == 1, "bias must be 1D");
    const int batch_size = x.size(0);
    const int in_features = x.size(1);
    const int out_features = weight.size(0);
    TORCH_CHECK(weight.size(1) == in_features, "weight shape mismatch");
    TORCH_CHECK(bias.size(0) == out_features, "bias shape mismatch");
    auto output = torch::empty({batch_size, out_features}, x.options());
    const int block_size = 256;
    // Fixed grid; the grid-stride loop handles any problem size. 512 blocks is
    // a heuristic aimed at keeping all SMs busy on larger inputs.
    const int grid_size = 512;
    forward_kernel<<<grid_size, block_size, 0, at::cuda::getCurrentCUDAStream()>>>(
        x.data_ptr<float>(),
        weight.data_ptr<float>(),
        bias.data_ptr<float>(),
        output.data_ptr<float>(),
        batch_size,
        in_features,
        out_features
    );
    return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Strided linear + double Mish forward (CUDA)");
}
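A minimal way to exercise the extension from Python (a sketch: the source file name `fused_mish.cu` and the tolerance are assumptions; `torch.utils.cpp_extension.load` JIT-compiles the extension on first call):

```python
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the extension; "fused_mish.cu" is a placeholder for wherever the
# CUDA source above is saved.
ext = load(name="fused_double_mish", sources=["fused_mish.cu"], verbose=True)

x = torch.randn(128, 10, device="cuda")
weight = torch.randn(20, 10, device="cuda")
bias = torch.randn(20, device="cuda")

# Compare against the eager PyTorch composition the kernel is meant to fuse.
expected = F.mish(F.mish(F.linear(x, weight, bias)))
out = ext.forward(x, weight, bias)
print(torch.allclose(out, expected, atol=1e-5))  # tolerance is illustrative
```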
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.354 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.062 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 13.602 | % | 0.014 | 5 |
Issued Ipc Active | 0.544 | inst/cycle | 0.000 | 5 |
SM Busy | 13.602 | % | 0.014 | 5 |
Memory Throughput | 3064592346.102 | byte/second | 5726754359105346.000 | 5 |
Mem Busy | 7.202 | % | 0.014 | 5 |
Max Bandwidth | 3.746 | % | 0.002 | 5 |
L1/TEX Hit Rate | 95.800 | % | 0.000 | 5 |
L2 Hit Rate | 103.252 | % | 0.200 | 5 |
Mem Pipes Busy | 2.828 | % | 0.002 | 5 |
Warp Cycles Per Issued Instruction | 44.962 | cycle | 0.181 | 5 |
Warp Cycles Per Executed Instruction | 69.048 | cycle | 0.429 | 5 |
Avg. Active Threads Per Warp | 31.250 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 30.500 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 39.140 | % | 0.006 | 5 |
Achieved Active Warps Per SM | 25.048 | warp | 0.002 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (39.2%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
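The occupancy warning is consistent with the launch configuration: the fixed 512-block grid launches far more threads than this tiny problem has output elements, so most warps exit their grid-stride loop immediately. A quick back-of-the-envelope check, using the sizes from the code above:

```python
# Launch configuration vs. actual work for this benchmark's problem size.
block_size = 256
grid_size = 512
threads_launched = block_size * grid_size       # 131072 threads launched
total_elements = 128 * 20                       # batch_size * out_features = 2560
busy_blocks = -(-total_elements // block_size)  # ceil division: only 10 blocks do any work
print(threads_launched, total_elements, busy_blocks)
```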
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 683096.73 | μs |
Device Time | 2.62 | μs |
Self CPU Time | 55.24 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 683041.49 | μs |
Device Time | 2.62 | μs |
Self CPU Time | 106.83 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 682797.28 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 112.33 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 678466.35 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 678466.35 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 59664.80 | μs |
Device Time | 638445.25 | μs |
Self CPU Time | 22305.98 | μs |
Self Device Time | 638445.25 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 437757.42 | μs |
Device Time | 18174.74 | μs |
Self CPU Time | 437757.42 | μs |
Self Device Time | 18174.74 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
forward_kernel(float const*, float const*, float const*, float*, int, int, int) | ||
CPU Time | 0.00 | μs |
Device Time | 23653.32 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 23653.32 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 19464.30 | μs |
Device Time | 33203.64 | μs |
Self CPU Time | 19464.30 | μs |
Self Device Time | 33203.64 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 76758.42 | μs |
Device Time | 638445.25 | μs |
Self CPU Time | 17111.78 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 638524.00 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 638524.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |