import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies HardSigmoid activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
    """
    return F.hardsigmoid(x)


class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
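
For reference, HardSigmoid is the piecewise-linear approximation y = clamp((x + 3) / 6, 0, 1). The short sketch below is illustrative only (manual_hardsigmoid is a hypothetical helper, not part of the benchmark harness); it checks a hand-written version of that formula against F.hardsigmoid.

# Illustrative sketch (not part of the benchmark): HardSigmoid written out
# explicitly as y = clamp((x + 3) / 6, 0, 1) and compared to F.hardsigmoid.
import torch
import torch.nn.functional as F

def manual_hardsigmoid(x: torch.Tensor) -> torch.Tensor:
    return torch.clamp((x + 3.0) / 6.0, min=0.0, max=1.0)

x = torch.randn(16, 16384)
# Should agree with the built-in up to floating-point rounding.
assert torch.allclose(manual_hardsigmoid(x), F.hardsigmoid(x))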
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs a HardSigmoid activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies HardSigmoid activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with HardSigmoid applied, same shape as input.
        """
        return torch.nn.functional.hardsigmoid(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Warp-level reduction for minimum for float
__device__ inline float warp_reduce_min(float val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        val = fminf(val, __shfl_down_sync(0xffffffff, val, offset));
    }
    return val;
}

// Warp-level reduction for maximum for float
__device__ inline float warp_reduce_max(float val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        val = fmaxf(val, __shfl_down_sync(0xffffffff, val, offset));
    }
    return val;
}

// Warp-level reduction for minimum for double
__device__ inline double warp_reduce_min(double val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        val = fmin(val, __shfl_down_sync(0xffffffff, val, offset));
    }
    return val;
}

// Warp-level reduction for maximum for double
__device__ inline double warp_reduce_max(double val) {
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        val = fmax(val, __shfl_down_sync(0xffffffff, val, offset));
    }
    return val;
}
// CUDA kernel applying HardSigmoid activation: y = clamp((x + 3) / 6, 0, 1).
// It uses warp-level primitives to detect if an entire warp's inputs are saturated.
// If all values in a warp are >= 3, then y = 1; if all are <= -3, then y = 0.
// This avoids redundant per-thread arithmetic when the condition holds uniformly in the warp.
// Note: the full-mask shuffles below assume each warp is either fully inside or fully
// outside the loop bounds, which holds when numel is a multiple of warpSize (as in this benchmark).
template <typename scalar_t>
__global__ void warp_hardsigmoid_kernel(const scalar_t* __restrict__ input,
                                        scalar_t* __restrict__ output,
                                        size_t numel) {
    size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    size_t stride = blockDim.x * gridDim.x;
    for (size_t i = idx; i < numel; i += stride) {
        scalar_t x = input[i];
        // Determine lane id within the warp
        int lane = threadIdx.x & (warpSize - 1);
        // Compute warp-level min and max of the input values within the warp
        // (the shfl_down reductions leave the final result in lane 0)
        scalar_t warp_min = warp_reduce_min(x);
        scalar_t warp_max = warp_reduce_max(x);
        // Use a sentinel value (-1), which is outside the valid [0,1] output range,
        // to decide whether the entire warp falls in a saturated region.
        scalar_t warp_result = static_cast<scalar_t>(-1);
        if (lane == 0) {
            if (warp_min >= static_cast<scalar_t>(3)) {
                warp_result = static_cast<scalar_t>(1);
            } else if (warp_max <= static_cast<scalar_t>(-3)) {
                warp_result = static_cast<scalar_t>(0);
            }
        }
        // Broadcast the warp decision from lane 0 to all lanes
        warp_result = __shfl_sync(0xffffffff, warp_result, 0);
        scalar_t result;
        if (warp_result != static_cast<scalar_t>(-1)) {
            result = warp_result;  // Uniform saturation in the warp
        } else {
            // Compute HardSigmoid normally: y = clamp((x + 3) / 6, 0, 1)
            result = (x + static_cast<scalar_t>(3)) / static_cast<scalar_t>(6);
            result = (result < static_cast<scalar_t>(0)) ? static_cast<scalar_t>(0) :
                     (result > static_cast<scalar_t>(1)) ? static_cast<scalar_t>(1) : result;
        }
        output[i] = result;
    }
}
// Host function that dispatches the kernel
torch::Tensor forward(torch::Tensor input) {
    TORCH_CHECK(input.is_cuda(), "Input tensor must be on CUDA");
    auto output = torch::empty_like(input);
    const size_t numel = input.numel();
    const int threads = 1024;
    const int blocks = (numel + threads - 1) / threads;

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "warp_hardsigmoid_cuda", ([&] {
        warp_hardsigmoid_kernel<scalar_t><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            numel);
    }));

    cudaError_t err = cudaGetLastError();
    TORCH_CHECK(err == cudaSuccess, "CUDA kernel failed: ", cudaGetErrorString(err));
    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "HardSigmoid activation forward (CUDA) with warp-level optimization");
}
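
A minimal usage sketch follows, under the assumptions that the CUDA source above is saved as warp_hardsigmoid.cu (the file name is hypothetical) and that a CUDA toolchain is available. It JIT-compiles the extension with torch.utils.cpp_extension.load and compares its output against F.hardsigmoid.

# Hedged usage sketch, assuming the extension source above lives in warp_hardsigmoid.cu.
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

ext = load(name="warp_hardsigmoid", sources=["warp_hardsigmoid.cu"])  # JIT-builds with nvcc

x = torch.randn(16, 16384, device="cuda")
out = ext.forward(x)
# The custom kernel should agree with PyTorch's hardsigmoid up to floating-point rounding.
assert torch.allclose(out, F.hardsigmoid(x))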
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 2.300 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 1.196 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 59.426 | % | 0.025 | 5 |
Issued Ipc Active | 2.378 | inst/cycle | 0.000 | 5 |
SM Busy | 59.426 | % | 0.025 | 5 |
Memory Throughput | 226778208156.194 | byte/second | 1922962412906271488.000 | 5 |
Mem Busy | 11.960 | % | 0.006 | 5 |
Max Bandwidth | 11.960 | % | 0.006 | 5 |
L1/TEX Hit Rate | 0.000 | % | 0.000 | 5 |
L2 Hit Rate | 67.192 | % | 0.025 | 5 |
Mem Pipes Busy | 11.960 | % | 0.006 | 5 |
Warp Cycles Per Issued Instruction | 21.862 | cycle | 0.002 | 5 |
Warp Cycles Per Executed Instruction | 22.614 | cycle | 0.002 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 31.090 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 4.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 8.000 | block | 0.000 | 5 |
Block Limit Warps | 2.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 81.862 | % | 0.006 | 5 |
Achieved Active Warps Per SM | 52.390 | warp | 0.002 | 5 |
Rule | Description |
---|---|
INF HighPipeUtilization | ALU is the highest-utilized pipeline (50.8%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (81.9%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 216024.12 | μs |
Device Time | 40.35 | μs |
Self CPU Time | 41.14 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 215982.98 | μs |
Device Time | 40.35 | μs |
Self CPU Time | 84.63 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 241354.07 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 25798.99 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 215351.89 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 215351.89 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 390017.27 | μs |
Device Time | 40111.22 | μs |
Self CPU Time | 390017.27 | μs |
Self Device Time | 40111.22 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 27047.53 | μs |
Device Time | 39648.04 | μs |
Self CPU Time | 27047.53 | μs |
Self Device Time | 39648.04 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 79441.09 | μs |
Device Time | 607911.27 | μs |
Self CPU Time | 17713.62 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 61728.92 | μs |
Device Time | 607911.27 | μs |
Self CPU Time | 21289.63 | μs |
Self Device Time | 607911.27 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 607911.27 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 607911.27 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |