import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies Softplus activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with Softplus applied, same shape as input.
    """
    return F.softplus(x)


class Model(nn.Module):
    """
    Simple model that performs a Softplus activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
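As a quick sanity check (a sketch, assuming the definitions above are in scope), the model should agree exactly with PyTorch's built-in Softplus on the generated inputs:

# Illustrative check; Model and get_inputs are defined above.
model = Model()
(x,) = get_inputs()
torch.testing.assert_close(model(x), torch.nn.Softplus()(x))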
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs a Softplus activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies Softplus activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with Softplus applied, same shape as input.
        """
        return torch.nn.functional.softplus(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cmath>
#include <type_traits>
// Branchless computation of softplus using the stable formulation:
// softplus(x) = max(x, 0) + log1p(exp(-|x|))
// Here max(x, 0) is computed as (x + |x|) * 0.5 to avoid conditional branches.
__device__ __forceinline__ float softplus_branchless(float x) {
    float ax = fabsf(x);
    float max_val = (x + ax) * 0.5f;
    return max_val + log1pf(expf(-ax));
}
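// Numerical sanity check (illustrative): for x = 100.0f the naive form
// logf(1.0f + expf(x)) overflows, since expf(100.0f) > FLT_MAX, whereas the
// stable form yields 100.0f + log1pf(expf(-100.0f)), which rounds to 100.0f.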
// CUDA kernel for softplus that minimizes warp divergence by using a uniform branchless formula.
// For float data, we use vectorized float4 memory accesses for better throughput.
// For other types, we fall back to a scalar loop with the same branchless computation.

// Warp-level vote: true only if all active lanes in the warp satisfy the predicate.
// (These helpers are not called by the final kernel: the branchless formula
// already keeps every lane on the same path.)
__device__ __forceinline__ int warp_all(int predicate) {
    return __all_sync(__activemask(), predicate);
}

// Warp-level vote: true if any active lane in the warp satisfies the predicate.
__device__ __forceinline__ int warp_any(int predicate) {
    return __any_sync(__activemask(), predicate);
}
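// Hypothetical use of the vote helpers (not wired into the kernel below):
// since softplus(x) ~= x to float precision once x > 20, a warp that uniformly
// satisfies warp_all(x > 20.0f) could copy input to output as a fast path
// without introducing divergence.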
// Optimized softplus kernel: grid-stride loops with vectorized float4 accesses
// and a uniform branchless formula, so all lanes execute the same path.
template <typename scalar_t>
__global__ void warp_optimized_softplus_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int size) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    if constexpr (std::is_same<scalar_t, float>::value) {
        // float4 accesses require 16-byte alignment on both pointers.
        uintptr_t input_addr = reinterpret_cast<uintptr_t>(input);
        uintptr_t output_addr = reinterpret_cast<uintptr_t>(output);
        bool is_aligned = (input_addr % (4 * sizeof(float))) == 0 &&
                          (output_addr % (4 * sizeof(float))) == 0;
        if (is_aligned) {
            // Process elements in chunks of 4 using vectorized loads/stores.
            int vecSize = size / 4;
            for (int i = tid; i < vecSize; i += stride) {
                const float4* in_ptr = reinterpret_cast<const float4*>(input + i * 4);
                float4 in_val = __ldg(in_ptr);
                float4 out_val;
                out_val.x = softplus_branchless(in_val.x);
                out_val.y = softplus_branchless(in_val.y);
                out_val.z = softplus_branchless(in_val.z);
                out_val.w = softplus_branchless(in_val.w);
                *reinterpret_cast<float4*>(output + i * 4) = out_val;
            }
            // Handle the tail that does not fill a complete float4.
            int tail_index = vecSize * 4;
            for (int i = tail_index + tid; i < size; i += stride) {
                float x = __ldg(&input[i]);
                output[i] = softplus_branchless(x);
            }
        } else {
            // Fall back to scalar processing if either pointer is misaligned.
            for (int i = tid; i < size; i += stride) {
                float x = __ldg(&input[i]);
                output[i] = softplus_branchless(x);
            }
        }
    } else {
        // For non-float types, use scalar processing with the same branchless computation.
        for (int i = tid; i < size; i += stride) {
            scalar_t x = __ldg(&input[i]);
            scalar_t ax = fabs(x);
            scalar_t max_val = (x + ax) * static_cast<scalar_t>(0.5);
            output[i] = max_val + log1p(exp(-ax));
        }
    }
}
// CUDA forward function
torch::Tensor softplus_cuda_forward(torch::Tensor input) {
    auto output = torch::empty_like(input);
    // Assumes the element count fits in a 32-bit int.
    const int size = static_cast<int>(input.numel());
    const int threads = 256;
    const int blocks = (size + threads - 1) / threads;

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "softplus_forward_cuda", ([&] {
        warp_optimized_softplus_kernel<scalar_t><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            size);
    }));

    return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &softplus_cuda_forward, "Softplus forward (CUDA)");
}
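A minimal usage sketch follows, assuming the CUDA listing above is saved as softplus_cuda.cu (the file name is an assumption, not part of the listing) and a CUDA device is available:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Compile and load the extension; "softplus_cuda.cu" is a hypothetical file name.
ext = load(name="softplus_ext", sources=["softplus_cuda.cu"])

x = torch.randn(16, 16384, device="cuda")
out = ext.forward(x)
# The kernel should agree with PyTorch's reference softplus to float tolerance.
torch.testing.assert_close(out, F.softplus(x))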
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 1.428 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.514 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 37.692 | % | 0.094 | 5 |
Issued Ipc Active | 1.510 | inst/cycle | 0.000 | 5 |
SM Busy | 37.692 | % | 0.094 | 5 |
Memory Throughput | 248888528919.822 | byte/second | 41634178767670362112.000 | 5 |
Mem Busy | 11.748 | % | 0.075 | 5 |
Max Bandwidth | 10.892 | % | 0.068 | 5 |
L1/TEX Hit Rate | 0.000 | % | 0.000 | 5 |
L2 Hit Rate | 67.198 | % | 0.005 | 5 |
Mem Pipes Busy | 9.136 | % | 0.052 | 5 |
Warp Cycles Per Issued Instruction | 22.516 | cycle | 1.973 | 5 |
Warp Cycles Per Executed Instruction | 23.824 | cycle | 2.210 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 31.710 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 52.602 | % | 0.079 | 5 |
Achieved Active Warps Per SM | 33.666 | warp | 0.032 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (52.6%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 545346.99 | μs |
Device Time | 39.97 | μs |
Self CPU Time | 44.40 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 545302.58 | μs |
Device Time | 39.97 | μs |
Self CPU Time | 99.39 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 561096.39 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 16248.65 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 544648.03 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 544648.03 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 416656.99 | μs |
Device Time | 19175.10 | μs |
Self CPU Time | 416656.99 | μs |
Self Device Time | 19175.10 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void warp_optimized_softplus_kernel<float>(float const*, float*, int) | ||
CPU Time | 0.00 | μs |
Device Time | 21788.17 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 21788.17 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 19591.65 | μs |
Device Time | 36869.17 | μs |
Self CPU Time | 19591.65 | μs |
Self Device Time | 36869.17 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 60424.61 | μs |
Device Time | 547278.85 | μs |
Self CPU Time | 10526.20 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 49900.33 | μs |
Device Time | 547278.85 | μs |
Self CPU Time | 13796.36 | μs |
Self Device Time | 547278.85 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 547278.85 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 547278.85 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |