import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies Softplus activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with Softplus applied, same shape as input.
    """
    return F.softplus(x)


class Model(nn.Module):
    """
    Simple model that performs a Softplus activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
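The `fn` argument on `forward` above lets a harness swap the default `module_fn` for an alternative implementation without touching the model. A minimal sketch of how that hook might be exercised, reusing the definitions above (the lambda is a hypothetical stand-in, not part of the benchmark):

    model = Model()
    x, = get_inputs()

    y_default = model(x)  # uses module_fn, i.e. F.softplus
    # Hypothetical replacement: a naive softplus, fine for standard-normal inputs
    y_naive = model(x, fn=lambda t: torch.log(1 + torch.exp(t)))

    print(torch.allclose(y_default, y_naive, atol=1e-5))  # expected: True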
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs a Softplus activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies Softplus activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with Softplus applied, same shape as input.
        """
        return torch.nn.functional.softplus(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
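For reference, softplus(x) = log(1 + e^x). Computed naively this overflows for large x, which is why `F.softplus` reverts to the identity above a threshold (20 by default, with beta=1). A sketch of a numerically stable Python reference using the same ±20 cutoffs that the CUDA kernel below applies:

    def softplus_reference(x: torch.Tensor) -> torch.Tensor:
        # x > 20: softplus(x) ~= x (avoids overflow in exp)
        # x < -20: softplus(x) ~= exp(x) (log1p(exp(x)) ~= exp(x) for very negative x)
        # otherwise: exact formula log1p(exp(x))
        return torch.where(
            x > 20.0,
            x,
            torch.where(x < -20.0, torch.exp(x), torch.log1p(torch.exp(x))),
        )

This should agree with `torch.nn.functional.softplus` up to floating-point error for the benchmark's standard-normal inputs.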
Field | Value |
---|---|
Operation Name | 29_Softplus |
Level ID | 1 |
Task ID | 29 |
Kernel Name | softplus_coalesced_base |
CUDA Speedup (Native) | 0.993x |
CUDA Speedup (Compile) | 4.180x |
CUDA Runtime | 0.007 ms |
PyTorch Runtime (Native) | 0.007 ms |
PyTorch Runtime (Compile) | 0.029 ms |
Correct | True |
Max Diff (vs. Reference) | 0.000000 |
Model | o3-mini-2025-01-31 |
Temperature | 1.00 |
Rank | Kernel Name | Runtime (ms) | Speedup Native | Speedup Compile |
---|---|---|---|---|
🥇 | softplus_modular_base_base | 0.01 | 1.16 | 4.88 |
🥇 | warp_and_alignment_optimized_softplus_edit_1 | 0.01 | 1.16 | 4.88 |
🥇 | branchless_softplus_edit_1 | 0.01 | 1.16 | 4.88 |
🥇 | warp_optimized_softplus_base | 0.01 | 1.16 | 4.88 |
5 | softplus_unrolled_base_base | 0.01 | 0.99 | 4.18 |
5 | softplus_coalesced_base | 0.01 | 0.99 | 4.18 |
5 | softplus_2d_block_thread_base | 0.01 | 0.99 | 4.18 |
5 | optimized_softplus_cuda_base | 0.01 | 0.99 | 4.18 |
5 | softplus_coalesced_memory_access_base | 0.01 | 0.99 | 4.18 |
5 | softplus_tuned_indexing_base_base | 0.01 | 0.99 | 4.18 |
5 | softplus_blockstride_base | 0.01 | 0.99 | 4.18 |
5 | softplus_loop_unroll_base_base | 0.01 | 0.99 | 4.18 |
5 | softplus_branchless_base | 0.01 | 0.99 | 4.18 |
5 | softplus_blocksize_experiment_base | 0.01 | 0.99 | 4.18 |
5 | softplus_unrolled_base | 0.01 | 0.99 | 4.18 |
5 | softplus_constant_memory_base_base | 0.01 | 0.99 | 4.18 |
5 | optimized_softplus_cuda_base | 0.01 | 0.99 | 4.18 |
5 | 29_Softplus | 0.01 | 0.99 | 4.18 |
5 | softplus_constant_memory_base | 0.01 | 0.99 | 4.18 |
5 | optimized_softplus_base | 0.01 | 0.99 | 4.18 |
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// CUDA kernel with block-stride loop and __ldg for read-only cache to improve memory coalescing
template <typename scalar_t>
__global__ void softplus_kernel_coalesced(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int size) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; idx < size; idx += stride) {
    // Use __ldg to load from global memory via the read-only cache
    scalar_t x = __ldg(&input[idx]);
    if (x > static_cast<scalar_t>(20.0)) {
      output[idx] = x;
    } else if (x < static_cast<scalar_t>(-20.0)) {
      output[idx] = exp(x);
    } else {
      output[idx] = log1p(exp(x));
    }
  }
}

// CUDA forward function
torch::Tensor softplus_cuda_forward(torch::Tensor input) {
  auto output = torch::empty_like(input);
  const int size = input.numel();
  const int threads = 256;
  const int blocks = (size + threads - 1) / threads;

  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "softplus_forward_cuda", ([&] {
    softplus_kernel_coalesced<scalar_t><<<blocks, threads>>>(
        input.data_ptr<scalar_t>(),
        output.data_ptr<scalar_t>(),
        size);
  }));

  return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &softplus_cuda_forward, "Softplus forward (CUDA)");
}
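To try the kernel, the source above can be saved to a file and JIT-compiled with `torch.utils.cpp_extension.load`. A sketch of that workflow, assuming the source has been written to softplus_cuda.cu (the file name and extension name below are arbitrary):

    import torch
    from torch.utils.cpp_extension import load

    # JIT-compile the extension; the PYBIND11_MODULE above exposes "forward"
    softplus_ext = load(name="softplus_coalesced", sources=["softplus_cuda.cu"], verbose=True)

    x = torch.randn(16, 16384, device="cuda")
    y_cuda = softplus_ext.forward(x)
    y_ref = torch.nn.functional.softplus(x)

    print(torch.allclose(y_cuda, y_ref, atol=1e-6))  # expected: True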
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 1.426 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.578 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 38.002 | % | 0.036 | 5 |
Issued Ipc Active | 1.520 | inst/cycle | 0.000 | 5 |
SM Busy | 38.002 | % | 0.036 | 5 |
Memory Throughput | 269039062658.060 | byte/second | 6501464437210878976.000 | 5 |
Mem Busy | 12.878 | % | 0.015 | 5 |
Max Bandwidth | 11.892 | % | 0.005 | 5 |
L1/TEX Hit Rate | 0.000 | % | 0.000 | 5 |
L2 Hit Rate | 66.776 | % | 0.178 | 5 |
Mem Pipes Busy | 7.676 | % | 0.004 | 5 |
Warp Cycles Per Issued Instruction | 34.008 | cycle | 0.557 | 5 |
Warp Cycles Per Executed Instruction | 36.222 | cycle | 0.635 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 29.750 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 16.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 80.878 | % | 0.014 | 5 |
Achieved Active Warps Per SM | 51.762 | warp | 0.006 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (80.9%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
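The occupancy figures tie back to the launch configuration in `softplus_cuda_forward`: 256 threads per block over 16 × 16384 elements. A quick sanity check of the numbers above (a sketch; the 64-warp SM capacity and 8-block warp limit are taken from the "Theoretical Active Warps per SM" and "Block Limit Warps" rows):

    # Back-of-the-envelope check of the launch configuration and occupancy figures
    size = 16 * 16384                     # elements in the benchmark input
    threads = 256                         # threads per block (from softplus_cuda_forward)
    blocks = (size + threads - 1) // threads
    warps_per_block = threads // 32

    print(blocks)                         # 1024 blocks launched
    print(warps_per_block)                # 8 warps per block
    # With the 8-block warp limit per SM, 8 * 8 = 64 theoretical active warps per SM,
    # i.e. 100% theoretical occupancy; the profiler measured ~51.8 active warps (~80.9% achieved).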
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 416308.61 | μs |
Device Time | 40.10 | μs |
Self CPU Time | 42.92 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 416265.69 | μs |
Device Time | 40.10 | μs |
Self CPU Time | 99.11 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 435251.28 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 19456.99 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 415576.84 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 415576.84 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 488709.64 | μs |
Device Time | 22472.42 | μs |
Self CPU Time | 488709.64 | μs |
Self Device Time | 22472.42 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void softplus_kernel_coalesced<float>(float const*, float*, int) | ||
CPU Time | 0.00 | μs |
Device Time | 27154.53 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 27154.53 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 24017.53 | μs |
Device Time | 43261.16 | μs |
Self CPU Time | 24017.53 | μs |
Self Device Time | 43261.16 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 64497.75 | μs |
Device Time | 641421.85 | μs |
Self CPU Time | 13676.55 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 50823.03 | μs |
Device Time | 641421.85 | μs |
Self CPU Time | 15405.12 | μs |
Self Device Time | 641421.85 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 641421.85 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 641421.85 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |