```python
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor) -> torch.Tensor:
    """
    Applies SELU activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.

    Returns:
        torch.Tensor: Output tensor with SELU applied, same shape as input.
    """
    return F.selu(x)


class Model(nn.Module):
    """
    Simple model that performs a SELU activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
```
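For reference, a minimal sketch of how this harness is typically driven (the comparison against `F.selu` is an illustration, not part of the harness itself):

```python
# Hypothetical driver for the harness above; runs on CPU.
model = Model()
(x,) = get_inputs()

out = model(x)  # forward defaults to fn=module_fn, i.e. F.selu
assert out.shape == x.shape
assert torch.allclose(out, F.selu(x))
```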
```python
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs a SELU activation.
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies SELU activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with SELU applied, same shape as input.
        """
        return torch.selu(x)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return []  # No special initialization inputs needed
```
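SELU computes `scale * x` for positive inputs and `scale * alpha * (exp(x) - 1)` otherwise. A quick sketch checking that formula against `torch.selu`, reusing the constants the CUDA kernel below hard-codes (the tolerance is an assumption):

```python
import torch

# Same alpha/scale constants the CUDA kernel below hard-codes.
alpha = 1.67326324235437728481
scale = 1.05070098735548049342

x = torch.randn(8)
manual = scale * torch.where(x > 0, x, alpha * (torch.exp(x) - 1))
assert torch.allclose(torch.selu(x), manual, atol=1e-6)
```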
```cpp
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

// Device helper: define an inline exponential function for float and double
template <typename scalar_t>
__device__ inline scalar_t my_exp(scalar_t x);

template <>
__device__ inline float my_exp<float>(float x) {
    return expf(x);
}

template <>
__device__ inline double my_exp<double>(double x) {
    return exp(x);
}
// CUDA kernel with 2D thread and block indexing for efficient mapping.
// The flat 1D input array is mapped onto a 2D grid of threads to improve
// occupancy, and a grid-stride loop covers any elements beyond one full grid.
template <typename scalar_t>
__global__ void selu_kernel_2d_indexed(const scalar_t* __restrict__ input,
                                       scalar_t* __restrict__ output,
                                       size_t numel) {
    // Compute the global thread index from the 2D block and grid coordinates.
    int tIdx = threadIdx.x + threadIdx.y * blockDim.x;
    int bIdx = blockIdx.x + blockIdx.y * gridDim.x;
    // Cast before multiplying so the flat index cannot overflow 32 bits.
    size_t global_idx = static_cast<size_t>(bIdx) * (blockDim.x * blockDim.y) + tIdx;

    // Total number of threads in the grid (the stride of the loop below).
    size_t total_threads = static_cast<size_t>(gridDim.x) * gridDim.y * blockDim.x * blockDim.y;

    for (size_t i = global_idx; i < numel; i += total_threads) {
        scalar_t x = __ldg(&input[i]);
        scalar_t res = (x > static_cast<scalar_t>(0))
                           ? x
                           : static_cast<scalar_t>(1.67326324235437728481) *
                                 (my_exp(x) - static_cast<scalar_t>(1));
        output[i] = static_cast<scalar_t>(1.05070098735548049342) * res;
    }
}
// Host function that prepares the 2D grid and block dimensions and launches the kernel.
torch::Tensor selu_forward(torch::Tensor input) {
    TORCH_CHECK(input.is_cuda(), "Input tensor must be a CUDA tensor");

    auto output = torch::empty_like(input);
    size_t numel = input.numel();

    // Define 2D block dimensions (32 x 8 = 256 threads per block).
    const int block_dim_x = 32, block_dim_y = 8;
    dim3 threads(block_dim_x, block_dim_y);

    // Compute the number of blocks needed for the total number of elements.
    int block_size = block_dim_x * block_dim_y;
    int blocks_needed = static_cast<int>((numel + block_size - 1) / block_size);

    // Arrange blocks in a near-square 2D grid.
    int grid_dim_x = static_cast<int>(ceil(sqrt(static_cast<double>(blocks_needed))));
    int grid_dim_y = (blocks_needed + grid_dim_x - 1) / grid_dim_x;
    dim3 blocks(grid_dim_x, grid_dim_y);
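    // Worked example with the benchmark shapes above (illustrative only):
    // numel = 16 * 16384 = 262144, block_size = 256 -> blocks_needed = 1024,
    // grid_dim_x = ceil(sqrt(1024)) = 32, grid_dim_y = 32, i.e. a 32 x 32 grid.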
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "selu_forward_cuda_2d", ([&] {
        const scalar_t* input_ptr = input.data_ptr<scalar_t>();
        scalar_t* output_ptr = output.data_ptr<scalar_t>();
        selu_kernel_2d_indexed<scalar_t><<<blocks, threads>>>(input_ptr, output_ptr, numel);
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &selu_forward, "SELU Activation Forward with 2D Thread Indexing (CUDA)");
}
```
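A sketch of how the extension could be JIT-compiled and sanity-checked against the eager op (the source file name `selu_2d.cu` and the extension name are assumptions for this example):

```python
import torch
from torch.utils.cpp_extension import load

# Hypothetical file name; adjust to wherever the .cu source above is saved.
selu_ext = load(name="selu_2d", sources=["selu_2d.cu"])

x = torch.randn(16, 16384, device="cuda")
out = selu_ext.forward(x)
assert torch.allclose(out, torch.selu(x), atol=1e-6)
```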
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 1.118 | inst/cycle | 0.001 | 5 |
Executed Ipc Elapsed | 0.472 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 30.654 | % | 0.438 | 5 |
Issued Ipc Active | 1.224 | inst/cycle | 0.001 | 5 |
SM Busy | 30.654 | % | 0.438 | 5 |
Memory Throughput | 278538036683.600 | byte/second | 4457596902183544320.000 | 5 |
Mem Busy | 13.210 | % | 0.011 | 5 |
Max Bandwidth | 12.262 | % | 0.014 | 5 |
L1/TEX Hit Rate | 0.000 | % | 0.000 | 5 |
L2 Hit Rate | 66.236 | % | 0.050 | 5 |
Mem Pipes Busy | 9.460 | % | 0.009 | 5 |
Warp Cycles Per Issued Instruction | 39.554 | cycle | 0.480 | 5 |
Warp Cycles Per Executed Instruction | 43.358 | cycle | 0.579 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 29.150 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 16.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 76.890 | % | 0.170 | 5 |
Achieved Active Warps Per SM | 49.210 | warp | 0.070 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (76.5%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
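To make the occupancy numbers concrete: achieved occupancy is achieved active warps per SM divided by the theoretical maximum, i.e. 49.210 / 64.000 ≈ 76.9%, matching the Achieved Occupancy row in the metrics table. Per the warning above, the gap from 100% stems from warp-scheduling overheads or load imbalance rather than any block-level resource limit.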
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 388468.79 | μs |
Device Time | 40.06 | μs |
Self CPU Time | 35.47 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 388433.32 | μs |
Device Time | 40.06 | μs |
Self CPU Time | 94.04 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 407415.14 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 19437.55 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 381748.52 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 381748.52 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 510160.31 | μs |
Device Time | 22963.17 | μs |
Self CPU Time | 510160.31 | μs |
Self Device Time | 22963.17 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void selu_kernel_2d_indexed<float>(float const*, float*, unsigned long) | ||
CPU Time | 0.00 | μs |
Device Time | 33562.69 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 33562.69 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 24531.64 | μs |
Device Time | 44272.12 | μs |
Self CPU Time | 24531.64 | μs |
Self Device Time | 44272.12 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 65933.08 | μs |
Device Time | 655884.49 | μs |
Self CPU Time | 13943.28 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 51993.67 | μs |
Device Time | 655884.49 | μs |
Self CPU Time | 15527.72 | μs |
Self Device Time | 655884.49 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 655884.49 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 655884.49 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |