```python
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, alpha: float) -> torch.Tensor:
    """
    Applies ELU activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of any shape.
        alpha (float): The alpha parameter for the ELU function.

    Returns:
        torch.Tensor: Output tensor with ELU applied, same shape as input.
    """
    return F.elu(x, alpha=alpha)


class Model(nn.Module):
    """
    Simple model that performs an ELU activation.
    """

    def __init__(self, alpha):
        """
        Initializes the ELU model.

        Args:
            alpha (float): The alpha parameter for the ELU function.
        """
        super(Model, self).__init__()
        self.alpha = alpha

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies ELU activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with ELU applied, same shape as input.
        """
        return fn(x, self.alpha)


batch_size = 16
dim = 16384
alpha = 1.0


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return [alpha]
```
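This functional variant is typically driven by a small harness that constructs the model from `get_init_inputs()` and feeds it `get_inputs()`. The sketch below is illustrative only (the harness itself is an assumption, not part of this file); it reuses the definitions above and checks that routing through `module_fn` matches calling `F.elu` directly.

```python
# Hypothetical harness-style usage; relies on the definitions in the block above.
if __name__ == "__main__":
    model = Model(*get_init_inputs())   # Model(alpha=1.0)
    (x,) = get_inputs()                 # random (batch_size, dim) tensor

    out = model(x)                      # forward routes through module_fn by default
    ref = F.elu(x, alpha=alpha)         # direct functional reference

    assert out.shape == x.shape
    assert torch.allclose(out, ref)
```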
```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class Model(nn.Module):
    """
    Simple model that performs an ELU activation.
    """

    def __init__(self, alpha: float = 1.0):
        """
        Initializes the ELU model.

        Args:
            alpha (float, optional): The alpha parameter for the ELU function. Defaults to 1.0.
        """
        super(Model, self).__init__()
        self.alpha = alpha

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies ELU activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of any shape.

        Returns:
            torch.Tensor: Output tensor with ELU applied, same shape as input.
        """
        return F.elu(x, alpha=self.alpha)


batch_size = 16
dim = 16384


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return [1.0]  # Provide alpha value for initialization
```
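Both variants compute the standard ELU, which the CUDA kernel below reproduces elementwise:

$$
\mathrm{ELU}(x) =
\begin{cases}
x, & x > 0 \\
\alpha \left(e^{x} - 1\right), & x \le 0
\end{cases}
$$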
```cpp
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Grid-stride loop: each thread handles elements idx, idx + stride, idx + 2*stride, ...
// so the kernel covers any n with a fixed launch configuration.
__global__ void elu_kernel_optimized(const float* x, float* out, float alpha, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = idx; i < n; i += stride) {
        float val = x[i];
        // ELU: identity for positive inputs, alpha * (exp(x) - 1) otherwise.
        out[i] = (val > 0) ? val : alpha * (expf(val) - 1);
    }
}

torch::Tensor elu_cuda(torch::Tensor x, float alpha) {
    CHECK_INPUT(x);
    auto out = torch::empty_like(x);
    int n = x.numel();

    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;
    elu_kernel_optimized<<<blocks, threads>>>(x.data_ptr<float>(), out.data_ptr<float>(), alpha, n);

    return out;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &elu_cuda, "ELU activation with optimized indexing (CUDA)");
}
```
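One way to exercise the extension is to JIT-compile it with `torch.utils.cpp_extension.load` and compare its output against `F.elu`. The sketch below is a minimal example under assumptions of my own: the source is saved as `elu_cuda.cu` (file name assumed), a CUDA device is available, and inputs are contiguous float32 tensors, matching the kernel's `data_ptr<float>()` usage.

```python
# Hypothetical build-and-check sketch; "elu_cuda.cu" is an assumed file name for the source above.
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# The .cu file already defines its own PYBIND11_MODULE, so `load` only needs the source path.
elu_ext = load(name="elu_cuda_ext", sources=["elu_cuda.cu"], verbose=False)

# Contiguous float32 CUDA input, matching the kernel's assumptions.
x = torch.randn(16, 16384, device="cuda", dtype=torch.float32)
out = elu_ext.forward(x, 1.0)

assert torch.allclose(out, F.elu(x, alpha=1.0), atol=1e-6)
```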
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 1.334 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.584 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 35.786 | % | 0.015 | 5 |
Issued Ipc Active | 1.432 | inst/cycle | 0.000 | 5 |
SM Busy | 35.786 | % | 0.015 | 5 |
Memory Throughput | 261105837744.474 | byte/second | 6953858628345210880.000 | 5 |
Mem Busy | 12.354 | % | 0.007 | 5 |
Max Bandwidth | 11.522 | % | 0.011 | 5 |
L1/TEX Hit Rate | 0.000 | % | 0.000 | 5 |
L2 Hit Rate | 66.068 | % | 0.234 | 5 |
Mem Pipes Busy | 14.894 | % | 0.021 | 5 |
Warp Cycles Per Issued Instruction | 34.398 | cycle | 0.018 | 5 |
Warp Cycles Per Executed Instruction | 36.902 | cycle | 0.020 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 26.930 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 10.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 77.680 | % | 0.064 | 5 |
Achieved Active Warps Per SM | 49.718 | warp | 0.027 | 5 |
Rule | Description |
---|---|
INF HighPipeUtilization | ALU is the highest-utilized pipeline (23.3%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (77.3%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 487928.99 | μs |
Device Time | 40.19 | μs |
Self CPU Time | 35.14 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 487893.85 | μs |
Device Time | 40.19 | μs |
Self CPU Time | 82.31 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 507864.36 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 20398.45 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 484357.07 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 484357.07 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 508532.19 | μs |
Device Time | 22905.53 | μs |
Self CPU Time | 508532.19 | μs |
Self Device Time | 22905.53 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
elu_kernel_optimized(float const*, float*, float, int) | ||
CPU Time | 0.00 | μs |
Device Time | 35131.54 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 35131.54 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 21070.56 | μs |
Device Time | 44192.13 | μs |
Self CPU Time | 21070.56 | μs |
Self Device Time | 44192.13 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 73872.55 | μs |
Device Time | 654442.06 | μs |
Self CPU Time | 14893.23 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 58983.88 | μs |
Device Time | 654442.06 | μs |
Self CPU Time | 16462.31 | μs |
Self Device Time | 654442.06 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 654442.06 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 654442.06 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |