import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Applies argmax over the specified dimension to the input tensor.

    Args:
        x (torch.Tensor): Input tensor
        dim (int): Dimension to perform argmax over

    Returns:
        torch.Tensor: Output tensor with argmax applied over specified dimension
    """
    return torch.argmax(x, dim)


class Model(nn.Module):
    """
    Simple model that performs Argmax over a specified dimension.
    """

    def __init__(self, dim: int):
        """
        Initializes the model with the dimension to perform argmax.

        Args:
            dim (int): The dimension to perform argmax over.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies argmax over the specified dimension to the input tensor.

        Args:
            x (torch.Tensor): Input tensor
            fn: Function to apply (defaults to module_fn)

        Returns:
            torch.Tensor: Output tensor with argmax applied, with the specified dimension removed.
        """
        return fn(x, self.dim)


batch_size = 16
dim1 = 256
dim2 = 256


def get_inputs():
    x = torch.randn(batch_size, dim1, dim2)
    return [x]


def get_init_inputs():
    return [1]
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs Argmax over a specified dimension.
    """

    def __init__(self, dim: int):
        """
        Initializes the model with the dimension to perform argmax.

        Args:
            dim (int): The dimension to perform argmax over.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies argmax over the specified dimension to the input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor with argmax applied, with the specified dimension removed.
        """
        return torch.argmax(x, dim=self.dim)


batch_size = 16
dim1 = 256
dim2 = 256


def get_inputs():
    x = torch.randn(batch_size, dim1, dim2)
    return [x]


def get_init_inputs():
    return [1]
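As a quick sanity check, the harness pieces above can be wired together as in the sketch below. The shapes and the reduced dimension come straight from get_inputs / get_init_inputs; the smoke test itself is illustrative and not part of the original harness.

# Hypothetical smoke test (not part of the original harness): wires the pieces above together.
if __name__ == "__main__":
    model = Model(*get_init_inputs())  # dim = 1
    (x,) = get_inputs()                # shape (batch_size, dim1, dim2) = (16, 256, 256)
    out = model(x)
    # argmax removes the reduced dimension, so the result has shape (batch_size, dim2) = (16, 256)
    assert out.shape == (batch_size, dim2)
    assert out.dtype == torch.int64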
#include <torch/extension.h>
#include <vector>
#include <cfloat>

// Optimized kernel using warp-level primitives for efficient reduction
__global__ void efficient_argmax_kernel(
    const float* __restrict__ x,
    int64_t* __restrict__ indices,
    const int dimSize,
    const int innerSize) {

    // Each block computes one output element corresponding to an (outer, inner) pair.
    int global_idx = blockIdx.x;
    int outer_idx = global_idx / innerSize;
    int inner_idx = global_idx % innerSize;
    int base_offset = outer_idx * dimSize * innerSize + inner_idx;

    // Each thread computes a local maximum in the reduction dimension
    float local_max = -FLT_MAX;
    int local_idx = 0;
    for (int i = threadIdx.x; i < dimSize; i += blockDim.x) {
        float val = x[base_offset + i * innerSize];
        if (val > local_max) {
            local_max = val;
            local_idx = i;
        }
    }

    // Warp-level reduction using shuffle intrinsics:
    // all threads in a warp reduce their values without shared memory.
    for (int offset = warpSize / 2; offset > 0; offset /= 2) {
        float other_max = __shfl_down_sync(0xFFFFFFFF, local_max, offset);
        int other_idx = __shfl_down_sync(0xFFFFFFFF, local_idx, offset);
        if (other_max > local_max) {
            local_max = other_max;
            local_idx = other_idx;
        }
    }

    // Use shared memory to reduce across warps.
    // Assuming a maximum of 1024 threads per block => at most 32 warps.
    __shared__ float sdata[32];
    __shared__ int sidx[32];
    int lane = threadIdx.x % warpSize;
    int warpId = threadIdx.x / warpSize;

    // First thread of each warp writes its result to shared memory
    if (lane == 0) {
        sdata[warpId] = local_max;
        sidx[warpId] = local_idx;
    }
    __syncthreads();

    // Final reduction: only the first warp participates
    if (threadIdx.x < blockDim.x / warpSize) {
        local_max = sdata[lane];
        local_idx = sidx[lane];
        for (int offset = (blockDim.x / warpSize) / 2; offset > 0; offset /= 2) {
            float other_max = __shfl_down_sync(0xFFFFFFFF, local_max, offset);
            int other_idx = __shfl_down_sync(0xFFFFFFFF, local_idx, offset);
            if (other_max > local_max) {
                local_max = other_max;
                local_idx = other_idx;
            }
        }
        if (lane == 0) {
            indices[global_idx] = local_idx;
        }
    }
}

// Host function to launch the efficient argmax kernel
torch::Tensor efficient_argmax_forward_cuda(const torch::Tensor& x, const int64_t dim) {
    TORCH_CHECK(x.scalar_type() == at::kFloat, "Only float32 is supported.");
    auto x_contig = x.contiguous();
    auto sizes = x_contig.sizes();
    int ndim = x_contig.dim();
    TORCH_CHECK(dim >= 0 && dim < ndim, "Invalid dim for argmax.");

    // Compute sizes for outer, dim, and inner dimensions
    int outerSize = 1;
    for (int d = 0; d < dim; d++) {
        outerSize *= sizes[d];
    }
    int dimSize = sizes[dim];
    int innerSize = 1;
    for (int d = dim + 1; d < ndim; d++) {
        innerSize *= sizes[d];
    }

    // Prepare output shape: remove the reduced dimension
    std::vector<int64_t> out_sizes;
    for (int d = 0; d < ndim; d++) {
        if (d == dim) continue;
        out_sizes.push_back(sizes[d]);
    }
    auto options = torch::TensorOptions().device(x.device()).dtype(torch::kLong);
    auto indices = torch::empty(out_sizes, options);

    int total_outputs = outerSize * innerSize;
    int blockSize = 128;  // Chosen to cover the reduction dimension; can be tuned for specific scenarios
    dim3 grid(total_outputs);
    dim3 block(blockSize);

    // Launch the kernel. No dynamic shared memory is used because warp reduction minimizes it.
    efficient_argmax_kernel<<<grid, block>>>(
        x_contig.data_ptr<float>(),
        indices.data_ptr<int64_t>(),
        dimSize,
        innerSize
    );

    return indices;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &efficient_argmax_forward_cuda, "Efficient ArgMax CUDA forward with warp-level reduction");
}
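Before profiling, the extension is typically JIT-compiled and checked against the PyTorch reference. The sketch below uses torch.utils.cpp_extension.load; the module name "efficient_argmax" and the file name "argmax_cuda.cu" are assumptions for illustration, not part of the source.

# Minimal sketch (assumed file/module names): build the extension and compare it
# against torch.argmax on the same shapes the harness uses.
import torch
from torch.utils.cpp_extension import load

argmax_ext = load(
    name="efficient_argmax",      # hypothetical module name
    sources=["argmax_cuda.cu"],   # hypothetical file containing the kernel above
    verbose=True,
)

x = torch.randn(16, 256, 256, device="cuda", dtype=torch.float32)
out_custom = argmax_ext.forward(x, 1)   # "forward" binding defined in PYBIND11_MODULE above
out_ref = torch.argmax(x, dim=1)
# With random float inputs ties are vanishingly unlikely, so exact index equality is expected.
assert torch.equal(out_custom, out_ref)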
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 1.402 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 1.116 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 35.744 | % | 0.242 | 5 |
Issued Ipc Active | 1.430 | inst/cycle | 0.000 | 5 |
SM Busy | 35.744 | % | 0.242 | 5 |
Memory Throughput | 363062120088.258 | byte/second | 1060581302856603264.000 | 5 |
Mem Busy | 62.866 | % | 0.689 | 5 |
Max Bandwidth | 28.590 | % | 0.072 | 5 |
L1/TEX Hit Rate | 1.022 | % | 0.057 | 5 |
L2 Hit Rate | 84.784 | % | 0.884 | 5 |
Mem Pipes Busy | 12.202 | % | 0.001 | 5 |
Warp Cycles Per Issued Instruction | 36.244 | cycle | 0.023 | 5 |
Warp Cycles Per Executed Instruction | 36.982 | cycle | 0.024 | 5 |
Avg. Active Threads Per Warp | 28.240 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 26.060 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 32.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 51.000 | block | 0.000 | 5 |
Block Limit Warps | 16.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 82.258 | % | 0.149 | 5 |
Achieved Active Warps Per SM | 52.644 | warp | 0.061 | 5 |
Rule | Description |
---|---|
INF HighPipeUtilization | ALU is the highest-utilized pipeline (29.2%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (82.7%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 441142.57 | μs |
Device Time | 367.48 | μs |
Self CPU Time | 34.21 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 441108.36 | μs |
Device Time | 367.48 | μs |
Self CPU Time | 77.79 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 440443.86 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 76.23 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 440172.50 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 440172.50 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 562431.41 | μs |
Device Time | 21811.28 | μs |
Self CPU Time | 562431.41 | μs |
Self Device Time | 21811.28 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
efficient_argmax_kernel(float const*, long*, int, int) | ||
CPU Time | 0.00 | μs |
Device Time | 85837.62 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 85837.62 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 23975.57 | μs |
Device Time | 43274.96 | μs |
Self CPU Time | 23975.57 | μs |
Self Device Time | 43274.96 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 71833.70 | μs |
Device Time | 647267.49 | μs |
Self CPU Time | 15942.62 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 55893.19 | μs |
Device Time | 647267.49 | μs |
Self CPU Time | 16065.83 | μs |
Self Device Time | 647267.49 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 647346.33 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 647346.33 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
45286 warnings generated when compiling for host. Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT). Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.