48_Mean_reduction_over_a_dimension
• shared_mean_reduction_base
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Reduces the input tensor along the specified dimension by taking the mean.

    Args:
        x (torch.Tensor): Input tensor of arbitrary shape.
        dim (int): The dimension to reduce over.

    Returns:
        torch.Tensor: Output tensor with reduced dimension. The shape of the
        output is the same as the input except that the reduced dimension is
        removed.
    """
    return torch.mean(x, dim=dim)


class Model(nn.Module):
    """
    Simple model that performs mean reduction over a specific dimension.
    """

    def __init__(self, dim: int):
        """
        Initializes the model with the dimension to reduce over.

        Args:
            dim (int): The dimension to reduce over.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Reduces the input tensor along the specified dimension by taking the mean.

        Args:
            x (torch.Tensor): Input tensor of arbitrary shape.

        Returns:
            torch.Tensor: Output tensor with reduced dimension. The shape of the
            output is the same as the input except that the reduced dimension is
            removed.
        """
        return fn(x, self.dim)


batch_size = 16
dim1 = 256
dim2 = 256


def get_inputs():
    x = torch.randn(batch_size, dim1, dim2)
    return [x]


def get_init_inputs():
    return [1]
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs mean reduction over a specific dimension.
    """

    def __init__(self, dim: int):
        """
        Initializes the model with the dimension to reduce over.

        Args:
            dim (int): The dimension to reduce over.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Reduces the input tensor along the specified dimension by taking the mean.

        Args:
            x (torch.Tensor): Input tensor of arbitrary shape.

        Returns:
            torch.Tensor: Output tensor with reduced dimension. The shape of the
            output is the same as the input except that the reduced dimension is
            removed.
        """
        return torch.mean(x, dim=self.dim)


batch_size = 16
dim1 = 256
dim2 = 256


def get_inputs():
    x = torch.randn(batch_size, dim1, dim2)
    return [x]


def get_init_inputs():
    return [1]
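A quick sanity check, using only the helpers defined above, confirms that the reduced dimension is removed for the benchmark configuration (dim=1):

model = Model(*get_init_inputs())   # dim = 1
y = model(*get_inputs())            # input shape: (16, 256, 256)
print(y.shape)                      # torch.Size([16, 256]) -- dim 1 removed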
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>

// Tile dimensions for shared memory tiling
#define TILE_R 32
#define TILE_INNER 32

// Kernel using shared memory to stage reduction data.
// The input tensor is assumed to be contiguous with shape [outer, dim, inner], where
//   outer = product(sizes[0..dim-1]),
//   dim   = sizes[dim], and
//   inner = product(sizes[dim+1..end]).
// The output tensor has shape [outer, inner] (i.e. the dim dimension is reduced
// by computing the mean).
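// As a concrete illustration (using the benchmark configuration above: a
// [16, 256, 256] input reduced over dim=1), this decomposition gives
// outer = 16, dim_size = 256, and inner = 256, so each block averages
// 256-element columns for 32 adjacent output positions.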
template <typename scalar_t>
__global__ void mean_reduce_shared_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    int64_t dim_size,
    int64_t inner_size) {
  // Determine the outer index from grid.x
  int outer = blockIdx.x;
  // Determine the starting index of this inner tile from grid.y
  int inner_tile_start = blockIdx.y * TILE_INNER;

  // Thread indices within the block
  int tx = threadIdx.x;  // corresponds to a column within the inner tile
  int ty = threadIdx.y;  // used for cooperative loading from global memory

  // Global inner index of the output element this thread is responsible for
  int inner_index = inner_tile_start + tx;

  // Statically allocated shared memory for the tile
  __shared__ scalar_t tile[TILE_R * TILE_INNER];

  // Each thread accumulates the partial sum for one output element
  scalar_t sum = 0;

  // Loop over the reduction dimension in chunks of TILE_R
  for (int tile_start = 0; tile_start < dim_size; tile_start += TILE_R) {
    int i = tile_start + ty;  // reduction index for this thread in the y-dimension
    scalar_t val = 0;
    if ((i < dim_size) && (inner_index < inner_size)) {
      // Global index into the input:
      // idx = outer * (dim_size * inner_size) + i * inner_size + inner_index
      // Use 64-bit arithmetic so large tensors do not overflow the index.
      int64_t idx = static_cast<int64_t>(outer) * dim_size * inner_size
                  + static_cast<int64_t>(i) * inner_size + inner_index;
      val = input[idx];
    }

    // Each thread stages one element in shared memory
    tile[ty * TILE_INNER + tx] = val;
    __syncthreads();

    // Only the threads in the first row sum the loaded tile for their column
    if ((ty == 0) && (inner_index < inner_size)) {
      #pragma unroll
      for (int j = 0; j < TILE_R; j++) {
        if (tile_start + j < dim_size) {
          sum += tile[j * TILE_INNER + tx];
        }
      }
    }
    __syncthreads();
  }

  // Threads with ty == 0 write the final mean
  if ((ty == 0) && (inner_index < inner_size)) {
    int64_t out_idx = static_cast<int64_t>(outer) * inner_size + inner_index;
    output[out_idx] = sum / static_cast<scalar_t>(dim_size);
  }
}
// Host function invoked from Python.
// It computes outer_size as the product of the dimensions before 'dim' and
// inner_size as the product of the dimensions after 'dim'. The kernel is then
// launched with a 2D grid: grid.x covers the outer dimension and grid.y covers
// inner tiles of size TILE_INNER.
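// For the benchmark shapes above ([16, 256, 256] reduced over dim=1) this
// yields outer_size = 16 and inner_size = 256, i.e. grid = (16, 8) with
// block = (32, 32) = 1024 threads per block.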
torch::Tensor mean_reduce_cuda(torch::Tensor input, int64_t dim) {
  if (dim < 0) dim += input.dim();

  // The kernel indexes the input as a dense [outer, dim, inner] block,
  // so ensure the layout is contiguous.
  input = input.contiguous();

  // Get the tensor shape as a vector
  auto sizes = input.sizes().vec();
  int64_t dim_size = sizes[dim];

  int64_t outer_size = 1;
  for (int64_t i = 0; i < dim; i++) {
    outer_size *= sizes[i];
  }
  int64_t inner_size = 1;
  for (int64_t i = dim + 1; i < static_cast<int64_t>(sizes.size()); i++) {
    inner_size *= sizes[i];
  }

  // Remove the reduced dimension from the output shape
  std::vector<int64_t> out_sizes = sizes;
  out_sizes.erase(out_sizes.begin() + dim);
  auto output = torch::empty(out_sizes, input.options());

  // Configure grid and block dimensions.
  // grid.x spans the outer dimension; grid.y covers inner tiles of size TILE_INNER.
  dim3 grid(outer_size, (inner_size + TILE_INNER - 1) / TILE_INNER);
  // Block dimensions: TILE_INNER threads for the inner dimension and TILE_R
  // threads for cooperative loading.
  dim3 block(TILE_INNER, TILE_R);

  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "mean_reduce_cuda", ([&] {
    mean_reduce_shared_kernel<scalar_t><<<grid, block>>>(
        input.data_ptr<scalar_t>(),
        output.data_ptr<scalar_t>(),
        dim_size,
        inner_size);
  }));

  return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &mean_reduce_cuda, "Mean reduction using shared memory (CUDA)");
}
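A minimal sketch of how the extension might be compiled and validated against torch.mean using torch.utils.cpp_extension.load; the source file name mean_reduce.cu is an assumption for this sketch.

import torch
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source above (file name is an assumption).
mean_ext = load(name="mean_reduce", sources=["mean_reduce.cu"])

x = torch.randn(16, 256, 256, device="cuda")
out = mean_ext.forward(x, 1)

assert out.shape == (16, 256)                           # reduced dim removed
torch.testing.assert_close(out, torch.mean(x, dim=1))   # matches the reference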
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.736 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.526 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 18.576 | % | 0.003 | 5 |
Issued Ipc Active | 0.742 | inst/cycle | 0.000 | 5 |
SM Busy | 18.576 | % | 0.003 | 5 |
Memory Throughput | 419665055723.596 | byte/second | 9752124403334672384.000 | 5 |
Mem Busy | 7.264 | % | 0.002 | 5 |
Max Bandwidth | 12.572 | % | 0.007 | 5 |
L1/TEX Hit Rate | 0.000 | % | 0.000 | 5 |
L2 Hit Rate | 18.708 | % | 0.002 | 5 |
Mem Pipes Busy | 9.566 | % | 0.004 | 5 |
Warp Cycles Per Issued Instruction | 42.928 | cycle | 0.494 | 5 |
Warp Cycles Per Executed Instruction | 43.370 | cycle | 0.508 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 30.650 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 2.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 3.000 | block | 0.000 | 5 |
Block Limit Warps | 2.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 49.480 | % | 0.000 | 5 |
Achieved Active Warps Per SM | 31.670 | warp | 0.000 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (49.5%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
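The occupancy gap is consistent with the launch configuration: each block is 32 × 32 = 1024 threads (32 warps), and with a theoretical limit of 64 active warps per SM at most two such blocks fit per SM (matching the Block Limit Registers and Block Limit Warps values of 2). The achieved 31.67 active warps per SM (≈ 49.5%) corresponds to roughly one resident block's worth of warps being active on average.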
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 218926.43 | μs |
Device Time | 359.42 | μs |
Self CPU Time | 37.59 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 218888.84 | μs |
Device Time | 359.42 | μs |
Self CPU Time | 109.53 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 218162.52 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 96.56 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 217353.23 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 217353.23 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 526471.93 | μs |
Device Time | 20723.28 | μs |
Self CPU Time | 526471.93 | μs |
Self Device Time | 20723.28 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void mean_reduce_shared_kernel<float>(float const*, float*, long, long) | ||
CPU Time | 0.00 | μs |
Device Time | 74109.47 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 74109.47 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 19161.55 | μs |
Device Time | 41212.62 | μs |
Self CPU Time | 19161.55 | μs |
Self Device Time | 41212.62 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 65464.32 | μs |
Device Time | 615333.73 | μs |
Self CPU Time | 13229.58 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 52236.06 | μs |
Device Time | 615333.73 | μs |
Self CPU Time | 16140.82 | μs |
Self Device Time | 615333.73 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 615333.73 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 615333.73 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |