72_ConvTranspose3d_BatchNorm_AvgPool_AvgPool
• fused_avgpool_distributed_base
import torch
import torch.nn as nn
import torch.nn.functional as F
def module_fn(
x: torch.Tensor,
stride: int,
padding: int,
conv_transpose: torch.Tensor,
conv_transpose_bias: torch.Tensor,
bn_weight: torch.Tensor,
bn_bias: torch.Tensor,
bn_running_mean: torch.Tensor,
bn_running_var: torch.Tensor,
bn_eps: torch.Tensor,
bn_momentum: torch.Tensor,
) -> torch.Tensor:
"""
Applies a 3D transposed convolution, batch normalization and two average pooling layers.
Args:
x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
stride (int): Stride of the transposed convolution
padding (int): Padding of the transposed convolution
conv_transpose (torch.Tensor): Transposed convolution weight tensor
conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
bn_weight (torch.Tensor): Batch norm weight parameter
bn_bias (torch.Tensor): Batch norm bias parameter
bn_running_mean (torch.Tensor): Batch norm running mean
bn_running_var (torch.Tensor): Batch norm running variance
bn_eps (torch.Tensor): Small constant for numerical stability
bn_momentum (torch.Tensor): Momentum for running stats
Returns:
torch.Tensor: Output tensor after applying transposed conv, batch norm and avg pooling
"""
x = F.conv_transpose3d(
x, conv_transpose, bias=conv_transpose_bias, stride=stride, padding=padding
)
x = F.batch_norm(
x,
bn_running_mean,
bn_running_var,
bn_weight,
bn_bias,
training=True,
momentum=bn_momentum,
eps=bn_eps,
)
x = F.avg_pool3d(x, kernel_size=2)
x = F.avg_pool3d(x, kernel_size=2)
return x
class Model(nn.Module):
"""
    A model that performs a 3D transposed convolution, followed by batch normalization
    and two average pooling layers.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride, padding, bias_shape
):
super(Model, self).__init__()
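        # Note: these local ConvTranspose3d/BatchNorm3d layers are used only to borrow their
        # default-initialized parameters; stride and padding are applied later, at call time, in module_fn.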
conv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size)
bn = nn.BatchNorm3d(out_channels)
self.conv_transpose_parameter = nn.Parameter(conv.weight)
self.conv_transpose_bias = nn.Parameter(conv.bias)
self.bn_weight = nn.Parameter(bn.weight + torch.randn(bn.weight.shape) * 0.02)
self.bn_bias = nn.Parameter(bn.bias + torch.randn(bn.bias.shape) * 0.02)
self.register_buffer(
"bn_running_mean",
bn.running_mean + torch.randn(bn.running_mean.shape) * 0.02,
)
self.register_buffer(
"bn_running_var",
bn.running_var + torch.randn(bn.running_var.shape).abs() * 0.02,
)
self.register_buffer("bn_eps", torch.tensor(1e-5))
self.register_buffer("bn_momentum", torch.tensor(0.1))
def forward(self, x, stride, padding, fn=module_fn):
return fn(
x,
stride,
padding,
self.conv_transpose_parameter,
self.conv_transpose_bias,
self.bn_weight,
self.bn_bias,
self.bn_running_mean,
self.bn_running_var,
self.bn_eps,
self.bn_momentum,
)
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 32, 32, 32
kernel_size = 3
stride = 2
padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width), stride, padding]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, stride, padding, bias_shape]
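For reference, a minimal harness sketch (not part of the benchmark files) showing how the functional variant above can be exercised end to end; it assumes a CUDA device is available and only checks the output shape.

if __name__ == "__main__":
    # Instantiate the parameter container and run the functional path on GPU.
    model = Model(*get_init_inputs()).cuda()
    x, s, p = get_inputs()
    out = model(x.cuda(), s, p)
    # (32 - 1) * 2 - 2 * 1 + 3 = 63 per spatial dim, then two 2x pools: 63 -> 31 -> 15.
    print(out.shape)  # expected: torch.Size([128, 16, 15, 15, 15])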
import torch
import torch.nn as nn
class Model(nn.Module):
"""
    A model that performs a 3D transposed convolution, followed by batch normalization
    and two average pooling layers.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias_shape):
super(Model, self).__init__()
self.conv_transpose = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding)
self.batch_norm = nn.BatchNorm3d(out_channels)
# Add noise to batch norm parameters to match functional implementation
self.batch_norm.weight = nn.Parameter(self.batch_norm.weight + torch.randn(self.batch_norm.weight.shape) * 0.02)
self.batch_norm.bias = nn.Parameter(self.batch_norm.bias + torch.randn(self.batch_norm.bias.shape) * 0.02)
self.batch_norm.running_mean = self.batch_norm.running_mean + torch.randn(self.batch_norm.running_mean.shape) * 0.02
self.batch_norm.running_var = self.batch_norm.running_var + torch.randn(self.batch_norm.running_var.shape).abs() * 0.02
self.avg_pool1 = nn.AvgPool3d(kernel_size=2)
self.avg_pool2 = nn.AvgPool3d(kernel_size=2)
def forward(self, x):
x = self.conv_transpose(x)
x = self.batch_norm(x)
x = self.avg_pool1(x)
x = self.avg_pool2(x)
return x
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 32, 32, 32
kernel_size = 3
stride = 2
padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width)]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, stride, padding, bias_shape]
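As a quick sanity check on the sizes involved (a sketch; expected_spatial_size is a hypothetical helper, not part of the benchmark): the transposed convolution maps each spatial dimension from 32 to (32 - 1) * 2 - 2 * 1 + 3 = 63, and the two kernel_size=2 average pools then floor-halve it twice, giving 15.

def expected_spatial_size(size, stride, padding, kernel_size):
    # ConvTranspose3d output size (dilation=1, output_padding=0),
    # followed by two AvgPool3d(kernel_size=2) layers in floor mode.
    conv_out = (size - 1) * stride - 2 * padding + kernel_size
    return conv_out // 2 // 2

assert expected_spatial_size(32, stride=2, padding=1, kernel_size=3) == 15
# Final output shape: (batch_size, out_channels, 15, 15, 15) == (128, 16, 15, 15, 15)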
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
#include <vector>
// Define block size for kernel; adjust as needed
#define BLOCK_SIZE 256
// This kernel fuses two consecutive average pooling operations by performing
// an average over a 4x4x4 block. It uses a grid-stride loop to ensure even workload
// distribution across threads and blocks.
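// Assumptions (not enforced here): the input is a contiguous float32 tensor in NCDHW
// layout, and pooled_D/H/W were computed on the host as D/4, H/4 and W/4, so every
// 4x4x4 window read below stays within bounds.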
__global__ void fused_avgpool_distributed_kernel(const float* __restrict__ input,
float* __restrict__ output,
int N, int C, int D, int H, int W,
int pooled_D, int pooled_H, int pooled_W) {
int total = N * C * pooled_D * pooled_H * pooled_W;
// Use grid-stride loop to distribute work evenly
for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
idx < total;
idx += gridDim.x * blockDim.x) {
// Decode flat index into (n, c, d, h, w) for the pooled output
int w_idx = idx % pooled_W;
int tmp = idx / pooled_W;
int h_idx = tmp % pooled_H;
tmp = tmp / pooled_H;
int d_idx = tmp % pooled_D;
tmp = tmp / pooled_D;
int c_idx = tmp % C;
int n_idx = tmp / C;
// Compute the starting indices for the 4x4x4 window in the input tensor
int d_start = d_idx * 4;
int h_start = h_idx * 4;
int w_start = w_idx * 4;
float sum = 0.0f;
// Unroll the loops for 4x4x4 average pooling
#pragma unroll
for (int dz = 0; dz < 4; dz++) {
int d_val = d_start + dz;
#pragma unroll
for (int dy = 0; dy < 4; dy++) {
int h_val = h_start + dy;
#pragma unroll
for (int dx = 0; dx < 4; dx++) {
int w_val = w_start + dx;
// Compute input index for NCDHW layout
int input_index = (((n_idx * C + c_idx) * D + d_val) * H + h_val) * W + w_val;
sum += input[input_index];
}
}
}
// Write the averaged result to the output (64 elements per window)
output[idx] = sum / 64.0f;
}
}
at::Tensor module_fn_forward(
at::Tensor x,
int64_t stride,
int64_t padding,
at::Tensor conv_transpose,
at::Tensor conv_transpose_bias,
at::Tensor bn_weight,
at::Tensor bn_bias,
at::Tensor bn_running_mean,
at::Tensor bn_running_var,
at::Tensor bn_eps,
at::Tensor bn_momentum
) {
// Ensure all tensors are CUDA tensors
TORCH_CHECK(x.is_cuda(), "x must be a CUDA tensor");
TORCH_CHECK(conv_transpose.is_cuda(), "conv_transpose must be a CUDA tensor");
TORCH_CHECK(conv_transpose_bias.is_cuda(), "conv_transpose_bias must be a CUDA tensor");
TORCH_CHECK(bn_weight.is_cuda(), "bn_weight must be a CUDA tensor");
TORCH_CHECK(bn_bias.is_cuda(), "bn_bias must be a CUDA tensor");
TORCH_CHECK(bn_running_mean.is_cuda(), "bn_running_mean must be a CUDA tensor");
TORCH_CHECK(bn_running_var.is_cuda(), "bn_running_var must be a CUDA tensor");
TORCH_CHECK(bn_eps.is_cuda(), "bn_eps must be a CUDA scalar tensor");
TORCH_CHECK(bn_momentum.is_cuda(), "bn_momentum must be a CUDA scalar tensor");
// Extract scalar values
const double eps_val = bn_eps.item<double>();
const double momentum_val = bn_momentum.item<double>();
// 1) 3D Transposed convolution: use provided stride and padding
std::vector<int64_t> stride_3d = {stride, stride, stride};
std::vector<int64_t> pad_3d = {padding, padding, padding};
auto y = at::conv_transpose3d(x, conv_transpose, conv_transpose_bias, stride_3d, pad_3d);
// 2) Batch Normalization (training mode)
bool training = true;
y = at::batch_norm(y, bn_weight, bn_bias, bn_running_mean, bn_running_var, training, momentum_val, eps_val, true);
// 3) Fused Average Pooling: Replace two consecutive avg_pool3d (kernel_size=2) with a single pooling over a 4x4x4 block
auto sizes = y.sizes(); // y assumed to be in NCDHW format
int N_val = sizes[0];
int C_val = sizes[1];
int D_val = sizes[2];
int H_val = sizes[3];
int W_val = sizes[4];
// Each avg_pool3d with kernel_size=2 reduces spatial dims by factor 2, so two such ops reduce dims by factor 4
int pooled_D = D_val / 4;
int pooled_H = H_val / 4;
int pooled_W = W_val / 4;
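    // Integer division discards any remainder; this matches the floor behavior of two
    // stacked AvgPool3d(kernel_size=2) layers, which likewise ignore trailing elements
    // beyond a multiple of 4 in each spatial dimension.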
auto output = at::empty({N_val, C_val, pooled_D, pooled_H, pooled_W}, y.options());
int total_elements = N_val * C_val * pooled_D * pooled_H * pooled_W;
int threads = BLOCK_SIZE;
int blocks = (total_elements + threads - 1) / threads;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
fused_avgpool_distributed_kernel<<<blocks, threads, 0, stream>>>(
y.data_ptr<float>(),
output.data_ptr<float>(),
N_val, C_val, D_val, H_val, W_val,
pooled_D, pooled_H, pooled_W
);
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &module_fn_forward, "Fused Avg Pooling with Even Workload Distribution (CUDA) forward");
}
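The correctness argument behind the fusion is that a uniform average over a 4x4x4 window (64 elements, hence the division by 64.0f) equals the average of the eight 2x2x2 sub-window means. A PyTorch-only check of that identity, independent of the CUDA build, might look like the following sketch:

import torch
import torch.nn.functional as F

y = torch.randn(2, 3, 12, 12, 12)
two_step = F.avg_pool3d(F.avg_pool3d(y, kernel_size=2), kernel_size=2)
one_step = F.avg_pool3d(y, kernel_size=4)  # one 4x4x4 window, uniform 1/64 weights
assert torch.allclose(two_step, one_step, atol=1e-6)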
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.468 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.460 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 11.674 | % | 0.002 | 5 |
Issued Ipc Active | 0.468 | inst/cycle | 0.000 | 5 |
SM Busy | 11.674 | % | 0.002 | 5 |
Memory Throughput | 2953854266436.417 | byte/second | 2123219988754979584.000 | 5 |
Mem Busy | 55.954 | % | 0.008 | 5 |
Max Bandwidth | 88.116 | % | 0.002 | 5 |
L1/TEX Hit Rate | 75.120 | % | 0.000 | 5 |
L2 Hit Rate | 10.156 | % | 0.000 | 5 |
Mem Pipes Busy | 10.646 | % | 0.000 | 5 |
Warp Cycles Per Issued Instruction | 125.184 | cycle | 0.482 | 5 |
Warp Cycles Per Executed Instruction | 125.228 | cycle | 0.487 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 30.250 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 91.302 | % | 0.002 | 5 |
Achieved Active Warps Per SM | 58.434 | warp | 0.001 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
INF Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 298161.03 | μs |
Device Time | 5621.83 | μs |
Self CPU Time | 59.08 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 298101.95 | μs |
Device Time | 5621.83 | μs |
Self CPU Time | 120.59 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaStreamSynchronize | ||
CPU Time | 9592898.25 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 9592898.25 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::item | ||
CPU Time | 9604386.40 | μs |
Device Time | 1630.75 | μs |
Self CPU Time | 1679.66 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_local_scalar_dense | ||
CPU Time | 9602706.75 | μs |
Device Time | 1630.75 | μs |
Self CPU Time | 4479.71 | μs |
Self Device Time | 1630.75 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::conv_transpose3d | ||
CPU Time | 214422.08 | μs |
Device Time | 3403814.27 | μs |
Self CPU Time | 1271.22 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::batch_norm | ||
CPU Time | 33585.62 | μs |
Device Time | 6025107.97 | μs |
Self CPU Time | 1128.48 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_batch_norm_impl_index | ||
CPU Time | 32457.14 | μs |
Device Time | 6025107.97 | μs |
Self CPU Time | 1228.50 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::cudnn_batch_norm | ||
CPU Time | 31228.64 | μs |
Device Time | 6025107.97 | μs |
Self CPU Time | 11866.42 | μs |
Self Device Time | 6025107.97 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void cudnn::bn_fw_tr_1C11_kernel_NCHW<float, float, int, 512, true, 1, true>(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float) | ||
CPU Time | 0.00 | μs |
Device Time | 6025107.97 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 6025107.97 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
45312 warnings generated when compiling for host. Suppressed 45346 warnings (45299 in non-user code, 47 NOLINT). Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.