72_ConvTranspose3d_BatchNorm_AvgPool_AvgPool
• warp_primitive_fused_avg_pool_edit_1
import torch
import torch.nn as nn
import torch.nn.functional as F
def module_fn(
x: torch.Tensor,
stride: int,
padding: int,
conv_transpose: torch.Tensor,
conv_transpose_bias: torch.Tensor,
bn_weight: torch.Tensor,
bn_bias: torch.Tensor,
bn_running_mean: torch.Tensor,
bn_running_var: torch.Tensor,
bn_eps: torch.Tensor,
bn_momentum: torch.Tensor,
) -> torch.Tensor:
"""
Applies a 3D transposed convolution, batch normalization and two average pooling layers.
Args:
x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
stride (int): Stride of the transposed convolution
padding (int): Padding of the transposed convolution
conv_transpose (torch.Tensor): Transposed convolution weight tensor
conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
bn_weight (torch.Tensor): Batch norm weight parameter
bn_bias (torch.Tensor): Batch norm bias parameter
bn_running_mean (torch.Tensor): Batch norm running mean
bn_running_var (torch.Tensor): Batch norm running variance
bn_eps (torch.Tensor): Small constant for numerical stability
bn_momentum (torch.Tensor): Momentum for running stats
Returns:
torch.Tensor: Output tensor after applying transposed conv, batch norm and avg pooling
"""
x = F.conv_transpose3d(
x, conv_transpose, bias=conv_transpose_bias, stride=stride, padding=padding
)
x = F.batch_norm(
x,
bn_running_mean,
bn_running_var,
bn_weight,
bn_bias,
training=True,
momentum=bn_momentum,
eps=bn_eps,
)
x = F.avg_pool3d(x, kernel_size=2)
x = F.avg_pool3d(x, kernel_size=2)
return x
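# Minimal smoke test for module_fn (a sketch, not part of the original harness):
# parameters are taken from freshly constructed layers and the scalar
# hyperparameters are passed as 0-dim tensors, matching the signature above.
def _module_fn_smoke_test():
    conv = nn.ConvTranspose3d(3, 16, 3)
    bn = nn.BatchNorm3d(16)
    x = torch.randn(2, 3, 8, 8, 8)
    out = module_fn(
        x,
        2,  # stride
        1,  # padding
        conv.weight,
        conv.bias,
        bn.weight,
        bn.bias,
        bn.running_mean,
        bn.running_var,
        torch.tensor(1e-5),
        torch.tensor(0.1),
    )
    # deconv: (8 - 1) * 2 - 2 * 1 + 3 = 15, then two 2x2x2 pools: 15 -> 7 -> 3
    assert out.shape == (2, 16, 3, 3, 3)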
class Model(nn.Module):
"""
A model that performs a 3D transposed convolution, followed by batch normalization
and two average pooling layers.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride, padding, bias_shape
):
super(Model, self).__init__()
conv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size)
bn = nn.BatchNorm3d(out_channels)
self.conv_transpose_parameter = nn.Parameter(conv.weight)
self.conv_transpose_bias = nn.Parameter(conv.bias)
self.bn_weight = nn.Parameter(bn.weight + torch.randn(bn.weight.shape) * 0.02)
self.bn_bias = nn.Parameter(bn.bias + torch.randn(bn.bias.shape) * 0.02)
self.register_buffer(
"bn_running_mean",
bn.running_mean + torch.randn(bn.running_mean.shape) * 0.02,
)
self.register_buffer(
"bn_running_var",
bn.running_var + torch.randn(bn.running_var.shape).abs() * 0.02,
)
self.register_buffer("bn_eps", torch.tensor(1e-5))
self.register_buffer("bn_momentum", torch.tensor(0.1))
def forward(self, x, stride, padding, fn=module_fn):
return fn(
x,
stride,
padding,
self.conv_transpose_parameter,
self.conv_transpose_bias,
self.bn_weight,
self.bn_bias,
self.bn_running_mean,
self.bn_running_var,
self.bn_eps,
self.bn_momentum,
)
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 32, 32, 32
kernel_size = 3
stride = 2
padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width), stride, padding]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, stride, padding, bias_shape]
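# Sketch of how a benchmark driver might wire the entry points above together.
# The actual harness is not shown here, and the small input below is an
# assumption used to keep the check cheap (get_inputs() returns the full
# 128 x 3 x 32^3 batch).
if __name__ == "__main__":
    model = Model(*get_init_inputs())
    small_x = torch.randn(2, in_channels, 16, 16, 16)
    out = model(small_x, stride, padding)
    print(out.shape)  # torch.Size([2, 16, 7, 7, 7]) for these sizes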
import torch
import torch.nn as nn
class Model(nn.Module):
"""
A model that performs a 3D transposed convolution, followed by batch normalization
and two average pooling layers.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias_shape):
super(Model, self).__init__()
self.conv_transpose = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding)
self.batch_norm = nn.BatchNorm3d(out_channels)
# Add noise to batch norm parameters to match functional implementation
self.batch_norm.weight = nn.Parameter(self.batch_norm.weight + torch.randn(self.batch_norm.weight.shape) * 0.02)
self.batch_norm.bias = nn.Parameter(self.batch_norm.bias + torch.randn(self.batch_norm.bias.shape) * 0.02)
self.batch_norm.running_mean = self.batch_norm.running_mean + torch.randn(self.batch_norm.running_mean.shape) * 0.02
self.batch_norm.running_var = self.batch_norm.running_var + torch.randn(self.batch_norm.running_var.shape).abs() * 0.02
self.avg_pool1 = nn.AvgPool3d(kernel_size=2)
self.avg_pool2 = nn.AvgPool3d(kernel_size=2)
def forward(self, x):
x = self.conv_transpose(x)
x = self.batch_norm(x)
x = self.avg_pool1(x)
x = self.avg_pool2(x)
return x
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 32, 32, 32
kernel_size = 3
stride = 2
padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width)]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, stride, padding, bias_shape]
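# The CUDA extension below replaces the two AvgPool3d(kernel_size=2) layers with a
# single fused 4x4x4 pooling pass. A quick sanity check of that equivalence (a
# sketch; the identity holds exactly when the spatial dims are divisible by 4):
def _check_pool_fusion_equivalence():
    import torch.nn.functional as F
    y = torch.randn(2, 16, 8, 8, 8)
    twice = F.avg_pool3d(F.avg_pool3d(y, kernel_size=2), kernel_size=2)
    fused = F.avg_pool3d(y, kernel_size=4)
    assert torch.allclose(twice, fused, atol=1e-6)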
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
// Fused CUDA kernel to perform two consecutive avg_pool3d operations in one pass.
// It combines the two 2x2x2 average pooling layers into a single 4x4x4 pooling operation.
// Each thread computes one output element, accumulating its 4x4x4 input window
// directly in registers, so no inter-thread reduction is required.
__global__ void fused_avg_pool3d_warp_kernel(
const float* __restrict__ input,
float* __restrict__ output,
int N, int C,
int D, int H, int W,
int pooled_D, int pooled_H, int pooled_W
) {
int global_thread_id = blockIdx.x * blockDim.x + threadIdx.x;
int total_outputs = N * C * pooled_D * pooled_H * pooled_W;
if (global_thread_id < total_outputs) {
// Calculate output indices
int tmp = global_thread_id;
int w_out = tmp % pooled_W; tmp /= pooled_W;
int h_out = tmp % pooled_H; tmp /= pooled_H;
int d_out = tmp % pooled_D; tmp /= pooled_D;
int c = tmp % C; tmp /= C;
int n = tmp;
// Calculate input starting positions
int d_start = d_out * 4;
int h_start = h_out * 4;
int w_start = w_out * 4;
// Calculate strides for input indexing
int strideW = 1;
int strideH = W;
int strideD = H * W;
int strideC = D * H * W;
int strideN = C * D * H * W;
int base = n * strideN + c * strideC;
// Accumulate sum with bounds checking
float sum = 0.0f;
int valid_count = 0;
for (int i = 0; i < 4; i++) {
int d_in = d_start + i;
if (d_in >= D) continue;
for (int j = 0; j < 4; j++) {
int h_in = h_start + j;
if (h_in >= H) continue;
for (int k = 0; k < 4; k++) {
int w_in = w_start + k;
if (w_in >= W) continue;
int input_idx = base + d_in * strideD + h_in * strideH + w_in * strideW;
sum += input[input_idx];
valid_count++;
}
}
}
// Compute average using actual number of valid elements
output[global_thread_id] = valid_count > 0 ? sum / valid_count : 0.0f;
}
}
// The module forward function performs the following steps:
// 1) 3D transposed convolution
// 2) Batch normalization (in training mode)
// 3) Fused average pooling (replacing two consecutive avg_pool3d with kernel_size=2)
at::Tensor module_fn_forward(
at::Tensor x,
int64_t stride,
int64_t padding,
at::Tensor conv_transpose,
at::Tensor conv_transpose_bias,
at::Tensor bn_weight,
at::Tensor bn_bias,
at::Tensor bn_running_mean,
at::Tensor bn_running_var,
at::Tensor bn_eps,
at::Tensor bn_momentum
) {
// Ensure all tensors are on CUDA
TORCH_CHECK(x.is_cuda(), "x must be a CUDA tensor");
TORCH_CHECK(conv_transpose.is_cuda(), "conv_transpose must be a CUDA tensor");
TORCH_CHECK(conv_transpose_bias.is_cuda(), "conv_transpose_bias must be a CUDA tensor");
TORCH_CHECK(bn_weight.is_cuda(), "bn_weight must be a CUDA tensor");
TORCH_CHECK(bn_bias.is_cuda(), "bn_bias must be a CUDA tensor");
TORCH_CHECK(bn_running_mean.is_cuda(), "bn_running_mean must be a CUDA tensor");
TORCH_CHECK(bn_running_var.is_cuda(), "bn_running_var must be a CUDA tensor");
TORCH_CHECK(bn_eps.is_cuda(), "bn_eps must be a CUDA scalar tensor");
TORCH_CHECK(bn_momentum.is_cuda(), "bn_momentum must be a CUDA scalar tensor");
const double eps_val = bn_eps.item<double>();
const double momentum_val = bn_momentum.item<double>();
std::vector<int64_t> stride_3d = {stride, stride, stride};
std::vector<int64_t> pad_3d = {padding, padding, padding};
auto y = at::conv_transpose3d(
x,
conv_transpose,
conv_transpose_bias,
stride_3d,
pad_3d
);
bool training = true;
y = at::batch_norm(
y,
bn_weight,
bn_bias,
bn_running_mean,
bn_running_var,
training,
momentum_val,
eps_val,
/*cudnn_enabled=*/true
);
auto sizes = y.sizes();
int N = sizes[0];
int C = sizes[1];
int D = sizes[2];
int H = sizes[3];
int W = sizes[4];
TORCH_CHECK(D >= 4 && H >= 4 && W >= 4, "Input dimensions must be at least 4 for fused pooling");
int pooled_D = D / 4;
int pooled_H = H / 4;
int pooled_W = W / 4;
auto out = at::empty({N, C, pooled_D, pooled_H, pooled_W}, y.options());
int total_elements = N * C * pooled_D * pooled_H * pooled_W;
int blockSize = 256;
int gridSize = (total_elements + blockSize - 1) / blockSize;
fused_avg_pool3d_warp_kernel<<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>(
y.data_ptr<float>(),
out.data_ptr<float>(),
N, C, D, H, W,
pooled_D, pooled_H, pooled_W
);
return out;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def(
"forward",
&module_fn_forward,
"Fused conv_transpose3d + batch norm + fused avg pooling (CUDA) forward"
);
}
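# Sketch of how the extension above could be built and called from Python with
# torch.utils.cpp_extension.load. The source file name "fused_avg_pool.cu" is an
# assumption (any file containing the C++/CUDA code above works), and a CUDA
# device plus a working nvcc toolchain are required.
import torch
import torch.nn as nn
from torch.utils.cpp_extension import load

fused = load(name="fused_avg_pool", sources=["fused_avg_pool.cu"], verbose=True)

conv = nn.ConvTranspose3d(3, 16, 3).cuda()
bn = nn.BatchNorm3d(16).cuda()
x = torch.randn(2, 3, 16, 16, 16, device="cuda")
out = fused.forward(
    x, 2, 1,
    conv.weight, conv.bias,
    bn.weight, bn.bias,
    bn.running_mean, bn.running_var,
    torch.tensor(1e-5, device="cuda"),
    torch.tensor(0.1, device="cuda"),
)
print(out.shape)  # torch.Size([2, 16, 7, 7, 7]): 31 after the deconv, then 31 // 4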
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.866 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.860 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 21.650 | % | 0.002 | 5 |
Issued Ipc Active | 0.866 | inst/cycle | 0.000 | 5 |
SM Busy | 21.650 | % | 0.002 | 5 |
Memory Throughput | 2923572007045.970 | byte/second | 10545093608061431808.000 | 5 |
Mem Busy | 57.020 | % | 0.008 | 5 |
Max Bandwidth | 87.216 | % | 0.010 | 5 |
L1/TEX Hit Rate | 75.480 | % | 0.000 | 5 |
L2 Hit Rate | 9.004 | % | 0.000 | 5 |
Mem Pipes Busy | 10.556 | % | 0.000 | 5 |
Warp Cycles Per Issued Instruction | 67.796 | cycle | 0.055 | 5 |
Warp Cycles Per Executed Instruction | 67.800 | cycle | 0.055 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 29.580 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 91.832 | % | 0.009 | 5 |
Achieved Active Warps Per SM | 58.774 | warp | 0.004 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
INF Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 220679.59 | μs |
Device Time | 5325.78 | μs |
Self CPU Time | 73.30 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 220606.29 | μs |
Device Time | 5325.78 | μs |
Self CPU Time | 143.05 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaStreamSynchronize | ||
CPU Time | 9542266.22 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 9542266.22 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::item | ||
CPU Time | 9550196.92 | μs |
Device Time | 1581.96 | μs |
Self CPU Time | 1253.68 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_local_scalar_dense | ||
CPU Time | 9548943.24 | μs |
Device Time | 1581.96 | μs |
Self CPU Time | 3009.52 | μs |
Self Device Time | 1581.96 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::conv_transpose3d | ||
CPU Time | 204439.19 | μs |
Device Time | 3386714.99 | μs |
Self CPU Time | 952.21 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::batch_norm | ||
CPU Time | 30814.29 | μs |
Device Time | 5975430.54 | μs |
Self CPU Time | 860.65 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_batch_norm_impl_index | ||
CPU Time | 29953.64 | μs |
Device Time | 5975430.54 | μs |
Self CPU Time | 939.71 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::cudnn_batch_norm | ||
CPU Time | 29013.93 | μs |
Device Time | 5975430.54 | μs |
Self CPU Time | 10556.21 | μs |
Self Device Time | 5975430.54 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void cudnn::bn_fw_tr_1C11_kernel_NCHW<float, float, int, 512, true, 1, true>(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float) | ||
CPU Time | 0.00 | μs |
Device Time | 5975430.54 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 5975430.54 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |