72_ConvTranspose3d_BatchNorm_AvgPool_AvgPool
• fused_optimized_pool_base
import torch
import torch.nn as nn
import torch.nn.functional as F
def module_fn(
x: torch.Tensor,
stride: int,
padding: int,
conv_transpose: torch.Tensor,
conv_transpose_bias: torch.Tensor,
bn_weight: torch.Tensor,
bn_bias: torch.Tensor,
bn_running_mean: torch.Tensor,
bn_running_var: torch.Tensor,
bn_eps: torch.Tensor,
bn_momentum: torch.Tensor,
) -> torch.Tensor:
"""
Applies a 3D transposed convolution, batch normalization and two average pooling layers.
Args:
x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
stride (int): Stride of the transposed convolution
padding (int): Padding of the transposed convolution
conv_transpose (torch.Tensor): Transposed convolution weight tensor
conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
bn_weight (torch.Tensor): Batch norm weight parameter
bn_bias (torch.Tensor): Batch norm bias parameter
bn_running_mean (torch.Tensor): Batch norm running mean
bn_running_var (torch.Tensor): Batch norm running variance
bn_eps (torch.Tensor): Small constant for numerical stability
bn_momentum (torch.Tensor): Momentum for running stats
Returns:
torch.Tensor: Output tensor after applying transposed conv, batch norm and avg pooling
"""
x = F.conv_transpose3d(
x, conv_transpose, bias=conv_transpose_bias, stride=stride, padding=padding
)
x = F.batch_norm(
x,
bn_running_mean,
bn_running_var,
bn_weight,
bn_bias,
training=True,
momentum=bn_momentum,
eps=bn_eps,
)
x = F.avg_pool3d(x, kernel_size=2)
x = F.avg_pool3d(x, kernel_size=2)
return x
class Model(nn.Module):
"""
A model that performs a 3D transposed convolution, followed by batch
normalization and two average pooling layers.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride, padding, bias_shape
):
super(Model, self).__init__()
# Temporary layers used only to source initialized parameters; stride and
# padding are supplied at call time, and bias_shape is accepted for
# interface parity but unused here.
conv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size)
bn = nn.BatchNorm3d(out_channels)
self.conv_transpose_parameter = nn.Parameter(conv.weight)
self.conv_transpose_bias = nn.Parameter(conv.bias)
self.bn_weight = nn.Parameter(bn.weight + torch.randn(bn.weight.shape) * 0.02)
self.bn_bias = nn.Parameter(bn.bias + torch.randn(bn.bias.shape) * 0.02)
self.register_buffer(
"bn_running_mean",
bn.running_mean + torch.randn(bn.running_mean.shape) * 0.02,
)
self.register_buffer(
"bn_running_var",
bn.running_var + torch.randn(bn.running_var.shape).abs() * 0.02,
)
self.register_buffer("bn_eps", torch.tensor(1e-5))
self.register_buffer("bn_momentum", torch.tensor(0.1))
def forward(self, x, stride, padding, fn=module_fn):
return fn(
x,
stride,
padding,
self.conv_transpose_parameter,
self.conv_transpose_bias,
self.bn_weight,
self.bn_bias,
self.bn_running_mean,
self.bn_running_var,
self.bn_eps,
self.bn_momentum,
)
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 32, 32, 32
kernel_size = 3
stride = 2
padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width), stride, padding]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, stride, padding, bias_shape]
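For orientation, a minimal smoke test of this functional path might look like the sketch below; the shape arithmetic follows from the hyperparameters above ((32 - 1) * 2 - 2 * 1 + 3 = 63 after the transposed convolution, then 63 → 31 → 15 across the two kernel-2 pools), but the harness itself is an illustrative assumption, not part of the benchmark.

```python
# Illustrative smoke test (an assumption, not part of the benchmark harness).
model = Model(in_channels, out_channels, kernel_size, stride, padding, bias_shape)
x, s, p = get_inputs()
out = model(x, s, p)
# conv_transpose3d: (32 - 1) * 2 - 2 * 1 + 3 = 63; two kernel-2 pools: 63 -> 31 -> 15
assert out.shape == (batch_size, out_channels, 15, 15, 15)
```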
import torch
import torch.nn as nn
class Model(nn.Module):
"""
A model that performs a 3D transposed convolution, followed by batch
normalization and two average pooling layers.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias_shape):
super(Model, self).__init__()
self.conv_transpose = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding)
self.batch_norm = nn.BatchNorm3d(out_channels)
# Add noise to batch norm parameters to match functional implementation
self.batch_norm.weight = nn.Parameter(self.batch_norm.weight + torch.randn(self.batch_norm.weight.shape) * 0.02)
self.batch_norm.bias = nn.Parameter(self.batch_norm.bias + torch.randn(self.batch_norm.bias.shape) * 0.02)
self.batch_norm.running_mean = self.batch_norm.running_mean + torch.randn(self.batch_norm.running_mean.shape) * 0.02
self.batch_norm.running_var = self.batch_norm.running_var + torch.randn(self.batch_norm.running_var.shape).abs() * 0.02
self.avg_pool1 = nn.AvgPool3d(kernel_size=2)
self.avg_pool2 = nn.AvgPool3d(kernel_size=2)
def forward(self, x):
x = self.conv_transpose(x)
x = self.batch_norm(x)
x = self.avg_pool1(x)
x = self.avg_pool2(x)
return x
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 32, 32, 32
kernel_size = 3
stride = 2
padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width)]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, stride, padding, bias_shape]
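The CUDA port below leans on one identity: a single 4x4x4 average pool equals two stacked 2x2x2 average pools, because the mean of a 64-element window is the mean of its eight 8-element sub-means. A quick sketch of that equivalence (illustrative only, not part of the listing):

```python
import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 8, 8, 8)
stacked = F.avg_pool3d(F.avg_pool3d(x, kernel_size=2), kernel_size=2)
fused = F.avg_pool3d(x, kernel_size=4)  # mean of eight 2x2x2 means
assert torch.allclose(stacked, fused, atol=1e-6)
```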
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
// This kernel implements the pooling stage of the fused pipeline: a single 4x4x4
// average pool that is mathematically equivalent to the reference model's two stacked
// 2x2x2 average pools (conv_transpose3d and batch norm are dispatched to ATen/cuDNN
// in the host function below). It combines channel-based parallel work distribution
// and grid-stride loops with manual loop unrolling to maximize instruction-level
// parallelism and memory coalescing.
__global__ void fused_optimized_pool_kernel(
const float* __restrict__ input,
float* __restrict__ output,
const int N, const int C,
const int D, const int H, const int W,
const int pooled_D, const int pooled_H, const int pooled_W
) {
// Each (n, c) pair is processed in a grid-stride loop over channels
const int elements_per_channel = pooled_D * pooled_H * pooled_W;
for (int idx_channel = blockIdx.x; idx_channel < N * C; idx_channel += gridDim.x) {
const int n = idx_channel / C;
const int c = idx_channel % C;
// Compute the base offsets for the input and output for this channel
const int input_base = ((n * C + c) * D * H * W);
const int output_base = ((n * C + c) * elements_per_channel);
const int HW = H * W;
// Each thread in the block processes multiple output pooling elements
for (int idx = threadIdx.x; idx < elements_per_channel; idx += blockDim.x) {
// Map linear index to 3D pooling coordinates
int tmp = idx;
const int w_out = tmp % pooled_W;
tmp /= pooled_W;
const int h_out = tmp % pooled_H;
const int d_out = tmp / pooled_H;
// Compute the starting indices in the input for the 4x4x4 window
const int d_start = d_out * 4;
const int h_start = h_out * 4;
const int w_start = w_out * 4;
float sum = 0.0f;
// Unroll the inner loops for better performance
#pragma unroll
for (int i = 0; i < 4; i++) {
const int d_in = d_start + i;
const int d_offset = d_in * HW;
#pragma unroll
for (int j = 0; j < 4; j++) {
const int h_in = h_start + j;
const int h_offset = h_in * W;
#pragma unroll
for (int k = 0; k < 4; k++) {
const int w_in = w_start + k;
int input_idx = input_base + d_offset + h_offset + w_in;
sum += input[input_idx];
}
}
}
// Write averaged result over 64 elements
output[output_base + idx] = sum * 0.015625f; // 1/64
}
}
}
// Combined module function that performs conv_transpose3d, batch normalization, and the fused pooling
at::Tensor module_fn_forward(
at::Tensor x,
int64_t stride,
int64_t padding,
at::Tensor conv_transpose,
at::Tensor conv_transpose_bias,
at::Tensor bn_weight,
at::Tensor bn_bias,
at::Tensor bn_running_mean,
at::Tensor bn_running_var,
at::Tensor bn_eps,
at::Tensor bn_momentum
) {
// Ensure tensors are CUDA tensors
TORCH_CHECK(x.is_cuda(), "x must be a CUDA tensor");
TORCH_CHECK(conv_transpose.is_cuda(), "conv_transpose must be a CUDA tensor");
TORCH_CHECK(conv_transpose_bias.is_cuda(), "conv_transpose_bias must be a CUDA tensor");
TORCH_CHECK(bn_weight.is_cuda(), "bn_weight must be a CUDA tensor");
TORCH_CHECK(bn_bias.is_cuda(), "bn_bias must be a CUDA tensor");
TORCH_CHECK(bn_running_mean.is_cuda(), "bn_running_mean must be a CUDA tensor");
TORCH_CHECK(bn_running_var.is_cuda(), "bn_running_var must be a CUDA tensor");
TORCH_CHECK(bn_eps.is_cuda(), "bn_eps must be a CUDA scalar tensor");
TORCH_CHECK(bn_momentum.is_cuda(), "bn_momentum must be a CUDA scalar tensor");
// Extract scalar hyperparameters from their 0-dim tensors. Note: .item() on a CUDA
// tensor forces a host-device synchronization, which the profiler trace below
// attributes to aten::item / cudaStreamSynchronize.
const double eps_val = bn_eps.item<double>();
const double momentum_val = bn_momentum.item<double>();
// Prepare 3D stride and padding vectors
std::vector<int64_t> stride_3d = {stride, stride, stride};
std::vector<int64_t> pad_3d = {padding, padding, padding};
// 1) 3D transposed convolution
auto y = at::conv_transpose3d(x, conv_transpose, conv_transpose_bias, stride_3d, pad_3d);
// 2) Batch normalization (training mode)
bool training = true;
y = at::batch_norm(y, bn_weight, bn_bias, bn_running_mean, bn_running_var, training, momentum_val, eps_val, true);
// 3) Fused average pooling that simulates two sequential 2x2x2 operations by averaging a 4x4x4 window
auto sizes = y.sizes();
const int N = sizes[0];
const int C = sizes[1];
const int D = sizes[2];
const int H = sizes[3];
const int W = sizes[4];
TORCH_CHECK(D >= 4 && H >= 4 && W >= 4, "Input dimensions must be at least 4 for fused pooling");
const int pooled_D = D / 4;
const int pooled_H = H / 4;
const int pooled_W = W / 4;
auto output = at::empty({N, C, pooled_D, pooled_H, pooled_W}, y.options());
// Set up grid and block dimensions. The grid is capped at 256 blocks, so each
// block may walk several (n, c) channels via the outer grid-stride loop.
const int num_channels = N * C;
int threads_per_block = 256;
int gridSize = num_channels < 256 ? num_channels : 256;
// Launch the fused pooling kernel on the current CUDA stream
fused_optimized_pool_kernel<<<gridSize, threads_per_block, 0, at::cuda::getCurrentCUDAStream()>>>(
y.data_ptr<float>(),
output.data_ptr<float>(),
N, C,
D, H, W,
pooled_D, pooled_H, pooled_W
);
// Surface any launch-configuration errors before handing the tensor back
TORCH_CHECK(cudaGetLastError() == cudaSuccess, "fused_optimized_pool_kernel launch failed");
return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &module_fn_forward, "Fused conv_transpose3d + batch norm + optimized fused avg pooling (CUDA) forward");
}
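One plausible way to exercise the extension is to JIT-compile the listing with torch.utils.cpp_extension.load and compare it against the eager pipeline; the file name, module name, and tolerances below are assumptions, not part of the source.

```python
# Hypothetical harness; assumes the CUDA listing above is saved as fused_pool.cu.
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

ext = load(name="fused_pool_ext", sources=["fused_pool.cu"], verbose=True)

dev = "cuda"
x = torch.randn(128, 3, 32, 32, 32, device=dev)
weight = torch.randn(3, 16, 3, 3, 3, device=dev)  # (in_ch, out_ch, kD, kH, kW)
bias = torch.randn(16, device=dev)
bn_w, bn_b = torch.ones(16, device=dev), torch.zeros(16, device=dev)
bn_rm, bn_rv = torch.zeros(16, device=dev), torch.ones(16, device=dev)
eps, mom = torch.tensor(1e-5, device=dev), torch.tensor(0.1, device=dev)

# Eager reference (clones keep batch norm's in-place running-stat updates separate)
ref = F.conv_transpose3d(x, weight, bias, stride=2, padding=1)
ref = F.batch_norm(ref, bn_rm.clone(), bn_rv.clone(), bn_w, bn_b,
                   training=True, momentum=0.1, eps=1e-5)
ref = F.avg_pool3d(F.avg_pool3d(ref, kernel_size=2), kernel_size=2)

got = ext.forward(x, 2, 1, weight, bias, bn_w, bn_b, bn_rm, bn_rv, eps, mom)
torch.testing.assert_close(got, ref, rtol=1e-4, atol=1e-4)
```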
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.310 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.296 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 7.678 | % | 0.000 | 5 |
Issued Ipc Active | 0.310 | inst/cycle | 0.000 | 5 |
SM Busy | 7.678 | % | 0.000 | 5 |
Memory Throughput | 2396661684191.698 | byte/second | 129276977967719809024.000 | 5 |
Mem Busy | 43.214 | % | 0.047 | 5 |
Max Bandwidth | 71.494 | % | 0.115 | 5 |
L1/TEX Hit Rate | 75.500 | % | 0.000 | 5 |
L2 Hit Rate | 7.950 | % | 0.000 | 5 |
Mem Pipes Busy | 8.322 | % | 0.002 | 5 |
Warp Cycles Per Issued Instruction | 48.714 | cycle | 0.001 | 5 |
Warp Cycles Per Executed Instruction | 48.720 | cycle | 0.001 | 5 |
Avg. Active Threads Per Warp | 31.840 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 30.610 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 23.356 | % | 0.000 | 5 |
Achieved Active Warps Per SM | 14.946 | warp | 0.000 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (23.3%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 463106.10 | μs |
Device Time | 5244.19 | μs |
Self CPU Time | 66.84 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 463039.26 | μs |
Device Time | 5244.19 | μs |
Self CPU Time | 128.82 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaStreamSynchronize | ||
CPU Time | 9586196.86 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 9586196.86 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::item | ||
CPU Time | 9595024.72 | μs |
Device Time | 1623.68 | μs |
Self CPU Time | 1345.49 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_local_scalar_dense | ||
CPU Time | 9593679.23 | μs |
Device Time | 1623.68 | μs |
Self CPU Time | 3590.61 | μs |
Self Device Time | 1623.68 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::conv_transpose3d | ||
CPU Time | 215771.17 | μs |
Device Time | 3377796.71 | μs |
Self CPU Time | 1010.60 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::batch_norm | ||
CPU Time | 33133.96 | μs |
Device Time | 5972593.73 | μs |
Self CPU Time | 988.34 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_batch_norm_impl_index | ||
CPU Time | 32145.62 | μs |
Device Time | 5972593.73 | μs |
Self CPU Time | 1087.24 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::cudnn_batch_norm | ||
CPU Time | 31058.38 | μs |
Device Time | 5972593.73 | μs |
Self CPU Time | 12039.19 | μs |
Self Device Time | 5972593.73 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void cudnn::bn_fw_tr_1C11_kernel_NCHW<float, float, int, 512, true, 1, true>(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float) | ||
CPU Time | 0.00 | μs |
Device Time | 5972593.73 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 5972593.73 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |