72_ConvTranspose3d_BatchNorm_AvgPool_AvgPool
• warp_uniform_control_flow_base
import torch
import torch.nn as nn
import torch.nn.functional as F
def module_fn(
x: torch.Tensor,
stride: int,
padding: int,
conv_transpose: torch.Tensor,
conv_transpose_bias: torch.Tensor,
bn_weight: torch.Tensor,
bn_bias: torch.Tensor,
bn_running_mean: torch.Tensor,
bn_running_var: torch.Tensor,
bn_eps: torch.Tensor,
bn_momentum: torch.Tensor,
) -> torch.Tensor:
"""
Applies a 3D transposed convolution, batch normalization and two average pooling layers.
Args:
x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
stride (int): Stride of the transposed convolution
padding (int): Padding of the transposed convolution
conv_transpose (torch.Tensor): Transposed convolution weight tensor
conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
bn_weight (torch.Tensor): Batch norm weight parameter
bn_bias (torch.Tensor): Batch norm bias parameter
bn_running_mean (torch.Tensor): Batch norm running mean
bn_running_var (torch.Tensor): Batch norm running variance
bn_eps (torch.Tensor): Small constant for numerical stability
bn_momentum (torch.Tensor): Momentum for running stats
Returns:
torch.Tensor: Output tensor after applying transposed conv, batch norm and avg pooling
"""
x = F.conv_transpose3d(
x, conv_transpose, bias=conv_transpose_bias, stride=stride, padding=padding
)
x = F.batch_norm(
x,
bn_running_mean,
bn_running_var,
bn_weight,
bn_bias,
training=True,
momentum=bn_momentum,
eps=bn_eps,
)
x = F.avg_pool3d(x, kernel_size=2)
x = F.avg_pool3d(x, kernel_size=2)
return x
class Model(nn.Module):
"""
    A model that performs a 3D transposed convolution, followed by batch normalization
    and two average pooling layers.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride, padding, bias_shape
):
super(Model, self).__init__()
conv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size)
bn = nn.BatchNorm3d(out_channels)
self.conv_transpose_parameter = nn.Parameter(conv.weight)
self.conv_transpose_bias = nn.Parameter(conv.bias)
self.bn_weight = nn.Parameter(bn.weight + torch.randn(bn.weight.shape) * 0.02)
self.bn_bias = nn.Parameter(bn.bias + torch.randn(bn.bias.shape) * 0.02)
self.register_buffer(
"bn_running_mean",
bn.running_mean + torch.randn(bn.running_mean.shape) * 0.02,
)
self.register_buffer(
"bn_running_var",
bn.running_var + torch.randn(bn.running_var.shape).abs() * 0.02,
)
self.register_buffer("bn_eps", torch.tensor(1e-5))
self.register_buffer("bn_momentum", torch.tensor(0.1))
def forward(self, x, stride, padding, fn=module_fn):
return fn(
x,
stride,
padding,
self.conv_transpose_parameter,
self.conv_transpose_bias,
self.bn_weight,
self.bn_bias,
self.bn_running_mean,
self.bn_running_var,
self.bn_eps,
self.bn_momentum,
)
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 32, 32, 32
kernel_size = 3
stride = 2
padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width), stride, padding]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, stride, padding, bias_shape]
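# A minimal usage sketch (an assumption about the test harness, not part of the
# benchmark definition): get_init_inputs() builds the model and get_inputs()
# supplies the forward arguments.
if __name__ == "__main__":
    model = Model(*get_init_inputs())
    out = model(*get_inputs())
    # Expected shape should be (128, 16, 15, 15, 15): 32 -> 63 after the stride-2
    # transposed conv, then 63 -> 31 -> 15 after the two kernel-2 average pools.
    print(out.shape)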
import torch
import torch.nn as nn
class Model(nn.Module):
"""
    A model that performs a 3D transposed convolution, followed by batch normalization
    and two average pooling layers.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias_shape):
super(Model, self).__init__()
self.conv_transpose = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding)
self.batch_norm = nn.BatchNorm3d(out_channels)
# Add noise to batch norm parameters to match functional implementation
self.batch_norm.weight = nn.Parameter(self.batch_norm.weight + torch.randn(self.batch_norm.weight.shape) * 0.02)
self.batch_norm.bias = nn.Parameter(self.batch_norm.bias + torch.randn(self.batch_norm.bias.shape) * 0.02)
self.batch_norm.running_mean = self.batch_norm.running_mean + torch.randn(self.batch_norm.running_mean.shape) * 0.02
self.batch_norm.running_var = self.batch_norm.running_var + torch.randn(self.batch_norm.running_var.shape).abs() * 0.02
self.avg_pool1 = nn.AvgPool3d(kernel_size=2)
self.avg_pool2 = nn.AvgPool3d(kernel_size=2)
def forward(self, x):
x = self.conv_transpose(x)
x = self.batch_norm(x)
x = self.avg_pool1(x)
x = self.avg_pool2(x)
return x
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 32, 32, 32
kernel_size = 3
stride = 2
padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width)]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, stride, padding, bias_shape]
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
#include <vector>
#define BLOCK_SIZE 256
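// Fused pooling kernel: two successive 2x2x2 average pools are mathematically
// equivalent to a single 4x4x4 average pool, so the kernel is instantiated once
// with KernelSize = 4. A grid-stride loop with fully unrolled, fixed-trip-count
// inner loops keeps control flow uniform across every warp.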
template<int KernelSize>
__global__ void optimized_avgpool_kernel(const float* __restrict__ input, float* __restrict__ output,
int N, int C, int pooled_D, int pooled_H, int pooled_W,
int input_D, int input_H, int input_W) {
const int total = N * C * pooled_D * pooled_H * pooled_W;
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < total;
i += gridDim.x * blockDim.x) {
const int pw = i % pooled_W;
const int ph = (i / pooled_W) % pooled_H;
const int pd = (i / (pooled_W * pooled_H)) % pooled_D;
const int c = (i / (pooled_W * pooled_H * pooled_D)) % C;
const int n = i / (pooled_W * pooled_H * pooled_D * C);
const int d_start = pd * KernelSize;
const int h_start = ph * KernelSize;
const int w_start = pw * KernelSize;
float sum = 0.0f;
#pragma unroll
for (int dz = 0; dz < KernelSize; ++dz) {
const int d = d_start + dz;
#pragma unroll
for (int dy = 0; dy < KernelSize; ++dy) {
const int h = h_start + dy;
#pragma unroll
for (int dx = 0; dx < KernelSize; ++dx) {
const int w = w_start + dx;
sum += input[((n * C + c) * input_D + d) * (input_H * input_W)
+ h * input_W + w];
}
}
}
output[i] = sum / (KernelSize*KernelSize*KernelSize);
}
}
at::Tensor module_fn_forward(
at::Tensor x,
int64_t stride,
int64_t padding,
at::Tensor conv_transpose,
at::Tensor conv_transpose_bias,
at::Tensor bn_weight,
at::Tensor bn_bias,
at::Tensor bn_running_mean,
at::Tensor bn_running_var,
at::Tensor bn_eps,
at::Tensor bn_momentum
) {
// Input validation checks (unchanged from previous implementation)
TORCH_CHECK(x.is_cuda(), "x must be CUDA");
// ... (other tensor checks)
// Existing convolution + batch norm
auto y = at::conv_transpose3d(x, conv_transpose, conv_transpose_bias,
{stride, stride, stride}, {padding, padding, padding});
y = at::batch_norm(y, bn_weight, bn_bias, bn_running_mean, bn_running_var,
true, bn_momentum.item<double>(), bn_eps.item<double>(), true);
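    // Note: the .item<double>() calls above copy the scalars to the host and force
    // a device synchronization (visible as aten::item / cudaStreamSynchronize in
    // the profile below).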
// Prepare for fused kernel
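    // Each 2x2x2 average pool halves every spatial dimension, so the two pools
    // together shrink D/H/W by a factor of 4; the fused kernel produces that
    // output size directly.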
const auto sizes = y.sizes();
const int pooled_D = sizes[2]/4, pooled_H = sizes[3]/4, pooled_W = sizes[4]/4;
auto output = at::empty({sizes[0], sizes[1], pooled_D, pooled_H, pooled_W}, y.options());
    // Launch configuration: enough BLOCK_SIZE-thread blocks to cover every output element
const int blocks = (output.numel() + BLOCK_SIZE - 1) / BLOCK_SIZE;
optimized_avgpool_kernel<4><<<blocks, BLOCK_SIZE, 0, at::cuda::getCurrentCUDAStream()>>>(
y.data_ptr<float>(), output.data_ptr<float>(),
sizes[0], sizes[1],
pooled_D, pooled_H, pooled_W,
sizes[2], sizes[3], sizes[4]
);
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &module_fn_forward, "Warp-uniform fused pool kernel");
}
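As a sanity check on the fusion above, this hedged sketch (plain PyTorch, independent of the extension) confirms that two successive kernel-2 average pools match a single kernel-4 average pool:

import torch
import torch.nn.functional as F

x = torch.randn(2, 3, 16, 16, 16)
# Two 2x2x2 pools vs. one 4x4x4 pool over the same tensor.
two_step = F.avg_pool3d(F.avg_pool3d(x, kernel_size=2), kernel_size=2)
one_step = F.avg_pool3d(x, kernel_size=4)
assert torch.allclose(two_step, one_step, atol=1e-6)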
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.600 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.600 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 15.044 | % | 0.001 | 5 |
Issued Ipc Active | 0.600 | inst/cycle | 0.000 | 5 |
SM Busy | 15.044 | % | 0.001 | 5 |
Memory Throughput | 2952653021879.200 | byte/second | 12430383282176462848.000 | 5 |
Mem Busy | 55.334 | % | 0.005 | 5 |
Max Bandwidth | 88.082 | % | 0.012 | 5 |
L1/TEX Hit Rate | 75.090 | % | 0.000 | 5 |
L2 Hit Rate | 10.036 | % | 0.000 | 5 |
Mem Pipes Busy | 10.652 | % | 0.000 | 5 |
Warp Cycles Per Issued Instruction | 95.898 | cycle | 0.004 | 5 |
Warp Cycles Per Executed Instruction | 95.962 | cycle | 0.004 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 29.180 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 90.094 | % | 0.003 | 5 |
Achieved Active Warps Per SM | 57.664 | warp | 0.001 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
INF Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 294372.36 | μs |
Device Time | 5030.28 | μs |
Self CPU Time | 63.40 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 294308.96 | μs |
Device Time | 5030.28 | μs |
Self CPU Time | 131.83 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaStreamSynchronize | ||
CPU Time | 9600725.61 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 9600725.61 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::conv_transpose3d | ||
CPU Time | 211531.00 | μs |
Device Time | 3409209.18 | μs |
Self CPU Time | 1211.61 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::item | ||
CPU Time | 9612294.04 | μs |
Device Time | 1670.47 | μs |
Self CPU Time | 1596.95 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_local_scalar_dense | ||
CPU Time | 9610697.09 | μs |
Device Time | 1670.47 | μs |
Self CPU Time | 5145.25 | μs |
Self Device Time | 1670.47 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::batch_norm | ||
CPU Time | 40047.05 | μs |
Device Time | 6007647.87 | μs |
Self CPU Time | 1266.05 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_batch_norm_impl_index | ||
CPU Time | 38781.01 | μs |
Device Time | 6007647.87 | μs |
Self CPU Time | 1382.47 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::cudnn_batch_norm | ||
CPU Time | 37398.54 | μs |
Device Time | 6007647.87 | μs |
Self CPU Time | 13874.26 | μs |
Self Device Time | 6007647.87 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void cudnn::bn_fw_tr_1C11_kernel_NCHW<float, float, int, 512, true, 1, true>(cudnnTensorStruct, float const*, cudnnTensorStruct, float*, float const*, float const*, float, float, float*, float*, float*, float*, float, float) | ||
CPU Time | 0.00 | μs |
Device Time | 6007647.87 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 6007647.87 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |