3_ConvTranspose3d_Sum_LayerNorm_AvgPool_GELU
• unrolled_layernorm_base
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    conv_transpose_weight: torch.Tensor,
    conv_transpose_bias: torch.Tensor,
    sum_weight: torch.Tensor,
    norm_weight: torch.Tensor,
    norm_bias: torch.Tensor,
    stride: tuple,
    padding: tuple,
    output_padding: tuple,
    pool_kernel_size: tuple,
    norm_shape: tuple,
) -> torch.Tensor:
    """
    Functional implementation of a sequence of operations:
    1. 3D transposed convolution
    2. Addition with a learnable weight
    3. Layer normalization
    4. 3D average pooling
    5. GELU activation

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
        conv_transpose_weight (torch.Tensor): Weight tensor for transposed convolution
        conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
        sum_weight (torch.Tensor): Learnable weight for addition
        norm_weight (torch.Tensor): Weight tensor for layer normalization
        norm_bias (torch.Tensor): Bias tensor for layer normalization
        stride (tuple): Stride for transposed convolution, as (depth_stride, height_stride, width_stride)
        padding (tuple): Padding for transposed convolution, as (depth_pad, height_pad, width_pad)
        output_padding (tuple): Output padding for transposed convolution, as (depth_pad, height_pad, width_pad)
        pool_kernel_size (tuple): Kernel size for average pooling, as (depth_kernel, height_kernel, width_kernel)
        norm_shape (tuple): Shape over which layer normalization is applied (must match the trailing dimensions of its input)

    Returns:
        torch.Tensor: Output tensor after applying all operations
    """
    x = F.conv_transpose3d(
        x,
        conv_transpose_weight,
        bias=conv_transpose_bias,
        stride=stride,
        padding=padding,
        output_padding=output_padding,
    )
    x = x + sum_weight
    x = F.layer_norm(x, norm_shape, norm_weight, norm_bias)
    x = F.avg_pool3d(x, kernel_size=pool_kernel_size)
    x = F.gelu(x)
    return x
class Model(nn.Module):
    """
    Model that performs a 3D transposed convolution, followed by a sum, layer normalization, average pooling, and GELU activation.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        sum_weight,
        norm_shape,
        pool_kernel_size,
    ):
        super(Model, self).__init__()
        conv = nn.ConvTranspose3d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
        )
        self.conv_transpose_weight = nn.Parameter(conv.weight)
        self.conv_transpose_bias = nn.Parameter(conv.bias)
        self.sum_weight = nn.Parameter(torch.tensor(sum_weight))
        norm = nn.LayerNorm(norm_shape)
        self.norm_weight = nn.Parameter(norm.weight + torch.randn(norm_shape) * 0.02)
        self.norm_bias = nn.Parameter(norm.bias + torch.randn(norm_shape) * 0.02)

    def forward(
        self,
        x,
        stride,
        padding,
        output_padding,
        pool_kernel_size,
        norm_shape,
        fn=module_fn,
    ):
        return fn(
            x,
            self.conv_transpose_weight,
            self.conv_transpose_bias,
            self.sum_weight,
            self.norm_weight,
            self.norm_bias,
            stride,
            padding,
            output_padding,
            pool_kernel_size,
            norm_shape,
        )
batch_size = 128
in_channels = 32
out_channels = 64
depth, height, width = 16, 32, 32
kernel_size = (3, 3, 3)
stride = (2, 2, 2)
padding = (1, 1, 1)
output_padding = (1, 1, 1)
sum_weight = 1.0
norm_shape = (out_channels,)
pool_kernel_size = (2, 2, 2)
def get_inputs():
    return [
        torch.randn(batch_size, in_channels, depth, height, width),
        stride,
        padding,
        output_padding,
        pool_kernel_size,
        norm_shape,
    ]
def get_init_inputs():
    return [
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        sum_weight,
        norm_shape,
        pool_kernel_size,
    ]
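For reference, a minimal usage sketch of module_fn with the constants above (the conv and norm modules here are only an illustrative, convenient way to materialize correctly shaped parameters; the names conv, norm, and y are not part of the benchmark):

# Minimal sketch: call module_fn directly with parameters built from
# throwaway modules, using the hyperparameters defined above.
conv = nn.ConvTranspose3d(
    in_channels, out_channels, kernel_size,
    stride=stride, padding=padding, output_padding=output_padding,
)
norm = nn.LayerNorm(norm_shape)
y = module_fn(
    torch.randn(batch_size, in_channels, depth, height, width),
    conv.weight,
    conv.bias,
    torch.tensor(sum_weight),
    norm.weight,
    norm.bias,
    stride,
    padding,
    output_padding,
    pool_kernel_size,
    norm_shape,
)
# y has shape (batch_size, out_channels, depth, height, width) = (128, 64, 16, 32, 32)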
import torch
import torch.nn as nn
class Model(nn.Module):
    """
    Model that performs a 3D transposed convolution, followed by a sum, layer normalization, average pooling, and GELU activation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, output_padding, sum_weight, norm_shape, pool_kernel_size):
        super(Model, self).__init__()
        self.conv_transpose = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=output_padding)
        self.sum_weight = nn.Parameter(torch.tensor(sum_weight))
        self.norm = nn.LayerNorm(norm_shape)
        self.norm.weight = nn.Parameter(self.norm.weight + torch.randn(norm_shape) * 0.02)
        self.norm.bias = nn.Parameter(self.norm.bias + torch.randn(norm_shape) * 0.02)
        self.avg_pool = nn.AvgPool3d(kernel_size=pool_kernel_size)
        self.gelu = nn.GELU()

    def forward(self, x):
        x = self.conv_transpose(x)
        x = x + self.sum_weight
        x = self.norm(x)
        x = self.avg_pool(x)
        x = self.gelu(x)
        return x
batch_size = 128
in_channels = 32
out_channels = 64
depth, height, width = 16, 32, 32
kernel_size = (3, 3, 3)
stride = (2, 2, 2)
padding = (1, 1, 1)
output_padding = (1, 1, 1)
sum_weight = 1.0
norm_shape = (out_channels,)
pool_kernel_size = (2, 2, 2)
def get_inputs():
    return [torch.randn(batch_size, in_channels, depth, height, width)]


def get_init_inputs():
    return [in_channels, out_channels, kernel_size, stride, padding, output_padding, sum_weight, norm_shape, pool_kernel_size]
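With these hyperparameters the transposed convolution doubles every spatial dimension. For the depth axis, the standard output-size formula for a transposed convolution gives

$$D_{\text{out}} = (D_{\text{in}} - 1)\,s - 2p + k + p_{\text{out}} = (16 - 1) \cdot 2 - 2 \cdot 1 + 3 + 1 = 32,$$

and likewise height and width go from 32 to 64. This is also why layer normalization over norm_shape = (64,) lines up with the last (width) dimension of the convolution output; the 2×2×2 average pooling then halves each spatial dimension again.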
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <cuda_runtime.h>
#include <vector>
#define BLOCK_SIZE 256
#define WARP_SIZE 32
// This kernel implements layer normalization with compiler-directed loop
// unrolling to reduce loop overhead and improve performance. The critical
// loops that accumulate the sum and sum of squares, as well as the final
// normalization loop, are annotated with '#pragma unroll'.
template <typename T>
__global__ void unrolled_layernorm_kernel(
    const T* __restrict__ input,
    const T* __restrict__ gamma,
    const T* __restrict__ beta,
    T* __restrict__ output,
    int n1,
    int n2
) {
    // Shared accumulators for the block-wide sum and sum of squares
    // (despite the names, these hold raw sums until divided by n2)
    __shared__ float s_mean;
    __shared__ float s_variance;
    if (threadIdx.x == 0) {
        s_mean = 0.0f;
        s_variance = 0.0f;
    }
    __syncthreads();

    const int tid = threadIdx.x;
    const int bid = blockIdx.x;      // each block processes one normalization group
    const int offset = bid * n2;     // 32-bit indexing assumes n1 * n2 < 2^31 elements

    float local_sum = 0.0f;
    float local_sq_sum = 0.0f;

    // Strided accumulation over the normalized dimension; '#pragma unroll'
    // hints the compiler to unroll (most effective when n2 is small)
    #pragma unroll
    for (int i = tid; i < n2; i += BLOCK_SIZE) {
        float val = __ldg(&input[offset + i]);
        local_sum += val;
        local_sq_sum += val * val;
    }

    // Warp-level reduction using shuffle instructions, fully unrolled
    float warp_sum = local_sum;
    float warp_sq_sum = local_sq_sum;
    #pragma unroll
    for (int offset_w = WARP_SIZE / 2; offset_w > 0; offset_w /= 2) {
        warp_sum += __shfl_down_sync(0xffffffff, warp_sum, offset_w);
        warp_sq_sum += __shfl_down_sync(0xffffffff, warp_sq_sum, offset_w);
    }

    // Only the first lane of each warp contributes to the shared accumulators
    if ((tid & (WARP_SIZE - 1)) == 0) {
        atomicAdd(&s_mean, warp_sum);
        atomicAdd(&s_variance, warp_sq_sum);
    }
    __syncthreads();

    // Compute mean and variance; the epsilon guards against a zero variance
    float mean = s_mean / n2;
    float variance = s_variance / n2 - mean * mean;
    float inv_std = rsqrtf(variance + 1e-5f);

    // Normalize and apply the affine scale and bias
    #pragma unroll
    for (int i = tid; i < n2; i += BLOCK_SIZE) {
        int idx = offset + i;
        float normalized = (input[idx] - mean) * inv_std;
        output[idx] = gamma[i] * normalized + beta[i];
    }
}
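// The variance above is computed in a single pass via the identity
//   Var[x] = E[x^2] - (E[x])^2, i.e. variance = s_variance / n2 - mean * mean,
// which is why the reduction accumulates both the sum and the sum of squares.
// The one-pass form reads the data only once, at the cost of some precision
// when the mean is large relative to the standard deviation.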
// The forward function performs conv_transpose3d, adds sum_weight, applies our unrolled fused layer norm,
// then follows with avg_pool3d and GELU activation.
torch::Tensor forward(
    torch::Tensor x,
    torch::Tensor conv_transpose_weight,
    torch::Tensor conv_transpose_bias,
    torch::Tensor sum_weight,
    torch::Tensor norm_weight,
    torch::Tensor norm_bias,
    std::vector<int64_t> stride,
    std::vector<int64_t> padding,
    std::vector<int64_t> output_padding,
    std::vector<int64_t> pool_kernel_size,
    std::vector<int64_t> norm_shape
) {
    // Ensure all inputs are contiguous so the kernel can index them linearly
    x = x.contiguous();
    conv_transpose_weight = conv_transpose_weight.contiguous();
    sum_weight = sum_weight.contiguous();
    norm_weight = norm_weight.contiguous();
    norm_bias = norm_bias.contiguous();

    at::IntArrayRef strideRef(stride);
    at::IntArrayRef paddingRef(padding);
    at::IntArrayRef outputPaddingRef(output_padding);
    at::IntArrayRef poolKernelRef(pool_kernel_size);

    // 1. 3D transposed convolution
    auto out = at::conv_transpose3d(
        x,
        conv_transpose_weight,
        conv_transpose_bias,
        strideRef,
        paddingRef,
        outputPaddingRef,
        /*groups=*/1,
        /*dilation=*/1
    );

    // 2. Elementwise addition with sum_weight
    out.add_(sum_weight);

    // 3. Custom fused layer normalization with unrolled loops.
    //    n1 is the number of normalization groups (product of the leading
    //    dimensions), n2 the number of elements per group (product of
    //    norm_shape). E.g., for out of shape (128, 64, 32, 64, 64) and
    //    norm_shape = (64,), n1 = 128 * 64 * 32 * 64 and n2 = 64.
    auto out_size = out.sizes();
    const int64_t norm_dims = static_cast<int64_t>(norm_shape.size());
    int64_t n1 = 1;
    for (int64_t i = 0; i < out.dim() - norm_dims; ++i) {
        n1 *= out_size[i];
    }
    int64_t n2 = 1;
    for (size_t i = 0; i < norm_shape.size(); ++i) {
        n2 *= norm_shape[i];
    }

    auto output = torch::empty_like(out);
    dim3 grid(static_cast<unsigned int>(n1));
    dim3 block(BLOCK_SIZE);
    unrolled_layernorm_kernel<float><<<grid, block>>>(
        out.data_ptr<float>(),
        norm_weight.data_ptr<float>(),
        norm_bias.data_ptr<float>(),
        output.data_ptr<float>(),
        static_cast<int>(n1),
        static_cast<int>(n2)
    );
    // Blocks the host until the kernel finishes; the subsequent ATen ops are
    // stream-ordered, so this is only needed to surface kernel errors early.
    cudaDeviceSynchronize();

    // 4. 3D average pooling
    output = at::avg_pool3d(
        output,
        poolKernelRef,            // kernel_size
        poolKernelRef,            // stride (same as kernel_size)
        /*padding=*/{0, 0, 0},
        /*ceil_mode=*/false,
        /*count_include_pad=*/true
    );

    // 5. GELU activation
    output = at::gelu(output);
    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Unrolled fused layer norm forward (CUDA)");
}
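A rough sketch of how this extension might be built and exercised from Python, assuming the CUDA source above is saved as unrolled_layernorm.cu (the file name, the variable names ext/model/x/out, and the tolerances are illustrative; a CUDA device and float32 tensors are assumed):

import torch
from torch.utils.cpp_extension import load

# JIT-compile the extension from the source file above.
ext = load(name="unrolled_layernorm", sources=["unrolled_layernorm.cu"])

# Reuse the reference Model above to get correctly shaped parameters.
model = Model(in_channels, out_channels, kernel_size, stride, padding,
              output_padding, sum_weight, norm_shape, pool_kernel_size).cuda()
x = torch.randn(batch_size, in_channels, depth, height, width, device="cuda")
out = ext.forward(
    x,
    model.conv_transpose.weight,
    model.conv_transpose.bias,
    model.sum_weight,
    model.norm.weight,
    model.norm.bias,
    list(stride), list(padding), list(output_padding),
    list(pool_kernel_size), list(norm_shape),
)
# Sanity check against the eager PyTorch model.
torch.testing.assert_close(out, model(x), rtol=1e-3, atol=1e-3)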
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 2.750 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 2.740 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 68.664 | % | 0.000 | 5 |
Issued Ipc Active | 2.750 | inst/cycle | 0.000 | 5 |
SM Busy | 68.664 | % | 0.000 | 5 |
Memory Throughput | 402563091128.206 | byte/second | 523755535087189.000 | 5 |
Mem Busy | 71.354 | % | 0.000 | 5 |
Max Bandwidth | 61.288 | % | 0.000 | 5 |
L1/TEX Hit Rate | 59.960 | % | 0.000 | 5 |
L2 Hit Rate | 50.048 | % | 0.000 | 5 |
Mem Pipes Busy | 61.288 | % | 0.000 | 5 |
Warp Cycles Per Issued Instruction | 18.886 | cycle | 0.000 | 5 |
Warp Cycles Per Executed Instruction | 18.886 | cycle | 0.000 | 5 |
Avg. Active Threads Per Warp | 25.740 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 22.750 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 10.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 28.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 81.940 | % | 0.000 | 5 |
Achieved Active Warps Per SM | 52.440 | warp | 0.000 | 5 |
Rule | Description |
---|---|
INF HighPipeUtilization | ALU is the highest-utilized pipeline (32.0%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN ThreadDivergence | Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 25.7 threads being active per cycle. This is further reduced to 22.7 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp(). |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (81.9%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 647932.16 | μs |
Device Time | 27997.50 | μs |
Self CPU Time | 88.39 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 647843.76 | μs |
Device Time | 27997.50 | μs |
Self CPU Time | 177.80 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 620237.29 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 1162.36 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 613451.50 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 613451.50 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::conv_transpose3d | ||
CPU Time | 143987.11 | μs |
Device Time | 2654657.50 | μs |
Self CPU Time | 731.54 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::convolution | ||
CPU Time | 143255.56 | μs |
Device Time | 2654657.50 | μs |
Self CPU Time | 1065.76 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_convolution | ||
CPU Time | 142189.80 | μs |
Device Time | 2654657.50 | μs |
Self CPU Time | 2011.24 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::add_ | ||
CPU Time | 20200.90 | μs |
Device Time | 2001282.33 | μs |
Self CPU Time | 3735.67 | μs |
Self Device Time | 2001282.33 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceSynchronize | ||
CPU Time | 9987195.53 | μs |
Device Time | 39686.17 | μs |
Self CPU Time | 9987195.53 | μs |
Self Device Time | 39686.17 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void unrolled_layernorm_kernel<float>(float const*, float const*, float const*, float*, int, int) | ||
CPU Time | 0.00 | μs |
Device Time | 5492144.44 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 5492144.44 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |