89_ConvTranspose3d_MaxPool_Softmax_Subtract_Swish_Max
• balanced_thread_block_distribution_base
import torch
import torch.nn as nn
import torch.nn.functional as F
def module_fn(
    x: torch.Tensor,
    stride: int,
    padding: int,
    output_padding: int,
    pool_kernel_size: int,
    pool_stride: int,
    pool_padding: int,
    conv_transpose: torch.Tensor,
    conv_transpose_bias: torch.Tensor,
    subtract: torch.Tensor,
) -> torch.Tensor:
    """
    Applies a sequence of operations:
    - ConvTranspose3d
    - MaxPool3d
    - Softmax
    - Subtract
    - Swish
    - Max

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
        stride (int): Stride for conv transpose
        padding (int): Padding for conv transpose
        output_padding (int): Output padding for conv transpose
        pool_kernel_size (int): Kernel size for max pooling
        pool_stride (int): Stride for max pooling
        pool_padding (int): Padding for max pooling
        conv_transpose (torch.Tensor): Weight tensor for transposed convolution
        conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
        subtract (torch.Tensor): Subtraction parameter tensor

    Returns:
        torch.Tensor: Output tensor of shape (batch_size, depth_out, height_out, width_out);
        the channel dimension is removed by the final max over dim=1.
    """
    x = F.conv_transpose3d(
        x,
        conv_transpose,
        bias=conv_transpose_bias,
        stride=stride,
        padding=padding,
        output_padding=output_padding,
    )
    x = F.max_pool3d(
        x, kernel_size=pool_kernel_size, stride=pool_stride, padding=pool_padding
    )
    x = F.softmax(x, dim=1)
    x = x - subtract.view(1, -1, 1, 1, 1)
    x = torch.sigmoid(x) * x  # Swish activation: x * sigmoid(x)
    x = torch.max(x, dim=1)[0]
    return x
class Model(nn.Module):
    """
    A model that performs a sequence of operations:
    - ConvTranspose3d
    - MaxPool3d
    - Softmax
    - Subtract
    - Swish
    - Max
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        pool_kernel_size,
        pool_stride,
        pool_padding,
    ):
        super(Model, self).__init__()
        conv_transpose = nn.ConvTranspose3d(in_channels, out_channels, kernel_size)
        self.conv_transpose_parameter = conv_transpose.weight
        self.conv_transpose_bias = conv_transpose.bias
        self.subtract_parameter = nn.Parameter(torch.randn(out_channels) * 0.02)

    def forward(
        self,
        x,
        stride,
        padding,
        output_padding,
        pool_kernel_size,
        pool_stride,
        pool_padding,
        fn=module_fn,
    ):
        return fn(
            x,
            stride,
            padding,
            output_padding,
            pool_kernel_size,
            pool_stride,
            pool_padding,
            self.conv_transpose_parameter,
            self.conv_transpose_bias,
            self.subtract_parameter,
        )
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 16, 32, 32
kernel_size = 3
stride = 2
padding = 1
output_padding = 1
pool_kernel_size = 2
pool_stride = 2
pool_padding = 0
def get_inputs():
    return [
        torch.randn(batch_size, in_channels, depth, height, width),
        stride,
        padding,
        output_padding,
        pool_kernel_size,
        pool_stride,
        pool_padding,
    ]


def get_init_inputs():
    return [
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        pool_kernel_size,
        pool_stride,
        pool_padding,
    ]
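A minimal usage sketch (not part of the original submission) for the functional variant above, assuming the definitions in that block are in scope:

# Usage sketch: instantiate the Model and run it on the benchmark inputs.
model = Model(*get_init_inputs())
out = model(*get_inputs())
# ConvTranspose3d (stride=2, padding=1, output_padding=1) maps (16, 32, 32) to (32, 64, 64);
# MaxPool3d (kernel=2, stride=2) maps it back to (16, 32, 32); the max over dim=1 drops channels.
print(out.shape)  # expected: torch.Size([128, 16, 32, 32])

The block that follows is the self-contained nn.Module reference for the same pipeline.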
import torch
import torch.nn as nn
class Model(nn.Module):
    """
    A model that performs a sequence of operations:
    - ConvTranspose3d
    - MaxPool3d
    - Softmax
    - Subtract
    - Swish
    - Max
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, output_padding, pool_kernel_size, pool_stride, pool_padding):
        super(Model, self).__init__()
        self.conv_transpose = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, output_padding=output_padding)
        self.max_pool = nn.MaxPool3d(kernel_size=pool_kernel_size, stride=pool_stride, padding=pool_padding)
        self.subtract = nn.Parameter(torch.randn(out_channels) * 0.02)  # subtraction parameter, element-wise across channels

    def forward(self, x):
        x = self.conv_transpose(x)
        x = self.max_pool(x)
        x = torch.softmax(x, dim=1)  # softmax across channels (dim=1)
        x = x - self.subtract.view(1, -1, 1, 1, 1)  # subtract per channel
        x = torch.sigmoid(x) * x  # Swish activation
        x = torch.max(x, dim=1)[0]  # max reduction over the channel dimension
        return x
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 16, 32, 32
kernel_size = 3
stride = 2
padding = 1
output_padding = 1
pool_kernel_size = 2
pool_stride = 2
pool_padding = 0
def get_inputs():
    return [torch.randn(batch_size, in_channels, depth, height, width)]


def get_init_inputs():
    return [in_channels, out_channels, kernel_size, stride, padding, output_padding, pool_kernel_size, pool_stride, pool_padding]
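A small equivalence sketch (not part of the original harness): with shared parameters, the nn.Module reference above should reproduce module_fn from the functional variant earlier on this page; module_fn and the hyperparameters are assumed to be in scope.

# Equivalence check: feed the reference model's own weights through module_fn.
model = Model(*get_init_inputs())
x = get_inputs()[0]
with torch.no_grad():
    out_module = model(x)
    out_functional = module_fn(
        x,
        stride, padding, output_padding,
        pool_kernel_size, pool_stride, pool_padding,
        model.conv_transpose.weight,
        model.conv_transpose.bias,
        model.subtract,
    )
print(torch.allclose(out_module, out_functional))  # expected: True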
Operation Name | 89_ConvTranspose3d_MaxPool_Softmax_Subtract_Swish_Max |
Level ID | 2 |
Task ID | 89 |
Kernel Name | balanced_thread_block_distribution_base |
CUDA Speedup (Native) | 1.137x |
CUDA Speedup (Compile) | 0.994x |
CUDA Runtime | 5.027 ms |
PyTorch Runtime (Native) | 5.715 ms |
PyTorch Runtime (Compile) | 4.998 ms |
Correct | True |
Max Diff (vs. Reference) | 0.000000 |
Model | azure-gpt-4o-2024-08-06 |
Temperature | 1.00 |
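The speedup figures in the table are simply runtime ratios; a quick check against the reported runtimes (a sketch, values copied from the table above):

# Speedup = PyTorch runtime / custom CUDA runtime, in milliseconds.
cuda_ms, native_ms, compile_ms = 5.027, 5.715, 4.998
print(round(native_ms / cuda_ms, 3))   # 1.137 -> CUDA Speedup (Native)
print(round(compile_ms / cuda_ms, 3))  # 0.994 -> CUDA Speedup (Compile)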
#include <torch/extension.h>
#include <pybind11/pybind11.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#include <cmath>
namespace py = pybind11;
// This CUDA kernel distributes workloads evenly across threads and blocks.
// Each thread processes one spatial location for a given n, d, h, w.
// The loops over channels are unrolled with #pragma unroll to reduce loop overhead.
__global__ void balanced_fusion_kernel(
    const float* __restrict__ input,            // pooled output: shape [N, C, D, H, W]
    const float* __restrict__ subtract_tensor,  // subtract tensor: shape [C] (broadcast over n, d, h, w)
    float* __restrict__ output,                 // final output: shape [N, D, H, W]
    int N, int C, int D, int H, int W) {

    // Compute a linear index over the spatial elements (n, d, h, w)
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int NDHW = N * D * H * W;
    if (index >= NDHW) return;

    // Recover each dimension from the linear index
    int w_idx = index % W;
    int h_idx = (index / W) % H;
    int d_idx = (index / (H * W)) % D;
    int n_idx = index / (D * H * W);

    int strideC = D * H * W;
    int base0 = n_idx * C * strideC + d_idx * H * W + h_idx * W + w_idx;

    // 1. Compute the maximum value over channels (for a numerically stable softmax)
    float max_val = -FLT_MAX;
    #pragma unroll
    for (int c = 0; c < C; c++) {
        max_val = max(max_val, input[base0 + c * strideC]);
    }

    // 2. Compute the sum of exponentials for the softmax normalization
    float sum_exp = 0.0f;
    #pragma unroll
    for (int c = 0; c < C; c++) {
        sum_exp += expf(input[base0 + c * strideC] - max_val);
    }

    // 3. Compute softmax, subtract, apply Swish, and take the max over channels
    float final_max = -FLT_MAX;
    #pragma unroll
    for (int c = 0; c < C; c++) {
        float sm_val = expf(input[base0 + c * strideC] - max_val) / sum_exp;
        float y = sm_val - subtract_tensor[c];
        float swish = y / (1.0f + expf(-y));  // Swish: y * sigmoid(y)
        final_max = max(final_max, swish);
    }

    // Write the per-position result
    output[index] = final_max;
}
// The forward function calls optimized ATen operations for the transposed convolution and
// pooling, then launches the fused kernel for the remaining steps.
torch::Tensor forward(
    torch::Tensor x,
    int64_t stride,
    int64_t padding,
    int64_t output_padding,
    int64_t pool_kernel_size,
    int64_t pool_stride,
    int64_t pool_padding,
    torch::Tensor conv_transpose_weight,
    torch::Tensor conv_transpose_bias,
    torch::Tensor subtract_tensor
) {
    auto conv_out = at::conv_transpose3d(
        x,
        conv_transpose_weight,
        conv_transpose_bias,
        {stride, stride, stride},
        {padding, padding, padding},
        {output_padding, output_padding, output_padding},
        1,          // groups
        {1, 1, 1}   // dilation
    );

    auto pool_out = at::max_pool3d(
        conv_out,
        {pool_kernel_size, pool_kernel_size, pool_kernel_size},
        {pool_stride, pool_stride, pool_stride},
        {pool_padding, pool_padding, pool_padding}
    );

    int N = pool_out.size(0);
    int C = pool_out.size(1);
    int D = pool_out.size(2);
    int H = pool_out.size(3);
    int W = pool_out.size(4);

    auto output = at::empty({N, D, H, W}, pool_out.options());
    int NDHW = N * D * H * W;

    // One thread per output element; 256 threads per block gives an even workload distribution
    const int threads = 256;
    const int blocks = (NDHW + threads - 1) / threads;

    balanced_fusion_kernel<<<blocks, threads>>>(
        pool_out.data_ptr<float>(),
        subtract_tensor.data_ptr<float>(),
        output.data_ptr<float>(),
        N, C, D, H, W);

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Balanced CUDA forward pass with optimized workload distribution");
}
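A build-and-run sketch for the extension above (assumptions: the CUDA source is saved as balanced_fusion.cu, a CUDA toolchain is available, and the functional-variant Model, get_inputs, and get_init_inputs defined earlier are in scope; the file and module names are illustrative, not part of the original submission):

# Compile the extension with PyTorch's JIT C++/CUDA builder and compare against the reference.
import torch
from torch.utils.cpp_extension import load

ext = load(name="balanced_fusion", sources=["balanced_fusion.cu"], verbose=True)

model = Model(*get_init_inputs()).cuda()
x, *call_args = get_inputs()  # call_args = stride, padding, output_padding, pool_kernel_size, pool_stride, pool_padding
x = x.cuda()

out_cuda = ext.forward(
    x, *call_args,
    model.conv_transpose_parameter,
    model.conv_transpose_bias,
    model.subtract_parameter,
)
out_ref = model(x, *call_args)
print((out_cuda - out_ref).abs().max().item())  # reported max diff vs. reference: 0.000000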
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 2.376 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 2.272 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 59.458 | % | 0.140 | 5 |
Issued Ipc Active | 2.380 | inst/cycle | 0.000 | 5 |
SM Busy | 63.852 | % | 0.163 | 5 |
Memory Throughput | 1673690493553.476 | byte/second | 179794972988697346048.000 | 5 |
Mem Busy | 31.444 | % | 0.065 | 5 |
Max Bandwidth | 49.948 | % | 0.157 | 5 |
L1/TEX Hit Rate | 58.576 | % | 0.023 | 5 |
L2 Hit Rate | 22.348 | % | 0.184 | 5 |
Mem Pipes Busy | 24.258 | % | 0.035 | 5 |
Warp Cycles Per Issued Instruction | 22.482 | cycle | 0.018 | 5 |
Warp Cycles Per Executed Instruction | 22.494 | cycle | 0.019 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 30.300 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 84.254 | % | 0.227 | 5 |
Achieved Active Warps Per SM | 53.922 | warp | 0.091 | 5 |
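The occupancy rows are internally consistent: 53.922 achieved active warps per SM out of the 64 theoretical warps is 53.922 / 64 ≈ 84.25%, matching the reported achieved occupancy of 84.254%.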
Rule | Description |
---|---|
INF HighPipeUtilization | ALU is the highest-utilized pipeline (31.1%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (84.6%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::conv_transpose3d | ||
CPU Time | 8842255.74 | μs |
Device Time | 6953630.01 | μs |
Self CPU Time | 3950.97 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::convolution | ||
CPU Time | 8838304.77 | μs |
Device Time | 6953630.01 | μs |
Self CPU Time | 5629.13 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_convolution | ||
CPU Time | 8832675.64 | μs |
Device Time | 6953630.01 | μs |
Self CPU Time | 13522.70 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::cudnn_convolution_transpose | ||
CPU Time | 8783706.24 | μs |
Device Time | 5507327.20 | μs |
Self CPU Time | 129352.32 | μs |
Self Device Time | 5507327.20 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaMemsetAsync | ||
CPU Time | 5077423.67 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 5077423.67 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
sm90_xmma_dgrad_implicit_gemm_indexed_f32f32_tf32f32_f32_nhwckrsc_nhwc_tilesize256x64x32_warpgroupsize1x1x1_g1_strided_execute_kernel__5x_cudnn | ||
CPU Time | 0.00 | μs |
Device Time | 3888718.62 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 3888718.62 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
45296 warnings generated when compiling for host. Suppressed 45330 warnings (45283 in non-user code, 47 NOLINT). Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.