import torch
import torch.nn as nn
import torch.nn.functional as F
def module_fn(
x: torch.Tensor,
conv_weight: torch.Tensor,
conv_bias: torch.Tensor,
) -> torch.Tensor:
"""Applies 3D convolution, softmax activation, and two max pooling operations.
Args:
x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
conv_weight (torch.Tensor): Convolution weight tensor of shape
(out_channels, in_channels, kernel_size, kernel_size, kernel_size)
conv_bias (torch.Tensor): Bias tensor for convolution of shape (out_channels)
Returns:
torch.Tensor: Output tensor after applying convolution, softmax and max pooling,
with shape (batch_size, out_channels, depth', height', width') where:
depth' = ((depth - kernel_size + 1) // 4)
height' = ((height - kernel_size + 1) // 4)
width' = ((width - kernel_size + 1) // 4)
The //4 comes from two max pooling operations with kernel_size=2
"""
x = F.conv3d(x, conv_weight, conv_bias, stride=1, padding=0)
x = F.softmax(x, dim=1)
x = F.max_pool3d(x, kernel_size=2)
x = F.max_pool3d(x, kernel_size=2)
return x
class Model(nn.Module):
"""
Model that performs a 3D convolution, applies Softmax, and performs two max pooling operations.
"""
def __init__(self, in_channels, out_channels, kernel_size, pool_kernel_size):
super(Model, self).__init__()
        # Take the parameters from a Conv3d matching the reference architecture (padding=0, as used in module_fn)
        conv = nn.Conv3d(in_channels, out_channels, kernel_size)
self.conv_weight = nn.Parameter(conv.weight)
self.conv_bias = nn.Parameter(conv.bias)
def forward(self, x, fn=module_fn):
return fn(x, self.conv_weight, self.conv_bias)
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 16, 32, 32
kernel_size = 3
pool_kernel_size = 2
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width)]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, pool_kernel_size]
import torch
import torch.nn as nn
class Model(nn.Module):
"""
Model that performs a 3D convolution, applies Softmax, and performs two max pooling operations.
"""
def __init__(self, in_channels, out_channels, kernel_size, pool_kernel_size):
super(Model, self).__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size)
self.pool1 = nn.MaxPool3d(pool_kernel_size)
self.pool2 = nn.MaxPool3d(pool_kernel_size)
def forward(self, x):
"""
Args:
x: Input tensor of shape (batch_size, in_channels, depth, height, width)
Returns:
Output tensor of shape (batch_size, out_channels, depth', height', width') where depth', height', width' are the dimensions after pooling.
"""
x = self.conv(x)
x = torch.softmax(x, dim=1)
x = self.pool1(x)
x = self.pool2(x)
return x
batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 16, 32, 32
kernel_size = 3
pool_kernel_size = 2
def get_inputs():
return [torch.randn(batch_size, in_channels, depth, height, width)]
def get_init_inputs():
return [in_channels, out_channels, kernel_size, pool_kernel_size]
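# Illustrative check (not part of the benchmark files) motivating the fused CUDA
# kernel that follows: with the default stride == kernel_size and floor mode,
# two successive 2x2x2 max poolings select the same elements as a single 4x4x4
# max pooling, even when the spatial sizes are not divisible by 4.
if __name__ == "__main__":
    import torch.nn.functional as F
    x = torch.randn(2, 4, 14, 30, 30)  # post-conv sizes for the defaults above
    two_stage = F.max_pool3d(F.max_pool3d(x, kernel_size=2), kernel_size=2)
    one_stage = F.max_pool3d(x, kernel_size=4)
    assert torch.equal(two_stage, one_stage)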
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
// This fused kernel performs two successive max poolings (each with a window of 2x2x2 and stride 2)
// in a single kernel launch by directly computing the maximum over a 4x4x4 window from the softmax output.
// The final output shape is [N, C, D/4, H/4, W/4] (integer/floor division).
__global__ void max_pool3d_fused_kernel(const float* __restrict__ input, float* __restrict__ output,
const int N, const int C, const int D, const int H, const int W) {
    // The input has shape [N, C, D, H, W] from softmax. D, H, W need not be divisible by 4:
    // the integer division below discards trailing elements that do not fill a complete 4x4x4
    // window, which matches two successive floor-mode 2x2x2 poolings.
int D_out = D / 4;
int H_out = H / 4;
int W_out = W / 4;
// Combine the sample, channel, and depth index (for the output) into gridDim.z
// Each block in grid.z corresponds to one slice: (n, c, d_out).
int idx = blockIdx.z; // Range: 0 to (N * C * D_out - 1)
int d_out = idx % D_out;
int nc = idx / D_out;
int c = nc % C;
int n = nc / C;
// Determine the output height and width indices
int h_out_idx = blockIdx.y * blockDim.y + threadIdx.y;
int w_out_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (w_out_idx < W_out && h_out_idx < H_out) {
// Each output element corresponds to a 4x4x4 window in the input starting here:
int d_in_start = d_out * 4;
int h_in_start = h_out_idx * 4;
int w_in_start = w_out_idx * 4;
float max_val = -FLT_MAX;
// Calculate the starting index for (n, c, d_in_start, h_in_start, w_in_start)
int base_index = n * (C * D * H * W) + c * (D * H * W) + d_in_start * (H * W) + h_in_start * W + w_in_start;
// Iterate over the 4x4x4 window
for (int dd = 0; dd < 4; dd++) {
for (int hh = 0; hh < 4; hh++) {
for (int ww = 0; ww < 4; ww++) {
int index = base_index + dd * (H * W) + hh * W + ww;
float val = input[index];
if (val > max_val) {
max_val = val;
}
}
}
}
// Compute the output index for [n, c, d_out, h_out_idx, w_out_idx]
int out_index = n * (C * D_out * H_out * W_out) + c * (D_out * H_out * W_out) +
d_out * (H_out * W_out) + h_out_idx * W_out + w_out_idx;
output[out_index] = max_val;
}
}
// Forward function that applies 3D convolution, softmax, and then the fused max pooling in one kernel call
torch::Tensor forward(
torch::Tensor x,
torch::Tensor conv_weight,
torch::Tensor conv_bias
) {
    // The custom kernel only handles float32 CUDA tensors
    TORCH_CHECK(x.is_cuda(), "x must be a CUDA tensor");
    TORCH_CHECK(x.scalar_type() == at::kFloat, "x must be float32");
    // Ensure the input tensors are contiguous
    x = x.contiguous();
    conv_weight = conv_weight.contiguous();
    conv_bias = conv_bias.contiguous();
// Apply 3D convolution
auto conv_output = at::conv3d(x, conv_weight, conv_bias, {1, 1, 1}, {0, 0, 0});
// Apply softmax over the channel dimension
auto softmax_output = at::softmax(conv_output, 1);
// Get dimensions of the softmax output: assumed shape [N, C, D, H, W]
int N = softmax_output.size(0);
int C = softmax_output.size(1);
int D = softmax_output.size(2);
int H = softmax_output.size(3);
int W = softmax_output.size(4);
    // Fused pooling output sizes: integer division discards any remainder, matching two floor-mode 2x2x2 poolings
int D_out = D / 4;
int H_out = H / 4;
int W_out = W / 4;
auto output = at::empty({N, C, D_out, H_out, W_out}, softmax_output.options());
// Define block dimensions: using 16x16 threads for the (w, h) plane and grid.z for (n, c, d_out)
dim3 block(16, 16, 1);
dim3 grid((W_out + block.x - 1) / block.x,
(H_out + block.y - 1) / block.y,
N * C * D_out);
const float* input_ptr = softmax_output.data_ptr<float>();
float* output_ptr = output.data_ptr<float>();
max_pool3d_fused_kernel<<<grid, block>>>(input_ptr, output_ptr, N, C, D, H, W);
    // Block the host until the fused kernel has completed
    cudaDeviceSynchronize();
return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &forward, "Fused max pooling CUDA forward function (combining two pooling stages)");
}
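# Illustrative usage of the extension above (not part of the benchmark files).
# Assumes the CUDA source is saved as "fused_pool_ext.cu" (a placeholder name)
# and that Model, module_fn, and get_inputs refer to the functional definitions
# from the first Python listing; the custom forward should match the reference.
import torch
from torch.utils.cpp_extension import load

ext = load(name="fused_pool_ext", sources=["fused_pool_ext.cu"])
model = Model(in_channels, out_channels, kernel_size, pool_kernel_size).cuda()
x = get_inputs()[0].cuda()
ref = module_fn(x, model.conv_weight, model.conv_bias)
out = ext.forward(x, model.conv_weight, model.conv_bias)
torch.testing.assert_close(out, ref)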
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 1.440 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 1.276 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 36.026 | % | 0.088 | 5 |
Issued Ipc Active | 1.440 | inst/cycle | 0.000 | 5 |
SM Busy | 36.108 | % | 0.090 | 5 |
Memory Throughput | 2393921202862.054 | byte/second | 1192971924423589232640.000 | 5 |
Mem Busy | 44.700 | % | 0.363 | 5 |
Max Bandwidth | 71.468 | % | 1.017 | 5 |
L1/TEX Hit Rate | 76.318 | % | 0.000 | 5 |
L2 Hit Rate | 12.634 | % | 0.000 | 5 |
Mem Pipes Busy | 22.386 | % | 0.093 | 5 |
Warp Cycles Per Issued Instruction | 30.450 | cycle | 0.233 | 5 |
Warp Cycles Per Executed Instruction | 30.460 | cycle | 0.233 | 5 |
Avg. Active Threads Per Warp | 20.090 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 18.560 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 32.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 68.564 | % | 0.191 | 5 |
Achieved Active Warps Per SM | 43.880 | warp | 0.078 | 5 |
Rule | Description |
---|---|
INF HighPipeUtilization | ALU is the highest-utilized pipeline (36.3%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN ThreadDivergence | Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 20.1 threads being active per cycle. This is further reduced to 18.6 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp(). |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (68.4%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 327293.34 | μs |
Device Time | 2489.71 | μs |
Self CPU Time | 51.66 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 327241.68 | μs |
Device Time | 2489.71 | μs |
Self CPU Time | 128.38 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 324383.03 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 106.35 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 325193.86 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 325193.86 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::conv3d | ||
CPU Time | 322295.60 | μs |
Device Time | 3872982.11 | μs |
Self CPU Time | 10345.47 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::convolution | ||
CPU Time | 311950.13 | μs |
Device Time | 3872982.11 | μs |
Self CPU Time | 14050.29 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_convolution | ||
CPU Time | 297899.83 | μs |
Device Time | 3872982.11 | μs |
Self CPU Time | 27319.10 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::cudnn_convolution | ||
CPU Time | 209207.13 | μs |
Device Time | 3361170.88 | μs |
Self CPU Time | 149711.03 | μs |
Self Device Time | 3361170.88 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
sm80_xmma_fprop_implicit_gemm_indexed_f32f32_f32f32_f32_nchwkcrs_nchw_tilesize32x32x8_stage3_warpsize1x2x1_g1_ffma_aligna4_alignc4_execute_kernel__5x_cudnn | ||
CPU Time | 0.00 | μs |
Device Time | 3361169.22 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 3361169.22 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceSynchronize | ||
CPU Time | 5524485.39 | μs |
Device Time | 249984.86 | μs |
Self CPU Time | 5524485.39 | μs |
Self Device Time | 249984.86 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
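# Illustrative sketch (not part of the benchmark files) of how an operator-level
# breakdown like the table above can be collected with torch.profiler; the exact
# columns and values come from the benchmark's own profiling harness.
import torch
from torch.profiler import ProfilerActivity, profile

model = Model(in_channels, out_channels, kernel_size, pool_kernel_size).cuda()
x = get_inputs()[0].cuda()
with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
    model(x)
    torch.cuda.synchronize()
print(prof.key_averages().table(sort_by="cuda_time_total"))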
45285 warnings generated when compiling for host. Suppressed 45323 warnings (45276 in non-user code, 47 NOLINT). Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.