20_ConvTranspose3d_Sum_ResidualAdd_Multiply_ResidualAdd
• coalesced_vectorized_fused_kernel_base
import torch
import torch.nn as nn
import torch.nn.functional as F
def module_fn(
    x: torch.Tensor,
    stride: int,
    padding: int,
    output_padding: int,
    conv_transpose: torch.Tensor,
    conv_transpose_bias: torch.Tensor,
    bias: torch.Tensor,
) -> torch.Tensor:
    """
    Applies a 3D transposed convolution followed by bias addition and residual operations.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
        stride (int): Stride of the transposed convolution
        padding (int): Padding of the transposed convolution
        output_padding (int): Additional size added to output shape
        conv_transpose (torch.Tensor): Transposed convolution weight tensor
        conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
        bias (torch.Tensor): Bias tensor for addition

    Returns:
        torch.Tensor: Output tensor after applying operations
    """
    x = F.conv_transpose3d(
        x,
        conv_transpose,
        bias=conv_transpose_bias,
        stride=stride,
        padding=padding,
        output_padding=output_padding,
    )
    original_x = x.clone().detach()
    x = x + bias
    x = x + original_x
    x = x * original_x
    x = x + original_x
    return x
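The four elementwise ops after the convolution collapse algebraically: with o the conv output and b the bias, ((o + b) + o) * o + o = o * (2*o + b + 1). This identity is what the CUDA kernel further below fuses into a single pass; a minimal check of the equivalence (tensor shapes here are illustrative, not the benchmark config):

import torch

def check_fused_identity():
    # Stand-ins for the conv output and the channel-wise bias
    o = torch.randn(2, 4, 3, 3, 3)
    b = torch.randn(4, 1, 1, 1)
    eager = ((o + b) + o) * o + o    # sum, residual add, multiply, residual add
    fused = o * (2.0 * o + b + 1.0)  # closed form used by the kernel
    assert torch.allclose(eager, fused, atol=1e-6)

check_fused_identity()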
class Model(nn.Module):
    """
    Model that performs a 3D transposed convolution, followed by a sum,
    a residual add, a multiplication, and another residual add.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        bias_shape,
    ):
        super(Model, self).__init__()
        conv_transpose = nn.ConvTranspose3d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
        )
        self.conv_transpose_parameter = conv_transpose.weight
        self.conv_transpose_bias = nn.Parameter(
            conv_transpose.bias + torch.ones_like(conv_transpose.bias) * 0.02
        )  # make sure it's nonzero
        self.bias_parameter = nn.Parameter(torch.randn(bias_shape) * 0.02)

    def forward(self, x, stride, padding, output_padding, fn=module_fn):
        return fn(
            x,
            stride,
            padding,
            output_padding,
            self.conv_transpose_parameter,
            self.conv_transpose_bias,
            self.bias_parameter,
        )
batch_size = 16
in_channels = 32
out_channels = 64
depth, height, width = 16, 32, 32
kernel_size = 3
stride = 2
padding = 1
output_padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
    return [
        torch.randn(batch_size, in_channels, depth, height, width),
        stride,
        padding,
        output_padding,
    ]

def get_init_inputs():
    return [
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        bias_shape,
    ]
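For context, a hedged sketch of how a harness presumably consumes these helpers (the instantiation pattern below is an assumption, not part of the listing):

model = Model(*get_init_inputs())
out = model(*get_inputs())
# With the config above, out.shape is (16, 64, 32, 64, 64):
# D_out = (16 - 1) * 2 - 2 * 1 + 3 + 1 = 32, and similarly 64 for H and W.
print(out.shape)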
import torch
import torch.nn as nn
class Model(nn.Module):
    """
    Model that performs a 3D transposed convolution, followed by a sum,
    a residual add, a multiplication, and another residual add.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, output_padding, bias_shape):
        super(Model, self).__init__()
        self.conv_transpose = nn.ConvTranspose3d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, output_padding=output_padding,
        )
        self.conv_transpose.bias = nn.Parameter(
            self.conv_transpose.bias + torch.ones_like(self.conv_transpose.bias) * 0.02
        )
        self.bias = nn.Parameter(torch.randn(bias_shape) * 0.02)

    def forward(self, x):
        x = self.conv_transpose(x)
        original_x = x.clone().detach()
        x = x + self.bias
        x = x + original_x
        x = x * original_x
        x = x + original_x
        return x
batch_size = 16
in_channels = 32
out_channels = 64
depth, height, width = 16, 32, 32
kernel_size = 3
stride = 2
padding = 1
output_padding = 1
bias_shape = (out_channels, 1, 1, 1)
def get_inputs():
    return [torch.randn(batch_size, in_channels, depth, height, width)]

def get_init_inputs():
    return [in_channels, out_channels, kernel_size, stride, padding, output_padding, bias_shape]
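The CUDA kernel below flattens the contiguous [N, C, D, H, W] output and recovers the channel of a flat index i as (i / spatial_size) % channels, since i = n*C*S + c*S + s with S = D*H*W. A small sketch verifying this convention (the shapes here are illustrative, not the benchmark config):

import torch

N, C, D, H, W = 2, 3, 2, 2, 2
S = D * H * W  # spatial_size
x = torch.randn(N, C, D, H, W)
flat = x.flatten()
for i in range(flat.numel()):
    c = (i // S) % C          # channel recovered from the flat index
    n = i // (C * S)          # batch index
    assert flat[i] == x[n, c].flatten()[i % S]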
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
// This kernel improves memory coalescing by processing data using vectorized loads/stores (float4).
// It also uses a grid-stride loop so that each thread processes multiple contiguous elements.
// The bias values are cached in shared memory to reduce redundant global memory accesses.
// The kernel computes: output[i] = conv_output[i] * (2.0f * conv_output[i] + bias[c] + 1.0f), where c = ((i / spatial_size) % channels).
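// Derivation from the eager ops in module_fn: with o = conv_output[i] and b = bias[c],
// ((o + b) + o) * o + o = (2*o + b) * o + o = o * (2*o + b + 1), the fused form used below.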
__global__ void coalesced_vectorized_fused_operations_kernel(
    const float* __restrict__ conv_output,
    const float* __restrict__ element_bias,
    float* __restrict__ output,
    int num_elements,
    int channels,
    int spatial_size
) {
    // Allocate shared memory for the per-channel bias values
    extern __shared__ float shared_bias[];

    // Each thread loads part of the bias vector into shared memory
    for (int i = threadIdx.x; i < channels; i += blockDim.x) {
        shared_bias[i] = element_bias[i];
    }
    __syncthreads();

    // Process elements in a vectorized manner using float4
    int total_vec = num_elements / 4;  // number of complete float4 groups
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Grid-stride loop over the vectorized portion
    for (int i = idx; i < total_vec; i += blockDim.x * gridDim.x) {
        // Load 4 contiguous floats at once
        float4 in_vec = reinterpret_cast<const float4*>(conv_output)[i];
        int base = i * 4;
        float4 out_vec;
        // Unroll the computation for the 4 elements
        #pragma unroll
        for (int j = 0; j < 4; j++) {
            int global_idx = base + j;
            int c = (global_idx / spatial_size) % channels;
            // Access the j-th component of the vector
            float original = ((float*)&in_vec)[j];
            float b = shared_bias[c];
            ((float*)&out_vec)[j] = original * (2.0f * original + b + 1.0f);
        }
        // Store the computed 4 elements back to global memory
        reinterpret_cast<float4*>(output)[i] = out_vec;
    }

    // Handle the up-to-3 tail elements that don't form a complete float4
    int remainder = num_elements % 4;
    int start = total_vec * 4;
    for (int i = idx; i < remainder; i += blockDim.x * gridDim.x) {
        int global_idx = start + i;
        int c = (global_idx / spatial_size) % channels;
        float orig = conv_output[global_idx];
        output[global_idx] = orig * (2.0f * orig + shared_bias[c] + 1.0f);
    }
}
// The forward function applies the standard conv_transpose3d and then launches the optimized kernel.
torch::Tensor forward(
    torch::Tensor x,
    int stride,
    int padding,
    int output_padding,
    torch::Tensor conv_transpose,
    torch::Tensor conv_transpose_bias,
    torch::Tensor bias
) {
    // Compute the transposed convolution using PyTorch's optimized function
    auto conv_result = torch::conv_transpose3d(
        x,
        conv_transpose,
        conv_transpose_bias,
        stride,
        padding,
        output_padding
    );

    // The kernel indexes the result as a flat [N, C, D, H, W] buffer, so
    // ensure it is contiguous before handing the raw pointer to the kernel
    conv_result = conv_result.contiguous();
    auto sizes = conv_result.sizes();
    int channels = sizes[1];
    int spatial_size = sizes[2] * sizes[3] * sizes[4];  // D * H * W
    int num_elements = conv_result.numel();

    // Prepare the output tensor
    auto output = torch::empty_like(conv_result);

    // Configure launch parameters: size the grid for the vectorized portion,
    // falling back to the scalar element count when there are fewer than 4 elements
    const int threads_per_block = 256;
    int total_vec = num_elements / 4;
    int blocks = (total_vec > 0)
        ? (total_vec + threads_per_block - 1) / threads_per_block
        : (num_elements + threads_per_block - 1) / threads_per_block;

    // Launch the kernel with dynamic shared memory sized for the bias vector
    coalesced_vectorized_fused_operations_kernel<<<blocks, threads_per_block, channels * sizeof(float)>>>(
        conv_result.data_ptr<float>(),
        bias.data_ptr<float>(),
        output.data_ptr<float>(),
        num_elements,
        channels,
        spatial_size
    );

    return output;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Coalesced Vectorized Fused ConvTranspose3D Kernel with Channel-wise Bias");
}
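A minimal sketch of building the extension inline and checking it against the eager Model from the second listing; the source file name fused_ops.cu is an assumption, not taken from the source:

import torch
from torch.utils.cpp_extension import load

# Compile the CUDA source above (assumed saved as fused_ops.cu)
fused = load(name="coalesced_fused", sources=["fused_ops.cu"], verbose=False)

model = Model(*get_init_inputs()).cuda()
x = torch.randn(batch_size, in_channels, depth, height, width, device="cuda")

with torch.no_grad():
    ref = model(x)  # eager chain: sum, residual add, multiply, residual add
    out = fused.forward(
        x, stride, padding, output_padding,
        model.conv_transpose.weight, model.conv_transpose.bias, model.bias,
    )
print(torch.allclose(ref, out, atol=1e-5))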
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 2.934 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 2.910 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 73.410 | % | 0.054 | 5 |
Issued Ipc Active | 2.934 | inst/cycle | 0.000 | 5 |
SM Busy | 73.410 | % | 0.054 | 5 |
Memory Throughput | 3000693096226.504 | byte/second | 11463599386044006400.000 | 5 |
Mem Busy | 47.708 | % | 0.003 | 5 |
Max Bandwidth | 89.520 | % | 0.011 | 5 |
L1/TEX Hit Rate | 3.030 | % | 0.000 | 5 |
L2 Hit Rate | 50.002 | % | 0.006 | 5 |
Mem Pipes Busy | 30.814 | % | 0.000 | 5 |
Warp Cycles Per Issued Instruction | 17.454 | cycle | 0.003 | 5 |
Warp Cycles Per Executed Instruction | 17.460 | cycle | 0.003 | 5 |
Avg. Active Threads Per Warp | 32.000 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 27.230 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 8.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 25.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 80.414 | % | 0.002 | 5 |
Achieved Active Warps Per SM | 51.464 | warp | 0.001 | 5 |
Rule | Description |
---|---|
INF HighPipeUtilization | ALU is the highest-utilized pipeline (59.0%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (80.4%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::conv_transpose3d | ||
CPU Time | 1792789.19 | μs |
Device Time | 4761433.03 | μs |
Self CPU Time | 7358.48 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::convolution | ||
CPU Time | 1785430.71 | μs |
Device Time | 4761433.03 | μs |
Self CPU Time | 9952.47 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_convolution | ||
CPU Time | 1775478.24 | μs |
Device Time | 4761433.03 | μs |
Self CPU Time | 20792.02 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::cudnn_convolution_transpose | ||
CPU Time | 524159.84 | μs |
Device Time | 2893740.84 | μs |
Self CPU Time | 144936.96 | μs |
Self Device Time | 2893740.84 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 1769504.30 | μs |
Device Time | 129196.41 | μs |
Self CPU Time | 1769504.30 | μs |
Self Device Time | 129196.41 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 4229996.80 | μs |
Device Time | 66482.09 | μs |
Self CPU Time | 4229996.80 | μs |
Self Device Time | 66482.09 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::elementwise_kernel<128, 2, at::native::gpu_kernel_impl_nocast<at::native::CUDAFunctor_add<float> >(at::TensorIteratorBase&, at::native::CUDAFunctor_add<float> const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl_nocast<at::native::CUDAFunctor_add<float> >(at::TensorIteratorBase&, at::native::CUDAFunctor_add<float> const&)::{lambda(int)#1}) | ||
CPU Time | 0.00 | μs |
Device Time | 1868153.97 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 1868153.97 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |