44_ConvTranspose2d_Multiply_GlobalAvgPool_GlobalAvgPool_Mean
• optimized_strided_avg_pooling_edit_1
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    stride: int,
    padding: int,
    output_padding: int,
    conv_transpose: torch.Tensor,
    conv_transpose_bias: torch.Tensor,
    multiplier: float,
) -> torch.Tensor:
    """
    Applies transposed convolution, scalar multiplication, and multiple global average pooling operations.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_channels, height, width)
        stride (int): Stride of the transposed convolution
        padding (int): Padding of the transposed convolution
        output_padding (int): Additional size added to output shape
        conv_transpose (torch.Tensor): Transposed convolution weight tensor
        conv_transpose_bias (torch.Tensor): Bias tensor for transposed convolution
        multiplier (float): Scalar multiplier value

    Returns:
        torch.Tensor: Scalar output after applying operations
    """
    x = F.conv_transpose2d(
        x,
        conv_transpose,
        bias=conv_transpose_bias,
        stride=stride,
        padding=padding,
        output_padding=output_padding,
    )
    x = x * multiplier
    x = torch.mean(x, dim=[2, 3], keepdim=True)
    x = torch.mean(x, dim=[2, 3], keepdim=True)
    x = torch.mean(x)
    return x
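Worth noting: the first spatial mean already collapses dims 2 and 3 to size 1, so the second pooling is an identity op, and the whole chain is equivalent to a single global mean (all planes have equal element counts, so the mean of per-plane means equals the overall mean). A minimal standalone sketch verifying this:

import torch

x = torch.randn(2, 4, 8, 8)
chained = torch.mean(
    torch.mean(torch.mean(x, dim=[2, 3], keepdim=True), dim=[2, 3], keepdim=True)
)
assert torch.allclose(chained, x.mean())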
class Model(nn.Module):
    """
    Model that performs a transposed convolution, multiplies by a scalar, applies global average pooling,
    another global average pooling, and then calculates the mean.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        multiplier,
    ):
        super(Model, self).__init__()
        conv = nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
        )
        self.conv_transpose_parameter = nn.Parameter(conv.weight)
        self.conv_transpose_bias = nn.Parameter(
            conv.bias
            + torch.randn(
                conv.bias.shape, device=conv.bias.device, dtype=conv.bias.dtype
            )
            * 0.02
        )
        self.multiplier = multiplier

    def forward(self, x, stride, padding, output_padding, fn=module_fn):
        return fn(
            x,
            stride,
            padding,
            output_padding,
            self.conv_transpose_parameter,
            self.conv_transpose_bias,
            self.multiplier,
        )
batch_size = 128
in_channels = 3
out_channels = 16
height, width = 32, 32
kernel_size = 3
stride = 2
padding = 1
output_padding = 1
multiplier = 0.5


def get_inputs():
    return [
        torch.randn(batch_size, in_channels, height, width),
        stride,
        padding,
        output_padding,
    ]


def get_init_inputs():
    return [
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        output_padding,
        multiplier,
    ]
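For orientation, a minimal usage sketch of this harness (names as defined above; runs on CPU):

model = Model(*get_init_inputs())
out = model(*get_inputs())
print(out)  # a 0-dim scalar tensor

The second listing below is the same architecture in its original stateful nn.Module form, kept alongside the functional harness above.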
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Model that performs a transposed convolution, multiplies by a scalar, applies global average pooling,
    another global average pooling, and then calculates the mean.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, output_padding, multiplier):
        super(Model, self).__init__()
        self.conv_transpose = nn.ConvTranspose2d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, output_padding=output_padding,
        )
        self.conv_transpose.bias = nn.Parameter(
            self.conv_transpose.bias
            + torch.randn(
                self.conv_transpose.bias.shape,
                device=self.conv_transpose.bias.device,
                dtype=self.conv_transpose.bias.dtype,
            )
            * 0.02
        )
        self.multiplier = multiplier

    def forward(self, x):
        x = self.conv_transpose(x)
        x = x * self.multiplier
        x = torch.mean(x, dim=[2, 3], keepdim=True)  # First global average pooling
        x = torch.mean(x, dim=[2, 3], keepdim=True)  # Second global average pooling
        x = torch.mean(x)
        return x


batch_size = 128
in_channels = 3
out_channels = 16
height, width = 32, 32
kernel_size = 3
stride = 2
padding = 1
output_padding = 1
multiplier = 0.5


def get_inputs():
    return [torch.randn(batch_size, in_channels, height, width)]


def get_init_inputs():
    return [in_channels, out_channels, kernel_size, stride, padding, output_padding, multiplier]
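The CUDA extension below keeps the transposed convolution and scalar multiply in ATen, and replaces the pooling chain with a single custom reduction kernel that assigns one thread block to each (batch, channel) plane.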
#include <torch/extension.h>
#include <ATen/ATen.h>

// One block per (batch, channel) plane: each block averages the height*width
// elements of its plane and writes a single value to output[bid].
// `channels` is unused but kept to match the existing launch site.
__global__ void optimized_avg_pool_kernel(const float* __restrict__ input, float* __restrict__ output, int height, int width, int channels) {
    extern __shared__ float shared_data[];
    const unsigned int tid = threadIdx.x;
    const unsigned int bid = blockIdx.x;
    const unsigned int num_threads = blockDim.x;
    const unsigned int lane_id = tid % 32;
    const unsigned int warp_id = tid / 32;
    const unsigned int num_warps = num_threads / 32;

    // Each thread accumulates a strided partial sum over this block's plane.
    // The stride is the block size, not blockDim.x * gridDim.x: each block
    // owns exactly one plane, so threads must stride within that plane only.
    float sum = 0.0f;
    const unsigned int total_elements = height * width;
    const unsigned int plane_end = (bid + 1) * total_elements;
    for (unsigned int i = bid * total_elements + tid; i < plane_end; i += num_threads) {
        sum += input[i];
    }

    // Intra-warp reduction using shuffles.
    #pragma unroll
    for (int offset = 16; offset > 0; offset /= 2) {
        sum += __shfl_down_sync(0xffffffff, sum, offset);
    }

    // First thread in each warp stages its partial result in shared memory.
    if (lane_id == 0) {
        shared_data[warp_id] = sum;
    }
    __syncthreads();

    // Final reduction across warps, done by the whole first warp so the full
    // shuffle mask is valid; lanes beyond num_warps contribute zero.
    if (warp_id == 0) {
        float warp_sum = (lane_id < num_warps) ? shared_data[lane_id] : 0.0f;
        #pragma unroll
        for (int offset = 16; offset > 0; offset /= 2) {
            warp_sum += __shfl_down_sync(0xffffffff, warp_sum, offset);
        }
        if (lane_id == 0) {
            output[bid] = warp_sum / static_cast<float>(total_elements);
        }
    }
}
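In PyTorch terms the kernel computes one average per (batch, channel) plane, i.e. the (N, C) tensor that the two keepdim means in module_fn produce before the final scalar mean. A reference sketch of the same computation:

import torch

y = torch.randn(128, 16, 64, 64)  # the shape the kernel sees after the transposed conv
per_plane = y.mean(dim=[2, 3])    # (N, C): what optimized_avg_pool_kernel writes to `output`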
at::Tensor module_fn(
    at::Tensor x,
    int64_t stride,
    int64_t padding,
    int64_t output_padding,
    at::Tensor conv_transpose,
    at::Tensor conv_transpose_bias,
    double multiplier
) {
    // Apply the transposed convolution.
    at::Tensor y = at::conv_transpose2d(
        x,
        conv_transpose,
        conv_transpose_bias,
        {stride, stride},                  // stride
        {padding, padding},                // padding
        {output_padding, output_padding},  // output_padding
        1,                                 // groups
        {1, 1}                             // dilation
    );

    // Element-wise multiplication by the scalar multiplier; the kernel indexes
    // the buffer as a dense array, so make sure the layout is contiguous.
    y = (y * multiplier).contiguous();
    TORCH_CHECK(y.scalar_type() == at::kFloat, "optimized_avg_pool_kernel expects float32");

    // One output value per (batch, channel) plane. The kernel overwrites every
    // element, so at::zeros is used only for safety; at::empty would also work.
    auto output = at::zeros({y.size(0), y.size(1)}, y.options());

    // Launch the pooling kernel: one block per plane; shared memory holds one
    // partial sum per warp.
    const int threads = 256;
    const int blocks = y.size(0) * y.size(1);
    const int shared_mem_size = (threads / 32) * sizeof(float);
    optimized_avg_pool_kernel<<<blocks, threads, shared_mem_size>>>(
        y.data_ptr<float>(), output.data_ptr<float>(), y.size(2), y.size(3), y.size(1));

    // Mean over the per-plane averages gives the scalar result.
    return output.mean();
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn, "Module function");
}
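A hypothetical build-and-check sketch using torch.utils.cpp_extension; the filename optimized_pool.cu is an assumption for where the extension source above lives, Model refers to the first (functional) definition, and a CUDA device is required:

import torch
from torch.utils.cpp_extension import load

ext = load(name="optimized_pool", sources=["optimized_pool.cu"])  # assumed filename

model = Model(*get_init_inputs()).cuda()
x = torch.randn(batch_size, in_channels, height, width, device="cuda")
expected = module_fn(
    x, stride, padding, output_padding,
    model.conv_transpose_parameter, model.conv_transpose_bias, multiplier,
)
got = ext.forward(
    x, stride, padding, output_padding,
    model.conv_transpose_parameter, model.conv_transpose_bias, multiplier,
)
assert torch.allclose(got, expected, atol=1e-4)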
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 1.356 | inst/cycle | 0.001 | 5 |
Executed Ipc Elapsed | 0.734 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 34.612 | % | 0.543 | 5 |
Issued Ipc Active | 1.386 | inst/cycle | 0.001 | 5 |
SM Busy | 34.612 | % | 0.543 | 5 |
Memory Throughput | 408050118271.562 | byte/second | 76834482876128313344.000 | 5 |
Mem Busy | 13.608 | % | 0.111 | 5 |
Max Bandwidth | 13.168 | % | 0.105 | 5 |
L1/TEX Hit Rate | 0.264 | % | 0.001 | 5 |
L2 Hit Rate | 32.868 | % | 0.137 | 5 |
Mem Pipes Busy | 23.158 | % | 0.321 | 5 |
Warp Cycles Per Issued Instruction | 32.340 | cycle | 0.357 | 5 |
Warp Cycles Per Executed Instruction | 33.074 | cycle | 0.373 | 5 |
Avg. Active Threads Per Warp | 29.870 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 23.930 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 16.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 16.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 100.000 | % | 0.000 | 5 |
Achieved Occupancy | 71.256 | % | 0.434 | 5 |
Achieved Active Warps Per SM | 45.606 | warp | 0.178 | 5 |
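The profile is consistent with a small, memory-bound reduction: compute pipes sit around 35% busy, memory throughput is roughly 408 GB/s, and achieved occupancy (~71%) falls short of the theoretical 100%, which the rules below attribute to scheduling overhead and load imbalance.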
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN ThreadDivergence | Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 29.9 threads being active per cycle. This is further reduced to 23.9 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp(). |
WRN Occupancy | This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (70.9%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
Operation / Metric | Value | Unit |
---|---|---|
aten::conv_transpose2d | ||
CPU Time | 5125858.37 | μs |
Device Time | 5085312.38 | μs |
Self CPU Time | 61173.19 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::convolution | ||
CPU Time | 5064685.17 | μs |
Device Time | 5085312.38 | μs |
Self CPU Time | 72262.76 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_convolution | ||
CPU Time | 4992422.41 | μs |
Device Time | 5085312.38 | μs |
Self CPU Time | 152739.05 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::cudnn_convolution_transpose | ||
CPU Time | 3969733.43 | μs |
Device Time | 4138899.04 | μs |
Self CPU Time | 684192.02 | μs |
Self Device Time | 4138899.04 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 5735251.89 | μs |
Device Time | 29087.87 | μs |
Self CPU Time | 5735251.89 | μs |
Self Device Time | 29087.87 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 1864945.05 | μs |
Device Time | 2503422.77 | μs |
Self CPU Time | 115797.87 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
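The operator timings put the kernel in context: aten::cudnn_convolution_transpose dominates with roughly 4.1 s of device time, so the custom pooling kernel accounts for only a small slice of the end-to-end cost. The aten::zero_ entry comes from initializing the output with at::zeros; since the kernel overwrites every element, allocating with at::empty would remove that fill.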