import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Applies LogSoftmax activation to the input tensor.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, dim)
        dim (int): Dimension along which to apply LogSoftmax

    Returns:
        torch.Tensor: Output tensor with LogSoftmax applied, same shape as input
    """
    return F.log_softmax(x, dim=dim)


class Model(nn.Module):
    """
    Simple model that performs a LogSoftmax activation.
    """

    def __init__(self, dim):
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        return fn(x, self.dim)


batch_size = 16
dim = 16384
sm_dim = 1


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return [sm_dim]
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs a LogSoftmax activation.
    """

    def __init__(self, dim: int = 1):
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies LogSoftmax activation to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, dim).

        Returns:
            torch.Tensor: Output tensor with LogSoftmax applied, same shape as input.
        """
        return torch.log_softmax(x, dim=self.dim)


batch_size = 16
dim = 16384
sm_dim = 1


def get_inputs():
    x = torch.randn(batch_size, dim)
    return [x]


def get_init_inputs():
    return [sm_dim]
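The CUDA kernel below evaluates log-softmax in the numerically stable form log_softmax(x_i) = (x_i - max_j x_j) - log(sum_j exp(x_j - max_j x_j)), split into three passes over each row: row maximum, sum of shifted exponentials, final write. As a minimal sketch (not part of the benchmark harness), the same three phases expressed in plain PyTorch ops:

def reference_logsoftmax(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    # Phase 1: row-wise maximum, subtracted for numerical stability
    max_val = x.max(dim=dim, keepdim=True).values
    # Phase 2: log of the sum of shifted exponentials
    log_sum = (x - max_val).exp().sum(dim=dim, keepdim=True).log()
    # Phase 3: shifted input minus the log-sum
    return (x - max_val) - log_sum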
#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <limits>

template <typename scalar_t, int BLOCK_SIZE>
__global__ void strided_logsoftmax_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int dim_size) {
  const int batch_idx = blockIdx.x;
  const scalar_t* input_row = input + batch_idx * dim_size;
  scalar_t* output_row = output + batch_idx * dim_size;

  // Shared memory for block-level reductions
  __shared__ scalar_t shared_data[BLOCK_SIZE];

  // float4 vectorization is only valid for 4-byte elements, and each row is
  // 16-byte aligned only when dim_size is a multiple of 4. Otherwise disable
  // the vector path so the scalar loops below cover the whole row.
  const int vec_size = 4;
  const int vec_elements =
      (sizeof(scalar_t) == sizeof(float) && dim_size % vec_size == 0)
          ? dim_size / vec_size
          : 0;
  const int remainder_start = vec_elements * vec_size;
  const float4* input_vec = reinterpret_cast<const float4*>(input_row);

  // Phase 1: find the row maximum, using vectorized loads where possible
  scalar_t thread_max = -std::numeric_limits<scalar_t>::infinity();
  #pragma unroll 4
  for (int idx = threadIdx.x; idx < vec_elements; idx += BLOCK_SIZE) {
    float4 vec_val = input_vec[idx];
    thread_max = max(thread_max, static_cast<scalar_t>(
        max(max(vec_val.x, vec_val.y), max(vec_val.z, vec_val.w))));
  }
  // Handle remaining elements
  for (int idx = remainder_start + threadIdx.x; idx < dim_size; idx += BLOCK_SIZE) {
    thread_max = max(thread_max, input_row[idx]);
  }

  // Warp-level reduction for the maximum
  const unsigned int mask = 0xffffffff;
  #pragma unroll
  for (int offset = warpSize / 2; offset > 0; offset /= 2) {
    thread_max = max(thread_max, __shfl_down_sync(mask, thread_max, offset));
  }

  // Block-level reduction: lane 0 of each warp publishes its result,
  // then thread 0 reduces across warps
  if (threadIdx.x % warpSize == 0) {
    shared_data[threadIdx.x / warpSize] = thread_max;
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    scalar_t block_max = shared_data[0];
    for (int i = 1; i < BLOCK_SIZE / warpSize; i++) {
      block_max = max(block_max, shared_data[i]);
    }
    shared_data[0] = block_max;
  }
  __syncthreads();
  const scalar_t max_val = shared_data[0];
  // Ensure every thread has read max_val before shared_data is reused
  // for the sum reduction below
  __syncthreads();

  // Phase 2: sum of exponentials of the shifted values
  scalar_t thread_sum = 0;
  #pragma unroll 4
  for (int idx = threadIdx.x; idx < vec_elements; idx += BLOCK_SIZE) {
    float4 vec_val = input_vec[idx];
    thread_sum += exp(vec_val.x - max_val);
    thread_sum += exp(vec_val.y - max_val);
    thread_sum += exp(vec_val.z - max_val);
    thread_sum += exp(vec_val.w - max_val);
  }
  for (int idx = remainder_start + threadIdx.x; idx < dim_size; idx += BLOCK_SIZE) {
    thread_sum += exp(input_row[idx] - max_val);
  }

  // Warp- and block-level reduction for the sum
  #pragma unroll
  for (int offset = warpSize / 2; offset > 0; offset /= 2) {
    thread_sum += __shfl_down_sync(mask, thread_sum, offset);
  }
  if (threadIdx.x % warpSize == 0) {
    shared_data[threadIdx.x / warpSize] = thread_sum;
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    scalar_t total_sum = 0;
    for (int i = 0; i < BLOCK_SIZE / warpSize; i++) {
      total_sum += shared_data[i];
    }
    shared_data[0] = log(total_sum);
  }
  __syncthreads();
  const scalar_t log_sum = shared_data[0];

  // Phase 3: write (x - max) - log(sum), vectorizing stores where possible
  float4* output_vec = reinterpret_cast<float4*>(output_row);
  #pragma unroll 4
  for (int idx = threadIdx.x; idx < vec_elements; idx += BLOCK_SIZE) {
    float4 vec_val = input_vec[idx];
    float4 result;
    result.x = (vec_val.x - max_val) - log_sum;
    result.y = (vec_val.y - max_val) - log_sum;
    result.z = (vec_val.z - max_val) - log_sum;
    result.w = (vec_val.w - max_val) - log_sum;
    output_vec[idx] = result;
  }
  for (int idx = remainder_start + threadIdx.x; idx < dim_size; idx += BLOCK_SIZE) {
    output_row[idx] = (input_row[idx] - max_val) - log_sum;
  }
}

torch::Tensor strided_logsoftmax_cuda_forward(torch::Tensor input, int64_t dim) {
  TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
  TORCH_CHECK(
      input.scalar_type() == torch::kFloat32 || input.scalar_type() == torch::kFloat64,
      "input must be float32 or float64");

  int64_t ndim = input.dim();
  TORCH_CHECK(dim >= -ndim && dim < ndim, "dim out of range");
  dim = dim >= 0 ? dim : dim + ndim;

  // Move the softmax dimension to the innermost position so that each
  // block reduces over one contiguous row
  std::vector<int64_t> permute_dims;
  for (int64_t i = 0; i < ndim; ++i) {
    if (i != dim) {
      permute_dims.push_back(i);
    }
  }
  permute_dims.push_back(dim);
  input = input.permute(permute_dims).contiguous();

  int64_t batch_size = input.numel() / input.size(-1);
  int64_t dim_size = input.size(-1);
  auto output = torch::empty_like(input);

  constexpr int BLOCK_SIZE = 256;
  const int blocks = static_cast<int>(batch_size);

  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "strided_logsoftmax_cuda_forward", ([&] {
    strided_logsoftmax_kernel<scalar_t, BLOCK_SIZE>
        <<<blocks, BLOCK_SIZE, 0, at::cuda::getCurrentCUDAStream()>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            static_cast<int>(dim_size));
  }));

  // Undo the permutation so the output layout matches the original input
  std::vector<int64_t> inverse_permute_dims(ndim);
  for (int64_t i = 0; i < ndim; ++i) {
    inverse_permute_dims[permute_dims[i]] = i;
  }
  output = output.permute(inverse_permute_dims);
  return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &strided_logsoftmax_cuda_forward, "Strided LogSoftmax forward (CUDA)");
}
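A minimal sketch of how the extension could be built and checked against torch.log_softmax, assuming the C++/CUDA source above is saved as strided_logsoftmax.cu (the file name is illustrative):

import torch
from torch.utils.cpp_extension import load

# JIT-compile the extension from the (assumed) source file above
ext = load(name="strided_logsoftmax", sources=["strided_logsoftmax.cu"])

x = torch.randn(16, 16384, device="cuda")
out = ext.forward(x, 1)
ref = torch.log_softmax(x, dim=1)
print(torch.allclose(out, ref, atol=1e-5))  # expected: True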
Metric | Value | Unit | Variance | Samples |
---|---|---|---|---|
Executed Ipc Active | 0.686 | inst/cycle | 0.000 | 5 |
Executed Ipc Elapsed | 0.060 | inst/cycle | 0.000 | 5 |
Issue Slots Busy | 17.234 | % | 0.006 | 5 |
Issued Ipc Active | 0.690 | inst/cycle | 0.000 | 5 |
SM Busy | 17.234 | % | 0.006 | 5 |
Memory Throughput | 131559135407.098 | byte/second | 938570792648416000.000 | 5 |
Mem Busy | 6.142 | % | 0.005 | 5 |
Max Bandwidth | 5.816 | % | 0.003 | 5 |
L1/TEX Hit Rate | 50.000 | % | 0.000 | 5 |
L2 Hit Rate | 69.730 | % | 0.027 | 5 |
Mem Pipes Busy | 0.614 | % | 0.000 | 5 |
Warp Cycles Per Issued Instruction | 11.474 | cycle | 0.024 | 5 |
Warp Cycles Per Executed Instruction | 11.520 | cycle | 0.025 | 5 |
Avg. Active Threads Per Warp | 31.540 | | 0.000 | 5 |
Avg. Not Predicated Off Threads Per Warp | 30.790 | | 0.000 | 5 |
Max Active Clusters | 0.000 | cluster | 0.000 | 5 |
Max Cluster Size | 8.000 | block | 0.000 | 5 |
Overall GPU Occupancy | 0.000 | % | 0.000 | 5 |
Cluster Occupancy | 0.000 | % | 0.000 | 5 |
Block Limit SM | 32.000 | block | 0.000 | 5 |
Block Limit Registers | 6.000 | block | 0.000 | 5 |
Block Limit Shared Mem | 16.000 | block | 0.000 | 5 |
Block Limit Warps | 8.000 | block | 0.000 | 5 |
Theoretical Active Warps per SM | 48.000 | warp | 0.000 | 5 |
Theoretical Occupancy | 75.000 | % | 0.000 | 5 |
Achieved Occupancy | 12.264 | % | 0.000 | 5 |
Achieved Active Warps Per SM | 7.850 | warp | 0.000 | 5 |
Rule | Description |
---|---|
WRN HighPipeUtilization | All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details. |
INF CPIStall | Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason. |
WRN Occupancy | This kernel's theoretical occupancy (75.0%) is limited by the number of required registers. The difference between calculated theoretical (75.0%) and measured achieved occupancy (12.3%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy. |
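The low achieved occupancy (12.3% vs. 75% theoretical) follows directly from the launch configuration: with one block per row, only batch_size = 16 blocks are launched, far fewer than the number of SMs on a modern GPU, so most of the device sits idle. A minimal sketch for cross-checking kernel time with CUDA events, assuming the extension is loaded as ext as in the snippet above:

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
x = torch.randn(16, 16384, device="cuda")
start.record()
out = ext.forward(x, 1)
end.record()
torch.cuda.synchronize()  # wait for the kernel before reading the timer
print(f"kernel + launch overhead: {start.elapsed_time(end):.3f} ms")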
Operation / Metric | Value | Unit |
---|---|---|
aten::to | ||
CPU Time | 400816.46 | μs |
Device Time | 40.10 | μs |
Self CPU Time | 36.80 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::_to_copy | ||
CPU Time | 400779.65 | μs |
Device Time | 40.10 | μs |
Self CPU Time | 91.31 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::empty_strided | ||
CPU Time | 417468.90 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 17127.62 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaDeviceGetStreamPriorityRange | ||
CPU Time | 400138.96 | μs |
Device Time | 0.00 | μs |
Self CPU Time | 400138.96 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaLaunchKernel | ||
CPU Time | 399313.69 | μs |
Device Time | 18260.58 | μs |
Self CPU Time | 399313.69 | μs |
Self Device Time | 18260.58 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void strided_logsoftmax_kernel<float, 256>(float const*, float*, int) | ||
CPU Time | 0.00 | μs |
Device Time | 46119.30 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 46119.30 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
cudaEventRecord | ||
CPU Time | 15144.78 | μs |
Device Time | 35941.82 | μs |
Self CPU Time | 15144.78 | μs |
Self Device Time | 35941.82 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::zero_ | ||
CPU Time | 55031.35 | μs |
Device Time | 526978.62 | μs |
Self CPU Time | 9577.96 | μs |
Self Device Time | 0.00 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
aten::fill_ | ||
CPU Time | 45458.51 | μs |
Device Time | 526978.62 | μs |
Self CPU Time | 13262.59 | μs |
Self Device Time | 526978.62 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>) | ||
CPU Time | 0.00 | μs |
Device Time | 527056.41 | μs |
Self CPU Time | 0.00 | μs |
Self Device Time | 527056.41 | μs |
CPU Memory Usage | 0 | B |
Device Memory Usage | 0 | B |
Self CPU Memory Usage | 0 | B |
Self Device Memory Usage | 0 | B |