
The AI CUDA Engineer 👷

51_Argmax_over_a_dimension • stride_loop_argmax_base

Level 1 • Task 51
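# Functional variant of the task: the argmax op is factored into module_fn so
# the harness can swap in alternative implementations via the `fn` argument.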
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(x: torch.Tensor, dim: int) -> torch.Tensor:
    """
    Applies argmax over the specified dimension to the input tensor.

    Args:
        x (torch.Tensor): Input tensor
        dim (int): Dimension to perform argmax over

    Returns:
        torch.Tensor: Output tensor with argmax applied over specified dimension
    """
    return torch.argmax(x, dim)


class Model(nn.Module):
    """
    Simple model that performs Argmax over a specified dimension.
    """

    def __init__(self, dim: int):
        """
        Initializes the model with the dimension to perform argmax.

        Args:
            dim (int): The dimension to perform argmax over.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Applies argmax over the specified dimension to the input tensor.

        Args:
            x (torch.Tensor): Input tensor
            fn: Function to apply (defaults to module_fn)

        Returns:
            torch.Tensor: Output tensor with argmax applied, with the specified dimension removed.
        """
        return fn(x, self.dim)


batch_size = 16
dim1 = 256
dim2 = 256


def get_inputs():
    x = torch.randn(batch_size, dim1, dim2)
    return [x]


def get_init_inputs():
    return [1]
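# Baseline PyTorch module for the same task, without the functional wrapper: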
import torch
import torch.nn as nn


class Model(nn.Module):
    """
    Simple model that performs Argmax over a specified dimension.
    """

    def __init__(self, dim: int):
        """
        Initializes the model with the dimension to perform argmax.

        Args:
            dim (int): The dimension to perform argmax over.
        """
        super(Model, self).__init__()
        self.dim = dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies argmax over the specified dimension to the input tensor.

        Args:
            x (torch.Tensor): Input tensor.

        Returns:
            torch.Tensor: Output tensor with argmax applied, with the specified dimension removed.
        """
        return torch.argmax(x, dim=self.dim)


batch_size = 16
dim1 = 256
dim2 = 256


def get_inputs():
    x = torch.randn(batch_size, dim1, dim2)
    return [x]


def get_init_inputs():
    return [1]

Kernel Information

Related Kernels (Level 1, Task 51 • 51_Argmax_over_a_dimension)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 warp_argmax_nosm_edit_1 0.01 1.85 2.54
🥈 warp_level_argmax_base 0.01 1.72 2.36
🥈 warp_level_argmax_edit_1 0.01 1.72 2.36
🥈 efficient_argmax_base 0.01 1.72 2.36
🥈 stride_loop_argmax_stride_base 0.01 1.72 2.36
🥈 stride_loop_argmax_final_edit_1 0.01 1.72 2.36
🥈 argmax_coop_red_tuned_base 0.01 1.72 2.36
🥈 argmax_coop_red_tuned_edit_1 0.01 1.72 2.36
🥈 divergence_free_argmax_base 0.01 1.72 2.36
🥈 optimized_argmax_combination_base 0.01 1.72 2.36
🥈 argmax_ldg_128_opt_base 0.01 1.72 2.36
🥈 argmax_ldg_128_opt_edit_1 0.01 1.72 2.36
🥈 argmax_coop_red_sync_opt_base 0.01 1.72 2.36
🥈 argmax_aligned_mem_base_edit_1 0.01 1.72 2.36
🥈 stride_loop_argmax_final_base 0.01 1.72 2.36
🥈 warp_argmax_nosm_base 0.01 1.72 2.36
17 stride_loop_argmax_base 0.01 1.61 2.20
17 loop_unrolled_argmax_edit_1 0.01 1.61 2.20
17 stride_loop_argmax_edit_1 0.01 1.61 2.20
17 optimized_argmax_kernel_base 0.01 1.61 2.20
#include <torch/extension.h>
#include <cuda_runtime.h>
#include <cfloat>
#include <vector>

// Kernel that uses grid-stride loops to handle workloads larger than the available threads.
// Each thread block processes one or more (outer, inner) pairs and each thread uses a stride loop
// to cover the "dim" dimension. The reduction is performed in shared memory to compute the argmax.
__global__ void stride_loop_argmax_kernel(
    const float* __restrict__ x,
    int64_t* __restrict__ indices,
    const int outerSize,
    const int dimSize,
    const int innerSize) {
  const int total = outerSize * innerSize;
  // Grid-stride loop over output indices
  for (int idx = blockIdx.x; idx < total; idx += gridDim.x) {
    int outer_idx = idx / innerSize;
    int inner_idx = idx % innerSize;
    int start_offset = outer_idx * dimSize * innerSize + inner_idx;

    // Each thread processes a subset of the `dim` dimension via a stride loop
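    // Threads with no assigned elements (threadIdx.x >= dimSize) keep the
    // -FLT_MAX sentinel, so a real finite input value always takes precedence
    // in the reduction below.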
    float thread_max = -FLT_MAX;
    int thread_arg = 0;
    for (int d = threadIdx.x; d < dimSize; d += blockDim.x) {
      float val = x[start_offset + d * innerSize];
      if (val > thread_max) {
        thread_max = val;
        thread_arg = d;
      }
    }

    // Allocate shared memory for reduction. Partition shared memory into two arrays:
    // one for the max values and one for the corresponding indices.
    extern __shared__ char shared_mem[];
    float* svals = reinterpret_cast<float*>(shared_mem);
    int* sidx = reinterpret_cast<int*>(shared_mem + blockDim.x * sizeof(float));

    svals[threadIdx.x] = thread_max;
    sidx[threadIdx.x] = thread_arg;
    __syncthreads();

    // Perform parallel reduction in shared memory
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
      if (threadIdx.x < s) {
        float other = svals[threadIdx.x + s];
        int other_idx = sidx[threadIdx.x + s];
        if (other > svals[threadIdx.x]) {
          svals[threadIdx.x] = other;
          sidx[threadIdx.x] = other_idx;
        }
      }
      __syncthreads();
    }

    if (threadIdx.x == 0) {
      indices[idx] = sidx[0];
    }
    __syncthreads();  // Ensure shared memory is ready for next iteration if any
  }
}

// Host function to launch the CUDA kernel
torch::Tensor argmax_forward_cuda(const torch::Tensor& x, const int64_t dim) {
  TORCH_CHECK(x.scalar_type() == at::kFloat, "Only float32 is supported.");
  auto x_contig = x.contiguous();
  auto sizes = x_contig.sizes();
  const int ndim = x_contig.dim();
  TORCH_CHECK(dim >= 0 && dim < ndim, "Invalid dimension for argmax.");

  int outerSize = 1;
  for (int d = 0; d < dim; d++) {
    outerSize *= sizes[d];
  }
  const int dimSize = sizes[dim];
  int innerSize = 1;
  for (int d = dim + 1; d < ndim; d++) {
    innerSize *= sizes[d];
  }
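  // Example: for the benchmark shape (16, 256, 256) with dim = 1, this gives
  // outerSize = 16, dimSize = 256, innerSize = 256, so 16 * 256 = 4096 output
  // elements are each reduced over 256 loads strided by innerSize.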

  // Build output shape by removing the specified dimension
  std::vector<int64_t> out_sizes;
  for (int i = 0; i < ndim; i++) {
    if (i == dim) continue;
    out_sizes.push_back(sizes[i]);
  }
  auto indices = torch::empty(out_sizes, x.options().dtype(torch::kLong));

  // Launch parameters: use a grid-stride loop for the outer/inner dimensions
  const int total = outerSize * innerSize;
  const int threads = 256;
  const int blocks = (total < 1024 ? total : 1024);
  size_t shared_mem_size = threads * (sizeof(float) + sizeof(int));
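  // With 256 threads this is 256 * (4 B + 4 B) = 2 KiB of dynamic shared
  // memory per block, far below the per-block shared-memory limit.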

  stride_loop_argmax_kernel<<<blocks, threads, shared_mem_size>>>(
      x_contig.data_ptr<float>(),
      indices.data_ptr<int64_t>(),
      outerSize,
      dimSize,
      innerSize);

  return indices;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &argmax_forward_cuda, "ArgMax CUDA forward (stride loops for large workloads)");
}
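The related-kernels table above shows warp-level variants (e.g. warp_argmax_nosm_edit_1) edging out this shared-memory kernel. The sketch below is a hypothetical illustration of that general approach, not the ranked kernels' actual code: it assumes one 32-thread warp per (outer, inner) output element, launched as <<<outerSize * innerSize, 32>>>, and reduces each lane's (value, index) candidate with __shfl_down_sync, so no shared memory or __syncthreads() is needed.

#include <cuda_runtime.h>
#include <cfloat>
#include <cstdint>

// Hypothetical warp-level argmax reduction: after the call, lane 0 of each
// warp holds the maximum value and its index across all 32 lanes.
__device__ void warp_reduce_argmax(float& val, int& idx) {
  for (int offset = 16; offset > 0; offset >>= 1) {
    float other_val = __shfl_down_sync(0xffffffff, val, offset);
    int other_idx = __shfl_down_sync(0xffffffff, idx, offset);
    if (other_val > val) {
      val = other_val;
      idx = other_idx;
    }
  }
}

__global__ void warp_argmax_sketch(
    const float* __restrict__ x,
    int64_t* __restrict__ indices,
    const int dimSize,
    const int innerSize) {
  // One warp (one block of 32 threads) per flattened (outer, inner) index.
  const int idx = blockIdx.x;
  const int outer_idx = idx / innerSize;
  const int inner_idx = idx % innerSize;
  const int start_offset = outer_idx * dimSize * innerSize + inner_idx;

  // Each lane scans a strided subset of the reduced dimension in registers.
  float thread_max = -FLT_MAX;
  int thread_arg = 0;
  for (int d = threadIdx.x; d < dimSize; d += 32) {
    float val = x[start_offset + d * innerSize];
    if (val > thread_max) {
      thread_max = val;
      thread_arg = d;
    }
  }

  warp_reduce_argmax(thread_max, thread_arg);
  if (threadIdx.x == 0) {
    indices[idx] = thread_arg;
  }
}

Keeping the running (value, index) pair in registers removes the shared-memory round trip and the block-wide barriers of the stride-loop kernel, which lines up with the modest speedup gap in the table above.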
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 1.866 inst/cycle 0.000 5
Executed Ipc Elapsed 1.504 inst/cycle 0.000 5
Issue Slots Busy 46.908 % 0.005 5
Issued Ipc Active 1.876 inst/cycle 0.000 5
SM Busy 46.908 % 0.005 5
Memory Throughput 292625121360.220 byte/second 10617725231158288384.000 5
Mem Busy 52.332 % 0.319 5
Max Bandwidth 34.646 % 0.345 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 84.538 % 0.112 5
Mem Pipes Busy 31.698 % 0.104 5
Warp Cycles Per Issued Instruction 30.468 cycle 0.054 5
Warp Cycles Per Executed Instruction 30.668 cycle 0.054 5
Avg. Active Threads Per Warp 30.770 0.000 5
Avg. Not Predicated Off Threads Per Warp 26.800 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 10.000 block 0.000 5
Block Limit Shared Mem 21.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 89.084 % 0.049 5
Achieved Active Warps Per SM 57.014 warp 0.020 5
Analysis Rules
Rule Description
INF HighPipeUtilization ALU is the highest-utilized pipeline (32.6%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (88.7%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 380605.15 μs
Device Time 380.61 μs
Self CPU Time 43.55 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 380561.60 μs
Device Time 380.61 μs
Self CPU Time 107.26 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 379837.31 μs
Device Time 0.00 μs
Self CPU Time 104.40 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 379524.32 μs
Device Time 0.00 μs
Self CPU Time 379524.32 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 502929.82 μs
Device Time 20138.54 μs
Self CPU Time 502929.82 μs
Self Device Time 20138.54 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
stride_loop_argmax_kernel(float const*, long*, int, int, int)
CPU Time 0.00 μs
Device Time 87139.05 μs
Self CPU Time 0.00 μs
Self Device Time 87139.05 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 24096.99 μs
Device Time 40048.69 μs
Self CPU Time 24096.99 μs
Self Device Time 40048.69 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 73260.83 μs
Device Time 598650.84 μs
Self CPU Time 14533.10 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 58729.60 μs
Device Time 598650.84 μs
Self CPU Time 17428.45 μs
Self Device Time 598650.84 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 598650.84 μs
Self CPU Time 0.00 μs
Self Device Time 598650.84 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45286 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:12:5: warning: 2 adjacent parameters of 'stride_loop_argmax_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
12 | const int outerSize,
| ^~~~~~~~~~~~~~~~~~~~
13 | const int dimSize,
| ~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:12:15: note: the first parameter in the range is 'outerSize'
12 | const int outerSize,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:13:15: note: the last parameter in the range is 'dimSize'
13 | const int dimSize,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:17:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
17 | for (int idx = blockIdx.x; idx < total; idx += gridDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:17:50: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
17 | for (int idx = blockIdx.x; idx < total; idx += gridDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:25:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
25 | for (int d = threadIdx.x; d < dimSize; d += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:25:49: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
25 | for (int d = threadIdx.x; d < dimSize; d += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:44:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
44 | for (int s = blockDim.x / 2; s > 0; s >>= 1) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:68:20: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
68 | const int ndim = x_contig.dim();
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:73:18: warning: narrowing conversion from 'long' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
73 | outerSize *= sizes[d];
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:75:23: warning: narrowing conversion from 'long' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
75 | const int dimSize = sizes[dim];
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:77:16: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
77 | for (int d = dim + 1; d < ndim; d++) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_51/b3_s2_stride_loop_argmax/base/base.cu:78:18: warning: narrowing conversion from 'long' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
78 | innerSize *= sizes[d];
| ^