
The AI CUDA Engineer 👷

30_Gemm_GroupNorm_Hardtanh • warp_divergence_minimization_base

Level 2 • Task 30
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    group_norm_weight: torch.Tensor,
    group_norm_bias: torch.Tensor,
    num_groups: int,
    hardtanh_min: float,
    hardtanh_max: float,
) -> torch.Tensor:
    """
    Applies linear layer, group normalization and hardtanh activation.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
        bias (torch.Tensor): Bias vector of shape (out_features)
        group_norm_weight (torch.Tensor): Group norm weight of shape (out_features)
        group_norm_bias (torch.Tensor): Group norm bias of shape (out_features)
        num_groups (int): Number of groups for group normalization
        hardtanh_min (float): Minimum value for hardtanh
        hardtanh_max (float): Maximum value for hardtanh

    Returns:
        torch.Tensor: Output tensor of shape (batch_size, out_features)
    """
    x = F.linear(x, weight, bias)
    x = F.group_norm(x, num_groups, group_norm_weight, group_norm_bias)
    x = F.hardtanh(x, hardtanh_min, hardtanh_max)
    return x


class Model(nn.Module):
    """
    Simple model that performs a GEMM, applies Group Normalization, and then HardTanh.
    """

    def __init__(
        self, in_features, out_features, num_groups, hardtanh_min, hardtanh_max
    ):
        super(Model, self).__init__()
        gemm = nn.Linear(in_features, out_features)
        group_norm = nn.GroupNorm(num_groups, out_features)
        self.weight = nn.Parameter(gemm.weight)
        self.bias = nn.Parameter(gemm.bias + torch.ones_like(gemm.bias) * 0.02)
        self.group_norm_weight = nn.Parameter(group_norm.weight)
        self.group_norm_bias = nn.Parameter(
            group_norm.bias + torch.ones_like(group_norm.bias) * 0.02
        )

    def forward(self, x, num_groups, hardtanh_min, hardtanh_max, fn=module_fn):
        return fn(
            x,
            self.weight,
            self.bias,
            self.group_norm_weight,
            self.group_norm_bias,
            num_groups,
            hardtanh_min,
            hardtanh_max,
        )


batch_size = 128
in_features = 1024
out_features = 512
num_groups = 8
hardtanh_min = -2.0
hardtanh_max = 2.0


def get_inputs():
    return [
        torch.randn(batch_size, in_features),
        num_groups,
        hardtanh_min,
        hardtanh_max,
    ]


def get_init_inputs():
    return [in_features, out_features, num_groups, hardtanh_min, hardtanh_max]
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a GEMM, applies Group Normalization, and then HardTanh.
    """
    def __init__(self, in_features, out_features, num_groups, hardtanh_min, hardtanh_max):
        super(Model, self).__init__()
        self.gemm = nn.Linear(in_features, out_features)
        self.group_norm = nn.GroupNorm(num_groups, out_features)
        self.hardtanh = nn.Hardtanh(min_val=hardtanh_min, max_val=hardtanh_max)
        # Add the same noise as in functional implementation
        self.gemm.bias = nn.Parameter(self.gemm.bias + torch.ones_like(self.gemm.bias) * 0.02)
        self.group_norm.bias = nn.Parameter(self.group_norm.bias + torch.ones_like(self.group_norm.bias) * 0.02)

    def forward(self, x):
        """
        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, in_features).
        Returns:
            torch.Tensor: Output tensor of shape (batch_size, out_features).
        """
        x = self.gemm(x)
        x = self.group_norm(x)
        x = self.hardtanh(x)
        return x

batch_size = 128
in_features = 1024
out_features = 512
num_groups = 8
hardtanh_min = -2.0
hardtanh_max = 2.0

def get_inputs():
    return [torch.randn(batch_size, in_features)]

def get_init_inputs():
    return [in_features, out_features, num_groups, hardtanh_min, hardtanh_max]
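
A minimal usage sketch of the eager model above, built only from the helpers it defines (shapes assume the constants given; this is illustrative, not part of the benchmark harness):

model = Model(*get_init_inputs())
x, = get_inputs()
out = model(x)
print(out.shape)                     # torch.Size([128, 512])
print(out.min().item() >= hardtanh_min, out.max().item() <= hardtanh_max)  # True True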

Kernel Information

Related Kernels (Level 2, Task 30 • 30_Gemm_GroupNorm_Hardtanh)

Rank  Kernel Name  Runtime (ms)  Speedup (Native)  Speedup (Compile)
🥇 warp_divergence_minimization_base 0.06 0.88 0.91
🥈 warp_divergence_minimization_edit_1 0.06 0.86 0.89
🥉 optimized_block_sizes_base_edit_1 0.06 0.85 0.88
🥉 optimized_gemm_groupnorm_hardtanh_edit_1 0.06 0.85 0.88
🥉 ldg_memory_alignment_optimization_base 0.06 0.85 0.88
🥉 optimized_kernel_unroll_loops_base 0.06 0.85 0.88
🥉 modular_device_functions_optimized_v2_base 0.06 0.85 0.88
🥉 modular_device_functions_refactor_base 0.06 0.85 0.88
🥉 optimized_kernel_unroll_loops_edit_1 0.06 0.85 0.88
10 shared_mem_reuse_v1_base 0.06 0.83 0.86
11 unroll_loops_optim_base 0.06 0.79 0.82
11 min_warp_divergence_edit_1 0.06 0.79 0.82
11 sync_reduction_optim_edit_1 0.06 0.79 0.82
14 sync_reduction_optim_base 0.06 0.78 0.81
14 min_warp_divergence_base 0.06 0.78 0.81
14 modular_kernel_edit_1 0.06 0.78 0.81
14 constant_memory_optimization_base_edit_1 0.06 0.78 0.81
18 optimized_kernel_constant_memory_base 0.07 0.74 0.77
18 const_memory_optimized_kernel_edit_1 0.07 0.74 0.77
20 const_memory_optimized_kernel_base 0.07 0.73 0.76
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>

// Define tile size for matrix multiplication tiling
constexpr int TILE_SIZE = 16;

//-------------------------------------------------------------------
// Modular device functions for Linear Forward (GEMM) using tiling
//-------------------------------------------------------------------

// Load a tile from the input matrix (x) into shared memory
// x: [batch_size, in_features]
// Each block row corresponds to one row of x; TILE_SIZE elements are loaded per iteration

template <typename scalar_t, int TILE_SIZE>
__device__ inline void load_tile_A(const scalar_t* __restrict__ x,
                                      scalar_t A_tile[TILE_SIZE][TILE_SIZE],
                                      int row, int t, int in_features) {
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int col = t * TILE_SIZE + tx;
  A_tile[ty][tx] = (col < in_features) ? x[row * in_features + col] : static_cast<scalar_t>(0);
}

// Load a tile from the weight matrix into shared memory
// weight: [out_features, in_features]
// For a given output feature (col), load TILE_SIZE elements from weight

template <typename scalar_t, int TILE_SIZE>
__device__ inline void load_tile_B(const scalar_t* __restrict__ weight,
                                      scalar_t B_tile[TILE_SIZE][TILE_SIZE],
                                      int col, int t, int in_features) {
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int k = t * TILE_SIZE + ty;
  B_tile[ty][tx] = (k < in_features) ? weight[col * in_features + k] : static_cast<scalar_t>(0);
}

// Compute dot product on a single tile loaded into shared memory
// Multiplying the row of A_tile with the column of B_tile

template <typename scalar_t, int TILE_SIZE>
__device__ inline scalar_t compute_tile_dot(scalar_t A_tile[TILE_SIZE][TILE_SIZE],
                                              scalar_t B_tile[TILE_SIZE][TILE_SIZE]) {
  scalar_t sum = 0;
  #pragma unroll
  for (int i = 0; i < TILE_SIZE; i++) {
    sum += A_tile[threadIdx.y][i] * B_tile[i][threadIdx.x];
  }
  return sum;
}

// Linear Forward Kernel using modular device functions and shared memory tiling

template <typename scalar_t, int TILE_SIZE>
__global__ void linear_forward_kernel_modular(
    const scalar_t* __restrict__ x,
    const scalar_t* __restrict__ weight,
    const scalar_t* __restrict__ bias,
    scalar_t* __restrict__ output,
    int batch_size,
    int in_features,
    int out_features) {
  int row = blockIdx.y * TILE_SIZE + threadIdx.y;
  int col = blockIdx.x * TILE_SIZE + threadIdx.x;
  // Clamp the indices used for the shared-memory tile loads so threads that fall
  // outside the output matrix still read valid memory; their partial sums are
  // discarded by the bounds check on the final store below.
  int load_row = (row < batch_size) ? row : batch_size - 1;
  int load_col = (col < out_features) ? col : out_features - 1;
  scalar_t sum = 0;
  int numTiles = (in_features + TILE_SIZE - 1) / TILE_SIZE;

  __shared__ scalar_t A_tile[TILE_SIZE][TILE_SIZE];
  __shared__ scalar_t B_tile[TILE_SIZE][TILE_SIZE];

  for (int t = 0; t < numTiles; t++) {
    load_tile_A<scalar_t, TILE_SIZE>(x, A_tile, load_row, t, in_features);
    load_tile_B<scalar_t, TILE_SIZE>(weight, B_tile, load_col, t, in_features);
    __syncthreads();
    sum += compute_tile_dot<scalar_t, TILE_SIZE>(A_tile, B_tile);
    __syncthreads();
  }

  if (row < batch_size && col < out_features) {
    output[row * out_features + col] = sum + bias[col];
  }
}
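
// In tensor terms, the kernel above computes output = x @ weight^T + bias
// (the same result as F.linear(x, weight, bias)), with each thread producing
// one (row, col) element of the [batch_size, out_features] output.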

//-------------------------------------------------------------------
// Modular device functions for Group Normalization with parallel reduction
//-------------------------------------------------------------------

// A simple block-wide reduction to sum up values in shared memory

template <typename scalar_t>
__device__ inline scalar_t blockReduceSum(volatile scalar_t* sdata, int tid, int blockDim) {
  for (int s = blockDim / 2; s > 0; s >>= 1) {
    if (tid < s)
      sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  return sdata[0];
}
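
// Note: this tree reduction assumes blockDim is a power of two. The host launcher
// below picks threads = min(channels_per_group, 256), which satisfies that for this
// task's configuration (channels_per_group = out_features / num_groups = 512 / 8 = 64).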

// Group Normalization Kernel: Each block handles one (batch, group) pair

template <typename scalar_t>
__global__ void group_norm_forward_kernel_modular(
    const scalar_t* __restrict__ x,
    const scalar_t* __restrict__ gamma,  // scale parameter
    const scalar_t* __restrict__ beta,   // shift parameter
    scalar_t* __restrict__ output,
    int batch_size,
    int num_channels,
    int num_groups) {
  int channels_per_group = num_channels / num_groups;
  int idx = blockIdx.x; // total blocks = batch_size * num_groups
  int batch = idx / num_groups;
  int group = idx % num_groups;

  extern __shared__ char smem[];
  scalar_t* sdata = reinterpret_cast<scalar_t*>(smem);

  // Compute mean in parallel over channels in the group
  scalar_t sum = 0;
  for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
    int channel = group * channels_per_group + i;
    sum += x[batch * num_channels + channel];
  }
  sdata[threadIdx.x] = sum;
  __syncthreads();

  scalar_t mean = blockReduceSum<scalar_t>(sdata, threadIdx.x, blockDim.x) / channels_per_group;
  __syncthreads();

  // Compute variance in parallel
  scalar_t sq_sum = 0;
  for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
    int channel = group * channels_per_group + i;
    scalar_t diff = x[batch * num_channels + channel] - mean;
    sq_sum += diff * diff;
  }
  sdata[threadIdx.x] = sq_sum;
  __syncthreads();

  scalar_t var = blockReduceSum<scalar_t>(sdata, threadIdx.x, blockDim.x) / channels_per_group;
  __syncthreads();

  scalar_t inv_std = rsqrtf(var + 1e-5f);

  // Normalize, scale, and shift each feature in this group
  for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
    int channel = group * channels_per_group + i;
    scalar_t val = x[batch * num_channels + channel];
    output[batch * num_channels + channel] = ((val - mean) * inv_std) * gamma[channel] + beta[channel];
  }
}
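
// For reference, each (batch, group) block above computes, for every channel c in its group g:
//   y[b, c] = (x[b, c] - mean_g) * rsqrt(var_g + 1e-5) * gamma[c] + beta[c]
// where mean_g and var_g are the mean and (biased) variance over the group's channels.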

//-------------------------------------------------------------------
// Modular device function for Hardtanh Activation
//-------------------------------------------------------------------

template <typename scalar_t>
__device__ inline scalar_t hardtanh_activation(scalar_t val, scalar_t min_val, scalar_t max_val) {
  return (val < min_val) ? min_val : ((val > max_val) ? max_val : val);
}

// Hardtanh Kernel: Applies the activation in a grid-stride loop

template <typename scalar_t>
__global__ void hardtanh_forward_kernel_modular(
    const scalar_t* __restrict__ x,
    scalar_t min_val,
    scalar_t max_val,
    scalar_t* __restrict__ output,
    size_t total_elements) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; idx < total_elements; idx += stride) {
    scalar_t val = x[idx];
    output[idx] = hardtanh_activation<scalar_t>(val, min_val, max_val);
  }
}

//-------------------------------------------------------------------
// Host functions launching the kernels
//-------------------------------------------------------------------

void linear_forward_cuda_modular(
    at::Tensor x, 
    at::Tensor weight, 
    at::Tensor bias, 
    at::Tensor output) {

  const int batch_size = x.size(0);
  const int in_features = x.size(1);
  const int out_features = weight.size(0);

  dim3 block(TILE_SIZE, TILE_SIZE);
  dim3 grid((out_features + TILE_SIZE - 1) / TILE_SIZE,
            (batch_size + TILE_SIZE - 1) / TILE_SIZE);

  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "linear_forward_cuda_modular", ([&] {
    linear_forward_kernel_modular<scalar_t, TILE_SIZE><<<grid, block>>>(
        x.data_ptr<scalar_t>(),
        weight.data_ptr<scalar_t>(),
        bias.data_ptr<scalar_t>(),
        output.data_ptr<scalar_t>(),
        batch_size,
        in_features,
        out_features);
  }));

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("Error in linear_forward_cuda_modular: %s\n", cudaGetErrorString(err));
  }
}

void group_norm_forward_cuda_modular(
    at::Tensor x, 
    at::Tensor gamma,  // Group norm weight
    at::Tensor beta,   // Group norm bias
    int64_t num_groups,
    at::Tensor output) {

  const int batch_size = x.size(0);
  const int num_channels = x.size(1);
  int total_blocks = batch_size * num_groups;
  int channels_per_group = num_channels / num_groups;
  int threads = (channels_per_group < 256) ? channels_per_group : 256;
  size_t shared_mem = threads * x.element_size();  // per-element size must match the dispatched scalar type

  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "group_norm_forward_cuda_modular", ([&] {
    group_norm_forward_kernel_modular<scalar_t><<<total_blocks, threads, shared_mem>>>(
        x.data_ptr<scalar_t>(),
        gamma.data_ptr<scalar_t>(),
        beta.data_ptr<scalar_t>(),
        output.data_ptr<scalar_t>(),
        batch_size,
        num_channels,
        num_groups);
  }));

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("Error in group_norm_forward_cuda_modular: %s\n", cudaGetErrorString(err));
  }
}


void hardtanh_forward_cuda_modular(
    at::Tensor x, 
    float min_val, 
    float max_val,
    at::Tensor output) {

  const size_t total_elements = x.numel();
  int threads = 256;
  int blocks = (total_elements + threads - 1) / threads;

  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "hardtanh_forward_cuda_modular", ([&] {
    hardtanh_forward_kernel_modular<scalar_t><<<blocks, threads>>>(
        x.data_ptr<scalar_t>(),
        static_cast<scalar_t>(min_val),
        static_cast<scalar_t>(max_val),
        output.data_ptr<scalar_t>(),
        total_elements);
  }));

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("Error in hardtanh_forward_cuda_modular: %s\n", cudaGetErrorString(err));
  }
}

//-------------------------------------------------------------------
// Combined Host Function: Executes linear, group norm, and hardtanh sequentially
//-------------------------------------------------------------------

at::Tensor module_fn_cuda_forward(
    at::Tensor x,
    at::Tensor weight,
    at::Tensor bias,
    at::Tensor group_norm_weight,
    at::Tensor group_norm_bias,
    int64_t num_groups,
    float hardtanh_min,
    float hardtanh_max) {

  // Ensure inputs are contiguous and on CUDA
  x = x.contiguous();
  weight = weight.contiguous();
  bias = bias.contiguous();
  group_norm_weight = group_norm_weight.contiguous();
  group_norm_bias = group_norm_bias.contiguous();

  int64_t batch_size = x.size(0);
  int64_t in_features = x.size(1);
  int64_t out_features = weight.size(0);

  auto options = x.options();
  at::Tensor linear_output = at::empty({batch_size, out_features}, options);
  at::Tensor group_norm_output = at::empty({batch_size, out_features}, options);
  at::Tensor output = at::empty({batch_size, out_features}, options);

  // Linear layer computation with tiling
  linear_forward_cuda_modular(x, weight, bias, linear_output);

  // Group Normalization with parallel reduction per group
  group_norm_forward_cuda_modular(linear_output, group_norm_weight, group_norm_bias, num_groups, group_norm_output);

  // Hardtanh activation using a grid-stride loop
  hardtanh_forward_cuda_modular(group_norm_output, hardtanh_min, hardtanh_max, output);

  return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("forward", &module_fn_cuda_forward, "Forward pass (CUDA modular optimized)");
}
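
The extension can be built like any other PyTorch CUDA extension. The sketch below is a minimal, hedged example: it assumes the source above is saved locally as kernel.cu (the file name is hypothetical), that a CUDA device is available, and it checks the result against the equivalent F.linear / F.group_norm / F.hardtanh composition:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Build the extension from the CUDA source above (file name is an assumption).
ext = load(name="gemm_groupnorm_hardtanh", sources=["kernel.cu"], verbose=False)

batch_size, in_features, out_features, num_groups = 128, 1024, 512, 8
x = torch.randn(batch_size, in_features, device="cuda")
weight = torch.randn(out_features, in_features, device="cuda")
bias = torch.randn(out_features, device="cuda")
gn_weight = torch.ones(out_features, device="cuda")
gn_bias = torch.zeros(out_features, device="cuda")

# Reference result using the ops the kernels replace (eps defaults to 1e-5, matching the kernel).
ref = F.hardtanh(
    F.group_norm(F.linear(x, weight, bias), num_groups, gn_weight, gn_bias),
    -2.0, 2.0,
)

out = ext.forward(x, weight, bias, gn_weight, gn_bias, num_groups, -2.0, 2.0)
print(torch.allclose(out, ref, atol=1e-4, rtol=1e-4))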
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.300 inst/cycle 0.000 5
Executed Ipc Elapsed 0.108 inst/cycle 0.000 5
Issue Slots Busy 8.572 % 0.167 5
Issued Ipc Active 0.340 inst/cycle 0.000 5
SM Busy 8.572 % 0.167 5
Memory Throughput 79994868058.474 byte/second 4393609934218286080.000 5
Mem Busy 10.982 % 0.100 5
Max Bandwidth 7.178 % 0.038 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 83.052 % 0.109 5
Mem Pipes Busy 2.854 % 0.005 5
Warp Cycles Per Issued Instruction 41.534 cycle 1.727 5
Warp Cycles Per Executed Instruction 47.304 cycle 2.234 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.200 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 16.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 22.498 % 0.026 5
Achieved Active Warps Per SM 14.400 warp 0.011 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (22.8%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 298979.53 μs
Device Time 170.11 μs
Self CPU Time 66.16 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 298913.37 μs
Device Time 170.11 μs
Self CPU Time 137.89 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 298256.70 μs
Device Time 0.00 μs
Self CPU Time 157.49 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 295414.63 μs
Device Time 0.00 μs
Self CPU Time 295414.63 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 45121.33 μs
Device Time 456917.44 μs
Self CPU Time 15183.26 μs
Self Device Time 456917.44 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 58673.59 μs
Device Time 456917.44 μs
Self CPU Time 13580.16 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 525066.14 μs
Device Time 24781.23 μs
Self CPU Time 525066.14 μs
Self Device Time 24781.23 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void linear_forward_kernel_modular<float, 16>(float const*, float const*, float const*, float*, int, int, int)
CPU Time 0.00 μs
Device Time 265553.62 μs
Self CPU Time 0.00 μs
Self Device Time 265553.62 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 456917.44 μs
Self CPU Time 0.00 μs
Self Device Time 456917.44 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45320 warnings generated when compiling for host.
Suppressed 45328 warnings (45281 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:21:39: warning: 2 adjacent parameters of 'load_tile_A' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
21 | int row, int t, int in_features) {
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:21:43: note: the first parameter in the range is 'row'
21 | int row, int t, int in_features) {
| ^~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:21:52: note: the last parameter in the range is 't'
21 | int row, int t, int in_features) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:22:12: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
22 | int tx = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:23:12: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
23 | int ty = threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:35:39: warning: 2 adjacent parameters of 'load_tile_B' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
35 | int col, int t, int in_features) {
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:35:43: note: the first parameter in the range is 'col'
35 | int col, int t, int in_features) {
| ^~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:35:52: note: the last parameter in the range is 't'
35 | int col, int t, int in_features) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:36:12: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
36 | int tx = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:37:12: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
37 | int ty = threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:60:5: warning: 3 adjacent parameters of 'linear_forward_kernel_modular' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
60 | const scalar_t* __restrict__ x,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61 | const scalar_t* __restrict__ weight,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
62 | const scalar_t* __restrict__ bias,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:60:34: note: the first parameter in the range is 'x'
60 | const scalar_t* __restrict__ x,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:62:34: note: the last parameter in the range is 'bias'
62 | const scalar_t* __restrict__ bias,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:64:5: warning: 2 adjacent parameters of 'linear_forward_kernel_modular' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
64 | int batch_size,
| ^~~~~~~~~~~~~~~
65 | int in_features,
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:64:9: note: the first parameter in the range is 'batch_size'
64 | int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:65:9: note: the last parameter in the range is 'in_features'
65 | int in_features,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:95:69: warning: 2 adjacent parameters of 'blockReduceSum' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
95 | __device__ inline scalar_t blockReduceSum(volatile scalar_t* sdata, int tid, int blockDim) {
| ^~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:95:73: note: the first parameter in the range is 'tid'
95 | __device__ inline scalar_t blockReduceSum(volatile scalar_t* sdata, int tid, int blockDim) {
| ^~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:95:82: note: the last parameter in the range is 'blockDim'
95 | __device__ inline scalar_t blockReduceSum(volatile scalar_t* sdata, int tid, int blockDim) {
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:108:5: warning: 2 adjacent parameters of 'group_norm_forward_kernel_modular' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
108 | const scalar_t* __restrict__ x,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
109 | const scalar_t* __restrict__ gamma, // scale parameter
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:108:34: note: the first parameter in the range is 'x'
108 | const scalar_t* __restrict__ x,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:109:34: note: the last parameter in the range is 'gamma'
109 | const scalar_t* __restrict__ gamma, // scale parameter
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:112:5: warning: 2 adjacent parameters of 'group_norm_forward_kernel_modular' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
112 | int batch_size,
| ^~~~~~~~~~~~~~~
113 | int num_channels,
| ~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:112:9: note: the first parameter in the range is 'batch_size'
112 | int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:113:9: note: the last parameter in the range is 'num_channels'
113 | int num_channels,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:116:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
116 | int idx = blockIdx.x; // total blocks = batch_size * num_groups
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:125:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
125 | for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:125:58: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
125 | for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:137:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
137 | for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:137:58: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
137 | for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:151:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
151 | for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:151:58: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
151 | for (int i = threadIdx.x; i < channels_per_group; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:176:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
176 | int idx = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:177:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
177 | int stride = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:194:26: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
194 | const int batch_size = x.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:195:27: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
195 | const int in_features = x.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:196:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
196 | const int out_features = weight.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:202:3: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
202 | AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "linear_forward_cuda_modular", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:226:26: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
226 | const int batch_size = x.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:227:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
227 | const int num_channels = x.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:228:22: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
228 | int total_blocks = batch_size * num_groups;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:229:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
229 | int channels_per_group = num_channels / num_groups;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:233:3: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
233 | AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "group_norm_forward_cuda_modular", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:259:16: warning: narrowing conversion from 'size_t' (aka 'unsigned long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
259 | int blocks = (total_elements + threads - 1) / threads;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:261:3: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
261 | AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "hardtanh_forward_cuda_modular", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:286:5: warning: 2 adjacent parameters of 'module_fn_cuda_forward' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
286 | int64_t num_groups,
| ^~~~~~~~~~~~~~~~~~~
287 | float hardtanh_min,
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:286:13: note: the first parameter in the range is 'num_groups'
286 | int64_t num_groups,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:287:11: note: the last parameter in the range is 'hardtanh_min'
287 | float hardtanh_min,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:286:5: note:
286 | int64_t num_groups,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:287:5: note: 'int64_t' and 'float' may be implicitly converted: 'int64_t' (as 'long') -> 'float', 'float' -> 'int64_t' (as 'long')
287 | float hardtanh_min,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:298:11: warning: Value stored to 'in_features' during its initialization is never read [clang-analyzer-deadcode.DeadStores]
298 | int64_t in_features = x.size(1);
| ^~~~~~~~~~~ ~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_2/task_30/b4_s1_warp_divergence_minimization/base/base.cu:298:11: note: Value stored to 'in_features' during its initialization is never read
298 | int64_t in_features = x.size(1);
| ^~~~~~~~~~~ ~~~~~~~~~