
The AI CUDA Engineer 👷

97_Matmul_BatchNorm_BiasAdd_Divide_Swish
fused_bn_swish_base

Level 2 • Task 97
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    bn_eps: float,
    bn_momentum: float,
    divide_value: float,
    weight: torch.Tensor,
    bias: torch.Tensor,
    bn_weight: torch.Tensor,
    bn_bias: torch.Tensor,
    bn_running_mean: torch.Tensor,
    bn_running_var: torch.Tensor,
    add_bias: torch.Tensor,
) -> torch.Tensor:
    """
    Applies matrix multiplication, batch normalization, bias addition, division and Swish activation.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        bn_eps (float): Small constant for numerical stability in batch norm
        bn_momentum (float): Momentum for batch norm running stats
        divide_value (float): Value to divide by
        weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
        bias (torch.Tensor): Bias vector of shape (out_features)
        bn_weight (torch.Tensor): Batch norm weight of shape (out_features)
        bn_bias (torch.Tensor): Batch norm bias of shape (out_features)
        bn_running_mean (torch.Tensor): Batch norm running mean of shape (out_features)
        bn_running_var (torch.Tensor): Batch norm running variance of shape (out_features)
        add_bias (torch.Tensor): Additional bias term of shape (1,)

    Returns:
        torch.Tensor: Output tensor of shape (batch_size, out_features)
    """
    x = F.linear(x, weight, bias)
    x = F.batch_norm(
        x,
        bn_running_mean,
        bn_running_var,
        bn_weight,
        bn_bias,
        training=True,
        momentum=bn_momentum,
        eps=bn_eps,
    )
    x = x + add_bias
    x = x / divide_value
    x = x * torch.sigmoid(x)
    return x


class Model(nn.Module):
    """
    Model that performs a matrix multiplication, batch normalization, bias addition, division and Swish activation.
    """

    def __init__(
        self, in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value
    ):
        super(Model, self).__init__()
        gemm = nn.Linear(in_features, out_features)
        bn = nn.BatchNorm1d(out_features, eps=bn_eps, momentum=bn_momentum)
        self.weight = gemm.weight
        self.bias = gemm.bias
        self.bn_weight = bn.weight
        self.bn_bias = bn.bias
        self.bn_running_mean = nn.Parameter(bn.running_mean, requires_grad=False)
        self.bn_running_var = nn.Parameter(bn.running_var, requires_grad=False)
        self.add_bias = nn.Parameter(torch.randn(bias_shape) * 0.02)

    def forward(self, x, bn_eps, bn_momentum, divide_value, fn=module_fn):
        return fn(
            x,
            bn_eps,
            bn_momentum,
            divide_value,
            self.weight,
            self.bias,
            self.bn_weight,
            self.bn_bias,
            self.bn_running_mean,
            self.bn_running_var,
            self.add_bias,
        )


batch_size = 128
in_features = 1024
out_features = 512
bn_eps = 1e-5
bn_momentum = 0.1
bias_shape = (1,)
divide_value = 1.0


def get_inputs():
    return [torch.randn(batch_size, in_features), bn_eps, bn_momentum, divide_value]


def get_init_inputs():
    return [in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value]
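
For orientation, here is a minimal usage sketch (not part of the harness itself): Model wraps the raw parameters and dispatches to module_fn by default, and an alternative implementation with the same signature can be injected through the fn argument.

# Illustrative only: run one training-mode forward pass through the default
# PyTorch path (fn=module_fn). Shapes follow the constants defined above.
model = Model(*get_init_inputs())
out = model(*get_inputs())  # x, bn_eps, bn_momentum, divide_value
print(out.shape)            # torch.Size([128, 512])

# A compiled CUDA forward with the same signature (hypothetical name, shown for
# illustration only) could be swapped in via:
#   model(*get_inputs(), fn=my_cuda_forward)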
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs a matrix multiplication, batch normalization, bias addition, division, and Swish activation.
    """
    def __init__(self, in_features, out_features, bn_eps=1e-5, bn_momentum=0.1, bias_shape=(1,), divide_value=1.0):
        super(Model, self).__init__()
        self.matmul = nn.Linear(in_features, out_features)
        self.bn = nn.BatchNorm1d(out_features, eps=bn_eps, momentum=bn_momentum)
        self.bias = nn.Parameter(torch.randn(bias_shape) * 0.02)
        self.divide_value = divide_value

    def forward(self, x):
        x = self.matmul(x)
        x = self.bn(x)
        x = x + self.bias
        x = x / self.divide_value
        x = x * torch.sigmoid(x)
        return x

batch_size = 128
in_features = 1024
out_features = 512
bn_eps = 1e-5
bn_momentum = 0.1
bias_shape = (1,)
divide_value = 1.0

def get_inputs():
    return [torch.randn(batch_size, in_features)]

def get_init_inputs():
    return [in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value]

Kernel Information

Related Kernels (Level 2, Task 97 • 97_Matmul_BatchNorm_BiasAdd_Divide_Swish)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 block_tuned_fused_bn_swish_base 0.03 2.28 1.94
🥇 optimized_thread_block_mapping_base 0.03 2.28 1.94
🥇 block_experiment_fused_bn_swish_base 0.03 2.28 1.94
🥇 blocksize_optimized_fused_bn_swish_base 0.03 2.28 1.94
🥇 fused_bn_swish_opt_base 0.03 2.28 1.94
🥇 fused_bn_swish_combined_base 0.03 2.28 1.94
🥇 fused_bn_swish_ldg_base_base 0.03 2.28 1.94
8 fused_bn_swish_atomic_opt_base_base 0.03 2.19 1.87
8 optimized_fused_bn_swish_base 0.03 2.19 1.87
8 sync_optimized_fused_bn_swish_base 0.03 2.19 1.87
8 stride_loop_optimized_fused_bn_swish_base_base 0.03 2.19 1.87
8 fused_bn_swish_warp_base 0.03 2.19 1.87
8 fused_bn_swish_atomic_opt_base 0.03 2.19 1.87
8 warp_divergence_optimized_fused_bn_swish_base 0.03 2.19 1.87
8 fused_bn_swish_base 0.03 2.19 1.87
8 shared_param_fused_bn_swish_base 0.03 2.19 1.87
8 tuned_block_size_bn_swish_base 0.03 2.19 1.87
18 adaptive_block_fused_bn_swish_base_base 0.03 2.11 1.80
19 atomic_optimized_matmul_bn_base 0.04 1.44 1.23
20 stream_optimized_fused_bn_swish_base 0.04 1.35 1.14
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <math.h>

// Fused kernel that computes per-feature reduction to obtain mean and variance
// and then applies batch normalization, bias addition, division and Swish activation.
// Each block is assigned to one feature (column) of the x_linear tensor of shape [batch_size, out_features].
// This design avoids unnecessary atomic operations by confining reductions and running stat updates to a single block per feature.
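// The per-feature statistics use the single-pass identity Var(x) = E[x^2] - (E[x])^2:
// each thread accumulates a partial sum and sum of squares over a strided slice of the
// batch, and the shared-memory tree reduction below combines the partials before
// thread 0 updates the running statistics for this feature.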

// Template kernel
template <typename scalar_t>
__global__ void fused_bn_swish_kernel(
    const scalar_t* __restrict__ x_linear, // Input: result of linear transformation, shape [batch_size, out_features]
    scalar_t* __restrict__ output,         // Output tensor, same shape as x_linear
    const scalar_t* __restrict__ bn_weight,  // BatchNorm weight, shape [out_features]
    const scalar_t* __restrict__ bn_bias,    // BatchNorm bias, shape [out_features]
    scalar_t* __restrict__ bn_running_mean,  // Running mean, shape [out_features]
    scalar_t* __restrict__ bn_running_var,   // Running variance, shape [out_features]
    const scalar_t* __restrict__ add_bias,   // Additional bias (1-element tensor)
    const float bn_eps,                      // BatchNorm epsilon
    const float bn_momentum,                 // BatchNorm momentum
    const float divide_value,                // Division value
    const int batch_size,
    const int out_features) {

    // Each block handles one feature column
    int f = blockIdx.x;
    if (f >= out_features) return;
    int tid = threadIdx.x;

    // Allocate shared memory for reduction: first half for sum, second half for sum of squares
    extern __shared__ float shared_data[]; // size: 2 * blockDim.x
    float* s_sum = shared_data;              // shared array for sums
    float* s_sumsq = shared_data + blockDim.x; // shared array for sum of squares

    float local_sum = 0.0f;
    float local_sumsq = 0.0f;

    // Loop over the batch dimension. The input is in row-major order.
    for (int i = tid; i < batch_size; i += blockDim.x) {
        int index = i * out_features + f;
        float val = static_cast<float>(x_linear[index]);
        local_sum += val;
        local_sumsq += val * val;
    }
    s_sum[tid] = local_sum;
    s_sumsq[tid] = local_sumsq;
    __syncthreads();

    // Perform reduction in shared memory to compute total sum and sumsq for feature f
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            s_sum[tid] += s_sum[tid + s];
            s_sumsq[tid] += s_sumsq[tid + s];
        }
        __syncthreads();
    }

    // Compute mean and variance for feature f
    float mean = s_sum[0] / batch_size;
    float var = s_sumsq[0] / batch_size - mean * mean;

    // Update running statistics. Since one block exclusively handles feature f, no atomics are needed.
    if (tid == 0) {
        bn_running_mean[f] = bn_running_mean[f] * (1 - bn_momentum) + mean * bn_momentum;
        bn_running_var[f] = bn_running_var[f] * (1 - bn_momentum) + var * bn_momentum;
    }
    __syncthreads();

    // Second pass: apply batch normalization and Swish activation to each element in this feature column
    for (int i = tid; i < batch_size; i += blockDim.x) {
        int index = i * out_features + f;
        float val = static_cast<float>(x_linear[index]);
        float x_hat = (val - mean) / sqrtf(var + bn_eps);
        float x_bn = x_hat * bn_weight[f] + bn_bias[f];
        float x_add = x_bn + add_bias[0];
        float x_div = x_add / divide_value;
        float x_swish = x_div / (1.0f + expf(-x_div));
        output[index] = static_cast<scalar_t>(x_swish);
    }
}

// Main function: performs linear transformation then calls the fused kernel
torch::Tensor module_fn_cuda(
    torch::Tensor x,
    float bn_eps,
    float bn_momentum,
    float divide_value,
    torch::Tensor weight,
    torch::Tensor bias,
    torch::Tensor bn_weight,
    torch::Tensor bn_bias,
    torch::Tensor bn_running_mean,
    torch::Tensor bn_running_var,
    torch::Tensor add_bias) {

    const auto batch_size = x.size(0);
    const auto in_features = x.size(1);
    const auto out_features = weight.size(0);

    // Ensure tensors are contiguous
    x = x.contiguous();
    weight = weight.contiguous();
    bias = bias.contiguous();

    // Perform linear transformation: x_linear = x @ weight.T + bias
    auto x_linear = torch::addmm(bias, x, weight.t());

    // Prepare output tensor
    auto output = torch::empty_like(x_linear);

    // Launch configuration: one block per feature; each block uses a fixed number of threads
    const int threads = 256;
    const int blocks = out_features;  // one block per feature column
    const size_t shared_mem_size = 2 * threads * sizeof(float);

    AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda", ([&] {
        fused_bn_swish_kernel<scalar_t><<<blocks, threads, shared_mem_size>>>(
            x_linear.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            bn_weight.data_ptr<scalar_t>(),
            bn_bias.data_ptr<scalar_t>(),
            bn_running_mean.data_ptr<scalar_t>(),
            bn_running_var.data_ptr<scalar_t>(),
            add_bias.data_ptr<scalar_t>(),
            bn_eps,
            bn_momentum,
            divide_value,
            batch_size,
            out_features);
    }));

    return output;
}

// PyBind11 module definition
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn_cuda, "Fused BN and Swish forward (CUDA)");
}
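
As a sanity check, a minimal sketch (assuming the source above is saved as fused_bn_swish.cu and a CUDA toolchain plus GPU are available) that JIT-compiles the extension and compares its output against the PyTorch reference module_fn defined earlier:

import torch
import torch.nn as nn
from torch.utils.cpp_extension import load

# JIT-compile the extension; "forward" is the function exposed by PYBIND11_MODULE.
ext = load(name="fused_bn_swish", sources=["fused_bn_swish.cu"])

gemm = nn.Linear(in_features, out_features).cuda()
bn = nn.BatchNorm1d(out_features, eps=bn_eps, momentum=bn_momentum).cuda()
add_bias = torch.randn(bias_shape, device="cuda") * 0.02
x = torch.randn(batch_size, in_features, device="cuda")

with torch.no_grad():
    # Reference path (pure PyTorch, training-mode batch norm); running stats are
    # cloned so both calls start from identical BatchNorm state.
    ref = module_fn(x, bn_eps, bn_momentum, divide_value,
                    gemm.weight, gemm.bias, bn.weight, bn.bias,
                    bn.running_mean.clone(), bn.running_var.clone(), add_bias)
    # Fused CUDA path.
    out = ext.forward(x, bn_eps, bn_momentum, divide_value,
                      gemm.weight, gemm.bias, bn.weight, bn.bias,
                      bn.running_mean.clone(), bn.running_var.clone(), add_bias)

print(torch.allclose(ref, out, atol=1e-5))  # expected: True (within float tolerance)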
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.708 inst/cycle 0.000 5
Executed Ipc Elapsed 0.436 inst/cycle 0.000 5
Issue Slots Busy 18.414 % 0.053 5
Issued Ipc Active 0.738 inst/cycle 0.000 5
SM Busy 18.414 % 0.053 5
Memory Throughput 40282701766.962 byte/second 184240911646046016.000 5
Mem Busy 22.410 % 0.033 5
Max Bandwidth 21.068 % 0.077 5
L1/TEX Hit Rate 34.940 % 0.000 5
L2 Hit Rate 95.114 % 0.406 5
Mem Pipes Busy 12.876 % 0.017 5
Warp Cycles Per Issued Instruction 35.180 cycle 1.614 5
Warp Cycles Per Executed Instruction 36.694 cycle 1.751 5
Avg. Active Threads Per Warp 30.670 0.000 5
Avg. Not Predicated Off Threads Per Warp 26.440 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 21.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 39.614 % 0.206 5
Achieved Active Warps Per SM 25.356 warp 0.085 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (39.1%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 222646.95 μs
Device Time 203.58 μs
Self CPU Time 62.69 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 222584.26 μs
Device Time 203.58 μs
Self CPU Time 131.00 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 237175.76 μs
Device Time 0.00 μs
Self CPU Time 15447.02 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 217545.02 μs
Device Time 0.00 μs
Self CPU Time 217545.02 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 55402.53 μs
Device Time 439956.79 μs
Self CPU Time 9089.99 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 46334.07 μs
Device Time 439956.79 μs
Self CPU Time 14462.76 μs
Self Device Time 439956.79 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::addmm
CPU Time 389668.39 μs
Device Time 94499.33 μs
Self CPU Time 132135.11 μs
Self Device Time 94499.33 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
sm80_xmma_gemm_f32f32_f32f32_f32_tn_n_tilesize32x32x8_stage3_warpsize1x2x1_ffma_aligna4_alignc4_execute_kernel__51_cublas
CPU Time 0.00 μs
Device Time 85078.49 μs
Self CPU Time 0.00 μs
Self Device Time 85078.49 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 439956.79 μs
Self CPU Time 0.00 μs
Self Device Time 439956.79 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45300 warnings generated when compiling for host.
Suppressed 45331 warnings (45284 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:19:5: warning: 2 adjacent parameters of 'fused_bn_swish_kernel' of similar type ('scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
19 | scalar_t* __restrict__ bn_running_mean, // Running mean, shape [out_features]
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 | scalar_t* __restrict__ bn_running_var, // Running variance, shape [out_features]
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:19:28: note: the first parameter in the range is 'bn_running_mean'
19 | scalar_t* __restrict__ bn_running_mean, // Running mean, shape [out_features]
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:20:28: note: the last parameter in the range is 'bn_running_var'
20 | scalar_t* __restrict__ bn_running_var, // Running variance, shape [out_features]
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:22:5: warning: 5 adjacent parameters of 'fused_bn_swish_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
22 | const float bn_eps, // BatchNorm epsilon
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 | const float bn_momentum, // BatchNorm momentum
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 | const float divide_value, // Division value
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 | const int batch_size,
| ~~~~~~~~~~~~~~~~~~~~~
26 | const int out_features) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:22:17: note: the first parameter in the range is 'bn_eps'
22 | const float bn_eps, // BatchNorm epsilon
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:26:15: note: the last parameter in the range is 'out_features'
26 | const int out_features) {
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:25:5: note: 'const float' and 'const int' may be implicitly converted: 'const float' (as 'float') -> 'const int' (as 'int'), 'const int' (as 'int') -> 'const float' (as 'float')
25 | const int batch_size,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:29:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
29 | int f = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:31:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
31 | int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:42:44: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
42 | for (int i = tid; i < batch_size; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:53:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
53 | for (int s = blockDim.x / 2; s > 0; s >>= 1) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:62:29: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
62 | float mean = s_sum[0] / batch_size;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:63:30: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
63 | float var = s_sumsq[0] / batch_size - mean * mean;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:73:44: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
73 | for (int i = tid; i < batch_size; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:92:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
92 | torch::Tensor bias,
| ^~~~~~~~~~~~~~~~~~~
93 | torch::Tensor bn_weight,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:92:19: note: the first parameter in the range is 'bias'
92 | torch::Tensor bias,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:93:19: note: the last parameter in the range is 'bn_weight'
93 | torch::Tensor bn_weight,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:100:16: warning: Value stored to 'in_features' during its initialization is never read [clang-analyzer-deadcode.DeadStores]
100 | const auto in_features = x.size(1);
| ^~~~~~~~~~~ ~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:100:16: note: Value stored to 'in_features' during its initialization is never read
100 | const auto in_features = x.size(1);
| ^~~~~~~~~~~ ~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:116:24: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
116 | const int blocks = out_features; // one block per feature column
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:117:36: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
117 | const size_t shared_mem_size = 2 * threads * sizeof(float);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:117:36: note: make conversion explicit to silence this warning
4 | const size_t shared_mem_size = 2 * threads * sizeof(float);
| ^~~~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:117:36: note: perform multiplication in a wider type
117 | const size_t shared_mem_size = 2 * threads * sizeof(float);
| ^
| static_cast<long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:119:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
119 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^