
The AI CUDA Engineer 👷

97_Matmul_BatchNorm_BiasAdd_Divide_Swish • stream_optimized_fused_bn_swish_base

Level 2 • Task 97
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    bn_eps: float,
    bn_momentum: float,
    divide_value: float,
    weight: torch.Tensor,
    bias: torch.Tensor,
    bn_weight: torch.Tensor,
    bn_bias: torch.Tensor,
    bn_running_mean: torch.Tensor,
    bn_running_var: torch.Tensor,
    add_bias: torch.Tensor,
) -> torch.Tensor:
    """
    Applies matrix multiplication, batch normalization, bias addition, division and Swish activation.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        bn_eps (float): Small constant for numerical stability in batch norm
        bn_momentum (float): Momentum for batch norm running stats
        divide_value (float): Value to divide by
        weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
        bias (torch.Tensor): Bias vector of shape (out_features)
        bn_weight (torch.Tensor): Batch norm weight of shape (out_features)
        bn_bias (torch.Tensor): Batch norm bias of shape (out_features)
        bn_running_mean (torch.Tensor): Batch norm running mean of shape (out_features)
        bn_running_var (torch.Tensor): Batch norm running variance of shape (out_features)
        add_bias (torch.Tensor): Additional bias term of shape (1,)

    Returns:
        torch.Tensor: Output tensor of shape (batch_size, out_features)
    """
    x = F.linear(x, weight, bias)
    x = F.batch_norm(
        x,
        bn_running_mean,
        bn_running_var,
        bn_weight,
        bn_bias,
        training=True,
        momentum=bn_momentum,
        eps=bn_eps,
    )
    x = x + add_bias
    x = x / divide_value
    x = x * torch.sigmoid(x)
    return x


class Model(nn.Module):
    """
    Model that performs a matrix multiplication, batch normalization, bias addition, division and Swish activation.
    """

    def __init__(
        self, in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value
    ):
        super(Model, self).__init__()
        gemm = nn.Linear(in_features, out_features)
        bn = nn.BatchNorm1d(out_features, eps=bn_eps, momentum=bn_momentum)
        self.weight = gemm.weight
        self.bias = gemm.bias
        self.bn_weight = bn.weight
        self.bn_bias = bn.bias
        self.bn_running_mean = nn.Parameter(bn.running_mean, requires_grad=False)
        self.bn_running_var = nn.Parameter(bn.running_var, requires_grad=False)
        self.add_bias = nn.Parameter(torch.randn(bias_shape) * 0.02)

    def forward(self, x, bn_eps, bn_momentum, divide_value, fn=module_fn):
        return fn(
            x,
            bn_eps,
            bn_momentum,
            divide_value,
            self.weight,
            self.bias,
            self.bn_weight,
            self.bn_bias,
            self.bn_running_mean,
            self.bn_running_var,
            self.add_bias,
        )


batch_size = 128
in_features = 1024
out_features = 512
bn_eps = 1e-5
bn_momentum = 0.1
bias_shape = (1,)
divide_value = 1.0


def get_inputs():
    return [torch.randn(batch_size, in_features), bn_eps, bn_momentum, divide_value]


def get_init_inputs():
    return [in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value]
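
A quick smoke test for the functional harness above (a hypothetical sketch, not part of the original listing; it assumes module_fn, Model, get_inputs, and get_init_inputs from above are in scope):

model = Model(*get_init_inputs())
out = model(*get_inputs())
assert out.shape == (batch_size, out_features)  # torch.Size([128, 512])

The equivalent nn.Module formulation of the same task follows.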
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs a matrix multiplication, batch normalization, bias addition, division, and Swish activation.
    """
    def __init__(self, in_features, out_features, bn_eps=1e-5, bn_momentum=0.1, bias_shape=(1,), divide_value=1.0):
        super(Model, self).__init__()
        self.matmul = nn.Linear(in_features, out_features)
        self.bn = nn.BatchNorm1d(out_features, eps=bn_eps, momentum=bn_momentum)
        self.bias = nn.Parameter(torch.randn(bias_shape) * 0.02)
        self.divide_value = divide_value

    def forward(self, x):
        x = self.matmul(x)
        x = self.bn(x)
        x = x + self.bias
        x = x / self.divide_value
        x = x * torch.sigmoid(x)
        return x

batch_size = 128
in_features = 1024
out_features = 512
bn_eps = 1e-5
bn_momentum = 0.1
bias_shape = (1,)
divide_value = 1.0

def get_inputs():
    return [torch.randn(batch_size, in_features)]

def get_init_inputs():
    return [in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value]
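
As a cross-check (again a hypothetical sketch, not in the original listing), the nn.Module version just above and module_fn from the top of the page should agree when handed the same parameters:

torch.manual_seed(0)
m = Model(*get_init_inputs())  # the nn.Module version defined just above
x = torch.randn(batch_size, in_features)
ref = m(x)  # training mode: BatchNorm1d normalizes with batch statistics
out = module_fn(
    x, bn_eps, bn_momentum, divide_value,
    m.matmul.weight, m.matmul.bias,
    m.bn.weight, m.bn.bias,
    m.bn.running_mean.clone(), m.bn.running_var.clone(),
    m.bias,
)
assert torch.allclose(ref, out, atol=1e-6)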

Kernel Information

Related Kernels (Level 2, Task 97 • 97_Matmul_BatchNorm_BiasAdd_Divide_Swish)

Rank | Kernel Name | Runtime (ms) | Speedup (Native) | Speedup (Compile)
🥇 | block_tuned_fused_bn_swish_base | 0.03 | 2.28 | 1.94
🥇 | optimized_thread_block_mapping_base | 0.03 | 2.28 | 1.94
🥇 | block_experiment_fused_bn_swish_base | 0.03 | 2.28 | 1.94
🥇 | blocksize_optimized_fused_bn_swish_base | 0.03 | 2.28 | 1.94
🥇 | fused_bn_swish_opt_base | 0.03 | 2.28 | 1.94
🥇 | fused_bn_swish_combined_base | 0.03 | 2.28 | 1.94
🥇 | fused_bn_swish_ldg_base_base | 0.03 | 2.28 | 1.94
8 | fused_bn_swish_atomic_opt_base_base | 0.03 | 2.19 | 1.87
8 | optimized_fused_bn_swish_base | 0.03 | 2.19 | 1.87
8 | sync_optimized_fused_bn_swish_base | 0.03 | 2.19 | 1.87
8 | stride_loop_optimized_fused_bn_swish_base_base | 0.03 | 2.19 | 1.87
8 | fused_bn_swish_warp_base | 0.03 | 2.19 | 1.87
8 | fused_bn_swish_atomic_opt_base | 0.03 | 2.19 | 1.87
8 | warp_divergence_optimized_fused_bn_swish_base | 0.03 | 2.19 | 1.87
8 | fused_bn_swish_base | 0.03 | 2.19 | 1.87
8 | shared_param_fused_bn_swish_base | 0.03 | 2.19 | 1.87
8 | tuned_block_size_bn_swish_base | 0.03 | 2.19 | 1.87
18 | adaptive_block_fused_bn_swish_base_base | 0.03 | 2.11 | 1.80
19 | atomic_optimized_matmul_bn_base | 0.04 | 1.44 | 1.23
20 | stream_optimized_fused_bn_swish_base | 0.04 | 1.35 | 1.14
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>

// Kernel for computing batch statistics with shared memory
__global__ void compute_batch_stats_kernel(
    const float* __restrict__ x_linear,
    float* __restrict__ batch_mean,
    float* __restrict__ batch_var,
    const int batch_size,
    const int out_features) {
    
    extern __shared__ float shared_data[];
    float* shared_mean = shared_data;
    float* shared_var = &shared_data[blockDim.x];
    
    const int tid = threadIdx.x;
    const int feat_idx = blockIdx.x;
    
    if (feat_idx >= out_features) return;
    
    // Initialize accumulators
    float local_sum = 0.0f;
    float local_sq_sum = 0.0f;
    
    // Compute partial sums
    for (int i = tid; i < batch_size; i += blockDim.x) {
        float val = x_linear[i * out_features + feat_idx];
        local_sum += val;
        local_sq_sum += val * val;
    }
    
    // Store in shared memory
    shared_mean[tid] = local_sum;
    shared_var[tid] = local_sq_sum;
    __syncthreads();
    
    // Parallel reduction in shared memory
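    // Halving-stride tree reduction: assumes blockDim.x is a power of two
    // (the launch below uses 256 threads, so the assumption holds).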
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {
        if (tid < stride) {
            shared_mean[tid] += shared_mean[tid + stride];
            shared_var[tid] += shared_var[tid + stride];
        }
        __syncthreads();
    }
    
    // Write final results
    if (tid == 0) {
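        // One-pass estimate Var[x] = E[x^2] - E[x]^2; adequate here, though
        // Welford's algorithm is the numerically stabler alternative.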
        float mean = shared_mean[0] / batch_size;
        float variance = (shared_var[0] / batch_size) - (mean * mean);
        batch_mean[feat_idx] = mean;
        batch_var[feat_idx] = variance;
    }
}

// Main computation kernel
template <typename scalar_t>
__global__ void module_kernel(
    const scalar_t* __restrict__ x_linear,
    scalar_t* __restrict__ output,
    const scalar_t* __restrict__ bn_weight,
    const scalar_t* __restrict__ bn_bias,
    const scalar_t* __restrict__ batch_mean,
    const scalar_t* __restrict__ batch_var,
    const scalar_t* __restrict__ add_bias,
    const float bn_eps,
    const float divide_value,
    const int total_elements,
    const int out_features) {
    
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < total_elements) {
        int f = idx % out_features;
        
        scalar_t val = x_linear[idx];
        scalar_t mean = batch_mean[f];
        scalar_t var = batch_var[f];
        
        scalar_t x_hat = (val - mean) * rsqrtf(var + bn_eps);
        scalar_t x_bn = fmaf(x_hat, bn_weight[f], bn_bias[f]);
        scalar_t x_add = x_bn + add_bias[0];
        scalar_t x_div = x_add / divide_value;
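        // Swish via the identity x * sigmoid(x) = x / (1 + exp(-x)).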
        scalar_t x_swish = x_div / (1.0f + expf(-x_div));
        
        output[idx] = x_swish;
    }
}

torch::Tensor module_fn_cuda(
    torch::Tensor x,
    float bn_eps,
    float bn_momentum,
    float divide_value,
    torch::Tensor weight,
    torch::Tensor bias,
    torch::Tensor bn_weight,
    torch::Tensor bn_bias,
    torch::Tensor bn_running_mean,
    torch::Tensor bn_running_var,
    torch::Tensor add_bias) {
    
    const auto batch_size = x.size(0);
    const auto out_features = weight.size(0);
    
    auto x_linear = torch::addmm(bias, x, weight.t());
    
    auto batch_mean = torch::empty({out_features}, x.options());
    auto batch_var = torch::empty({out_features}, x.options());
    
    // Compute batch statistics
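    // Note: this helper kernel is hard-coded to float32; the AT_DISPATCH below
    // also covers double, so a double input would fail at data_ptr<float>().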
    const int threads_stats = 256;
    const int shared_mem_size = 2 * threads_stats * sizeof(float);
    compute_batch_stats_kernel<<<out_features, threads_stats, shared_mem_size>>>(
        x_linear.data_ptr<float>(),
        batch_mean.data_ptr<float>(),
        batch_var.data_ptr<float>(),
        batch_size,
        out_features);
    
    // Update running statistics
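    // Uses the biased batch variance; PyTorch's F.batch_norm updates running_var
    // with the unbiased n/(n-1) estimate, so running stats can drift slightly
    // from the reference implementation.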
    bn_running_mean.mul_(1 - bn_momentum).add_(batch_mean * bn_momentum);
    bn_running_var.mul_(1 - bn_momentum).add_(batch_var * bn_momentum);
    
    auto output = torch::empty_like(x_linear);
    const int total_elements = batch_size * out_features;
    const int threads = 256;
    const int blocks = (total_elements + threads - 1) / threads;
    
    AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda", ([&] {
        module_kernel<scalar_t><<<blocks, threads>>>(
            x_linear.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            bn_weight.data_ptr<scalar_t>(),
            bn_bias.data_ptr<scalar_t>(),
            batch_mean.data_ptr<scalar_t>(),
            batch_var.data_ptr<scalar_t>(),
            add_bias.data_ptr<scalar_t>(),
            bn_eps,
            divide_value,
            total_elements,
            out_features);
    }));
    
    return output;
}

// PyBind11 module definition
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn_cuda, "Stream optimized forward (CUDA)");
}
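
Before the profile numbers, a minimal sketch of how the extension above could be built and checked against the PyTorch reference (assumptions: the CUDA source is saved as base.cu, a CUDA device is available, and the module name is illustrative):

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the listing above; name and source path are placeholders.
fused = load(name="stream_optimized_fused_bn_swish", sources=["base.cu"])

dev = "cuda"
x = torch.randn(128, 1024, device=dev)
w = torch.randn(512, 1024, device=dev)
b = torch.randn(512, device=dev)
bn_w, bn_b = torch.ones(512, device=dev), torch.zeros(512, device=dev)
r_mean, r_var = torch.zeros(512, device=dev), torch.ones(512, device=dev)
add_b = torch.randn(1, device=dev)

out = fused.forward(x, 1e-5, 0.1, 1.0, w, b, bn_w, bn_b,
                    r_mean.clone(), r_var.clone(), add_b)

# Reference path (training-mode BN normalizes with batch statistics).
ref = F.batch_norm(F.linear(x, w, b), r_mean, r_var, bn_w, bn_b,
                   training=True, momentum=0.1, eps=1e-5)
ref = (ref + add_b) / 1.0
ref = ref * torch.sigmoid(ref)
print(torch.allclose(out, ref, atol=1e-4))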
Performance Metrics
Metric | Value | Unit | Variance | Samples
Executed Ipc Active | 0.458 | inst/cycle | 0.001 | 5
Executed Ipc Elapsed | 0.184 | inst/cycle | 0.000 | 5
Issue Slots Busy | 12.318 | % | 0.382 | 5
Issued Ipc Active | 0.494 | inst/cycle | 0.001 | 5
SM Busy | 12.318 | % | 0.382 | 5
Memory Throughput | 74783737783.552 | byte/second | 2108446873915820800.000 | 5
Mem Busy | 10.356 | % | 0.021 | 5
Max Bandwidth | 6.774 | % | 0.016 | 5
L1/TEX Hit Rate | 26.740 | % | 0.000 | 5
L2 Hit Rate | 83.824 | % | 0.057 | 5
Mem Pipes Busy | 8.066 | % | 0.022 | 5
Warp Cycles Per Issued Instruction | 28.078 | cycle | 0.400 | 5
Warp Cycles Per Executed Instruction | 30.140 | cycle | 0.461 | 5
Avg. Active Threads Per Warp | 32.000 | — | 0.000 | 5
Avg. Not Predicated Off Threads Per Warp | 29.650 | — | 0.000 | 5
Max Active Clusters | 0.000 | cluster | 0.000 | 5
Max Cluster Size | 8.000 | block | 0.000 | 5
Overall GPU Occupancy | 0.000 | % | 0.000 | 5
Cluster Occupancy | 0.000 | % | 0.000 | 5
Block Limit SM | 32.000 | block | 0.000 | 5
Block Limit Registers | 10.000 | block | 0.000 | 5
Block Limit Shared Mem | 32.000 | block | 0.000 | 5
Block Limit Warps | 8.000 | block | 0.000 | 5
Theoretical Active Warps per SM | 64.000 | warp | 0.000 | 5
Theoretical Occupancy | 100.000 | % | 0.000 | 5
Achieved Occupancy | 22.858 | % | 0.022 | 5
Achieved Active Warps Per SM | 14.630 | warp | 0.009 | 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (22.6%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::empty_strided
CPU Time 260848.80 μs
Device Time 0.00 μs
Self CPU Time 44001.29 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 144476.58 μs
Device Time 1410238.20 μs
Self CPU Time 29718.75 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 114771.82 μs
Device Time 1410238.20 μs
Self CPU Time 41099.92 μs
Self Device Time 1410238.20 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::mul
CPU Time 585820.45 μs
Device Time 190316.98 μs
Self CPU Time 300731.85 μs
Self Device Time 190316.98 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::addmm
CPU Time 644189.87 μs
Device Time 305646.66 μs
Self CPU Time 440412.64 μs
Self Device Time 305646.66 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
sm80_xmma_gemm_f32f32_f32f32_f32_tn_n_tilesize32x32x8_stage3_warpsize1x2x1_ffma_aligna4_alignc4_execute_kernel__51_cublas
CPU Time 0.00 μs
Device Time 275326.53 μs
Self CPU Time 0.00 μs
Self Device Time 275326.53 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 639326.97 μs
Device Time 40637.93 μs
Self CPU Time 639326.97 μs
Self Device Time 40637.93 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::mul_
CPU Time 328276.73 μs
Device Time 97015.06 μs
Self CPU Time 66865.21 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 1410238.20 μs
Self CPU Time 0.00 μs
Self Device Time 1410238.20 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45311 warnings generated when compiling for host.
Suppressed 45331 warnings (45284 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:9:5: warning: 2 adjacent parameters of 'compute_batch_stats_kernel' of similar type ('float *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
9 | float* __restrict__ batch_mean,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 | float* __restrict__ batch_var,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:9:25: note: the first parameter in the range is 'batch_mean'
9 | float* __restrict__ batch_mean,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:10:25: note: the last parameter in the range is 'batch_var'
10 | float* __restrict__ batch_var,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:11:5: warning: 2 adjacent parameters of 'compute_batch_stats_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
11 | const int batch_size,
| ^~~~~~~~~~~~~~~~~~~~~
12 | const int out_features) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:11:15: note: the first parameter in the range is 'batch_size'
11 | const int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:12:15: note: the last parameter in the range is 'out_features'
12 | const int out_features) {
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:18:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
18 | const int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:19:26: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
19 | const int feat_idx = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:28:44: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
28 | for (int i = tid; i < batch_size; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:40:23: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
40 | for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:50:39: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
50 | float mean = shared_mean[0] / batch_size;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:51:43: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
51 | float variance = (shared_var[0] / batch_size) - (mean * mean);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:63:5: warning: 4 adjacent parameters of 'module_kernel' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
63 | const scalar_t* __restrict__ bn_bias,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
64 | const scalar_t* __restrict__ batch_mean,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
65 | const scalar_t* __restrict__ batch_var,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
66 | const scalar_t* __restrict__ add_bias,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:63:34: note: the first parameter in the range is 'bn_bias'
63 | const scalar_t* __restrict__ bn_bias,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:66:34: note: the last parameter in the range is 'add_bias'
66 | const scalar_t* __restrict__ add_bias,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:67:5: warning: 4 adjacent parameters of 'module_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
67 | const float bn_eps,
| ^~~~~~~~~~~~~~~~~~~
68 | const float divide_value,
| ~~~~~~~~~~~~~~~~~~~~~~~~~
69 | const int total_elements,
| ~~~~~~~~~~~~~~~~~~~~~~~~~
70 | const int out_features) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:67:17: note: the first parameter in the range is 'bn_eps'
67 | const float bn_eps,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:70:15: note: the last parameter in the range is 'out_features'
70 | const int out_features) {
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:69:5: note: 'const float' and 'const int' may be implicitly converted: 'const float' (as 'float') -> 'const int' (as 'int'), 'const int' (as 'int') -> 'const float' (as 'float')
69 | const int total_elements,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:72:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
72 | const int idx = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:91:19: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
91 | torch::Tensor x,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:92:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('float') are easily swapped by mistake [bugprone-easily-swappable-parameters]
92 | float bn_eps,
| ^~~~~~~~~~~~~
93 | float bn_momentum,
| ~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:92:11: note: the first parameter in the range is 'bn_eps'
92 | float bn_eps,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:93:11: note: the last parameter in the range is 'bn_momentum'
93 | float bn_momentum,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:95:19: warning: the parameter 'weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
95 | torch::Tensor weight,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:96:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
96 | torch::Tensor bias,
| ^~~~~~~~~~~~~~~~~~~
97 | torch::Tensor bn_weight,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:96:19: note: the first parameter in the range is 'bias'
96 | torch::Tensor bias,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:97:19: note: the last parameter in the range is 'bn_weight'
97 | torch::Tensor bn_weight,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:96:19: warning: the parameter 'bias' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
96 | torch::Tensor bias,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:98:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
98 | torch::Tensor bn_bias,
| ^~~~~~~~~~~~~~~~~~~~~~
99 | torch::Tensor bn_running_mean,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:98:19: note: the first parameter in the range is 'bn_bias'
98 | torch::Tensor bn_bias,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:99:19: note: the last parameter in the range is 'bn_running_mean'
99 | torch::Tensor bn_running_mean,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:99:19: warning: the parameter 'bn_running_mean' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
99 | torch::Tensor bn_running_mean,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:100:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
100 | torch::Tensor bn_running_var,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
101 | torch::Tensor add_bias) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:100:19: note: the first parameter in the range is 'bn_running_var'
100 | torch::Tensor bn_running_var,
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:101:19: note: the last parameter in the range is 'add_bias'
101 | torch::Tensor add_bias) {
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:100:19: warning: the parameter 'bn_running_var' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
100 | torch::Tensor bn_running_var,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:113:33: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
113 | const int shared_mem_size = 2 * threads_stats * sizeof(float);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:113:33: note: make conversion explicit to silence this warning
4 | const int shared_mem_size = 2 * threads_stats * sizeof(float);
| ^~~~~~~~~~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:113:33: note: perform multiplication in a wider type
113 | const int shared_mem_size = 2 * threads_stats * sizeof(float);
| ^
| static_cast<long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:118:9: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
118 | batch_size,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:119:9: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
119 | out_features);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:126:32: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
126 | const int total_elements = batch_size * out_features;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:130:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
130 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^