
The AI CUDA Engineer 👷

97_Matmul_BatchNorm_BiasAdd_Divide_Swish • block_experiment_fused_bn_swish_base

Level 2 • Task 97
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    bn_eps: float,
    bn_momentum: float,
    divide_value: float,
    weight: torch.Tensor,
    bias: torch.Tensor,
    bn_weight: torch.Tensor,
    bn_bias: torch.Tensor,
    bn_running_mean: torch.Tensor,
    bn_running_var: torch.Tensor,
    add_bias: torch.Tensor,
) -> torch.Tensor:
    """
    Applies matrix multiplication, batch normalization, bias addition, division and Swish activation.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        bn_eps (float): Small constant for numerical stability in batch norm
        bn_momentum (float): Momentum for batch norm running stats
        divide_value (float): Value to divide by
        weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
        bias (torch.Tensor): Bias vector of shape (out_features)
        bn_weight (torch.Tensor): Batch norm weight of shape (out_features)
        bn_bias (torch.Tensor): Batch norm bias of shape (out_features)
        bn_running_mean (torch.Tensor): Batch norm running mean of shape (out_features)
        bn_running_var (torch.Tensor): Batch norm running variance of shape (out_features)
        add_bias (torch.Tensor): Additional bias term of shape (1,)

    Returns:
        torch.Tensor: Output tensor of shape (batch_size, out_features)
    """
    x = F.linear(x, weight, bias)
    x = F.batch_norm(
        x,
        bn_running_mean,
        bn_running_var,
        bn_weight,
        bn_bias,
        training=True,
        momentum=bn_momentum,
        eps=bn_eps,
    )
    x = x + add_bias
    x = x / divide_value
    x = x * torch.sigmoid(x)
    return x


class Model(nn.Module):
    """
    Model that performs a matrix multiplication, batch normalization, bias addition, division and Swish activation.
    """

    def __init__(
        self, in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value
    ):
        super(Model, self).__init__()
        gemm = nn.Linear(in_features, out_features)
        bn = nn.BatchNorm1d(out_features, eps=bn_eps, momentum=bn_momentum)
        self.weight = gemm.weight
        self.bias = gemm.bias
        self.bn_weight = bn.weight
        self.bn_bias = bn.bias
        self.bn_running_mean = nn.Parameter(bn.running_mean, requires_grad=False)
        self.bn_running_var = nn.Parameter(bn.running_var, requires_grad=False)
        self.add_bias = nn.Parameter(torch.randn(bias_shape) * 0.02)

    def forward(self, x, bn_eps, bn_momentum, divide_value, fn=module_fn):
        return fn(
            x,
            bn_eps,
            bn_momentum,
            divide_value,
            self.weight,
            self.bias,
            self.bn_weight,
            self.bn_bias,
            self.bn_running_mean,
            self.bn_running_var,
            self.add_bias,
        )


batch_size = 128
in_features = 1024
out_features = 512
bn_eps = 1e-5
bn_momentum = 0.1
bias_shape = (1,)
divide_value = 1.0


def get_inputs():
    return [torch.randn(batch_size, in_features), bn_eps, bn_momentum, divide_value]


def get_init_inputs():
    return [in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value]
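
For orientation, a minimal driver sketch for the functional variant above (the actual evaluation harness is not shown here, so this calling pattern is an assumption; it runs on CPU or GPU since module_fn only uses standard torch.nn.functional ops):

# Hypothetical driver using the helpers defined above.
model = Model(*get_init_inputs())
out = model(*get_inputs())
print(out.shape)  # torch.Size([128, 512])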
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs a matrix multiplication, batch normalization, bias addition, division, and Swish activation.
    """
    def __init__(self, in_features, out_features, bn_eps=1e-5, bn_momentum=0.1, bias_shape=(1,), divide_value=1.0):
        super(Model, self).__init__()
        self.matmul = nn.Linear(in_features, out_features)
        self.bn = nn.BatchNorm1d(out_features, eps=bn_eps, momentum=bn_momentum)
        self.bias = nn.Parameter(torch.randn(bias_shape) * 0.02)
        self.divide_value = divide_value

    def forward(self, x):
        x = self.matmul(x)
        x = self.bn(x)
        x = x + self.bias
        x = x / self.divide_value
        x = x * torch.sigmoid(x)
        return x

batch_size = 128
in_features = 1024
out_features = 512
bn_eps = 1e-5
bn_momentum = 0.1
bias_shape = (1,)
divide_value = 1.0

def get_inputs():
    return [torch.randn(batch_size, in_features)]

def get_init_inputs():
    return [in_features, out_features, bn_eps, bn_momentum, bias_shape, divide_value]
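
Once the CUDA kernel listed below is compiled (for example with torch.utils.cpp_extension.load; the extension name and the source file name here are assumptions), its forward can be sanity-checked against this PyTorch reference. A minimal sketch:

import torch
from torch.utils.cpp_extension import load

# Hypothetical build of the CUDA listing below, assumed saved as base.cu.
ext = load(name="fused_bn_swish_ext", sources=["base.cu"])

model = Model(*get_init_inputs()).cuda()
x = get_inputs()[0].cuda()

ref = model(x)  # training-mode batch norm normalizes with batch statistics
out = ext.forward(
    x, bn_eps, bn_momentum, divide_value,
    model.matmul.weight, model.matmul.bias,
    model.bn.weight, model.bn.bias,
    model.bn.running_mean, model.bn.running_var,
    model.bias,
)
print(torch.allclose(ref, out, atol=1e-4))  # outputs should agree closely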

Kernel Information

Related Kernels (Level 2, Task 97 • 97_Matmul_BatchNorm_BiasAdd_Divide_Swish)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 block_tuned_fused_bn_swish_base 0.03 2.28 1.94
🥇 optimized_thread_block_mapping_base 0.03 2.28 1.94
🥇 block_experiment_fused_bn_swish_base 0.03 2.28 1.94
🥇 blocksize_optimized_fused_bn_swish_base 0.03 2.28 1.94
🥇 fused_bn_swish_opt_base 0.03 2.28 1.94
🥇 fused_bn_swish_combined_base 0.03 2.28 1.94
🥇 fused_bn_swish_ldg_base_base 0.03 2.28 1.94
8 fused_bn_swish_atomic_opt_base_base 0.03 2.19 1.87
8 optimized_fused_bn_swish_base 0.03 2.19 1.87
8 sync_optimized_fused_bn_swish_base 0.03 2.19 1.87
8 stride_loop_optimized_fused_bn_swish_base_base 0.03 2.19 1.87
8 fused_bn_swish_warp_base 0.03 2.19 1.87
8 fused_bn_swish_atomic_opt_base 0.03 2.19 1.87
8 warp_divergence_optimized_fused_bn_swish_base 0.03 2.19 1.87
8 fused_bn_swish_base 0.03 2.19 1.87
8 shared_param_fused_bn_swish_base 0.03 2.19 1.87
8 tuned_block_size_bn_swish_base 0.03 2.19 1.87
18 adaptive_block_fused_bn_swish_base_base 0.03 2.11 1.80
19 atomic_optimized_matmul_bn_base 0.04 1.44 1.23
20 stream_optimized_fused_bn_swish_base 0.04 1.35 1.14
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <cmath>

// Templated kernel that uses a compile-time block size for reduction and elementwise operations
// Each block processes one output feature (i.e., one column of the x_linear matrix with shape [batch_size, out_features]).
// The kernel computes the mean and variance across the batch for that feature, updates the running statistics, and then applies batch normalization, bias addition, division, and swish activation.

template <typename scalar_t, int BLOCK_SIZE>
__global__ void block_experiment_kernel(
    const scalar_t* __restrict__ x_linear,   // [batch_size, out_features]
    scalar_t* __restrict__ output,             // [batch_size, out_features]
    const scalar_t* __restrict__ bn_weight,      // [out_features]
    const scalar_t* __restrict__ bn_bias,        // [out_features]
    scalar_t* __restrict__ bn_running_mean,      // [out_features]
    scalar_t* __restrict__ bn_running_var,       // [out_features]
    const scalar_t* __restrict__ add_bias,       // single element tensor
    const float bn_eps,
    const float bn_momentum,
    const float divide_value,
    const int batch_size,
    const int out_features) {

    // Each block is responsible for a single feature (column) f.
    int f = blockIdx.x;
    if (f >= out_features) return;
    const int tid = threadIdx.x;

    // Allocate shared memory for reduction: two arrays of BLOCK_SIZE floats: one for sum and one for sumsq
    extern __shared__ float shared_data[]; // size = 2 * BLOCK_SIZE * sizeof(float)
    float* s_sum = shared_data;            // indices [0, BLOCK_SIZE-1]
    float* s_sumsq = shared_data + BLOCK_SIZE; // indices [BLOCK_SIZE, 2*BLOCK_SIZE-1]

    // Each thread computes a partial sum and partial sum of squares over the batch dimension
    float local_sum = 0.0f;
    float local_sumsq = 0.0f;
    for (int i = tid; i < batch_size; i += BLOCK_SIZE) {
        int idx = i * out_features + f;
        float val = static_cast<float>(x_linear[idx]);
        local_sum += val;
        local_sumsq += val * val;
    }
    s_sum[tid] = local_sum;
    s_sumsq[tid] = local_sumsq;
    __syncthreads();

    // Reduction in shared memory to sum up partial sums and sumsq
    for (int stride = BLOCK_SIZE / 2; stride > 0; stride /= 2) {
        if (tid < stride) {
            s_sum[tid] += s_sum[tid + stride];
            s_sumsq[tid] += s_sumsq[tid + stride];
        }
        __syncthreads();
    }

    // After reduction, thread 0 has the total sum and sumsq
    if (tid == 0) {
        float mean = s_sum[0] / batch_size;
        float var = s_sumsq[0] / batch_size - mean * mean;
        // Update running statistics
        bn_running_mean[f] = bn_running_mean[f] * (1.0f - bn_momentum) + mean * bn_momentum;
        bn_running_var[f]  = bn_running_var[f]  * (1.0f - bn_momentum) + var * bn_momentum;
        // Store computed mean and variance into shared memory for use by all threads
        s_sum[0] = mean;
        s_sumsq[0] = var;
    }
    __syncthreads();

    float mean = s_sum[0];
    float var  = s_sumsq[0];
    float inv_std = rsqrtf(var + bn_eps);
    float gamma = static_cast<float>(bn_weight[f]);
    float beta  = static_cast<float>(bn_bias[f]);
    float extra_bias = static_cast<float>(add_bias[0]);

    // Second pass: Apply BN, add extra bias, divide and apply Swish activation for each element in the feature column
    for (int i = tid; i < batch_size; i += BLOCK_SIZE) {
        int idx = i * out_features + f;
        float val = static_cast<float>(x_linear[idx]);
        float x_hat = (val - mean) * inv_std;
        float x_bn = x_hat * gamma + beta;
        float x_add = x_bn + extra_bias;
        float x_div = x_add / divide_value;
        // Swish: x * sigmoid(x) -> here computed as x_div * (1 / (1 + exp(-x_div)))
        float x_swish = x_div / (1.0f + expf(-x_div));
        output[idx] = static_cast<scalar_t>(x_swish);
    }
}

// Host function that selects a block size based on the batch_size and launches the templated kernel accordingly

torch::Tensor module_fn_cuda(
    torch::Tensor x,
    float bn_eps,
    float bn_momentum,
    float divide_value,
    torch::Tensor weight,
    torch::Tensor bias,
    torch::Tensor bn_weight,
    torch::Tensor bn_bias,
    torch::Tensor bn_running_mean,
    torch::Tensor bn_running_var,
    torch::Tensor add_bias) {

    const auto batch_size = x.size(0);
    const auto in_features = x.size(1);
    const auto out_features = weight.size(0);

    // Ensure that the inputs are contiguous
    x = x.contiguous();
    weight = weight.contiguous();
    bias = bias.contiguous();

    // Perform the linear transformation: x_linear = x @ weight.T + bias
    auto x_linear = torch::addmm(bias, x, weight.t());
    auto output = torch::empty_like(x_linear);

    // Each block handles one output feature
    const int blocks = out_features;

    // Experiment with block sizes based on the batch_size (tuning parameter)
    int block_size = 32;
    if (batch_size >= 512) {
        block_size = 512;
    } else if (batch_size >= 256) {
        block_size = 256;
    } else if (batch_size >= 128) {
        block_size = 128;
    } else if (batch_size >= 64) {
        block_size = 64;
    } else {
        block_size = 32;
    }

    // Launch the kernel with the selected block size by switching through supported sizes
    switch (block_size) {
        case 32:
            AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
                block_experiment_kernel<scalar_t, 32><<<blocks, 32, 2 * 32 * sizeof(float)>>>(
                    x_linear.data_ptr<scalar_t>(),
                    output.data_ptr<scalar_t>(),
                    bn_weight.data_ptr<scalar_t>(),
                    bn_bias.data_ptr<scalar_t>(),
                    bn_running_mean.data_ptr<scalar_t>(),
                    bn_running_var.data_ptr<scalar_t>(),
                    add_bias.data_ptr<scalar_t>(),
                    bn_eps,
                    bn_momentum,
                    divide_value,
                    batch_size,
                    out_features);
            }));
            break;
        case 64:
            AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
                block_experiment_kernel<scalar_t, 64><<<blocks, 64, 2 * 64 * sizeof(float)>>>(
                    x_linear.data_ptr<scalar_t>(),
                    output.data_ptr<scalar_t>(),
                    bn_weight.data_ptr<scalar_t>(),
                    bn_bias.data_ptr<scalar_t>(),
                    bn_running_mean.data_ptr<scalar_t>(),
                    bn_running_var.data_ptr<scalar_t>(),
                    add_bias.data_ptr<scalar_t>(),
                    bn_eps,
                    bn_momentum,
                    divide_value,
                    batch_size,
                    out_features);
            }));
            break;
        case 128:
            AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
                block_experiment_kernel<scalar_t, 128><<<blocks, 128, 2 * 128 * sizeof(float)>>>(
                    x_linear.data_ptr<scalar_t>(),
                    output.data_ptr<scalar_t>(),
                    bn_weight.data_ptr<scalar_t>(),
                    bn_bias.data_ptr<scalar_t>(),
                    bn_running_mean.data_ptr<scalar_t>(),
                    bn_running_var.data_ptr<scalar_t>(),
                    add_bias.data_ptr<scalar_t>(),
                    bn_eps,
                    bn_momentum,
                    divide_value,
                    batch_size,
                    out_features);
            }));
            break;
        case 256:
            AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
                block_experiment_kernel<scalar_t, 256><<<blocks, 256, 2 * 256 * sizeof(float)>>>(
                    x_linear.data_ptr<scalar_t>(),
                    output.data_ptr<scalar_t>(),
                    bn_weight.data_ptr<scalar_t>(),
                    bn_bias.data_ptr<scalar_t>(),
                    bn_running_mean.data_ptr<scalar_t>(),
                    bn_running_var.data_ptr<scalar_t>(),
                    add_bias.data_ptr<scalar_t>(),
                    bn_eps,
                    bn_momentum,
                    divide_value,
                    batch_size,
                    out_features);
            }));
            break;
        case 512:
            AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
                block_experiment_kernel<scalar_t, 512><<<blocks, 512, 2 * 512 * sizeof(float)>>>(
                    x_linear.data_ptr<scalar_t>(),
                    output.data_ptr<scalar_t>(),
                    bn_weight.data_ptr<scalar_t>(),
                    bn_bias.data_ptr<scalar_t>(),
                    bn_running_mean.data_ptr<scalar_t>(),
                    bn_running_var.data_ptr<scalar_t>(),
                    add_bias.data_ptr<scalar_t>(),
                    bn_eps,
                    bn_momentum,
                    divide_value,
                    batch_size,
                    out_features);
            }));
            break;
        default:
            // Default to 128 if none matched
            AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
                block_experiment_kernel<scalar_t, 128><<<blocks, 128, 2 * 128 * sizeof(float)>>>(
                    x_linear.data_ptr<scalar_t>(),
                    output.data_ptr<scalar_t>(),
                    bn_weight.data_ptr<scalar_t>(),
                    bn_bias.data_ptr<scalar_t>(),
                    bn_running_mean.data_ptr<scalar_t>(),
                    bn_running_var.data_ptr<scalar_t>(),
                    add_bias.data_ptr<scalar_t>(),
                    bn_eps,
                    bn_momentum,
                    divide_value,
                    batch_size,
                    out_features);
            }));
            break;
    }

    return output;
}

// PyBind11 module definition
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn_cuda, "Fused BN, bias add, division and Swish with block size tuning (CUDA)");
}
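
For readers cross-checking the math, the per-feature computation that each block performs can be mirrored in a few lines of PyTorch (an illustrative sketch, not part of the submission). One caveat: the kernel folds the biased (population) variance into the running_var update, whereas nn.BatchNorm1d stores the unbiased estimate in its running statistics, so the running stats can drift slightly from the PyTorch reference even when the outputs match.

import torch

def fused_column(x_linear, f, bn_weight, bn_bias, running_mean, running_var,
                 add_bias, bn_eps, bn_momentum, divide_value):
    """Mirror of what one CUDA block computes for feature column f."""
    col = x_linear[:, f].float()
    mean = col.mean()
    var = col.var(unbiased=False)  # biased variance, matching the kernel
    # Running-statistics update (done by thread 0 in the kernel).
    running_mean[f] = running_mean[f] * (1 - bn_momentum) + mean * bn_momentum
    running_var[f] = running_var[f] * (1 - bn_momentum) + var * bn_momentum
    # Normalize, scale/shift, add the extra bias, divide, then apply Swish.
    x_hat = (col - mean) * torch.rsqrt(var + bn_eps)
    x_div = (x_hat * bn_weight[f] + bn_bias[f] + add_bias[0]) / divide_value
    return x_div * torch.sigmoid(x_div)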
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.452 inst/cycle 0.000 5
Executed Ipc Elapsed 0.266 inst/cycle 0.000 5
Issue Slots Busy 11.562 % 0.049 5
Issued Ipc Active 0.462 inst/cycle 0.000 5
SM Busy 11.562 % 0.049 5
Memory Throughput 43258475226.588 byte/second 194714799944136800.000 5
Mem Busy 23.604 % 0.073 5
Max Bandwidth 22.068 % 0.046 5
L1/TEX Hit Rate 34.940 % 0.000 5
L2 Hit Rate 96.290 % 0.159 5
Mem Pipes Busy 8.366 % 0.008 5
Warp Cycles Per Issued Instruction 29.650 cycle 0.347 5
Warp Cycles Per Executed Instruction 30.432 cycle 0.369 5
Avg. Active Threads Per Warp 30.940 0.000 5
Avg. Not Predicated Off Threads Per Warp 20.880 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 16.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 16.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 21.508 % 0.235 5
Achieved Active Warps Per SM 13.766 warp 0.096 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN ThreadDivergence Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 30.9 threads being active per cycle. This is further reduced to 20.9 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp().
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (20.6%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 417333.28 μs
Device Time 188.22 μs
Self CPU Time 57.54 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 417275.74 μs
Device Time 188.22 μs
Self CPU Time 113.64 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 425874.59 μs
Device Time 0.00 μs
Self CPU Time 9355.68 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 415558.25 μs
Device Time 0.00 μs
Self CPU Time 415558.25 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 41112.51 μs
Device Time 244743.39 μs
Self CPU Time 5206.45 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 35924.10 μs
Device Time 244743.39 μs
Self CPU Time 7273.16 μs
Self Device Time 244743.39 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::addmm
CPU Time 232803.08 μs
Device Time 55230.52 μs
Self CPU Time 80366.37 μs
Self Device Time 55230.52 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
sm80_xmma_gemm_f32f32_f32f32_f32_tn_n_tilesize32x32x8_stage3_warpsize1x2x1_ffma_aligna4_alignc4_execute_kernel__51_cublas
CPU Time 0.00 μs
Device Time 49996.23 μs
Self CPU Time 0.00 μs
Self Device Time 49996.23 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 244743.39 μs
Self CPU Time 0.00 μs
Self Device Time 244743.39 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45324 warnings generated when compiling for host.
Suppressed 45331 warnings (45284 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:15:5: warning: 2 adjacent parameters of 'block_experiment_kernel' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
15 | const scalar_t* __restrict__ bn_weight, // [out_features]
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
16 | const scalar_t* __restrict__ bn_bias, // [out_features]
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:15:34: note: the first parameter in the range is 'bn_weight'
15 | const scalar_t* __restrict__ bn_weight, // [out_features]
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:16:34: note: the last parameter in the range is 'bn_bias'
16 | const scalar_t* __restrict__ bn_bias, // [out_features]
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:17:5: warning: 2 adjacent parameters of 'block_experiment_kernel' of similar type ('scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
17 | scalar_t* __restrict__ bn_running_mean, // [out_features]
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
18 | scalar_t* __restrict__ bn_running_var, // [out_features]
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:17:28: note: the first parameter in the range is 'bn_running_mean'
17 | scalar_t* __restrict__ bn_running_mean, // [out_features]
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:18:28: note: the last parameter in the range is 'bn_running_var'
18 | scalar_t* __restrict__ bn_running_var, // [out_features]
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:20:5: warning: 5 adjacent parameters of 'block_experiment_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
20 | const float bn_eps,
| ^~~~~~~~~~~~~~~~~~~
21 | const float bn_momentum,
| ~~~~~~~~~~~~~~~~~~~~~~~~
22 | const float divide_value,
| ~~~~~~~~~~~~~~~~~~~~~~~~~
23 | const int batch_size,
| ~~~~~~~~~~~~~~~~~~~~~
24 | const int out_features) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:20:17: note: the first parameter in the range is 'bn_eps'
20 | const float bn_eps,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:24:15: note: the last parameter in the range is 'out_features'
24 | const int out_features) {
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:23:5: note: 'const float' and 'const int' may be implicitly converted: 'const float' (as 'float') -> 'const int' (as 'int'), 'const int' (as 'int') -> 'const float' (as 'float')
23 | const int batch_size,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:27:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
27 | int f = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:29:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
29 | const int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:60:33: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
60 | float mean = s_sum[0] / batch_size;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:61:34: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
61 | float var = s_sumsq[0] / batch_size - mean * mean;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:100:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
100 | torch::Tensor bias,
| ^~~~~~~~~~~~~~~~~~~
101 | torch::Tensor bn_weight,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:100:19: note: the first parameter in the range is 'bias'
100 | torch::Tensor bias,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:101:19: note: the last parameter in the range is 'bn_weight'
101 | torch::Tensor bn_weight,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:108:16: warning: Value stored to 'in_features' during its initialization is never read [clang-analyzer-deadcode.DeadStores]
108 | const auto in_features = x.size(1);
| ^~~~~~~~~~~ ~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:108:16: note: Value stored to 'in_features' during its initialization is never read
108 | const auto in_features = x.size(1);
| ^~~~~~~~~~~ ~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:121:24: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
121 | const int blocks = out_features;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:140:13: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
140 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:141:69: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
141 | block_experiment_kernel<scalar_t, 32><<<blocks, 32, 2 * 32 * sizeof(float)>>>(
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:141:69: note: make conversion explicit to silence this warning
141 | block_experiment_kernel<scalar_t, 32><<<blocks, 32, 2 * 32 * sizeof(float)>>>(
| ^
| static_cast<unsigned long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:141:69: note: perform multiplication in a wider type
141 | block_experiment_kernel<scalar_t, 32><<<blocks, 32, 2 * 32 * sizeof(float)>>>(
| ^
| static_cast<long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:157:13: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
157 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:158:69: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
158 | block_experiment_kernel<scalar_t, 64><<<blocks, 64, 2 * 64 * sizeof(float)>>>(
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:158:69: note: make conversion explicit to silence this warning
158 | block_experiment_kernel<scalar_t, 64><<<blocks, 64, 2 * 64 * sizeof(float)>>>(
| ^
| static_cast<unsigned long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:158:69: note: perform multiplication in a wider type
158 | block_experiment_kernel<scalar_t, 64><<<blocks, 64, 2 * 64 * sizeof(float)>>>(
| ^
| static_cast<long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:174:13: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
174 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:175:71: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
175 | block_experiment_kernel<scalar_t, 128><<<blocks, 128, 2 * 128 * sizeof(float)>>>(
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:175:71: note: make conversion explicit to silence this warning
175 | block_experiment_kernel<scalar_t, 128><<<blocks, 128, 2 * 128 * sizeof(float)>>>(
| ^
| static_cast<unsigned long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:175:71: note: perform multiplication in a wider type
175 | block_experiment_kernel<scalar_t, 128><<<blocks, 128, 2 * 128 * sizeof(float)>>>(
| ^
| static_cast<long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:191:13: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
191 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:192:71: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
192 | block_experiment_kernel<scalar_t, 256><<<blocks, 256, 2 * 256 * sizeof(float)>>>(
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:192:71: note: make conversion explicit to silence this warning
192 | block_experiment_kernel<scalar_t, 256><<<blocks, 256, 2 * 256 * sizeof(float)>>>(
| ^
| static_cast<unsigned long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:192:71: note: perform multiplication in a wider type
192 | block_experiment_kernel<scalar_t, 256><<<blocks, 256, 2 * 256 * sizeof(float)>>>(
| ^
| static_cast<long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:208:13: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
208 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:209:71: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
209 | block_experiment_kernel<scalar_t, 512><<<blocks, 512, 2 * 512 * sizeof(float)>>>(
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:209:71: note: make conversion explicit to silence this warning
209 | block_experiment_kernel<scalar_t, 512><<<blocks, 512, 2 * 512 * sizeof(float)>>>(
| ^
| static_cast<unsigned long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:209:71: note: perform multiplication in a wider type
209 | block_experiment_kernel<scalar_t, 512><<<blocks, 512, 2 * 512 * sizeof(float)>>>(
| ^
| static_cast<long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:226:13: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
226 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "block_experiment_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:227:71: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
227 | block_experiment_kernel<scalar_t, 128><<<blocks, 128, 2 * 128 * sizeof(float)>>>(
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:227:71: note: make conversion explicit to silence this warning
227 | block_experiment_kernel<scalar_t, 128><<<blocks, 128, 2 * 128 * sizeof(float)>>>(
| ^
| static_cast<unsigned long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b6_s1_block_experiment_fused_bn_swish/base/base.cu:227:71: note: perform multiplication in a wider type
227 | block_experiment_kernel<scalar_t, 128><<<blocks, 128, 2 * 128 * sizeof(float)>>>(
| ^
| static_cast<long>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~