19 | scalar_t* __restrict__ bn_running_mean, // Running mean, shape [out_features]
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20 | scalar_t* __restrict__ bn_running_var, // Running variance, shape [out_features]
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:19:28: note: the first parameter in the range is 'bn_running_mean'
19 | scalar_t* __restrict__ bn_running_mean, // Running mean, shape [out_features]
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:20:28: note: the last parameter in the range is 'bn_running_var'
20 | scalar_t* __restrict__ bn_running_var, // Running variance, shape [out_features]
| ^~~~~~~~~~~~~~
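// Tail of a bugprone-easily-swappable-parameters warning: the running-mean and
// running-variance pointers share a type, so a launch site that swaps them still
// compiles. A minimal sketch (hypothetical names, not the file's actual code) of
// the structural fix: pass the pair as one self-describing argument so the host
// code fills each member by name.
template <typename scalar_t>
struct BNRunningStats {
    scalar_t* mean;  // running mean,     shape [out_features]
    scalar_t* var;   // running variance, shape [out_features]
};
// Host side (sketch):
//   fused_bn_swish_kernel<scalar_t><<<blocks, threads, shared_mem_size>>>(
//       ...,
//       BNRunningStats<scalar_t>{bn_running_mean.data_ptr<scalar_t>(),
//                                bn_running_var.data_ptr<scalar_t>()},
//       ...);
// If the flat parameter list is intentional (common for CUDA kernels), the check
// can instead be silenced with // NOLINTNEXTLINE(bugprone-easily-swappable-parameters).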
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:22:5: warning: 5 adjacent parameters of 'fused_bn_swish_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
22 | const float bn_eps, // BatchNorm epsilon
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 | const float bn_momentum, // BatchNorm momentum
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 | const float divide_value, // Division value
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25 | const int batch_size,
| ~~~~~~~~~~~~~~~~~~~~~
26 | const int out_features) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:22:17: note: the first parameter in the range is 'bn_eps'
22 | const float bn_eps, // BatchNorm epsilon
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:26:15: note: the last parameter in the range is 'out_features'
26 | const int out_features) {
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:25:5: note: 'const float' and 'const int' may be implicitly converted: 'const float' (as 'float') -> 'const int' (as 'int'), 'const int' (as 'int') -> 'const float' (as 'float')
25 | const int batch_size,
| ^
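// The five scalars above are mutually convertible (float <-> int), so reordering
// them at the launch site would also compile silently. A minimal sketch along the
// same lines (hypothetical struct, assuming the parameter list shown above):
// bundle them into one trivially copyable struct passed to the kernel by value.
struct FusedBNSwishConfig {
    float bn_eps;        // BatchNorm epsilon
    float bn_momentum;   // BatchNorm momentum
    float divide_value;  // Division value
    int   batch_size;
    int   out_features;
};
// Host side (sketch): each field is set by name, so argument order cannot be mixed up.
//   FusedBNSwishConfig cfg{};
//   cfg.bn_eps       = static_cast<float>(bn_eps);
//   cfg.bn_momentum  = static_cast<float>(bn_momentum);
//   cfg.divide_value = static_cast<float>(divide_value);
//   cfg.batch_size   = static_cast<int>(batch_size);
//   cfg.out_features = static_cast<int>(out_features);
//   fused_bn_swish_kernel<scalar_t><<<blocks, threads, shared_mem_size>>>(..., cfg);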
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:29:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
29 | int f = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:31:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
31 | int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:42:44: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
42 | for (int i = tid; i < batch_size; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:53:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
53 | for (int s = blockDim.x / 2; s > 0; s >>= 1) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:62:29: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
62 | float mean = s_sum[0] / batch_size;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:63:30: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
63 | float var = s_sumsq[0] / batch_size - mean * mean;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:73:44: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
73 | for (int i = tid; i < batch_size; i += blockDim.x) {
| ^
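// blockIdx.x / threadIdx.x / blockDim.x are unsigned int, and the mean/variance
// step divides a float accumulator by the int batch_size, which is what the
// narrowing-conversion warnings above flag. A minimal, self-contained kernel
// sketch (hypothetical and simplified, mirroring only the structure the warnings
// describe, not the file's actual fused kernel) with the conversions made explicit:
__global__ void column_mean_var_kernel(const float* __restrict__ x,
                                       float* __restrict__ mean_out,
                                       float* __restrict__ var_out,
                                       const int batch_size,
                                       const int out_features) {
    // Launch with one block per feature column, a power-of-two block size,
    // and 2 * blockDim.x * sizeof(float) bytes of dynamic shared memory.
    extern __shared__ float smem[];
    float* s_sum   = smem;                // blockDim.x partial sums
    float* s_sumsq = smem + blockDim.x;   // blockDim.x partial sums of squares

    const int f      = static_cast<int>(blockIdx.x);
    const int tid    = static_cast<int>(threadIdx.x);
    const int stride = static_cast<int>(blockDim.x);

    float sum = 0.0f, sumsq = 0.0f;
    for (int i = tid; i < batch_size; i += stride) {
        const float v = x[i * out_features + f];
        sum   += v;
        sumsq += v * v;
    }
    s_sum[tid]   = sum;
    s_sumsq[tid] = sumsq;
    __syncthreads();

    // Shared-memory tree reduction (assumes blockDim.x is a power of two).
    for (int s = stride / 2; s > 0; s >>= 1) {
        if (tid < s) {
            s_sum[tid]   += s_sum[tid + s];
            s_sumsq[tid] += s_sumsq[tid + s];
        }
        __syncthreads();
    }

    if (tid == 0) {
        const float inv_n = 1.0f / static_cast<float>(batch_size);
        const float mean  = s_sum[0] * inv_n;
        mean_out[f] = mean;
        var_out[f]  = s_sumsq[0] * inv_n - mean * mean;
    }
}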
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:92:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
92 | torch::Tensor bias,
| ^~~~~~~~~~~~~~~~~~~
93 | torch::Tensor bn_weight,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:92:19: note: the first parameter in the range is 'bias'
92 | torch::Tensor bias,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:93:19: note: the last parameter in the range is 'bn_weight'
93 | torch::Tensor bn_weight,
| ^~~~~~~~~
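// Every torch::Tensor parameter has the same type, so a call that swaps 'bias'
// and 'bn_weight' binds and compiles silently. A minimal sketch, assuming the
// function is exposed through the usual PYBIND11_MODULE block; the argument
// names and their order below are reconstructed guesses from the diagnostics
// and would need to match the real signature of module_fn_cuda:
#include <torch/extension.h>

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn_cuda,
          "Fused Linear + BatchNorm + Swish forward (CUDA)",
          py::arg("x"), py::arg("bn_eps"), py::arg("bn_momentum"),
          py::arg("divide_value"), py::arg("weight"), py::arg("bias"),
          py::arg("bn_weight"), py::arg("bn_bias"),
          py::arg("bn_running_mean"), py::arg("bn_running_var"));
}
// With named arguments, Python callers can pass keywords, which removes the
// positional-swap hazard the check is warning about.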
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:100:16: warning: Value stored to 'in_features' during its initialization is never read [clang-analyzer-deadcode.DeadStores]
100 | const auto in_features = x.size(1);
| ^~~~~~~~~~~ ~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:100:16: note: Value stored to 'in_features' during its initialization is never read
100 | const auto in_features = x.size(1);
| ^~~~~~~~~~~ ~~~~~~~~~
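// 'in_features' is computed but never read. A minimal sketch of the two usual
// remedies (hypothetical; 'weight' is assumed to be the [out_features, in_features]
// linear weight): either delete the variable, or put it to work as a shape check
// so the dead store becomes documentation of the expected layout.
const auto in_features = x.size(1);
TORCH_CHECK(weight.size(1) == in_features,
            "weight must have shape [out_features, in_features]");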
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:116:24: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
116 | const int blocks = out_features; // one block per feature column
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:117:36: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
117 | const size_t shared_mem_size = 2 * threads * sizeof(float);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:117:36: note: make conversion explicit to silence this warning
117 | const size_t shared_mem_size = 2 * threads * sizeof(float);
| ^~~~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:117:36: note: perform multiplication in a wider type
117 | const size_t shared_mem_size = 2 * threads * sizeof(float);
| ^
| static_cast<long>( )
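// 'out_features' is an int64_t and the shared-memory size is multiplied in int
// before being widened to size_t. A minimal sketch of the launch-configuration
// lines with both conversions made explicit, following the fix-it hints above
// (the block size of 256 is an assumption, not the file's actual value):
const int threads = 256;
const int blocks  = static_cast<int>(out_features);   // one block per feature column
const size_t shared_mem_size =
    2 * static_cast<size_t>(threads) * sizeof(float); // s_sum + s_sumsq buffers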
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b1_s3_fused_bn_swish/base/base.cu:119:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
119 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
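// This last warning originates inside PyTorch's AT_DISPATCH / TORCH_CHECK macros,
// where __func__ expands to the dispatch lambda's operator() rather than
// 'module_fn_cuda'; it is usually treated as header noise (suppressed via NOLINT
// or a .clang-tidy filter). The general remedy the check suggests, as a minimal
// self-contained sketch with a hypothetical function:
#include <cstdio>

void module_fn_cuda_example() {
    const char* const enclosing_name = __func__;   // "module_fn_cuda_example"
    auto body = [enclosing_name] {
        // Inside the lambda, __func__ would expand to "operator()", so use the
        // captured name of the enclosing function for diagnostics instead.
        std::printf("dispatching from %s\n", enclosing_name);
    };
    body();
}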