9 | float* __restrict__ batch_mean,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 | float* __restrict__ batch_var,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:9:25: note: the first parameter in the range is 'batch_mean'
9 | float* __restrict__ batch_mean,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:10:25: note: the last parameter in the range is 'batch_var'
10 | float* __restrict__ batch_var,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:11:5: warning: 2 adjacent parameters of 'compute_batch_stats_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
11 | const int batch_size,
| ^~~~~~~~~~~~~~~~~~~~~
12 | const int out_features) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:11:15: note: the first parameter in the range is 'batch_size'
11 | const int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:12:15: note: the last parameter in the range is 'out_features'
12 | const int out_features) {
| ^~~~~~~~~~~~
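The two easily-swappable-parameters warnings above (the `float*` pair and the `int` pair on `compute_batch_stats_kernel`) can be addressed by bundling the arguments so a transposed call no longer compiles; the same idea would cover the analogous warnings reported for `module_kernel` further down. A minimal sketch, assuming a struct name and layout that are not in the original file:

```cpp
// Hypothetical argument bundle for compute_batch_stats_kernel; the struct name and
// field order are assumptions, not part of the original source.
struct BatchStatsArgs {
    float* batch_mean;   // per-feature mean output
    float* batch_var;    // per-feature variance output
    int batch_size;      // rows reduced per feature
    int out_features;    // number of features (one block per feature)
};

// The kernel would then take one aggregate instead of four adjacent,
// mutually convertible parameters that are easy to pass in the wrong order.
__global__ void compute_batch_stats_kernel(const float* __restrict__ x,
                                           BatchStatsArgs args);
```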
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:18:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
18 | const int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:19:26: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
19 | const int feat_idx = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:28:44: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
28 | for (int i = tid; i < batch_size; i += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:40:23: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
40 | for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:50:39: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
50 | float mean = shared_mean[0] / batch_size;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:51:43: warning: narrowing conversion from 'int' to 'float' [bugprone-narrowing-conversions]
51 | float variance = (shared_var[0] / batch_size) - (mean * mean);
| ^
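The unsigned-to-int and int-to-float narrowing warnings above all come from implicit conversions of CUDA's built-in index variables and of `batch_size`; making the conversions explicit silences them without changing behavior. A sketch of the affected kernel-body lines, assuming the surrounding reduction code stays as reported in the diagnostics:

```cpp
// Explicit casts for the thread/block indices (threadIdx.x etc. are unsigned int).
const int tid      = static_cast<int>(threadIdx.x);
const int feat_idx = static_cast<int>(blockIdx.x);

for (int i = tid; i < batch_size; i += static_cast<int>(blockDim.x)) {
    // ... accumulate per-feature sum and sum of squares as before ...
}

for (int stride = static_cast<int>(blockDim.x) / 2; stride > 0; stride >>= 1) {
    // ... shared-memory tree reduction as before ...
}

// Convert batch_size to float once so the mean/variance math has no implicit
// int -> float conversion at each use.
const float n        = static_cast<float>(batch_size);
const float mean     = shared_mean[0] / n;
const float variance = shared_var[0] / n - mean * mean;
```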
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:63:5: warning: 4 adjacent parameters of 'module_kernel' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
63 | const scalar_t* __restrict__ bn_bias,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
64 | const scalar_t* __restrict__ batch_mean,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
65 | const scalar_t* __restrict__ batch_var,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
66 | const scalar_t* __restrict__ add_bias,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:63:34: note: the first parameter in the range is 'bn_bias'
63 | const scalar_t* __restrict__ bn_bias,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:66:34: note: the last parameter in the range is 'add_bias'
66 | const scalar_t* __restrict__ add_bias,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:67:5: warning: 4 adjacent parameters of 'module_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
67 | const float bn_eps,
| ^~~~~~~~~~~~~~~~~~~
68 | const float divide_value,
| ~~~~~~~~~~~~~~~~~~~~~~~~~
69 | const int total_elements,
| ~~~~~~~~~~~~~~~~~~~~~~~~~
70 | const int out_features) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:67:17: note: the first parameter in the range is 'bn_eps'
67 | const float bn_eps,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:70:15: note: the last parameter in the range is 'out_features'
70 | const int out_features) {
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:69:5: note: 'const float' and 'const int' may be implicitly converted: 'const float' (as 'float') -> 'const int' (as 'int'), 'const int' (as 'int') -> 'const float' (as 'float')
69 | const int total_elements,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:72:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
72 | const int idx = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:91:19: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
91 | torch::Tensor x,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:92:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('float') are easily swapped by mistake [bugprone-easily-swappable-parameters]
92 | float bn_eps,
| ^~~~~~~~~~~~~
93 | float bn_momentum,
| ~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:92:11: note: the first parameter in the range is 'bn_eps'
92 | float bn_eps,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:93:11: note: the last parameter in the range is 'bn_momentum'
93 | float bn_momentum,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:95:19: warning: the parameter 'weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
95 | torch::Tensor weight,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:96:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
96 | torch::Tensor bias,
| ^~~~~~~~~~~~~~~~~~~
97 | torch::Tensor bn_weight,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:96:19: note: the first parameter in the range is 'bias'
96 | torch::Tensor bias,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:97:19: note: the last parameter in the range is 'bn_weight'
97 | torch::Tensor bn_weight,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:96:19: warning: the parameter 'bias' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
96 | torch::Tensor bias,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:98:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
98 | torch::Tensor bn_bias,
| ^~~~~~~~~~~~~~~~~~~~~~
99 | torch::Tensor bn_running_mean,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:98:19: note: the first parameter in the range is 'bn_bias'
98 | torch::Tensor bn_bias,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:99:19: note: the last parameter in the range is 'bn_running_mean'
99 | torch::Tensor bn_running_mean,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:99:19: warning: the parameter 'bn_running_mean' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
99 | torch::Tensor bn_running_mean,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:100:5: warning: 2 adjacent parameters of 'module_fn_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
100 | torch::Tensor bn_running_var,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
101 | torch::Tensor add_bias) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:100:19: note: the first parameter in the range is 'bn_running_var'
100 | torch::Tensor bn_running_var,
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:101:19: note: the last parameter in the range is 'add_bias'
101 | torch::Tensor add_bias) {
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:100:19: warning: the parameter 'bn_running_var' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
100 | torch::Tensor bn_running_var,
| ^
| const &
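All of the performance-unnecessary-value-param warnings on `module_fn_cuda` point at the same fix-it: take the `torch::Tensor` arguments by const reference so each call avoids a shared_ptr refcount bump per tensor. A sketch of the host signature, built only from the parameters named in the diagnostics (the parameter at line 94 is not shown in this log and is assumed to be `divide_value`, matching the kernel):

```cpp
torch::Tensor module_fn_cuda(
    const torch::Tensor& x,
    float bn_eps,
    float bn_momentum,
    float divide_value,                    // assumed; line 94 is not shown in the log
    const torch::Tensor& weight,
    const torch::Tensor& bias,
    const torch::Tensor& bn_weight,
    const torch::Tensor& bn_bias,
    const torch::Tensor& bn_running_mean,
    const torch::Tensor& bn_running_var,
    const torch::Tensor& add_bias);
```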
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:113:33: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
113 | const int shared_mem_size = 2 * threads_stats * sizeof(float);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:113:33: note: make conversion explicit to silence this warning
4 | const int shared_mem_size = 2 * threads_stats * sizeof(float);
| ^~~~~~~~~~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:113:33: note: perform multiplication in a wider type
113 | const int shared_mem_size = 2 * threads_stats * sizeof(float);
| ^
| static_cast<long>( )
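Both suggested fixes amount to doing the multiplication in a wider type before the result is stored; computing the size directly as `size_t` (which is also what the dynamic shared-memory launch parameter takes) is one way to follow the note:

```cpp
// Perform the shared-memory size computation in size_t so the int multiplication
// is never implicitly widened after the fact.
const size_t shared_mem_size = 2 * static_cast<size_t>(threads_stats) * sizeof(float);
```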
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:118:9: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
118 | batch_size,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:119:9: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
119 | out_features);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:126:32: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
126 | const int total_elements = batch_size * out_features;
| ^
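These three narrowing warnings come from passing `int64_t` tensor sizes into `int` kernel parameters; casting once on the host makes the narrowing explicit at the launch site. A sketch, assuming `batch_size` and `out_features` are `int64_t` values obtained from the tensor shapes, as the diagnostics indicate:

```cpp
// Explicit host-side narrowing of the int64_t tensor dimensions used as kernel arguments.
const int batch_size_i   = static_cast<int>(batch_size);
const int out_features_i = static_cast<int>(out_features);
const int total_elements = batch_size_i * out_features_i;
```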
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_97/b3_s1_stream_optimized_fused_bn_swish/base/base.cu:130:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
130 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^