31 | const scalar_t* __restrict__ input,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
32 | const scalar_t* __restrict__ weight,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:31:34: note: the first parameter in the range is 'input'
31 | const scalar_t* __restrict__ input,
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:32:34: note: the last parameter in the range is 'weight'
32 | const scalar_t* __restrict__ weight,
| ^~~~~~
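The two notes above belong to a bugprone-easily-swappable-parameters warning on the adjacent `input`/`weight` pointer parameters. One possible remedy, sketched below under the assumption that the kernel signature may change, is to pass same-typed pointers through a small named aggregate so call sites cannot transpose them silently; the struct name `LayerNormPtrs` and the placeholder body are illustrative, not the file's actual code.

```cpp
// Illustrative sketch only: bundle the same-typed pointer parameters so the
// call site names each field instead of relying on argument order.
template <typename scalar_t>
struct LayerNormPtrs {
    const scalar_t* input;
    const scalar_t* weight;
};

template <typename scalar_t>
__global__ void layernorm_ptrs_sketch(LayerNormPtrs<scalar_t> ptrs,
                                      scalar_t* __restrict__ output,
                                      const int normalized_size) {
    // Re-establish __restrict__-qualified locals so read-only loads remain possible.
    const scalar_t* __restrict__ input  = ptrs.input;
    const scalar_t* __restrict__ weight = ptrs.weight;

    const int i = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
    if (i < normalized_size) {
        // Placeholder body; the real kernel performs the LayerNorm reduction.
        output[i] = input[i] * weight[i];
    }
}
```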
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:36:5: warning: 3 adjacent parameters of 'layernorm_streamed_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
36 | const int normalized_size,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
37 | const int chunk_size,
| ~~~~~~~~~~~~~~~~~~~~~
38 | const int chunk_offset) {
| ~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:36:15: note: the first parameter in the range is 'normalized_size'
36 | const int normalized_size,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:38:15: note: the last parameter in the range is 'chunk_offset'
38 | const int chunk_offset) {
| ^~~~~~~~~~~~
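For the three adjacent `const int` parameters flagged here, a lighter-weight alternative is to keep the signature and suppress the check where the ordering is intentional. The sketch below abbreviates the signature to the parameters visible in this log and omits the body; it is not the file's actual code.

```cpp
// Sketch: acknowledge and locally suppress the check when the adjacent int
// parameters are intentional. Only parameters visible in the log are shown.
template <typename scalar_t>
__global__ void layernorm_streamed_kernel_sketch(
    const scalar_t* __restrict__ input,
    const scalar_t* __restrict__ weight,
    // NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
    const int normalized_size,
    const int chunk_size,
    const int chunk_offset) {
    // Body omitted in this sketch.
    (void)input; (void)weight;
    (void)normalized_size; (void)chunk_size; (void)chunk_offset;
}
```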
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:42:22: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
42 | const int tidx = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:43:22: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
43 | const int tidy = threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:44:30: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
44 | const int instance_idx = blockIdx.x + chunk_offset;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:53:31: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
53 | const int thread_stride = blockDim.x * blockDim.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:54:27: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
54 | const int thread_id = tidy * blockDim.x + tidx;
| ^
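The narrowing warnings above all come from storing the unsigned CUDA built-ins (`threadIdx`, `blockIdx`, `blockDim`) in `int` locals. A minimal sketch of making those conversions explicit, assuming the locals should stay `int` for signed index arithmetic:

```cpp
__global__ void index_cast_sketch(const int chunk_offset) {
    // Explicit casts keep the int-typed locals while documenting the
    // unsigned -> signed conversions that clang-tidy flags.
    const int tidx = static_cast<int>(threadIdx.x);
    const int tidy = static_cast<int>(threadIdx.y);
    const int instance_idx  = static_cast<int>(blockIdx.x) + chunk_offset;
    const int thread_stride = static_cast<int>(blockDim.x * blockDim.y);
    const int thread_id     = tidy * static_cast<int>(blockDim.x) + tidx;

    // Silence unused-variable warnings in this stand-alone sketch.
    (void)instance_idx; (void)thread_stride; (void)thread_id;
}
```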
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:116:33: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
116 | const int normalized_size = weight.numel();
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:117:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
117 | const int outer_size = x.numel() / normalized_size;
| ^
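The two `int64_t` narrowing warnings come from assigning `Tensor::numel()` results directly to `int`. A hedged host-side sketch that keeps the `int` locals but makes the narrowing explicit and guarded (the helper name `compute_sizes` is illustrative):

```cpp
#include <torch/extension.h>
#include <limits>
#include <utility>

// Illustrative helper: compute the LayerNorm sizes in int64_t, verify they fit
// in 32 bits, and only then narrow explicitly.
static std::pair<int, int> compute_sizes(const torch::Tensor& x,
                                         const torch::Tensor& weight) {
    const int64_t normalized_size64 = weight.numel();
    const int64_t outer_size64 = x.numel() / normalized_size64;
    TORCH_CHECK(normalized_size64 <= std::numeric_limits<int>::max() &&
                outer_size64 <= std::numeric_limits<int>::max(),
                "tensor sizes exceed 32-bit indexing range");
    return {static_cast<int>(normalized_size64),
            static_cast<int>(outer_size64)};
}
```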
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:121:33: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'unsigned int' [bugprone-implicit-widening-of-multiplication-result]
121 | const int shared_mem_size = threads.x * threads.y * 2 * sizeof(float);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:121:33: note: make conversion explicit to silence this warning
121 |     const int shared_mem_size = threads.x * threads.y * 2 * sizeof(float);
| ^~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:121:33: note: perform multiplication in a wider type
121 | const int shared_mem_size = threads.x * threads.y * 2 * sizeof(float);
| ^~~~~~~~~~~~~~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:121:33: warning: narrowing conversion from 'unsigned long' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
121 | const int shared_mem_size = threads.x * threads.y * 2 * sizeof(float);
| ^
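Both notes for line 121 suggest performing the shared-memory multiplication in a wider type. A sketch following that hint, assuming the same two floats of shared storage per thread and returning the byte count as the `size_t` a kernel launch expects:

```cpp
#include <cuda_runtime.h>
#include <cstddef>

// Sketch: perform the multiplication in size_t so there is no implicit
// widening and no narrowing back to int.
static size_t shared_bytes(const dim3& threads) {
    return static_cast<size_t>(threads.x) * threads.y * 2 * sizeof(float);
}
```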
/home/robert_sakana_ai/llm_cuda/experiments/20250211_optimize_b5_s4_e1_v2/level_1/task_40/b4_s3_optimized_layernorm_streamed/base/base.cu:123:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
123 | AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "layernorm_forward_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
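The final warning originates inside PyTorch's `AT_DISPATCH_FLOATING_TYPES` machinery, where `TORCH_CHECK` uses `__func__` inside the generated lambda; the dispatch site itself is idiomatic. A common, low-risk response is to suppress the check at the dispatch line, sketched below with the kernel launch omitted:

```cpp
#include <torch/extension.h>

// Sketch: keep the standard dispatch pattern and acknowledge that __func__
// inside the macro-generated lambda names the call operator.
static void dispatch_sketch(const torch::Tensor& x) {
    // NOLINTNEXTLINE(bugprone-lambda-function-name)
    AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "layernorm_forward_cuda", ([&] {
        using value_t = scalar_t;  // scalar_t is provided by the dispatch macro
        (void)sizeof(value_t);     // kernel launch omitted in this sketch
    }));
}
```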