/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:13:35: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
   13 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:14:41: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
14 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
| ^
| ()
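A minimal sketch of the parenthesized form these two bugprone-macro-parentheses warnings ask for (same macro names and messages as in base.cu; only the parentheses around x are new):

// Parenthesizing the macro argument keeps a compound expression passed as x
// from re-associating inside the expansion.
#define CHECK_CUDA(x) TORCH_CHECK((x).is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK((x).is_contiguous(), #x " must be contiguous")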
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:37:5: warning: 3 adjacent parameters of 'module_fn_kernel_optimized' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
37 | const scalar_t* __restrict__ group_norm_weight, // gamma, shape (C)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
38 | const scalar_t* __restrict__ group_norm_bias, // beta, shape (C)
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
39 | const scalar_t* __restrict__ multiply_weight, // weight, shape (C)
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:37:34: note: the first parameter in the range is 'group_norm_weight'
37 | const scalar_t* __restrict__ group_norm_weight, // gamma, shape (C)
| ^~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:39:34: note: the last parameter in the range is 'multiply_weight'
39 | const scalar_t* __restrict__ multiply_weight, // weight, shape (C)
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:40:5: warning: 2 adjacent parameters of 'module_fn_kernel_optimized' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
40 | const int C, // Total channels
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
41 | const int channels_per_group // Channels per group
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:40:15: note: the first parameter in the range is 'C'
40 | const int C, // Total channels
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:41:15: note: the last parameter in the range is 'channels_per_group'
41 | const int channels_per_group // Channels per group
| ^~~~~~~~~~~~~~~~~~
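One way to address the easily-swappable-parameters warnings above, shown only as a sketch (the struct name and the grouping are not from the file): bundle the same-typed kernel arguments into a named parameter block so gamma/beta/multiply_weight and C/channels_per_group cannot be transposed silently at the launch site.

// Hypothetical parameter block: each argument gets a named slot instead of a
// position in a list of identical pointer and int types.
template <typename scalar_t>
struct GroupNormArgs {
    const scalar_t* group_norm_weight;  // gamma, shape (C)
    const scalar_t* group_norm_bias;    // beta, shape (C)
    const scalar_t* multiply_weight;    // weight, shape (C)
    int C;                              // total channels
    int channels_per_group;             // channels per group
};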
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:44:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
44 | int n = blockIdx.x; // sample index
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:45:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
45 | int g = blockIdx.y; // group index
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:52:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
52 | for (int c = threadIdx.x; c < channels_per_group; c += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:52:60: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
52 | for (int c = threadIdx.x; c < channels_per_group; c += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:63:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
63 | int lane = threadIdx.x % warpSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:64:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
64 | int warpId = threadIdx.x / warpSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:78:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
78 | int numWarps = (blockDim.x + warpSize - 1) / warpSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:102:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
102 | for (int c = threadIdx.x; c < channels_per_group; c += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:102:60: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
102 | for (int c = threadIdx.x; c < channels_per_group; c += blockDim.x) {
| ^
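All of the narrowing-conversion warnings in this block come from storing the unsigned CUDA built-ins blockIdx, threadIdx and blockDim in int variables. A sketch of the usual remedy, assuming the launch configuration keeps these values within int range, is to make the conversion explicit:

// Explicit casts document the unsigned -> int narrowing that clang-tidy flags.
int n      = static_cast<int>(blockIdx.x);   // sample index
int g      = static_cast<int>(blockIdx.y);   // group index
int lane   = static_cast<int>(threadIdx.x) % warpSize;
int warpId = static_cast<int>(threadIdx.x) / warpSize;
int numWarps = (static_cast<int>(blockDim.x) + warpSize - 1) / warpSize;
for (int c = static_cast<int>(threadIdx.x); c < channels_per_group;
     c += static_cast<int>(blockDim.x)) {
    // ... per-channel work ...
}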
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:127:19: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
127 | torch::Tensor x,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:128:19: warning: the parameter 'gemm_weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
128 | torch::Tensor gemm_weight,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:129:5: warning: 2 adjacent parameters of 'module_fn_cuda_forward' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
129 | torch::Tensor gemm_bias,
| ^~~~~~~~~~~~~~~~~~~~~~~~
130 | torch::Tensor group_norm_weight,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:129:19: note: the first parameter in the range is 'gemm_bias'
129 | torch::Tensor gemm_bias,
| ^~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:130:19: note: the last parameter in the range is 'group_norm_weight'
130 | torch::Tensor group_norm_weight,
| ^~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:129:19: warning: the parameter 'gemm_bias' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
129 | torch::Tensor gemm_bias,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:141:30: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
141 | int channels_per_group = C / num_groups;
| ^
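A sketch of the changes these warnings point at for module_fn_cuda_forward: taking the torch::Tensor parameters by const reference and making the int64_t -> int narrowing at line 141 explicit. Only x, gemm_weight, gemm_bias and group_norm_weight are named in the diagnostics; the rest of the signature and the body are assumptions and are omitted.

#include <torch/extension.h>

// Const references avoid one Tensor copy per argument per call.
torch::Tensor module_fn_cuda_forward(
    const torch::Tensor& x,
    const torch::Tensor& gemm_weight,
    const torch::Tensor& gemm_bias,         // adjacent to group_norm_weight (same type):
    const torch::Tensor& group_norm_weight  // easy to swap at the call site
    /* remaining parameters unchanged and omitted here */);

// Line 141 with the narrowing made explicit (assumes C / num_groups fits in int):
// int channels_per_group = static_cast<int>(C / num_groups);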
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:148:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
148 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:166:19: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
166 | torch::Tensor x,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:167:19: warning: the parameter 'gemm_weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
167 | torch::Tensor gemm_weight,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:168:19: warning: the parameter 'gemm_bias' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
168 | torch::Tensor gemm_bias,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:169:19: warning: the parameter 'group_norm_weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
169 | torch::Tensor group_norm_weight,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:170:19: warning: the parameter 'group_norm_bias' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
170 | torch::Tensor group_norm_bias,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250203_optimize_b10_s4_e0_sweep/level_2/task_88/b4_s1_optimized_kernel/base/base.cu:171:19: warning: the parameter 'multiply_weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
171 | torch::Tensor multiply_weight,
| ^
| const &
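The same const-reference change applies to the wrapper declared at base.cu:166, whose name does not appear in these diagnostics; module_fn_forward below is a placeholder for it. All six flagged torch::Tensor parameters can be taken by const reference:

// Placeholder name for the function at base.cu:166; only the parameter names and
// types are taken from the diagnostics above.
torch::Tensor module_fn_forward(
    const torch::Tensor& x,
    const torch::Tensor& gemm_weight,
    const torch::Tensor& gemm_bias,
    const torch::Tensor& group_norm_weight,
    const torch::Tensor& group_norm_bias,
    const torch::Tensor& multiply_weight);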