9 | __device__ void load_A_tile(const scalar_t* __restrict__ A, scalar_t sA[TILE_WIDTH][TILE_WIDTH], int M, int K, int t) {
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:9:109: note: the first parameter in the range is 'K'
9 | __device__ void load_A_tile(const scalar_t* __restrict__ A, scalar_t sA[TILE_WIDTH][TILE_WIDTH], int M, int K, int t) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:9:116: note: the last parameter in the range is 't'
9 | __device__ void load_A_tile(const scalar_t* __restrict__ A, scalar_t sA[TILE_WIDTH][TILE_WIDTH], int M, int K, int t) {
| ^
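These bugprone-easily-swappable-parameters notes flag that `K` and `t` are adjacent `int` parameters, so a caller can transpose them without any compile error. One common remedy, sketched below on a hypothetical variant of the loader signature (the `MatDims` struct is an assumption for illustration, not code from base.cu), is to pass the matrix dimensions as a named bundle so only the tile index remains a bare `int`:

// Hypothetical signature sketch; MatDims is not part of the original file.
struct MatDims { int M; int N; int K; };

template <typename scalar_t>
__device__ void load_A_tile(const scalar_t* __restrict__ A,
                            scalar_t sA[TILE_WIDTH][TILE_WIDTH],
                            MatDims dims, int t);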
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:10:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
10 | int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:11:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
11 | int col = t * TILE_WIDTH + threadIdx.x;
| ^
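The two bugprone-narrowing-conversions warnings arise because the `blockIdx`/`threadIdx` components are `unsigned int`, so the mixed arithmetic is carried out in unsigned and then narrowed to the signed `int` on the left-hand side. A minimal sketch of the usual fix, making the conversions explicit so the checker is satisfied without changing the computed indices:

// Explicit unsigned -> signed casts; the thread/block indices are small enough
// that the conversion is always exact.
int row = static_cast<int>(blockIdx.y) * TILE_WIDTH + static_cast<int>(threadIdx.y);
int col = t * TILE_WIDTH + static_cast<int>(threadIdx.x);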
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:20:105: warning: 2 adjacent parameters of 'load_B_tile' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
20 | __device__ void load_B_tile(const scalar_t* __restrict__ B, scalar_t sB[TILE_WIDTH][TILE_WIDTH], int N, int K, int t) {
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:20:109: note: the first parameter in the range is 'K'
20 | __device__ void load_B_tile(const scalar_t* __restrict__ B, scalar_t sB[TILE_WIDTH][TILE_WIDTH], int N, int K, int t) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:20:116: note: the last parameter in the range is 't'
20 | __device__ void load_B_tile(const scalar_t* __restrict__ B, scalar_t sB[TILE_WIDTH][TILE_WIDTH], int N, int K, int t) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:21:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
21 | int row = t * TILE_WIDTH + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:22:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
22 | int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:42:36: warning: 2 adjacent parameters of 'matmul_cuda_kernel' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
42 | __global__ void matmul_cuda_kernel(const scalar_t* __restrict__ A, const scalar_t* __restrict__ B,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:42:65: note: the first parameter in the range is 'A'
42 | __global__ void matmul_cuda_kernel(const scalar_t* __restrict__ A, const scalar_t* __restrict__ B,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:42:97: note: the last parameter in the range is 'B'
42 | __global__ void matmul_cuda_kernel(const scalar_t* __restrict__ A, const scalar_t* __restrict__ B,
| ^
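Here the two flagged parameters are the `A` and `B` input pointers themselves, which are intentionally the same type and in the conventional C = A * B order, so restructuring buys little; the typical response is a targeted suppression. A sketch using clang-tidy's NOLINTNEXTLINE mechanism (the trailing `C`/`M`/`K`/`N` parameters are assumed here for illustration, since only `A` and `B` appear in the diagnostic):

// Hypothetical signature; only A and B are taken from the diagnostic above.
template <typename scalar_t>
// NOLINTNEXTLINE(bugprone-easily-swappable-parameters): A/B ordering mirrors C = A * B.
__global__ void matmul_cuda_kernel(const scalar_t* __restrict__ A,
                                   const scalar_t* __restrict__ B,
                                   scalar_t* __restrict__ C,
                                   int M, int K, int N);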
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:47:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
47 | int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:48:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
48 | int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_6/b3_s2_6_matmul_modular_device/base/base.cu:85:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
85 | AT_DISPATCH_FLOATING_TYPES(A.scalar_type(), "matmul_cuda_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
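This last warning is generated entirely inside the AT_DISPATCH_FLOATING_TYPES expansion: the TORCH_CHECK used by the macro references `__func__`, which inside the dispatch lambda names `operator()` rather than the enclosing host function. Since the pattern comes from the PyTorch headers and not from base.cu itself, it is usually left alone or suppressed at the call site. A hedged sketch of such a suppression, with the kernel launch arguments assumed for illustration:

// The __func__ expansion happens inside the AT_DISPATCH machinery, so a call-site
// suppression is one common approach; blocks/threads and M, K, N below are assumptions.
// NOLINTNEXTLINE(bugprone-lambda-function-name)
AT_DISPATCH_FLOATING_TYPES(A.scalar_type(), "matmul_cuda_kernel", ([&] {
    matmul_cuda_kernel<scalar_t><<<blocks, threads>>>(
        A.data_ptr<scalar_t>(), B.data_ptr<scalar_t>(), C.data_ptr<scalar_t>(),
        M, K, N);
}));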