14 | int dilation,
| ^~~~~~~~~~~~~
15 | int input_height,
| ~~~~~~~~~~~~~~~~~
16 | int input_width
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:14:9: note: the first parameter in the range is 'dilation'
14 | int dilation,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:16:9: note: the last parameter in the range is 'input_width'
16 | int input_width
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:39:5: warning: 4 adjacent parameters of 'process_kernel_generic' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
39 | int dilation,
| ^~~~~~~~~~~~~
40 | int input_height,
| ~~~~~~~~~~~~~~~~~
41 | int input_width,
| ~~~~~~~~~~~~~~~~
42 | int kernel_size
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:39:9: note: the first parameter in the range is 'dilation'
39 | int dilation,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:42:9: note: the last parameter in the range is 'kernel_size'
42 | int kernel_size
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:62:5: warning: 2 adjacent parameters of 'max_pool2d_textured_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
62 | const int batch_size,
| ^~~~~~~~~~~~~~~~~~~~~
63 | const int channels,
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:62:15: note: the first parameter in the range is 'batch_size'
62 | const int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:63:15: note: the last parameter in the range is 'channels'
63 | const int channels,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:65:5: warning: 2 adjacent parameters of 'max_pool2d_textured_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
65 | const int input_width,
| ^~~~~~~~~~~~~~~~~~~~~~
66 | const int output_height,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:65:15: note: the first parameter in the range is 'input_width'
65 | const int input_width,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:66:15: note: the last parameter in the range is 'output_height'
66 | const int output_height,
| ^~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:67:5: warning: 3 adjacent parameters of 'max_pool2d_textured_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
67 | const int output_width,
| ^~~~~~~~~~~~~~~~~~~~~~~
68 | const int kernel_size,
| ~~~~~~~~~~~~~~~~~~~~~~
69 | const int stride,
| ~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:67:15: note: the first parameter in the range is 'output_width'
67 | const int output_width,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:69:15: note: the last parameter in the range is 'stride'
69 | const int stride,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:70:5: warning: 2 adjacent parameters of 'max_pool2d_textured_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
70 | const int padding,
| ^~~~~~~~~~~~~~~~~~
71 | const int dilation
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:70:15: note: the first parameter in the range is 'padding'
70 | const int padding,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:71:15: note: the last parameter in the range is 'dilation'
71 | const int dilation
| ^~~~~~~~
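The bugprone-easily-swappable-parameters warnings above all point at runs of adjacent int parameters (dilation/input_height/input_width, dilation/.../kernel_size, batch_size/channels, input_width/output_height, output_width/kernel_size/stride, padding/dilation) that a caller could transpose without any compile-time error. A minimal sketch of one common remedy, assuming the signature quoted in the warnings (the real kernel is presumably templated over scalar_t, which is omitted here), is to bundle the geometry into a named struct so each argument is spelled out at the call site:

// Hypothetical parameter bundle; field names mirror the ints flagged above.
struct Pool2dParams {
    int batch_size;
    int channels;
    int input_height;
    int input_width;
    int output_height;
    int output_width;
    int kernel_size;
    int stride;
    int padding;
    int dilation;
};

// One aggregate replaces ten adjacent, easily swapped ints.
__global__ void max_pool2d_textured_kernel(const float* __restrict__ input,
                                           float* __restrict__ output,
                                           Pool2dParams p) {
    // ... kernel body unchanged, reading p.stride, p.padding, and so on ...
}

Filling the struct field by field at the launch site then makes any transposition obvious in review, which is the behaviour the check is trying to encourage.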
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:73:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
73 | const int ow = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:74:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
74 | const int oh = blockIdx.y * blockDim.y + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:75:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
75 | const int bc = blockIdx.z;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:86:29: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
86 | const int curr_ow = ow + i * blockDim.x;
| ^
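These narrowing warnings arise because blockIdx, blockDim, and threadIdx are unsigned, so storing their products directly in int is flagged as implementation-defined. A small sketch of the usual fix, assuming the index expressions quoted above, is to make each conversion explicit:

__global__ void index_cast_sketch() {
    // Explicit casts mark the unsigned-to-int conversions as intentional,
    // which silences bugprone-narrowing-conversions at these sites.
    const int ow = static_cast<int>(blockIdx.x * (blockDim.x * 2) + threadIdx.x);
    const int oh = static_cast<int>(blockIdx.y * blockDim.y + threadIdx.y);
    const int bc = static_cast<int>(blockIdx.z);

    for (int i = 0; i < 2; ++i) {
        // Mirrors the curr_ow expression flagged at base.cu:86.
        const int curr_ow = ow + i * static_cast<int>(blockDim.x);
        (void)curr_ow;
    }
    (void)oh; (void)bc;
}

In the real kernel the casts would be applied to the existing declarations rather than in a standalone function; the sketch only shows where the static_casts go.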
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:49: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:49: note: make conversion explicit to silence this warning
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:49: note: perform multiplication in a wider type
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:63: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:63: note: make conversion explicit to silence this warning
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:63: note: perform multiplication in a wider type
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:47: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:47: note: make conversion explicit to silence this warning
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:47: note: perform multiplication in a wider type
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:61: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:61: note: make conversion explicit to silence this warning
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:61: note: perform multiplication in a wider type
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
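Here input_height and input_width presumably come from int64_t tensor sizes, while padding, dilation, and kernel_size are plain ints, so the inner multiplications are performed in int and then implicitly widened. A hypothetical helper, sketched under that assumption, does the arithmetic in int64_t from the start:

#include <cstdint>

// Hypothetical helper: computes one pooled output extent with the
// multiplications explicitly widened to int64_t.
inline int64_t pooled_extent(int64_t input_extent, int kernel_size,
                             int stride, int padding, int dilation) {
    return (input_extent + 2 * static_cast<int64_t>(padding)
            - static_cast<int64_t>(dilation) * (kernel_size - 1) - 1) / stride + 1;
}

The two declarations at lines 122-123 would then reduce to calls such as pooled_extent(input_height, kernel_size, stride, padding, dilation), and the widening warnings disappear.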
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:26: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'unsigned int' [bugprone-implicit-widening-of-multiplication-result]
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:26: note: make conversion explicit to silence this warning
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:26: note: perform multiplication in a wider type
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:49: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'unsigned int' [bugprone-implicit-widening-of-multiplication-result]
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:49: note: make conversion explicit to silence this warning
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:49: note: perform multiplication in a wider type
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^~~~~~~~~
| static_cast<unsigned long>( )
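The same pattern shows up in the grid-size computation: threads.x * 2 is an unsigned product that gets widened when divided into the int64_t output_width. Naming the tile width once, with the widening spelled out, addresses both occurrences on line 129 (a sketch assuming the surrounding launch-configuration code):

// Hypothetical: hoist the per-block horizontal tile width out of the expression.
const int64_t tile_w = static_cast<int64_t>(threads.x) * 2;
const int64_t grid_x = (output_width + tile_w - 1) / tile_w;

grid_x would then feed the x dimension of the block-count dim3 in place of the original inline expression.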
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:134:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
134 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:134:5: warning: 'scalar_type' is deprecated: passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, pass an at::ScalarType instead [clang-diagnostic-deprecated-declarations]
134 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:3: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:218:36: note: expanded from macro 'AT_DISPATCH_SWITCH'
218 | at::ScalarType _st = ::detail::scalar_type(the_type); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:106:1: note: 'scalar_type' has been explicitly marked deprecated here
106 | C10_DEPRECATED_MESSAGE(
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:134:38: warning: 'type' is deprecated: Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device(). [clang-diagnostic-deprecated-declarations]
134 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/core/TensorBody.h:224:3: note: 'type' has been explicitly marked deprecated here
224 | C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^
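The last three warnings all originate from the single dispatch call at line 134. The __func__ note comes from the TORCH_CHECK expansion inside the dispatch macro rather than from the kernel source, but the two deprecation warnings can be removed by passing an at::ScalarType instead of the deprecated Tensor.type() result. Assuming the call quoted above, the updated form would look like:

AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "max_pool2d_cuda_forward", ([&] {
    // ... unchanged body: launch max_pool2d_textured_kernel<scalar_t>(...) ...
}));

input.scalar_type() returns the at::ScalarType that the AT_DISPATCH macros now expect, so both clang-diagnostic-deprecated-declarations warnings go away without touching the kernel itself.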