/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:10:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
   10 |     int gidx = blockIdx.x * blockDim.x + threadIdx.x;
      |                ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:11:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
   11 |     int stride = gridDim.x * blockDim.x;
      |                  ^
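Both bugprone-narrowing-conversions warnings point at the thread-index arithmetic: blockIdx.x * blockDim.x + threadIdx.x and gridDim.x * blockDim.x are computed in unsigned int and then stored into signed int. One way to silence them, sketched here as an illustrative patch rather than the experiment's actual fix, is to make the conversion explicit:

    // Sketch: explicit casts document the intended narrowing and satisfy
    // clang-tidy without changing behavior for in-range grid sizes.
    int gidx   = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
    int stride = static_cast<int>(gridDim.x * blockDim.x);

Declaring gidx and stride as int64_t instead would also remove the widening warning reported below for the loop on line 16.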
    4 | 
    5 | template <typename scalar_t>
    6 | __global__ void sigmoid_kernel(const scalar_t* __restrict__ input,
    7 |                                scalar_t* __restrict__ output,
    8 |                                int64_t size) {
    9 |     constexpr int vec_size = sizeof(float4)/sizeof(scalar_t);
   10 |     int gidx = blockIdx.x * blockDim.x + threadIdx.x;
   11 |     int stride = gridDim.x * blockDim.x;
   12 | 
   13 |     if constexpr (std::is_same<scalar_t, float>::value) {
   14 |         // Vectorized path for floats with 128-bit alignment
   15 |         float4* output_vec = reinterpret_cast<float4*>(output);
   16 |         for(int vidx = gidx; vidx*vec_size < size; vidx += stride) {
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:16:30: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
   16 |         for(int vidx = gidx; vidx*vec_size < size; vidx += stride) {
      |                              ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:16:30: note: make conversion explicit to silence this warning
   16 |         for(int vidx = gidx; vidx*vec_size < size; vidx += stride) {
      |                              ^~~~~~~~~~~~~
      |                              static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:16:30: note: perform multiplication in a wider type
   16 |         for(int vidx = gidx; vidx*vec_size < size; vidx += stride) {
      |                              ^~~~
      |                              static_cast<int64_t>( )
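Either fix-it above resolves the warning; applying the second, the multiplication vidx*vec_size is performed in a wider type before the comparison against the int64_t size. A sketch of the adjusted loop, assuming the kernel shown above:

    // Widen vidx before multiplying so the product cannot overflow int
    // and the comparison against the int64_t size is exact.
    for (int vidx = gidx; static_cast<int64_t>(vidx) * vec_size < size; vidx += stride) {
        // ... vectorized sigmoid body unchanged ...
    }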
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:39:48: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
   39 |     int blocks = std::min(65535, (int)((size + threads * 4 - 1)/(threads * 4)));
      |                                                ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:39:48: note: make conversion explicit to silence this warning
   39 |     int blocks = std::min(65535, (int)((size + threads * 4 - 1)/(threads * 4)));
      |                                                ^~~~~~~~~~~
      |                                                static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:39:48: note: perform multiplication in a wider type
   39 |     int blocks = std::min(65535, (int)((size + threads * 4 - 1)/(threads * 4)));
      |                                                ^~~~~~~
      |                                                static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:39:66: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
   39 |     int blocks = std::min(65535, (int)((size + threads * 4 - 1)/(threads * 4)));
      |                                                                  ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:39:66: note: make conversion explicit to silence this warning
   39 |     int blocks = std::min(65535, (int)((size + threads * 4 - 1)/(threads * 4)));
      |                                                                  ^~~~~~~~~~~
      |                                                                  static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:39:66: note: perform multiplication in a wider type
   39 |     int blocks = std::min(65535, (int)((size + threads * 4 - 1)/(threads * 4)));
      |                                                                  ^~~~~~~
      |                                                                  static_cast<int64_t>( )
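Both warnings on line 39 flag the same pattern: threads * 4 is multiplied in int and only afterwards widened to int64_t to match size. A hypothetical helper illustrating the suggested fix (the names compute_blocks and grain are illustrative, not from the original file):

    #include <algorithm>
    #include <cstdint>

    // Sketch: do the grain-size multiplication in int64_t, take the
    // ceiling division in the wide type, and narrow exactly once.
    static int compute_blocks(int64_t size, int threads) {
        const int64_t grain  = static_cast<int64_t>(threads) * 4;
        const int64_t needed = (size + grain - 1) / grain;  // ceil(size / grain)
        return static_cast<int>(std::min<int64_t>(65535, needed));
    }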
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:41:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
   41 |     AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "sigmoid_kernel", [&] {
      |     ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
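This diagnostic originates in PyTorch's AT_DISPATCH_FLOATING_TYPES / TORCH_CHECK macro expansion rather than in the kernel's own code: inside the dispatch lambda, __func__ names the lambda's operator() instead of the enclosing function. If the warning is considered noise at this call site, one option (a sketch, not necessarily the project's policy) is a targeted clang-tidy suppression:

    // NOLINTNEXTLINE(bugprone-lambda-function-name)
    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "sigmoid_kernel", [&] {
        // ... kernel launch as in the original file ...
    });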
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:43:31: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
   43 |             blocks = (size + (threads * 4) - 1) / (threads * 4);
      |                              ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:43:31: note: make conversion explicit to silence this warning
   43 |             blocks = (size + (threads * 4) - 1) / (threads * 4);
      |                              ^
      |                              static_cast<int64_t>(
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:66: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:44: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:56: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:70:12: note: expanded from macro 'AT_PRIVATE_CASE_TYPE_USING_HINT'
70 | return __VA_ARGS__(); \
| ^~~~~~~~~~~
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:221:7: note: expanded from macro 'AT_DISPATCH_SWITCH'
221 | __VA_ARGS__ \
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:43:31: note: perform multiplication in a wider type
   43 |             blocks = (size + (threads * 4) - 1) / (threads * 4);
      |                              ^
      |                              static_cast<int64_t>(
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:43:52: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
   43 |             blocks = (size + (threads * 4) - 1) / (threads * 4);
      |                                                    ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:43:52: note: make conversion explicit to silence this warning
   43 |             blocks = (size + (threads * 4) - 1) / (threads * 4);
      |                                                    ^
      |                                                    static_cast<int64_t>(
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_21/b4_s3_vectorized_ldg_aligned/base/base.cu:43:52: note: perform multiplication in a wider type
   43 |             blocks = (size + (threads * 4) - 1) / (threads * 4);
      |                                                    ^
      |                                                    static_cast<int64_t>(
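Line 43 repeats the line-39 pattern inside the dispatch lambda. A sketch of the same fix applied there (the variable name grain is illustrative):

    // Hoist the grain size into an int64_t once, then reuse it for the
    // ceiling division so no int multiplication is implicitly widened.
    const int64_t grain = static_cast<int64_t>(threads) * 4;
    blocks = static_cast<int>((size + grain - 1) / grain);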