
The AI CUDA Engineer 👷

42_Max_Pooling_2D • modularized_unroll_base_base

Level 1 • Task 42
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    kernel_size: int,
    stride: int,
    padding: int,
    dilation: int,
) -> torch.Tensor:
    """
    Applies Max Pooling 2D using functional interface.

    Args:
        x (torch.Tensor): Input tensor
        kernel_size (int): Size of pooling window
        stride (int): Stride of pooling window
        padding (int): Padding to be applied
        dilation (int): Spacing between kernel elements

    Returns:
        torch.Tensor: Output tensor after max pooling
    """
    return F.max_pool2d(
        x, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation
    )


class Model(nn.Module):
    """
    Simple model that performs Max Pooling 2D.
    """

    def __init__(self, kernel_size: int, stride: int, padding: int, dilation: int):
        """
        Initializes the model parameters.
        """
        super(Model, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Forward pass that calls module_fn.
        """
        return fn(
            x,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
        )


batch_size = 16
channels = 32
height = 128
width = 128
kernel_size = 2
stride = 2
padding = 1
dilation = 3


def get_inputs():
    x = torch.randn(batch_size, channels, height, width)
    return [x]


def get_init_inputs():
    return [kernel_size, stride, padding, dilation]
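
A minimal usage sketch of this harness (assuming the definitions above are in scope). With these parameters each spatial dimension pools from 128 down to floor((128 + 2*1 - 3*(2 - 1) - 1) / 2) + 1 = 64:

model = Model(*get_init_inputs())
(x,) = get_inputs()
out = model(x)
print(out.shape)  # torch.Size([16, 32, 64, 64])

The equivalent nn.Module reference implementation follows.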
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs Max Pooling 2D.
    """
    def __init__(self, kernel_size: int, stride: int, padding: int, dilation: int):
        """
        Initializes the Max Pooling 2D layer.

        Args:
            kernel_size (int): Size of the pooling window.
            stride (int): Stride of the pooling window.
            padding (int): Padding to be applied before pooling.
            dilation (int): Spacing between kernel elements.
        """
        super(Model, self).__init__()
        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies Max Pooling 2D to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, channels, height, width).

        Returns:
            torch.Tensor: Output tensor after Max Pooling 2D, shape (batch_size, channels, pooled_height, pooled_width).
        """
        return self.maxpool(x)

batch_size = 16
channels = 32
height = 128
width = 128
kernel_size = 2
stride = 2
padding = 1
dilation = 3

def get_inputs():
    x = torch.randn(batch_size, channels, height, width)
    return [x]

def get_init_inputs():
    return [kernel_size, stride, padding, dilation]
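The two listings are interchangeable: nn.MaxPool2d is a module wrapper around F.max_pool2d, so both produce identical outputs for the same arguments. A minimal self-contained check (a sketch, not part of the benchmark harness):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(16, 32, 128, 128)
module_out = nn.MaxPool2d(kernel_size=2, stride=2, padding=1, dilation=3)(x)
functional_out = F.max_pool2d(x, kernel_size=2, stride=2, padding=1, dilation=3)
assert torch.equal(module_out, functional_out)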

Kernel Information

Related Kernels (Level 1, Task 42 • 42_Max_Pooling_2D)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 textured_modular_unroll_base_base 0.02 1.43 3.04
🥇 max_pool2d_optimized_strided_base 0.02 1.43 3.04
🥇 tuned_block_size_maxpool2d_base_base 0.02 1.43 3.04
🥇 modularized_unroll_base_base 0.02 1.43 3.04
🥇 fused_unroll_constmem_pool_base 0.02 1.43 3.04
🥇 base_unrolled_combo_base 0.02 1.43 3.04
🥇 warp_divergence_optimized_unroll_base 0.02 1.43 3.04
8 max_pool2d_strided_base_base 0.02 1.37 2.91
8 streams_unroll_pipelined_batch_base_base 0.02 1.37 2.91
8 optimized_max_pool2d_base 0.02 1.37 2.91
8 42_Max_Pooling_2D_manual_unroll_base 0.02 1.37 2.91
8 coalesced_1d_unroll_base_base 0.02 1.37 2.91
8 max_pool2d_kernel_manually_unrolled_base_base 0.02 1.37 2.91
8 coalesced_ldg_unrolled_base_base 0.02 1.37 2.91
8 unrolled_coalesced_maxpool_base 0.02 1.37 2.91
8 max_pool2d_combined_base 0.02 1.37 2.91
8 max_pool2d_combined_optimized_base 0.02 1.37 2.91
8 unroll_base_optimized_base 0.02 1.37 2.91
8 fully_unrolled_maxpool2d_base_base 0.02 1.37 2.91
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <limits>
#include <vector>

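// Largest kernel size covered by the device helpers below.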
constexpr int MAX_KERNEL_SIZE = 7;

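// Fully unrolled 2x2 pooling window; per-element bounds checks handle padding at the borders.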
template <typename scalar_t>
__device__ void process_kernel_2x2(
    const scalar_t* input_channel,
    scalar_t& max_val,
    int base_ih,
    int base_iw,
    int dilation,
    int input_height,
    int input_width
) {
    #pragma unroll
    for (int kh = 0; kh < 2; kh++) {
        const int ih = base_ih + kh * dilation;
        const bool valid_h = ih >= 0 && ih < input_height;
        
        #pragma unroll
        for (int kw = 0; kw < 2; kw++) {
            const int iw = base_iw + kw * dilation;
            if (valid_h && iw >= 0 && iw < input_width) {
                max_val = max(max_val, input_channel[ih * input_width + iw]);
            }
        }
    }
}

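// Fully unrolled 3x3 pooling window.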
template <typename scalar_t>
__device__ void process_kernel_3x3(
    const scalar_t* input_channel,
    scalar_t& max_val,
    int base_ih,
    int base_iw,
    int dilation,
    int input_height,
    int input_width
) {
    #pragma unroll
    for (int kh = 0; kh < 3; kh++) {
        const int ih = base_ih + kh * dilation;
        const bool valid_h = ih >= 0 && ih < input_height;
        
        #pragma unroll
        for (int kw = 0; kw < 3; kw++) {
            const int iw = base_iw + kw * dilation;
            if (valid_h && iw >= 0 && iw < input_width) {
                max_val = max(max_val, input_channel[ih * input_width + iw]);
            }
        }
    }
}

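// Generic fallback for kernel sizes up to MAX_KERNEL_SIZE (runtime-bounded loops).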
template <typename scalar_t>
__device__ void process_kernel_generic(
    const scalar_t* input_channel,
    scalar_t& max_val,
    int base_ih,
    int base_iw,
    int dilation,
    int input_height,
    int input_width,
    int kernel_size
) {
    for (int kh = 0; kh < kernel_size; kh++) {
        const int ih = base_ih + kh * dilation;
        const bool valid_h = ih >= 0 && ih < input_height;
        
        for (int kw = 0; kw < kernel_size; kw++) {
            const int iw = base_iw + kw * dilation;
            if (valid_h && iw >= 0 && iw < input_width) {
                max_val = max(max_val, input_channel[ih * input_width + iw]);
            }
        }
    }
}

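// One thread computes one output element; blockIdx.z flattens the (batch, channel) pair.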
template <typename scalar_t>
__global__ void max_pool2d_modular_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int batch_size,
    const int channels,
    const int input_height,
    const int input_width,
    const int output_height,
    const int output_width,
    const int kernel_size,
    const int stride,
    const int padding,
    const int dilation
) {
    const int ow = blockIdx.x * blockDim.x + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const int bc = blockIdx.z;
    const int b = bc / channels;
    const int c = bc % channels;

    if (b >= batch_size || c >= channels || oh >= output_height || ow >= output_width)
        return;

    scalar_t max_val = -std::numeric_limits<scalar_t>::infinity();
    const scalar_t* input_channel = input + (b * channels + c) * input_height * input_width;

    const int base_ih = oh * stride - padding;
    const int base_iw = ow * stride - padding;

    if (kernel_size == 2) {
        process_kernel_2x2(input_channel, max_val, base_ih, base_iw,
                          dilation, input_height, input_width);
    } else if (kernel_size == 3) {
        process_kernel_3x3(input_channel, max_val, base_ih, base_iw,
                          dilation, input_height, input_width);
    } else if (kernel_size <= MAX_KERNEL_SIZE) {
        process_kernel_generic(input_channel, max_val, base_ih, base_iw,
                             dilation, input_height, input_width, kernel_size);
    }

    output[(b * channels + c) * output_height * output_width + oh * output_width + ow] = max_val;
}

torch::Tensor max_pool2d_cuda_forward(
    torch::Tensor input,
    int kernel_size,
    int stride,
    int padding,
    int dilation
) {
    // The device helpers only cover kernel sizes up to MAX_KERNEL_SIZE; reject anything
    // larger instead of silently writing -infinity to the output.
    TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
    TORCH_CHECK(input.is_contiguous(), "input must be contiguous");
    TORCH_CHECK(kernel_size >= 1 && kernel_size <= MAX_KERNEL_SIZE,
                "kernel_size must be between 1 and ", MAX_KERNEL_SIZE);

    const auto batch_size = input.size(0);
    const auto channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);

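    // Output size per spatial dim: floor((in + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1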
    const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
    const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;

    auto output = torch::empty({batch_size, channels, output_height, output_width}, input.options());

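    // 32 threads across the output width keep global memory accesses coalesced; 8 rows per block.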
    const dim3 threads(32, 8);
    const dim3 blocks(
        (output_width + threads.x - 1) / threads.x,
        (output_height + threads.y - 1) / threads.y,
        batch_size * channels
    );

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "max_pool2d_cuda_forward", ([&] {
        max_pool2d_modular_kernel<scalar_t><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            batch_size,
            channels,
            input_height,
            input_width,
            output_height,
            output_width,
            kernel_size,
            stride,
            padding,
            dilation
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &max_pool2d_cuda_forward, "Modular Max Pool 2D forward (CUDA)");
}
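
One way to compile and sanity-check this extension is PyTorch's JIT loader. A sketch assuming the CUDA source above is saved as maxpool2d_ext.cu (the file and module names are illustrative, not taken from the experiment):

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source listed above (path is an assumption for this sketch)
ext = load(name="maxpool2d_ext", sources=["maxpool2d_ext.cu"])

x = torch.randn(16, 32, 128, 128, device="cuda")
out = ext.forward(x, 2, 2, 1, 3)  # kernel_size, stride, padding, dilation
ref = F.max_pool2d(x, kernel_size=2, stride=2, padding=1, dilation=3)
assert torch.allclose(out, ref)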
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 2.686 inst/cycle 0.003 5
Executed Ipc Elapsed 2.186 inst/cycle 0.000 5
Issue Slots Busy 67.470 % 1.761 5
Issued Ipc Active 2.698 inst/cycle 0.003 5
SM Busy 67.776 % 1.782 5
Memory Throughput 2218586994378.060 byte/second 486889982979818192896.000 5
Mem Busy 42.028 % 0.196 5
Max Bandwidth 66.236 % 0.453 5
L1/TEX Hit Rate 42.040 % 0.000 5
L2 Hit Rate 29.602 % 0.004 5
Mem Pipes Busy 26.910 % 0.068 5
Warp Cycles Per Issued Instruction 18.564 cycle 0.087 5
Warp Cycles Per Executed Instruction 18.642 cycle 0.087 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.280 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 79.128 % 0.008 5
Achieved Active Warps Per SM 50.644 warp 0.003 5
Analysis Rules
Rule Description
WRN HighPipeUtilization ALU is the highest-utilized pipeline (68.5%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. The pipeline is well-utilized, but might become a bottleneck if more work is added. Based on the number of executed instructions, the highest utilized pipeline (68.5%) is ALU. It executes integer and logic operations. Comparing the two, the overall pipeline utilization appears to be caused by frequent, low-latency instructions. See the Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-decoder) or hover over the pipeline name to understand the workloads handled by each pipeline. The Instruction Statistics section shows the mix of executed instructions in this kernel. Check the Warp State Statistics section for which reasons cause warps to stall.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (79.2%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 380538.64 μs
Device Time 3325.48 μs
Self CPU Time 50.91 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 380487.74 μs
Device Time 3325.48 μs
Self CPU Time 119.12 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 376793.31 μs
Device Time 0.00 μs
Self CPU Time 88.25 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 376480.54 μs
Device Time 0.00 μs
Self CPU Time 376480.54 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 545868.98 μs
Device Time 20302.17 μs
Self CPU Time 545868.98 μs
Self Device Time 20302.17 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void max_pool2d_modular_kernel<float>(float const*, float*, int, int, int, int, int, int, int, int, int, int)
CPU Time 0.00 μs
Device Time 146293.71 μs
Self CPU Time 0.00 μs
Self Device Time 146293.71 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 21597.68 μs
Device Time 37428.11 μs
Self CPU Time 21597.68 μs
Self Device Time 37428.11 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 117373.33 μs
Device Time 561462.17 μs
Self CPU Time 13583.57 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 103795.63 μs
Device Time 561462.17 μs
Self CPU Time 14177.47 μs
Self Device Time 561462.17 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 561462.17 μs
Self CPU Time 0.00 μs
Self Device Time 561462.17 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45296 warnings generated when compiling for host.
Suppressed 45325 warnings (45278 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:14:5: warning: 3 adjacent parameters of 'process_kernel_2x2' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
14 | int dilation,
| ^~~~~~~~~~~~~
15 | int input_height,
| ~~~~~~~~~~~~~~~~~
16 | int input_width
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:14:9: note: the first parameter in the range is 'dilation'
14 | int dilation,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:16:9: note: the last parameter in the range is 'input_width'
16 | int input_width
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:39:5: warning: 3 adjacent parameters of 'process_kernel_3x3' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
39 | int dilation,
| ^~~~~~~~~~~~~
40 | int input_height,
| ~~~~~~~~~~~~~~~~~
41 | int input_width
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:39:9: note: the first parameter in the range is 'dilation'
39 | int dilation,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:41:9: note: the last parameter in the range is 'input_width'
41 | int input_width
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:64:5: warning: 4 adjacent parameters of 'process_kernel_generic' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
64 | int dilation,
| ^~~~~~~~~~~~~
65 | int input_height,
| ~~~~~~~~~~~~~~~~~
66 | int input_width,
| ~~~~~~~~~~~~~~~~
67 | int kernel_size
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:64:9: note: the first parameter in the range is 'dilation'
64 | int dilation,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:67:9: note: the last parameter in the range is 'kernel_size'
67 | int kernel_size
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:89:5: warning: 2 adjacent parameters of 'max_pool2d_modular_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
89 | const int input_width,
| ^~~~~~~~~~~~~~~~~~~~~~
90 | const int output_height,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:89:15: note: the first parameter in the range is 'input_width'
89 | const int input_width,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:90:15: note: the last parameter in the range is 'output_height'
90 | const int output_height,
| ^~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:91:5: warning: 3 adjacent parameters of 'max_pool2d_modular_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
91 | const int output_width,
| ^~~~~~~~~~~~~~~~~~~~~~~
92 | const int kernel_size,
| ~~~~~~~~~~~~~~~~~~~~~~
93 | const int stride,
| ~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:91:15: note: the first parameter in the range is 'output_width'
91 | const int output_width,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:93:15: note: the last parameter in the range is 'stride'
93 | const int stride,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:94:5: warning: 2 adjacent parameters of 'max_pool2d_modular_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
94 | const int padding,
| ^~~~~~~~~~~~~~~~~~
95 | const int dilation
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:94:15: note: the first parameter in the range is 'padding'
94 | const int padding,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:95:15: note: the last parameter in the range is 'dilation'
95 | const int dilation
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:97:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
97 | const int ow = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:98:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
98 | const int oh = blockIdx.y * blockDim.y + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:99:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
99 | const int bc = blockIdx.z;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:138:49: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
138 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:138:49: note: make conversion explicit to silence this warning
138 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:138:49: note: perform multiplication in a wider type
138 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:138:63: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
138 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:138:63: note: make conversion explicit to silence this warning
138 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:138:63: note: perform multiplication in a wider type
138 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:139:47: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
139 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:139:47: note: make conversion explicit to silence this warning
139 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:139:47: note: perform multiplication in a wider type
139 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:139:61: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
139 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:139:61: note: make conversion explicit to silence this warning
139 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:139:61: note: perform multiplication in a wider type
139 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:150:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
150 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:150:5: warning: 'scalar_type' is deprecated: passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, pass an at::ScalarType instead [clang-diagnostic-deprecated-declarations]
150 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:3: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:218:36: note: expanded from macro 'AT_DISPATCH_SWITCH'
218 | at::ScalarType _st = ::detail::scalar_type(the_type); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:106:1: note: 'scalar_type' has been explicitly marked deprecated here
106 | C10_DEPRECATED_MESSAGE(
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b9_s2_modularized_unroll_base/base/base.cu:150:38: warning: 'type' is deprecated: Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device(). [clang-diagnostic-deprecated-declarations]
150 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/core/TensorBody.h:224:3: note: 'type' has been explicitly marked deprecated here
224 | C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^