
The AI CUDA Engineer 👷

42_Max_Pooling_2D • base_unrolled_combo_base

Level 1 • Task 42
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    kernel_size: int,
    stride: int,
    padding: int,
    dilation: int,
) -> torch.Tensor:
    """
    Applies Max Pooling 2D using functional interface.

    Args:
        x (torch.Tensor): Input tensor
        kernel_size (int): Size of pooling window
        stride (int): Stride of pooling window
        padding (int): Padding to be applied
        dilation (int): Spacing between kernel elements

    Returns:
        torch.Tensor: Output tensor after max pooling
    """
    return F.max_pool2d(
        x, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation
    )


class Model(nn.Module):
    """
    Simple model that performs Max Pooling 2D.
    """

    def __init__(self, kernel_size: int, stride: int, padding: int, dilation: int):
        """
        Initializes the model parameters.
        """
        super(Model, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Forward pass that calls module_fn.
        """
        return fn(
            x,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
        )


batch_size = 16
channels = 32
height = 128
width = 128
kernel_size = 2
stride = 2
padding = 1
dilation = 3


def get_inputs():
    x = torch.randn(batch_size, channels, height, width)
    return [x]


def get_init_inputs():
    return [kernel_size, stride, padding, dilation]
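For orientation, a minimal driver sketch using only the definitions above; running on CUDA (rather than CPU) is an assumption about the benchmarking setup:

# Hypothetical driver: build the model from the init inputs and run one forward pass.
model = Model(*get_init_inputs())
(x,) = get_inputs()
out = model(x.cuda() if torch.cuda.is_available() else x)
print(out.shape)

The same task, expressed with the stateful nn.MaxPool2d module: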
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs Max Pooling 2D.
    """
    def __init__(self, kernel_size: int, stride: int, padding: int, dilation: int):
        """
        Initializes the Max Pooling 2D layer.

        Args:
            kernel_size (int): Size of the pooling window.
            stride (int): Stride of the pooling window.
            padding (int): Padding to be applied before pooling.
            dilation (int): Spacing between kernel elements.
        """
        super(Model, self).__init__()
        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies Max Pooling 2D to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, channels, height, width).

        Returns:
            torch.Tensor: Output tensor after Max Pooling 2D, shape (batch_size, channels, pooled_height, pooled_width).
        """
        return self.maxpool(x)

batch_size = 16
channels = 32
height = 128
width = 128
kernel_size = 2
stride = 2
padding = 1
dilation = 3

def get_inputs():
    x = torch.randn(batch_size, channels, height, width)
    return [x]

def get_init_inputs():
    return [kernel_size, stride, padding, dilation]
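With these settings the pooled spatial size follows the standard max-pooling formula; a quick check of the arithmetic (nothing here is assumed beyond the constants above):

# Output size: floor((H + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1
H, k, s, p, d = 128, 2, 2, 1, 3
H_out = (H + 2 * p - d * (k - 1) - 1) // s + 1
print(H_out)  # 64, so the pooled output is (16, 32, 64, 64)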

Kernel Information

Related Kernels (Level 1, Task 42 • 42_Max_Pooling_2D)

Rank  Kernel Name  Runtime (ms)  Speedup (Native)  Speedup (Compile)
🥇 textured_modular_unroll_base_base 0.02 1.43 3.04
🥇 max_pool2d_optimized_strided_base 0.02 1.43 3.04
🥇 tuned_block_size_maxpool2d_base_base 0.02 1.43 3.04
🥇 modularized_unroll_base_base 0.02 1.43 3.04
🥇 fused_unroll_constmem_pool_base 0.02 1.43 3.04
🥇 base_unrolled_combo_base 0.02 1.43 3.04
🥇 warp_divergence_optimized_unroll_base 0.02 1.43 3.04
8 max_pool2d_strided_base_base 0.02 1.37 2.91
8 streams_unroll_pipelined_batch_base_base 0.02 1.37 2.91
8 optimized_max_pool2d_base 0.02 1.37 2.91
8 42_Max_Pooling_2D_manual_unroll_base 0.02 1.37 2.91
8 coalesced_1d_unroll_base_base 0.02 1.37 2.91
8 max_pool2d_kernel_manually_unrolled_base_base 0.02 1.37 2.91
8 coalesced_ldg_unrolled_base_base 0.02 1.37 2.91
8 unrolled_coalesced_maxpool_base 0.02 1.37 2.91
8 max_pool2d_combined_base 0.02 1.37 2.91
8 max_pool2d_combined_optimized_base 0.02 1.37 2.91
8 unroll_base_optimized_base 0.02 1.37 2.91
8 fully_unrolled_maxpool2d_base_base 0.02 1.37 2.91
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <limits>  // for std::numeric_limits, used to initialize max_val below
#include <vector>

// One thread computes one output element; the grid's z-dimension enumerates
// (batch, channel) pairs.
template <typename scalar_t>
__global__ void max_pool2d_combined_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int batch_size,
    const int channels,
    const int input_height,
    const int input_width,
    const int output_height,
    const int output_width,
    const int kernel_size,
    const int stride,
    const int padding,
    const int dilation
) {
    const int ow = blockIdx.x * blockDim.x + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const int bc = blockIdx.z;
    const int b = bc / channels;
    const int c = bc % channels;

    if (b >= batch_size || c >= channels || oh >= output_height || ow >= output_width)
        return;

    scalar_t max_val = -std::numeric_limits<scalar_t>::infinity();
    const scalar_t* input_channel = input + (b * channels + c) * input_height * input_width;

    const int base_ih = oh * stride - padding;
    const int base_iw = ow * stride - padding;

    // Fully unroll the common 2x2 and 3x3 windows; partially unroll the generic case.
    if (kernel_size == 2) {
        #pragma unroll
        for (int kh = 0; kh < 2; kh++) {
            const int ih = base_ih + kh * dilation;
            const bool valid_h = ih >= 0 && ih < input_height;
            #pragma unroll
            for (int kw = 0; kw < 2; kw++) {
                const int iw = base_iw + kw * dilation;
                if (valid_h && iw >= 0 && iw < input_width)
                    max_val = max(max_val, input_channel[ih * input_width + iw]);
            }
        }
    } else if (kernel_size == 3) {
        #pragma unroll
        for (int kh = 0; kh < 3; kh++) {
            const int ih = base_ih + kh * dilation;
            const bool valid_h = ih >= 0 && ih < input_height;
            #pragma unroll
            for (int kw = 0; kw < 3; kw++) {
                const int iw = base_iw + kw * dilation;
                if (valid_h && iw >= 0 && iw < input_width)
                    max_val = max(max_val, input_channel[ih * input_width + iw]);
            }
        }
    } else {
        #pragma unroll 4
        for (int kh = 0; kh < kernel_size; kh++) {
            const int ih = base_ih + kh * dilation;
            const bool valid_h = ih >= 0 && ih < input_height;
            #pragma unroll 4
            for (int kw = 0; kw < kernel_size; kw++) {
                const int iw = base_iw + kw * dilation;
                if (valid_h && iw >= 0 && iw < input_width)
                    max_val = max(max_val, input_channel[ih * input_width + iw]);
            }
        }
    }

    output[(b * channels + c) * output_height * output_width + oh * output_width + ow] = max_val;
}

torch::Tensor max_pool2d_cuda_forward(
    torch::Tensor input,
    int kernel_size,
    int stride,
    int padding,
    int dilation
) {
    const auto batch_size = input.size(0);
    const auto channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);

    // Standard pooling output size: floor((in + 2*pad - dilation*(k-1) - 1) / stride) + 1
    const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
    const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;

    auto output = torch::empty({batch_size, channels, output_height, output_width}, input.options());

    // 256-thread blocks (32x8); the grid's z-dimension covers every (batch, channel) slice.
    const dim3 threads(32, 8);
    const dim3 blocks(
        (output_width + threads.x - 1) / threads.x,
        (output_height + threads.y - 1) / threads.y,
        batch_size * channels
    );

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "max_pool2d_cuda_forward", ([&] {
        max_pool2d_combined_kernel<scalar_t><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            batch_size,
            channels,
            input_height,
            input_width,
            output_height,
            output_width,
            kernel_size,
            stride,
            padding,
            dilation
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &max_pool2d_cuda_forward, "Max Pool 2D forward (CUDA)");
}
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 2.786 inst/cycle 0.000 5
Executed Ipc Elapsed 2.270 inst/cycle 0.000 5
Issue Slots Busy 69.900 % 0.006 5
Issued Ipc Active 2.796 inst/cycle 0.000 5
SM Busy 69.900 % 0.006 5
Memory Throughput 2157060688980.542 byte/second 422465715284011253760.000 5
Mem Busy 40.772 % 0.108 5
Max Bandwidth 64.528 % 0.387 5
L1/TEX Hit Rate 42.040 % 0.000 5
L2 Hit Rate 29.758 % 0.002 5
Mem Pipes Busy 33.514 % 0.038 5
Warp Cycles Per Issued Instruction 18.298 cycle 0.049 5
Warp Cycles Per Executed Instruction 18.372 cycle 0.050 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.390 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 80.138 % 0.003 5
Achieved Active Warps Per SM 51.288 warp 0.001 5
Analysis Rules
Rule Description
WRN HighPipeUtilization ALU is the highest-utilized pipeline (65.6%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. The pipeline is well-utilized, but might become a bottleneck if more work is added. Based on the number of executed instructions, the highest utilized pipeline (65.6%) is ALU. It executes integer and logic operations. Comparing the two, the overall pipeline utilization appears to be caused by frequent, low-latency instructions. See the Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-decoder) or hover over the pipeline name to understand the workloads handled by each pipeline. The Instruction Statistics section shows the mix of executed instructions in this kernel. Check the Warp State Statistics section for which reasons cause warps to stall.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (80.2%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
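The occupancy figures above are consistent with the launch configuration; a quick check of the arithmetic, reading the per-SM limits off the metrics table:

# 32x8 = 256 threads per block -> 8 warps per block.
warps_per_block = (32 * 8) // 32
blocks_per_sm = 8  # "Block Limit Registers" row: registers cap residency at 8 blocks
theoretical_warps = warps_per_block * blocks_per_sm  # 64 -> 100% theoretical occupancy
achieved_pct = 51.288 / 64 * 100  # ~80.1%, matching the "Achieved Occupancy" row
print(theoretical_warps, round(achieved_pct, 1))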
Operation / Metric Value Unit
aten::to
CPU Time 318058.69 μs
Device Time 3285.81 μs
Self CPU Time 42.01 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 318016.68 μs
Device Time 3285.81 μs
Self CPU Time 92.95 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 314421.54 μs
Device Time 0.00 μs
Self CPU Time 67.78 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 314117.55 μs
Device Time 0.00 μs
Self CPU Time 314117.55 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 610360.89 μs
Device Time 21073.83 μs
Self CPU Time 610360.89 μs
Self Device Time 21073.83 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void max_pool2d_combined_kernel<float>(float const*, float*, int, int, int, int, int, int, int, int, int, int)
CPU Time 0.00 μs
Device Time 164413.92 μs
Self CPU Time 0.00 μs
Self Device Time 164413.92 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 21973.30 μs
Device Time 41952.58 μs
Self CPU Time 21973.30 μs
Self Device Time 41952.58 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 130927.75 μs
Device Time 626861.71 μs
Self CPU Time 14854.78 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 116074.35 μs
Device Time 626861.71 μs
Self CPU Time 15709.16 μs
Self Device Time 626861.71 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 626940.34 μs
Self CPU Time 0.00 μs
Self Device Time 626940.34 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45293 warnings generated when compiling for host.
Suppressed 45325 warnings (45278 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:13:5: warning: 2 adjacent parameters of 'max_pool2d_combined_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
13 | const int input_width,
| ^~~~~~~~~~~~~~~~~~~~~~
14 | const int output_height,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:13:15: note: the first parameter in the range is 'input_width'
13 | const int input_width,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:14:15: note: the last parameter in the range is 'output_height'
14 | const int output_height,
| ^~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:15:5: warning: 3 adjacent parameters of 'max_pool2d_combined_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
15 | const int output_width,
| ^~~~~~~~~~~~~~~~~~~~~~~
16 | const int kernel_size,
| ~~~~~~~~~~~~~~~~~~~~~~
17 | const int stride,
| ~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:15:15: note: the first parameter in the range is 'output_width'
15 | const int output_width,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:17:15: note: the last parameter in the range is 'stride'
17 | const int stride,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:18:5: warning: 2 adjacent parameters of 'max_pool2d_combined_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
18 | const int padding,
| ^~~~~~~~~~~~~~~~~~
19 | const int dilation
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:18:15: note: the first parameter in the range is 'padding'
18 | const int padding,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:19:15: note: the last parameter in the range is 'dilation'
19 | const int dilation
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:21:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
21 | const int ow = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:22:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
22 | const int oh = blockIdx.y * blockDim.y + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:23:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
23 | const int bc = blockIdx.z;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:89:49: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
89 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:89:49: note: make conversion explicit to silence this warning
89 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:89:49: note: perform multiplication in a wider type
89 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:89:63: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
89 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:89:63: note: make conversion explicit to silence this warning
89 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:89:63: note: perform multiplication in a wider type
89 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:90:47: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
90 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:90:47: note: make conversion explicit to silence this warning
90 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:90:47: note: perform multiplication in a wider type
90 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:90:61: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
90 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:90:61: note: make conversion explicit to silence this warning
90 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:90:61: note: perform multiplication in a wider type
90 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:101:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
101 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:101:5: warning: 'scalar_type' is deprecated: passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, pass an at::ScalarType instead [clang-diagnostic-deprecated-declarations]
101 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:3: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:218:36: note: expanded from macro 'AT_DISPATCH_SWITCH'
218 | at::ScalarType _st = ::detail::scalar_type(the_type); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:106:1: note: 'scalar_type' has been explicitly marked deprecated here
106 | C10_DEPRECATED_MESSAGE(
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b4_s3_base_unrolled_combo/base/base.cu:101:38: warning: 'type' is deprecated: Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device(). [clang-diagnostic-deprecated-declarations]
101 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/core/TensorBody.h:224:3: note: 'type' has been explicitly marked deprecated here
224 | C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^