
The AI CUDA Engineer 👷

42_Max_Pooling_2D • fused_unroll_constmem_pool_base

Level 1 • Task 42
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    kernel_size: int,
    stride: int,
    padding: int,
    dilation: int,
) -> torch.Tensor:
    """
    Applies Max Pooling 2D using functional interface.

    Args:
        x (torch.Tensor): Input tensor
        kernel_size (int): Size of pooling window
        stride (int): Stride of pooling window
        padding (int): Padding to be applied
        dilation (int): Spacing between kernel elements

    Returns:
        torch.Tensor: Output tensor after max pooling
    """
    return F.max_pool2d(
        x, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation
    )
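
# Note on dilation (illustrative comment, not from the original listing): with
# kernel_size=2 and dilation=3, each pooling window samples positions {0, 3}
# along each spatial axis of its receptive field, rather than the contiguous
# {0, 1} taps of an undilated 2x2 window.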


class Model(nn.Module):
    """
    Simple model that performs Max Pooling 2D.
    """

    def __init__(self, kernel_size: int, stride: int, padding: int, dilation: int):
        """
        Initializes the model parameters.
        """
        super(Model, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Forward pass that calls module_fn.
        """
        return fn(
            x,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
        )


batch_size = 16
channels = 32
height = 128
width = 128
kernel_size = 2
stride = 2
padding = 1
dilation = 3


def get_inputs():
    x = torch.randn(batch_size, channels, height, width)
    return [x]


def get_init_inputs():
    return [kernel_size, stride, padding, dilation]
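
How these helpers fit together: a benchmarking harness constructs the model from get_init_inputs() and calls it on get_inputs(). A minimal sketch of that wiring (the harness itself is assumed, not shown on this page):

# Assumed usage of the helpers above (harness not part of this page's source).
model = Model(*get_init_inputs())
(x,) = get_inputs()
out = model(x)  # dispatches through module_fn to F.max_pool2d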

Reference implementation using nn.MaxPool2d:

import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs Max Pooling 2D.
    """
    def __init__(self, kernel_size: int, stride: int, padding: int, dilation: int):
        """
        Initializes the Max Pooling 2D layer.

        Args:
            kernel_size (int): Size of the pooling window.
            stride (int): Stride of the pooling window.
            padding (int): Padding to be applied before pooling.
            dilation (int): Spacing between kernel elements.
        """
        super(Model, self).__init__()
        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies Max Pooling 2D to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, channels, height, width).

        Returns:
            torch.Tensor: Output tensor after Max Pooling 2D, shape (batch_size, channels, pooled_height, pooled_width).
        """
        return self.maxpool(x)

batch_size = 16
channels = 32
height = 128
width = 128
kernel_size = 2
stride = 2
padding = 1
dilation = 3

def get_inputs():
    x = torch.randn(batch_size, channels, height, width)
    return [x]

def get_init_inputs():
    return [kernel_size, stride, padding, dilation]
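
The pooled spatial size follows the usual formula floor((size + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1. Worked through for the settings above (a quick sanity check, not part of the page's source):

# Pooled size for height = width = 128, kernel_size = 2, stride = 2,
# padding = 1, dilation = 3:
pooled = (128 + 2 * 1 - 3 * (2 - 1) - 1) // 2 + 1  # 126 // 2 + 1
print(pooled)  # 64 -> output shape (16, 32, 64, 64)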

Kernel Information

Related Kernels (Level 1, Task 42 • 42_Max_Pooling_2D)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 textured_modular_unroll_base_base 0.02 1.43 3.04
🥇 max_pool2d_optimized_strided_base 0.02 1.43 3.04
🥇 tuned_block_size_maxpool2d_base_base 0.02 1.43 3.04
🥇 modularized_unroll_base_base 0.02 1.43 3.04
🥇 fused_unroll_constmem_pool_base 0.02 1.43 3.04
🥇 base_unrolled_combo_base 0.02 1.43 3.04
🥇 warp_divergence_optimized_unroll_base 0.02 1.43 3.04
8 max_pool2d_strided_base_base 0.02 1.37 2.91
8 streams_unroll_pipelined_batch_base_base 0.02 1.37 2.91
8 optimized_max_pool2d_base 0.02 1.37 2.91
8 42_Max_Pooling_2D_manual_unroll_base 0.02 1.37 2.91
8 coalesced_1d_unroll_base_base 0.02 1.37 2.91
8 max_pool2d_kernel_manually_unrolled_base_base 0.02 1.37 2.91
8 coalesced_ldg_unrolled_base_base 0.02 1.37 2.91
8 unrolled_coalesced_maxpool_base 0.02 1.37 2.91
8 max_pool2d_combined_base 0.02 1.37 2.91
8 max_pool2d_combined_optimized_base 0.02 1.37 2.91
8 unroll_base_optimized_base 0.02 1.37 2.91
8 fully_unrolled_maxpool2d_base_base 0.02 1.37 2.91

Kernel source:

#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <limits>
#include <stdexcept>

// Pool-window offsets for the generic path live in constant memory;
// 81 entries cover square windows up to 9x9.
#define MAX_POOL_SIZE 81

struct PoolOffset {
    int dr;
    int dc;
};

__constant__ PoolOffset d_pool_offsets[MAX_POOL_SIZE];
__constant__ int d_pool_offsets_count;

// One thread per output element: blockIdx.z enumerates (batch, channel)
// pairs, while x/y tile the output plane. Kernel sizes 2 and 3 take fully
// unrolled fast paths; other sizes walk the offsets in constant memory.
template <typename scalar_t>
__global__ void max_pool2d_combined_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int batch_size,
    const int channels,
    const int input_height,
    const int input_width,
    const int output_height,
    const int output_width,
    const int kernel_size,
    const int stride,
    const int padding,
    const int dilation
) {
    const int ow = blockIdx.x * blockDim.x + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const int bc = blockIdx.z;
    const int b = bc / channels;
    const int c = bc % channels;

    if (b >= batch_size || c >= channels || oh >= output_height || ow >= output_width)
        return;

    scalar_t max_val = -std::numeric_limits<scalar_t>::infinity();  // identity for max
    const scalar_t* input_channel = input + (b * channels + c) * input_height * input_width;

    // Top-left tap of this output element's (dilated) pooling window.
    const int base_ih = oh * stride - padding;
    const int base_iw = ow * stride - padding;

    if (kernel_size == 2) {  // unrolled 2x2 fast path
        #pragma unroll
        for (int kh = 0; kh < 2; kh++) {
            #pragma unroll
            for (int kw = 0; kw < 2; kw++) {
                const int ih = base_ih + kh * dilation;
                const int iw = base_iw + kw * dilation;
                if (ih >= 0 && ih < input_height && iw >= 0 && iw < input_width) {
                    max_val = max(max_val, input_channel[ih * input_width + iw]);
                }
            }
        }
    } else if (kernel_size == 3) {  // unrolled 3x3 fast path
        #pragma unroll
        for (int kh = 0; kh < 3; kh++) {
            #pragma unroll
            for (int kw = 0; kw < 3; kw++) {
                const int ih = base_ih + kh * dilation;
                const int iw = base_iw + kw * dilation;
                if (ih >= 0 && ih < input_height && iw >= 0 && iw < input_width) {
                    max_val = max(max_val, input_channel[ih * input_width + iw]);
                }
            }
        }
    } else {  // generic path: offsets staged by the host before launch
        for (int i = 0; i < d_pool_offsets_count; i++) {
            const int ih = base_ih + d_pool_offsets[i].dr;
            const int iw = base_iw + d_pool_offsets[i].dc;
            if (ih >= 0 && ih < input_height && iw >= 0 && iw < input_width) {
                max_val = max(max_val, input_channel[ih * input_width + iw]);
            }
        }
    }

    output[(b * channels + c) * output_height * output_width + oh * output_width + ow] = max_val;
}

torch::Tensor max_pool2d_cuda_forward(
    torch::Tensor input,
    int kernel_size,
    int stride,
    int padding,
    int dilation
) {
    const auto batch_size = input.size(0);
    const auto channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);

    // Dilated-pooling output size: floor((in + 2*pad - dil*(k-1) - 1) / stride) + 1.
    const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
    const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;

    auto output = torch::empty({batch_size, channels, output_height, output_width}, input.options());

    // For kernel sizes without an unrolled fast path, precompute the dilated
    // window offsets on the host and stage them in constant memory.
    if (kernel_size != 2 && kernel_size != 3) {
        int count = kernel_size * kernel_size;
        if (count > MAX_POOL_SIZE) {
            throw std::runtime_error("Kernel size too large for constant memory pool offsets");
        }
        PoolOffset h_pool_offsets[MAX_POOL_SIZE];
        for (int r = 0; r < kernel_size; r++) {
            for (int c = 0; c < kernel_size; c++) {
                h_pool_offsets[r * kernel_size + c].dr = r * dilation;
                h_pool_offsets[r * kernel_size + c].dc = c * dilation;
            }
        }
        cudaMemcpyToSymbol(d_pool_offsets, h_pool_offsets, count * sizeof(PoolOffset));
        cudaMemcpyToSymbol(d_pool_offsets_count, &count, sizeof(int));
    }

    const dim3 threads(32, 8);  // 256 threads; x spans width so writes coalesce
    const dim3 blocks(
        (output_width + threads.x - 1) / threads.x,
        (output_height + threads.y - 1) / threads.y,
        batch_size * channels
    );

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "max_pool2d_cuda_forward", ([&] {
        max_pool2d_combined_kernel<scalar_t><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            batch_size,
            channels,
            input_height,
            input_width,
            output_height,
            output_width,
            kernel_size,
            stride,
            padding,
            dilation
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &max_pool2d_cuda_forward, "Combined Max Pool 2D forward (CUDA)");
}
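
To exercise the kernel, one would typically JIT-compile it as a PyTorch extension and compare against F.max_pool2d. A minimal sketch, assuming the source above is saved as max_pool2d.cu (file and module names are illustrative, not from this page):

# Hypothetical harness; file/module names are illustrative.
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source above.
ext = load(name="max_pool2d_ext", sources=["max_pool2d.cu"])

x = torch.randn(16, 32, 128, 128, device="cuda")
out = ext.forward(x, 2, 2, 1, 3)  # kernel_size, stride, padding, dilation
ref = F.max_pool2d(x, kernel_size=2, stride=2, padding=1, dilation=3)
assert torch.allclose(out, ref)
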
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 2.636 inst/cycle 0.003 5
Executed Ipc Elapsed 2.160 inst/cycle 0.001 5
Issue Slots Busy 66.192 % 1.969 5
Issued Ipc Active 2.646 inst/cycle 0.003 5
SM Busy 66.524 % 1.989 5
Memory Throughput 2191519085183.700 byte/second 1819935192524472713216.000 5
Mem Busy 41.430 % 0.497 5
Max Bandwidth 65.476 % 1.494 5
L1/TEX Hit Rate 42.040 % 0.000 5
L2 Hit Rate 29.744 % 0.004 5
Mem Pipes Busy 26.592 % 0.192 5
Warp Cycles Per Issued Instruction 18.630 cycle 0.006 5
Warp Cycles Per Executed Instruction 18.700 cycle 0.006 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.280 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 79.016 % 0.030 5
Achieved Active Warps Per SM 50.574 warp 0.013 5
Analysis Rules
Rule Description
WRN HighPipeUtilization ALU is the highest-utilized pipeline (67.1%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. The pipeline is well-utilized, but might become a bottleneck if more work is added. Based on the number of executed instructions, the highest utilized pipeline (67.1%) is ALU. It executes integer and logic operations. Comparing the two, the overall pipeline utilization appears to be caused by frequent, low-latency instructions. See the Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-decoder) or hover over the pipeline name to understand the workloads handled by each pipeline. The Instruction Statistics section shows the mix of executed instructions in this kernel. Check the Warp State Statistics section for which reasons cause warps to stall.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (78.7%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 421252.86 μs
Device Time 3378.04 μs
Self CPU Time 41.37 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 421211.49 μs
Device Time 3378.04 μs
Self CPU Time 106.50 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 417492.00 μs
Device Time 0.00 μs
Self CPU Time 92.41 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 417186.42 μs
Device Time 0.00 μs
Self CPU Time 417186.42 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 619862.07 μs
Device Time 21321.48 μs
Self CPU Time 619862.07 μs
Self Device Time 21321.48 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void max_pool2d_combined_kernel<float>(float const*, float*, int, int, int, int, int, int, int, int, int, int)
CPU Time 0.00 μs
Device Time 165388.58 μs
Self CPU Time 0.00 μs
Self Device Time 165388.58 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 22600.31 μs
Device Time 42202.76 μs
Self CPU Time 22600.31 μs
Self Device Time 42202.76 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 134559.27 μs
Device Time 632367.89 μs
Self CPU Time 15286.05 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 119277.34 μs
Device Time 632367.89 μs
Self CPU Time 17143.72 μs
Self Device Time 632367.89 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 632444.79 μs
Self CPU Time 0.00 μs
Self Device Time 632444.79 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45293 warnings generated when compiling for host.
Suppressed 45325 warnings (45278 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:25:5: warning: 2 adjacent parameters of 'max_pool2d_combined_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
25 | const int input_width,
| ^~~~~~~~~~~~~~~~~~~~~~
26 | const int output_height,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:25:15: note: the first parameter in the range is 'input_width'
25 | const int input_width,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:26:15: note: the last parameter in the range is 'output_height'
26 | const int output_height,
| ^~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:27:5: warning: 3 adjacent parameters of 'max_pool2d_combined_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
27 | const int output_width,
| ^~~~~~~~~~~~~~~~~~~~~~~
28 | const int kernel_size,
| ~~~~~~~~~~~~~~~~~~~~~~
29 | const int stride,
| ~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:27:15: note: the first parameter in the range is 'output_width'
27 | const int output_width,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:29:15: note: the last parameter in the range is 'stride'
29 | const int stride,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:30:5: warning: 2 adjacent parameters of 'max_pool2d_combined_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
30 | const int padding,
| ^~~~~~~~~~~~~~~~~~
31 | const int dilation
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:30:15: note: the first parameter in the range is 'padding'
30 | const int padding,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:31:15: note: the last parameter in the range is 'dilation'
31 | const int dilation
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:33:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
33 | const int ow = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:34:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
34 | const int oh = blockIdx.y * blockDim.y + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:35:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
35 | const int bc = blockIdx.z;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:97:49: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
97 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:97:49: note: make conversion explicit to silence this warning
97 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:97:49: note: perform multiplication in a wider type
97 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:97:63: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
97 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:97:63: note: make conversion explicit to silence this warning
97 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:97:63: note: perform multiplication in a wider type
97 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:98:47: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
98 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:98:47: note: make conversion explicit to silence this warning
98 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:98:47: note: perform multiplication in a wider type
98 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:98:61: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
98 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:98:61: note: make conversion explicit to silence this warning
98 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:98:61: note: perform multiplication in a wider type
98 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:125:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
125 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:125:5: warning: 'scalar_type' is deprecated: passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, pass an at::ScalarType instead [clang-diagnostic-deprecated-declarations]
125 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:3: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:218:36: note: expanded from macro 'AT_DISPATCH_SWITCH'
218 | at::ScalarType _st = ::detail::scalar_type(the_type); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:106:1: note: 'scalar_type' has been explicitly marked deprecated here
106 | C10_DEPRECATED_MESSAGE(
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b8_s3_fused_unroll_constmem_pool/base/base.cu:125:38: warning: 'type' is deprecated: Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device(). [clang-diagnostic-deprecated-declarations]
125 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/core/TensorBody.h:224:3: note: 'type' has been explicitly marked deprecated here
224 | C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^