
The AI CUDA Engineer 👷

42_Max_Pooling_2D • textured_modular_unroll_base_base

Level 1 • Task 42
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    kernel_size: int,
    stride: int,
    padding: int,
    dilation: int,
) -> torch.Tensor:
    """
    Applies Max Pooling 2D using functional interface.

    Args:
        x (torch.Tensor): Input tensor
        kernel_size (int): Size of pooling window
        stride (int): Stride of pooling window
        padding (int): Padding to be applied
        dilation (int): Spacing between kernel elements

    Returns:
        torch.Tensor: Output tensor after max pooling
    """
    return F.max_pool2d(
        x, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation
    )


class Model(nn.Module):
    """
    Simple model that performs Max Pooling 2D.
    """

    def __init__(self, kernel_size: int, stride: int, padding: int, dilation: int):
        """
        Initializes the model parameters.
        """
        super(Model, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation

    def forward(self, x: torch.Tensor, fn=module_fn) -> torch.Tensor:
        """
        Forward pass that calls module_fn.
        """
        return fn(
            x,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
        )


batch_size = 16
channels = 32
height = 128
width = 128
kernel_size = 2
stride = 2
padding = 1
dilation = 3


def get_inputs():
    x = torch.randn(batch_size, channels, height, width)
    return [x]


def get_init_inputs():
    return [kernel_size, stride, padding, dilation]
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs Max Pooling 2D.
    """
    def __init__(self, kernel_size: int, stride: int, padding: int, dilation: int):
        """
        Initializes the Max Pooling 2D layer.

        Args:
            kernel_size (int): Size of the pooling window.
            stride (int): Stride of the pooling window.
            padding (int): Padding to be applied before pooling.
            dilation (int): Spacing between kernel elements.
        """
        super(Model, self).__init__()
        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Applies Max Pooling 2D to the input tensor.

        Args:
            x (torch.Tensor): Input tensor of shape (batch_size, channels, height, width).

        Returns:
            torch.Tensor: Output tensor after Max Pooling 2D, shape (batch_size, channels, pooled_height, pooled_width).
        """
        return self.maxpool(x)

batch_size = 16
channels = 32
height = 128
width = 128
kernel_size = 2
stride = 2
padding = 1
dilation = 3

def get_inputs():
    x = torch.randn(batch_size, channels, height, width)
    return [x]

def get_init_inputs():
    return [kernel_size, stride, padding, dilation]
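
As a quick sanity check (a sketch, not part of the benchmark harness; the helper name pooled_size is illustrative), the output spatial size follows the standard max-pooling formula floor((size + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1, which gives 64x64 outputs for the 128x128 inputs and parameters used here:

import torch
import torch.nn.functional as F

# Benchmark parameters from this task
batch_size, channels, height, width = 16, 32, 128, 128
kernel_size, stride, padding, dilation = 2, 2, 1, 3

def pooled_size(size, kernel_size, stride, padding, dilation):
    # Standard max-pooling output-size formula (ceil_mode=False)
    return (size + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1

x = torch.randn(batch_size, channels, height, width)
y = F.max_pool2d(x, kernel_size=kernel_size, stride=stride,
                 padding=padding, dilation=dilation)
expected = (batch_size, channels,
            pooled_size(height, kernel_size, stride, padding, dilation),
            pooled_size(width, kernel_size, stride, padding, dilation))
assert y.shape == torch.Size(expected)  # (16, 32, 64, 64) for these parameters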

Kernel Information

Related Kernels (Level 1, Task 42 • 42_Max_Pooling_2D)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 textured_modular_unroll_base_base 0.02 1.43 3.04
🥇 max_pool2d_optimized_strided_base 0.02 1.43 3.04
🥇 tuned_block_size_maxpool2d_base_base 0.02 1.43 3.04
🥇 modularized_unroll_base_base 0.02 1.43 3.04
🥇 fused_unroll_constmem_pool_base 0.02 1.43 3.04
🥇 base_unrolled_combo_base 0.02 1.43 3.04
🥇 warp_divergence_optimized_unroll_base 0.02 1.43 3.04
8 max_pool2d_strided_base_base 0.02 1.37 2.91
8 streams_unroll_pipelined_batch_base_base 0.02 1.37 2.91
8 optimized_max_pool2d_base 0.02 1.37 2.91
8 42_Max_Pooling_2D_manual_unroll_base 0.02 1.37 2.91
8 coalesced_1d_unroll_base_base 0.02 1.37 2.91
8 max_pool2d_kernel_manually_unrolled_base_base 0.02 1.37 2.91
8 coalesced_ldg_unrolled_base_base 0.02 1.37 2.91
8 unrolled_coalesced_maxpool_base 0.02 1.37 2.91
8 max_pool2d_combined_base 0.02 1.37 2.91
8 max_pool2d_combined_optimized_base 0.02 1.37 2.91
8 unroll_base_optimized_base 0.02 1.37 2.91
8 max_pool2d_combined_base 0.02 1.37 2.91
8 fully_unrolled_maxpool2d_base_base 0.02 1.37 2.91
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <limits>
#include <vector>

constexpr int MAX_KERNEL_SIZE = 7;

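// Compile-time specialized window scan for common small kernel sizes; both loops are fully unrolled.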
template <int KERNEL_SIZE, typename scalar_t>
__device__ void process_kernel(
    const scalar_t* __restrict__ input_channel,
    scalar_t& max_val,
    int base_ih,
    int base_iw,
    int dilation,
    int input_height,
    int input_width
) {
    #pragma unroll
    for (int kh = 0; kh < KERNEL_SIZE; kh++) {
        const int ih = base_ih + kh * dilation;
        const bool valid_h = ih >= 0 && ih < input_height;
        
        #pragma unroll
        for (int kw = 0; kw < KERNEL_SIZE; kw++) {
            const int iw = base_iw + kw * dilation;
            if (valid_h && iw >= 0 && iw < input_width) {
                max_val = max(max_val, __ldg(&input_channel[ih * input_width + iw]));
            }
        }
    }
}

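// Runtime-sized fallback for kernel sizes not specialized above (used up to MAX_KERNEL_SIZE).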
template <typename scalar_t>
__device__ void process_kernel_generic(
    const scalar_t* __restrict__ input_channel,
    scalar_t& max_val,
    int base_ih,
    int base_iw,
    int dilation,
    int input_height,
    int input_width,
    int kernel_size
) {
    for (int kh = 0; kh < kernel_size; kh++) {
        const int ih = base_ih + kh * dilation;
        const bool valid_h = ih >= 0 && ih < input_height;
        
        #pragma unroll 4
        for (int kw = 0; kw < kernel_size; kw++) {
            const int iw = base_iw + kw * dilation;
            if (valid_h && iw >= 0 && iw < input_width) {
                max_val = max(max_val, __ldg(&input_channel[ih * input_width + iw]));
            }
        }
    }
}

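// Each thread computes up to two horizontally adjacent outputs (the x-dimension is unrolled 2x)
// to improve memory coalescing; __ldg() routes loads through the read-only ("texture") data
// cache, hence the kernel's name. Kernel sizes above MAX_KERNEL_SIZE are not handled and would
// leave the corresponding outputs at -infinity.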
template <typename scalar_t>
__global__ void max_pool2d_textured_kernel(
    const scalar_t* __restrict__ input,
    scalar_t* __restrict__ output,
    const int batch_size,
    const int channels,
    const int input_height,
    const int input_width,
    const int output_height,
    const int output_width,
    const int kernel_size,
    const int stride,
    const int padding,
    const int dilation
) {
    const int ow = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    const int oh = blockIdx.y * blockDim.y + threadIdx.y;
    const int bc = blockIdx.z;
    const int b = bc / channels;
    const int c = bc % channels;

    if (oh >= output_height) return;

    scalar_t max_val[2] = {-std::numeric_limits<scalar_t>::infinity(),
                          -std::numeric_limits<scalar_t>::infinity()};
    const scalar_t* input_channel = input + (b * channels + c) * input_height * input_width;

    for (int i = 0; i < 2; ++i) {
        const int curr_ow = ow + i * blockDim.x;
        if (curr_ow >= output_width) break;

        const int base_ih = oh * stride - padding;
        const int base_iw = curr_ow * stride - padding;

        switch (kernel_size) {
            case 2: process_kernel<2>(input_channel, max_val[i], base_ih, base_iw,
                                     dilation, input_height, input_width); break;
            case 3: process_kernel<3>(input_channel, max_val[i], base_ih, base_iw,
                                     dilation, input_height, input_width); break;
            default:
                if (kernel_size <= MAX_KERNEL_SIZE) {
                    process_kernel_generic(input_channel, max_val[i], base_ih, base_iw,
                                          dilation, input_height, input_width, kernel_size);
                }
        }

        if (curr_ow < output_width) {
            output[(b * channels + c) * output_height * output_width + oh * output_width + curr_ow] = max_val[i];
        }
    }
}

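// Host-side launcher: computes the output geometry, allocates the output tensor, and dispatches
// the templated kernel over the floating-point dtypes.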
torch::Tensor max_pool2d_cuda_forward(
    torch::Tensor input,
    int kernel_size,
    int stride,
    int padding,
    int dilation
) {
    const auto batch_size = input.size(0);
    const auto channels = input.size(1);
    const auto input_height = input.size(2);
    const auto input_width = input.size(3);

    const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
    const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;

    auto output = torch::empty({batch_size, channels, output_height, output_width}, input.options());

    const dim3 threads(32, 4);  // 2x wider blocks for better memory coalescing
    const dim3 blocks(
        (output_width + (threads.x * 2) - 1) / (threads.x * 2),
        (output_height + threads.y - 1) / threads.y,
        batch_size * channels
    );

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "max_pool2d_cuda_forward", ([&] {
        max_pool2d_textured_kernel<scalar_t><<<blocks, threads>>>(
            input.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            batch_size,
            channels,
            input_height,
            input_width,
            output_height,
            output_width,
            kernel_size,
            stride,
            padding,
            dilation
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &max_pool2d_cuda_forward, "Textured Max Pool 2D forward (CUDA)");
}
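
To exercise the kernel end to end, the extension can be JIT-compiled and compared against F.max_pool2d. The sketch below assumes the CUDA source above is saved as max_pool2d_textured.cu; the file name and module name are illustrative, and a CUDA toolchain plus a GPU are required:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source above (file and module names are placeholders)
ext = load(name="max_pool2d_textured",
           sources=["max_pool2d_textured.cu"],
           verbose=True)

x = torch.randn(16, 32, 128, 128, device="cuda")
out_custom = ext.forward(x, 2, 2, 1, 3)  # kernel_size, stride, padding, dilation
out_ref = F.max_pool2d(x, kernel_size=2, stride=2, padding=1, dilation=3)
torch.testing.assert_close(out_custom, out_ref)
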
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 2.042 inst/cycle 0.000 5
Executed Ipc Elapsed 1.610 inst/cycle 0.000 5
Issue Slots Busy 51.560 % 0.006 5
Issued Ipc Active 2.062 inst/cycle 0.000 5
SM Busy 52.346 % 0.006 5
Memory Throughput 2320878265662.806 byte/second 56325075479940390912.000 5
Mem Busy 42.600 % 0.014 5
Max Bandwidth 69.430 % 0.056 5
L1/TEX Hit Rate 47.188 % 0.000 5
L2 Hit Rate 25.236 % 0.010 5
Mem Pipes Busy 35.102 % 0.015 5
Warp Cycles Per Issued Instruction 25.492 cycle 0.423 5
Warp Cycles Per Executed Instruction 25.744 cycle 0.429 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 30.990 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 16.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 16.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 81.738 % 0.012 5
Achieved Active Warps Per SM 52.314 warp 0.005 5
Analysis Rules
Rule Description
INF HighPipeUtilization ALU is the highest-utilized pipeline (52.2%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (81.7%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
Operation / Metric Value Unit
aten::to
CPU Time 226409.78 μs
Device Time 3307.54 μs
Self CPU Time 47.06 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 226362.72 μs
Device Time 3307.54 μs
Self CPU Time 117.66 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 222681.60 μs
Device Time 0.00 μs
Self CPU Time 224.60 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 222204.22 μs
Device Time 0.00 μs
Self CPU Time 222204.22 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 539167.76 μs
Device Time 18588.71 μs
Self CPU Time 539167.76 μs
Self Device Time 18588.71 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void max_pool2d_textured_kernel<float>(float const*, float*, int, int, int, int, int, int, int, int, int, int)
CPU Time 0.00 μs
Device Time 139985.35 μs
Self CPU Time 0.00 μs
Self Device Time 139985.35 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 19515.91 μs
Device Time 36786.28 μs
Self CPU Time 19515.91 μs
Self Device Time 36786.28 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 113721.03 μs
Device Time 552763.23 μs
Self CPU Time 12877.09 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 100845.61 μs
Device Time 552763.23 μs
Self CPU Time 15606.94 μs
Self Device Time 552763.23 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 552763.23 μs
Self CPU Time 0.00 μs
Self Device Time 552763.23 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45299 warnings generated when compiling for host.
Suppressed 45325 warnings (45278 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:14:5: warning: 3 adjacent parameters of 'process_kernel' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
14 | int dilation,
| ^~~~~~~~~~~~~
15 | int input_height,
| ~~~~~~~~~~~~~~~~~
16 | int input_width
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:14:9: note: the first parameter in the range is 'dilation'
14 | int dilation,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:16:9: note: the last parameter in the range is 'input_width'
16 | int input_width
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:39:5: warning: 4 adjacent parameters of 'process_kernel_generic' of similar type ('int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
39 | int dilation,
| ^~~~~~~~~~~~~
40 | int input_height,
| ~~~~~~~~~~~~~~~~~
41 | int input_width,
| ~~~~~~~~~~~~~~~~
42 | int kernel_size
| ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:39:9: note: the first parameter in the range is 'dilation'
39 | int dilation,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:42:9: note: the last parameter in the range is 'kernel_size'
42 | int kernel_size
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:62:5: warning: 2 adjacent parameters of 'max_pool2d_textured_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
62 | const int batch_size,
| ^~~~~~~~~~~~~~~~~~~~~
63 | const int channels,
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:62:15: note: the first parameter in the range is 'batch_size'
62 | const int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:63:15: note: the last parameter in the range is 'channels'
63 | const int channels,
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:65:5: warning: 2 adjacent parameters of 'max_pool2d_textured_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
65 | const int input_width,
| ^~~~~~~~~~~~~~~~~~~~~~
66 | const int output_height,
| ~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:65:15: note: the first parameter in the range is 'input_width'
65 | const int input_width,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:66:15: note: the last parameter in the range is 'output_height'
66 | const int output_height,
| ^~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:67:5: warning: 3 adjacent parameters of 'max_pool2d_textured_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
67 | const int output_width,
| ^~~~~~~~~~~~~~~~~~~~~~~
68 | const int kernel_size,
| ~~~~~~~~~~~~~~~~~~~~~~
69 | const int stride,
| ~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:67:15: note: the first parameter in the range is 'output_width'
67 | const int output_width,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:69:15: note: the last parameter in the range is 'stride'
69 | const int stride,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:70:5: warning: 2 adjacent parameters of 'max_pool2d_textured_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
70 | const int padding,
| ^~~~~~~~~~~~~~~~~~
71 | const int dilation
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:70:15: note: the first parameter in the range is 'padding'
70 | const int padding,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:71:15: note: the last parameter in the range is 'dilation'
71 | const int dilation
| ^~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:73:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
73 | const int ow = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:74:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
74 | const int oh = blockIdx.y * blockDim.y + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:75:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
75 | const int bc = blockIdx.z;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:86:29: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
86 | const int curr_ow = ow + i * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:49: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:49: note: make conversion explicit to silence this warning
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:49: note: perform multiplication in a wider type
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:63: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:63: note: make conversion explicit to silence this warning
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:122:63: note: perform multiplication in a wider type
122 | const auto output_height = ((input_height + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:47: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:47: note: make conversion explicit to silence this warning
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:47: note: perform multiplication in a wider type
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:61: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:61: note: make conversion explicit to silence this warning
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:123:61: note: perform multiplication in a wider type
123 | const auto output_width = ((input_width + 2 * padding - dilation * (kernel_size - 1) - 1) / stride) + 1;
| ^~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:26: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'unsigned int' [bugprone-implicit-widening-of-multiplication-result]
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:26: note: make conversion explicit to silence this warning
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:26: note: perform multiplication in a wider type
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:49: warning: performing an implicit widening conversion to type 'int64_t' (aka 'long') of a multiplication performed in type 'unsigned int' [bugprone-implicit-widening-of-multiplication-result]
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:49: note: make conversion explicit to silence this warning
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^~~~~~~~~~~~~
| static_cast<int64_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:129:49: note: perform multiplication in a wider type
129 | (output_width + (threads.x * 2) - 1) / (threads.x * 2),
| ^~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:134:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
134 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:134:5: warning: 'scalar_type' is deprecated: passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, pass an at::ScalarType instead [clang-diagnostic-deprecated-declarations]
134 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:3: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:218:36: note: expanded from macro 'AT_DISPATCH_SWITCH'
218 | at::ScalarType _st = ::detail::scalar_type(the_type); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:106:1: note: 'scalar_type' has been explicitly marked deprecated here
106 | C10_DEPRECATED_MESSAGE(
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu/level_1/task_42/b10_s3_textured_modular_unroll_base/base/base.cu:134:38: warning: 'type' is deprecated: Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device(). [clang-diagnostic-deprecated-declarations]
134 | AT_DISPATCH_FLOATING_TYPES(input.type(), "max_pool2d_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/core/TensorBody.h:224:3: note: 'type' has been explicitly marked deprecated here
224 | C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^