
The AI CUDA Engineer 👷

9_Matmul_Subtract_Multiply_ReLU • shared_mem_tiled_base

Level 2 • Task 9
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    linear_weight: torch.Tensor,
    linear_bias: torch.Tensor,
    subtract_value: float,
    multiply_value: float,
) -> torch.Tensor:
    """
    Applies linear transformation, subtraction, multiplication and ReLU activation.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        linear_weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
        linear_bias (torch.Tensor): Bias vector of shape (out_features)
        subtract_value (float): Value to subtract
        multiply_value (float): Value to multiply

    Returns:
        torch.Tensor: Output tensor after applying linear transformation, subtraction,
            multiplication and ReLU, with shape (batch_size, out_features)
    """
    x = F.linear(x, linear_weight, linear_bias)
    x = x - subtract_value
    x = x * multiply_value
    x = torch.relu(x)
    return x


class Model(nn.Module):
    """
    Model that performs a matrix multiplication, subtraction, multiplication, and ReLU activation.
    """

    def __init__(self, in_features, out_features, subtract_value, multiply_value):
        super(Model, self).__init__()
        self.linear_weight = nn.Parameter(torch.randn(out_features, in_features) * 0.02)
        self.linear_bias = nn.Parameter(torch.randn(out_features) * 0.02)
        self.subtract_value = subtract_value
        self.multiply_value = multiply_value

    def forward(self, x, fn=module_fn):
        return fn(
            x,
            self.linear_weight,
            self.linear_bias,
            self.subtract_value,
            self.multiply_value,
        )


batch_size = 128
in_features = 10
out_features = 5
subtract_value = 2.0
multiply_value = 1.5


def get_inputs():
    return [torch.randn(batch_size, in_features)]


def get_init_inputs():
    return [in_features, out_features, subtract_value, multiply_value]

import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs a matrix multiplication, subtraction, multiplication, and ReLU activation.
    """
    def __init__(self, in_features, out_features, subtract_value, multiply_value):
        super(Model, self).__init__()
        self.linear = nn.Linear(in_features, out_features)
        self.subtract_value = subtract_value
        self.multiply_value = multiply_value

    def forward(self, x):
        x = self.linear(x)
        x = x - self.subtract_value
        x = x * self.multiply_value
        x = torch.relu(x)
        return x

batch_size = 128
in_features = 10
out_features = 5
subtract_value = 2.0
multiply_value = 1.5

def get_inputs():
    return [torch.randn(batch_size, in_features)]

def get_init_inputs():
    return [in_features, out_features, subtract_value, multiply_value]
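
The two listings above define the same computation twice: once as a pure function (module_fn) over explicit parameter tensors, and once as a self-contained nn.Module built on nn.Linear. A minimal consistency sketch, assuming both listings are loaded into one scope (the second Model definition shadows the first):

import torch

# Sketch: check that module_fn and the nn.Linear-based Model agree when they
# share the same weights. Assumes module_fn, Model, get_inputs, and the shape
# constants above are all in scope.
torch.manual_seed(0)
model = Model(in_features, out_features, subtract_value, multiply_value)
x = get_inputs()[0]

out_module = model(x)  # nn.Linear -> subtract -> multiply -> ReLU
out_fn = module_fn(
    x,
    model.linear.weight,
    model.linear.bias,
    subtract_value,
    multiply_value,
)
assert torch.allclose(out_module, out_fn)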

Kernel Information

Related Kernels (Level 2, Task 9 • 9_Matmul_Subtract_Multiply_ReLU)

Rank  Kernel Name  Runtime (ms)  Speedup (Native)  Speedup (Compile)
🥇 unrolled_loop_kernel_base 0.01 4.05 2.63
🥇 9_Matmul_Subtract_Multiply_ReLU 0.01 4.05 2.63
🥇 9_matmul_subtract_multiply_relu_unroll_base 0.01 4.05 2.63
🥇 modular_matmul_subtract_multiply_relu_base 0.01 4.05 2.63
🥇 efficient_indexing_tile_kernel_base 0.01 4.05 2.63
🥇 efficient_thread_block_mapping_base 0.01 4.05 2.63
🥇 warp_divergence_optimized_base 0.01 4.05 2.63
🥇 warp_level_fused_kernel_base 0.01 4.05 2.63
🥇 shared_mem_tiled_base 0.01 4.05 2.63
🥇 tiled_sharedmem_optimized_base 0.01 4.05 2.63
🥇 warp_level_reduction_kernel_base 0.01 4.05 2.63
🥇 strided_thread_blocks_base_base 0.01 4.05 2.63
🥇 optimized_block_size_base 0.01 4.05 2.63
🥇 double_buffered_tiled_kernel_base 0.01 4.05 2.63
🥇 coalesced_memory_matmul_base_base 0.01 4.05 2.63
🥇 tiled_matmul_shared_mem_base 0.01 4.05 2.63
🥇 optimized_tiled_2d_base 0.01 4.05 2.63
🥇 matmul_1d_thread_mapping_base 0.01 4.05 2.63
🥇 modularized_matmul_ops_base 0.01 4.05 2.63
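
For orientation, the two speedup columns are multipliers over the eager PyTorch baseline and the torch.compile baseline respectively, so the implied baseline runtimes can be backed out from the kernel runtime (a sketch; the baseline figures below are inferred from the table, not reported directly):

kernel_ms = 0.01
speedup_native = 4.05    # vs. eager PyTorch
speedup_compile = 2.63   # vs. torch.compile
print(kernel_ms * speedup_native)   # implied eager baseline: ~0.0405 ms
print(kernel_ms * speedup_compile)  # implied compiled baseline: ~0.0263 ms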
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

template <typename scalar_t>
__global__ void optimized_fused_kernel(
    const scalar_t* __restrict__ input,
    const scalar_t* __restrict__ weight,
    const scalar_t* __restrict__ bias,
    scalar_t* __restrict__ output,
    const int batch_size,
    const int in_features,
    const int out_features,
    const float subtract_value,
    const float multiply_value) {

    // Dynamic shared memory holds one TILE x TILE tile of the input matrix and
    // one of the weight matrix per iteration over the in_features dimension.
    extern __shared__ char shared_mem[];
    const int TILE = 16;
    scalar_t* in_tile = (scalar_t*)shared_mem;
    scalar_t* wt_tile = (scalar_t*)&in_tile[TILE * TILE];

    // One output element per thread: row indexes the batch, col the output feature.
    int row = blockIdx.x * TILE + threadIdx.x;
    int col = blockIdx.y * TILE + threadIdx.y;

    scalar_t sum = 0;

    // Walk the in_features dimension one TILE-wide slab at a time.
    for (int t = 0; t < (in_features + TILE - 1) / TILE; ++t) {
        int load_row = row < batch_size ? t * TILE + threadIdx.y : 0;
        int load_col = col < out_features ? t * TILE + threadIdx.x : 0;

        // Stage the input tile: in_tile[tx][ty] = input[row][t*TILE + ty].
        if (load_row < in_features && row < batch_size)
            in_tile[threadIdx.x * TILE + threadIdx.y] = input[row * in_features + load_row];
        else
            in_tile[threadIdx.x * TILE + threadIdx.y] = 0;

        // Stage the weight tile: wt_tile[ty][tx] = weight[col][t*TILE + tx].
        if (load_col < in_features && col < out_features)
            wt_tile[threadIdx.y * TILE + threadIdx.x] = weight[col * in_features + load_col];
        else
            wt_tile[threadIdx.y * TILE + threadIdx.x] = 0;

        __syncthreads();

        // Partial dot product for this slab; wt_tile is read back in the same
        // [ty][k] layout it was stored in.
        for (int k = 0; k < TILE; ++k) {
            sum += in_tile[threadIdx.x * TILE + k] * wt_tile[threadIdx.y * TILE + k];
        }
        __syncthreads();
    }

    // Fused epilogue: add bias, subtract, scale, then ReLU.
    if (row < batch_size && col < out_features) {
        sum += bias[col];
        sum = (sum - subtract_value) * multiply_value;
        output[row * out_features + col] = max(sum, scalar_t(0));
    }
}

torch::Tensor forward(
    torch::Tensor input,
    torch::Tensor weight,
    torch::Tensor bias,
    float subtract_value,
    float multiply_value) {
    
    auto batch_size = input.size(0);
    auto in_features = input.size(1);
    auto out_features = weight.size(0);

    auto output = torch::empty({batch_size, out_features}, input.options());

    const int TILE = 16;
    dim3 threads(TILE, TILE);
    dim3 blocks(
        (batch_size + TILE - 1) / TILE,
        (out_features + TILE - 1) / TILE
    );

    // Size the dynamic shared memory for the dispatched element type, so that
    // double inputs also get a large enough allocation.
    size_t shared_mem_size = 2 * TILE * TILE * input.element_size();

    AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "optimized_fused_kernel", ([&] {
        optimized_fused_kernel<scalar_t><<<blocks, threads, shared_mem_size>>>(
            input.data_ptr<scalar_t>(),
            weight.data_ptr<scalar_t>(),
            bias.data_ptr<scalar_t>(),
            output.data_ptr<scalar_t>(),
            batch_size,
            in_features,
            out_features,
            subtract_value,
            multiply_value
        );
    }));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Optimized fused kernel with shared memory tiling");
}
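
To try the kernel end to end, one plausible harness (a sketch, assuming the source above is saved as shared_mem_tiled.cu and a CUDA device is available) builds it as a JIT extension and checks it against the PyTorch reference:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Build the extension from the CUDA source above (the file name is an assumption).
ext = load(name="shared_mem_tiled", sources=["shared_mem_tiled.cu"])

x = torch.randn(128, 10, device="cuda")
w = torch.randn(5, 10, device="cuda") * 0.02
b = torch.randn(5, device="cuda") * 0.02

out = ext.forward(x, w, b, 2.0, 1.5)
ref = torch.relu((F.linear(x, w, b) - 2.0) * 1.5)
print(torch.allclose(out, ref, atol=1e-5))  # expect True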
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.290 inst/cycle 0.000 5
Executed Ipc Elapsed 0.010 inst/cycle 0.000 5
Issue Slots Busy 7.638 % 0.049 5
Issued Ipc Active 0.304 inst/cycle 0.000 5
SM Busy 7.638 % 0.049 5
Memory Throughput 2525170967.498 byte/second 993208890869342.000 5
Mem Busy 8.314 % 0.042 5
Max Bandwidth 4.250 % 0.004 5
L1/TEX Hit Rate 68.188 % 0.258 5
L2 Hit Rate 100.922 % 0.415 5
Mem Pipes Busy 0.212 % 0.000 5
Warp Cycles Per Issued Instruction 22.596 cycle 0.355 5
Warp Cycles Per Executed Instruction 23.854 cycle 0.397 5
Avg. Active Threads Per Warp 31.730 0.000 5
Avg. Not Predicated Off Threads Per Warp 29.550 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 21.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 10.868 % 0.013 5
Achieved Active Warps Per SM 6.956 warp 0.005 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (10.8%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
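
The occupancy warning follows directly from the launch geometry at this problem size: the grid is far too small to fill the GPU. A back-of-envelope sketch (the SM count below is an illustrative assumption, not part of the report):

import math

batch_size, out_features, TILE = 128, 5, 16

blocks = math.ceil(batch_size / TILE) * math.ceil(out_features / TILE)  # 8 * 1 = 8
warps_per_block = (TILE * TILE) // 32                                   # 256 threads = 8 warps
total_warps = blocks * warps_per_block                                  # 64 warps on the whole GPU

num_sms = 48  # assumed; depends on the GPU
# With at most one block resident per SM, an SM runs 8 of its 64 warp slots:
print(warps_per_block / 64 * 100, "% per-SM occupancy")  # 12.5%, near the measured 10.9%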
Operation / Metric Value Unit
aten::to
CPU Time 291792.66 μs
Device Time 5.41 μs
Self CPU Time 50.49 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 291742.17 μs
Device Time 5.41 μs
Self CPU Time 97.48 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 291526.95 μs
Device Time 0.00 μs
Self CPU Time 90.58 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 291246.63 μs
Device Time 0.00 μs
Self CPU Time 291246.63 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 499518.76 μs
Device Time 21884.90 μs
Self CPU Time 499518.76 μs
Self Device Time 21884.90 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void optimized_fused_kernel<float>(float const*, float const*, float const*, float*, int, int, int, float, float)
CPU Time 0.00 μs
Device Time 29454.46 μs
Self CPU Time 0.00 μs
Self Device Time 29454.46 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 22608.05 μs
Device Time 43708.24 μs
Self CPU Time 22608.05 μs
Self Device Time 43708.24 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 67430.54 μs
Device Time 650735.39 μs
Self CPU Time 13489.22 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 53942.39 μs
Device Time 650735.39 μs
Self CPU Time 17699.15 μs
Self Device Time 650735.39 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 650735.39 μs
Self CPU Time 0.00 μs
Self Device Time 650735.39 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45291 warnings generated when compiling for host.
Suppressed 45325 warnings (45278 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:7:5: warning: 3 adjacent parameters of 'optimized_fused_kernel' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
7 | const scalar_t* __restrict__ input,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 | const scalar_t* __restrict__ weight,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 | const scalar_t* __restrict__ bias,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:7:34: note: the first parameter in the range is 'input'
7 | const scalar_t* __restrict__ input,
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:9:34: note: the last parameter in the range is 'bias'
9 | const scalar_t* __restrict__ bias,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:13:5: warning: 2 adjacent parameters of 'optimized_fused_kernel' of convertible types are easily swapped by mistake [bugprone-easily-swappable-parameters]
13 | const int out_features,
| ^~~~~~~~~~~~~~~~~~~~~~~
14 | const float subtract_value,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:13:15: note: the first parameter in the range is 'out_features'
13 | const int out_features,
| ^~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:14:17: note: the last parameter in the range is 'subtract_value'
14 | const float subtract_value,
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:14:5: note: 'const int' and 'const float' may be implicitly converted: 'const int' (as 'int') -> 'const float' (as 'float'), 'const float' (as 'float') -> 'const int' (as 'int')
14 | const float subtract_value,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:20:37: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
20 | scalar_t* wt_tile = (scalar_t*)&in_tile[TILE * TILE];
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:20:45: note: make conversion explicit to silence this warning
20 | scalar_t* wt_tile = (scalar_t*)&in_tile[TILE * TILE];
| ^~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:20:45: note: perform multiplication in a wider type
20 | scalar_t* wt_tile = (scalar_t*)&in_tile[TILE * TILE];
| ^~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:22:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
22 | int row = blockIdx.x * TILE + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:23:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
23 | int col = blockIdx.y * TILE + threadIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:28:52: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
28 | int load_row = row < batch_size ? t * TILE + threadIdx.y : 0;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:29:54: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
29 | int load_col = col < out_features ? t * TILE + threadIdx.x : 0;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:76:30: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
76 | size_t shared_mem_size = 2 * TILE * TILE * sizeof(float);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:76:30: note: make conversion explicit to silence this warning
76 | size_t shared_mem_size = 2 * TILE * TILE * sizeof(float);
| ^~~~~~~~~~~~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:76:30: note: perform multiplication in a wider type
76 | size_t shared_mem_size = 2 * TILE * TILE * sizeof(float);
| ^~~~~~~~
| static_cast<long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:78:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
78 | AT_DISPATCH_FLOATING_TYPES(input.type(), "optimized_fused_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:78:5: warning: 'scalar_type' is deprecated: passing at::DeprecatedTypeProperties to an AT_DISPATCH macro is deprecated, pass an at::ScalarType instead [clang-diagnostic-deprecated-declarations]
78 | AT_DISPATCH_FLOATING_TYPES(input.type(), "optimized_fused_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:3: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:218:36: note: expanded from macro 'AT_DISPATCH_SWITCH'
218 | at::ScalarType _st = ::detail::scalar_type(the_type); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:106:1: note: 'scalar_type' has been explicitly marked deprecated here
106 | C10_DEPRECATED_MESSAGE(
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250213_optimize_b10_s4_e0_sweep_rag_translate/level_2/task_9/b4_s3_shared_mem_tiled/base/base.cu:78:38: warning: 'type' is deprecated: Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device(). [clang-diagnostic-deprecated-declarations]
78 | AT_DISPATCH_FLOATING_TYPES(input.type(), "optimized_fused_kernel", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/core/TensorBody.h:224:3: note: 'type' has been explicitly marked deprecated here
224 | C10_DEPRECATED_MESSAGE("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().")
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Deprecated.h:24:43: note: expanded from macro 'C10_DEPRECATED_MESSAGE'
24 | #define C10_DEPRECATED_MESSAGE(message) [[deprecated(message)]]
| ^