The AI CUDA Engineer 👷

35_LTSM • 35_lstm_workload_balanced_base

Level 3 • Task 35
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import _VF


def module_fn(
    x: torch.Tensor,
    lstm_weights_ih: torch.Tensor,
    lstm_weights_hh: torch.Tensor,
    lstm_biases_ih: torch.Tensor,
    lstm_biases_hh: torch.Tensor,
    fc_weight: torch.Tensor,
    fc_bias: torch.Tensor,
    h0: torch.Tensor,
    c0: torch.Tensor,
    is_training: bool,
) -> torch.Tensor:
    """
    LSTM forward pass

    Args:
        x: Input tensor of shape (batch_size, sequence_length, input_size)
        lstm_weights_ih: List of input-hidden weight tensors for each LSTM layer
        lstm_weights_hh: List of hidden-hidden weight tensors for each LSTM layer
        lstm_biases_ih: List of input-hidden bias tensors for each LSTM layer
        lstm_biases_hh: List of hidden-hidden bias tensors for each LSTM layer
        fc_weight: Weight tensor for final linear layer
        fc_bias: Bias tensor for final linear layer
        h0: Initial hidden state
        c0: Initial cell state
        is_training: Whether in training mode

    Returns:
        Output tensor of shape (batch_size, output_size)
    """
    h0 = h0.to(x.device)
    c0 = c0.to(x.device)

    # Run LSTM layers
    out = x

    for i in range(len(lstm_weights_ih)):
        params = (
            lstm_weights_ih[i],
            lstm_weights_hh[i],
            lstm_biases_ih[i],
            lstm_biases_hh[i],
        )
        out = _VF.lstm(
            out,
            (h0[i : i + 1], c0[i : i + 1]),
            params,
            True,  # has_biases
            1,  # num_layers
            0.0 if not is_training else dropout,  # dropout (module-level constant; has no effect for a single-layer call)
            is_training,  # training
            False,  # bidirectional
            True,  # batch_first
        )[0]  # keep only the output sequence

    # Get last timestep and apply final linear layer
    out = F.linear(out[:, -1, :], fc_weight, fc_bias)

    return out


class Model(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.0):
        """
        Initialize the LSTM model.

        :param input_size: The number of expected features in the input `x`
        :param hidden_size: The number of features in the hidden state `h`
        :param num_layers: Number of recurrent layers
        :param output_size: The number of output features
        :param dropout: If non-zero, introduces a Dropout layer on the outputs of each LSTM layer except the last layer
        """
        super(Model, self).__init__()

        # Initialize hidden states
        self.h0 = torch.randn((num_layers, batch_size, hidden_size))
        self.c0 = torch.randn((num_layers, batch_size, hidden_size))

        # Extract LSTM parameters
        lstm = nn.LSTM(
            input_size,
            hidden_size,
            num_layers,
            batch_first=True,
            dropout=dropout,
            bidirectional=False,
        )

        # Get weights and biases for each layer
        self.lstm_weights_ih = nn.ParameterList()
        self.lstm_weights_hh = nn.ParameterList()
        self.lstm_biases_ih = nn.ParameterList()
        self.lstm_biases_hh = nn.ParameterList()

        for i in range(num_layers):
            self.lstm_weights_ih.append(
                nn.Parameter(getattr(lstm, f"weight_ih_l{i}").data.clone())
            )
            self.lstm_weights_hh.append(
                nn.Parameter(getattr(lstm, f"weight_hh_l{i}").data.clone())
            )
            self.lstm_biases_ih.append(
                nn.Parameter(getattr(lstm, f"bias_ih_l{i}").data.clone())
            )
            self.lstm_biases_hh.append(
                nn.Parameter(getattr(lstm, f"bias_hh_l{i}").data.clone())
            )

        # Extract linear layer parameters
        fc = nn.Linear(hidden_size, output_size)
        self.fc_weight = nn.Parameter(fc.weight.data.clone())
        self.fc_bias = nn.Parameter(fc.bias.data.clone())

    def forward(self, x, fn=module_fn):
        return fn(
            x,
            self.lstm_weights_ih,
            self.lstm_weights_hh,
            self.lstm_biases_ih,
            self.lstm_biases_hh,
            self.fc_weight,
            self.fc_bias,
            self.h0,
            self.c0,
            self.training,
        )


# Test code
batch_size = 10
sequence_length = 512
input_size = 128
hidden_size = 256
num_layers = 6
output_size = 10
dropout = 0.0


def get_inputs():
    return [torch.randn(batch_size, sequence_length, input_size)]


def get_init_inputs():
    return [input_size, hidden_size, num_layers, output_size, dropout]
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout=0.0):
        """
        Initialize the LSTM model.

        :param input_size: The number of expected features in the input `x`
        :param hidden_size: The number of features in the hidden state `h`
        :param num_layers: Number of recurrent layers
        :param output_size: The number of output features
        :param dropout: If non-zero, introduces a Dropout layer on the outputs of each LSTM layer except the last layer, with dropout probability equal to `dropout`
        """
        super(Model, self).__init__()
        # Initialize hidden state with random values
        self.h0 = torch.randn((num_layers, batch_size, hidden_size))
        self.c0 = torch.randn((num_layers, batch_size, hidden_size))
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, dropout=dropout, bidirectional=False)
        self.fc = nn.Linear(hidden_size, output_size)
    
    def forward(self, x):
        """
        Forward pass through the LSTM model.

        :param x: The input tensor, shape (batch_size, sequence_length, input_size)
        :return: The output tensor, shape (batch_size, output_size)
        """
        self.h0 = self.h0.to(x.device)
        self.c0 = self.c0.to(x.device)
        
        # Forward propagate LSTM
        out, _ = self.lstm(x, (self.h0, self.c0))  # out: tensor of shape (batch_size, seq_length, hidden_size)
        
        # Decode the hidden state of the last time step
        out = self.fc(out[:, -1, :])  # out: tensor of shape (batch_size, output_size)
        
        return out

# Test code
batch_size = 10
sequence_length = 512
input_size = 128
hidden_size = 256
num_layers = 6
output_size = 10
dropout = 0.0

def get_inputs():
    return [torch.randn(batch_size, sequence_length, input_size)]

def get_init_inputs():
    return [input_size, hidden_size, num_layers, output_size, dropout]
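
A minimal smoke test for either listing is sketched below. It is not part of the benchmark harness; it assumes the module-level test constants (batch_size, output_size, etc.) and the Model class defined above are in scope.

# Minimal smoke test (sketch): build the model from the test constants and check the output shape.
model = Model(*get_init_inputs())
model.eval()
x = get_inputs()[0]
with torch.no_grad():
    out = model(x)
assert out.shape == (batch_size, output_size)  # torch.Size([10, 10])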

Kernel Information

Related Kernels (Level 3, Task 35 • 35_LTSM)

Rank  Kernel Name  Runtime (ms)  Speedup (Native)  Speedup (Compile)
🥇 35_lstm_grid_stride_base_base 72.97 0.44 0.83
🥈 35_lstm_modular_device_edit_1 75.07 0.43 0.81
🥉 35_lstm_shared_memory_base 86.54 0.37 0.70
4 35_lstm_atomic_reduction_base_base 86.99 0.37 0.69
5 35_lstm_workload_balanced_base 87.77 0.36 0.69
6 35_lstm_aligned_base 88.03 0.36 0.69
7 35_lstm_tiled_unroll_edit_1 88.19 0.36 0.69
8 35_lstm_load_balancing_base 88.28 0.36 0.68
9 fused_tiled_base 88.40 0.36 0.68
10 35_lstm_ldg_aligned_v2_base 88.50 0.36 0.68
11 35_lstm_load_balancing_edit_1 88.68 0.36 0.68
12 35_LTSM 88.90 0.36 0.68
13 35_lstm_memory_coalescing_edit_1 89.05 0.36 0.68
14 modular_35_ltsm_base 89.17 0.36 0.68
15 35_lstm_shared_memory_edit_1 89.34 0.36 0.68
16 fused_tiled_edit_1 89.35 0.36 0.68
17 35_lstm_unrolled_base 89.58 0.36 0.67
18 35_lstm_memory_coalescing_base 89.77 0.36 0.67
19 35_lstm_warp_reduce_base 89.78 0.36 0.67
20 35_lstm_warp_aligned_base 89.81 0.36 0.67
#include <torch/extension.h>
#include <vector>
#include <cmath>

// Optimized device functions
__device__ __forceinline__ float sigmoid_fast(float x) {
    return 1.0f / (1.0f + __expf(-x));
}

// Optimized LSTM kernel with better workload distribution
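// Per element, this kernel applies the standard LSTM cell update
//   c_t = f * c_{t-1} + i * g,   h_t = o * tanh(c_t)
// where the gate pre-activations (i, f, g, o) are produced beforehand by the
// addmm calls on the host side; only the pointwise nonlinearities and the
// state update are fused here.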
__global__ void lstm_elementwise_forward(
    const float* __restrict__ gates,
    const float* __restrict__ prev_c,
    float* __restrict__ h,
    float* __restrict__ c,
    const int batch_size,
    const int hidden_size
) {
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    const int num_threads = blockDim.x * gridDim.x;
    const int total_elements = batch_size * hidden_size;
    
    // Each thread processes multiple elements in a strided fashion
    for (int idx = bid * blockDim.x + tid; idx < total_elements; idx += num_threads) {
        const int b = idx / hidden_size;
        const int n = idx % hidden_size;
        const int gate_offset = b * hidden_size * 4 + n;
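        // Gates are packed per row as [i | f | g | o] (PyTorch's input, forget,
        // cell, output ordering), each slice of width hidden_size.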
        
        // Coalesced memory access for gates
        const float i_gate = sigmoid_fast(gates[gate_offset]);
        const float f_gate = sigmoid_fast(gates[gate_offset + hidden_size]);
        const float g_gate = tanhf(gates[gate_offset + 2 * hidden_size]);
        const float o_gate = sigmoid_fast(gates[gate_offset + 3 * hidden_size]);
        
        const float c_prev = prev_c[idx];
        const float c_new = f_gate * c_prev + i_gate * g_gate;
        const float h_new = o_gate * tanhf(c_new);
        
        c[idx] = c_new;
        h[idx] = h_new;
    }
}

// Optimized linear kernel with balanced workload
__global__ void linear_forward_balanced(
    const float* __restrict__ input,
    const float* __restrict__ weight,
    const float* __restrict__ bias,
    float* __restrict__ output,
    const int batch_size,
    const int in_features,
    const int out_features
) {
    extern __shared__ float shmem[];
    
    const int tid = threadIdx.x;
    const int wid = tid / 32;  // warp ID
    const int lane = tid % 32;  // lane within warp
    const int num_warps = blockDim.x / 32;
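    // Note: with this indexing every warp in the block computes the same
    // (batch, feat) dot product, and lane 0 of each warp writes the same value,
    // so the per-output work is duplicated blockDim.x / 32 times; the shared
    // memory buffer declared above is never used.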
    
    for (int out_idx = blockIdx.x; out_idx < batch_size * out_features; out_idx += gridDim.x) {
        const int batch = out_idx / out_features;
        const int feat = out_idx % out_features;
        
        float sum = 0.0f;
        const float* in_row = input + batch * in_features;
        const float* w_row = weight + feat * in_features;
        
        // Each warp processes a chunk of the input features
        for (int k = lane; k < in_features; k += 32) {
            sum += in_row[k] * w_row[k];
        }
        
        // Warp reduction
        #pragma unroll
        for (int offset = 16; offset > 0; offset /= 2) {
            sum += __shfl_down_sync(0xffffffff, sum, offset);
        }
        
        // First thread in warp writes result
        if (lane == 0) {
            float final_sum = sum;
            if (bias != nullptr) {
                final_sum += bias[feat];
            }
            output[out_idx] = final_sum;
        }
    }
}

torch::Tensor lstm_forward_cuda(
    torch::Tensor input,
    torch::Tensor w_ih,
    torch::Tensor w_hh,
    torch::Tensor b_ih,
    torch::Tensor b_hh,
    torch::Tensor h0,
    torch::Tensor c0
) {
    const int batch_size = input.size(0);
    const int seq_len = input.size(1);
    const int hidden_size = h0.size(1);
    
    auto h = h0.clone();
    auto c = c0.clone();
    std::vector<torch::Tensor> outputs;
    
    // Optimize thread block size for H100
    const int threads_per_block = 256;
    const int num_sms = 132;  // H100 has 132 SMs
    const int blocks_per_sm = 16;
    const int total_blocks = num_sms * blocks_per_sm;
    
    for (int t = 0; t < seq_len; t++) {
        auto xt = input.select(1, t);
        auto gates = torch::addmm(b_ih, xt, w_ih.t());
        gates = torch::addmm(gates, h, w_hh.t());
        gates += b_hh;
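        // The cell state is updated in place: c is passed both as prev_c and as
        // the output c below. Each thread reads prev_c[idx] before writing
        // c[idx], so the result is correct, although this conflicts with the
        // __restrict__ qualifiers on those parameters.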
        
        lstm_elementwise_forward<<<total_blocks, threads_per_block>>>(
            gates.data_ptr<float>(),
            c.data_ptr<float>(),
            h.data_ptr<float>(),
            c.data_ptr<float>(),
            batch_size,
            hidden_size
        );
        
        outputs.push_back(h.unsqueeze(1));
    }
    
    return torch::cat(outputs, 1);
}

torch::Tensor linear_forward_cuda(
    torch::Tensor input,
    torch::Tensor weight,
    torch::Tensor bias
) {
    const int batch_size = input.size(0);
    const int in_features = input.size(1);
    const int out_features = weight.size(0);
    
    auto output = torch::empty({batch_size, out_features}, input.options());
    
    const int threads_per_block = 128;
    const int num_blocks = std::min(65535, (batch_size * out_features + threads_per_block - 1) / threads_per_block);
    
    linear_forward_balanced<<<num_blocks, threads_per_block>>>(
        input.data_ptr<float>(),
        weight.data_ptr<float>(),
        bias.defined() ? bias.data_ptr<float>() : nullptr,
        output.data_ptr<float>(),
        batch_size,
        in_features,
        out_features
    );
    
    return output;
}

torch::Tensor forward(
    torch::Tensor x,
    std::vector<torch::Tensor> lstm_weights_ih,
    std::vector<torch::Tensor> lstm_weights_hh,
    std::vector<torch::Tensor> lstm_biases_ih,
    std::vector<torch::Tensor> lstm_biases_hh,
    torch::Tensor fc_weight,
    torch::Tensor fc_bias,
    torch::Tensor h0,
    torch::Tensor c0,
    bool is_training
) {
    h0 = h0.to(x.device());
    c0 = c0.to(x.device());
    
    torch::Tensor out = x;
    const int num_layers = lstm_weights_ih.size();
    
    for (int i = 0; i < num_layers; i++) {
        auto w_ih = lstm_weights_ih[i].to(x.device());
        auto w_hh = lstm_weights_hh[i].to(x.device());
        auto b_ih = lstm_biases_ih[i].to(x.device());
        auto b_hh = lstm_biases_hh[i].to(x.device());
        
        auto h_i = h0.narrow(0, i, 1).squeeze(0);
        auto c_i = c0.narrow(0, i, 1).squeeze(0);
        
        out = lstm_forward_cuda(out, w_ih, w_hh, b_ih, b_hh, h_i, c_i);
    }
    
    out = out.select(1, -1);
    out = linear_forward_cuda(out, fc_weight, fc_bias);
    
    return out;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "LSTM forward with balanced workload distribution");
}
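
A sketch of how the extension above could be built and called from Python follows. The file and module names are assumptions (the benchmark harness may compile it differently), and Model here is the parameter-exposing variant from the first listing.

import torch
from torch.utils.cpp_extension import load

# Assumed source file name for the CUDA code above (hypothetical path).
ext = load(name="lstm_workload_balanced",
           sources=["lstm_workload_balanced_base.cu"],
           extra_cuda_cflags=["-O3"], verbose=True)

model = Model(*get_init_inputs()).cuda()
x = get_inputs()[0].cuda()
out = ext.forward(
    x,
    list(model.lstm_weights_ih), list(model.lstm_weights_hh),
    list(model.lstm_biases_ih), list(model.lstm_biases_hh),
    model.fc_weight, model.fc_bias,
    model.h0, model.c0,  # moved to x.device inside forward()
    False,               # is_training
)
print(out.shape)  # expected: torch.Size([batch_size, output_size])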
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.840 inst/cycle 0.000 5
Executed Ipc Elapsed 0.010 inst/cycle 0.000 5
Issue Slots Busy 21.080 % 0.001 5
Issued Ipc Active 0.840 inst/cycle 0.000 5
SM Busy 21.080 % 0.001 5
Memory Throughput 1181916844.280 byte/second 1274642083600706560.000 5
Mem Busy 0.568 % 0.000 5
Max Bandwidth 0.304 % 0.000 5
L1/TEX Hit Rate 97.550 % 0.000 5
L2 Hit Rate 106.106 % 75.231 5
Mem Pipes Busy 0.080 % 0.000 5
Warp Cycles Per Issued Instruction 4.750 cycle 0.001 5
Warp Cycles Per Executed Instruction 4.750 cycle 0.001 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 29.500 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 10.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 16.000 block 0.000 5
Theoretical Active Warps per SM 40.000 warp 0.000 5
Theoretical Occupancy 62.500 % 0.000 5
Achieved Occupancy 6.250 % 0.000 5
Achieved Active Warps Per SM 4.000 warp 0.000 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy (62.5%) is limited by the number of required registers. The difference between calculated theoretical (62.5%) and measured achieved occupancy (6.2%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
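As a rough cross-check of these numbers (assuming the 64 warps-per-SM limit of this architecture): 40 theoretical active warps / 64 = 62.5% theoretical occupancy, while the measured 4 active warps / 64 ≈ 6.25% achieved occupancy, matching the metrics table above.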
Operation / Metric Value Unit
cudaMemcpyAsync
CPU Time 805263.13 μs
Device Time 0.00 μs
Self CPU Time 805263.13 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::t
CPU Time 878335.44 μs
Device Time 0.00 μs
Self CPU Time 387606.95 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::addmm
CPU Time 6279391.30 μs
Device Time 2599514.59 μs
Self CPU Time 3928187.10 μs
Self Device Time 2599514.59 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 2585685.29 μs
Device Time 89717.01 μs
Self CPU Time 2585685.29 μs
Self Device Time 89717.01 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void gemmSN_TN_kernel<float, 128, 16, 2, 4, 10, 11, false, cublasGemvTensorStridedBatched<float const>, cublasGemvTensorStridedBatched<float const>, cublasGemvTensorStridedBatched<float> >(cublasGemmSmallNParams<cublasGemvTensorStridedBatched<float const>, cublasGemvTensorStridedBatched<float const>, cublasGemvTensorStridedBatched<float>, float>)
CPU Time 0.00 μs
Device Time 1879261.42 μs
Self CPU Time 0.00 μs
Self Device Time 1879261.42 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::add_
CPU Time 1250396.72 μs
Device Time 676036.18 μs
Self CPU Time 552854.56 μs
Self Device Time 676036.18 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::elementwise_kernel<128, 2, at::native::gpu_kernel_impl_nocast<at::native::CUDAFunctor_add<float> >(at::TensorIteratorBase&, at::native::CUDAFunctor_add<float> const&)::{lambda(int)#1}>(int, at::native::gpu_kernel_impl_nocast<at::native::CUDAFunctor_add<float> >(at::TensorIteratorBase&, at::native::CUDAFunctor_add<float> const&)::{lambda(int)#1})
CPU Time 0.00 μs
Device Time 676036.18 μs
Self CPU Time 0.00 μs
Self Device Time 676036.18 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
lstm_elementwise_forward(float const*, float const*, float*, float*, int, int)
CPU Time 0.00 μs
Device Time 697810.86 μs
Self CPU Time 0.00 μs
Self Device Time 697810.86 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45325 warnings generated when compiling for host.
Suppressed 45332 warnings (45285 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:12:5: warning: 2 adjacent parameters of 'lstm_elementwise_forward' of similar type ('const float *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
12 | const float* __restrict__ gates,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13 | const float* __restrict__ prev_c,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:12:31: note: the first parameter in the range is 'gates'
12 | const float* __restrict__ gates,
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:13:31: note: the last parameter in the range is 'prev_c'
13 | const float* __restrict__ prev_c,
| ^~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:14:5: warning: 2 adjacent parameters of 'lstm_elementwise_forward' of similar type ('float *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
14 | float* __restrict__ h,
| ^~~~~~~~~~~~~~~~~~~~~~
15 | float* __restrict__ c,
| ~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:14:25: note: the first parameter in the range is 'h'
14 | float* __restrict__ h,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:15:25: note: the last parameter in the range is 'c'
15 | float* __restrict__ c,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:19:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
19 | const int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:20:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
20 | const int bid = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:21:29: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
21 | const int num_threads = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:25:20: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
25 | for (int idx = bid * blockDim.x + tid; idx < total_elements; idx += num_threads) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:47:5: warning: 3 adjacent parameters of 'linear_forward_balanced' of similar type ('const float *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
47 | const float* __restrict__ input,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
48 | const float* __restrict__ weight,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
49 | const float* __restrict__ bias,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:47:31: note: the first parameter in the range is 'input'
47 | const float* __restrict__ input,
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:49:31: note: the last parameter in the range is 'bias'
49 | const float* __restrict__ bias,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:51:5: warning: 2 adjacent parameters of 'linear_forward_balanced' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
51 | const int batch_size,
| ^~~~~~~~~~~~~~~~~~~~~
52 | const int in_features,
| ~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:51:15: note: the first parameter in the range is 'batch_size'
51 | const int batch_size,
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:52:15: note: the last parameter in the range is 'in_features'
52 | const int in_features,
| ^~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:57:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
57 | const int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:58:15: warning: Value stored to 'wid' during its initialization is never read [clang-analyzer-deadcode.DeadStores]
58 | const int wid = tid / 32; // warp ID
| ^~~ ~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:58:15: note: Value stored to 'wid' during its initialization is never read
58 | const int wid = tid / 32; // warp ID
| ^~~ ~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:60:15: warning: Value stored to 'num_warps' during its initialization is never read [clang-analyzer-deadcode.DeadStores]
60 | const int num_warps = blockDim.x / 32;
| ^~~~~~~~~ ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:60:15: note: Value stored to 'num_warps' during its initialization is never read
60 | const int num_warps = blockDim.x / 32;
| ^~~~~~~~~ ~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:60:27: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
60 | const int num_warps = blockDim.x / 32;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:62:24: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
62 | for (int out_idx = blockIdx.x; out_idx < batch_size * out_features; out_idx += gridDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:62:84: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
62 | for (int out_idx = blockIdx.x; out_idx < batch_size * out_features; out_idx += gridDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:67:31: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
67 | const float* in_row = input + batch * in_features;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:67:39: note: make conversion explicit to silence this warning
67 | const float* in_row = input + batch * in_features;
| ^~~~~~~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:67:39: note: perform multiplication in a wider type
67 | const float* in_row = input + batch * in_features;
| ^~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:68:30: warning: result of multiplication in type 'int' is used as a pointer offset after an implicit widening conversion to type 'ptrdiff_t' [bugprone-implicit-widening-of-multiplication-result]
68 | const float* w_row = weight + feat * in_features;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:68:39: note: make conversion explicit to silence this warning
68 | const float* w_row = weight + feat * in_features;
| ^~~~~~~~~~~~~~~~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:68:39: note: perform multiplication in a wider type
68 | const float* w_row = weight + feat * in_features;
| ^~~~
| static_cast<ptrdiff_t>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:93:5: warning: 2 adjacent parameters of 'lstm_forward_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
93 | torch::Tensor input,
| ^~~~~~~~~~~~~~~~~~~~
94 | torch::Tensor w_ih,
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:93:19: note: the first parameter in the range is 'input'
93 | torch::Tensor input,
| ^~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:94:19: note: the last parameter in the range is 'w_ih'
94 | torch::Tensor w_ih,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:93:19: warning: the parameter 'input' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
93 | torch::Tensor input,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:94:19: warning: the parameter 'w_ih' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
94 | torch::Tensor w_ih,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:95:5: warning: 4 adjacent parameters of 'lstm_forward_cuda' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
95 | torch::Tensor w_hh,
| ^~~~~~~~~~~~~~~~~~~
96 | torch::Tensor b_ih,
| ~~~~~~~~~~~~~~~~~~~
97 | torch::Tensor b_hh,
| ~~~~~~~~~~~~~~~~~~~
98 | torch::Tensor h0,
| ~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:95:19: note: the first parameter in the range is 'w_hh'
95 | torch::Tensor w_hh,
| ^~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:98:19: note: the last parameter in the range is 'h0'
98 | torch::Tensor h0,
| ^~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:95:19: warning: the parameter 'w_hh' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
95 | torch::Tensor w_hh,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:96:19: warning: the parameter 'b_ih' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
96 | torch::Tensor b_ih,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:97:19: warning: the parameter 'b_hh' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
97 | torch::Tensor b_hh,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:98:19: warning: the parameter 'h0' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
98 | torch::Tensor h0,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:99:19: warning: the parameter 'c0' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
99 | torch::Tensor c0
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:101:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
101 | const int batch_size = input.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:102:25: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
102 | const int seq_len = input.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:103:29: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
103 | const int hidden_size = h0.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:137:19: warning: the parameter 'input' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
137 | torch::Tensor input,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:138:19: warning: the parameter 'weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
138 | torch::Tensor weight,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:139:19: warning: the parameter 'bias' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
139 | torch::Tensor bias
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:141:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
141 | const int batch_size = input.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:142:29: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
142 | const int in_features = input.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:143:30: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
143 | const int out_features = weight.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:164:19: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
164 | torch::Tensor x,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:165:5: warning: 4 adjacent parameters of 'forward' of similar type ('std::vector<torch::Tensor>') are easily swapped by mistake [bugprone-easily-swappable-parameters]
165 | std::vector<torch::Tensor> lstm_weights_ih,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
166 | std::vector<torch::Tensor> lstm_weights_hh,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
167 | std::vector<torch::Tensor> lstm_biases_ih,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
168 | std::vector<torch::Tensor> lstm_biases_hh,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:165:32: note: the first parameter in the range is 'lstm_weights_ih'
165 | std::vector<torch::Tensor> lstm_weights_ih,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:168:32: note: the last parameter in the range is 'lstm_biases_hh'
168 | std::vector<torch::Tensor> lstm_biases_hh,
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:170:5: warning: 2 adjacent parameters of 'forward' of similar type ('torch::Tensor') are easily swapped by mistake [bugprone-easily-swappable-parameters]
170 | torch::Tensor fc_bias,
| ^~~~~~~~~~~~~~~~~~~~~~
171 | torch::Tensor h0,
| ~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:170:19: note: the first parameter in the range is 'fc_bias'
170 | torch::Tensor fc_bias,
| ^~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:171:19: note: the last parameter in the range is 'h0'
171 | torch::Tensor h0,
| ^~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:179:28: warning: narrowing conversion from 'size_type' (aka 'unsigned long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
179 | const int num_layers = lstm_weights_ih.size();
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:194:36: warning: parameter 'fc_weight' is passed by value and only copied once; consider moving it to avoid unnecessary copies [performance-unnecessary-value-param]
194 | out = linear_forward_cuda(out, fc_weight, fc_bias);
| ^
| std::move( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_3/task_35/b5_s2_35_lstm_workload_balanced/base/base.cu:194:47: warning: parameter 'fc_bias' is passed by value and only copied once; consider moving it to avoid unnecessary copies [performance-unnecessary-value-param]
194 | out = linear_forward_cuda(out, fc_weight, fc_bias);
| ^
| std::move( )