The AI CUDA Engineer 👷

88_Gemm_GroupNorm_Swish_Multiply_Swish • minimal_sync_88_gemm_groupnorm_swish_base

Level 2 • Task 88
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    gemm_weight: torch.Tensor,
    gemm_bias: torch.Tensor,
    group_norm_weight: torch.Tensor,
    group_norm_bias: torch.Tensor,
    multiply_weight: torch.Tensor,
    num_groups: int,
) -> torch.Tensor:
    """
    Performs GEMM, GroupNorm, Swish, Multiply, and Swish operations.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        gemm_weight (torch.Tensor): Weight matrix for linear layer of shape (out_features, in_features)
        gemm_bias (torch.Tensor): Bias vector for linear layer of shape (out_features)
        group_norm_weight (torch.Tensor): Weight parameter for group norm of shape (out_features)
        group_norm_bias (torch.Tensor): Bias parameter for group norm of shape (out_features)
        multiply_weight (torch.Tensor): Weight tensor for multiplication of shape (out_features)
        num_groups (int): Number of groups for group normalization

    Returns:
        torch.Tensor: Output tensor of shape (batch_size, out_features)
    """
    x = F.linear(x, gemm_weight, gemm_bias)
    x = F.group_norm(x, num_groups, group_norm_weight, group_norm_bias)
    x = x * torch.sigmoid(x)
    x = x * multiply_weight
    x = x * torch.sigmoid(x)
    return x


class Model(nn.Module):
    """
    Model that performs GEMM, GroupNorm, Swish, Multiply, and Swish operations.
    """

    def __init__(self, in_features, out_features, num_groups, multiply_weight_shape):
        super(Model, self).__init__()
        gemm = nn.Linear(in_features, out_features)
        self.gemm_weight = gemm.weight
        self.gemm_bias = gemm.bias
        group_norm = nn.GroupNorm(num_groups, out_features)
        self.group_norm_weight = group_norm.weight
        self.group_norm_bias = group_norm.bias
        self.multiply_weight = nn.Parameter(torch.randn(multiply_weight_shape) * 0.02)
        self.num_groups = num_groups

    def forward(self, x, fn=module_fn):
        return fn(
            x,
            self.gemm_weight,
            self.gemm_bias,
            self.group_norm_weight,
            self.group_norm_bias,
            self.multiply_weight,
            self.num_groups,
        )


batch_size = 128
in_features = 512
out_features = 1024
num_groups = 16
multiply_weight_shape = (out_features,)


def get_inputs():
    return [torch.randn(batch_size, in_features)]


def get_init_inputs():
    return [in_features, out_features, num_groups, multiply_weight_shape]

import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs GEMM, GroupNorm, Swish, Multiply, and Swish operations.
    """
    def __init__(self, in_features, out_features, num_groups, multiply_weight_shape):
        super(Model, self).__init__()
        self.gemm = nn.Linear(in_features, out_features)
        self.group_norm = nn.GroupNorm(num_groups, out_features)
        self.multiply_weight = nn.Parameter(torch.randn(multiply_weight_shape) * 0.02) 

    def forward(self, x):
        # (batch_size, in_features) -> (batch_size, out_features)
        x = self.gemm(x)
        # (batch_size, out_features) -> (batch_size, out_features)
        x = self.group_norm(x)
        # (batch_size, out_features) -> (batch_size, out_features)
        x = x * torch.sigmoid(x)
        # (batch_size, out_features) -> (batch_size, out_features)
        x = x * self.multiply_weight
        # (batch_size, out_features) -> (batch_size, out_features)
        x = x * torch.sigmoid(x)
        return x

batch_size = 128
in_features = 512
out_features = 1024
num_groups = 16
multiply_weight_shape = (out_features,)

def get_inputs():
    return [torch.randn(batch_size, in_features)]

def get_init_inputs():
    return [in_features, out_features, num_groups, multiply_weight_shape]
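
For orientation, here is a minimal sanity check (not part of the original listing) that runs both variants above on the same input. It assumes the functional module_fn from the first listing and the nn.Module reference from the second are importable into one scope, and it runs on CPU with the hyperparameters defined above.

# Hypothetical sanity check: confirm the functional path and the nn.Module
# reference compute the same result for shared parameters.
torch.manual_seed(0)
ref = Model(in_features, out_features, num_groups, multiply_weight_shape)
x = get_inputs()[0]

out_module = ref(x)
out_functional = module_fn(
    x,
    ref.gemm.weight,
    ref.gemm.bias,
    ref.group_norm.weight,
    ref.group_norm.bias,
    ref.multiply_weight,
    num_groups,
)
assert torch.allclose(out_module, out_functional, atol=1e-6)
print(out_module.shape)  # torch.Size([128, 1024])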

Kernel Information

#include <torch/extension.h>
#include <ATen/ATen.h>
#include <vector>
#include <c10/cuda/CUDAStream.h>

// CUDA forward declarations
torch::Tensor module_fn_cuda_forward(
    torch::Tensor x,
    torch::Tensor gemm_weight,
    torch::Tensor gemm_bias,
    torch::Tensor group_norm_weight,
    torch::Tensor group_norm_bias,
    torch::Tensor multiply_weight,
    int64_t num_groups
);

#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Warp-level sum reduction using shuffle intrinsics: the stride is halved each
// step, so after log2(warpSize) steps lane 0 holds the full warp sum.
template <typename scalar_t>
__inline__ __device__
scalar_t warpReduceSum(scalar_t val) {
    #pragma unroll
    for (int offset = warpSize/2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}

// Block-level sum reduction: each warp reduces its own values, per-warp partial
// sums are staged in shared memory, and the first warp reduces those partials.
template <typename scalar_t>
__inline__ __device__
scalar_t blockReduceSum(scalar_t val) {
    __shared__ scalar_t shared[32];  // one slot per warp (supports up to 1024 threads)
    int lane = threadIdx.x % warpSize;
    int wid = threadIdx.x / warpSize;

    val = warpReduceSum(val);
    if (lane == 0) shared[wid] = val;
    __syncthreads();

    // Final reduction is done by the first warp only; a second __syncthreads()
    // is unnecessary because only thread 0 consumes the result below. This
    // assumes blockDim.x is a multiple of warpSize, which holds for the launch
    // configuration used here (threads = 64).
    if (wid == 0)
        val = warpReduceSum((threadIdx.x < blockDim.x/warpSize) ? shared[lane] : 0);

    return val;
}

template<typename scalar_t>
__global__ void module_fn_kernel(
    const scalar_t* __restrict__ x,
    scalar_t* __restrict__ output,
    const scalar_t* __restrict__ group_norm_weight,
    const scalar_t* __restrict__ group_norm_bias,
    const scalar_t* __restrict__ multiply_weight,
    const int C,
    const int channels_per_group,
    const int chunk_size
) {
    const int chunk_idx = blockIdx.x / chunk_size;
    const int local_n = blockIdx.x % chunk_size;
    const int g = blockIdx.y;
    const int tid = threadIdx.x;
    
    const int n = chunk_idx * chunk_size + local_n;

    __shared__ scalar_t mean_shared;
    __shared__ scalar_t var_shared;

    scalar_t sum = 0.0f;
    scalar_t sumsq = 0.0f;

    #pragma unroll 4
    for (int c = tid; c < channels_per_group; c += blockDim.x) {
        const int channel_idx = g * channels_per_group + c;
        const int idx = n * C + channel_idx;
        scalar_t val = __ldg(&x[idx]);
        sum += val;
        sumsq += val * val;
    }

    sum = blockReduceSum(sum);
    sumsq = blockReduceSum(sumsq);

    if (threadIdx.x == 0) {
        mean_shared = sum / channels_per_group;
        // Biased variance plus eps (1e-5, matching F.group_norm's default)
        var_shared = sumsq / channels_per_group - mean_shared * mean_shared + 1e-5f;
    }
    __syncthreads();  // Necessary after writing to shared memory

    const scalar_t mean = mean_shared;
    const scalar_t inv_std = rsqrtf(var_shared);

    #pragma unroll 4
    for (int c = tid; c < channels_per_group; c += blockDim.x) {
        const int channel_idx = g * channels_per_group + c;
        const int idx = n * C + channel_idx;
        
        scalar_t val = x[idx];
        scalar_t gamma = group_norm_weight[channel_idx];
        scalar_t beta = group_norm_bias[channel_idx];
        scalar_t w = multiply_weight[channel_idx];

        scalar_t y = (val - mean) * inv_std;
        y = gamma * y + beta;

        scalar_t sigmoid_y = 1.0f / (1.0f + expf(-y));
        y = y * sigmoid_y;

        y = y * w;

        sigmoid_y = 1.0f / (1.0f + expf(-y));
        y = y * sigmoid_y;

        output[idx] = y;
    }
}

torch::Tensor module_fn_cuda_forward(
    torch::Tensor x,
    torch::Tensor gemm_weight,
    torch::Tensor gemm_bias,
    torch::Tensor group_norm_weight,
    torch::Tensor group_norm_bias,
    torch::Tensor multiply_weight,
    int64_t num_groups
) {
    CHECK_INPUT(x);
    CHECK_INPUT(gemm_weight);
    CHECK_INPUT(gemm_bias);
    CHECK_INPUT(group_norm_weight);
    CHECK_INPUT(group_norm_bias);
    CHECK_INPUT(multiply_weight);

    const int NUM_STREAMS = 4;
    std::vector<at::cuda::CUDAStream> streams;
    for(int i = 0; i < NUM_STREAMS; i++) {
        streams.push_back(at::cuda::getStreamFromPool());
    }

    auto x_linear = torch::addmm(gemm_bias, x, gemm_weight.t());
    auto output = torch::empty_like(x_linear);

    auto N = x_linear.size(0);
    auto C = x_linear.size(1);
    int channels_per_group = C / num_groups;
    
    const int chunk_size = (N + NUM_STREAMS - 1) / NUM_STREAMS;
    
    for(int i = 0; i < NUM_STREAMS; i++) {
        int start_n = i * chunk_size;
        int end_n = std::min((i + 1) * chunk_size, (int)N);
        if(start_n >= end_n) continue;

        auto stream = streams[i];

        dim3 blocks(end_n - start_n, num_groups);
        int threads = std::min(channels_per_group, 1024);

        AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda_forward", ([&] {
            module_fn_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
                x_linear.data_ptr<scalar_t>() + start_n * C,
                output.data_ptr<scalar_t>() + start_n * C,
                group_norm_weight.data_ptr<scalar_t>(),
                group_norm_bias.data_ptr<scalar_t>(),
                multiply_weight.data_ptr<scalar_t>(),
                C,
                channels_per_group,
                chunk_size
            );
        }));
    }

    // Synchronize all streams
    for(auto& stream : streams) {
        stream.synchronize();
    }

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn_cuda_forward, "Module function forward");
}
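
For completeness, a minimal sketch of how the extension above could be built and checked against the PyTorch reference. Assumptions: the kernel source is saved as base.cu (the name is suggested by the lint log below, but is illustrative here), a CUDA device is available, and the Model / get_inputs definitions from the task listing are in scope; this is not part of the original submission.

import torch
from torch.utils.cpp_extension import load

# JIT-compile the extension; it exposes the pybind'ed "forward" entry point.
ext = load(name="minimal_sync_88", sources=["base.cu"], verbose=True)

model = Model(in_features, out_features, num_groups, multiply_weight_shape).cuda()
x = get_inputs()[0].cuda()

out_cuda = ext.forward(
    x.contiguous(),
    model.gemm.weight.contiguous(),
    model.gemm.bias.contiguous(),
    model.group_norm.weight.contiguous(),
    model.group_norm.bias.contiguous(),
    model.multiply_weight.contiguous(),
    num_groups,
)
out_ref = model(x)
# Expect agreement to within float32 rounding error.
print(torch.max(torch.abs(out_cuda - out_ref)))
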
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.528 inst/cycle 0.000 5
Executed Ipc Elapsed 0.274 inst/cycle 0.000 5
Issue Slots Busy 13.486 % 0.011 5
Issued Ipc Active 0.538 inst/cycle 0.000 5
SM Busy 13.486 % 0.011 5
Memory Throughput 31378881633.026 byte/second 480630092491569152.000 5
Mem Busy 7.270 % 0.038 5
Max Bandwidth 4.326 % 0.010 5
L1/TEX Hit Rate 16.670 % 0.000 5
L2 Hit Rate 91.382 % 0.583 5
Mem Pipes Busy 4.440 % 0.010 5
Warp Cycles Per Issued Instruction 14.088 cycle 0.037 5
Warp Cycles Per Executed Instruction 14.374 cycle 0.038 5
Avg. Active Threads Per Warp 31.250 0.000 5
Avg. Not Predicated Off Threads Per Warp 29.480 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 32.000 block 0.000 5
Block Limit Shared Mem 51.000 block 0.000 5
Block Limit Warps 32.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 11.844 % 0.002 5
Achieved Active Warps Per SM 7.578 warp 0.001 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (11.8%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
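
As a back-of-the-envelope check on the occupancy warning, the launch geometry follows directly from the constants in the host code above; the sketch below is illustrative arithmetic only, and its connection to the measured 11.8% achieved occupancy is a plausible explanation rather than something the profile proves.

# Launch geometry implied by the host code (illustrative only).
batch_size, num_streams = 128, 4
out_features, num_groups = 1024, 16
warp_size = 32

chunk_size = (batch_size + num_streams - 1) // num_streams  # 32 samples per stream
channels_per_group = out_features // num_groups              # 64
threads_per_block = min(channels_per_group, 1024)            # 64
blocks_per_stream = chunk_size * num_groups                  # 32 * 16 = 512
warps_per_block = threads_per_block // warp_size             # 2

print(blocks_per_stream, warps_per_block)  # 512 2
# With only 2 warps per block and little work per block, block scheduling
# overhead and tail effects can plausibly account for the low achieved occupancy.
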
Operation / Metric Value Unit
aten::to
CPU Time 301704.73 μs
Device Time 155.33 μs
Self CPU Time 65.92 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 301638.81 μs
Device Time 155.33 μs
Self CPU Time 125.77 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 321419.55 μs
Device Time 0.00 μs
Self CPU Time 20513.52 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 300460.21 μs
Device Time 0.00 μs
Self CPU Time 300460.21 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaStreamSynchronize
CPU Time 281909.26 μs
Device Time 35713.57 μs
Self CPU Time 281909.26 μs
Self Device Time 35713.57 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 52144.41 μs
Device Time 602461.87 μs
Self CPU Time 19169.55 μs
Self Device Time 602461.87 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 65701.05 μs
Device Time 602461.87 μs
Self CPU Time 13586.16 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
sm80_xmma_gemm_f32f32_f32f32_f32_tn_n_tilesize32x32x8_stage3_warpsize1x2x1_ffma_aligna4_alignc4_execute_kernel__5x_cublas
CPU Time 0.00 μs
Device Time 148427.84 μs
Self CPU Time 0.00 μs
Self Device Time 148427.84 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void module_fn_kernel<float>(float const*, float*, float const*, float const*, float const*, int, int, int)
CPU Time 0.00 μs
Device Time 172921.65 μs
Self CPU Time 0.00 μs
Self Device Time 172921.65 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 602461.87 μs
Self CPU Time 0.00 μs
Self Device Time 602461.87 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45308 warnings generated when compiling for host.
Suppressed 45334 warnings (45287 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:17:35: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
17 | #define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:18:41: warning: macro argument should be enclosed in parentheses [bugprone-macro-parentheses]
18 | #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
| ^
| ()
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:35:16: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
35 | int lane = threadIdx.x % warpSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:36:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
36 | int wid = threadIdx.x / warpSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:53:5: warning: 3 adjacent parameters of 'module_fn_kernel' of similar type ('const scalar_t *__restrict') are easily swapped by mistake [bugprone-easily-swappable-parameters]
53 | const scalar_t* __restrict__ group_norm_weight,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
54 | const scalar_t* __restrict__ group_norm_bias,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
55 | const scalar_t* __restrict__ multiply_weight,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:53:34: note: the first parameter in the range is 'group_norm_weight'
53 | const scalar_t* __restrict__ group_norm_weight,
| ^~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:55:34: note: the last parameter in the range is 'multiply_weight'
55 | const scalar_t* __restrict__ multiply_weight,
| ^~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:56:5: warning: 3 adjacent parameters of 'module_fn_kernel' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
56 | const int C,
| ^~~~~~~~~~~~
57 | const int channels_per_group,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
58 | const int chunk_size
| ~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:56:15: note: the first parameter in the range is 'C'
56 | const int C,
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:58:15: note: the last parameter in the range is 'chunk_size'
58 | const int chunk_size
| ^~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:60:27: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
60 | const int chunk_idx = blockIdx.x / chunk_size;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:61:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
61 | const int local_n = blockIdx.x % chunk_size;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:62:19: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
62 | const int g = blockIdx.y;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:63:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
63 | const int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:74:52: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
74 | for (int c = tid; c < channels_per_group; c += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:95:52: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
95 | for (int c = tid; c < channels_per_group; c += blockDim.x) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:120:19: warning: the parameter 'x' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
8 | torch::Tensor x,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:121:19: warning: the parameter 'gemm_weight' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
9 | torch::Tensor gemm_weight,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:122:19: warning: the parameter 'gemm_bias' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
10 | torch::Tensor gemm_bias,
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:138:9: warning: 'push_back' is called inside a loop; consider pre-allocating the container capacity before the loop [performance-inefficient-vector-operation]
137 | for(int i = 0; i < NUM_STREAMS; i++) {
138 | streams.push_back(at::cuda::getStreamFromPool());
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:146:30: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
146 | int channels_per_group = C / num_groups;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:148:28: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
148 | const int chunk_size = (N + NUM_STREAMS - 1) / NUM_STREAMS;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250208_optimize_b5_s4_e1_sweep/level_2/task_88/b3_s1_minimal_sync_88_gemm_groupnorm_swish/base/base.cu:160:9: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
160 | AT_DISPATCH_FLOATING_TYPES(x_linear.scalar_type(), "module_fn_cuda_forward", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^