
The AI CUDA Engineer 👷

59_Matmul_Swish_Scaling • balanced_workload_swish_scaling_base

Level 2 • Task 59

Functional reference implementation:
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    scaling_factor: float,
) -> torch.Tensor:
    """
    Applies linear transformation, Swish activation, and scaling.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_features)
        weight (torch.Tensor): Weight matrix of shape (out_features, in_features)
        bias (torch.Tensor): Bias vector of shape (out_features)
        scaling_factor (float): Factor to scale the output by

    Returns:
        torch.Tensor: Output tensor of shape (batch_size, out_features)
    """
    x = F.linear(x, weight, bias)
    x = x * torch.sigmoid(x)  # Swish activation
    x = x * scaling_factor
    return x


class Model(nn.Module):
    """
    Simple model that performs a matrix multiplication, applies Swish activation, and scales the result.
    """

    def __init__(self, in_features, out_features, scaling_factor):
        super(Model, self).__init__()
        gemm = nn.Linear(in_features, out_features)
        self.weight = nn.Parameter(gemm.weight)
        self.bias = nn.Parameter(gemm.bias)
        self.scaling_factor = scaling_factor

    def forward(self, x, fn=module_fn):
        return fn(x, self.weight, self.bias, self.scaling_factor)


batch_size = 128
in_features = 1024
out_features = 512
scaling_factor = 2.0


def get_inputs():
    return [torch.randn(batch_size, in_features)]


def get_init_inputs():
    return [in_features, out_features, scaling_factor]

Original PyTorch implementation:

import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Simple model that performs a matrix multiplication, applies Swish activation, and scales the result.
    """
    def __init__(self, in_features, out_features, scaling_factor):
        super(Model, self).__init__()
        self.matmul = nn.Linear(in_features, out_features)
        self.scaling_factor = scaling_factor

    def forward(self, x):
        x = self.matmul(x)
        x = x * torch.sigmoid(x)  # Swish activation
        x = x * self.scaling_factor
        return x

batch_size = 128
in_features = 1024
out_features = 512
scaling_factor = 2.0

def get_inputs():
    return [torch.randn(batch_size, in_features)]

def get_init_inputs():
    return [in_features, out_features, scaling_factor]
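
Both listings implement the same computation; a minimal CPU sanity check (a sketch, assuming module_fn from the first listing and the Model class from the second are in scope together):

# The functional reference and the nn.Module forward should agree when they
# share the same weight, bias, and scaling factor.
torch.manual_seed(0)
model = Model(in_features, out_features, scaling_factor)
x = get_inputs()[0]
ref = module_fn(x, model.matmul.weight, model.matmul.bias, scaling_factor)
assert torch.allclose(model(x), ref)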

Kernel Information

#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Kernel that evenly partitions the workload across all threads
__global__ void balanced_workload_swish_scaling_kernel(const float* __restrict__ input,
                                                        float* __restrict__ output,
                                                        float scaling_factor,
                                                        int N) {
    // Compute a unique thread id
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int total_threads = gridDim.x * blockDim.x;
    
    // Determine how many elements each thread should process
    int work_per_thread = N / total_threads;
    int remainder = N % total_threads;

    // Calculate start index for this thread using a balanced partitioning
    int start = tid * work_per_thread + (tid < remainder ? tid : remainder);
    int num_elements = work_per_thread + (tid < remainder ? 1 : 0);
    int end = start + num_elements;

    // Process assigned elements
    for (int i = start; i < end; i++) {
        float x = input[i];
        float sigmoid = 1.0f / (1.0f + expf(-x));
        output[i] = x * sigmoid * scaling_factor;
    }
}
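
// Worked example of the partitioning above: with N = 1000 and
// total_threads = 256, work_per_thread = 3 and remainder = 232, so threads
// 0..231 process 4 elements each and threads 232..255 process 3 each
// (232*4 + 24*3 = 1000); per-thread loads differ by at most one element.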

// Forward function: computes the linear transformation then applies the swish activation and scaling
torch::Tensor forward(
    torch::Tensor x,
    torch::Tensor weight,
    torch::Tensor bias,
    double scaling_factor) {

    x = x.contiguous();
    weight = weight.contiguous();
    bias = bias.contiguous();

    TORCH_CHECK(x.is_cuda(), "Input tensor 'x' must be a CUDA tensor.");
    TORCH_CHECK(weight.is_cuda(), "Weight tensor must be a CUDA tensor.");
    TORCH_CHECK(bias.is_cuda(), "Bias tensor must be a CUDA tensor.");

    TORCH_CHECK(x.scalar_type() == at::kFloat, "Input tensor 'x' must be of type torch.float32.");
    TORCH_CHECK(weight.scalar_type() == at::kFloat, "Weight tensor must be of type torch.float32.");
    TORCH_CHECK(bias.scalar_type() == at::kFloat, "Bias tensor must be of type torch.float32.");

    // Compute the linear transformation: y = x @ weight.T + bias
    auto y = at::addmm(bias, x, weight.t());
    auto output = at::empty_like(y);

    int N = y.numel();
   
    // Launch 256 threads per block with enough blocks to cover all elements.
    // Note: with this grid, total_threads >= N, so each thread is assigned at
    // most one element and the balanced partitioning degenerates to a simple
    // one-element-per-thread mapping; a smaller fixed grid would exercise the
    // multi-element path.
    int threads = 256;
    int blocks = (N + threads - 1) / threads;

    // Launch the balanced workload kernel
    balanced_workload_swish_scaling_kernel<<<blocks, threads>>>(
        y.data_ptr<float>(),
        output.data_ptr<float>(),
        static_cast<float>(scaling_factor),
        N
    );

    cudaError_t err = cudaGetLastError();
    TORCH_CHECK(err == cudaSuccess, "CUDA kernel failed: ", cudaGetErrorString(err));

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "CUDA forward function with balanced workload distribution");
}
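
A minimal sketch of how the extension above could be compiled and checked against the PyTorch reference. The file and module names ("balanced_swish.cu", "balanced_swish") are hypothetical, not taken from the listing:

import torch
from torch.utils.cpp_extension import load

# Build the extension inline; balanced_swish.cu is assumed to contain the
# CUDA listing above.
ext = load(name="balanced_swish", sources=["balanced_swish.cu"])

x = torch.randn(128, 1024, device="cuda")
weight = torch.randn(512, 1024, device="cuda")
bias = torch.randn(512, device="cuda")

out = ext.forward(x, weight, bias, 2.0)
ref = torch.nn.functional.linear(x, weight, bias)
ref = ref * torch.sigmoid(ref) * 2.0
assert torch.allclose(out, ref, atol=1e-5)
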
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.604 inst/cycle 0.001 5
Executed Ipc Elapsed 0.236 inst/cycle 0.000 5
Issue Slots Busy 16.214 % 0.671 5
Issued Ipc Active 0.646 inst/cycle 0.001 5
SM Busy 17.032 % 0.738 5
Memory Throughput 74384583905.940 byte/second 5527683404087961600.000 5
Mem Busy 10.104 % 0.108 5
Max Bandwidth 6.608 % 0.054 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 84.414 % 0.107 5
Mem Pipes Busy 3.342 % 0.012 5
Warp Cycles Per Issued Instruction 21.776 cycle 0.094 5
Warp Cycles Per Executed Instruction 23.348 cycle 0.106 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 29.790 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 22.540 % 0.048 5
Achieved Active Warps Per SM 14.424 warp 0.020 5
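
A back-of-envelope consistency check on the Memory Throughput row, assuming the kernel moves one 4-byte read and one 4-byte write per element (a sketch; the throughput value is taken directly from the table):

# Elementwise Swish+scale kernel: one float read and one float write per element.
elements = 128 * 512              # batch_size * out_features = 65536
bytes_moved = elements * 4 * 2    # 524288 bytes per launch
throughput = 74384583905.94       # byte/second, from the table above
print(bytes_moved / throughput)   # ~7.0e-06 s, i.e. a kernel time of roughly 7 us
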
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (22.6%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
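
A one-line check of the achieved-occupancy figure in the warning above, computed from the table values (Achieved Active Warps Per SM over Theoretical Active Warps per SM):

# Achieved occupancy = achieved active warps per SM / theoretical warps per SM.
print(14.424 / 64.0 * 100.0)  # ~22.54%, matching the Achieved Occupancy row above
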
Operation / Metric Value Unit
aten::to
CPU Time 368867.39 μs
Device Time 299.93 μs
Self CPU Time 64.11 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 368803.28 μs
Device Time 299.93 μs
Self CPU Time 116.58 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::empty_strided
CPU Time 391949.86 μs
Device Time 0.00 μs
Self CPU Time 24084.92 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaDeviceGetStreamPriorityRange
CPU Time 366899.57 μs
Device Time 0.00 μs
Self CPU Time 366899.57 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::addmm
CPU Time 559592.76 μs
Device Time 139795.26 μs
Self CPU Time 195073.37 μs
Self Device Time 139795.26 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
sm80_xmma_gemm_f32f32_f32f32_f32_tn_n_tilesize32x32x8_stage3_warpsize1x2x1_ffma_aligna4_alignc4_execute_kernel__51_cublas
CPU Time 0.00 μs
Device Time 125993.80 μs
Self CPU Time 0.00 μs
Self Device Time 125993.80 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 70485.67 μs
Device Time 654267.96 μs
Self CPU Time 13185.57 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 57301.43 μs
Device Time 654267.96 μs
Self CPU Time 19784.35 μs
Self Device Time 654267.96 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 654267.96 μs
Self CPU Time 0.00 μs
Self Device Time 654267.96 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45281 warnings generated when compiling for host.
Suppressed 45324 warnings (45277 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_59/b6_s1_balanced_workload_swish_scaling/base/base.cu:8:57: warning: 2 adjacent parameters of 'balanced_workload_swish_scaling_kernel' of convertible types are easily swappable by mistake [bugprone-easily-swappable-parameters]
8 | float scaling_factor,
| ^~~~~~~~~~~~~~~~~~~~~
9 | int N) {
| ~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_59/b6_s1_balanced_workload_swish_scaling/base/base.cu:8:63: note: the first parameter in the range is 'scaling_factor'
8 | float scaling_factor,
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_59/b6_s1_balanced_workload_swish_scaling/base/base.cu:9:61: note: the last parameter in the range is 'N'
9 | int N) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_59/b6_s1_balanced_workload_swish_scaling/base/base.cu:9:57: note: 'float' and 'int' may be implicitly converted
9 | int N) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_59/b6_s1_balanced_workload_swish_scaling/base/base.cu:11:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
11 | int tid = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_59/b6_s1_balanced_workload_swish_scaling/base/base.cu:12:25: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
12 | int total_threads = gridDim.x * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_2/task_59/b6_s1_balanced_workload_swish_scaling/base/base.cu:54:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
54 | int N = y.numel();
| ^