
The AI CUDA Engineer 👷

94_MSELoss • vectorized_ldg_mse_base

Level 1 • Task 94
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """
    Computes the Mean Squared Error loss for regression tasks.

    Args:
        predictions (torch.Tensor): Predicted values.
        targets (torch.Tensor): Target values.

    Returns:
        torch.Tensor: Mean Squared Error loss.
    """
    return F.mse_loss(predictions, targets, reduction="mean")


class Model(nn.Module):
    """
    A model that computes the Mean Squared Error loss for regression tasks.

    Parameters:
        None
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets, fn=module_fn):
        return fn(predictions, targets)


batch_size = 128
input_shape = (4096,)
dim = 1


def get_inputs():
    return [
        torch.randn(batch_size, *input_shape),
        torch.randn(batch_size, *input_shape),
    ]


def get_init_inputs():
    return []

# Variant of the task definition computing the MSE explicitly as
# mean((predictions - targets) ** 2) rather than via F.mse_loss.
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    A model that computes the Mean Squared Error loss for regression tasks.

    Parameters:
        None
    """
    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets):
        return torch.mean((predictions - targets) ** 2)

batch_size = 128
input_shape = (4096, )
dim = 1

def get_inputs():
    return [torch.randn(batch_size, *input_shape), torch.randn(batch_size, *input_shape)]

def get_init_inputs():
    return []
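
Before looking at the CUDA kernel, it is worth confirming that the two reference definitions above agree. A minimal sanity-check sketch, assuming CPU tensors and the shapes from the harness (this snippet is not part of the original listings):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
# Same shapes as the harness above: batch_size=128, input_shape=(4096,)
preds, tgts = torch.randn(128, 4096), torch.randn(128, 4096)

# Functional form used by module_fn vs. the explicit elementwise form
loss_fn = F.mse_loss(preds, tgts, reduction="mean")
loss_manual = torch.mean((preds - tgts) ** 2)

assert torch.allclose(loss_fn, loss_manual)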

Kernel Information

Related Kernels (Level 1, Task 94 • 94_MSELoss)

Rank Kernel Name Runtime (ms) Speedup (Native) Speedup (Compile)
🥇 optimized_thread_indexing_base 0.02 1.03 2.04
🥇 coalesced_shfl_mse_base 0.02 1.03 2.04
🥇 efficient_mse_base 0.02 1.03 2.04
🥇 mse_unrolled_optimized_base 0.02 1.03 2.04
🥇 mse_min_sync_edit_1 0.02 1.03 2.04
🥇 vectorized_ldg_mse_base 0.02 1.03 2.04
🥇 optimized_grid_stride_warp_reduce_base 0.02 1.03 2.04
🥇 mse_1d_optimized_indexing_base 0.02 1.03 2.04
🥇 mse_unrolled_optimized_edit_1 0.02 1.03 2.04
🥇 mse_warp_reduction_base 0.02 1.03 2.04
🥇 mse_unroll_pragma_base_base 0.02 1.03 2.04
🥇 mse_blocksize_experiment_base 0.02 1.03 2.04
🥇 mse_ldg_vectorized_edit_edit_1 0.02 1.03 2.04
🥇 mse_ldg_vectorized_edit_base 0.02 1.03 2.04
15 optimized_block_size_mse_base 0.02 0.97 1.92
15 stride_mse_loss_base 0.02 0.97 1.92
15 warp_uniform_mse_base 0.02 0.97 1.92
15 block_size_experimentation_base_base 0.02 0.97 1.92
15 optimized_mse_forward_base 0.02 0.97 1.92
15 warp_aligned_mse_base_base 0.02 0.97 1.92
#include <pybind11/pybind11.h>
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <algorithm>

// Block size set for efficient occupancy
static const int BLOCK_SIZE = 256;

// This kernel computes the Mean Squared Error (MSE) using vectorized global memory loads
// with __ldg() for read-only accesses, aligning accesses to 128-bit boundaries.
// For float (4 bytes), we use float4 (128-bit) loads, and for double (8 bytes) we use double2 (128-bit) loads.

template <typename scalar_t>
__global__ void vectorized_ldg_mse_kernel(
    const scalar_t* __restrict__ preds,
    const scalar_t* __restrict__ tgts,
    double* __restrict__ sum_out,
    const int64_t num_elements
) {
    double local_sum = 0.0;
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    // Use vectorized loads if possible
    if (sizeof(scalar_t) == 4) {
        // For float: 4 floats = 16 bytes
        const int vecSize = 4;
        int vecCount = num_elements / vecSize;
        const float4* preds_vec = reinterpret_cast<const float4*>(preds);
        const float4* tgts_vec = reinterpret_cast<const float4*>(tgts);

        // Process vectorized chunks
        for (int i = thread_id; i < vecCount; i += stride) {
            float4 p = __ldg(&preds_vec[i]);
            float4 t = __ldg(&tgts_vec[i]);
            float diff0 = p.x - t.x;
            float diff1 = p.y - t.y;
            float diff2 = p.z - t.z;
            float diff3 = p.w - t.w;
            local_sum += (double)diff0 * diff0 + (double)diff1 * diff1 +
                         (double)diff2 * diff2 + (double)diff3 * diff3;
        }

        // Process any tail elements
        int remainder = num_elements % vecSize;
        int start = vecCount * vecSize;
        for (int i = thread_id; i < remainder; i += stride) {
            float p = __ldg(&preds[start + i]);
            float t = __ldg(&tgts[start + i]);
            float diff = p - t;
            local_sum += (double)diff * diff;
        }
    } else if (sizeof(scalar_t) == 8) {
        // For double: 2 doubles = 16 bytes
        const int vecSize = 2;
        int vecCount = num_elements / vecSize;
        const double2* preds_vec = reinterpret_cast<const double2*>(preds);
        const double2* tgts_vec = reinterpret_cast<const double2*>(tgts);

        // Process vectorized chunks
        for (int i = thread_id; i < vecCount; i += stride) {
            double2 p = __ldg(&preds_vec[i]);
            double2 t = __ldg(&tgts_vec[i]);
            double diff0 = p.x - t.x;
            double diff1 = p.y - t.y;
            local_sum += diff0 * diff0 + diff1 * diff1;
        }

        // Process any tail elements
        int remainder = num_elements % vecSize;
        int start = vecCount * vecSize;
        for (int i = thread_id; i < remainder; i += stride) {
            double p = __ldg(&preds[start + i]);
            double t = __ldg(&tgts[start + i]);
            double diff = p - t;
            local_sum += diff * diff;
        }
    } else {
        // Fallback for other types (shouldn't occur for floating point types)
        for (int i = thread_id; i < num_elements; i += stride) {
            double diff = static_cast<double>(__ldg(&preds[i])) - static_cast<double>(__ldg(&tgts[i]));
            local_sum += diff * diff;
        }
    }

    // Reduce within the block using shared memory
    __shared__ double smem[BLOCK_SIZE];
    smem[threadIdx.x] = local_sum;
    __syncthreads();

    // Standard reduction in shared memory
    for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            smem[threadIdx.x] += smem[threadIdx.x + s];
        }
        __syncthreads();
    }

    // Thread 0 publishes the block's partial sum to the global accumulator.
    // Note: atomicAdd on double requires compute capability 6.0 or newer.
    if (threadIdx.x == 0) {
        atomicAdd(sum_out, smem[0]);
    }
}

// Host function that sets up the kernel launch

torch::Tensor forward(torch::Tensor predictions, torch::Tensor targets) {
    TORCH_CHECK(predictions.is_cuda(), "predictions must be a CUDA tensor");
    TORCH_CHECK(targets.is_cuda(), "targets must be a CUDA tensor");
    TORCH_CHECK(predictions.numel() == targets.numel(),
                "predictions and targets must have the same number of elements");

    const int64_t num_elements = predictions.numel();
    auto accumulator = torch::zeros({1}, predictions.options().dtype(at::kDouble));

    // Launch enough blocks to cover the data, capped at 1024; the
    // grid-stride loops in the kernel pick up any remaining elements.
    int grid_size = (num_elements + BLOCK_SIZE - 1) / BLOCK_SIZE;
    grid_size = std::min(grid_size, 1024);

    AT_DISPATCH_FLOATING_TYPES(predictions.scalar_type(), "vectorized_ldg_mse_cuda", ([&] {
        vectorized_ldg_mse_kernel<scalar_t><<<grid_size, BLOCK_SIZE>>>(
            predictions.data_ptr<scalar_t>(),
            targets.data_ptr<scalar_t>(),
            accumulator.data_ptr<double>(),
            num_elements
        );
    }));

    // Final MSE = accumulated squared error divided by number of elements
    auto result = accumulator.div_(static_cast<double>(num_elements));
    return result.to(predictions.dtype());
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Vectorized LDG MSE forward (CUDA)");
}
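
A hedged usage sketch for completeness: the extension can be JIT-compiled with torch.utils.cpp_extension.load and compared against the PyTorch baseline. The source filename vectorized_ldg_mse.cu and the module name are assumptions for illustration, not part of the original submission:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# JIT-compile the CUDA source above (the filename is hypothetical)
mse_ext = load(name="vectorized_ldg_mse",
               sources=["vectorized_ldg_mse.cu"],
               verbose=True)

preds = torch.randn(128, 4096, device="cuda")
tgts = torch.randn(128, 4096, device="cuda")

out = mse_ext.forward(preds, tgts)
ref = F.mse_loss(preds, tgts, reduction="mean")
# out is a 1-element tensor; compare its scalar value to the baseline
assert torch.allclose(out.squeeze(), ref, atol=1e-6)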
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 1.308 inst/cycle 0.000 5
Executed Ipc Elapsed 0.668 inst/cycle 0.000 5
Issue Slots Busy 33.920 % 0.025 5
Issued Ipc Active 1.358 inst/cycle 0.000 5
SM Busy 33.920 % 0.025 5
Memory Throughput 746601082136.586 byte/second 180938149006676393984.000 5
Mem Busy 25.204 % 0.216 5
Max Bandwidth 22.398 % 0.183 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 18.860 % 0.014 5
Mem Pipes Busy 23.490 % 0.186 5
Warp Cycles Per Issued Instruction 33.494 cycle 1.708 5
Warp Cycles Per Executed Instruction 34.712 cycle 1.835 5
Avg. Active Threads Per Warp 31.840 0.000 5
Avg. Not Predicated Off Threads Per Warp 22.060 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 21.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 68.770 % 0.041 5
Achieved Active Warps Per SM 44.014 warp 0.016 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
WRN ThreadDivergence Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 31.8 threads being active per cycle. This is further reduced to 22.1 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp().
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (68.8%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
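
These warnings are consistent with a small, memory-bound kernel: at 128 × 4096 floats each input is roughly 2 MB, so there is little work available to hide latency. A minimal timing sketch using CUDA events, assuming mse_ext was loaded as in the earlier example:

import torch

preds = torch.randn(128, 4096, device="cuda")
tgts = torch.randn(128, 4096, device="cuda")

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)

for _ in range(10):                      # warm-up launches
    mse_ext.forward(preds, tgts)
torch.cuda.synchronize()

start.record()
for _ in range(100):
    mse_ext.forward(preds, tgts)
end.record()
torch.cuda.synchronize()

print(f"avg time per call: {start.elapsed_time(end) / 100:.4f} ms")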
Operation / Metric Value Unit
aten::to
CPU Time 1338655.30 μs
Device Time 236682.33 μs
Self CPU Time 83275.58 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_to_copy
CPU Time 1255379.72 μs
Device Time 236682.33 μs
Self CPU Time 172368.89 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 4650423.76 μs
Device Time 7023184.26 μs
Self CPU Time 308572.48 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 4341852.23 μs
Device Time 7023184.26 μs
Self CPU Time 354098.39 μs
Self Device Time 7023184.26 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 4993933.83 μs
Device Time 1326.36 μs
Self CPU Time 4993933.83 μs
Self Device Time 1326.36 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void vectorized_ldg_mse_kernel<float>(float const*, float const*, double*, long)
CPU Time 0.00 μs
Device Time 435942.03 μs
Self CPU Time 0.00 μs
Self Device Time 435942.03 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 249708.67 μs
Device Time 537661.17 μs
Self CPU Time 249708.67 μs
Self Device Time 537661.17 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 6813301.53 μs
Self CPU Time 0.00 μs
Self Device Time 6813301.53 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45284 warnings generated when compiling for host.
Suppressed 45321 warnings (45274 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b10_s3_vectorized_ldg_mse/base/base.cu:22:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
22 | int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b10_s3_vectorized_ldg_mse/base/base.cu:23:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
23 | int stride = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b10_s3_vectorized_ldg_mse/base/base.cu:29:24: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
29 | int vecCount = num_elements / vecSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b10_s3_vectorized_ldg_mse/base/base.cu:46:25: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
46 | int remainder = num_elements % vecSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b10_s3_vectorized_ldg_mse/base/base.cu:57:24: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
57 | int vecCount = num_elements / vecSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b10_s3_vectorized_ldg_mse/base/base.cu:71:25: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
71 | int remainder = num_elements % vecSize;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b10_s3_vectorized_ldg_mse/base/base.cu:116:21: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
116 | int grid_size = (num_elements + BLOCK_SIZE - 1) / BLOCK_SIZE;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b10_s3_vectorized_ldg_mse/base/base.cu:119:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
119 | AT_DISPATCH_FLOATING_TYPES(predictions.scalar_type(), "vectorized_ldg_mse_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^