
The AI CUDA Engineer 👷

94_MSELoss • stride_mse_loss_base

Level 1 • Task 94
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """
    Computes the Mean Squared Error loss for regression tasks.

    Args:
        predictions (torch.Tensor): Predicted values.
        targets (torch.Tensor): Target values.

    Returns:
        torch.Tensor: Mean Squared Error loss.
    """
    return F.mse_loss(predictions, targets, reduction="mean")


class Model(nn.Module):
    """
    A model that computes the Mean Squared Error loss for regression tasks.

    Parameters:
        None
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets, fn=module_fn):
        return fn(predictions, targets)


batch_size = 128
input_shape = (4096,)
dim = 1


def get_inputs():
    return [
        torch.randn(batch_size, *input_shape),
        torch.randn(batch_size, *input_shape),
    ]


def get_init_inputs():
    return []

# Original PyTorch module for this task (computes the same MSE as module_fn above)
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    A model that computes the Mean Squared Error loss for regression tasks.

    Parameters:
        None
    """
    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets):
        return torch.mean((predictions - targets) ** 2)

batch_size = 128
input_shape = (4096, )
dim = 1

def get_inputs():
    return [torch.randn(batch_size, *input_shape), torch.randn(batch_size, *input_shape)]

def get_init_inputs():
    return []
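
Both listings above compute the same scalar. A minimal sanity check, using only the definitions shown (shapes taken from the task's batch_size and input_shape):

import torch
import torch.nn.functional as F

preds = torch.randn(128, 4096)
tgts = torch.randn(128, 4096)
a = F.mse_loss(preds, tgts, reduction="mean")  # functional reference (module_fn)
b = torch.mean((preds - tgts) ** 2)            # hand-written Model.forward
assert torch.allclose(a, b)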

Kernel Information

Related Kernels (Level 1, Task 94 • 94_MSELoss)

Rank Kernel Name Runtime (ms) Speedup Native Speedup
🥇 optimized_thread_indexing_base 0.02 1.03 2.04
🥇 coalesced_shfl_mse_base 0.02 1.03 2.04
🥇 efficient_mse_base 0.02 1.03 2.04
🥇 mse_unrolled_optimized_base 0.02 1.03 2.04
🥇 mse_min_sync_edit_1 0.02 1.03 2.04
🥇 vectorized_ldg_mse_base 0.02 1.03 2.04
🥇 optimized_grid_stride_warp_reduce_base 0.02 1.03 2.04
🥇 mse_1d_optimized_indexing_base 0.02 1.03 2.04
🥇 mse_unrolled_optimized_edit_1 0.02 1.03 2.04
🥇 mse_warp_reduction_base 0.02 1.03 2.04
🥇 mse_unroll_pragma_base_base 0.02 1.03 2.04
🥇 mse_blocksize_experiment_base 0.02 1.03 2.04
🥇 mse_ldg_vectorized_edit_edit_1 0.02 1.03 2.04
🥇 mse_ldg_vectorized_edit_base 0.02 1.03 2.04
15 optimized_block_size_mse_base 0.02 0.97 1.92
15 stride_mse_loss_base 0.02 0.97 1.92
15 warp_uniform_mse_base 0.02 0.97 1.92
15 block_size_experimentation_base_base 0.02 0.97 1.92
15 optimized_mse_forward_base 0.02 0.97 1.92
15 warp_aligned_mse_base_base 0.02 0.97 1.92
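
The Runtime and Speedup columns come from wall-clock timing of repeated kernel launches. A minimal sketch of how such numbers can be measured with CUDA events; the helper name and iteration counts here are illustrative, not taken from the leaderboard harness:

import torch
import torch.nn.functional as F

def time_ms(fn, *args, warmup=10, iters=100):
    # Average milliseconds per call, measured with CUDA events after a warmup.
    for _ in range(warmup):
        fn(*args)
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        fn(*args)
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / iters

preds = torch.randn(128, 4096, device="cuda")
tgts = torch.randn(128, 4096, device="cuda")
print(time_ms(F.mse_loss, preds, tgts))  # baseline; a custom kernel is timed the same way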
#include <pybind11/pybind11.h>
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>

static const int BLOCK_SIZE = 256;

// CUDA kernel using a grid-stride loop with correct boundary handling
template <typename scalar_t>
__global__ void stride_mse_kernel(
    const scalar_t* __restrict__ preds,
    const scalar_t* __restrict__ tgts,
    double* __restrict__ sum_out,
    const int64_t num_elements
) {
    // Shared memory for reduction
    __shared__ double shmem[BLOCK_SIZE];

    // 64-bit indices so the grid-stride loop cannot overflow on large inputs
    int64_t idx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    const int64_t stride = static_cast<int64_t>(blockDim.x) * gridDim.x;
    double local_sum = 0.0;

    // Grid-stride loop: the loop condition is the boundary check, and striding
    // handles workloads larger than the number of launched threads
    for (; idx < num_elements; idx += stride) {
        // Accumulate squared differences in double precision to limit rounding error
        double diff = static_cast<double>(preds[idx]) - static_cast<double>(tgts[idx]);
        local_sum += diff * diff;
    }

    // Store local sum to shared memory
    shmem[threadIdx.x] = local_sum;
    __syncthreads();

    // Intra-block reduction in shared memory
    for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {  // blockDim.x == BLOCK_SIZE
        if (threadIdx.x < s) {
            shmem[threadIdx.x] += shmem[threadIdx.x + s];
        }
        __syncthreads();
    }

    // The first thread of each block updates the global accumulator
    if (threadIdx.x == 0) {
        atomicAdd(sum_out, shmem[0]);
    }
}

// Host function: validates inputs, launches the kernel, and finalizes the mean
torch::Tensor forward(torch::Tensor predictions, torch::Tensor targets) {
    TORCH_CHECK(predictions.is_cuda(), "predictions must be a CUDA tensor");
    TORCH_CHECK(targets.is_cuda(), "targets must be a CUDA tensor");
    TORCH_CHECK(predictions.numel() == targets.numel(), "predictions and targets must have the same number of elements");

    const int64_t num_elements = predictions.numel();
    auto accumulator = torch::zeros({1}, predictions.options().dtype(at::kDouble));

    // Cap the launch at 1024 blocks; the grid-stride loop covers any remainder.
    // Computing the block count in 64 bits avoids narrowing num_elements.
    const int64_t blocks = (num_elements + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const int grid_size = blocks < 1024 ? static_cast<int>(blocks) : 1024;

    AT_DISPATCH_FLOATING_TYPES(predictions.scalar_type(), "stride_mse_cuda", ([&] {
        stride_mse_kernel<scalar_t><<<grid_size, BLOCK_SIZE>>>(
            predictions.data_ptr<scalar_t>(),
            targets.data_ptr<scalar_t>(),
            accumulator.data_ptr<double>(),
            num_elements);
    }));

    // Compute the mean squared error
    auto result = accumulator.div_(static_cast<double>(num_elements));
    return result.to(predictions.dtype());
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &forward, "Stride Loop Reduction MSE Forward (CUDA)");
}
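
A hedged sketch of how this extension could be compiled and exercised with torch.utils.cpp_extension. The file name stride_mse.cu and the module name are assumptions for illustration, not taken from the original project:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Assumes the C++/CUDA source above was saved to stride_mse.cu (hypothetical path).
stride_mse = load(name="stride_mse", sources=["stride_mse.cu"])

preds = torch.randn(128, 4096, device="cuda")
tgts = torch.randn(128, 4096, device="cuda")
out = stride_mse.forward(preds, tgts)
print(torch.allclose(out, F.mse_loss(preds, tgts), atol=1e-6))  # expect True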
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 1.692 inst/cycle 0.001 5
Executed Ipc Elapsed 0.970 inst/cycle 0.000 5
Issue Slots Busy 44.206 % 0.860 5
Issued Ipc Active 1.768 inst/cycle 0.002 5
SM Busy 44.206 % 0.860 5
Memory Throughput 670749262198.834 byte/second 76626340838587842560.000 5
Mem Busy 22.676 % 0.100 5
Max Bandwidth 20.116 % 0.075 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 18.574 % 0.006 5
Mem Pipes Busy 19.020 % 0.070 5
Warp Cycles Per Issued Instruction 31.068 cycle 0.687 5
Warp Cycles Per Executed Instruction 32.436 cycle 0.754 5
Avg. Active Threads Per Warp 31.820 0.000 5
Avg. Not Predicated Off Threads Per Warp 23.710 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 10.000 block 0.000 5
Block Limit Shared Mem 21.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 85.264 % 0.039 5
Achieved Active Warps Per SM 54.570 warp 0.016 5
Analysis Rules
Rule Description
INF HighPipeUtilization ALU is the highest-utilized pipeline (26.2%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. It is well-utilized, but should not be a bottleneck.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN ThreadDivergence Instructions are executed in warps, which are groups of 32 threads. Optimal instruction throughput is achieved if all 32 threads of a warp execute the same instruction. The chosen launch configuration, early thread completion, and divergent flow control can significantly lower the number of active threads in a warp per cycle. This kernel achieves an average of 31.8 threads being active per cycle. This is further reduced to 23.7 threads per warp due to predication. The compiler may use predication to avoid an actual branch. Instead, all instructions are scheduled, but a per-thread condition code or predicate controls which threads execute the instructions. Try to avoid different execution paths within a warp when possible. In addition, ensure your kernel makes use of Independent Thread Scheduling, which allows a warp to reconverge after a data-dependent conditional block by explicitly calling __syncwarp().
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (85.2%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
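
As a worked check of the numbers behind the Occupancy rule, achieved occupancy is achieved active warps per SM divided by the theoretical maximum (values copied from the metrics table above):

# Reproduces the reported Achieved Occupancy from the warp counts in the table.
theoretical_warps = 64.0   # Theoretical Active Warps per SM
achieved_warps = 54.570    # Achieved Active Warps Per SM
print(f"{100 * achieved_warps / theoretical_warps:.1f}%")  # ~85.3%, matching the table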
Operation (all times in μs; every CPU/device memory-usage counter was 0 B and is omitted)
aten::to: CPU Time 1298730.39, Device Time 229206.05, Self CPU Time 47978.15, Self Device Time 0.00
aten::_to_copy: CPU Time 1250752.25, Device Time 229206.05, Self CPU Time 194882.07, Self Device Time 0.00
aten::zero_: CPU Time 4319538.15, Device Time 6809221.93, Self CPU Time 244563.40, Self Device Time 0.00
aten::fill_: CPU Time 4074975.69, Device Time 6809221.93, Self CPU Time 345795.21, Self Device Time 6809141.00
cudaLaunchKernel: CPU Time 4723678.85, Device Time 522633.94, Self CPU Time 4723678.85, Self Device Time 522633.94
void stride_mse_kernel<float>(float const*, float const*, double*, long): CPU Time 0.00, Device Time 468141.60, Self CPU Time 0.00, Self Device Time 468141.60
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>): CPU Time 0.00, Device Time 6605653.74, Self CPU Time 0.00, Self Device Time 6605653.74
Status: Completed
45279 warnings generated when compiling for host.
Suppressed 45319 warnings (45272 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b6_s2_stride_mse_loss/base/base.cu:19:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
19 | int idx = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b6_s2_stride_mse_loss/base/base.cu:20:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
20 | int stride = blockDim.x * gridDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b6_s2_stride_mse_loss/base/base.cu:35:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
35 | for (int s = blockDim.x / 2; s > 0; s >>= 1) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b6_s2_stride_mse_loss/base/base.cu:59:21: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
59 | int grid_size = (num_elements + BLOCK_SIZE - 1) / BLOCK_SIZE;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250202_optimize_b10_s4_e0_sweep/level_1/task_94/b6_s2_stride_mse_loss/base/base.cu:62:5: warning: inside a lambda, '__func__' expands to the name of the function call operator; consider capturing the name of the enclosing function explicitly [bugprone-lambda-function-name]
62 | AT_DISPATCH_FLOATING_TYPES(predictions.scalar_type(), "stride_mse_cuda", ([&] {
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:237:34: note: expanded from macro 'AT_DISPATCH_FLOATING_TYPES'
237 | AT_DISPATCH_SWITCH(TYPE, NAME, AT_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:233:3: note: expanded from macro 'AT_DISPATCH_CASE_FLOATING_TYPES'
233 | AT_DISPATCH_CASE(at::ScalarType::Double, __VA_ARGS__) \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:74:3: note: expanded from macro 'AT_DISPATCH_CASE'
74 | AT_PRIVATE_CASE_TYPE_USING_HINT(enum_type, scalar_t, __VA_ARGS__)
| ^
note: (skipping 1 expansions in backtrace; use -fmacro-backtrace-limit=0 to see all)
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/ATen/Dispatch.h:58:7: note: expanded from macro 'AT_PRIVATE_CHECK_SELECTIVE_BUILD'
58 | AT_ERROR( \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:711:32: note: expanded from macro 'AT_ERROR'
711 | C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
| ^
/home/robert_sakana_ai/miniconda3/envs/llm2cuda/lib/python3.11/site-packages/torch/include/c10/util/Exception.h:536:9: note: expanded from macro 'TORCH_CHECK'
536 | __func__, \
| ^