
The AI CUDA Engineer 👷

97_CosineSimilarityLoss • 97_cosine_similarity_loss_constant_memory_base

Level 1 • Task 97
PyTorch reference (functional form):

import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(predictions: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    """
    Computes the Cosine Similarity Loss for comparing vectors.

    Args:
        predictions (torch.Tensor): Predicted values.
        targets (torch.Tensor): Target values.

    Returns:
        torch.Tensor: Cosine Similarity Loss.
    """
    cosine_sim = F.cosine_similarity(predictions, targets, dim=1)
    return torch.mean(1 - cosine_sim)


class Model(nn.Module):
    """
    A model that computes Cosine Similarity Loss for comparing vectors.

    Parameters:
        None
    """

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets, fn=module_fn):
        return fn(predictions, targets)


batch_size = 128
input_shape = (4096,)
dim = 1


def get_inputs():
    return [
        torch.randn(batch_size, *input_shape),
        torch.randn(batch_size, *input_shape),
    ]


def get_init_inputs():
    return []
PyTorch reference (original nn.Module):

import torch
import torch.nn as nn

class Model(nn.Module):
    """
    A model that computes Cosine Similarity Loss for comparing vectors.

    Parameters:
        None
    """
    def __init__(self):
        super(Model, self).__init__()

    def forward(self, predictions, targets):
        cosine_sim = torch.nn.functional.cosine_similarity(predictions, targets, dim=1)
        return torch.mean(1 - cosine_sim)

batch_size = 128
input_shape = (4096, )
dim = 1

def get_inputs():
    return [torch.randn(batch_size, *input_shape), torch.randn(batch_size, *input_shape)]

def get_init_inputs():
    return []
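Both listings compute the same scalar objective. Written out for a batch of N row vectors p_i and t_i of dimension D, the loss is the mean cosine distance; ε is a small constant guarding the division (PyTorch clamps each norm separately, while the kernel below clamps the product of the norms):

    \mathcal{L} = \frac{1}{N} \sum_{i=1}^{N} \left( 1 - \frac{p_i \cdot t_i}{\max\!\big(\lVert p_i \rVert_2 \, \lVert t_i \rVert_2,\ \varepsilon\big)} \right), \qquad \varepsilon = 10^{-8}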

Kernel Information


#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

// Constant memory buffers for the input rows. Device constant memory totals
// 64 KB, so each buffer holds at most 1024 floats (4 KB); inputs whose total
// element count exceeds this cannot use the constant-memory path.
__constant__ float const_predictions[1024];
__constant__ float const_targets[1024];

// Optimized kernel using constant memory for predictions and targets
__global__ void cosine_similarity_loss_kernel_constant(const int N, const int D, float* output) {
    extern __shared__ float shared_mem[];

    int row = blockIdx.x;
    int tid = threadIdx.x;
    int blockSize = blockDim.x;

    float* shared_preds = shared_mem;
    float* shared_targets = shared_preds + D;

    // Stage this row's elements into shared memory, striding by the block
    // size so every element is covered even when D exceeds blockDim.x
    for (int i = tid; i < D; i += blockSize) {
        shared_preds[i] = const_predictions[row * D + i];
        shared_targets[i] = const_targets[row * D + i];
    }
    __syncthreads();

    float sum_dot = 0.0f;
    float sum_pred_sq = 0.0f;
    float sum_target_sq = 0.0f;

    for (int i = tid; i < D; i += blockSize) {
        float p = shared_preds[i];
        float t = shared_targets[i];
        sum_dot += p * t;
        sum_pred_sq += p * p;
        sum_target_sq += t * t;
    }

    // Warp-level reduction using __shfl_down_sync
    unsigned int mask = 0xffffffff;
    for (int offset = 16; offset > 0; offset /= 2) {
        sum_dot += __shfl_down_sync(mask, sum_dot, offset);
        sum_pred_sq += __shfl_down_sync(mask, sum_pred_sq, offset);
        sum_target_sq += __shfl_down_sync(mask, sum_target_sq, offset);
    }

    int lane = tid & 31;        // tid % 32
    int warpId = tid >> 5;      // tid / 32
    int numWarps = (blockSize + warpSize - 1) / warpSize;

    // Per-warp partial sums are staged after the two D-length arrays; this
    // matches the (2 * D + 3 * numWarps) floats allocated by the host.
    float* s_dot       = shared_targets + D;
    float* s_pred_sq   = s_dot + numWarps;
    float* s_target_sq = s_pred_sq + numWarps;

    if (lane == 0) {
        s_dot[warpId] = sum_dot;
        s_pred_sq[warpId] = sum_pred_sq;
        s_target_sq[warpId] = sum_target_sq;
    }
    __syncthreads(); // Only synchronize here once for warp-level reduction

    if (tid < numWarps) {
        sum_dot      = s_dot[tid];
        sum_pred_sq  = s_pred_sq[tid];
        sum_target_sq= s_target_sq[tid];

        for (int offset = (numWarps >> 1); offset > 0; offset /= 2) {
            sum_dot += __shfl_down_sync(0xffffffff, sum_dot, offset);
            sum_pred_sq += __shfl_down_sync(0xffffffff, sum_pred_sq, offset);
            sum_target_sq += __shfl_down_sync(0xffffffff, sum_target_sq, offset);
        }

        if (tid == 0) {
            const float eps = 1e-8f;
            float norm_pred = sqrtf(sum_pred_sq);
            float norm_target = sqrtf(sum_target_sq);
            float denominator = norm_pred * norm_target;
            denominator = fmaxf(denominator, eps);
            float cos_sim = sum_dot / denominator;
            atomicAdd(output, 1.0f - cos_sim);
        }
    }
}

// Host function to copy data to constant memory and launch kernel
torch::Tensor cosine_similarity_loss_forward(torch::Tensor predictions, torch::Tensor targets) {
    TORCH_CHECK(predictions.dim() == 2, "predictions must be 2D");
    TORCH_CHECK(targets.dim() == 2, "targets must be 2D");
    TORCH_CHECK(predictions.sizes() == targets.sizes(), "Input tensors must have the same shape");
    TORCH_CHECK(predictions.scalar_type() == torch::kFloat32, "predictions must be float32");
    TORCH_CHECK(targets.scalar_type() == torch::kFloat32, "targets must be float32");

    int N = predictions.size(0);
    int D = predictions.size(1);

    auto output = torch::zeros({1}, predictions.options());

    // Copy predictions and targets to constant memory. The __constant__
    // buffers hold only 1024 floats each, so reject inputs that would
    // overflow them instead of silently copying out of bounds.
    TORCH_CHECK(N * D <= 1024, "N * D must not exceed the constant-memory buffer size (1024 floats)");
    cudaMemcpyToSymbol(const_predictions, predictions.data_ptr<float>(), N * D * sizeof(float));
    cudaMemcpyToSymbol(const_targets, targets.data_ptr<float>(), N * D * sizeof(float));

    const int block_size = 256;
    size_t shared_mem = (2 * D + 3 * ((block_size + 31) / 32)) * sizeof(float);

    // Launch one block per sample
    cosine_similarity_loss_kernel_constant<<<N, block_size, shared_mem>>>(N, D, output.data_ptr<float>());

    output.div_(N);
    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &cosine_similarity_loss_forward, "Cosine Similarity Loss Forward using constant memory (CUDA)");
}
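A minimal sketch of how the extension above could be built and checked against the PyTorch reference with torch.utils.cpp_extension.load. The source file name is hypothetical, and the test shape is deliberately small (N * D = 1024) so both inputs fit the constant-memory buffers declared in the kernel:

import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load

# Hypothetical file name holding the CUDA source listed above.
ext = load(
    name="cosine_similarity_loss_constant",
    sources=["97_cosine_similarity_loss_constant_memory_base.cu"],
    verbose=True,
)

# Keep N * D <= 1024 so each row set fits the __constant__ buffers.
preds = torch.randn(8, 128, device="cuda", dtype=torch.float32)
targets = torch.randn(8, 128, device="cuda", dtype=torch.float32)

got = ext.forward(preds, targets)            # shape (1,)
want = torch.mean(1 - F.cosine_similarity(preds, targets, dim=1))
assert abs(got.item() - want.item()) < 1e-5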
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 0.646 inst/cycle 0.000 5
Executed Ipc Elapsed 0.250 inst/cycle 0.000 5
Issue Slots Busy 16.200 % 0.001 5
Issued Ipc Active 0.650 inst/cycle 0.000 5
SM Busy 16.200 % 0.001 5
Memory Throughput 1507208326.726 byte/second 1325111860618848.250 5
Mem Busy 6.182 % 0.038 5
Max Bandwidth 5.226 % 0.028 5
L1/TEX Hit Rate 0.000 % 0.000 5
L2 Hit Rate 102.510 % 0.038 5
Mem Pipes Busy 14.636 % 0.217 5
Warp Cycles Per Issued Instruction 11.078 cycle 0.146 5
Warp Cycles Per Executed Instruction 11.128 cycle 0.150 5
Avg. Active Threads Per Warp 30.070 0.000 5
Avg. Not Predicated Off Threads Per Warp 28.590 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 6.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 48.000 warp 0.000 5
Theoretical Occupancy 75.000 % 0.000 5
Achieved Occupancy 11.112 % 0.000 5
Achieved Active Warps Per SM 7.110 warp 0.000 5
Analysis Rules
Rule Description
WRN HighPipeUtilization All compute pipelines are under-utilized. Either this kernel is very small or it doesn't issue enough warps per scheduler. Check the Launch Statistics and Scheduler Statistics sections for further details.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy (75.0%) is limited by the required amount of shared memory. The difference between calculated theoretical (75.0%) and measured achieved occupancy (11.1%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
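The shared-memory bound in the occupancy warning can be reproduced from the launch configuration, assuming the 64-warps-per-SM ceiling implied by the table above:

    dynamic shared memory per block = (2 * 4096 + 3 * 8) * 4 B = 32,864 B ≈ 32.1 KiB
    blocks per SM at that footprint = 6 (the "Block Limit Shared Mem" row)
    theoretical active warps = 6 blocks * (256 / 32) warps = 48, i.e. 48 / 64 = 75 %

which matches the reported theoretical occupancy; the much lower achieved occupancy (11.1%) is attributed by the rule text to warp-scheduling overhead and load imbalance.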
Operation / Metric Value Unit
aten::zeros
CPU Time 5126751.99 μs
Device Time 207528.54 μs
Self CPU Time 145179.68 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::zero_
CPU Time 5433791.09 μs
Device Time 7588439.51 μs
Self CPU Time 290899.45 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::fill_
CPU Time 5142893.02 μs
Device Time 7588439.51 μs
Self CPU Time 384551.81 μs
Self Device Time 7588360.38 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 5490212.64 μs
Device Time 181382.37 μs
Self CPU Time 5490212.64 μs
Self Device Time 181382.37 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaMemcpyToSymbol
CPU Time 102182.35 μs
Device Time 362831.72 μs
Self CPU Time 102182.35 μs
Self Device Time 362831.72 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cosine_similarity_loss_kernel_constant(int, int, float*)
CPU Time 0.00 μs
Device Time 394402.95 μs
Self CPU Time 0.00 μs
Self Device Time 394402.95 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::div_
CPU Time 861513.32 μs
Device Time 247844.16 μs
Self CPU Time 465145.87 μs
Self Device Time 247765.54 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
void at::native::vectorized_elementwise_kernel<4, at::native::FillFunctor<int>, at::detail::Array<char*, 1> >(int, at::native::FillFunctor<int>, at::detail::Array<char*, 1>)
CPU Time 0.00 μs
Device Time 7381304.57 μs
Self CPU Time 0.00 μs
Self Device Time 7381304.57 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Completed
45285 warnings generated when compiling for host.
Suppressed 45322 warnings (45275 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:11:56: warning: 2 adjacent parameters of 'cosine_similarity_loss_kernel_constant' of similar type ('const int') are easily swapped by mistake [bugprone-easily-swappable-parameters]
11 | __global__ void cosine_similarity_loss_kernel_constant(const int N, const int D, float* output) {
| ^~~~~~~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:11:66: note: the first parameter in the range is 'N'
11 | __global__ void cosine_similarity_loss_kernel_constant(const int N, const int D, float* output) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:11:79: note: the last parameter in the range is 'D'
11 | __global__ void cosine_similarity_loss_kernel_constant(const int N, const int D, float* output) {
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:14:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
14 | int row = blockIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:15:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
15 | int tid = threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:16:21: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
16 | int blockSize = blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:88:60: warning: the parameter 'predictions' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
88 | torch::Tensor cosine_similarity_loss_forward(torch::Tensor predictions, torch::Tensor targets) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:88:87: warning: the parameter 'targets' is copied for each invocation but only used as a const reference; consider making it a const reference [performance-unnecessary-value-param]
88 | torch::Tensor cosine_similarity_loss_forward(torch::Tensor predictions, torch::Tensor targets) {
| ^
| const &
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:95:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
95 | int N = predictions.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:96:13: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
96 | int D = predictions.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:101:74: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
101 | cudaMemcpyToSymbol(const_predictions, predictions.data_ptr<float>(), N * D * sizeof(float));
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:101:74: note: make conversion explicit to silence this warning
101 | cudaMemcpyToSymbol(const_predictions, predictions.data_ptr<float>(), N * D * sizeof(float));
| ^~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:101:74: note: perform multiplication in a wider type
101 | cudaMemcpyToSymbol(const_predictions, predictions.data_ptr<float>(), N * D * sizeof(float));
| ^
| static_cast<long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:102:66: warning: performing an implicit widening conversion to type 'unsigned long' of a multiplication performed in type 'int' [bugprone-implicit-widening-of-multiplication-result]
102 | cudaMemcpyToSymbol(const_targets, targets.data_ptr<float>(), N * D * sizeof(float));
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:102:66: note: make conversion explicit to silence this warning
102 | cudaMemcpyToSymbol(const_targets, targets.data_ptr<float>(), N * D * sizeof(float));
| ^~~~~
| static_cast<unsigned long>( )
/home/robert_sakana_ai/llm_cuda/experiments/20250212_optimize_b5_s4_e1_v2/level_1/task_97/b5_s1_97_cosine_similarity_loss_constant_memory/base/base.cu:102:66: note: perform multiplication in a wider type
102 | cudaMemcpyToSymbol(const_targets, targets.data_ptr<float>(), N * D * sizeof(float));
| ^
| static_cast<long>( )