
The AI CUDA Engineer 👷

7_Conv3d_ReLU_LeakyReLU_GELU_Sigmoid_BiasAdd • modular_vectorized_activations_base

Level 2 • Task 7
import torch
import torch.nn as nn
import torch.nn.functional as F


def module_fn(
    x: torch.Tensor,
    conv_weight: torch.Tensor,
    conv_bias: torch.Tensor,
    bias: torch.Tensor,
) -> torch.Tensor:
    """
    Applies 3D convolution followed by ReLU, LeakyReLU, GELU, Sigmoid activations and bias addition.

    Args:
        x (torch.Tensor): Input tensor of shape (batch_size, in_channels, depth, height, width)
        conv_weight (torch.Tensor): 3D convolution weight tensor of shape
            (out_channels, in_channels, kernel_size, kernel_size, kernel_size)
        conv_bias (torch.Tensor): Bias tensor for 3D convolution of shape (out_channels)
        bias (torch.Tensor): Bias tensor for addition of shape (out_channels, 1, 1, 1)

    Returns:
        torch.Tensor: Output tensor after applying the convolution, activations, and bias addition
    """
    x = F.conv3d(x, conv_weight, bias=conv_bias)
    x = F.relu(x)
    x = F.leaky_relu(x, negative_slope=0.01)
    x = F.gelu(x)
    x = torch.sigmoid(x)
    x = x + bias
    return x


class Model(nn.Module):
    """
    Model that performs a 3D convolution, applies ReLU, LeakyReLU, GELU, Sigmoid activations, and bias in sequence.
    """

    def __init__(self, in_channels, out_channels, kernel_size, bias_shape):
        super(Model, self).__init__()
        conv = nn.Conv3d(in_channels, out_channels, kernel_size)
        self.conv_weight = nn.Parameter(conv.weight)
        self.conv_bias = nn.Parameter(conv.bias)
        self.bias = nn.Parameter(torch.randn(bias_shape) * 0.02)

    def forward(self, x, fn=module_fn):
        return fn(x, self.conv_weight, self.conv_bias, self.bias)


batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 16, 32, 32
kernel_size = 3
bias_shape = (out_channels, 1, 1, 1)


def get_inputs():
    return [torch.randn(batch_size, in_channels, depth, height, width)]


def get_init_inputs():
    return [in_channels, out_channels, kernel_size, bias_shape]
import torch
import torch.nn as nn

class Model(nn.Module):
    """
    Model that performs a 3D convolution, applies ReLU, LeakyReLU, GELU, Sigmoid activations, and bias in sequence.
    """
    def __init__(self, in_channels, out_channels, kernel_size, bias_shape):
        super(Model, self).__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size)
        self.bias = nn.Parameter(torch.randn(bias_shape) * 0.02) 

    def forward(self, x):
        x = self.conv(x)
        x = torch.relu(x)
        x = torch.nn.functional.leaky_relu(x, negative_slope=0.01)
        x = torch.nn.functional.gelu(x)
        x = torch.sigmoid(x)
        x = x + self.bias
        return x

batch_size = 128
in_channels = 3
out_channels = 16
depth, height, width = 16, 32, 32
kernel_size = 3
bias_shape = (out_channels, 1, 1, 1)

def get_inputs():
    return [torch.randn(batch_size, in_channels, depth, height, width)]

def get_init_inputs():
    return [in_channels, out_channels, kernel_size, bias_shape]
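
As a quick sanity check (not part of the original harness), the reference model can be instantiated from get_init_inputs() and run on get_inputs(); with a 3x3x3 valid convolution the spatial size shrinks from (16, 32, 32) to (14, 30, 30).

# Hypothetical smoke test for the reference implementation above.
model = Model(*get_init_inputs())
x, = get_inputs()
with torch.no_grad():
    out = model(x)
print(out.shape)  # torch.Size([128, 16, 14, 30, 30])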

Kernel Information

#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <algorithm>  // std::min used in the host-side launch configuration

// Macros for input checking
#define CHECK_CUDA(x) TORCH_CHECK((x).is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK((x).is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

// Device function for Leaky ReLU
__device__ __forceinline__ float leaky_relu(float x) {
    return (x > 0.0f) ? x : x * 0.01f;
}

// Device function for GELU approximation using tanh
__device__ __forceinline__ float gelu(float x) {
    float x_cube = x * x * x;
    float inner = 0.79788456f * (x + 0.044715f * x_cube);
    return 0.5f * x * (1.0f + tanhf(inner));
}
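
// The constants above implement the standard tanh approximation of GELU:
// gelu(x) ≈ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
// with 0.79788456f ≈ sqrt(2/pi).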

// Device function for Sigmoid
__device__ __forceinline__ float sigmoid(float x) {
    return 1.0f / (1.0f + expf(-x));
}

// Device function that applies the reference activation chain:
// ReLU -> LeakyReLU -> GELU -> Sigmoid. LeakyReLU is the identity on the
// non-negative output of ReLU, but it is kept so the chain mirrors the model.
__device__ __forceinline__ float activation_chain(float x) {
    x = fmaxf(x, 0.0f);  // ReLU
    x = leaky_relu(x);   // no-op after ReLU, kept for fidelity to the reference
    x = gelu(x);
    return sigmoid(x);
}

__global__ void apply_activations_and_bias(
    float* __restrict__ output,
    const float* __restrict__ bias,
    int total_elements,
    int channel_stride,
    int out_channels
) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
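
    // Fast path: when total_elements is a multiple of 4, reinterpret the output buffer
    // as float4 so each grid-stride iteration loads and stores 16 bytes per thread
    // (the fresh tensor returned by torch::conv3d is sufficiently aligned for this).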

    if ((total_elements & 3) == 0) {
        int vec_elements = total_elements >> 2;
        float4* out_vec = reinterpret_cast<float4*>(output);
        while (idx < vec_elements) {
            float4 v = out_vec[idx];
            int base_index = idx << 2; // idx * 4
            float elems[4] = { v.x, v.y, v.z, v.w };
            #pragma unroll
            for (int j = 0; j < 4; j++) {
                int global_idx = base_index + j;
                // modulo rather than a bitmask so the mapping is correct for any out_channels
                int channel = (global_idx / channel_stride) % out_channels;
                float bias_val = __ldg(&bias[channel]);
                elems[j] = activation_chain(elems[j]) + bias_val;
            }
            out_vec[idx] = make_float4(elems[0], elems[1], elems[2], elems[3]);
            idx += stride;
        }
    } else {
        while (idx < total_elements) {
            int channel = (idx / channel_stride) % out_channels;
            float bias_val = __ldg(&bias[channel]);
            output[idx] = activation_chain(output[idx]) + bias_val;
            idx += stride;
        }
    }
}

// Host function: performs 3D convolution and then applies fused activations with bias.
torch::Tensor module_fn_cuda(
    const torch::Tensor& x,
    const torch::Tensor& conv_weight,
    const torch::Tensor& conv_bias,
    const torch::Tensor& bias
) {
    CHECK_INPUT(x);
    CHECK_INPUT(conv_weight);
    CHECK_INPUT(conv_bias);
    CHECK_INPUT(bias);

    auto output = torch::conv3d(x, conv_weight, conv_bias);

    int batch_size = output.size(0);
    int out_channels = output.size(1);
    int depth = output.size(2);
    int height = output.size(3);
    int width = output.size(4);
    int total_elements = batch_size * out_channels * depth * height * width;
    int channel_stride = depth * height * width;
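    // channel_stride is the element count of one (depth, height, width) slice; dividing a
    // flattened NCDHW index by it, modulo out_channels, recovers the channel used to
    // index the broadcast bias.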

    int threads = 256;
    int blocks;
    if ((total_elements & 3) == 0) {
        int vec_elements = total_elements >> 2;
        // std::min: the unqualified min overloads visible here are __device__-only in
        // host code, which is what produced the compile errors reported further below.
        blocks = std::min(65535, (vec_elements + threads - 1) / threads);
    } else {
        blocks = std::min(65535, (total_elements + threads - 1) / threads);
    }

    apply_activations_and_bias<<<blocks, threads>>>(
        output.data_ptr<float>(),
        bias.data_ptr<float>(),
        total_elements,
        channel_stride,
        out_channels
    );

    return output;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &module_fn_cuda, "Modular vectorized activations and bias");
}
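
One way to exercise the extension from Python is through torch.utils.cpp_extension.load. The sketch below assumes the CUDA source above is saved as base.cu and that a CUDA device is available; the file name and the fused_ext module name are placeholders rather than part of the original submission.

import torch
import torch.nn as nn
from torch.utils.cpp_extension import load

# Compile and load the extension above ("base.cu" and "fused_ext" are placeholder names).
fused_ext = load(name="fused_ext", sources=["base.cu"], verbose=True)

conv = nn.Conv3d(3, 16, 3).cuda()
bias = torch.randn(16, 1, 1, 1, device="cuda") * 0.02
x = torch.randn(128, 3, 16, 32, 32, device="cuda")

# The extension expects contiguous CUDA tensors (see CHECK_INPUT above).
out = fused_ext.forward(
    x.contiguous(),
    conv.weight.contiguous(),
    conv.bias.contiguous(),
    bias.contiguous(),
)
print(out.shape)  # torch.Size([128, 16, 14, 30, 30])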
Performance Metrics
Metric Value Unit Variance Samples
Executed Ipc Active 3.158 inst/cycle 0.000 5
Executed Ipc Elapsed 3.052 inst/cycle 0.000 5
Issue Slots Busy 78.944 % 0.091 5
Issued Ipc Active 3.160 inst/cycle 0.000 5
SM Busy 78.944 % 0.091 5
Memory Throughput 2402038397421.234 byte/second 35489956584342835200.000 5
Mem Busy 40.694 % 0.002 5
Max Bandwidth 71.686 % 0.032 5
L1/TEX Hit Rate 54.000 % 0.001 5
L2 Hit Rate 50.492 % 0.023 5
Mem Pipes Busy 17.862 % 0.002 5
Warp Cycles Per Issued Instruction 16.868 cycle 0.001 5
Warp Cycles Per Executed Instruction 16.878 cycle 0.001 5
Avg. Active Threads Per Warp 32.000 0.000 5
Avg. Not Predicated Off Threads Per Warp 24.640 0.000 5
Max Active Clusters 0.000 cluster 0.000 5
Max Cluster Size 8.000 block 0.000 5
Overall GPU Occupancy 0.000 % 0.000 5
Cluster Occupancy 0.000 % 0.000 5
Block Limit SM 32.000 block 0.000 5
Block Limit Registers 8.000 block 0.000 5
Block Limit Shared Mem 32.000 block 0.000 5
Block Limit Warps 8.000 block 0.000 5
Theoretical Active Warps per SM 64.000 warp 0.000 5
Theoretical Occupancy 100.000 % 0.000 5
Achieved Occupancy 83.738 % 0.021 5
Achieved Active Warps Per SM 53.590 warp 0.009 5
Analysis Rules
Rule Description
WRN HighPipeUtilization ALU is the highest-utilized pipeline (62.6%) based on active cycles, taking into account the rates of its different instructions. It executes integer and logic operations. The pipeline is well-utilized, but might become a bottleneck if more work is added. Based on the number of executed instructions, the highest utilized pipeline (62.6%) is ALU. It executes integer and logic operations. Comparing the two, the overall pipeline utilization appears to be caused by frequent, low-latency instructions. See the Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-decoder) or hover over the pipeline name to understand the workloads handled by each pipeline. The Instruction Statistics section shows the mix of executed instructions in this kernel. Check the Warp State Statistics section for which reasons cause warps to stall.
INF CPIStall Check the Warp Stall Sampling (All Cycles) table for the top stall locations in your source based on sampling data. The Kernel Profiling Guide (https://docs.nvidia.com/nsight-compute/ProfilingGuide/index.html#metrics-reference) provides more details on each stall reason.
WRN Occupancy This kernel's theoretical occupancy is not impacted by any block limit. The difference between calculated theoretical (100.0%) and measured achieved occupancy (83.8%) can be the result of warp scheduling overheads or workload imbalances during the kernel execution. Load imbalances can occur between warps within a block as well as across blocks of the same kernel. See the CUDA Best Practices Guide (https://docs.nvidia.com/cuda/cuda-c-best-practices-guide/index.html#occupancy) for more details on optimizing occupancy.
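
For reference, the achieved-occupancy figure above is simply the ratio of achieved to theoretical active warps per SM, which a quick check reproduces from the table values:

theoretical_warps = 64.0   # Theoretical Active Warps per SM
achieved_warps = 53.590    # Achieved Active Warps Per SM
print(f"{achieved_warps / theoretical_warps:.1%}")  # -> 83.7%, matching Achieved Occupancy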
Operation / Metric Value Unit
aten::conv3d
CPU Time 751868.45 μs
Device Time 5446130.09 μs
Self CPU Time 13428.96 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::convolution
CPU Time 738439.49 μs
Device Time 5446130.09 μs
Self CPU Time 18250.02 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::_convolution
CPU Time 720189.47 μs
Device Time 5446130.09 μs
Self CPU Time 37658.39 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
aten::cudnn_convolution
CPU Time 598110.03 μs
Device Time 4721023.47 μs
Self CPU Time 179083.73 μs
Self Device Time 4721023.47 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaEventRecord
CPU Time 630065.23 μs
Device Time 0.00 μs
Self CPU Time 630065.23 μs
Self Device Time 0.00 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
sm80_xmma_fprop_implicit_gemm_indexed_f32f32_f32f32_f32_nchwkcrs_nchw_tilesize32x32x8_stage3_warpsize1x2x1_g1_ffma_aligna4_alignc4_execute_kernel__5x_cudnn
CPU Time 0.00 μs
Device Time 4721020.82 μs
Self CPU Time 0.00 μs
Self Device Time 4721020.82 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
cudaLaunchKernel
CPU Time 5065150.81 μs
Device Time 329741.21 μs
Self CPU Time 5065150.81 μs
Self Device Time 329741.21 μs
CPU Memory Usage 0 B
Device Memory Usage 0 B
Self CPU Memory Usage 0 B
Self Device Memory Usage 0 B
Status: Failed
45250 warnings and 2 errors generated when compiling for host.
Error while processing /home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu.
Suppressed 45289 warnings (45242 in non-user code, 47 NOLINT).
Use -header-filter=.* to display errors from all non-system headers. Use -system-headers to display errors from system headers as well.
Found compiler error(s).
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:38:5 bugprone-easily-swappable-parameters
38 | int total_elements,
| ^~~~~~~~~~~~~~~~~~~
39 | int channel_stride,
| ~~~~~~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:38:9: note: the first parameter in the range is 'total_elements'
38 | int total_elements,
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:39:9: note: the last parameter in the range is 'channel_stride'
39 | int channel_stride,
| ^~~~~~~~~~~~~~
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:42:15: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
42 | int idx = blockIdx.x * blockDim.x + threadIdx.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:43:18: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
43 | int stride = gridDim.x * blockDim.x;
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:86:22: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
86 | int batch_size = output.size(0);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:87:24: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
87 | int out_channels = output.size(1);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:88:17: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
88 | int depth = output.size(2);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:89:18: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
89 | int height = output.size(3);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:90:17: warning: narrowing conversion from 'int64_t' (aka 'long') to signed type 'int' is implementation-defined [bugprone-narrowing-conversions]
90 | int width = output.size(4);
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:98:18: error: no matching function for call to 'min' [clang-diagnostic-error]
98 | blocks = min(65535, (vec_elements + threads - 1) / threads);
| ^~~
/home/common_modules/clang-tidy/20.0.0git/lib/clang/20/include/__clang_cuda_math.h:201:16: note: candidate function not viable: call to __device__ function from __host__ function
201 | __DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
| ^
/usr/local/cuda/include/crt/math_functions.hpp:868:38: note: candidate function not viable: call to __device__ function from __host__ function
868 | __MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const unsigned int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:873:38: note: candidate function not viable: call to __device__ function from __host__ function
873 | __MATH_FUNCTIONS_DECL__ unsigned int min(const int a, const unsigned int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:878:38: note: candidate function not viable: call to __device__ function from __host__ function
878 | __MATH_FUNCTIONS_DECL__ unsigned int min(const unsigned int a, const int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:883:34: note: candidate function not viable: call to __device__ function from __host__ function
883 | __MATH_FUNCTIONS_DECL__ long int min(const long int a, const long int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:902:43: note: candidate function not viable: call to __device__ function from __host__ function
902 | __MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const unsigned long int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:919:43: note: candidate function not viable: call to __device__ function from __host__ function
919 | __MATH_FUNCTIONS_DECL__ unsigned long int min(const long int a, const unsigned long int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:936:43: note: candidate function not viable: call to __device__ function from __host__ function
936 | __MATH_FUNCTIONS_DECL__ unsigned long int min(const unsigned long int a, const long int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:953:39: note: candidate function not viable: call to __device__ function from __host__ function
953 | __MATH_FUNCTIONS_DECL__ long long int min(const long long int a, const long long int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:958:48: note: candidate function not viable: call to __device__ function from __host__ function
958 | __MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const unsigned long long int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:963:48: note: candidate function not viable: call to __device__ function from __host__ function
963 | __MATH_FUNCTIONS_DECL__ unsigned long long int min(const long long int a, const unsigned long long int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:968:48: note: candidate function not viable: call to __device__ function from __host__ function
968 | __MATH_FUNCTIONS_DECL__ unsigned long long int min(const unsigned long long int a, const long long int b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:973:31: note: candidate function not viable: call to __device__ function from __host__ function
973 | __MATH_FUNCTIONS_DECL__ float min(const float a, const float b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:978:32: note: candidate function not viable: call to __device__ function from __host__ function
978 | __MATH_FUNCTIONS_DECL__ double min(const double a, const double b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:983:32: note: candidate function not viable: call to __device__ function from __host__ function
983 | __MATH_FUNCTIONS_DECL__ double min(const float a, const double b)
| ^
/usr/local/cuda/include/crt/math_functions.hpp:988:32: note: candidate function not viable: call to __device__ function from __host__ function
988 | __MATH_FUNCTIONS_DECL__ double min(const double a, const float b)
| ^
/home/robert_sakana_ai/llm_cuda/experiments/20250214_optimize_b10_s4_e0_prof_ncu_clang/level_2/task_7/b10_s1_modular_vectorized_activations/base/base.cu:100:18: error: no matching function for call to 'min' [clang-diagnostic-error]
100 | blocks = min(65535, (total_elements + threads - 1) / threads);
| ^~~
(The same list of non-viable 'min' candidates shown for the call at line 98 applies to this call as well.)