Searched refs:CUDA (Results 1 – 25 of 89) sorted by relevance


/freebsd/contrib/llvm-project/clang/lib/Frontend/
FrontendOptions.cpp:22 .Case("cui", InputKind(Language::CUDA).getPreprocessed()) in getInputKindForExtension()
33 .Cases("cu", "cuh", Language::CUDA) in getInputKindForExtension()
/freebsd/contrib/llvm-project/clang/lib/Basic/
LangStandards.cpp:36 case Language::CUDA: in languageToString()
100 case Language::CUDA: in getDefaultLanguageStandard()
LangOptions.cpp:182 Opts.CUDA = Lang == Language::CUDA || Opts.HIP; in setLangDefaults()
193 } else if (Opts.CUDA) { in setLangDefaults()
Builtins.cpp:117 if (!LangOpts.CUDA && BuiltinInfo.Langs == CUDA_LANG) in builtinIsSupported()
/freebsd/contrib/llvm-project/clang/lib/Sema/
SemaCUDA.cpp:38 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in PushForceCUDAHostDevice()
43 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in PopForceCUDAHostDevice()
725 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in maybeAddCUDAHostDeviceAttrs()
803 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in CUDADiagIfDeviceCode()
833 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in CUDADiagIfHostCode()
861 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in CheckCUDACall()
972 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in CUDASetLambdaAttrs()
981 assert(getLangOpts().CUDA && "Should only be called during CUDA compilation"); in checkCUDATargetOverload()
/freebsd/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/
cmath:1 /*===---- cmath - CUDA wrapper for <cmath> ---------------------------------===
31 // libc++ will need long double variants of these functions, but CUDA does not
71 // which we can't handle on GPU. We need to forward those to CUDA-provided
new:1 /*===---- new - CUDA wrapper for <new> -------------------------------------===
31 // and CUDA-specific macros are not available yet.
complex:1 /*===---- complex - CUDA wrapper for <complex> ------------------------------===
47 // functions that don't exist when compiling CUDA device code).
algorithm:1 /*===---- algorithm - CUDA wrapper for <algorithm> -------------------------===
/freebsd/lib/clang/headers/
Makefile:205 INCSGROUPS+= CUDA
207 CUDA+= cuda_wrappers/algorithm
208 CUDA+= cuda_wrappers/cmath
209 CUDA+= cuda_wrappers/complex
210 CUDA+= cuda_wrappers/new
/freebsd/contrib/llvm-project/llvm/lib/Target/NVPTX/
NVPTXLowerArgs.cpp:434 if (TM.getDrvInterface() == NVPTX::CUDA) { in runOnKernelFunction()
461 else if (TM.getDrvInterface() == NVPTX::CUDA) in runOnKernelFunction()
464 TM.getDrvInterface() == NVPTX::CUDA) { in runOnKernelFunction()
NVPTXSubtarget.cpp:62 if (TM.getDrvInterface() == NVPTX::CUDA) in hasImageHandles()
NVPTX.h:80 CUDA enumerator
NVPTXAsmPrinter.h:250 NVPTX::CUDA) {} in NVPTXAsmPrinter()
/freebsd/contrib/llvm-project/clang/include/clang/Basic/
LangOptions.def:245 LANGOPT(CUDA , 1, 0, "CUDA")
254 LANGOPT(OpenMPCUDANumSMs , 32, 0, "Number of SMs for CUDA devices.")
255 LANGOPT(OpenMPCUDABlocksPerSM , 32, 0, "Number of blocks per SM for CUDA devices.")
271 LANGOPT(CUDAIsDevice , 1, 0, "compiling for CUDA device")
272 LANGOPT(CUDAAllowVariadicFunctions, 1, 0, "allowing variadic functions in CUDA device code")
276 …eTemplates, 1, 0, "assume template functions to be implicitly host device by default for CUDA/HIP")
279 LANGOPT(GPUDeferDiag, 1, 0, "defer host/device related diagnostic messages for CUDA/HIP")
280 …gSideOverloads, 1, 0, "always exclude wrong side overloads in overloading resolution for CUDA/HIP")
288 …sume that kernels are launched with uniform block sizes (default true for CUDA/HIP and false other…
DiagnosticDriverKinds.td:58 def err_drv_cuda_bad_gpu_arch : Error<"unsupported CUDA gpu architecture: %0">;
61 "cannot find CUDA installation; provide its path via '--cuda-path', or pass "
62 "'-nocudainc' to build without CUDA includes">;
64 "cannot find libdevice for %0; provide path to different CUDA installation "
101 "GPU arch %0 is supported by CUDA versions between %1 and %2 (inclusive), "
102 "but installation at %3 is %4; use '--cuda-path' to specify a different CUDA "
106 "CUDA version%0 is newer than the latest%select{| partially}1 supported version %2">,
109 "CUDA version %0 is only partially supported">,
114 "mixed CUDA and HIP compilation is not supported">;
357 "NVPTX target requires CUDA 9.2 or above; CUDA %0 detected">;
[all …]
LangStandard.h:40 CUDA, enumerator
Features.def:291 // CUDA/HIP Features
292 FEATURE(cuda_noinline_keyword, LangOpts.CUDA)
293 EXTENSION(cuda_implicit_host_device_templates, LangOpts.CUDA && LangOpts.OffloadImplicitHostDeviceT…
Attr.td:398 def CUDA : LangOpt<"CUDA">;
1261 let LangOpts = [CUDA];
1267 let LangOpts = [CUDA];
1273 let LangOpts = [CUDA];
1279 let LangOpts = [CUDA];
1285 let LangOpts = [CUDA];
1295 let LangOpts = [CUDA];
1307 let LangOpts = [CUDA];
1315 let LangOpts = [CUDA];
1337 let LangOpts = [CUDA];
[all …]
/freebsd/contrib/llvm-project/clang/lib/AST/
MicrosoftCXXABI.cpp:122 if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) { in MicrosoftCXXABI()
192 if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) { in createMangleNumberingContext()
/freebsd/contrib/llvm-project/clang/lib/CodeGen/Targets/
NVPTX.cpp:223 if (M.getLangOpts().CUDA) { in setTargetAttributes()
251 if (M.getLangOpts().CUDA) { in setTargetAttributes()
/freebsd/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/
cmath:39 // Overloads not provided by the CUDA wrappers but by the CUDA system headers.
/freebsd/sys/powerpc/conf/
NOTES:58 device cuda # VIA-CUDA ADB interface
/freebsd/contrib/llvm-project/clang/lib/CodeGen/
TargetInfo.cpp:127 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) && in getGlobalVarAddressSpace()
/freebsd/contrib/llvm-project/clang/include/clang/Sema/
SemaInternal.h:45 if (!LangOpts.CUDA || !D) in DeclAttrsMatchCUDAMode()
