//===--- Cuda.cpp - Cuda Tool and ToolChain Implementations -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Cuda.h"
#include "CommonArgs.h"
#include "clang/Basic/Cuda.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <system_error>

using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;

namespace {

CudaVersion getCudaVersion(uint32_t raw_version) {
  if (raw_version < 7050)
    return CudaVersion::CUDA_70;
  if (raw_version < 8000)
    return CudaVersion::CUDA_75;
  if (raw_version < 9000)
    return CudaVersion::CUDA_80;
  if (raw_version < 9010)
    return CudaVersion::CUDA_90;
  if (raw_version < 9020)
    return CudaVersion::CUDA_91;
  if (raw_version < 10000)
    return CudaVersion::CUDA_92;
  if (raw_version < 10010)
    return CudaVersion::CUDA_100;
  if (raw_version < 10020)
    return CudaVersion::CUDA_101;
  if (raw_version < 11000)
    return CudaVersion::CUDA_102;
  if (raw_version < 11010)
    return CudaVersion::CUDA_110;
  if (raw_version < 11020)
    return CudaVersion::CUDA_111;
  if (raw_version < 11030)
    return CudaVersion::CUDA_112;
  if (raw_version < 11040)
    return CudaVersion::CUDA_113;
  if (raw_version < 11050)
    return CudaVersion::CUDA_114;
  if (raw_version < 11060)
    return CudaVersion::CUDA_115;
  if (raw_version < 11070)
    return CudaVersion::CUDA_116;
  if (raw_version < 11080)
    return CudaVersion::CUDA_117;
  if (raw_version < 11090)
    return CudaVersion::CUDA_118;
  return CudaVersion::NEW;
}

CudaVersion parseCudaHFile(llvm::StringRef Input) {
  // Helper lambda that consumes the given words if the line starts with them,
  // returning the rest of the line, or returns std::nullopt otherwise.
  auto StartsWithWords =
      [](llvm::StringRef Line,
         const SmallVector<StringRef, 3> words) -> std::optional<StringRef> {
    for (StringRef word : words) {
      if (!Line.consume_front(word))
        return {};
      Line = Line.ltrim();
    }
    return Line;
  };

  Input = Input.ltrim();
  while (!Input.empty()) {
    if (auto Line =
            StartsWithWords(Input.ltrim(), {"#", "define", "CUDA_VERSION"})) {
      uint32_t RawVersion;
      Line->consumeInteger(10, RawVersion);
      return getCudaVersion(RawVersion);
    }
    // Find next non-empty line.
    Input = Input.drop_front(Input.find_first_of("\n\r")).ltrim();
  }
  return CudaVersion::UNKNOWN;
}
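// For example, a CUDA 11.4 installation typically ships a cuda.h containing a
// line of the form "#define CUDA_VERSION 11040" (1000 * major + 10 * minor);
// parseCudaHFile() extracts the raw value 11040 and getCudaVersion() maps it
// to CudaVersion::CUDA_114.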
} // namespace

void CudaInstallationDetector::WarnIfUnsupportedVersion() {
  if (Version > CudaVersion::PARTIALLY_SUPPORTED) {
    std::string VersionString = CudaVersionToString(Version);
    if (!VersionString.empty())
      VersionString.insert(0, " ");
    D.Diag(diag::warn_drv_new_cuda_version)
        << VersionString
        << (CudaVersion::PARTIALLY_SUPPORTED != CudaVersion::FULLY_SUPPORTED)
        << CudaVersionToString(CudaVersion::PARTIALLY_SUPPORTED);
  } else if (Version > CudaVersion::FULLY_SUPPORTED)
    D.Diag(diag::warn_drv_partially_supported_cuda_version)
        << CudaVersionToString(Version);
}

CudaInstallationDetector::CudaInstallationDetector(
    const Driver &D, const llvm::Triple &HostTriple,
    const llvm::opt::ArgList &Args)
    : D(D) {
  struct Candidate {
    std::string Path;
    bool StrictChecking;

    Candidate(std::string Path, bool StrictChecking = false)
        : Path(Path), StrictChecking(StrictChecking) {}
  };
  SmallVector<Candidate, 4> Candidates;

  // In decreasing order so we prefer newer versions to older versions.
  std::initializer_list<const char *> Versions = {"8.0", "7.5", "7.0"};
  auto &FS = D.getVFS();

  if (Args.hasArg(clang::driver::options::OPT_cuda_path_EQ)) {
    Candidates.emplace_back(
        Args.getLastArgValue(clang::driver::options::OPT_cuda_path_EQ).str());
  } else if (HostTriple.isOSWindows()) {
    for (const char *Ver : Versions)
      Candidates.emplace_back(
          D.SysRoot + "/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v" +
          Ver);
  } else {
    if (!Args.hasArg(clang::driver::options::OPT_cuda_path_ignore_env)) {
      // Try to find the ptxas binary. If the executable is located in a
      // directory called 'bin/', its parent directory might be a good guess
      // for a valid CUDA installation.
      // However, some distributions might install 'ptxas' to /usr/bin. In that
      // case the candidate would be '/usr', which passes the following checks
      // because '/usr/include' exists as well. To avoid this case, we always
      // check for the directory potentially containing files for libdevice,
      // even if the user passes -nocudalib.
      if (llvm::ErrorOr<std::string> ptxas =
              llvm::sys::findProgramByName("ptxas")) {
        SmallString<256> ptxasAbsolutePath;
        llvm::sys::fs::real_path(*ptxas, ptxasAbsolutePath);

        StringRef ptxasDir = llvm::sys::path::parent_path(ptxasAbsolutePath);
        if (llvm::sys::path::filename(ptxasDir) == "bin")
          Candidates.emplace_back(
              std::string(llvm::sys::path::parent_path(ptxasDir)),
              /*StrictChecking=*/true);
      }
    }

    Candidates.emplace_back(D.SysRoot + "/usr/local/cuda");
    for (const char *Ver : Versions)
      Candidates.emplace_back(D.SysRoot + "/usr/local/cuda-" + Ver);

    Distro Dist(FS, llvm::Triple(llvm::sys::getProcessTriple()));
    if (Dist.IsDebian() || Dist.IsUbuntu())
      // Special case for Debian to have nvidia-cuda-toolkit work
      // out of the box. More info on http://bugs.debian.org/882505
      Candidates.emplace_back(D.SysRoot + "/usr/lib/cuda");
  }
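  // Each candidate gathered above is validated below: a usable installation
  // must contain include/ and bin/, and (unless -nogpulib was given and the
  // candidate does not require strict checking) an nvvm/libdevice directory
  // whose bitcode files populate LibDeviceMap.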

  bool NoCudaLib = Args.hasArg(options::OPT_nogpulib);

  for (const auto &Candidate : Candidates) {
    InstallPath = Candidate.Path;
    if (InstallPath.empty() || !FS.exists(InstallPath))
      continue;

    BinPath = InstallPath + "/bin";
    IncludePath = InstallPath + "/include";
    LibDevicePath = InstallPath + "/nvvm/libdevice";

    if (!(FS.exists(IncludePath) && FS.exists(BinPath)))
      continue;
    bool CheckLibDevice = (!NoCudaLib || Candidate.StrictChecking);
    if (CheckLibDevice && !FS.exists(LibDevicePath))
      continue;

    Version = CudaVersion::UNKNOWN;
    if (auto CudaHFile = FS.getBufferForFile(InstallPath + "/include/cuda.h"))
      Version = parseCudaHFile((*CudaHFile)->getBuffer());
    // As a last resort, make an educated guess between CUDA-7.0, which had
    // old-style libdevice bitcode, and an unknown recent CUDA version.
    if (Version == CudaVersion::UNKNOWN) {
      Version = FS.exists(LibDevicePath + "/libdevice.10.bc")
                    ? CudaVersion::NEW
                    : CudaVersion::CUDA_70;
    }

    if (Version >= CudaVersion::CUDA_90) {
      // CUDA-9+ uses a single libdevice file for all GPU variants.
      std::string FilePath = LibDevicePath + "/libdevice.10.bc";
      if (FS.exists(FilePath)) {
        for (int Arch = (int)CudaArch::SM_30, E = (int)CudaArch::LAST; Arch < E;
             ++Arch) {
          CudaArch GpuArch = static_cast<CudaArch>(Arch);
          if (!IsNVIDIAGpuArch(GpuArch))
            continue;
          std::string GpuArchName(CudaArchToString(GpuArch));
          LibDeviceMap[GpuArchName] = FilePath;
        }
      }
    } else {
      std::error_code EC;
      for (llvm::vfs::directory_iterator LI = FS.dir_begin(LibDevicePath, EC),
                                         LE;
           !EC && LI != LE; LI = LI.increment(EC)) {
        StringRef FilePath = LI->path();
        StringRef FileName = llvm::sys::path::filename(FilePath);
        // Process all bitcode filenames that look like
        // libdevice.compute_XX.YY.bc
        const StringRef LibDeviceName = "libdevice.";
        if (!(FileName.startswith(LibDeviceName) && FileName.endswith(".bc")))
          continue;
        StringRef GpuArch = FileName.slice(
            LibDeviceName.size(), FileName.find('.', LibDeviceName.size()));
        LibDeviceMap[GpuArch] = FilePath.str();
        // Insert map entries for specific devices with this compute
        // capability. NVCC's choice of the libdevice library version is
        // rather peculiar and depends on the CUDA version.
        if (GpuArch == "compute_20") {
          LibDeviceMap["sm_20"] = std::string(FilePath);
          LibDeviceMap["sm_21"] = std::string(FilePath);
          LibDeviceMap["sm_32"] = std::string(FilePath);
        } else if (GpuArch == "compute_30") {
          LibDeviceMap["sm_30"] = std::string(FilePath);
          if (Version < CudaVersion::CUDA_80) {
            LibDeviceMap["sm_50"] = std::string(FilePath);
            LibDeviceMap["sm_52"] = std::string(FilePath);
            LibDeviceMap["sm_53"] = std::string(FilePath);
          }
          LibDeviceMap["sm_60"] = std::string(FilePath);
          LibDeviceMap["sm_61"] = std::string(FilePath);
          LibDeviceMap["sm_62"] = std::string(FilePath);
        } else if (GpuArch == "compute_35") {
          LibDeviceMap["sm_35"] = std::string(FilePath);
          LibDeviceMap["sm_37"] = std::string(FilePath);
        } else if (GpuArch == "compute_50") {
          if (Version >= CudaVersion::CUDA_80) {
            LibDeviceMap["sm_50"] = std::string(FilePath);
            LibDeviceMap["sm_52"] = std::string(FilePath);
            LibDeviceMap["sm_53"] = std::string(FilePath);
          }
        }
      }
    }

    // Check that we have found at least one libdevice that we can link in if
    // -nocudalib hasn't been specified.
    if (LibDeviceMap.empty() && !NoCudaLib)
      continue;

    IsValid = true;
    break;
  }
}

void CudaInstallationDetector::AddCudaIncludeArgs(
    const ArgList &DriverArgs, ArgStringList &CC1Args) const {
  if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
    // Add cuda_wrappers/* to our system include path. This lets us wrap
    // standard library headers.
    SmallString<128> P(D.ResourceDir);
    llvm::sys::path::append(P, "include");
    llvm::sys::path::append(P, "cuda_wrappers");
    CC1Args.push_back("-internal-isystem");
    CC1Args.push_back(DriverArgs.MakeArgString(P));
  }

  if (DriverArgs.hasArg(options::OPT_nogpuinc))
    return;

  if (!isValid()) {
    D.Diag(diag::err_drv_no_cuda_installation);
    return;
  }

  CC1Args.push_back("-include");
  CC1Args.push_back("__clang_cuda_runtime_wrapper.h");
}

void CudaInstallationDetector::CheckCudaVersionSupportsArch(
    CudaArch Arch) const {
  if (Arch == CudaArch::UNKNOWN || Version == CudaVersion::UNKNOWN ||
      ArchsWithBadVersion[(int)Arch])
    return;

  auto MinVersion = MinVersionForCudaArch(Arch);
  auto MaxVersion = MaxVersionForCudaArch(Arch);
  if (Version < MinVersion || Version > MaxVersion) {
    ArchsWithBadVersion[(int)Arch] = true;
    D.Diag(diag::err_drv_cuda_version_unsupported)
        << CudaArchToString(Arch) << CudaVersionToString(MinVersion)
        << CudaVersionToString(MaxVersion) << InstallPath
        << CudaVersionToString(Version);
  }
}

void CudaInstallationDetector::print(raw_ostream &OS) const {
  if (isValid())
    OS << "Found CUDA installation: " << InstallPath << ", version "
       << CudaVersionToString(Version) << "\n";
}

namespace {
/// Debug info level for the NVPTX devices. We may need to emit a different
/// debug info level for the host and for the device itself. This type controls
/// emission of the debug info for the devices. It either prohibits debug info
/// emission completely, or emits debug directives only, or emits the same
/// debug info as for the host.
enum DeviceDebugInfoLevel {
  DisableDebugInfo,        /// Do not emit debug info for the devices.
  DebugDirectivesOnly,     /// Emit only debug directives.
  EmitSameDebugInfoAsHost, /// Use the same debug info level just like for the
                           /// host.
};
} // anonymous namespace

/// Define the debug info level for the NVPTX devices. If the debug info for
/// both the host and the device is disabled (-g0/-ggdb0 or no debug options at
/// all), no debug info is emitted. If only debug directives are requested for
/// both the host and the device (-gline-directives-only), or the debug info
/// for the device is disabled (optimization is on and
/// --cuda-noopt-device-debug was not specified), only debug directives are
/// emitted for the device. Otherwise, use the same debug info level as for the
/// host (with the limitation that only the DWARF2 standard is supported).
static DeviceDebugInfoLevel mustEmitDebugInfo(const ArgList &Args) {
  const Arg *A = Args.getLastArg(options::OPT_O_Group);
  bool IsDebugEnabled = !A || A->getOption().matches(options::OPT_O0) ||
                        Args.hasFlag(options::OPT_cuda_noopt_device_debug,
                                     options::OPT_no_cuda_noopt_device_debug,
                                     /*Default=*/false);
  if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
    const Option &Opt = A->getOption();
    if (Opt.matches(options::OPT_gN_Group)) {
      if (Opt.matches(options::OPT_g0) || Opt.matches(options::OPT_ggdb0))
        return DisableDebugInfo;
      if (Opt.matches(options::OPT_gline_directives_only))
        return DebugDirectivesOnly;
    }
    return IsDebugEnabled ? EmitSameDebugInfoAsHost : DebugDirectivesOnly;
  }
  return willEmitRemarks(Args) ? DebugDirectivesOnly : DisableDebugInfo;
}
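// A few illustrative combinations, as handled above: "-g0" (or no debug
// options and no remarks) yields DisableDebugInfo; "-O2 -g" yields
// DebugDirectivesOnly, since device debug info is dropped when optimizing
// unless --cuda-noopt-device-debug is given; "-O0 -g" (or "-g" together with
// --cuda-noopt-device-debug) yields EmitSameDebugInfoAsHost.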

void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
                                    const InputInfo &Output,
                                    const InputInfoList &Inputs,
                                    const ArgList &Args,
                                    const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::NVPTXToolChain &>(getToolChain());
  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  StringRef GPUArchName;
  // If this is a CUDA action, we need to extract the device architecture from
  // the Job's associated architecture; otherwise use the -march=arch option.
  // This option may come from the -Xopenmp-target flag or the default value.
  if (JA.isDeviceOffloading(Action::OFK_Cuda)) {
    GPUArchName = JA.getOffloadingArch();
  } else {
    GPUArchName = Args.getLastArgValue(options::OPT_march_EQ);
    assert(!GPUArchName.empty() && "Must have an architecture passed in.");
  }

  // Obtain architecture from the action.
  CudaArch gpu_arch = StringToCudaArch(GPUArchName);
  assert(gpu_arch != CudaArch::UNKNOWN &&
         "Device action expected to have an architecture.");

  // Check that our installation's ptxas supports gpu_arch.
  if (!Args.hasArg(options::OPT_no_cuda_version_check)) {
    TC.CudaInstallation.CheckCudaVersionSupportsArch(gpu_arch);
  }

  ArgStringList CmdArgs;
  CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-m64" : "-m32");
  DeviceDebugInfoLevel DIKind = mustEmitDebugInfo(Args);
  if (DIKind == EmitSameDebugInfoAsHost) {
    // ptxas does not accept the -g option if optimization is enabled, so
    // we ignore the compiler's -O* options if we want debug info.
    CmdArgs.push_back("-g");
    CmdArgs.push_back("--dont-merge-basicblocks");
    CmdArgs.push_back("--return-at-end");
  } else if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
    // Map the -O we received to -O{0,1,2,3}.
    //
    // TODO: Perhaps we should map host -O2 to ptxas -O3. -O3 is ptxas's
    // default, so it may correspond more closely to the spirit of clang -O2.

    // -O3 seems like the least-bad option when -Osomething is specified to
    // clang but it isn't handled below.
    StringRef OOpt = "3";
    if (A->getOption().matches(options::OPT_O4) ||
        A->getOption().matches(options::OPT_Ofast))
      OOpt = "3";
    else if (A->getOption().matches(options::OPT_O0))
      OOpt = "0";
    else if (A->getOption().matches(options::OPT_O)) {
      // -Os, -Oz, and -O(anything else) map to -O2, for lack of better options.
      OOpt = llvm::StringSwitch<const char *>(A->getValue())
                 .Case("1", "1")
                 .Case("2", "2")
                 .Case("3", "3")
                 .Case("s", "2")
                 .Case("z", "2")
                 .Default("2");
    }
    CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
  } else {
    // If no -O was passed, pass -O0 to ptxas -- no opt flag should correspond
    // to no optimizations, but ptxas's default is -O3.
    CmdArgs.push_back("-O0");
  }
  if (DIKind == DebugDirectivesOnly)
    CmdArgs.push_back("-lineinfo");

  // Pass -v to ptxas if it was passed to the driver.
  if (Args.hasArg(options::OPT_v))
    CmdArgs.push_back("-v");

  CmdArgs.push_back("--gpu-name");
  CmdArgs.push_back(Args.MakeArgString(CudaArchToString(gpu_arch)));
  CmdArgs.push_back("--output-file");
  std::string OutputFileName = TC.getInputFilename(Output);

  // If we are invoking `nvlink` internally we need to output a `.cubin` file.
  // FIXME: This should hopefully be removed if NVIDIA updates their tooling.
  if (!C.getInputArgs().getLastArg(options::OPT_c)) {
    SmallString<256> Filename(Output.getFilename());
    llvm::sys::path::replace_extension(Filename, "cubin");
    OutputFileName = Filename.str();
  }
  if (Output.isFilename() && OutputFileName != Output.getFilename())
    C.addTempFile(Args.MakeArgString(OutputFileName));

  CmdArgs.push_back(Args.MakeArgString(OutputFileName));
  for (const auto &II : Inputs)
    CmdArgs.push_back(Args.MakeArgString(II.getFilename()));

  for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
    CmdArgs.push_back(Args.MakeArgString(A));

  bool Relocatable;
  if (JA.isOffloading(Action::OFK_OpenMP))
    // In OpenMP we need to generate relocatable code.
    Relocatable = Args.hasFlag(options::OPT_fopenmp_relocatable_target,
                               options::OPT_fnoopenmp_relocatable_target,
                               /*Default=*/true);
  else if (JA.isOffloading(Action::OFK_Cuda))
    // In CUDA, relocatable device code is only generated when -fgpu-rdc is
    // given.
    Relocatable = Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
                               /*Default=*/false);
  else
    // Otherwise, we are compiling directly and should create linkable output.
    Relocatable = true;

  if (Relocatable)
    CmdArgs.push_back("-c");

  const char *Exec;
  if (Arg *A = Args.getLastArg(options::OPT_ptxas_path_EQ))
    Exec = A->getValue();
  else
    Exec = Args.MakeArgString(TC.GetProgramPath("ptxas"));
  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Exec, CmdArgs, Inputs, Output));
}
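// As a rough illustration (file names are hypothetical): a CUDA compile for
// sm_70 at -O2 with no debug info requested results in an invocation along
// the lines of
//   ptxas -m64 -O2 --gpu-name sm_70 --output-file foo.cubin foo.s
// with "-c" appended only when relocatable (RDC) code is being generated.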

static bool shouldIncludePTX(const ArgList &Args, const char *gpu_arch) {
  bool includePTX = true;
  for (Arg *A : Args) {
    if (!(A->getOption().matches(options::OPT_cuda_include_ptx_EQ) ||
          A->getOption().matches(options::OPT_no_cuda_include_ptx_EQ)))
      continue;
    A->claim();
    const StringRef ArchStr = A->getValue();
    if (ArchStr == "all" || ArchStr == gpu_arch) {
      includePTX = A->getOption().matches(options::OPT_cuda_include_ptx_EQ);
      continue;
    }
  }
  return includePTX;
}
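// For example, "--no-cuda-include-ptx=all --cuda-include-ptx=sm_70" keeps PTX
// in the fatbinary only for sm_70; when several of these flags match the same
// architecture, the last one on the command line wins because the loop above
// visits the arguments in order.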

// All inputs to this linker must be from CudaDeviceActions, as we need to look
// at the Inputs' Actions in order to figure out which GPU architecture they
// correspond to.
void NVPTX::FatBinary::ConstructJob(Compilation &C, const JobAction &JA,
                                    const InputInfo &Output,
                                    const InputInfoList &Inputs,
                                    const ArgList &Args,
                                    const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::CudaToolChain &>(getToolChain());
  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  ArgStringList CmdArgs;
  if (TC.CudaInstallation.version() <= CudaVersion::CUDA_100)
    CmdArgs.push_back("--cuda");
  CmdArgs.push_back(TC.getTriple().isArch64Bit() ? "-64" : "-32");
  CmdArgs.push_back(Args.MakeArgString("--create"));
  CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
  if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
    CmdArgs.push_back("-g");

  for (const auto &II : Inputs) {
    auto *A = II.getAction();
    assert(A->getInputs().size() == 1 &&
           "Device offload action is expected to have a single input");
    const char *gpu_arch_str = A->getOffloadingArch();
    assert(gpu_arch_str &&
           "Device action expected to have associated a GPU architecture!");
    CudaArch gpu_arch = StringToCudaArch(gpu_arch_str);

    if (II.getType() == types::TY_PP_Asm &&
        !shouldIncludePTX(Args, gpu_arch_str))
      continue;
    // We need to pass an Arch of the form "sm_XX" for cubin files and
    // "compute_XX" for ptx.
    const char *Arch = (II.getType() == types::TY_PP_Asm)
                           ? CudaArchToVirtualArchString(gpu_arch)
                           : gpu_arch_str;
    CmdArgs.push_back(
        Args.MakeArgString(llvm::Twine("--image=profile=") + Arch +
                           ",file=" + getToolChain().getInputFilename(II)));
  }

  for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_fatbinary))
    CmdArgs.push_back(Args.MakeArgString(A));

  const char *Exec = Args.MakeArgString(TC.GetProgramPath("fatbinary"));
  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Exec, CmdArgs, Inputs, Output));
}
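// Roughly, for a single sm_70 device compilation this produces an invocation
// such as (file names are hypothetical):
//   fatbinary -64 --create a.fatbin --image=profile=sm_70,file=a.cubin
//       --image=profile=compute_70,file=a.s
// i.e. one image per cubin plus an optional PTX image, subject to
// shouldIncludePTX() above.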

void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
                                 const InputInfo &Output,
                                 const InputInfoList &Inputs,
                                 const ArgList &Args,
                                 const char *LinkingOutput) const {
  const auto &TC =
      static_cast<const toolchains::NVPTXToolChain &>(getToolChain());
  assert(TC.getTriple().isNVPTX() && "Wrong platform");

  ArgStringList CmdArgs;
  if (Output.isFilename()) {
    CmdArgs.push_back("-o");
    CmdArgs.push_back(Output.getFilename());
  } else {
    assert(Output.isNothing() && "Invalid output.");
  }

  if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
    CmdArgs.push_back("-g");

  if (Args.hasArg(options::OPT_v))
    CmdArgs.push_back("-v");

  StringRef GPUArch = Args.getLastArgValue(options::OPT_march_EQ);
  assert(!GPUArch.empty() && "At least one GPU Arch required for nvlink.");

  CmdArgs.push_back("-arch");
  CmdArgs.push_back(Args.MakeArgString(GPUArch));

  // Add paths specified in LIBRARY_PATH environment variable as -L options.
  addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");

  // Add paths for the default clang library path.
  SmallString<256> DefaultLibPath =
      llvm::sys::path::parent_path(TC.getDriver().Dir);
  llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
  CmdArgs.push_back(Args.MakeArgString(Twine("-L") + DefaultLibPath));

  for (const auto &II : Inputs) {
    if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
        II.getType() == types::TY_LTO_BC || II.getType() == types::TY_LLVM_BC) {
      C.getDriver().Diag(diag::err_drv_no_linker_llvm_support)
          << getToolChain().getTripleString();
      continue;
    }

    // Currently, we only pass the input files to the linker; we do not pass
    // any libraries that may be valid only for the host.
    if (!II.isFilename())
      continue;

    // The 'nvlink' application performs RDC-mode linking when given a '.o'
    // file and device linking when given a '.cubin' file. We always want to
    // perform device linking, so just rename any '.o' files.
    // FIXME: This should hopefully be removed if NVIDIA updates their tooling.
    auto InputFile = getToolChain().getInputFilename(II);
    if (llvm::sys::path::extension(InputFile) != ".cubin") {
      // If there are no actions above this one then this is direct input and
      // we can copy it. Otherwise the input is internal so a `.cubin` file
      // should exist.
      if (II.getAction() && II.getAction()->getInputs().size() == 0) {
        const char *CubinF =
            Args.MakeArgString(getToolChain().getDriver().GetTemporaryPath(
                llvm::sys::path::stem(InputFile), "cubin"));
        if (std::error_code EC =
                llvm::sys::fs::copy_file(InputFile, C.addTempFile(CubinF)))
          continue;

        CmdArgs.push_back(CubinF);
      } else {
        SmallString<256> Filename(InputFile);
        llvm::sys::path::replace_extension(Filename, "cubin");
        CmdArgs.push_back(Args.MakeArgString(Filename));
      }
    } else {
      CmdArgs.push_back(Args.MakeArgString(InputFile));
    }
  }

  C.addCommand(std::make_unique<Command>(
      JA, *this,
      ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
                          "--options-file"},
      Args.MakeArgString(getToolChain().GetProgramPath("nvlink")), CmdArgs,
      Inputs, Output));
}

void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
                                   const llvm::opt::ArgList &Args,
                                   std::vector<StringRef> &Features) {
  if (Args.hasArg(options::OPT_cuda_feature_EQ)) {
    StringRef PtxFeature =
        Args.getLastArgValue(options::OPT_cuda_feature_EQ, "+ptx42");
    Features.push_back(Args.MakeArgString(PtxFeature));
    return;
  }
  CudaInstallationDetector CudaInstallation(D, Triple, Args);

  // New CUDA versions often introduce new instructions that are only supported
  // by a new PTX version, so we need to raise the PTX level to enable them in
  // the NVPTX back-end.
  const char *PtxFeature = nullptr;
  switch (CudaInstallation.version()) {
#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER)                                   \
  case CudaVersion::CUDA_##CUDA_VER:                                           \
    PtxFeature = "+ptx" #PTX_VER;                                              \
    break;
    CASE_CUDA_VERSION(118, 78);
    CASE_CUDA_VERSION(117, 77);
    CASE_CUDA_VERSION(116, 76);
    CASE_CUDA_VERSION(115, 75);
    CASE_CUDA_VERSION(114, 74);
    CASE_CUDA_VERSION(113, 73);
    CASE_CUDA_VERSION(112, 72);
    CASE_CUDA_VERSION(111, 71);
    CASE_CUDA_VERSION(110, 70);
    CASE_CUDA_VERSION(102, 65);
    CASE_CUDA_VERSION(101, 64);
    CASE_CUDA_VERSION(100, 63);
    CASE_CUDA_VERSION(92, 61);
    CASE_CUDA_VERSION(91, 61);
    CASE_CUDA_VERSION(90, 60);
#undef CASE_CUDA_VERSION
  default:
    PtxFeature = "+ptx42";
  }
  Features.push_back(PtxFeature);
}
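// For example, if the detected installation is CUDA 11.4, the switch above
// selects "+ptx74" and the NVPTX back-end may then use PTX ISA 7.4 features;
// if no installation is found, version() is UNKNOWN and the conservative
// default "+ptx42" is used.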

/// NVPTX toolchain. Our assembler is ptxas, and our linker is nvlink. This
/// operates as a stand-alone version of the NVPTX tools without the host
/// toolchain.
NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
                               const llvm::Triple &HostTriple,
                               const ArgList &Args)
    : ToolChain(D, Triple, Args), CudaInstallation(D, HostTriple, Args) {
  if (CudaInstallation.isValid()) {
    CudaInstallation.WarnIfUnsupportedVersion();
    getProgramPaths().push_back(std::string(CudaInstallation.getBinPath()));
  }
  // Look up binaries in the driver directory; this is used to
  // discover the clang-offload-bundler executable.
  getProgramPaths().push_back(getDriver().Dir);
}

/// We only need the host triple to locate the CUDA binary utilities; use the
/// system's default triple if one is not provided.
NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
                               const ArgList &Args)
    : NVPTXToolChain(D, Triple,
                     llvm::Triple(llvm::sys::getDefaultTargetTriple()), Args) {}

llvm::opt::DerivedArgList *
NVPTXToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
                              StringRef BoundArch,
                              Action::OffloadKind DeviceOffloadKind) const {
  DerivedArgList *DAL =
      ToolChain::TranslateArgs(Args, BoundArch, DeviceOffloadKind);
  if (!DAL)
    DAL = new DerivedArgList(Args.getBaseArgs());

  const OptTable &Opts = getDriver().getOpts();

  for (Arg *A : Args)
    if (!llvm::is_contained(*DAL, A))
      DAL->append(A);

  if (!DAL->hasArg(options::OPT_march_EQ))
    DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
                      CudaArchToString(CudaArch::CudaDefault));

  return DAL;
}

bool NVPTXToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
  const Option &O = A->getOption();
  return (O.matches(options::OPT_gN_Group) &&
          !O.matches(options::OPT_gmodules)) ||
         O.matches(options::OPT_g_Flag) ||
         O.matches(options::OPT_ggdbN_Group) || O.matches(options::OPT_ggdb) ||
         O.matches(options::OPT_gdwarf) || O.matches(options::OPT_gdwarf_2) ||
         O.matches(options::OPT_gdwarf_3) || O.matches(options::OPT_gdwarf_4) ||
         O.matches(options::OPT_gdwarf_5) ||
         O.matches(options::OPT_gcolumn_info);
}

void NVPTXToolChain::adjustDebugInfoKind(
    codegenoptions::DebugInfoKind &DebugInfoKind, const ArgList &Args) const {
  switch (mustEmitDebugInfo(Args)) {
  case DisableDebugInfo:
    DebugInfoKind = codegenoptions::NoDebugInfo;
    break;
  case DebugDirectivesOnly:
    DebugInfoKind = codegenoptions::DebugDirectivesOnly;
    break;
  case EmitSameDebugInfoAsHost:
    // Use same debug info level just like for the host.
    break;
  }
}

/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
/// which isn't properly a linker but nonetheless performs the step of stitching
/// together object files from the assembler into a single blob.

CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
                             const ToolChain &HostTC, const ArgList &Args)
    : NVPTXToolChain(D, Triple, HostTC.getTriple(), Args), HostTC(HostTC) {}

void CudaToolChain::addClangTargetOptions(
    const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
    Action::OffloadKind DeviceOffloadingKind) const {
  HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);

  StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
  assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
  assert((DeviceOffloadingKind == Action::OFK_OpenMP ||
          DeviceOffloadingKind == Action::OFK_Cuda) &&
         "Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");

  if (DeviceOffloadingKind == Action::OFK_Cuda) {
    CC1Args.append(
        {"-fcuda-is-device", "-mllvm", "-enable-memcpyopt-without-libcalls"});

    if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
                           options::OPT_fno_cuda_approx_transcendentals, false))
      CC1Args.push_back("-fcuda-approx-transcendentals");
  }

  if (DriverArgs.hasArg(options::OPT_nogpulib))
    return;

  if (DeviceOffloadingKind == Action::OFK_OpenMP &&
      DriverArgs.hasArg(options::OPT_S))
    return;

  std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(GpuArch);
  if (LibDeviceFile.empty()) {
    getDriver().Diag(diag::err_drv_no_cuda_libdevice) << GpuArch;
    return;
  }

  CC1Args.push_back("-mlink-builtin-bitcode");
  CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));

  clang::CudaVersion CudaInstallationVersion = CudaInstallation.version();

  if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
                         options::OPT_fno_cuda_short_ptr, false))
    CC1Args.append({"-mllvm", "--nvptx-short-ptr"});

  if (CudaInstallationVersion >= CudaVersion::UNKNOWN)
    CC1Args.push_back(
        DriverArgs.MakeArgString(Twine("-target-sdk-version=") +
                                 CudaVersionToString(CudaInstallationVersion)));

  if (DeviceOffloadingKind == Action::OFK_OpenMP) {
    if (CudaInstallationVersion < CudaVersion::CUDA_92) {
      getDriver().Diag(
          diag::err_drv_omp_offload_target_cuda_version_not_support)
          << CudaVersionToString(CudaInstallationVersion);
      return;
    }

    // Link the bitcode library late if we're using device LTO.
    if (getDriver().isUsingLTO(/* IsOffload */ true))
      return;

    addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GpuArch.str(),
                       getTriple());
  }
}

llvm::DenormalMode CudaToolChain::getDefaultDenormalModeForType(
    const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
    const llvm::fltSemantics *FPType) const {
  if (JA.getOffloadingDeviceKind() == Action::OFK_Cuda) {
    if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
        DriverArgs.hasFlag(options::OPT_fgpu_flush_denormals_to_zero,
                           options::OPT_fno_gpu_flush_denormals_to_zero, false))
      return llvm::DenormalMode::getPreserveSign();
  }

  assert(JA.getOffloadingDeviceKind() != Action::OFK_Host);
  return llvm::DenormalMode::getIEEE();
}
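// In other words, single-precision denormals are flushed (preserve-sign mode)
// only for CUDA device compilation with -fgpu-flush-denormals-to-zero; all
// other cases, including double precision, keep IEEE denormal handling.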

void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
                                       ArgStringList &CC1Args) const {
  // Check our CUDA version if we're going to include the CUDA headers.
  if (!DriverArgs.hasArg(options::OPT_nogpuinc) &&
      !DriverArgs.hasArg(options::OPT_no_cuda_version_check)) {
    StringRef Arch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
    assert(!Arch.empty() && "Must have an explicit GPU arch.");
    CudaInstallation.CheckCudaVersionSupportsArch(StringToCudaArch(Arch));
  }
  CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}

std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
  // Only object files are changed; for example, assembly files keep their .s
  // extensions. If the user requested device-only compilation, don't change it.
  if (Input.getType() != types::TY_Object || getDriver().offloadDeviceOnly())
    return ToolChain::getInputFilename(Input);

  // Replace the extension of object files with cubin because nvlink relies on
  // these particular file names.
  SmallString<256> Filename(ToolChain::getInputFilename(Input));
  llvm::sys::path::replace_extension(Filename, "cubin");
  return std::string(Filename.str());
}
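// For example, a device object that would otherwise be named "foo.o"
// (hypothetical name) is referred to as "foo.cubin" so that nvlink performs
// device linking on it instead of treating it as an RDC-mode host object.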

llvm::opt::DerivedArgList *
CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
                             StringRef BoundArch,
                             Action::OffloadKind DeviceOffloadKind) const {
  DerivedArgList *DAL =
      HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
  if (!DAL)
    DAL = new DerivedArgList(Args.getBaseArgs());

  const OptTable &Opts = getDriver().getOpts();

  // For OpenMP device offloading, append derived arguments. Make sure
  // flags are not duplicated.
  // Also append the compute capability.
  if (DeviceOffloadKind == Action::OFK_OpenMP) {
    for (Arg *A : Args)
      if (!llvm::is_contained(*DAL, A))
        DAL->append(A);

    if (!DAL->hasArg(options::OPT_march_EQ)) {
      StringRef Arch = BoundArch;
      if (Arch.empty()) {
        auto ArchsOrErr = getSystemGPUArchs(Args);
        if (!ArchsOrErr) {
          std::string ErrMsg =
              llvm::formatv("{0}", llvm::fmt_consume(ArchsOrErr.takeError()));
          getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
              << llvm::Triple::getArchTypeName(getArch()) << ErrMsg << "-march";
          Arch = CudaArchToString(CudaArch::CudaDefault);
        } else {
          Arch = Args.MakeArgString(ArchsOrErr->front());
        }
      }
      DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), Arch);
    }

    return DAL;
  }

  for (Arg *A : Args) {
    DAL->append(A);
  }

  if (!BoundArch.empty()) {
    DAL->eraseArg(options::OPT_march_EQ);
    DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
                      BoundArch);
  }
  return DAL;
}

Expected<SmallVector<std::string>>
CudaToolChain::getSystemGPUArchs(const ArgList &Args) const {
  // Detect NVIDIA GPUs available on the system.
  std::string Program;
  if (Arg *A = Args.getLastArg(options::OPT_nvptx_arch_tool_EQ))
    Program = A->getValue();
  else
    Program = GetProgramPath("nvptx-arch");

  auto StdoutOrErr = executeToolChainProgram(Program);
  if (!StdoutOrErr)
    return StdoutOrErr.takeError();

  SmallVector<std::string, 1> GPUArchs;
  for (StringRef Arch : llvm::split((*StdoutOrErr)->getBuffer(), "\n"))
    if (!Arch.empty())
      GPUArchs.push_back(Arch.str());

  if (GPUArchs.empty())
    return llvm::createStringError(std::error_code(),
                                   "No NVIDIA GPU detected in the system");

  return std::move(GPUArchs);
}
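// nvptx-arch prints one architecture per line (for example, "sm_70" for a
// Volta-class GPU), which is why its output is split on newlines above;
// TranslateArgs() uses the first entry when neither -march= nor a bound
// architecture is available.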

Tool *NVPTXToolChain::buildAssembler() const {
  return new tools::NVPTX::Assembler(*this);
}

Tool *NVPTXToolChain::buildLinker() const {
  return new tools::NVPTX::Linker(*this);
}

Tool *CudaToolChain::buildAssembler() const {
  return new tools::NVPTX::Assembler(*this);
}

Tool *CudaToolChain::buildLinker() const {
  return new tools::NVPTX::FatBinary(*this);
}

void CudaToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
  HostTC.addClangWarningOptions(CC1Args);
}

ToolChain::CXXStdlibType
CudaToolChain::GetCXXStdlibType(const ArgList &Args) const {
  return HostTC.GetCXXStdlibType(Args);
}

void CudaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
                                              ArgStringList &CC1Args) const {
  HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);

  if (!DriverArgs.hasArg(options::OPT_nogpuinc) && CudaInstallation.isValid())
    CC1Args.append(
        {"-internal-isystem",
         DriverArgs.MakeArgString(CudaInstallation.getIncludePath())});
}

void CudaToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
                                                 ArgStringList &CC1Args) const {
  HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
}

void CudaToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
                                        ArgStringList &CC1Args) const {
  HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
}

SanitizerMask CudaToolChain::getSupportedSanitizers() const {
  // The CudaToolChain only supports sanitizers in the sense that it allows
  // sanitizer arguments on the command line if they are supported by the host
  // toolchain. The CudaToolChain will actually ignore any command line
  // arguments for any of these "supported" sanitizers. That means that no
  // sanitization of device code is actually supported at this time.
  //
  // This behavior is necessary because the host and device toolchain
  // invocations often share the command line, so the device toolchain must
  // tolerate flags meant only for the host toolchain.
  return HostTC.getSupportedSanitizers();
}

VersionTuple CudaToolChain::computeMSVCVersion(const Driver *D,
                                               const ArgList &Args) const {
  return HostTC.computeMSVCVersion(D, Args);
}