//===--- NVPTX.h - Declare NVPTX target feature support ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares NVPTX TargetInfo objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_NVPTX_H
#define LLVM_CLANG_LIB_BASIC_TARGETS_NVPTX_H

#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"

namespace clang {
namespace targets {

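// The table below is indexed by clang's language address spaces (LangAS
// order, as labeled on each entry) and yields the NVPTX address-space number
// used by the backend: 0 = generic, 1 = global, 3 = shared, 4 = constant.
// Language address spaces with no dedicated NVPTX counterpart fall back to 0
// and are treated as generic pointers.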
static const unsigned NVPTXAddrSpaceMap[] = {
    0, // Default
    1, // opencl_global
    3, // opencl_local
    4, // opencl_constant
    0, // opencl_private
    // FIXME: generic has to be added to the target
    0, // opencl_generic
    1, // cuda_device
    4, // cuda_constant
    3, // cuda_shared
};

/// The DWARF address class for each NVPTX address space. The values are taken
/// from
/// https://docs.nvidia.com/cuda/archive/10.0/ptx-writers-guide-to-interoperability/index.html#cuda-specific-dwarf
/// The table is indexed by the NVPTX address-space number (see
/// NVPTXAddrSpaceMap above); entries of -1 have no DWARF address class.
static const int NVPTXDWARFAddrSpaceMap[] = {
    -1, // Default, opencl_private or opencl_generic - not defined
    5,  // opencl_global
    -1, // address space 2 is not used by the NVPTX backend
    8,  // opencl_local or cuda_shared
    4,  // opencl_constant or cuda_constant
};

class LLVM_LIBRARY_VISIBILITY NVPTXTargetInfo : public TargetInfo {
  static const char *const GCCRegNames[];
  static const Builtin::Info BuiltinInfo[];
  CudaArch GPU;
  uint32_t PTXVersion;
  std::unique_ptr<TargetInfo> HostTarget;

public:
  NVPTXTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts,
                  unsigned TargetPointerWidth);

  void getTargetDefines(const LangOptions &Opts,
                        MacroBuilder &Builder) const override;

  ArrayRef<Builtin::Info> getTargetBuiltins() const override;

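  // In addition to the base implementation, advertise the selected GPU
  // architecture (e.g. "sm_61") and the PTX ISA version (e.g. "ptx60") as
  // target features, so they can be required by target-specific builtins and
  // queried like any other target feature.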
  bool
  initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
                 StringRef CPU,
                 const std::vector<std::string> &FeaturesVec) const override {
    Features[CudaArchToString(GPU)] = true;
    Features["ptx" + std::to_string(PTXVersion)] = true;
    return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
  }

  bool hasFeature(StringRef Feature) const override;

  ArrayRef<const char *> getGCCRegNames() const override;

  ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
    // No aliases.
    return None;
  }

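  // Accept the PTX inline-assembly register constraint letters. These
  // correspond to the register classes used by the NVPTX backend (roughly:
  // integer registers of various widths for 'c', 'h', 'r', and 'l', and
  // floating-point registers for 'f' and 'd'); all of them simply allow a
  // register operand here.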
  bool validateAsmConstraint(const char *&Name,
                             TargetInfo::ConstraintInfo &Info) const override {
    switch (*Name) {
    default:
      return false;
    case 'c':
    case 'h':
    case 'r':
    case 'l':
    case 'f':
    case 'd':
      Info.setAllowsRegister();
      return true;
    }
  }

  const char *getClobbers() const override {
    // FIXME: Is this really right?
    return "";
  }

  BuiltinVaListKind getBuiltinVaListKind() const override {
    // FIXME: implement
    return TargetInfo::CharPtrBuiltinVaList;
  }

  bool isValidCPUName(StringRef Name) const override {
    return StringToCudaArch(Name) != CudaArch::UNKNOWN;
  }

  void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override {
    for (int i = static_cast<int>(CudaArch::SM_20);
         i < static_cast<int>(CudaArch::LAST); ++i)
      Values.emplace_back(CudaArchToString(static_cast<CudaArch>(i)));
  }

  bool setCPU(const std::string &Name) override {
    GPU = StringToCudaArch(Name);
    return GPU != CudaArch::UNKNOWN;
  }

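  // OpenCL extensions this target reports as supported. The set below is
  // limited to fp64, byte-addressable stores, and the 32-bit atomics
  // extensions, plus a few Khronos/Clang convenience extensions; image and
  // half-precision (cl_khr_fp16) support are not advertised.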
  void setSupportedOpenCLOpts() override {
    auto &Opts = getSupportedOpenCLOpts();
    Opts.support("cl_clang_storage_class_specifiers");
    Opts.support("cl_khr_gl_sharing");
    Opts.support("cl_khr_icd");

    Opts.support("cl_khr_fp64");
    Opts.support("cl_khr_byte_addressable_store");
    Opts.support("cl_khr_global_int32_base_atomics");
    Opts.support("cl_khr_global_int32_extended_atomics");
    Opts.support("cl_khr_local_int32_base_atomics");
    Opts.support("cl_khr_local_int32_extended_atomics");
  }

  /// \returns The corresponding target-specific DWARF address space if an
  /// address within the target-specific address space \p AddressSpace must be
  /// converted in order to be used; otherwise returns None and no conversion
  /// is emitted in the DWARF.
  Optional<unsigned>
  getDWARFAddressSpace(unsigned AddressSpace) const override {
    if (AddressSpace >= llvm::array_lengthof(NVPTXDWARFAddrSpaceMap) ||
        NVPTXDWARFAddrSpaceMap[AddressSpace] < 0)
      return llvm::None;
    return NVPTXDWARFAddrSpaceMap[AddressSpace];
  }

  CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
    // CUDA compilations support all of the host's calling conventions.
    //
    // TODO: We should warn if you apply a non-default CC to anything other than
    // a host function.
    if (HostTarget)
      return HostTarget->checkCallingConvention(CC);
    return CCCR_Warning;
  }
};
} // namespace targets
} // namespace clang
#endif // LLVM_CLANG_LIB_BASIC_TARGETS_NVPTX_H