1 //===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
15 #define LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
16 
17 #include "CodeGenModule.h"
18 #include "CGValue.h"
19 #include "clang/AST/Type.h"
20 #include "clang/AST/Expr.h"
21 #include "clang/Basic/LLVM.h"
22 #include "clang/Basic/SyncScope.h"
23 #include "llvm/ADT/SmallString.h"
24 #include "llvm/ADT/StringRef.h"
25 
26 namespace llvm {
27 class Constant;
28 class GlobalValue;
29 class Type;
30 class Value;
31 }
32 
33 namespace clang {
34 class Decl;
35 
36 namespace CodeGen {
37 class ABIInfo;
38 class CallArgList;
39 class CodeGenFunction;
40 class CGBlockInfo;
41 class CGFunctionInfo;
42 
43 /// TargetCodeGenInfo - This class organizes various target-specific
44 /// codegeneration issues, like target-specific attributes, builtins and so
45 /// on.
46 class TargetCodeGenInfo {
47   std::unique_ptr<ABIInfo> Info = nullptr;
48 
49 public:
TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info)50   TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info) : Info(std::move(Info)) {}
51   virtual ~TargetCodeGenInfo();
52 
53   /// getABIInfo() - Returns ABI info helper for the target.
getABIInfo()54   const ABIInfo &getABIInfo() const { return *Info; }
55 
56   /// setTargetAttributes - Provides a convenient hook to handle extra
57   /// target-specific attributes for the given global.
setTargetAttributes(const Decl * D,llvm::GlobalValue * GV,CodeGen::CodeGenModule & M)58   virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
59                                    CodeGen::CodeGenModule &M) const {}
60 
61   /// emitTargetMetadata - Provides a convenient hook to handle extra
62   /// target-specific metadata for the given globals.
emitTargetMetadata(CodeGen::CodeGenModule & CGM,const llvm::MapVector<GlobalDecl,StringRef> & MangledDeclNames)63   virtual void emitTargetMetadata(
64       CodeGen::CodeGenModule &CGM,
65       const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {}
66 
67   /// Any further codegen related checks that need to be done on a function call
68   /// in a target specific manner.
checkFunctionCallABI(CodeGenModule & CGM,SourceLocation CallLoc,const FunctionDecl * Caller,const FunctionDecl * Callee,const CallArgList & Args)69   virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
70                                     const FunctionDecl *Caller,
71                                     const FunctionDecl *Callee,
72                                     const CallArgList &Args) const {}
73 
74   /// Determines the size of struct _Unwind_Exception on this platform,
75   /// in 8-bit units.  The Itanium ABI defines this as:
76   ///   struct _Unwind_Exception {
77   ///     uint64 exception_class;
78   ///     _Unwind_Exception_Cleanup_Fn exception_cleanup;
79   ///     uint64 private_1;
80   ///     uint64 private_2;
81   ///   };
82   virtual unsigned getSizeOfUnwindException() const;
83 
84   /// Controls whether __builtin_extend_pointer should sign-extend
85   /// pointers to uint64_t or zero-extend them (the default).  Has
86   /// no effect for targets:
87   ///   - that have 64-bit pointers, or
88   ///   - that cannot address through registers larger than pointers, or
89   ///   - that implicitly ignore/truncate the top bits when addressing
90   ///     through such registers.
extendPointerWithSExt()91   virtual bool extendPointerWithSExt() const { return false; }
92 
93   /// Determines the DWARF register number for the stack pointer, for
94   /// exception-handling purposes.  Implements __builtin_dwarf_sp_column.
95   ///
96   /// Returns -1 if the operation is unsupported by this target.
getDwarfEHStackPointer(CodeGen::CodeGenModule & M)97   virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
98     return -1;
99   }
100 
101   /// Initializes the given DWARF EH register-size table, a char*.
102   /// Implements __builtin_init_dwarf_reg_size_table.
103   ///
104   /// Returns true if the operation is unsupported by this target.
initDwarfEHRegSizeTable(CodeGen::CodeGenFunction & CGF,llvm::Value * Address)105   virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
106                                        llvm::Value *Address) const {
107     return true;
108   }
109 
110   /// Performs the code-generation required to convert a return
111   /// address as stored by the system into the actual address of the
112   /// next instruction that will be executed.
113   ///
114   /// Used by __builtin_extract_return_addr().
decodeReturnAddress(CodeGen::CodeGenFunction & CGF,llvm::Value * Address)115   virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
116                                            llvm::Value *Address) const {
117     return Address;
118   }
119 
120   /// Performs the code-generation required to convert the address
121   /// of an instruction into a return address suitable for storage
122   /// by the system in a return slot.
123   ///
124   /// Used by __builtin_frob_return_addr().
encodeReturnAddress(CodeGen::CodeGenFunction & CGF,llvm::Value * Address)125   virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
126                                            llvm::Value *Address) const {
127     return Address;
128   }
129 
130   /// Corrects the low-level LLVM type for a given constraint and "usual"
131   /// type.
132   ///
133   /// \returns A pointer to a new LLVM type, possibly the same as the original
134   /// on success; 0 on failure.
adjustInlineAsmType(CodeGen::CodeGenFunction & CGF,StringRef Constraint,llvm::Type * Ty)135   virtual llvm::Type *adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
136                                           StringRef Constraint,
137                                           llvm::Type *Ty) const {
138     return Ty;
139   }
140 
141   /// Adds constraints and types for result registers.
addReturnRegisterOutputs(CodeGen::CodeGenFunction & CGF,CodeGen::LValue ReturnValue,std::string & Constraints,std::vector<llvm::Type * > & ResultRegTypes,std::vector<llvm::Type * > & ResultTruncRegTypes,std::vector<CodeGen::LValue> & ResultRegDests,std::string & AsmString,unsigned NumOutputs)142   virtual void addReturnRegisterOutputs(
143       CodeGen::CodeGenFunction &CGF, CodeGen::LValue ReturnValue,
144       std::string &Constraints, std::vector<llvm::Type *> &ResultRegTypes,
145       std::vector<llvm::Type *> &ResultTruncRegTypes,
146       std::vector<CodeGen::LValue> &ResultRegDests, std::string &AsmString,
147       unsigned NumOutputs) const {}
148 
149   /// doesReturnSlotInterfereWithArgs - Return true if the target uses an
150   /// argument slot for an 'sret' type.
doesReturnSlotInterfereWithArgs()151   virtual bool doesReturnSlotInterfereWithArgs() const { return true; }
152 
153   /// Retrieve the address of a function to call immediately before
154   /// calling objc_retainAutoreleasedReturnValue.  The
155   /// implementation of objc_autoreleaseReturnValue sniffs the
156   /// instruction stream following its return address to decide
157   /// whether it's a call to objc_retainAutoreleasedReturnValue.
158   /// This can be prohibitively expensive, depending on the
159   /// relocation model, and so on some targets it instead sniffs for
160   /// a particular instruction sequence.  This functions returns
161   /// that instruction sequence in inline assembly, which will be
162   /// empty if none is required.
getARCRetainAutoreleasedReturnValueMarker()163   virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
164     return "";
165   }
166 
167   /// Determine whether a call to objc_retainAutoreleasedReturnValue should be
168   /// marked as 'notail'.
shouldSuppressTailCallsOfRetainAutoreleasedReturnValue()169   virtual bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const {
170     return false;
171   }
172 
173   /// Return a constant used by UBSan as a signature to identify functions
174   /// possessing type information, or 0 if the platform is unsupported.
175   virtual llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule & CGM)176   getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
177     return nullptr;
178   }
179 
180   /// Determine whether a call to an unprototyped functions under
181   /// the given calling convention should use the variadic
182   /// convention or the non-variadic convention.
183   ///
184   /// There's a good reason to make a platform's variadic calling
185   /// convention be different from its non-variadic calling
186   /// convention: the non-variadic arguments can be passed in
187   /// registers (better for performance), and the variadic arguments
188   /// can be passed on the stack (also better for performance).  If
189   /// this is done, however, unprototyped functions *must* use the
190   /// non-variadic convention, because C99 states that a call
191   /// through an unprototyped function type must succeed if the
192   /// function was defined with a non-variadic prototype with
193   /// compatible parameters.  Therefore, splitting the conventions
194   /// makes it impossible to call a variadic function through an
195   /// unprototyped type.  Since function prototypes came out in the
196   /// late 1970s, this is probably an acceptable trade-off.
197   /// Nonetheless, not all platforms are willing to make it, and in
198   /// particularly x86-64 bends over backwards to make the
199   /// conventions compatible.
200   ///
201   /// The default is false.  This is correct whenever:
202   ///   - the conventions are exactly the same, because it does not
203   ///     matter and the resulting IR will be somewhat prettier in
204   ///     certain cases; or
205   ///   - the conventions are substantively different in how they pass
206   ///     arguments, because in this case using the variadic convention
207   ///     will lead to C99 violations.
208   ///
209   /// However, some platforms make the conventions identical except
210   /// for passing additional out-of-band information to a variadic
211   /// function: for example, x86-64 passes the number of SSE
212   /// arguments in %al.  On these platforms, it is desirable to
213   /// call unprototyped functions using the variadic convention so
214   /// that unprototyped calls to varargs functions still succeed.
215   ///
216   /// Relatedly, platforms which pass the fixed arguments to this:
217   ///   A foo(B, C, D);
218   /// differently than they would pass them to this:
219   ///   A foo(B, C, D, ...);
220   /// may need to adjust the debugger-support code in Sema to do the
221   /// right thing when calling a function with no know signature.
222   virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
223                                      const FunctionNoProtoType *fnType) const;
224 
225   /// Gets the linker options necessary to link a dependent library on this
226   /// platform.
227   virtual void getDependentLibraryOption(llvm::StringRef Lib,
228                                          llvm::SmallString<24> &Opt) const;
229 
230   /// Gets the linker options necessary to detect object file mismatches on
231   /// this platform.
getDetectMismatchOption(llvm::StringRef Name,llvm::StringRef Value,llvm::SmallString<32> & Opt)232   virtual void getDetectMismatchOption(llvm::StringRef Name,
233                                        llvm::StringRef Value,
234                                        llvm::SmallString<32> &Opt) const {}
235 
getDefaultAS()236   virtual unsigned getDefaultAS() const {
237 #if 0
238     // For e.g. AMDGPU this should not return 0 but instead whatever LangAS::Default maps to
239     return getABIInfo().getContext().getTargetAddressSpace(LangAS::Default, nullptr);
240 #else
241     return 0; // XXXAR: to keep code the same as upstream
242 #endif
243   }
244   /// The address space for thead_local variables in the IR. This should be the
245   /// same as getDefaultAS() but for CHERI we still place TLS vars in AS0 when
246   /// using the legacy TLS ABI.
getTlsAddressSpace()247   virtual unsigned getTlsAddressSpace() const { return getDefaultAS(); }
248 
cheriCapabilityAtomicNeedsLibcall(AtomicExpr::AtomicOp Op)249   virtual bool cheriCapabilityAtomicNeedsLibcall(AtomicExpr::AtomicOp Op) const {
250     return true;
251   }
252 
getCHERICapabilityAS()253   virtual unsigned getCHERICapabilityAS() const {
254     llvm_unreachable("Target does not support capabilities!\n");
255     return 0;
256   }
257 
getPointerOffset(CodeGen::CodeGenFunction &,llvm::Value * V)258   virtual llvm::Value *getPointerOffset(CodeGen::CodeGenFunction &,
259                                         llvm::Value *V) const {
260     llvm_unreachable("Target does not support capabilities!\n");
261     return nullptr;
262   }
setPointerOffset(CodeGen::CodeGenFunction &,llvm::Value * Ptr,llvm::Value * Offset)263   virtual llvm::Value *setPointerOffset(CodeGen::CodeGenFunction &,
264           llvm::Value *Ptr, llvm::Value *Offset) const {
265     llvm_unreachable("Target does not support capabilities!\n");
266     return nullptr;
267   }
setPointerAddress(CodeGen::CodeGenFunction &,llvm::Value * Ptr,llvm::Value * Offset)268   virtual llvm::Value *setPointerAddress(CodeGen::CodeGenFunction &,
269                                          llvm::Value *Ptr,
270                                          llvm::Value *Offset) const {
271     llvm_unreachable("Target does not support capabilities!\n");
272     return nullptr;
273   }
setPointerBounds(CodeGen::CodeGenFunction &,llvm::Value * Ptr,llvm::Value * Size,const llvm::Twine & Name)274   virtual llvm::Value *setPointerBounds(CodeGen::CodeGenFunction &,
275                                         llvm::Value *Ptr, llvm::Value *Size,
276                                         const llvm::Twine &Name) const {
277     llvm_unreachable("Target does not support capabilities!\n");
278     return nullptr;
279   }
getPointerBase(CodeGen::CodeGenFunction &,llvm::Value * V)280   virtual llvm::Value *getPointerBase(CodeGen::CodeGenFunction &,
281                                       llvm::Value *V) const {
282       return V;
283   }
284   virtual llvm::Value *getPointerAddress(CodeGen::CodeGenFunction &CGF,
285                                          llvm::Value *V,
286                                          const llvm::Twine &Name) const;
287 
288   /// Get LLVM calling convention for OpenCL kernel.
289   virtual unsigned getOpenCLKernelCallingConv() const;
290 
291   /// Get target specific null pointer.
292   /// \param T is the LLVM type of the null pointer.
293   /// \param QT is the clang QualType of the null pointer.
294   /// \return ConstantPointerNull with the given type \p T.
295   /// Each target can override it to return its own desired constant value.
296   virtual llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
297       llvm::PointerType *T, QualType QT) const;
298 
299   /// Get target favored AST address space of a global variable for languages
300   /// other than OpenCL and CUDA.
301   /// If \p D is nullptr, returns the default target favored address space
302   /// for global variable.
303   virtual LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
304                                           const VarDecl *D) const;
305 
306   /// Get the AST address space for alloca.
getASTAllocaAddressSpace()307   virtual LangAS getASTAllocaAddressSpace() const { return LangAS::Default; }
308 
309   /// Perform address space cast of an expression of pointer type.
310   /// \param V is the LLVM value to be casted to another address space.
311   /// \param SrcAddr is the language address space of \p V.
312   /// \param DestAddr is the targeted language address space.
313   /// \param DestTy is the destination LLVM pointer type.
314   /// \param IsNonNull is the flag indicating \p V is known to be non null.
315   virtual llvm::Value *performAddrSpaceCast(CodeGen::CodeGenFunction &CGF,
316                                             llvm::Value *V, LangAS SrcAddr,
317                                             LangAS DestAddr, llvm::Type *DestTy,
318                                             bool IsNonNull = false) const;
319 
320   virtual unsigned getAddressSpaceForType(QualType DestTy,
321                                           ASTContext& Context) const;
322   virtual bool canMarkAsNonNull(QualType DestTy, ASTContext& Context) const;
323   /// Perform address space cast of a constant expression of pointer type.
324   /// \param V is the LLVM constant to be casted to another address space.
325   /// \param SrcAddr is the language address space of \p V.
326   /// \param DestAddr is the targeted language address space.
327   /// \param DestTy is the destination LLVM pointer type.
328   virtual llvm::Constant *performAddrSpaceCast(CodeGenModule &CGM,
329                                                llvm::Constant *V,
330                                                LangAS SrcAddr, LangAS DestAddr,
331                                                llvm::Type *DestTy) const;
332 
333   /// Get address space of pointer parameter for __cxa_atexit.
getAddrSpaceOfCxaAtexitPtrParam()334   virtual LangAS getAddrSpaceOfCxaAtexitPtrParam() const {
335     return LangAS::Default;
336   }
337 
338   /// Get the syncscope used in LLVM IR.
339   virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
340                                                  SyncScope Scope,
341                                                  llvm::AtomicOrdering Ordering,
342                                                  llvm::LLVMContext &Ctx) const;
343 
344   /// Interface class for filling custom fields of a block literal for OpenCL.
345   class TargetOpenCLBlockHelper {
346   public:
347     typedef std::pair<llvm::Value *, StringRef> ValueTy;
TargetOpenCLBlockHelper()348     TargetOpenCLBlockHelper() {}
~TargetOpenCLBlockHelper()349     virtual ~TargetOpenCLBlockHelper() {}
350     /// Get the custom field types for OpenCL blocks.
351     virtual llvm::SmallVector<llvm::Type *, 1> getCustomFieldTypes() = 0;
352     /// Get the custom field values for OpenCL blocks.
353     virtual llvm::SmallVector<ValueTy, 1>
354     getCustomFieldValues(CodeGenFunction &CGF, const CGBlockInfo &Info) = 0;
355     virtual bool areAllCustomFieldValuesConstant(const CGBlockInfo &Info) = 0;
356     /// Get the custom field values for OpenCL blocks if all values are LLVM
357     /// constants.
358     virtual llvm::SmallVector<llvm::Constant *, 1>
359     getCustomFieldValues(CodeGenModule &CGM, const CGBlockInfo &Info) = 0;
360   };
getTargetOpenCLBlockHelper()361   virtual TargetOpenCLBlockHelper *getTargetOpenCLBlockHelper() const {
362     return nullptr;
363   }
364 
365   /// Create an OpenCL kernel for an enqueued block. The kernel function is
366   /// a wrapper for the block invoke function with target-specific calling
367   /// convention and ABI as an OpenCL kernel. The wrapper function accepts
368   /// block context and block arguments in target-specific way and calls
369   /// the original block invoke function.
370   virtual llvm::Function *
371   createEnqueuedBlockKernel(CodeGenFunction &CGF,
372                             llvm::Function *BlockInvokeFunc,
373                             llvm::Value *BlockLiteral) const;
374 
375   /// \return true if the target supports alias from the unmangled name to the
376   /// mangled name of functions declared within an extern "C" region and marked
377   /// as 'used', and having internal linkage.
shouldEmitStaticExternCAliases()378   virtual bool shouldEmitStaticExternCAliases() const { return true; }
379 
setCUDAKernelCallingConvention(const FunctionType * & FT)380   virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const {}
381 
382   /// Return the device-side type for the CUDA device builtin surface type.
getCUDADeviceBuiltinSurfaceDeviceType()383   virtual llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const {
384     // By default, no change from the original one.
385     return nullptr;
386   }
387   /// Return the device-side type for the CUDA device builtin texture type.
getCUDADeviceBuiltinTextureDeviceType()388   virtual llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const {
389     // By default, no change from the original one.
390     return nullptr;
391   }
392 
393   /// Emit the device-side copy of the builtin surface type.
emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction & CGF,LValue Dst,LValue Src)394   virtual bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF,
395                                                       LValue Dst,
396                                                       LValue Src) const {
397     // DO NOTHING by default.
398     return false;
399   }
400   /// Emit the device-side copy of the builtin texture type.
emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction & CGF,LValue Dst,LValue Src)401   virtual bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF,
402                                                       LValue Dst,
403                                                       LValue Src) const {
404     // DO NOTHING by default.
405     return false;
406   }
407 };
408 
409 } // namespace CodeGen
410 } // namespace clang
411 
412 #endif // LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
413