use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_middle::bug;
use rustc_middle::ty::layout::LayoutOf;
pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_target::abi::call::ArgAbi;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int};
pub use rustc_target::spec::abi::Abi;

use libc::c_uint;

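// Expands to one guarded call per listed attribute kind: for each `ArgAttribute`
// flag contained in `$flags`, invokes `$f` with the matching `llvm::Attribute`.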
macro_rules! for_each_kind {
    ($flags: ident, $f: ident, $($kind: ident),+) => ({
        $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
    })
}

trait ArgAttributeExt {
    fn for_each_kind<F>(&self, f: F)
    where
        F: FnMut(llvm::Attribute);
}

impl ArgAttributeExt for ArgAttribute {
    fn for_each_kind<F>(&self, mut f: F)
    where
        F: FnMut(llvm::Attribute),
    {
        for_each_kind!(self, f, NoAlias, NoCapture, NonNull, ReadOnly, InReg)
    }
}

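/// Translates the target-independent `ArgAttributes` into LLVM attributes,
/// either on a function declaration (`llfn`) or on an individual call site.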
pub trait ArgAttributesExt {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    );
}

fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
    // LLVM prior to version 12 had known miscompiles in the presence of
    // noalias attributes (see #54878), but we don't support earlier
    // versions at all anymore. We now enable mutable noalias by default.
    cx.tcx.sess.opts.debugging_opts.mutable_noalias.unwrap_or(true)
}

impl ArgAttributesExt for ArgAttributes {
    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
            if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
                llvm::Attribute::NoAlias.apply_llfn(idx, llfn);
            }
            match self.arg_ext {
                ArgExtension::None => {}
                ArgExtension::Zext => {
                    llvm::Attribute::ZExt.apply_llfn(idx, llfn);
                }
                ArgExtension::Sext => {
                    llvm::Attribute::SExt.apply_llfn(idx, llfn);
                }
            }
        }
    }

    fn apply_attrs_to_callsite(
        &self,
        idx: AttributePlace,
        cx: &CodegenCx<'_, '_>,
        callsite: &Value,
    ) {
        let mut regular = self.regular;
        unsafe {
            let deref = self.pointee_size.bytes();
            if deref != 0 {
                if regular.contains(ArgAttribute::NonNull) {
                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
                } else {
                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(
                        callsite,
                        idx.as_uint(),
                        deref,
                    );
                }
                regular -= ArgAttribute::NonNull;
            }
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(
                    callsite,
                    idx.as_uint(),
                    align.bytes() as u32,
                );
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
            if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
                llvm::Attribute::NoAlias.apply_callsite(idx, callsite);
            }
            match self.arg_ext {
                ArgExtension::None => {}
                ArgExtension::Zext => {
                    llvm::Attribute::ZExt.apply_callsite(idx, callsite);
                }
                ArgExtension::Sext => {
                    llvm::Attribute::SExt.apply_callsite(idx, callsite);
                }
            }
        }
    }
}

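/// Computes the LLVM type used to pass a value with the given ABI classification
/// (`Reg` or `CastTarget`).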
pub trait LlvmType {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}

impl LlvmType for Reg {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        match self.kind {
            RegKind::Integer => cx.type_ix(self.size.bits()),
            RegKind::Float => match self.size.bits() {
                32 => cx.type_f32(),
                64 => cx.type_f64(),
                _ => bug!("unsupported float: {:?}", self),
            },
            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
        }
    }
}

impl LlvmType for CastTarget {
    fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        let rest_ll_unit = self.rest.unit.llvm_type(cx);
        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
            (0, 0)
        } else {
            (
                self.rest.total.bytes() / self.rest.unit.size.bytes(),
                self.rest.total.bytes() % self.rest.unit.size.bytes(),
            )
        };

        if self.prefix.iter().all(|x| x.is_none()) {
            // Simplify to a single unit when there is no prefix and size <= unit size
            if self.rest.total <= self.rest.unit.size {
                return rest_ll_unit;
            }

            // Simplify to array when all chunks are the same size and type
            if rem_bytes == 0 {
                return cx.type_array(rest_ll_unit, rest_count);
            }
        }

        // Create list of fields in the main structure
        let mut args: Vec<_> = self
            .prefix
            .iter()
            .flat_map(|option_kind| {
                option_kind.map(|kind| Reg { kind, size: self.prefix_chunk_size }.llvm_type(cx))
            })
            .chain((0..rest_count).map(|_| rest_ll_unit))
            .collect();

        // Append final integer
        if rem_bytes != 0 {
            // Only integers can be really split further.
            assert_eq!(self.rest.unit.kind, RegKind::Integer);
            args.push(cx.type_ix(rem_bytes * 8));
        }

        cx.type_struct(&args, false)
    }
}

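/// Helpers for moving a single argument or return value, as described by an
/// `ArgAbi`, from its ABI representation into a place with the original Rust layout.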
pub trait ArgAbiExt<'ll, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
    fn store_fn_arg(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    );
}

impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the LLVM type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        self.layout.llvm_type(cx)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used both for storing formal arguments into Rust variables
    /// and for storing results of call/invoke instructions into their destinations.
    fn store(
        &self,
        bx: &mut Builder<'_, 'll, 'tcx>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        } else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type.  The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We instead allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        } else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    fn store_fn_arg(
        &self,
        bx: &mut Builder<'a, 'll, 'tcx>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
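        // Fetch the next raw LLVM parameter of the current function, advancing `idx`.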
        let mut next = || {
            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
            *idx += 1;
            val
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
            | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg, dst);
            }
        }
    }
}

impl ArgAbiMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn store_fn_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        idx: &mut usize,
        dst: PlaceRef<'tcx, Self::Value>,
    ) {
        arg_abi.store_fn_arg(self, idx, dst)
    }
    fn store_arg(
        &mut self,
        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        val: &'ll Value,
        dst: PlaceRef<'tcx, &'ll Value>,
    ) {
        arg_abi.store(self, val, dst)
    }
    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
        arg_abi.memory_ty(self)
    }
}

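/// Extension methods on a whole `FnAbi`: computing the LLVM function type and
/// calling convention, and applying per-argument attributes to function
/// declarations and call sites.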
pub trait FnAbiLlvmExt<'tcx> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
    fn llvm_cconv(&self) -> llvm::CallConv;
    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}

impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        // Ignore "extra" args from the call site for C variadic functions.
        // Only the "fixed" args are part of the LLVM function signature.
        let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };

        let args_capacity: usize = args.iter().map(|arg|
            if arg.pad.is_some() { 1 } else { 0 } +
            if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
        ).sum();
        let mut llargument_tys = Vec::with_capacity(
            if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
        );

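        // Compute the LLVM return type. An indirect return becomes a leading
        // out-pointer argument and the function itself returns `void`.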
        let llreturn_ty = match self.ret.mode {
            PassMode::Ignore => cx.type_void(),
            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
            PassMode::Cast(cast) => cast.llvm_type(cx),
            PassMode::Indirect { .. } => {
                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                cx.type_void()
            }
        };

        for arg in args {
            // add padding
            if let Some(ty) = arg.pad {
                llargument_tys.push(ty.llvm_type(cx));
            }

            let llarg_ty = match arg.mode {
                PassMode::Ignore => continue,
                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                PassMode::Pair(..) => {
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
                    let ptr_layout = cx.layout_of(ptr_ty);
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
                    continue;
                }
                PassMode::Cast(cast) => cast.llvm_type(cx),
                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
                    cx.type_ptr_to(arg.memory_ty(cx))
                }
            };
            llargument_tys.push(llarg_ty);
        }

        if self.c_variadic {
            cx.type_variadic_func(&llargument_tys, llreturn_ty)
        } else {
            cx.type_func(&llargument_tys, llreturn_ty)
        }
    }

    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
        unsafe {
            llvm::LLVMPointerType(
                self.llvm_type(cx),
                cx.data_layout().instruction_address_space.0 as c_uint,
            )
        }
    }

    fn llvm_cconv(&self) -> llvm::CallConv {
        match self.conv {
            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
            Conv::AmdGpuKernel => llvm::AmdGpuKernel,
            Conv::AvrInterrupt => llvm::AvrInterrupt,
            Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
            Conv::Msp430Intr => llvm::Msp430Intr,
            Conv::PtxKernel => llvm::PtxKernel,
            Conv::X86Fastcall => llvm::X86FastcallCallConv,
            Conv::X86Intr => llvm::X86_Intr,
            Conv::X86Stdcall => llvm::X86StdcallCallConv,
            Conv::X86ThisCall => llvm::X86_ThisCall,
            Conv::X86VectorCall => llvm::X86_VectorCall,
            Conv::X86_64SysV => llvm::X86_64_SysV,
            Conv::X86_64Win64 => llvm::X86_64_Win64,
        }
    }

    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
        // FIXME(eddyb) can this also be applied to callsites?
        if self.ret.layout.abi.is_uninhabited() {
            llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
        }

        // FIXME(eddyb, wesleywiser): apply this to callsites as well?
        if !self.can_unwind {
            llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
        }

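        // `apply` attaches `attrs` to the next LLVM argument slot and returns the
        // index of that slot, so `sret`/`byval` can be added to the same position.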
        let mut i = 0;
        let mut apply = |attrs: &ArgAttributes| {
            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(attrs);
                unsafe {
                    llvm::LLVMRustAddStructRetAttr(
                        llfn,
                        llvm::AttributePlace::Argument(i).as_uint(),
                        self.ret.layout.llvm_type(cx),
                    );
                }
            }
            _ => {}
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(&ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(attrs);
                    unsafe {
                        llvm::LLVMRustAddByValAttr(
                            llfn,
                            llvm::AttributePlace::Argument(i).as_uint(),
                            arg.layout.llvm_type(cx),
                        );
                    }
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(attrs);
                }
                PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
                    assert!(!on_stack);
                    apply(attrs);
                    apply(extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(a);
                    apply(b);
                }
                PassMode::Cast(_) => {
                    apply(&ArgAttributes::new());
                }
            }
        }
    }

    fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
        if self.ret.layout.abi.is_uninhabited() {
            llvm::Attribute::NoReturn.apply_callsite(llvm::AttributePlace::Function, callsite);
        }
        if !self.can_unwind {
            llvm::Attribute::NoUnwind.apply_callsite(llvm::AttributePlace::Function, callsite);
        }

        let mut i = 0;
        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
            i += 1;
            i - 1
        };
        match self.ret.mode {
            PassMode::Direct(ref attrs) => {
                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
            }
            PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
                assert!(!on_stack);
                let i = apply(bx.cx, attrs);
                unsafe {
                    llvm::LLVMRustAddStructRetCallSiteAttr(
                        callsite,
                        llvm::AttributePlace::Argument(i).as_uint(),
                        self.ret.layout.llvm_type(bx),
                    );
                }
            }
            _ => {}
        }
        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
            // If the value is a boolean, the range is 0..2 and that ultimately
            // becomes 0..0 when the type becomes i1, which would be rejected
            // by the LLVM verifier.
            if let Int(..) = scalar.value {
                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
                    bx.range_metadata(callsite, scalar.valid_range);
                }
            }
        }
        for arg in &self.args {
            if arg.pad.is_some() {
                apply(bx.cx, &ArgAttributes::new());
            }
            match arg.mode {
                PassMode::Ignore => {}
                PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
                    let i = apply(bx.cx, attrs);
                    unsafe {
                        llvm::LLVMRustAddByValCallSiteAttr(
                            callsite,
                            llvm::AttributePlace::Argument(i).as_uint(),
                            arg.layout.llvm_type(bx),
                        );
                    }
                }
                PassMode::Direct(ref attrs)
                | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
                    apply(bx.cx, attrs);
                }
                PassMode::Indirect {
                    ref attrs,
                    extra_attrs: Some(ref extra_attrs),
                    on_stack: _,
                } => {
                    apply(bx.cx, attrs);
                    apply(bx.cx, extra_attrs);
                }
                PassMode::Pair(ref a, ref b) => {
                    apply(bx.cx, a);
                    apply(bx.cx, b);
                }
                PassMode::Cast(_) => {
                    apply(bx.cx, &ArgAttributes::new());
                }
            }
        }

        let cconv = self.llvm_cconv();
        if cconv != llvm::CCallConv {
            llvm::SetInstructionCallConv(callsite, cconv);
        }

        if self.conv == Conv::CCmseNonSecureCall {
            // This will probably get ignored on all targets but those supporting the TrustZone-M
            // extension (thumbv8m targets).
            unsafe {
                llvm::AddCallSiteAttrString(
                    callsite,
                    llvm::AttributePlace::Function,
                    cstr::cstr!("cmse_nonsecure_call"),
                );
            }
        }
    }
}

impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
        fn_abi.apply_attrs_callsite(self, callsite)
    }

    fn get_param(&mut self, index: usize) -> Self::Value {
        llvm::get_param(self.llfn(), index as c_uint)
    }
}