pub mod llvm;
mod simd;

use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
use rustc_middle::bug;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::ty::layout::LayoutOf;
use rustc_span::{Span, Symbol, symbol::kw, sym};
use rustc_target::abi::HasDataLayout;
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::spec::PanicStrategy;

use crate::abi::GccType;
use crate::builder::Builder;
use crate::common::{SignType, TypeReflection};
use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;
use crate::intrinsic::simd::generic_simd_intrinsic;

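/// Maps a Rust intrinsic symbol (e.g. `sym::sqrtf64`) to the name of a matching
/// GCC builtin, when a direct one-to-one lowering exists. Returns `None` for
/// intrinsics that need custom codegen below.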
fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
    let gcc_name = match name {
        sym::sqrtf32 => "sqrtf",
        sym::sqrtf64 => "sqrt",
        sym::powif32 => "__builtin_powif",
        sym::powif64 => "__builtin_powi",
        sym::sinf32 => "sinf",
        sym::sinf64 => "sin",
        sym::cosf32 => "cosf",
        sym::cosf64 => "cos",
        sym::powf32 => "powf",
        sym::powf64 => "pow",
        sym::expf32 => "expf",
        sym::expf64 => "exp",
        sym::exp2f32 => "exp2f",
        sym::exp2f64 => "exp2",
        sym::logf32 => "logf",
        sym::logf64 => "log",
        sym::log10f32 => "log10f",
        sym::log10f64 => "log10",
        sym::log2f32 => "log2f",
        sym::log2f64 => "log2",
        sym::fmaf32 => "fmaf",
        sym::fmaf64 => "fma",
        sym::fabsf32 => "fabsf",
        sym::fabsf64 => "fabs",
        sym::minnumf32 => "fminf",
        sym::minnumf64 => "fmin",
        sym::maxnumf32 => "fmaxf",
        sym::maxnumf64 => "fmax",
        sym::copysignf32 => "copysignf",
        sym::copysignf64 => "copysign",
        sym::floorf32 => "floorf",
        sym::floorf64 => "floor",
        sym::ceilf32 => "ceilf",
        sym::ceilf64 => "ceil",
        sym::truncf32 => "truncf",
        sym::truncf64 => "trunc",
        sym::rintf32 => "rintf",
        sym::rintf64 => "rint",
        sym::nearbyintf32 => "nearbyintf",
        sym::nearbyintf64 => "nearbyint",
        sym::roundf32 => "roundf",
        sym::roundf64 => "round",
        sym::abort => "abort",
        _ => return None,
    };
    Some(cx.context.get_builtin_function(&gcc_name))
}

impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
        let tcx = self.tcx;
        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

        let (def_id, substs) = match *callee_ty.kind() {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = tcx.item_name(def_id);
        let name_str = &*name.as_str();

        let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let simple = get_simple_intrinsic(self, name);
        let llval =
            match name {
                _ if simple.is_some() => {
                    // FIXME(antoyo): remove this cast when the API supports function.
                    let func = unsafe { std::mem::transmute(simple.expect("simple")) };
                    self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
                },
                sym::likely => {
                    self.expect(args[0].immediate(), true)
                }
                sym::unlikely => {
                    self.expect(args[0].immediate(), false)
                }
                kw::Try => {
                    try_intrinsic(
                        self,
                        args[0].immediate(),
                        args[1].immediate(),
                        args[2].immediate(),
                        llresult,
                    );
                    return;
                }
                sym::breakpoint => {
                    unimplemented!();
                }
                sym::va_copy => {
                    unimplemented!();
                }
                sym::va_arg => {
                    unimplemented!();
                }

                sym::volatile_load | sym::unaligned_volatile_load => {
                    let tp_ty = substs.type_at(0);
                    let mut ptr = args[0].immediate();
                    if let PassMode::Cast(ty) = fn_abi.ret.mode {
                        ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
                    }
                    let load = self.volatile_load(ptr.get_type(), ptr);
                    // TODO(antoyo): set alignment.
                    self.to_immediate(load, self.layout_of(tp_ty))
                }
                sym::volatile_store => {
                    let dst = args[0].deref(self.cx());
                    args[1].val.volatile_store(self, dst);
                    return;
                }
                sym::unaligned_volatile_store => {
                    let dst = args[0].deref(self.cx());
                    args[1].val.unaligned_volatile_store(self, dst);
                    return;
                }
                sym::prefetch_read_data
                    | sym::prefetch_write_data
                    | sym::prefetch_read_instruction
                    | sym::prefetch_write_instruction => {
                        unimplemented!();
                    }
                sym::ctlz
                    | sym::ctlz_nonzero
                    | sym::cttz
                    | sym::cttz_nonzero
                    | sym::ctpop
                    | sym::bswap
                    | sym::bitreverse
                    | sym::rotate_left
                    | sym::rotate_right
                    | sym::saturating_add
                    | sym::saturating_sub => {
                        let ty = arg_tys[0];
                        match int_type_width_signed(ty, self) {
                            Some((width, signed)) => match name {
                                sym::ctlz | sym::cttz => {
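                                    // The Rust intrinsics are defined to return the bit width
                                    // for a zero input, but the GCC builtins are undefined on
                                    // zero, so branch on the argument being zero first.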
                                    let func = self.current_func.borrow().expect("func");
                                    let then_block = func.new_block("then");
                                    let else_block = func.new_block("else");
                                    let after_block = func.new_block("after");

                                    let arg = args[0].immediate();
                                    let result = func.new_local(None, arg.get_type(), "zeros");
                                    let zero = self.cx.context.new_rvalue_zero(arg.get_type());
                                    let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
                                    self.llbb().end_with_conditional(None, cond, then_block, else_block);

                                    let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
                                    then_block.add_assignment(None, result, zero_result);
                                    then_block.end_with_jump(None, after_block);

                                    // NOTE: since jumps were added in a place
                                    // count_leading_zeroes() does not expect, the current blocks
                                    // in the state need to be updated.
                                    *self.current_block.borrow_mut() = Some(else_block);
                                    self.block = Some(else_block);

                                    let zeros =
                                        match name {
                                            sym::ctlz => self.count_leading_zeroes(width, arg),
                                            sym::cttz => self.count_trailing_zeroes(width, arg),
                                            _ => unreachable!(),
                                        };
                                    else_block.add_assignment(None, result, zeros);
                                    else_block.end_with_jump(None, after_block);

                                    // NOTE: since jumps were added in a place rustc does not
                                    // expect, the current blocks in the state need to be updated.
                                    *self.current_block.borrow_mut() = Some(after_block);
                                    self.block = Some(after_block);

                                    result.to_rvalue()
                                }
                                sym::ctlz_nonzero => {
                                    self.count_leading_zeroes(width, args[0].immediate())
                                },
                                sym::cttz_nonzero => {
                                    self.count_trailing_zeroes(width, args[0].immediate())
                                }
                                sym::ctpop => self.pop_count(args[0].immediate()),
                                sym::bswap => {
                                    if width == 8 {
                                        // Byte swapping a u8/i8 is a no-op.
                                        args[0].immediate()
                                    }
                                    else {
                                        // TODO(antoyo): check if it's faster to use string literals and a
                                        // match instead of format!.
                                        let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
                                        let mut arg = args[0].immediate();
                                        // FIXME(antoyo): this cast should not be necessary. Remove
                                        // when having proper sized integer types.
                                        let param_type = bswap.get_param(0).to_rvalue().get_type();
                                        if param_type != arg.get_type() {
                                            arg = self.bitcast(arg, param_type);
                                        }
                                        self.cx.context.new_call(None, bswap, &[arg])
                                    }
                                },
                                sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
                                sym::rotate_left | sym::rotate_right => {
                                    // TODO(antoyo): implement using algorithm from:
                                    // https://blog.regehr.org/archives/1063
                                    // for other platforms.
                                    let is_left = name == sym::rotate_left;
                                    let val = args[0].immediate();
                                    let raw_shift = args[1].immediate();
                                    if is_left {
                                        self.rotate_left(val, raw_shift, width)
                                    }
                                    else {
                                        self.rotate_right(val, raw_shift, width)
                                    }
                                },
                                sym::saturating_add => {
                                    self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
                                },
                                sym::saturating_sub => {
                                    self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
                                },
                                _ => bug!(),
                            },
                            None => {
                                span_invalid_monomorphization_error(
                                    tcx.sess,
                                    span,
                                    &format!(
                                        "invalid monomorphization of `{}` intrinsic: \
                                         expected basic integer type, found `{}`",
                                        name, ty
                                    ),
                                );
                                return;
                            }
                        }
                    }

                sym::raw_eq => {
                    use rustc_target::abi::Abi::*;
                    let tp_ty = substs.type_at(0);
                    let layout = self.layout_of(tp_ty).layout;
                    let _use_integer_compare = match layout.abi {
                        Scalar(_) | ScalarPair(_, _) => true,
                        Uninhabited | Vector { .. } => false,
                        Aggregate { .. } => {
                            // For rusty ABIs, small aggregates are actually passed
                            // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                            // so we re-use that same threshold here.
                            layout.size <= self.data_layout().pointer_size * 2
                        }
                    };

                    let a = args[0].immediate();
                    let b = args[1].immediate();
                    if layout.size.bytes() == 0 {
                        self.const_bool(true)
                    }
                    /*else if use_integer_compare {
                        let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
                        let ptr_ty = self.type_ptr_to(integer_ty);
                        let a_ptr = self.bitcast(a, ptr_ty);
                        let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
                        let b_ptr = self.bitcast(b, ptr_ty);
                        let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
                        self.icmp(IntPredicate::IntEQ, a_val, b_val)
                    }*/
                    else {
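                        // Fall back to calling memcmp and comparing its result with zero.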
                        let void_ptr_type = self.context.new_type::<*const ()>();
                        let a_ptr = self.bitcast(a, void_ptr_type);
                        let b_ptr = self.bitcast(b, void_ptr_type);
                        let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
                        let builtin = self.context.get_builtin_function("memcmp");
                        let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
                        self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
                    }
                }

                sym::black_box => {
                    args[0].val.store(self, result);

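                    // An empty volatile inline asm that takes the value as an input
                    // and clobbers memory forces the compiler to assume the value is
                    // read, which is the optimization barrier black_box needs.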
                    let block = self.llbb();
                    let extended_asm = block.add_extended_asm(None, "");
                    extended_asm.add_input_operand(None, "r", result.llval);
                    extended_asm.add_clobber("memory");
                    extended_asm.set_volatile_flag(true);

                    // We have copied the value to `result` already.
                    return;
                }

                _ if name_str.starts_with("simd_") => {
                    match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                        Ok(llval) => llval,
                        Err(()) => return,
                    }
                }

                _ => bug!("unknown intrinsic '{}'", name),
            };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                let ptr = self.pointercast(result.llval, ptr_llty);
                self.store(llval, ptr, result.align);
            }
            else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
                    .store(self, result);
            }
        }
    }

    fn abort(&mut self) {
        let func = self.context.get_builtin_function("abort");
        let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
        self.call(self.type_void(), func, &[], None);
    }

    fn assume(&mut self, value: Self::Value) {
        // TODO(antoyo): switch to assume when it exists.
        // Or use something like this:
        // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
        self.expect(value, true);
    }

    fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
        // TODO(antoyo)
        cond
    }

    fn type_test(&mut self, _pointer: Self::Value, _typeid: Self::Value) -> Self::Value {
        // Unsupported.
        self.context.new_rvalue_from_int(self.int_type, 0)
    }

    fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }
}

impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
        arg_abi.store_fn_arg(self, idx, dst)
    }

    fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        arg_abi.store(self, val, dst)
    }

    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
        arg_abi.memory_ty(self)
    }
}

pub trait ArgAbiExt<'gcc, 'tcx> {
    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
    fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
}

impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
    /// Gets the GCC type for a place of the original Rust type of
    /// this argument/return, i.e., the result of `type_of::type_of`.
    fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
        self.layout.gcc_type(cx, true)
    }

    /// Stores a direct/indirect value described by this ArgAbi into a
    /// place for the original Rust type of this argument/return.
    /// Can be used for both storing formal arguments into Rust variables
    /// or results of call/invoke instructions into their destinations.
    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        if self.is_ignore() {
            return;
        }
        if self.is_sized_indirect() {
            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        }
        else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        }
        else if let PassMode::Cast(cast) = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            }
            else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
                // code that follows is the only reliable way I have
                // found to do a transform like i64 -> {i32,i32}.
                // Basically we dump the data onto the stack then memcpy it.
                //
                // Other approaches I tried:
                // - Casting rust ret pointer to the foreign type and using Store
                //   is (a) unsafe if size of foreign type > size of rust type and
                //   (b) runs afoul of strict aliasing rules, yielding invalid
                //   assembly under -O (specifically, the store gets removed).
                // - Truncating foreign type to correct integral type and then
                //   bitcasting to the struct type yields invalid cast errors.

                // We therefore allocate some scratch space...
                let scratch_size = cast.size(bx);
                let scratch_align = cast.align(bx);
                let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
                bx.lifetime_start(llscratch, scratch_size);

                // ... where we first store the value...
                bx.store(val, llscratch, scratch_align);

                // ... and then memcpy it to the intended destination.
                bx.memcpy(
                    dst.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
                    bx.const_usize(self.layout.size.bytes()),
                    MemFlags::empty(),
                );

                bx.lifetime_end(llscratch, scratch_size);
            }
        }
        else {
            OperandValue::Immediate(val).store(bx, dst);
        }
    }

    fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
        let mut next = || {
            let val = bx.current_func().get_param(*idx as i32);
            *idx += 1;
            val.to_rvalue()
        };
        match self.mode {
            PassMode::Ignore => {}
            PassMode::Pair(..) => {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { extra_attrs: Some(_), .. } => {
                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
                let next_arg = next();
                self.store(bx, next_arg.to_rvalue(), dst);
            }
        }
    }
}

fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
    match ty.kind() {
        ty::Int(t) => Some((
            match t {
                rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
                rustc_middle::ty::IntTy::I8 => 8,
                rustc_middle::ty::IntTy::I16 => 16,
                rustc_middle::ty::IntTy::I32 => 32,
                rustc_middle::ty::IntTy::I64 => 64,
                rustc_middle::ty::IntTy::I128 => 128,
            },
            true,
        )),
        ty::Uint(t) => Some((
            match t {
                rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
                rustc_middle::ty::UintTy::U8 => 8,
                rustc_middle::ty::UintTy::U16 => 16,
                rustc_middle::ty::UintTy::U32 => 32,
                rustc_middle::ty::UintTy::U64 => 64,
                rustc_middle::ty::UintTy::U128 => 128,
            },
            false,
        )),
        _ => None,
    }
}

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
        let result_type = value.get_type();
        let typ = result_type.to_unsigned(self.cx);

        let value =
            if result_type.is_signed(self.cx) {
                self.context.new_bitcast(None, value, typ)
            }
            else {
                value
            };

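        // Reverse the bits with successively wider mask-shift-or swaps (a standard
        // divide-and-conquer bit reversal). E.g. for 8 bits: swap nibbles, then
        // 2-bit pairs, then single bits:
        // 0b10110001 -> 0b00011011 -> 0b01001110 -> 0b10001101.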
        let context = &self.cx.context;
        let result =
            match width {
                8 => {
                    // First step.
                    let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
                    let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
                    let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
                    let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
                    let step1 = self.or(left, right);

                    // Second step.
                    let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
                    let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
                    let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
                    let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
                    let step2 = self.or(left, right);

                    // Third step.
                    let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
                    let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
                    let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
                    let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
                    let step3 = self.or(left, right);

                    step3
                },
                16 => {
                    // First step.
                    let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
                    let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
                    let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
                    let step1 = self.or(left, right);

                    // Second step.
                    let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
                    let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
                    let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
                    let step2 = self.or(left, right);

                    // Third step.
                    let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
                    let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
                    let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
                    let step3 = self.or(left, right);

                    // Fourth step.
                    let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
                    let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
                    let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
                    let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
                    let step4 = self.or(left, right);

                    step4
                },
                32 => {
                    // TODO(antoyo): Refactor with other implementations.
                    // First step.
                    let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
                    let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
                    let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
                    let step1 = self.or(left, right);

                    // Second step.
                    let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
                    let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
                    let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
                    let step2 = self.or(left, right);

                    // Third step.
                    let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
                    let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
                    let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
                    let step3 = self.or(left, right);

                    // Fourth step.
                    let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
                    let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
                    let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
                    let step4 = self.or(left, right);

                    // Fifth step.
                    let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
                    let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
                    let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
                    let step5 = self.or(left, right);

                    step5
                },
                64 => {
                    // First step.
                    let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
                    let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
                    let step1 = self.or(left, right);

                    // Second step.
                    let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
                    let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
                    let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
                    let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
                    let step2 = self.or(left, right);

                    // Third step.
                    let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
                    let left = self.xor(step2, left);
                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));

                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
                    let left = self.or(temp, left);
                    let step3 = self.xor(left, step2);

                    // Fourth step.
                    let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
                    let left = self.xor(step3, left);
                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));

                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
                    let left = self.or(temp, left);
                    let step4 = self.xor(left, step3);

                    // Fifth step.
                    let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
                    let left = self.xor(step4, left);
                    let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));

                    let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
                    let left = self.or(temp, left);
                    let step5 = self.xor(left, step4);

                    step5
                },
                128 => {
                    // TODO(antoyo): find a more efficient implementation?
                    let sixty_four = self.context.new_rvalue_from_long(typ, 64);
                    let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
                    let low = self.context.new_cast(None, value, self.u64_type);

                    let reversed_high = self.bit_reverse(64, high);
                    let reversed_low = self.bit_reverse(64, low);

                    let new_low = self.context.new_cast(None, reversed_high, typ);
                    let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;

                    new_low | new_high
                },
                _ => {
                    panic!("cannot bit reverse with width = {}", width);
                },
            };

        self.context.new_bitcast(None, result, result_type)
    }

    fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): use width?
        let arg_type = arg.get_type();
        let count_leading_zeroes =
            if arg_type.is_uint(&self.cx) {
                "__builtin_clz"
            }
            else if arg_type.is_ulong(&self.cx) {
                "__builtin_clzl"
            }
            else if arg_type.is_ulonglong(&self.cx) {
                "__builtin_clzll"
            }
            else if width == 128 {
                // Algorithm from: https://stackoverflow.com/a/28433850/389119
                let array_type = self.context.new_array_type(None, arg_type, 3);
                let result = self.current_func()
                    .new_local(None, array_type, "count_leading_zeroes_results");

                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, arg, self.u64_type);

                let zero = self.context.new_rvalue_zero(self.usize_type);
                let one = self.context.new_rvalue_one(self.usize_type);
                let two = self.context.new_rvalue_from_long(self.usize_type, 2);

                let clzll = self.context.get_builtin_function("__builtin_clzll");

                let first_elem = self.context.new_array_access(None, result, zero);
                let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
                self.llbb()
                    .add_assignment(None, first_elem, first_value);

                let second_elem = self.context.new_array_access(None, result, one);
                let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
                self.llbb()
                    .add_assignment(None, second_elem, second_value);

                let third_elem = self.context.new_array_access(None, result, two);
                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
                self.llbb()
                    .add_assignment(None, third_elem, third_value);

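                // The index `!high + (!low & !high)` selects the precomputed entry:
                // 0 when high != 0, 1 when high == 0 but low != 0, and 2 when the
                // whole 128-bit value is zero.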
                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
                let not_low_and_not_high = not_low & not_high;
                let index = not_high + not_low_and_not_high;

                let res = self.context.new_array_access(None, result, index);

                return self.context.new_cast(None, res, arg_type);
            }
            else {
                let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
                let arg = self.context.new_cast(None, arg, self.uint_type);
                let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
                let diff = self.context.new_rvalue_from_long(self.int_type, diff);
                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
                return self.context.new_cast(None, res, arg_type);
            };
        let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
        self.context.new_cast(None, res, arg_type)
    }

    fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
        let result_type = arg.get_type();
        let arg =
            if result_type.is_signed(self.cx) {
                let new_type = result_type.to_unsigned(self.cx);
                self.context.new_bitcast(None, arg, new_type)
            }
            else {
                arg
            };
        let arg_type = arg.get_type();
        let (count_trailing_zeroes, expected_type) =
            if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
                // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
                ("__builtin_ctz", self.cx.uint_type)
            }
            else if arg_type.is_ulong(&self.cx) {
                ("__builtin_ctzl", self.cx.ulong_type)
            }
            else if arg_type.is_ulonglong(&self.cx) {
                ("__builtin_ctzll", self.cx.ulonglong_type)
            }
            else if arg_type.is_u128(&self.cx) {
                // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
                let array_type = self.context.new_array_type(None, arg_type, 3);
                let result = self.current_func()
                    .new_local(None, array_type, "count_trailing_zeroes_results");

                let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
                let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
                let low = self.context.new_cast(None, arg, self.u64_type);

                let zero = self.context.new_rvalue_zero(self.usize_type);
                let one = self.context.new_rvalue_one(self.usize_type);
                let two = self.context.new_rvalue_from_long(self.usize_type, 2);

                let ctzll = self.context.get_builtin_function("__builtin_ctzll");

                let first_elem = self.context.new_array_access(None, result, zero);
                let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
                self.llbb()
                    .add_assignment(None, first_elem, first_value);

                let second_elem = self.context.new_array_access(None, result, one);
                let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
                self.llbb()
                    .add_assignment(None, second_elem, second_value);

                let third_elem = self.context.new_array_access(None, result, two);
                let third_value = self.context.new_rvalue_from_long(arg_type, 128);
                self.llbb()
                    .add_assignment(None, third_elem, third_value);

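                // The index `!low + (!low & !high)` selects the precomputed entry:
                // 0 when low != 0, 1 when low == 0 but high != 0, and 2 when the
                // whole 128-bit value is zero.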
                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
                let not_low_and_not_high = not_low & not_high;
                let index = not_low + not_low_and_not_high;

                let res = self.context.new_array_access(None, result, index);

                return self.context.new_bitcast(None, res, result_type);
            }
            else {
                unimplemented!("count_trailing_zeroes for {:?}", arg_type);
            };
        let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
        let arg =
            if arg_type != expected_type {
                self.context.new_cast(None, arg, expected_type)
            }
            else {
                arg
            };
        let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
        self.context.new_bitcast(None, res, result_type)
    }

    fn int_width(&self, typ: Type<'gcc>) -> i64 {
        self.cx.int_width(typ) as i64
    }

    fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): use the optimized version with fewer operations.
        let result_type = value.get_type();
        let value_type = result_type.to_unsigned(self.cx);

        let value =
            if result_type.is_signed(self.cx) {
                self.context.new_bitcast(None, value, value_type)
            }
            else {
                value
            };

        if value_type.is_u128(&self.cx) {
            // TODO(antoyo): implement in the normal algorithm below to have a more efficient
            // implementation (that does not require a call to __popcountdi2).
            let popcount = self.context.get_builtin_function("__builtin_popcountll");
            let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
            let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
            let high = self.context.new_call(None, popcount, &[high]);
            let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
            let low = self.context.new_call(None, popcount, &[low]);
            let res = high + low;
            return self.context.new_bitcast(None, res, result_type);
        }

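        // SWAR popcount: each step sums adjacent k-bit counts into 2k-bit counts
        // using a mask. E.g. for 0b1101, the first step computes
        // (0b1101 & 0b0101) + ((0b1101 >> 1) & 0b0101) = 0b1001,
        // i.e. the pair counts 2 and 1.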
        // First step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
        let right = shifted & mask;
        let value = left + right;

        // Second step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
        let right = shifted & mask;
        let value = left + right;

        // Third step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u8(&self.cx) {
            return self.context.new_bitcast(None, value, result_type);
        }

        // Fourth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u16(&self.cx) {
            return self.context.new_bitcast(None, value, result_type);
        }

        // Fifth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
        let right = shifted & mask;
        let value = left + right;

        if value_type.is_u32(&self.cx) {
            return self.context.new_bitcast(None, value, result_type);
        }

        // Sixth step.
        let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
        let left = value & mask;
        let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
        let right = shifted & mask;
        let value = left + right;

        self.context.new_bitcast(None, value, result_type)
    }

    // Algorithm from: https://blog.regehr.org/archives/1063
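    // This form is branch-free and avoids shifting by the full bit width (which
    // is undefined behavior): e.g. for width == 8,
    // rotate_left(0b1000_0001, 1) == (0b1000_0001 << 1) | (0b1000_0001 >> 7)
    //                             == 0b0000_0011.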
    fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
        let shift = shift % max;
        let lhs = self.shl(value, shift);
        let result_and =
            self.and(
                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
            );
        let rhs = self.lshr(value, result_and);
        self.or(lhs, rhs)
    }

    // Algorithm from: https://blog.regehr.org/archives/1063
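    // Mirror of rotate_left: rotr(x, s) == (x >> (s % w)) | (x << (-s & (w - 1))).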
    fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
        let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
        let shift = shift % max;
        let lhs = self.lshr(value, shift);
        let result_and =
            self.and(
                self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
                self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
            );
        let rhs = self.shl(value, result_and);
        self.or(lhs, rhs)
    }

    fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
        let func = self.current_func.borrow().expect("func");

        if signed {
            // Algorithm from: https://stackoverflow.com/a/56531252/389119
            let after_block = func.new_block("after");
            let func_name =
                match width {
                    8 => "__builtin_add_overflow",
                    16 => "__builtin_add_overflow",
                    32 => "__builtin_sadd_overflow",
                    64 => "__builtin_saddll_overflow",
                    128 => "__builtin_add_overflow",
                    _ => unreachable!(),
                };
            let overflow_func = self.context.get_builtin_function(func_name);
            let result_type = lhs.get_type();
            let res = func.new_local(None, result_type, "saturating_sum");
            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);

            let then_block = func.new_block("then");

            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
                self.context.new_rvalue_from_int(unsigned_type, 0)
            );
            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
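            // `shifted` is 0 when lhs is non-negative and 1 when it is negative, so
            // on overflow `shifted + int_max` yields INT_MAX for positive overflow
            // and wraps to INT_MIN for negative overflow.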
            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
            then_block.end_with_jump(None, after_block);

            self.llbb().end_with_conditional(None, overflow, then_block, after_block);

            // NOTE: since jumps were added in a place rustc does not
            // expect, the current blocks in the state need to be updated.
            *self.current_block.borrow_mut() = Some(after_block);
            self.block = Some(after_block);

            res.to_rvalue()
        }
        else {
            // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
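            // If the unsigned addition wrapped, then res < lhs, the comparison is 1
            // and its arithmetic negation is all-ones. E.g. for u8: 200 + 100 wraps
            // to 44, and 44 | 0xFF == 0xFF, the saturated result.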
            let res = lhs + rhs;
            let res_type = res.get_type();
            let cond = self.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
            let value = self.context.new_unary_op(None, UnaryOp::Minus, res_type, self.context.new_cast(None, cond, res_type));
            res | value
        }
    }

    // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
    fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
        if signed {
            // Also based on algorithm from: https://stackoverflow.com/a/56531252/389119
            let func_name =
                match width {
                    8 => "__builtin_sub_overflow",
                    16 => "__builtin_sub_overflow",
                    32 => "__builtin_ssub_overflow",
                    64 => "__builtin_ssubll_overflow",
                    128 => "__builtin_sub_overflow",
                    _ => unreachable!(),
                };
            let overflow_func = self.context.get_builtin_function(func_name);
            let result_type = lhs.get_type();
            let func = self.current_func.borrow().expect("func");
            let res = func.new_local(None, result_type, "saturating_diff");
            let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);

            let then_block = func.new_block("then");
            let after_block = func.new_block("after");

            let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
            let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
            let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
                self.context.new_rvalue_from_int(unsigned_type, 0)
            );
            let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
            then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
            then_block.end_with_jump(None, after_block);

            self.llbb().end_with_conditional(None, overflow, then_block, after_block);

            // NOTE: since jumps were added in a place rustc does not
            // expect, the current blocks in the state need to be updated.
            *self.current_block.borrow_mut() = Some(after_block);
            self.block = Some(after_block);

            res.to_rvalue()
        }
        else {
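            // If the unsigned subtraction wrapped, then res > lhs, the comparison
            // `res <= lhs` is 0, and `res & -0 == 0`, the saturated minimum.
            // E.g. for u8: 10 - 20 wraps to 246, and 246 & 0x00 == 0.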
            let res = lhs - rhs;
            let comparison = self.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
            let comparison = self.context.new_cast(None, comparison, lhs.get_type());
            let unary_op = self.context.new_unary_op(None, UnaryOp::Minus, comparison.get_type(), comparison);
            self.and(res, unary_op)
        }
    }
}

fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
    if bx.sess().panic_strategy() == PanicStrategy::Abort {
        bx.call(bx.type_void(), try_func, &[data], None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        let ret_align = bx.tcx.data_layout.i32_align.abi;
        bx.store(bx.const_i32(0), dest, ret_align);
    }
    else if wants_msvc_seh(bx.sess()) {
        unimplemented!();
    }
    else {
        unimplemented!();
    }
}