use std::borrow::Cow;
use std::cell::Cell;
use std::convert::TryFrom;
use std::ops::Deref;

use gccjit::FunctionType;
use gccjit::{
    BinaryOp,
    Block,
    ComparisonOp,
    Function,
    LValue,
    RValue,
    ToRValue,
    Type,
    UnaryOp,
};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{
    BackendTypes,
    BaseTypeMethods,
    BuilderMethods,
    ConstMethods,
    DerivedTypeMethods,
    LayoutTypeMethods,
    HasCodegen,
    OverflowOp,
    StaticBuilderMethods,
};
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_span::Span;
use rustc_span::def_id::DefId;
use rustc_target::abi::{
    self,
    call::FnAbi,
    Align,
    HasDataLayout,
    Size,
    TargetDataLayout,
    WrappingRange,
};
use rustc_target::spec::{HasTargetSpec, Target};

use crate::common::{SignType, TypeReflection, type_is_pointer};
use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;

// TODO(antoyo)
type Funclet = ();

// TODO(antoyo): remove this variable.
static mut RETURN_VALUE_COUNT: usize = 0;

enum ExtremumOperation {
    Max,
    Min,
}

trait EnumClone {
    fn clone(&self) -> Self;
}

impl EnumClone for AtomicOrdering {
    fn clone(&self) -> Self {
        match *self {
            AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
            AtomicOrdering::Unordered => AtomicOrdering::Unordered,
            AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
            AtomicOrdering::Acquire => AtomicOrdering::Acquire,
            AtomicOrdering::Release => AtomicOrdering::Release,
            AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
            AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
        }
    }
}

pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
    pub cx: &'a CodegenCx<'gcc, 'tcx>,
    pub block: Option<Block<'gcc>>,
    stack_var_count: Cell<usize>,
}

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
        Builder {
            cx,
            block: None,
            stack_var_count: Cell::new(0),
        }
    }

    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
        let size = self.cx.int_width(src.get_type()) / 8;

        let func = self.current_func();

        let load_ordering =
            match order {
                // TODO(antoyo): does this make sense?
                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
                _ => order.clone(),
            };
        let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
        self.llbb().add_assignment(None, previous_var, previous_value);
        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());

        let while_block = func.new_block("while");
        let after_block = func.new_block("after_while");
        self.llbb().end_with_jump(None, while_block);

        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current blocks in the
        // state need to be updated.
        self.block = Some(while_block);
        *self.cx.current_block.borrow_mut() = Some(while_block);

        let comparison_operator =
            match operation {
                ExtremumOperation::Max => ComparisonOp::LessThan,
                ExtremumOperation::Min => ComparisonOp::GreaterThan,
            };

        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
        let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);

        while_block.end_with_conditional(None, cond, while_block, after_block);

        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
        // state need to be updated.
        self.block = Some(after_block);
        *self.cx.current_block.borrow_mut() = Some(after_block);

        return_value.to_rvalue()
    }
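
    // Conceptually, atomic_extremum lowers an atomic max/min to a compare-and-swap loop.
    // A rough C sketch of the control flow built above (not the exact libgccjit output):
    //
    //     T previous_value = __atomic_load_N(dst, load_ordering);
    //     T return_value = previous_value;
    //     while (previous_value < (T)src /* or > for Min */
    //            && !__atomic_compare_exchange_N(dst, &previous_value, src, false, order, load_ordering))
    //     {} // a failed CAS writes the current value back into previous_value, so the loop retries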

    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
        let size = self.cx.int_width(src.get_type());
        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);

        let void_ptr_type = self.context.new_type::<*mut ()>();
        let volatile_void_ptr_type = void_ptr_type.make_volatile();
        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
        let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);

        // NOTE: not sure why, but we have the wrong type here.
        let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
        let src = self.context.new_cast(None, src, int_type);
        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
    }
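
    // For reference, the GCC builtin called above has roughly this C prototype:
    //
    //     bool __atomic_compare_exchange_N(type *ptr, type *expected, type desired,
    //                                      bool weak, int success_memorder, int failure_memorder);
    //
    // On failure, the builtin writes the current value of *ptr back into *expected, which is
    // what allows the retry loop in atomic_extremum to make progress.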

    pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
        self.llbb().add_assignment(None, lvalue, value);
    }

    fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
        let mut all_args_match = true;
        let mut param_types = vec![];
        let param_count = func.get_param_count();
        for (index, arg) in args.iter().enumerate().take(param_count) {
            let param = func.get_param(index as i32);
            let param = param.to_rvalue().get_type();
            if param != arg.get_type() {
                all_args_match = false;
            }
            param_types.push(param);
        }

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_types
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(_i, (expected_ty, &actual_val))| {
                let actual_ty = actual_val.get_type();
                if expected_ty != actual_ty {
                    self.bitcast(actual_val, expected_ty)
                }
                else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
        let mut all_args_match = true;
        let mut param_types = vec![];
        let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
        for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
            let param = gcc_func.get_param_type(index);
            if param != arg.get_type() {
                all_args_match = false;
            }
            param_types.push(param);
        }

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_types
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(_i, (expected_ty, &actual_val))| {
                let actual_ty = actual_val.get_type();
                if expected_ty != actual_ty {
                    self.bitcast(actual_val, expected_ty)
                }
                else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
        let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        }
        else {
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

    pub fn current_func(&self) -> Function<'gcc> {
        self.block.expect("block").get_function()
    }

    fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        // TODO(antoyo): remove when the API supports a different type for functions.
        let func: Function<'gcc> = self.cx.rvalue_as_function(func);
        let args = self.check_call("call", func, args);

        // gccjit requires the result of a function call to be used, even when it is not needed.
        // That's why we assign the result to a local or call add_eval().
        let return_type = func.get_return_type();
        let current_block = self.current_block.borrow().expect("block");
        let void_type = self.context.new_type::<()>();
        let current_func = current_block.get_function();
        if return_type != void_type {
            unsafe { RETURN_VALUE_COUNT += 1 };
            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
            current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
            result.to_rvalue()
        }
        else {
            current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
            // Return a dummy value when there is no return value.
            self.context.new_rvalue_from_long(self.isize_type, 0)
        }
    }
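
    // In other words, a call is always emitted either as an assignment or as a bare
    // evaluation; roughly (a sketch of the generated GCC code, not its exact form):
    //
    //     returnValueN = func(args...);  // non-void return type
    //     (void) func(args...);          // void return type, via add_eval()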

    fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        let args = self.check_ptr_call("call", func_ptr, args);

        // gccjit requires the result of a function call to be used, even when it is not needed.
        // That's why we assign the result to a local or call add_eval().
        let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
        let mut return_type = gcc_func.get_return_type();
        let current_block = self.current_block.borrow().expect("block");
        let void_type = self.context.new_type::<()>();
        let current_func = current_block.get_function();

        // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
        if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
            return_type = self.int_type;
        }

        if return_type != void_type {
            unsafe { RETURN_VALUE_COUNT += 1 };
            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
            current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
            result.to_rvalue()
        }
        else {
            if gcc_func.get_param_count() == 0 {
                // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
                current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
            }
            else {
                current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
            }
            // Return a dummy value when there is no return value.
            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
            current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
            result.to_rvalue()
        }
    }

    pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        // gccjit requires the result of a function call to be used, even when it is not needed.
        // That's why we assign the result to a local.
        let return_type = self.context.new_type::<bool>();
        let current_block = self.current_block.borrow().expect("block");
        let current_func = current_block.get_function();
        // TODO(antoyo): return the new_call() directly, since the overflow function has no side effects?
        unsafe { RETURN_VALUE_COUNT += 1 };
        let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
        current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
        result.to_rvalue()
    }
}

impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
    type CodegenCx = CodegenCx<'gcc, 'tcx>;
}

impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx()
    }
}

impl HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.cx.data_layout()
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type LayoutOfResult = TyAndLayout<'tcx>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
        self.cx.handle_layout_err(err, span, ty)
    }
}

impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;

    #[inline]
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> ! {
        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
    }
}

impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
    type Target = CodegenCx<'gcc, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
    type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
    type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
    type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
}

impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
        let mut bx = Builder::with_cx(cx);
        *cx.current_block.borrow_mut() = Some(block);
        bx.block = Some(block);
        bx
    }

    fn build_sibling_block(&mut self, name: &str) -> Self {
        let block = self.append_sibling_block(name);
        Self::build(self.cx, block)
    }

    fn llbb(&self) -> Block<'gcc> {
        self.block.expect("block")
    }

    fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
        let func = cx.rvalue_as_function(func);
        func.new_block(name)
    }

    fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
        let func = self.current_func();
        func.new_block(name)
    }

    fn ret_void(&mut self) {
        self.llbb().end_with_void_return(None)
    }

    fn ret(&mut self, value: RValue<'gcc>) {
        let value =
            if self.structs_as_pointer.borrow().contains(&value) {
                // NOTE: hack to workaround a limitation of the rustc API: see comment on
                // CodegenCx.structs_as_pointer
                value.dereference(None).to_rvalue()
            }
            else {
                value
            };
        self.llbb().end_with_return(None, value);
    }

    fn br(&mut self, dest: Block<'gcc>) {
        self.llbb().end_with_jump(None, dest)
    }

    fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
        self.llbb().end_with_conditional(None, cond, then_block, else_block)
    }

    fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
        let mut gcc_cases = vec![];
        let typ = self.val_ty(value);
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(typ, on_val);
            gcc_cases.push(self.context.new_case(on_val, on_val, dest));
        }
        self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
    }

    fn invoke(&mut self, _typ: Type<'gcc>, _func: RValue<'gcc>, _args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
        let condition = self.context.new_rvalue_from_int(self.bool_type, 0);
        self.llbb().end_with_conditional(None, condition, then, catch);
        self.context.new_rvalue_from_int(self.int_type, 0)

        // TODO(antoyo)
    }

    fn unreachable(&mut self) {
        let func = self.context.get_builtin_function("__builtin_unreachable");
        let block = self.block.expect("block");
        block.add_eval(None, self.context.new_call(None, func, &[]));
        let return_type = block.get_function().get_return_type();
        let void_type = self.context.new_type::<()>();
        if return_type == void_type {
            block.end_with_void_return(None)
        }
        else {
            let return_value = self.current_func()
                .new_local(None, return_type, "unreachableReturn");
            block.end_with_return(None, return_value)
        }
    }

    fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
        // FIXME(antoyo): this should not be required.
        if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
            b = self.context.new_cast(None, b, a.get_type());
        }
        a + b
    }

    fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a + b
    }

    fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
        if a.get_type() != b.get_type() {
            b = self.context.new_cast(None, b, a.get_type());
        }
        a - b
    }

    fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a - b
    }

    fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a * b
    }

    fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a * b
    }

    fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): convert the arguments to unsigned?
        a / b
    }

    fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): convert the arguments to unsigned?
        // TODO(antoyo): poison if not exact.
        a / b
    }

    fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): convert the arguments to signed?
        a / b
    }

    fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): poison if not exact.
        // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
        // should be the same.
        let typ = a.get_type().to_signed(self);
        let b = self.context.new_cast(None, b, typ);
        a / b
    }

    fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a / b
    }

    fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a % b
    }

    fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a % b
    }

    fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        if a.get_type() == self.cx.float_type {
            let fmodf = self.context.get_builtin_function("fmodf");
            // FIXME(antoyo): this seems to produce the wrong result.
            return self.context.new_call(None, fmodf, &[a, b]);
        }
        assert_eq!(a.get_type(), self.cx.double_type);

        let fmod = self.context.get_builtin_function("fmod");
        return self.context.new_call(None, fmod, &[a, b]);
    }

    fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
        let a_type = a.get_type();
        let b_type = b.get_type();
        if a_type.is_unsigned(self) && b_type.is_signed(self) {
            let a = self.context.new_cast(None, a, b_type);
            let result = a << b;
            self.context.new_cast(None, result, a_type)
        }
        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
            let b = self.context.new_cast(None, b, a_type);
            a << b
        }
        else {
            a << b
        }
    }

    fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
        // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
        let a_type = a.get_type();
        let b_type = b.get_type();
        if a_type.is_unsigned(self) && b_type.is_signed(self) {
            let a = self.context.new_cast(None, a, b_type);
            let result = a >> b;
            self.context.new_cast(None, result, a_type)
        }
        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
            let b = self.context.new_cast(None, b, a_type);
            a >> b
        }
        else {
            a >> b
        }
    }

    fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check whether the behavior of >> is an arithmetic shift.
        // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
        let a_type = a.get_type();
        let b_type = b.get_type();
        if a_type.is_unsigned(self) && b_type.is_signed(self) {
            let a = self.context.new_cast(None, a, b_type);
            let result = a >> b;
            self.context.new_cast(None, result, a_type)
        }
        else if a_type.is_signed(self) && b_type.is_unsigned(self) {
            let b = self.context.new_cast(None, b, a_type);
            a >> b
        }
        else {
            a >> b
        }
    }
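
    // NOTE on the TODO above: GCC documents that >> on a signed operand sign-extends (an
    // arithmetic shift) while >> on an unsigned operand is a logical shift, so the casts
    // should give the intended semantics; this has not been verified on every target.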

    fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
        // FIXME(antoyo): hack that puts the result in a variable to work around this bug:
        // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
        if a.get_type() != b.get_type() {
            b = self.context.new_cast(None, b, a.get_type());
        }
        let res = self.current_func().new_local(None, b.get_type(), "andResult");
        self.llbb().add_assignment(None, res, a & b);
        res.to_rvalue()
    }

    fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // FIXME(antoyo): hack that puts the result in a variable to work around this bug:
        // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
        let res = self.current_func().new_local(None, b.get_type(), "orResult");
        self.llbb().add_assignment(None, res, a | b);
        res.to_rvalue()
    }

    fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a ^ b
    }

    fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): use new_unary_op()?
        self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a
    }

    fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
    }

    fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        let operation =
            if a.get_type().is_bool() {
                UnaryOp::LogicalNegate
            }
            else {
                UnaryOp::BitwiseNegate
            };
        self.cx.context.new_unary_op(None, operation, a.get_type(), a)
    }

    fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a + b
    }

    fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a + b
    }

    fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a - b
    }

    fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): should generate poison value?
        a - b
    }

    fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a * b
    }

    fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a * b
    }

    fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
        use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};

        let new_kind =
            match typ.kind() {
                Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
                Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
                t @ (Uint(_) | Int(_)) => t.clone(),
                _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
            };

        // TODO(antoyo): remove duplication with intrinsic?
        let name =
            match oop {
                OverflowOp::Add =>
                    match new_kind {
                        Int(I8) => "__builtin_add_overflow",
                        Int(I16) => "__builtin_add_overflow",
                        Int(I32) => "__builtin_sadd_overflow",
                        Int(I64) => "__builtin_saddll_overflow",
                        Int(I128) => "__builtin_add_overflow",

                        Uint(U8) => "__builtin_add_overflow",
                        Uint(U16) => "__builtin_add_overflow",
                        Uint(U32) => "__builtin_uadd_overflow",
                        Uint(U64) => "__builtin_uaddll_overflow",
                        Uint(U128) => "__builtin_add_overflow",

                        _ => unreachable!(),
                    },
                OverflowOp::Sub =>
                    match new_kind {
                        Int(I8) => "__builtin_sub_overflow",
                        Int(I16) => "__builtin_sub_overflow",
                        Int(I32) => "__builtin_ssub_overflow",
                        Int(I64) => "__builtin_ssubll_overflow",
                        Int(I128) => "__builtin_sub_overflow",

                        Uint(U8) => "__builtin_sub_overflow",
                        Uint(U16) => "__builtin_sub_overflow",
                        Uint(U32) => "__builtin_usub_overflow",
                        Uint(U64) => "__builtin_usubll_overflow",
                        Uint(U128) => "__builtin_sub_overflow",

                        _ => unreachable!(),
                    },
                OverflowOp::Mul =>
                    match new_kind {
                        Int(I8) => "__builtin_mul_overflow",
                        Int(I16) => "__builtin_mul_overflow",
                        Int(I32) => "__builtin_smul_overflow",
                        Int(I64) => "__builtin_smulll_overflow",
                        Int(I128) => "__builtin_mul_overflow",

                        Uint(U8) => "__builtin_mul_overflow",
                        Uint(U16) => "__builtin_mul_overflow",
                        Uint(U32) => "__builtin_umul_overflow",
                        Uint(U64) => "__builtin_umulll_overflow",
                        Uint(U128) => "__builtin_mul_overflow",

                        _ => unreachable!(),
                    },
            };

        let intrinsic = self.context.get_builtin_function(&name);
        let res = self.current_func()
            // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
            .new_local(None, rhs.get_type(), "binopResult")
            .get_address(None);
        let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
        (res.dereference(None).to_rvalue(), overflow)
    }
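
    // For reference, the GCC overflow builtins used above have roughly this C prototype:
    //
    //     bool __builtin_add_overflow(type1 a, type2 b, type3 *res);
    //
    // (likewise for sub/mul): the result is written through res and the return value says
    // whether the operation overflowed, which maps directly to the (value, overflow) pair
    // returned here.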

    fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
        // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
        // Ideally, we shouldn't need to do this check.
        let aligned_type =
            if ty == self.cx.u128_type || ty == self.cx.i128_type {
                ty
            }
            else {
                ty.get_aligned(align.bytes())
            };
        // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
        self.stack_var_count.set(self.stack_var_count.get() + 1);
        self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
    }

    fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
        unimplemented!();
    }

    fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
        unimplemented!();
    }

    fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
        // TODO(antoyo): use ty.
        let block = self.llbb();
        let function = block.get_function();
        // NOTE: instead of returning the dereference here, we have to assign it to a variable in
        // the current basic block. Otherwise, it could be used in another basic block, causing a
        // dereference after a drop, for instance.
        // TODO(antoyo): handle align.
        let deref = ptr.dereference(None).to_rvalue();
        let value_type = deref.get_type();
        unsafe { RETURN_VALUE_COUNT += 1 };
        let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
        block.add_assignment(None, loaded_value, deref);
        loaded_value.to_rvalue()
    }

    fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): use ty.
        let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
        ptr.dereference(None).to_rvalue()
    }

    fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
        // TODO(antoyo): use ty.
        // TODO(antoyo): handle alignment.
        let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());

        let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
        self.context.new_call(None, atomic_load, &[ptr, ordering])
    }
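
    // For reference, the GCC builtin has roughly this C prototype:
    //
    //     type __atomic_load_N(const type *ptr, int memorder);
    //
    // hence the cast of ptr to a const volatile void pointer above.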

    fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                abi::Int(..) => {
                    if !scalar.is_always_valid(bx) {
                        bx.range_metadata(load, scalar.valid_range);
                    }
                }
                abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val =
            if let Some(llextra) = place.llextra {
                OperandValue::Ref(place.llval, Some(llextra), place.align)
            }
            else if place.layout.is_gcc_immediate() {
                let load = self.load(place.llval.get_type(), place.llval, place.align);
                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                OperandValue::Immediate(self.to_immediate(load, place.layout))
            }
            else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
                let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
                let pair_type = place.layout.gcc_type(self, false);

                let mut load = |i, scalar: &abi::Scalar, align| {
                    let llptr = self.struct_gep(pair_type, place.llval, i as u64);
                    let load = self.load(llptr.get_type(), llptr, align);
                    scalar_load_metadata(self, load, scalar);
                    if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
                };

                OperandValue::Pair(
                    load(0, a, place.align),
                    load(1, b, place.align.restrict_for_offset(b_offset)),
                )
            }
            else {
                OperandValue::Ref(place.llval, None, place.align)
            };

        OperandRef { val, layout: place.layout }
    }

    fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;

        let mut header_bx = self.build_sibling_block("repeat_loop_header");
        let mut body_bx = self.build_sibling_block("repeat_loop_body");
        let next_bx = self.build_sibling_block("repeat_loop_next");

        let ptr_type = start.get_type();
        let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
        let current_val = current.to_rvalue();
        self.assign(current, start);

        self.br(header_bx.llbb());

        let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end);
        header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());

        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));

        let next = body_bx.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
        body_bx.llbb().add_assignment(None, current, next);
        body_bx.br(header_bx.llbb());

        next_bx
    }
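
    // The blocks built above form a simple pointer-bumping loop; roughly (a sketch of the
    // control flow, not the exact generated code):
    //
    //     loop_var = &dest[0];
    //     header: if (loop_var != &dest[count]) goto body; else goto next;
    //     body:   *loop_var = cg_elem; loop_var = &loop_var[1]; goto header;
    //     next:   ...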

    fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
        // TODO(antoyo)
    }

    fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
        // TODO(antoyo)
    }

    fn type_metadata(&mut self, _function: RValue<'gcc>, _typeid: String) {
        // Unsupported.
    }

    fn typeid_metadata(&mut self, _typeid: String) -> RValue<'gcc> {
        // Unsupported.
        self.context.new_rvalue_from_int(self.int_type, 0)
    }

    fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
        let ptr = self.check_store(val, ptr);
        self.llbb().add_assignment(None, ptr.dereference(None), val);
        // TODO(antoyo): handle align and flags.
        // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
        self.cx.context.new_rvalue_zero(self.type_i32())
    }

    fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
        // TODO(antoyo): handle alignment.
        let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);

        // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
        // the following cast is required to avoid this error:
        // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int  __attribute__((aligned(4))))
        let int_type = atomic_store.get_param(1).to_rvalue().get_type();
        let value = self.context.new_cast(None, value, int_type);
        self.llbb()
            .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
    }
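
    // For reference, the GCC builtin has roughly this C prototype:
    //
    //     void __atomic_store_N(type *ptr, type val, int memorder);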

    fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
        let mut result = ptr;
        for index in indices {
            result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
        }
        result
    }

    fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
        // FIXME(antoyo): it would be safer to do the same thing (a loop) as in gep.
        // TODO(antoyo): specify inbounds somehow.
        match indices.len() {
            1 => {
                self.context.new_array_access(None, ptr, indices[0]).get_address(None)
            },
            2 => {
                let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
                self.context.new_array_access(None, array, indices[1]).get_address(None)
            },
            _ => unimplemented!(),
        }
    }
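
    // For example, a two-index GEP such as `gep ptr, [0, i]` is translated above to the
    // equivalent of `&(*ptr)[i]` in C: the first index (assumed to be 0) steps through the
    // pointer itself and the second one indexes into the pointed-to array.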
978 
struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc>979     fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
980         // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
981         assert_eq!(idx as usize as u64, idx);
982         let value = ptr.dereference(None).to_rvalue();
983 
984         if value_type.is_array().is_some() {
985             let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
986             let element = self.context.new_array_access(None, value, index);
987             element.get_address(None)
988         }
989         else if let Some(vector_type) = value_type.is_vector() {
990             let array_type = vector_type.get_element_type().make_pointer();
991             let array = self.bitcast(ptr, array_type);
992             let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
993             let element = self.context.new_array_access(None, array, index);
994             element.get_address(None)
995         }
996         else if let Some(struct_type) = value_type.is_struct() {
997             ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
998         }
999         else {
1000             panic!("Unexpected type {:?}", value_type);
1001         }
1002     }
1003 
1004     /* Casts */
trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1005     fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1006         // TODO(antoyo): check that it indeed truncate the value.
1007         self.context.new_cast(None, value, dest_ty)
1008     }
1009 
sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1010     fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1011         // TODO(antoyo): check that it indeed sign extend the value.
1012         if dest_ty.is_vector().is_some() {
1013             // TODO(antoyo): nothing to do as it is only for LLVM?
1014             return value;
1015         }
1016         self.context.new_cast(None, value, dest_ty)
1017     }
1018 
fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1019     fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1020         self.context.new_cast(None, value, dest_ty)
1021     }
1022 
fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1023     fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1024         self.context.new_cast(None, value, dest_ty)
1025     }
1026 
uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1027     fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1028         self.context.new_cast(None, value, dest_ty)
1029     }
1030 
sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1031     fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1032         self.context.new_cast(None, value, dest_ty)
1033     }
1034 
fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1035     fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1036         // TODO(antoyo): make sure it truncates.
1037         self.context.new_cast(None, value, dest_ty)
1038     }
1039 
fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1040     fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1041         self.context.new_cast(None, value, dest_ty)
1042     }
1043 
ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1044     fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1045         self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
1046     }
1047 
inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1048     fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1049         self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
1050     }
1051 
bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1052     fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1053         self.cx.const_bitcast(value, dest_ty)
1054     }
1055 
intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc>1056     fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
1057         // NOTE: is_signed is for value, not dest_typ.
1058         self.cx.context.new_cast(None, value, dest_typ)
1059     }
1060 
pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc>1061     fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1062         let val_type = value.get_type();
1063         match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
1064             (false, true) => {
1065                 // NOTE: Projecting a field of a pointer type will attemp a cast from a signed char to
1066                 // a pointer, which is not supported by gccjit.
1067                 return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
1068             },
1069             (false, false) => {
1070                 // When they are not pointers, we want a transmute (or reinterpret_cast).
1071                 self.bitcast(value, dest_ty)
1072             },
1073             (true, true) => self.cx.context.new_cast(None, value, dest_ty),
1074             (true, false) => unimplemented!(),
1075         }
1076     }
1077 
1078     /* Comparisons */
icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc>1079     fn icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
1080         let left_type = lhs.get_type();
1081         let right_type = rhs.get_type();
1082         if left_type != right_type {
1083             // NOTE: because libgccjit cannot compare function pointers.
1084             if left_type.is_function_ptr_type().is_some() && right_type.is_function_ptr_type().is_some() {
1085                 lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
1086                 rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
1087             }
1088             // NOTE: hack because we try to cast a vector type to the same vector type.
1089             else if format!("{:?}", left_type) != format!("{:?}", right_type) {
1090                 rhs = self.context.new_cast(None, rhs, left_type);
1091             }
1092         }
1093         self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
1094     }
1095 
fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc>1096     fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
1097         self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
1098     }
1099 
1100     /* Miscellaneous instructions */
    fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let val = self.load(src.get_type(), src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_size_t(), false);
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
        let memcpy = self.context.get_builtin_function("memcpy");
        let block = self.block.expect("block");
        // TODO(antoyo): handle aligns and is_volatile.
        block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
    }

    fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src.get_type(), src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_size_t(), false);
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));

        let memmove = self.context.get_builtin_function("memmove");
        let block = self.block.expect("block");
        // TODO(antoyo): handle is_volatile.
        block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
    }

    fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        let memset = self.context.get_builtin_function("memset");
        let block = self.block.expect("block");
        // TODO(antoyo): handle align and is_volatile.
        let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
        let size = self.intcast(size, self.type_size_t(), false);
        block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
    }

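    // NOTE: gccjit has no select instruction, so a select is lowered to a
    // conditional branch that assigns one of the two values to a fresh local,
    // with both paths joining in a new "after" block.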
    fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
        let func = self.current_func();
        let variable = func.new_local(None, then_val.get_type(), "selectVar");
        let then_block = func.new_block("then");
        let else_block = func.new_block("else");
        let after_block = func.new_block("after");
        self.llbb().end_with_conditional(None, cond, then_block, else_block);

        then_block.add_assignment(None, variable, then_val);
        then_block.end_with_jump(None, after_block);

        if then_val.get_type() != else_val.get_type() {
            else_val = self.context.new_cast(None, else_val, then_val.get_type());
        }
        else_block.add_assignment(None, variable, else_val);
        else_block.end_with_jump(None, after_block);

        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
        // state need to be updated.
        self.block = Some(after_block);
        *self.cx.current_block.borrow_mut() = Some(after_block);

        variable.to_rvalue()
    }

    #[allow(dead_code)]
    fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

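    // Extract the element at `idx` from an aggregate: array elements are returned
    // by address, struct fields by value, and a pointer to a struct is
    // dereferenced first (see the comment on CodegenCx.structs_as_pointer).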
    fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
        // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
        assert_eq!(idx as usize as u64, idx);
        let value_type = aggregate_value.get_type();

        if value_type.is_array().is_some() {
            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
            let element = self.context.new_array_access(None, aggregate_value, index);
            element.get_address(None)
        }
        else if value_type.is_vector().is_some() {
            panic!();
        }
        else if let Some(pointer_type) = value_type.get_pointee() {
            if let Some(struct_type) = pointer_type.is_struct() {
                // NOTE: hack to work around a limitation of the rustc API: see comment on
                // CodegenCx.structs_as_pointer
                aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
            }
            else {
                panic!("Unexpected type {:?}", value_type);
            }
        }
        else if let Some(struct_type) = value_type.is_struct() {
            aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
        }
        else {
            panic!("Unexpected type {:?}", value_type);
        }
    }

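    // Store `value` at `idx` in an aggregate and return the aggregate, casting
    // the value first when rustc hands us one with a mismatched type.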
    fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
        // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
        assert_eq!(idx as usize as u64, idx);
        let value_type = aggregate_value.get_type();

        let lvalue =
            if value_type.is_array().is_some() {
                let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
                self.context.new_array_access(None, aggregate_value, index)
            }
            else if value_type.is_vector().is_some() {
                panic!();
            }
            else if let Some(pointer_type) = value_type.get_pointee() {
                if let Some(struct_type) = pointer_type.is_struct() {
                    // NOTE: hack to work around a limitation of the rustc API: see comment on
                    // CodegenCx.structs_as_pointer
                    aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
                }
                else {
                    panic!("Unexpected type {:?}", value_type);
                }
            }
            else {
                panic!("Unexpected type {:?}", value_type);
            };

        let lvalue_type = lvalue.to_rvalue().get_type();
        let value =
            // NOTE: sometimes, rustc will create a value with the wrong type.
            if lvalue_type != value.get_type() {
                self.context.new_cast(None, value, lvalue_type)
            }
            else {
                value
            };

        self.llbb().add_assignment(None, lvalue, value);

        aggregate_value
    }

    fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> {
        let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1");
        let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_2");
        let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
        self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
            .to_rvalue()
        // TODO(antoyo): Properly implement unwinding.
        // The code above only exists to make compilation work: rustc_codegen_ssa now
        // calls the unwinding builder methods even with panic=abort.
    }

    fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
        // TODO(antoyo)
    }

    fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
        unimplemented!();
    }

    fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
        unimplemented!();
    }

    fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
        unimplemented!();
    }

    fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
        unimplemented!();
    }

    fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
        // TODO(antoyo)
    }

    // Atomic Operations
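    // Lower a compare-and-swap to `compare_exchange` and repack the outcome into
    // the (value, success) pair that rustc_codegen_ssa expects.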
    fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
        self.llbb().add_assignment(None, expected, cmp);
        let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);

        let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
        let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
        let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align.

        let value_type = result.to_rvalue().get_type();
        if let Some(struct_type) = value_type.is_struct() {
            self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
            // NOTE: since success contains the call to the intrinsic, it must be stored before
            // expected so that we store expected after the call.
            self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
        }
        // TODO(antoyo): handle when value is not a struct.

        result.to_rvalue()
    }

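    // Lower an atomic read-modify-write to the matching `__atomic_*_<size>` GCC
    // builtin; min/max have no such builtin, so they go through `atomic_extremum`
    // instead.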
    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
        let size = self.cx.int_width(src.get_type()) / 8;
        let name =
            match op {
                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
            };

        let atomic_function = self.context.get_builtin_function(name);
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());

        let void_ptr_type = self.context.new_type::<*mut ()>();
        let volatile_void_ptr_type = void_ptr_type.make_volatile();
        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
        // FIXME(antoyo): not sure why, but we have the wrong type here.
        let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
        let src = self.context.new_cast(None, src, new_src_type);
        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
        self.context.new_cast(None, res, src.get_type())
    }

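    // NOTE: a single-thread fence only needs to prevent compiler reordering
    // (`__atomic_signal_fence`), while a cross-thread fence emits a full
    // `__atomic_thread_fence`.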
    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
        let name =
            match scope {
                SynchronizationScope::SingleThread => "__atomic_signal_fence",
                SynchronizationScope::CrossThread => "__atomic_thread_fence",
            };
        let thread_fence = self.context.get_builtin_function(name);
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
    }

    fn set_invariant_load(&mut self, load: RValue<'gcc>) {
        // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
        self.normal_function_addresses.borrow_mut().insert(load);
        // TODO(antoyo)
    }

    fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
        // TODO(antoyo)
    }

    fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
        // TODO(antoyo)
    }

    fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
        // FIXME(antoyo): remove when having a proper API.
        let gcc_func = unsafe { std::mem::transmute(func) };
        if self.functions.borrow().values().any(|value| *value == gcc_func) {
            self.function_call(func, args, funclet)
        }
        else {
            // If it's not a function that was defined, it's a function pointer.
            self.function_ptr_call(func, args, funclet)
        }
    }

    fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        // FIXME(antoyo): this does not zero-extend.
        if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
            // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
            // Fix the code in codegen_ssa::base::from_immediate.
            return value;
        }
        self.context.new_cast(None, value, dest_typ)
    }

    fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
        self.cx
    }

    fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
        unimplemented!();
    }

    fn set_span(&mut self, _span: Span) {}

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        }
        else {
            val
        }
    }

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.trunc(val, self.cx().type_i1());
        }
        val
    }

    fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
        None
    }

    fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
        None
    }

    fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
        unimplemented!();
    }
}

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
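    // NOTE: this simply declares `_mm_shuffle_epi8` as an extern function and
    // calls it; the declaration assumes that both input vectors have the same
    // type as the result.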
    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
        let return_type = v1.get_type();
        let params = [
            self.context.new_parameter(None, return_type, "v1"),
            self.context.new_parameter(None, return_type, "v2"),
            self.context.new_parameter(None, mask.get_type(), "mask"),
        ];
        let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
        self.context.new_call(None, shuffle, &[v1, v2, mask])
    }
}

impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
        // Forward to the `get_static` method of `CodegenCx`.
        self.cx().get_static(def_id).get_address(None)
    }
}

impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        self.cx.target_spec()
    }
}

trait ToGccComp {
    fn to_gcc_comparison(&self) -> ComparisonOp;
}

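// NOTE: the signed and unsigned variants map to the same ComparisonOp because
// gccjit derives the signedness of a comparison from its operand types.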
impl ToGccComp for IntPredicate {
    fn to_gcc_comparison(&self) -> ComparisonOp {
        match *self {
            IntPredicate::IntEQ => ComparisonOp::Equals,
            IntPredicate::IntNE => ComparisonOp::NotEquals,
            IntPredicate::IntUGT => ComparisonOp::GreaterThan,
            IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
            IntPredicate::IntULT => ComparisonOp::LessThan,
            IntPredicate::IntULE => ComparisonOp::LessThanEquals,
            IntPredicate::IntSGT => ComparisonOp::GreaterThan,
            IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
            IntPredicate::IntSLT => ComparisonOp::LessThan,
            IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
        }
    }
}

impl ToGccComp for RealPredicate {
    fn to_gcc_comparison(&self) -> ComparisonOp {
        // TODO(antoyo): check that ordered vs non-ordered is respected.
        match *self {
            RealPredicate::RealPredicateFalse => unreachable!(),
            RealPredicate::RealOEQ => ComparisonOp::Equals,
            RealPredicate::RealOGT => ComparisonOp::GreaterThan,
            RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
            RealPredicate::RealOLT => ComparisonOp::LessThan,
            RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
            RealPredicate::RealONE => ComparisonOp::NotEquals,
            RealPredicate::RealORD => unreachable!(),
            RealPredicate::RealUNO => unreachable!(),
            RealPredicate::RealUEQ => ComparisonOp::Equals,
            RealPredicate::RealUGT => ComparisonOp::GreaterThan,
            RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
            RealPredicate::RealULT => ComparisonOp::LessThan,
            RealPredicate::RealULE => ComparisonOp::LessThanEquals,
            RealPredicate::RealUNE => ComparisonOp::NotEquals,
            RealPredicate::RealPredicateTrue => unreachable!(),
        }
    }
}

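// This enum mirrors GCC's __ATOMIC_* memory order constants: #[repr(C)] and the
// declaration order keep the discriminants in sync with the values the
// __atomic_* builtins expect.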
#[repr(C)]
#[allow(non_camel_case_types)]
enum MemOrdering {
    __ATOMIC_RELAXED,
    __ATOMIC_CONSUME,
    __ATOMIC_ACQUIRE,
    __ATOMIC_RELEASE,
    __ATOMIC_ACQ_REL,
    __ATOMIC_SEQ_CST,
}

trait ToGccOrdering {
    fn to_gcc(self) -> i32;
}

impl ToGccOrdering for AtomicOrdering {
    fn to_gcc(self) -> i32 {
        use MemOrdering::*;

        let ordering =
            match self {
                AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
                AtomicOrdering::Unordered => __ATOMIC_RELAXED,
                AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
                AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
                AtomicOrdering::Release => __ATOMIC_RELEASE,
                AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
                AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
            };
        ordering as i32
    }
}