1 //! Legalization of heaps.
2 //!
3 //! This module exports the `expand_heap_addr` function which transforms a `heap_addr`
4 //! instruction into code that depends on the kind of heap referenced.
5 
6 use crate::cursor::{Cursor, FuncCursor};
7 use crate::flowgraph::ControlFlowGraph;
8 use crate::ir::condcodes::IntCC;
9 use crate::ir::{self, InstBuilder};
10 use crate::isa::TargetIsa;
11 
12 /// Expand a `heap_addr` instruction according to the definition of the heap.
expand_heap_addr( inst: ir::Inst, func: &mut ir::Function, cfg: &mut ControlFlowGraph, isa: &dyn TargetIsa, )13 pub fn expand_heap_addr(
14     inst: ir::Inst,
15     func: &mut ir::Function,
16     cfg: &mut ControlFlowGraph,
17     isa: &dyn TargetIsa,
18 ) {
19     // Unpack the instruction.
20     let (heap, offset, access_size) = match func.dfg[inst] {
21         ir::InstructionData::HeapAddr {
22             opcode,
23             heap,
24             arg,
25             imm,
26         } => {
27             debug_assert_eq!(opcode, ir::Opcode::HeapAddr);
28             (heap, arg, imm.into())
29         }
30         _ => panic!("Wanted heap_addr: {}", func.dfg.display_inst(inst, None)),
31     };
32 
33     match func.heaps[heap].style {
34         ir::HeapStyle::Dynamic { bound_gv } => {
35             dynamic_addr(isa, inst, heap, offset, access_size, bound_gv, func)
36         }
37         ir::HeapStyle::Static { bound } => static_addr(
38             isa,
39             inst,
40             heap,
41             offset,
42             access_size,
43             bound.into(),
44             func,
45             cfg,
46         ),
47     }
48 }
49 
/// Expand a `heap_addr` for a dynamic heap.
///
/// A dynamic heap's bound is loaded at runtime from `bound_gv`. This emits a
/// bounds check that traps with `HeapOutOfBounds` when
/// `offset + access_size > bound`, then delegates the actual address
/// computation to `compute_addr`.
fn dynamic_addr(
    isa: &dyn TargetIsa,
    inst: ir::Inst,
    heap: ir::Heap,
    offset: ir::Value,
    access_size: u32,
    bound_gv: ir::GlobalValue,
    func: &mut ir::Function,
) {
    // Widen to u64 so comparisons against the 64-bit `min_size` below are exact.
    let access_size = u64::from(access_size);
    let offset_ty = func.dfg.value_type(offset);
    let addr_ty = func.dfg.value_type(func.dfg.first_result(inst));
    let min_size = func.heaps[heap].min_size.into();
    // Position the cursor at `heap_addr` so new instructions land before it,
    // and inherit its source location for debug info.
    let mut pos = FuncCursor::new(func).at_inst(inst);
    pos.use_srcloc(inst);

    // Start with the bounds check. Trap if `offset + access_size > bound`.
    // Three cases, each avoiding runtime overflow in `offset + access_size`:
    let bound = pos.ins().global_value(offset_ty, bound_gv);
    let (cc, lhs, bound) = if access_size == 1 {
        // `offset > bound - 1` is the same as `offset >= bound`.
        (IntCC::UnsignedGreaterThanOrEqual, offset, bound)
    } else if access_size <= min_size {
        // We know that bound >= min_size, so here we can compare `offset > bound - access_size`
        // without wrapping.
        let adj_bound = pos.ins().iadd_imm(bound, -(access_size as i64));
        (IntCC::UnsignedGreaterThan, offset, adj_bound)
    } else {
        // We need an overflow check for the adjusted offset.
        let access_size_val = pos.ins().iconst(offset_ty, access_size as i64);
        let (adj_offset, overflow) = pos.ins().iadd_ifcout(offset, access_size_val);
        // Trap if `offset + access_size` wrapped around.
        pos.ins().trapif(
            isa.unsigned_add_overflow_condition(),
            overflow,
            ir::TrapCode::HeapOutOfBounds,
        );
        (IntCC::UnsignedGreaterThan, adj_offset, bound)
    };
    let oob = pos.ins().icmp(cc, lhs, bound);
    pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);

    // When Spectre mitigation is enabled, reuse the same comparison operands so
    // `compute_addr` can conditionally select a null address on the
    // (misspeculated) out-of-bounds path.
    let spectre_oob_comparison = if isa.flags().enable_heap_access_spectre_mitigation() {
        Some((cc, lhs, bound))
    } else {
        None
    };

    compute_addr(
        isa,
        inst,
        heap,
        addr_ty,
        offset,
        offset_ty,
        pos.func,
        spectre_oob_comparison,
    );
}
108 
/// Expand a `heap_addr` for a static heap.
///
/// A static heap's `bound` is a compile-time constant, which lets the bounds
/// check be folded (always-trap when the access can never fit) or omitted
/// entirely (32-bit offsets against a >= 4 GiB limit). Otherwise a single
/// compare-against-immediate plus `trapnz` is emitted.
fn static_addr(
    isa: &dyn TargetIsa,
    inst: ir::Inst,
    heap: ir::Heap,
    offset: ir::Value,
    access_size: u32,
    bound: u64,
    func: &mut ir::Function,
    cfg: &mut ControlFlowGraph,
) {
    let access_size = u64::from(access_size);
    let offset_ty = func.dfg.value_type(offset);
    let addr_ty = func.dfg.value_type(func.dfg.first_result(inst));
    let mut pos = FuncCursor::new(func).at_inst(inst);
    pos.use_srcloc(inst);

    // The goal here is to trap if `offset + access_size > bound`.
    //
    // This first case is a trivial case where we can easily trap.
    if access_size > bound {
        // This will simply always trap since `offset >= 0`.
        pos.ins().trap(ir::TrapCode::HeapOutOfBounds);
        // The original `heap_addr` still must define its result value; replace
        // it with a dummy constant (unreachable after the unconditional trap).
        pos.func.dfg.replace(inst).iconst(addr_ty, 0);

        // Split Block, as the trap is a terminator instruction.
        let curr_block = pos.current_block().expect("Cursor is not in a block");
        let new_block = pos.func.dfg.make_block();
        pos.insert_block(new_block);
        cfg.recompute_block(pos.func, curr_block);
        cfg.recompute_block(pos.func, new_block);
        return;
    }

    // After the trivial case is done we're now mostly interested in trapping
    // if `offset > bound - access_size`. We know `bound - access_size` here is
    // non-negative from the above comparison.
    //
    // If we can know `bound - access_size >= 4GB` then with a 32-bit offset
    // we're guaranteed:
    //
    //      bound - access_size >= 4GB > offset
    //
    // or, in other words, `offset < bound - access_size`, meaning we can't trap
    // for any value of `offset`.
    //
    // With that we have an optimization here where with 32-bit offsets and
    // `bound - access_size >= 4GB` we can omit a bounds check.
    let limit = bound - access_size;
    let mut spectre_oob_comparison = None;
    if offset_ty != ir::types::I32 || limit < 0xffff_ffff {
        let (cc, lhs, limit_imm) = if limit & 1 == 1 {
            // Prefer testing `offset >= limit - 1` when limit is odd because an even number is
            // likely to be a convenient constant on ARM and other RISC architectures.
            let limit = limit as i64 - 1;
            (IntCC::UnsignedGreaterThanOrEqual, offset, limit)
        } else {
            let limit = limit as i64;
            (IntCC::UnsignedGreaterThan, offset, limit)
        };
        let oob = pos.ins().icmp_imm(cc, lhs, limit_imm);
        pos.ins().trapnz(oob, ir::TrapCode::HeapOutOfBounds);
        if isa.flags().enable_heap_access_spectre_mitigation() {
            // Materialize the limit as a value so `compute_addr` can re-run the
            // comparison for its conditional-select Spectre guard.
            let limit = pos.ins().iconst(offset_ty, limit_imm);
            spectre_oob_comparison = Some((cc, lhs, limit));
        }
    }

    compute_addr(
        isa,
        inst,
        heap,
        addr_ty,
        offset,
        offset_ty,
        pos.func,
        spectre_oob_comparison,
    );
}
188 
/// Emit code for the base address computation of a `heap_addr` instruction.
///
/// Replaces `inst` with `base + offset` (after zero-extending `offset` to the
/// pointer type if needed), where `base` comes from the heap's base global
/// value or, when configured, the pinned register.
fn compute_addr(
    isa: &dyn TargetIsa,
    inst: ir::Inst,
    heap: ir::Heap,
    addr_ty: ir::Type,
    mut offset: ir::Value,
    offset_ty: ir::Type,
    func: &mut ir::Function,
    // If we are performing Spectre mitigation with conditional selects, the
    // values to compare and the condition code that indicates an out-of bounds
    // condition; on this condition, the conditional move will choose a
    // speculatively safe address (a zero / null pointer) instead.
    spectre_oob_comparison: Option<(IntCC, ir::Value, ir::Value)>,
) {
    let mut pos = FuncCursor::new(func).at_inst(inst);
    pos.use_srcloc(inst);

    // Convert `offset` to `addr_ty`.
    if offset_ty != addr_ty {
        let labels_value = offset;
        offset = pos.ins().uextend(addr_ty, offset);
        // Keep debug-info value labels pointing at the pre-extension value so
        // debuggers can still observe the original (narrow) offset.
        if let Some(values_labels) = pos.func.dfg.values_labels.as_mut() {
            values_labels.insert(
                offset,
                ir::ValueLabelAssignments::Alias {
                    from: pos.func.srclocs[inst],
                    value: labels_value,
                },
            );
        }
    }

    // Add the heap base address base
    let base = if isa.flags().enable_pinned_reg() && isa.flags().use_pinned_reg_as_heap_base() {
        pos.ins().get_pinned_reg(isa.pointer_type())
    } else {
        let base_gv = pos.func.heaps[heap].base;
        pos.ins().global_value(addr_ty, base_gv)
    };

    if let Some((cc, a, b)) = spectre_oob_comparison {
        // Spectre guard: re-run the bounds comparison and select a null
        // pointer on the out-of-bounds condition so a misspeculated access
        // cannot read attacker-chosen memory.
        let final_addr = pos.ins().iadd(base, offset);
        let zero = pos.ins().iconst(addr_ty, 0);
        let flags = pos.ins().ifcmp(a, b);
        pos.func
            .dfg
            .replace(inst)
            .selectif_spectre_guard(addr_ty, cc, flags, zero, final_addr);
    } else {
        // No mitigation requested: the result is simply `base + offset`.
        pos.func.dfg.replace(inst).iadd(base, offset);
    }
}
242