1 //! 32-bit ARM ISA: binary code emission.
2
3 use crate::binemit::{Reloc, StackMap};
4 use crate::ir::SourceLoc;
5 use crate::isa::arm32::inst::*;
6
7 use core::convert::TryFrom;
8 use log::debug;
9
10 /// Memory addressing mode finalization: convert "special" modes (e.g.,
11 /// nominal stack offset) into real addressing modes, possibly by
12 /// emitting some helper instructions that come immediately before the use
13 /// of this amode.
pub fn mem_finalize(mem: &AMode, state: &EmitState) -> (SmallVec<[Inst; 4]>, AMode) {
    match mem {
        // Base+offset forms whose offset may be "virtual" (nominal-SP) or too
        // large to encode directly: lower them to a real addressing mode,
        // possibly emitting helper instructions first.
        &AMode::RegOffset(_, off)
        | &AMode::SPOffset(off, _)
        | &AMode::FPOffset(off, _)
        | &AMode::NominalSPOffset(off, _) => {
            // Pick the concrete base register for each variant.
            let basereg = match mem {
                &AMode::RegOffset(reg, _) => reg,
                &AMode::SPOffset(..) | &AMode::NominalSPOffset(..) => sp_reg(),
                &AMode::FPOffset(..) => fp_reg(),
                _ => unreachable!(),
            };
            // Nominal-SP offsets must be corrected by the current virtual SP
            // adjustment to yield a real-SP-relative offset.
            let adj = match mem {
                &AMode::NominalSPOffset(..) => {
                    debug!(
                        "mem_finalize: nominal SP offset {} + adj {} -> {}",
                        off,
                        state.virtual_sp_offset,
                        off + state.virtual_sp_offset
                    );
                    state.virtual_sp_offset
                }
                _ => 0,
            };
            let off = off + adj;

            // Sanity-check that the (i64) offset is representable.
            // NOTE(review): the upper bound `1 << 32` looks one bit wider
            // than a 32-bit offset allows — confirm the intended range.
            assert!(-(1 << 31) <= off && off <= (1 << 32));

            if let Some(off) = UImm12::maybe_from_i64(off) {
                // Offset fits in an unsigned 12-bit immediate: encode directly.
                let mem = AMode::RegOffset12(basereg, off);
                (smallvec![], mem)
            } else {
                // Otherwise, materialize the offset into the scratch (ip)
                // register and use a reg+reg mode. The `as u32` cast
                // truncates to 32 bits, so negative offsets rely on
                // two's-complement wraparound addition.
                let tmp = writable_ip_reg();
                let const_insts = Inst::load_constant(tmp, off as u32);
                let mem = AMode::reg_plus_reg(basereg, tmp.to_reg(), 0);
                (const_insts, mem)
            }
        }
        // All other modes are already directly encodable; pass them through.
        _ => (smallvec![], mem.clone()),
    }
}
56
57 //=============================================================================
58 // Instructions and subcomponents: emission
59
machreg_to_gpr(m: Reg) -> u1660 fn machreg_to_gpr(m: Reg) -> u16 {
61 assert_eq!(m.get_class(), RegClass::I32);
62 u16::try_from(m.to_real_reg().get_hw_encoding()).unwrap()
63 }
64
machreg_to_gpr_lo(m: Reg) -> u1665 fn machreg_to_gpr_lo(m: Reg) -> u16 {
66 let gpr_lo = machreg_to_gpr(m);
67 assert!(gpr_lo < 8);
68 gpr_lo
69 }
70
machreg_is_lo(m: Reg) -> bool71 fn machreg_is_lo(m: Reg) -> bool {
72 machreg_to_gpr(m) < 8
73 }
74
enc_16_rr(bits_15_6: u16, rd: Reg, rm: Reg) -> u1675 fn enc_16_rr(bits_15_6: u16, rd: Reg, rm: Reg) -> u16 {
76 (bits_15_6 << 6) | machreg_to_gpr_lo(rd) | (machreg_to_gpr_lo(rm) << 3)
77 }
78
enc_16_rr_any(bits_15_8: u16, rd: Reg, rm: Reg) -> u1679 fn enc_16_rr_any(bits_15_8: u16, rd: Reg, rm: Reg) -> u16 {
80 let rd = machreg_to_gpr(rd);
81 (bits_15_8 << 8) | (rd & 0x7) | ((rd >> 3) << 7) | (machreg_to_gpr(rm) << 3)
82 }
83
enc_16_mov(rd: Writable<Reg>, rm: Reg) -> u1684 fn enc_16_mov(rd: Writable<Reg>, rm: Reg) -> u16 {
85 enc_16_rr_any(0b01000110, rd.to_reg(), rm)
86 }
87
enc_16_it(cond: Cond, insts: &Vec<CondInst>) -> u1688 fn enc_16_it(cond: Cond, insts: &Vec<CondInst>) -> u16 {
89 let cond = cond.bits();
90 let mut mask: u16 = 0;
91 for inst in insts.iter().skip(1) {
92 if inst.then {
93 mask |= cond & 0x1;
94 } else {
95 mask |= (cond & 0x1) ^ 0x1;
96 }
97 mask <<= 1;
98 }
99 mask |= 0x1;
100 mask <<= 4 - insts.len();
101 0b1011_1111_0000_0000 | (cond << 4) | mask
102 }
103
enc_32_regs( mut inst: u32, reg_0: Option<Reg>, reg_8: Option<Reg>, reg_12: Option<Reg>, reg_16: Option<Reg>, ) -> u32104 fn enc_32_regs(
105 mut inst: u32,
106 reg_0: Option<Reg>,
107 reg_8: Option<Reg>,
108 reg_12: Option<Reg>,
109 reg_16: Option<Reg>,
110 ) -> u32 {
111 if let Some(reg_0) = reg_0 {
112 inst |= u32::from(machreg_to_gpr(reg_0));
113 }
114 if let Some(reg_8) = reg_8 {
115 inst |= u32::from(machreg_to_gpr(reg_8)) << 8;
116 }
117 if let Some(reg_12) = reg_12 {
118 inst |= u32::from(machreg_to_gpr(reg_12)) << 12;
119 }
120 if let Some(reg_16) = reg_16 {
121 inst |= u32::from(machreg_to_gpr(reg_16)) << 16;
122 }
123 inst
124 }
125
enc_32_reg_shift(inst: u32, shift: &Option<ShiftOpAndAmt>) -> u32126 fn enc_32_reg_shift(inst: u32, shift: &Option<ShiftOpAndAmt>) -> u32 {
127 match shift {
128 Some(shift) => {
129 let op = u32::from(shift.op().bits());
130 let amt = u32::from(shift.amt().value());
131 let imm2 = amt & 0x3;
132 let imm3 = (amt >> 2) & 0x7;
133
134 inst | (op << 4) | (imm2 << 6) | (imm3 << 12)
135 }
136 None => inst,
137 }
138 }
139
enc_32_r_imm16(bits_31_20: u32, rd: Reg, imm16: u16) -> u32140 fn enc_32_r_imm16(bits_31_20: u32, rd: Reg, imm16: u16) -> u32 {
141 let imm16 = u32::from(imm16);
142 let imm8 = imm16 & 0xff;
143 let imm3 = (imm16 >> 8) & 0x7;
144 let i = (imm16 >> 11) & 0x1;
145 let imm4 = (imm16 >> 12) & 0xf;
146
147 let inst = ((bits_31_20 << 20) & !(1 << 26)) | imm8 | (imm3 << 12) | (imm4 << 16) | (i << 26);
148 enc_32_regs(inst, None, Some(rd), None, None)
149 }
150
enc_32_rrr(bits_31_20: u32, bits_15_12: u32, bits_7_4: u32, rd: Reg, rm: Reg, rn: Reg) -> u32151 fn enc_32_rrr(bits_31_20: u32, bits_15_12: u32, bits_7_4: u32, rd: Reg, rm: Reg, rn: Reg) -> u32 {
152 let inst = (bits_31_20 << 20) | (bits_15_12 << 12) | (bits_7_4 << 4);
153 enc_32_regs(inst, Some(rm), Some(rd), None, Some(rn))
154 }
155
enc_32_imm12(inst: u32, imm12: UImm12) -> u32156 fn enc_32_imm12(inst: u32, imm12: UImm12) -> u32 {
157 let imm12 = imm12.bits();
158 let imm8 = imm12 & 0xff;
159 let imm3 = (imm12 >> 8) & 0x7;
160 let i = (imm12 >> 11) & 0x1;
161 inst | imm8 | (imm3 << 12) | (i << 26)
162 }
163
enc_32_mem_r(bits_24_20: u32, rt: Reg, rn: Reg, rm: Reg, imm2: u8) -> u32164 fn enc_32_mem_r(bits_24_20: u32, rt: Reg, rn: Reg, rm: Reg, imm2: u8) -> u32 {
165 let imm2 = u32::from(imm2);
166 let inst = (imm2 << 4) | (bits_24_20 << 20) | (0b11111 << 27);
167 enc_32_regs(inst, Some(rm), None, Some(rt), Some(rn))
168 }
169
enc_32_mem_off12(bits_24_20: u32, rt: Reg, rn: Reg, off12: UImm12) -> u32170 fn enc_32_mem_off12(bits_24_20: u32, rt: Reg, rn: Reg, off12: UImm12) -> u32 {
171 let off12 = off12.bits();
172 let inst = off12 | (bits_24_20 << 20) | (0b11111 << 27);
173 enc_32_regs(inst, None, None, Some(rt), Some(rn))
174 }
175
enc_32_jump(target: BranchTarget) -> u32176 fn enc_32_jump(target: BranchTarget) -> u32 {
177 let off24 = target.as_off24();
178 let imm11 = off24 & 0x7ff;
179 let imm10 = (off24 >> 11) & 0x3ff;
180 let i2 = (off24 >> 21) & 0x1;
181 let i1 = (off24 >> 22) & 0x1;
182 let s = (off24 >> 23) & 0x1;
183 let j1 = (i1 ^ s) ^ 1;
184 let j2 = (i2 ^ s) ^ 1;
185
186 0b11110_0_0000000000_10_0_1_0_00000000000
187 | imm11
188 | (j2 << 11)
189 | (j1 << 13)
190 | (imm10 << 16)
191 | (s << 26)
192 }
193
enc_32_cond_branch(cond: Cond, target: BranchTarget) -> u32194 fn enc_32_cond_branch(cond: Cond, target: BranchTarget) -> u32 {
195 let cond = u32::from(cond.bits());
196 let off20 = target.as_off20();
197 let imm11 = off20 & 0x7ff;
198 let imm6 = (off20 >> 11) & 0x3f;
199 let j1 = (off20 >> 17) & 0x1;
200 let j2 = (off20 >> 18) & 0x1;
201 let s = (off20 >> 19) & 0x1;
202
203 0b11110_0_0000_000000_10_0_0_0_00000000000
204 | imm11
205 | (j2 << 11)
206 | (j1 << 13)
207 | (imm6 << 16)
208 | (cond << 22)
209 | (s << 26)
210 }
211
/// Swap the upper and lower 16-bit halves of a 32-bit word.
fn u32_swap_halfwords(x: u32) -> u32 {
    // A 16-bit rotation of a 32-bit value exchanges its halfwords.
    x.rotate_left(16)
}
215
emit_32(inst: u32, sink: &mut MachBuffer<Inst>)216 fn emit_32(inst: u32, sink: &mut MachBuffer<Inst>) {
217 let inst_hi = (inst >> 16) as u16;
218 let inst_lo = (inst & 0xffff) as u16;
219 sink.put2(inst_hi);
220 sink.put2(inst_lo);
221 }
222
/// State carried between emissions of a sequence of instructions.
#[derive(Default, Clone, Debug)]
pub struct EmitState {
    /// Addend to convert nominal-SP offsets to real-SP offsets at the current
    /// program point.
    pub(crate) virtual_sp_offset: i64,
    /// Offset of FP from nominal-SP; initialized from the callee's frame
    /// size in `MachInstEmitState::new`.
    pub(crate) nominal_sp_to_fp: i64,
    /// Safepoint stack map for upcoming instruction, as provided to `pre_safepoint()`.
    stack_map: Option<StackMap>,
    /// Source location of next machine code instruction to be emitted.
    cur_srcloc: SourceLoc,
}
236
impl MachInstEmitState<Inst> for EmitState {
    /// Create a fresh emission state for the given function body.
    fn new(abi: &dyn ABICallee<I = Inst>) -> Self {
        EmitState {
            virtual_sp_offset: 0,
            // The distance from nominal SP to FP is the callee's frame size.
            nominal_sp_to_fp: abi.frame_size() as i64,
            stack_map: None,
            cur_srcloc: SourceLoc::default(),
        }
    }

    /// Register a safepoint stack map to attach to the next emitted
    /// instruction.
    fn pre_safepoint(&mut self, stack_map: StackMap) {
        self.stack_map = Some(stack_map);
    }

    /// Record the source location for subsequently emitted instructions.
    fn pre_sourceloc(&mut self, srcloc: SourceLoc) {
        self.cur_srcloc = srcloc;
    }
}
255
impl EmitState {
    /// Take (and clear) the pending safepoint stack map, if any.
    fn take_stack_map(&mut self) -> Option<StackMap> {
        self.stack_map.take()
    }

    /// Clear per-instruction state after an instruction has been emitted.
    fn clear_post_insn(&mut self) {
        self.stack_map = None;
    }

    /// Source location for the instruction currently being emitted.
    fn cur_srcloc(&self) -> SourceLoc {
        self.cur_srcloc
    }
}
269
/// Constant state used during emission of instructions (compiler flags).
pub struct EmitInfo {
    // Compiler settings in effect for this compilation.
    flags: settings::Flags,
}
273
impl EmitInfo {
    /// Create a new emission-info bundle from the given compiler flags.
    pub(crate) fn new(flags: settings::Flags) -> Self {
        EmitInfo { flags }
    }
}
279
impl MachInstEmitInfo for EmitInfo {
    /// The compiler flags in effect for emission.
    fn flags(&self) -> &settings::Flags {
        &self.flags
    }
}
285
impl MachInstEmit for Inst {
    type Info = EmitInfo;
    type State = EmitState;

    /// Emit this instruction into `sink`, using and updating `state`
    /// (virtual-SP offset, pending stack map, current source location).
    fn emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState) {
        // Record where this instruction starts so its size can be checked
        // against `worst_case_size()` at the end.
        let start_off = sink.cur_offset();

        match self {
            // Zero-size pseudo-instructions: emit nothing.
            &Inst::Nop0 | &Inst::EpiloguePlaceholder => {}
            &Inst::Nop2 => {
                // 16-bit no-op.
                sink.put2(0b1011_1111_0000_0000);
            }
            &Inst::AluRRR { alu_op, rd, rn, rm } => {
                // Opcode bit fields for each three-register ALU operation.
                let (bits_31_20, bits_15_12, bits_7_4) = match alu_op {
                    ALUOp::Lsl => (0b111110100000, 0b1111, 0b0000),
                    ALUOp::Lsr => (0b111110100010, 0b1111, 0b0000),
                    ALUOp::Asr => (0b111110100100, 0b1111, 0b0000),
                    ALUOp::Ror => (0b111110100110, 0b1111, 0b0000),
                    ALUOp::Qadd => (0b111110101000, 0b1111, 0b1000),
                    ALUOp::Qsub => (0b111110101000, 0b1111, 0b1010),
                    ALUOp::Mul => (0b111110110000, 0b1111, 0b0000),
                    ALUOp::Udiv => (0b111110111011, 0b1111, 0b1111),
                    ALUOp::Sdiv => (0b111110111001, 0b1111, 0b1111),
                    _ => panic!("Invalid ALUOp {:?} in RRR form!", alu_op),
                };
                emit_32(
                    enc_32_rrr(bits_31_20, bits_15_12, bits_7_4, rd.to_reg(), rm, rn),
                    sink,
                );
            }
            &Inst::AluRRRShift {
                alu_op,
                rd,
                rn,
                rm,
                ref shift,
            } => {
                let bits_31_24 = 0b111_0101;
                let bits_24_20 = match alu_op {
                    ALUOp::And => 0b00000,
                    ALUOp::Bic => 0b00010,
                    ALUOp::Orr => 0b00100,
                    ALUOp::Orn => 0b00110,
                    ALUOp::Eor => 0b01000,
                    ALUOp::Add => 0b10000,
                    ALUOp::Adds => 0b10001,
                    ALUOp::Adc => 0b10100,
                    ALUOp::Adcs => 0b10101,
                    ALUOp::Sbc => 0b10110,
                    ALUOp::Sbcs => 0b10111,
                    ALUOp::Sub => 0b11010,
                    ALUOp::Subs => 0b11011,
                    ALUOp::Rsb => 0b11100,
                    _ => panic!("Invalid ALUOp {:?} in RRRShift form!", alu_op),
                };
                let bits_31_20 = (bits_31_24 << 5) | bits_24_20;
                // Base encoding, then fold in the optional shift fields.
                let inst = enc_32_rrr(bits_31_20, 0, 0, rd.to_reg(), rm, rn);
                let inst = enc_32_reg_shift(inst, shift);
                emit_32(inst, sink);
            }
            &Inst::AluRRShift {
                alu_op,
                rd,
                rm,
                ref shift,
            } => {
                // Two-register (no rn) ALU ops: MOV / MVN with optional shift.
                let bits_24_21 = match alu_op {
                    ALUOp1::Mvn => 0b0011,
                    ALUOp1::Mov => 0b0010,
                };
                let inst = 0b1110101_0000_0_1111_0_000_0000_00_00_0000 | (bits_24_21 << 21);
                let inst = enc_32_regs(inst, Some(rm), Some(rd.to_reg()), None, None);
                let inst = enc_32_reg_shift(inst, shift);
                emit_32(inst, sink);
            }
            &Inst::AluRRRR {
                alu_op,
                rd_hi,
                rd_lo,
                rn,
                rm,
            } => {
                // Widening multiplies with a register pair destination.
                let (bits_22_20, bits_7_4) = match alu_op {
                    ALUOp::Smull => (0b000, 0b0000),
                    ALUOp::Umull => (0b010, 0b0000),
                    _ => panic!("Invalid ALUOp {:?} in RRRR form!", alu_op),
                };
                let inst = (0b111110111 << 23) | (bits_22_20 << 20) | (bits_7_4 << 4);
                let inst = enc_32_regs(
                    inst,
                    Some(rm),
                    Some(rd_hi.to_reg()),
                    Some(rd_lo.to_reg()),
                    Some(rn),
                );
                emit_32(inst, sink);
            }
            &Inst::AluRRImm12 {
                alu_op,
                rd,
                rn,
                imm12,
            } => {
                // Add/sub with a plain (non-rotated) 12-bit immediate.
                let bits_24_20 = match alu_op {
                    ALUOp::Add => 0b00000,
                    ALUOp::Sub => 0b01010,
                    _ => panic!("Invalid ALUOp {:?} in RRImm12 form!", alu_op),
                };
                let inst = (0b11110_0_1 << 25) | (bits_24_20 << 20);
                let inst = enc_32_regs(inst, None, Some(rd.to_reg()), None, Some(rn));
                let inst = enc_32_imm12(inst, imm12);
                emit_32(inst, sink);
            }
            &Inst::AluRRImm8 {
                alu_op,
                rd,
                rn,
                imm8,
            } => {
                let bits_24_20 = match alu_op {
                    ALUOp::And => 0b00000,
                    ALUOp::Bic => 0b00010,
                    ALUOp::Orr => 0b00100,
                    ALUOp::Orn => 0b00110,
                    ALUOp::Eor => 0b01000,
                    ALUOp::Add => 0b10000,
                    ALUOp::Adds => 0b10001,
                    ALUOp::Adc => 0b10100,
                    ALUOp::Adcs => 0b10101,
                    ALUOp::Sbc => 0b10110,
                    ALUOp::Sbcs => 0b10111,
                    ALUOp::Sub => 0b11010,
                    ALUOp::Subs => 0b11011,
                    ALUOp::Rsb => 0b11100,
                    _ => panic!("Invalid ALUOp {:?} in RRImm8 form!", alu_op),
                };
                let imm8 = imm8.bits();
                let inst = 0b11110_0_0_00000_0000_0_000_0000_00000000 | imm8 | (bits_24_20 << 20);
                let inst = enc_32_regs(inst, None, Some(rd.to_reg()), None, Some(rn));
                emit_32(inst, sink);
            }
            &Inst::AluRImm8 { alu_op, rd, imm8 } => {
                // MOV / MVN with an 8-bit immediate; rn field is fixed 0b1111.
                let bits_24_20 = match alu_op {
                    ALUOp1::Mvn => 0b00110,
                    ALUOp1::Mov => 0b00100,
                };
                let imm8 = imm8.bits();
                let inst = 0b11110_0_0_00000_1111_0_000_0000_00000000 | imm8 | (bits_24_20 << 20);
                let inst = enc_32_regs(inst, None, Some(rd.to_reg()), None, None);
                emit_32(inst, sink);
            }
            &Inst::BitOpRR { bit_op, rd, rm } => {
                // Bit-manipulation ops; note `rm` is encoded in two fields
                // (bit 0 and bit 16) for these encodings.
                let (bits_22_20, bits_7_4) = match bit_op {
                    BitOp::Rbit => (0b001, 0b1010),
                    BitOp::Rev => (0b001, 0b1000),
                    BitOp::Clz => (0b011, 0b1000),
                };
                let inst =
                    0b111110101_000_0000_1111_0000_0000_0000 | (bits_22_20 << 20) | (bits_7_4 << 4);
                let inst = enc_32_regs(inst, Some(rm), Some(rd.to_reg()), None, Some(rm));
                emit_32(inst, sink);
            }
            &Inst::Mov { rd, rm } => {
                // 16-bit register move.
                sink.put2(enc_16_mov(rd, rm));
            }
            &Inst::MovImm16 { rd, imm16 } => {
                // Move 16-bit immediate into the low halfword.
                emit_32(enc_32_r_imm16(0b11110_0_100100, rd.to_reg(), imm16), sink);
            }
            &Inst::Movt { rd, imm16 } => {
                // Move 16-bit immediate into the high halfword (MOVT).
                emit_32(enc_32_r_imm16(0b11110_0_101100, rd.to_reg(), imm16), sink);
            }
            &Inst::Cmp { rn, rm } => {
                // Check which 16-bit encoding is allowed.
                if machreg_is_lo(rn) && machreg_is_lo(rm) {
                    sink.put2(enc_16_rr(0b0100001010, rn, rm));
                } else {
                    sink.put2(enc_16_rr_any(0b01000101, rn, rm));
                }
            }
            &Inst::CmpImm8 { rn, imm8 } => {
                let inst = 0b11110_0_011011_0000_0_000_1111_00000000 | u32::from(imm8);
                let inst = enc_32_regs(inst, None, None, None, Some(rn));
                emit_32(inst, sink);
            }
            &Inst::Store { rt, ref mem, bits } => {
                // Lower the addressing mode first; this may emit helper
                // instructions (e.g. to materialize a large offset).
                let (mem_insts, mem) = mem_finalize(mem, state);
                for inst in mem_insts.into_iter() {
                    inst.emit(sink, emit_info, state);
                }
                let srcloc = state.cur_srcloc();
                if srcloc != SourceLoc::default() {
                    // Register the offset at which the store instruction starts.
                    sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
                }
                match mem {
                    AMode::RegReg(rn, rm, imm2) => {
                        // Opcode bits select the store width.
                        let bits_24_20 = match bits {
                            32 => 0b00100,
                            16 => 0b00010,
                            8 => 0b00000,
                            _ => panic!("Unsupported store case {:?}", self),
                        };
                        emit_32(enc_32_mem_r(bits_24_20, rt, rn, rm, imm2), sink);
                    }
                    AMode::RegOffset12(rn, off12) => {
                        let bits_24_20 = match bits {
                            32 => 0b01100,
                            16 => 0b01010,
                            8 => 0b01000,
                            _ => panic!("Unsupported store case {:?}", self),
                        };
                        emit_32(enc_32_mem_off12(bits_24_20, rt, rn, off12), sink);
                    }
                    AMode::PCRel(_) => panic!("Unsupported store case {:?}", self),
                    // mem_finalize only produces the forms handled above.
                    _ => unreachable!(),
                }
            }
            &Inst::Load {
                rt,
                ref mem,
                bits,
                sign_extend,
            } => {
                let (mem_insts, mem) = mem_finalize(mem, state);
                for inst in mem_insts.into_iter() {
                    inst.emit(sink, emit_info, state);
                }
                let srcloc = state.cur_srcloc();
                if srcloc != SourceLoc::default() {
                    // Register the offset at which the load instruction starts.
                    sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
                }
                match mem {
                    AMode::RegReg(rn, rm, imm2) => {
                        // Opcode bits select width and sign-extension.
                        let bits_24_20 = match (bits, sign_extend) {
                            (32, _) => 0b00101,
                            (16, true) => 0b10011,
                            (16, false) => 0b00011,
                            (8, true) => 0b10001,
                            (8, false) => 0b00001,
                            _ => panic!("Unsupported load case {:?}", self),
                        };
                        emit_32(enc_32_mem_r(bits_24_20, rt.to_reg(), rn, rm, imm2), sink);
                    }
                    AMode::RegOffset12(rn, off12) => {
                        let bits_24_20 = match (bits, sign_extend) {
                            (32, _) => 0b01101,
                            (16, true) => 0b11011,
                            (16, false) => 0b01011,
                            (8, true) => 0b11001,
                            (8, false) => 0b01001,
                            _ => panic!("Unsupported load case {:?}", self),
                        };
                        emit_32(enc_32_mem_off12(bits_24_20, rt.to_reg(), rn, off12), sink);
                    }
                    AMode::PCRel(off12) => {
                        // PC-relative load: encode the offset magnitude and
                        // set the add/subtract direction bit (bit 23).
                        let mut bits_24_20 = match (bits, sign_extend) {
                            (32, _) => 0b00101,
                            (16, true) => 0b10011,
                            (16, false) => 0b00011,
                            (8, true) => 0b10001,
                            (8, false) => 0b00001,
                            _ => panic!("Unsupported load case {:?}", self),
                        };
                        let (u, off12) = if off12 > 0 { (1, off12) } else { (0, -off12) };
                        let off12 = UImm12::maybe_from_i64(i64::from(off12)).unwrap();
                        bits_24_20 |= u << 3;

                        emit_32(
                            enc_32_mem_off12(bits_24_20, rt.to_reg(), pc_reg(), off12),
                            sink,
                        );
                    }
                    // mem_finalize only produces the forms handled above.
                    _ => unreachable!(),
                }
            }
            &Inst::LoadAddr { rd, ref mem } => {
                // Compute the effective address of `mem` into `rd` using an
                // add/sub, after lowering the addressing mode.
                let (mem_insts, mem) = mem_finalize(mem, state);
                for inst in mem_insts.into_iter() {
                    inst.emit(sink, emit_info, state);
                }
                let inst = match mem {
                    AMode::RegReg(reg1, reg2, shift) => {
                        // rd := reg1 + (reg2 << shift)
                        let shift = u32::from(shift);
                        let shift_amt = ShiftOpShiftImm::maybe_from_shift(shift).unwrap();
                        let shift = ShiftOpAndAmt::new(ShiftOp::LSL, shift_amt);
                        Inst::AluRRRShift {
                            alu_op: ALUOp::Add,
                            rd,
                            rn: reg1,
                            rm: reg2,
                            shift: Some(shift),
                        }
                    }
                    AMode::RegOffset12(reg, imm12) => Inst::AluRRImm12 {
                        alu_op: ALUOp::Add,
                        rd,
                        rn: reg,
                        imm12,
                    },
                    AMode::PCRel(off12) => {
                        // rd := pc +/- |off12|, depending on the sign.
                        let (off12, alu_op) = if off12 > 0 {
                            (off12, ALUOp::Add)
                        } else {
                            (-off12, ALUOp::Sub)
                        };
                        let imm12 = UImm12::maybe_from_i64(i64::from(off12)).unwrap();
                        Inst::AluRRImm12 {
                            alu_op,
                            rd,
                            rn: pc_reg(),
                            imm12,
                        }
                    }
                    _ => unreachable!(),
                };
                inst.emit(sink, emit_info, state);
            }
            &Inst::Extend {
                rd,
                rm,
                from_bits,
                signed,
            } if from_bits >= 8 => {
                let rd = rd.to_reg();
                // Prefer the compact 16-bit encoding when both registers
                // are low registers.
                if machreg_is_lo(rd) && machreg_is_lo(rm) {
                    let bits_15_9 = match (from_bits, signed) {
                        (16, true) => 0b1011001000,
                        (16, false) => 0b1011001010,
                        (8, true) => 0b1011001001,
                        (8, false) => 0b1011001011,
                        _ => panic!("Unsupported Extend case: {:?}", self),
                    };
                    sink.put2(enc_16_rr(bits_15_9, rd, rm));
                } else {
                    let bits_22_20 = match (from_bits, signed) {
                        (16, true) => 0b000,
                        (16, false) => 0b001,
                        (8, true) => 0b100,
                        (8, false) => 0b101,
                        _ => panic!("Unsupported Extend case: {:?}", self),
                    };
                    let inst = 0b111110100_000_11111111_0000_1000_0000 | (bits_22_20 << 20);
                    let inst = enc_32_regs(inst, Some(rm), Some(rd), None, None);
                    emit_32(inst, sink);
                }
            }
            &Inst::Extend {
                rd,
                rm,
                from_bits,
                signed,
            } if from_bits == 1 => {
                // 1-bit extend: mask to the low bit; for a signed extend,
                // additionally map 1 -> -1 via rsb (rd := 1 - rd... i.e.
                // reverse-subtract from the immediate).
                let inst = Inst::AluRRImm8 {
                    alu_op: ALUOp::And,
                    rd,
                    rn: rm,
                    imm8: UImm8::maybe_from_i64(1).unwrap(),
                };
                inst.emit(sink, emit_info, state);

                if signed {
                    let inst = Inst::AluRRImm8 {
                        alu_op: ALUOp::Rsb,
                        rd,
                        rn: rd.to_reg(),
                        imm8: UImm8::maybe_from_i64(1).unwrap(),
                    };
                    inst.emit(sink, emit_info, state);
                }
            }
            &Inst::Extend { .. } => {
                panic!("Unsupported extend variant");
            }
            &Inst::It { cond, ref insts } => {
                // An IT block covers one to four instructions; the first
                // must use the 'then' condition by construction.
                assert!(1 <= insts.len() && insts.len() <= 4);
                assert!(insts[0].then);

                sink.put2(enc_16_it(cond, insts));
                for inst in insts.iter() {
                    inst.inst.emit(sink, emit_info, state);
                }
            }
            &Inst::Push { ref reg_list } => match reg_list.len() {
                0 => panic!("Unsupported Push case: {:?}", self),
                1 => {
                    // Single register: store with pre-decrement writeback.
                    let reg = u32::from(machreg_to_gpr(reg_list[0]));
                    let inst: u32 = 0b1111100001001101_0000_110100000100 | (reg << 12);
                    emit_32(inst, sink);
                }
                _ => {
                    // Multiple registers: store-multiple with a bitmask.
                    let mut inst: u32 = 0b1110100100101101 << 16;
                    for reg in reg_list {
                        inst |= 1 << machreg_to_gpr(*reg);
                    }
                    // SP (bit 13) and PC (bit 15) are not pushable here.
                    if inst & ((1 << 13) | (1 << 15)) != 0 {
                        panic!("Unsupported Push case: {:?}", self);
                    }
                    emit_32(inst, sink);
                }
            },
            &Inst::Pop { ref reg_list } => match reg_list.len() {
                0 => panic!("Unsupported Pop case: {:?}", self),
                1 => {
                    // Single register: load with post-increment writeback.
                    let reg = u32::from(machreg_to_gpr(reg_list[0].to_reg()));
                    let inst: u32 = 0b1111100001011101_0000_101100000100 | (reg << 12);
                    emit_32(inst, sink);
                }
                _ => {
                    // Multiple registers: load-multiple with a bitmask.
                    let mut inst: u32 = 0b1110100010111101 << 16;
                    for reg in reg_list {
                        inst |= 1 << machreg_to_gpr(reg.to_reg());
                    }
                    // LR (bit 14) and PC (bit 15) cannot both be popped.
                    if (inst & (1 << 14) != 0) && (inst & (1 << 15) != 0) {
                        panic!("Unsupported Pop case: {:?}", self);
                    }
                    emit_32(inst, sink);
                }
            },
            &Inst::Call { ref info } => {
                // Direct call: emit a relocation against the target symbol,
                // then a BL with a zero placeholder offset.
                let srcloc = state.cur_srcloc();
                sink.add_reloc(srcloc, Reloc::Arm32Call, &info.dest, 0);
                emit_32(0b11110_0_0000000000_11_0_1_0_00000000000, sink);
                if info.opcode.is_call() {
                    sink.add_call_site(srcloc, info.opcode);
                }
            }
            &Inst::CallInd { ref info } => {
                // Indirect call through a register (blx rm).
                let srcloc = state.cur_srcloc();
                sink.put2(0b01000111_1_0000_000 | (machreg_to_gpr(info.rm) << 3));
                if info.opcode.is_call() {
                    sink.add_call_site(srcloc, info.opcode);
                }
            }
            &Inst::LoadExtName {
                rt,
                ref name,
                offset,
            } => {
                // maybe nop2 (0|2) bytes (pc is now 4-aligned)
                // ldr rt, [pc, #4] 4 bytes
                // b continue 4 bytes
                // addr 4 bytes
                // continue:
                //
                if start_off & 0x3 != 0 {
                    Inst::Nop2.emit(sink, emit_info, state);
                }
                assert_eq!(sink.cur_offset() & 0x3, 0);

                // Load the inline literal emitted below.
                let mem = AMode::PCRel(4);
                let inst = Inst::Load {
                    rt,
                    mem,
                    bits: 32,
                    sign_extend: false,
                };
                inst.emit(sink, emit_info, state);

                // Skip over the 4-byte literal.
                let inst = Inst::Jump {
                    dest: BranchTarget::ResolvedOffset(4),
                };
                inst.emit(sink, emit_info, state);

                // The literal itself: an absolute 4-byte address, filled in
                // by the relocation.
                let srcloc = state.cur_srcloc();
                sink.add_reloc(srcloc, Reloc::Abs4, name, offset.into());
                sink.put4(0);
            }
            &Inst::Ret => {
                sink.put2(0b010001110_1110_000); // bx lr
            }
            &Inst::Jump { dest } => {
                let off = sink.cur_offset();
                // Indicate that the jump uses a label, if so, so that a fixup can occur later.
                if let Some(l) = dest.as_label() {
                    sink.use_label_at_offset(off, l, LabelUse::Branch24);
                    sink.add_uncond_branch(off, off + 4, l);
                }
                emit_32(enc_32_jump(dest), sink);
            }
            &Inst::CondBr {
                taken,
                not_taken,
                cond,
            } => {
                // Conditional part first.
                let cond_off = sink.cur_offset();
                if let Some(l) = taken.as_label() {
                    let label_use = LabelUse::Branch20;
                    sink.use_label_at_offset(cond_off, l, label_use);
                    // Provide the inverted-condition form (in emitted byte
                    // order) so the buffer's branch editing can flip this
                    // branch — NOTE(review): confirm against MachBuffer docs.
                    let inverted = enc_32_cond_branch(cond.invert(), taken);
                    let inverted = u32_swap_halfwords(inverted).to_le_bytes();
                    sink.add_cond_branch(cond_off, cond_off + 4, l, &inverted[..]);
                }
                emit_32(enc_32_cond_branch(cond, taken), sink);

                // Unconditional part.
                let uncond_off = sink.cur_offset();
                if let Some(l) = not_taken.as_label() {
                    sink.use_label_at_offset(uncond_off, l, LabelUse::Branch24);
                    sink.add_uncond_branch(uncond_off, uncond_off + 4, l);
                }
                emit_32(enc_32_jump(not_taken), sink);
            }
            &Inst::IndirectBr { rm, .. } => {
                // bx rm
                let inst = 0b010001110_0000_000 | (machreg_to_gpr(rm) << 3);
                sink.put2(inst);
            }
            &Inst::Udf { trap_info } => {
                // Undefined instruction: record the trap, then emit udf #0.
                let srcloc = state.cur_srcloc();
                let code = trap_info;
                sink.add_trap(srcloc, code);
                sink.put2(0b11011110_00000000);
            }
            &Inst::Bkpt => {
                // Breakpoint (bkpt #0).
                sink.put2(0b10111110_00000000);
            }
            &Inst::TrapIf { cond, trap_info } => {
                // Branch over the 2-byte trap when the *inverted* condition
                // holds; otherwise fall into the udf.
                let cond = cond.invert();
                let dest = BranchTarget::ResolvedOffset(2);
                emit_32(enc_32_cond_branch(cond, dest), sink);

                let trap = Inst::Udf { trap_info };
                trap.emit(sink, emit_info, state);
            }
            &Inst::VirtualSPOffsetAdj { offset } => {
                // Pseudo-instruction: adjust the emission state only; no
                // machine code is produced.
                debug!(
                    "virtual sp offset adjusted by {} -> {}",
                    offset,
                    state.virtual_sp_offset + offset,
                );
                state.virtual_sp_offset += offset;
            }
        }

        // Every instruction must fit within the declared worst-case size.
        let end_off = sink.cur_offset();
        debug_assert!((end_off - start_off) <= Inst::worst_case_size());
    }

    /// Pretty-print this instruction, threading the emission state through
    /// so state-dependent forms (e.g. nominal-SP offsets) print correctly.
    fn pretty_print(&self, mb_rru: Option<&RealRegUniverse>, state: &mut EmitState) -> String {
        self.print_with_state(mb_rru, state)
    }
}
829