//! Lower a single Cranelift instruction into vcode.

use crate::ir::types::*;
use crate::ir::Inst as IRInst;
use crate::ir::Opcode;
use crate::machinst::lower::*;
use crate::machinst::*;
use crate::settings::Flags;
use crate::CodegenResult;

use crate::isa::arm32::abi::*;
use crate::isa::arm32::inst::*;

use smallvec::SmallVec;

use super::lower::*;
18 /// Actually codegen an instruction's results into registers.
lower_insn_to_regs<C: LowerCtx<I = Inst>>( ctx: &mut C, insn: IRInst, flags: &Flags, ) -> CodegenResult<()>19 pub(crate) fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
20 ctx: &mut C,
21 insn: IRInst,
22 flags: &Flags,
23 ) -> CodegenResult<()> {
24 let op = ctx.data(insn).opcode();
25 let inputs: SmallVec<[InsnInput; 4]> = (0..ctx.num_inputs(insn))
26 .map(|i| InsnInput { insn, input: i })
27 .collect();
28 let outputs: SmallVec<[InsnOutput; 2]> = (0..ctx.num_outputs(insn))
29 .map(|i| InsnOutput { insn, output: i })
30 .collect();
31 let ty = if outputs.len() > 0 {
32 let ty = ctx.output_ty(insn, 0);
33 if ty.bits() > 32 || ty.is_float() {
34 panic!("Cannot lower inst with type {}!", ty);
35 }
36 Some(ty)
37 } else {
38 None
39 };
40
41 match op {
42 Opcode::Iconst | Opcode::Bconst | Opcode::Null => {
43 let value = output_to_const(ctx, outputs[0]).unwrap();
44 let rd = output_to_reg(ctx, outputs[0]);
45 lower_constant(ctx, rd, value);
46 }
47 Opcode::Iadd
48 | Opcode::IaddIfcin
49 | Opcode::IaddIfcout
50 | Opcode::IaddIfcarry
51 | Opcode::Isub
52 | Opcode::IsubIfbin
53 | Opcode::IsubIfbout
54 | Opcode::IsubIfborrow
55 | Opcode::Band
56 | Opcode::Bor
57 | Opcode::Bxor
58 | Opcode::BandNot
59 | Opcode::BorNot => {
60 let rd = output_to_reg(ctx, outputs[0]);
61 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
62 let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
63
64 let alu_op = match op {
65 Opcode::Iadd => ALUOp::Add,
66 Opcode::IaddIfcin => ALUOp::Adc,
67 Opcode::IaddIfcout => ALUOp::Adds,
68 Opcode::IaddIfcarry => ALUOp::Adcs,
69 Opcode::Isub => ALUOp::Sub,
70 Opcode::IsubIfbin => ALUOp::Sbc,
71 Opcode::IsubIfbout => ALUOp::Subs,
72 Opcode::IsubIfborrow => ALUOp::Sbcs,
73 Opcode::Band => ALUOp::And,
74 Opcode::Bor => ALUOp::Orr,
75 Opcode::Bxor => ALUOp::Eor,
76 Opcode::BandNot => ALUOp::Bic,
77 Opcode::BorNot => ALUOp::Orn,
78 _ => unreachable!(),
79 };
80 ctx.emit(Inst::AluRRRShift {
81 alu_op,
82 rd,
83 rn,
84 rm,
85 shift: None,
86 });
87 }
88 Opcode::Imul | Opcode::Udiv | Opcode::Sdiv => {
89 let rd = output_to_reg(ctx, outputs[0]);
90 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
91 let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
92
93 let alu_op = match op {
94 Opcode::Imul => ALUOp::Mul,
95 Opcode::Udiv => ALUOp::Udiv,
96 Opcode::Sdiv => ALUOp::Sdiv,
97 _ => unreachable!(),
98 };
99 ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm });
100 }
101 Opcode::Ineg => {
102 let rd = output_to_reg(ctx, outputs[0]);
103 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
104
105 ctx.emit(Inst::AluRRImm8 {
106 alu_op: ALUOp::Rsb,
107 rd,
108 rn,
109 imm8: UImm8::maybe_from_i64(0).unwrap(),
110 });
111 }
112 Opcode::Ishl | Opcode::Ushr | Opcode::Sshr => {
113 let (alu_op, ext) = match op {
114 Opcode::Ishl => (ALUOp::Lsl, NarrowValueMode::None),
115 Opcode::Ushr => (ALUOp::Lsr, NarrowValueMode::ZeroExtend),
116 Opcode::Sshr => (ALUOp::Asr, NarrowValueMode::SignExtend),
117 _ => unreachable!(),
118 };
119 let rd = output_to_reg(ctx, outputs[0]);
120 let rn = input_to_reg(ctx, inputs[0], ext);
121 let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::ZeroExtend);
122 ctx.emit(Inst::AluRRR { alu_op, rd, rn, rm });
123 }
124 Opcode::Rotr => {
125 if ty.unwrap().bits() != 32 {
126 unimplemented!()
127 }
128 let rd = output_to_reg(ctx, outputs[0]);
129 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
130 let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
131 ctx.emit(Inst::AluRRR {
132 alu_op: ALUOp::Ror,
133 rd,
134 rn,
135 rm,
136 });
137 }
138 Opcode::Rotl => {
139 if ty.unwrap().bits() != 32 {
140 unimplemented!()
141 }
142 let rd = output_to_reg(ctx, outputs[0]);
143 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
144 let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
145 let tmp = ctx.alloc_tmp(I32).only_reg().unwrap();
146
147 // ror rd, rn, 32 - (rm & 31)
148 ctx.emit(Inst::AluRRImm8 {
149 alu_op: ALUOp::And,
150 rd: tmp,
151 rn: rm,
152 imm8: UImm8::maybe_from_i64(31).unwrap(),
153 });
154 ctx.emit(Inst::AluRRImm8 {
155 alu_op: ALUOp::Rsb,
156 rd: tmp,
157 rn: tmp.to_reg(),
158 imm8: UImm8::maybe_from_i64(32).unwrap(),
159 });
160 ctx.emit(Inst::AluRRR {
161 alu_op: ALUOp::Ror,
162 rd,
163 rn,
164 rm: tmp.to_reg(),
165 });
166 }
167 Opcode::Smulhi | Opcode::Umulhi => {
168 let ty = ty.unwrap();
169 let is_signed = op == Opcode::Smulhi;
170 match ty {
171 I32 => {
172 let rd_hi = output_to_reg(ctx, outputs[0]);
173 let rd_lo = ctx.alloc_tmp(ty).only_reg().unwrap();
174 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
175 let rm = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
176
177 let alu_op = if is_signed {
178 ALUOp::Smull
179 } else {
180 ALUOp::Umull
181 };
182 ctx.emit(Inst::AluRRRR {
183 alu_op,
184 rd_hi,
185 rd_lo,
186 rn,
187 rm,
188 });
189 }
190 I16 | I8 => {
191 let narrow_mode = if is_signed {
192 NarrowValueMode::SignExtend
193 } else {
194 NarrowValueMode::ZeroExtend
195 };
196 let rd = output_to_reg(ctx, outputs[0]);
197 let rn = input_to_reg(ctx, inputs[0], narrow_mode);
198 let rm = input_to_reg(ctx, inputs[1], narrow_mode);
199
200 ctx.emit(Inst::AluRRR {
201 alu_op: ALUOp::Mul,
202 rd,
203 rn,
204 rm,
205 });
206 let shift_amt = if ty == I16 { 16 } else { 8 };
207 let imm8 = UImm8::maybe_from_i64(shift_amt).unwrap();
208 let alu_op = if is_signed { ALUOp::Asr } else { ALUOp::Lsr };
209
210 ctx.emit(Inst::AluRRImm8 {
211 alu_op,
212 rd,
213 rn: rd.to_reg(),
214 imm8,
215 });
216 }
217 _ => panic!("Unexpected type {} in lower {}!", ty, op),
218 }
219 }
220 Opcode::Bnot => {
221 let rd = output_to_reg(ctx, outputs[0]);
222 let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
223
224 ctx.emit(Inst::AluRRShift {
225 alu_op: ALUOp1::Mvn,
226 rd,
227 rm,
228 shift: None,
229 });
230 }
231 Opcode::Clz | Opcode::Ctz => {
232 let rd = output_to_reg(ctx, outputs[0]);
233 let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend);
234 let ty = ctx.output_ty(insn, 0);
235
236 let in_reg = if op == Opcode::Ctz {
237 ctx.emit(Inst::BitOpRR {
238 bit_op: BitOp::Rbit,
239 rd,
240 rm,
241 });
242 rd.to_reg()
243 } else {
244 rm
245 };
246 ctx.emit(Inst::BitOpRR {
247 bit_op: BitOp::Clz,
248 rd,
249 rm: in_reg,
250 });
251
252 if ty.bits() < 32 {
253 let imm12 = UImm12::maybe_from_i64(32 - ty.bits() as i64).unwrap();
254 ctx.emit(Inst::AluRRImm12 {
255 alu_op: ALUOp::Sub,
256 rd,
257 rn: rd.to_reg(),
258 imm12,
259 });
260 }
261 }
262 Opcode::Bitrev => {
263 let rd = output_to_reg(ctx, outputs[0]);
264 let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
265 let ty = ctx.output_ty(insn, 0);
266 let bit_op = BitOp::Rbit;
267
268 match ty.bits() {
269 32 => ctx.emit(Inst::BitOpRR { bit_op, rd, rm }),
270 n if n < 32 => {
271 let shift = ShiftOpAndAmt::new(
272 ShiftOp::LSL,
273 ShiftOpShiftImm::maybe_from_shift(32 - n as u32).unwrap(),
274 );
275 ctx.emit(Inst::AluRRShift {
276 alu_op: ALUOp1::Mov,
277 rd,
278 rm,
279 shift: Some(shift),
280 });
281 ctx.emit(Inst::BitOpRR {
282 bit_op,
283 rd,
284 rm: rd.to_reg(),
285 });
286 }
287 _ => panic!("Unexpected output type {}", ty),
288 }
289 }
290 Opcode::Icmp | Opcode::Ifcmp => {
291 let condcode = inst_condcode(ctx.data(insn)).unwrap();
292 let cond = lower_condcode(condcode);
293 let is_signed = condcode_is_signed(condcode);
294
295 let narrow_mode = if is_signed {
296 NarrowValueMode::SignExtend
297 } else {
298 NarrowValueMode::ZeroExtend
299 };
300 let rd = output_to_reg(ctx, outputs[0]);
301 let rn = input_to_reg(ctx, inputs[0], narrow_mode);
302 let rm = input_to_reg(ctx, inputs[1], narrow_mode);
303
304 ctx.emit(Inst::Cmp { rn, rm });
305
306 if op == Opcode::Icmp {
307 let mut it_insts = vec![];
308 it_insts.push(CondInst::new(Inst::MovImm16 { rd, imm16: 1 }, true));
309 it_insts.push(CondInst::new(Inst::MovImm16 { rd, imm16: 0 }, false));
310 ctx.emit(Inst::It {
311 cond,
312 insts: it_insts,
313 });
314 }
315 }
316 Opcode::Trueif => {
317 let cmp_insn = ctx
318 .get_input_as_source_or_const(inputs[0].insn, inputs[0].input)
319 .inst
320 .unwrap()
321 .0;
322 debug_assert_eq!(ctx.data(cmp_insn).opcode(), Opcode::Ifcmp);
323 emit_cmp(ctx, cmp_insn);
324
325 let condcode = inst_condcode(ctx.data(insn)).unwrap();
326 let cond = lower_condcode(condcode);
327 let rd = output_to_reg(ctx, outputs[0]);
328
329 let mut it_insts = vec![];
330 it_insts.push(CondInst::new(Inst::MovImm16 { rd, imm16: 1 }, true));
331 it_insts.push(CondInst::new(Inst::MovImm16 { rd, imm16: 0 }, false));
332
333 ctx.emit(Inst::It {
334 cond,
335 insts: it_insts,
336 });
337 }
338 Opcode::Select | Opcode::Selectif => {
339 let cond = if op == Opcode::Select {
340 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend);
341 ctx.emit(Inst::CmpImm8 { rn, imm8: 0 });
342 Cond::Ne
343 } else {
344 // Verification ensures that the input is always a single-def ifcmp.
345 let cmp_insn = ctx
346 .get_input_as_source_or_const(inputs[0].insn, inputs[0].input)
347 .inst
348 .unwrap()
349 .0;
350 debug_assert_eq!(ctx.data(cmp_insn).opcode(), Opcode::Ifcmp);
351 emit_cmp(ctx, cmp_insn);
352
353 let condcode = inst_condcode(ctx.data(insn)).unwrap();
354 lower_condcode(condcode)
355 };
356 let r1 = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
357 let r2 = input_to_reg(ctx, inputs[2], NarrowValueMode::None);
358 let out_reg = output_to_reg(ctx, outputs[0]);
359
360 let mut it_insts = vec![];
361 it_insts.push(CondInst::new(Inst::mov(out_reg, r1), true));
362 it_insts.push(CondInst::new(Inst::mov(out_reg, r2), false));
363
364 ctx.emit(Inst::It {
365 cond,
366 insts: it_insts,
367 });
368 }
369 Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 => {
370 let off = ldst_offset(ctx.data(insn)).unwrap();
371 let elem_ty = match op {
372 Opcode::Istore8 => I8,
373 Opcode::Istore16 => I16,
374 Opcode::Istore32 => I32,
375 Opcode::Store => ctx.input_ty(insn, 0),
376 _ => unreachable!(),
377 };
378 if elem_ty.bits() > 32 {
379 unimplemented!()
380 }
381 let bits = elem_ty.bits() as u8;
382
383 assert_eq!(inputs.len(), 2, "only one input for store memory operands");
384 let rt = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
385 let base = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
386
387 let mem = AMode::RegOffset(base, i64::from(off));
388
389 ctx.emit(Inst::Store { rt, mem, bits });
390 }
391 Opcode::Load
392 | Opcode::Uload8
393 | Opcode::Sload8
394 | Opcode::Uload16
395 | Opcode::Sload16
396 | Opcode::Uload32
397 | Opcode::Sload32 => {
398 let off = ldst_offset(ctx.data(insn)).unwrap();
399 let elem_ty = match op {
400 Opcode::Sload8 | Opcode::Uload8 => I8,
401 Opcode::Sload16 | Opcode::Uload16 => I16,
402 Opcode::Sload32 | Opcode::Uload32 => I32,
403 Opcode::Load => ctx.output_ty(insn, 0),
404 _ => unreachable!(),
405 };
406 if elem_ty.bits() > 32 {
407 unimplemented!()
408 }
409 let bits = elem_ty.bits() as u8;
410
411 let sign_extend = match op {
412 Opcode::Sload8 | Opcode::Sload16 | Opcode::Sload32 => true,
413 _ => false,
414 };
415 let out_reg = output_to_reg(ctx, outputs[0]);
416
417 assert_eq!(inputs.len(), 2, "only one input for store memory operands");
418 let base = input_to_reg(ctx, inputs[1], NarrowValueMode::None);
419 let mem = AMode::RegOffset(base, i64::from(off));
420
421 ctx.emit(Inst::Load {
422 rt: out_reg,
423 mem,
424 bits,
425 sign_extend,
426 });
427 }
428 Opcode::Uextend | Opcode::Sextend => {
429 let output_ty = ty.unwrap();
430 let input_ty = ctx.input_ty(insn, 0);
431 let from_bits = input_ty.bits() as u8;
432 let to_bits = 32;
433 let signed = op == Opcode::Sextend;
434
435 let rm = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
436 let rd = output_to_reg(ctx, outputs[0]);
437
438 if output_ty.bits() > 32 {
439 panic!("Unexpected output type {}", output_ty);
440 }
441 if from_bits < to_bits {
442 ctx.emit(Inst::Extend {
443 rd,
444 rm,
445 from_bits,
446 signed,
447 });
448 }
449 }
450 Opcode::Bint | Opcode::Breduce | Opcode::Bextend | Opcode::Ireduce => {
451 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend);
452 let rd = output_to_reg(ctx, outputs[0]);
453 let ty = ctx.input_ty(insn, 0);
454
455 ctx.emit(Inst::gen_move(rd, rn, ty));
456 }
457 Opcode::Copy => {
458 let rd = output_to_reg(ctx, outputs[0]);
459 let rn = input_to_reg(ctx, inputs[0], NarrowValueMode::None);
460 let ty = ctx.input_ty(insn, 0);
461
462 ctx.emit(Inst::gen_move(rd, rn, ty));
463 }
464 Opcode::Debugtrap => {
465 ctx.emit(Inst::Bkpt);
466 }
467 Opcode::Trap => {
468 let trap_info = inst_trapcode(ctx.data(insn)).unwrap();
469 ctx.emit(Inst::Udf { trap_info })
470 }
471 Opcode::Trapif => {
472 let cmp_insn = ctx
473 .get_input_as_source_or_const(inputs[0].insn, inputs[0].input)
474 .inst
475 .unwrap()
476 .0;
477 debug_assert_eq!(ctx.data(cmp_insn).opcode(), Opcode::Ifcmp);
478 emit_cmp(ctx, cmp_insn);
479
480 let trap_info = inst_trapcode(ctx.data(insn)).unwrap();
481 let condcode = inst_condcode(ctx.data(insn)).unwrap();
482 let cond = lower_condcode(condcode);
483
484 ctx.emit(Inst::TrapIf { cond, trap_info });
485 }
486 Opcode::FallthroughReturn | Opcode::Return => {
487 for (i, input) in inputs.iter().enumerate() {
488 let reg = input_to_reg(ctx, *input, NarrowValueMode::None);
489 let retval_reg = ctx.retval(i).only_reg().unwrap();
490 let ty = ctx.input_ty(insn, i);
491
492 ctx.emit(Inst::gen_move(retval_reg, reg, ty));
493 }
494 }
495 Opcode::Call | Opcode::CallIndirect => {
496 let caller_conv = ctx.abi().call_conv();
497 let (mut abi, inputs) = match op {
498 Opcode::Call => {
499 let (extname, dist) = ctx.call_target(insn).unwrap();
500 let extname = extname.clone();
501 let sig = ctx.call_sig(insn).unwrap();
502 assert_eq!(inputs.len(), sig.params.len());
503 assert_eq!(outputs.len(), sig.returns.len());
504 (
505 Arm32ABICaller::from_func(sig, &extname, dist, caller_conv, flags)?,
506 &inputs[..],
507 )
508 }
509 Opcode::CallIndirect => {
510 let ptr = input_to_reg(ctx, inputs[0], NarrowValueMode::ZeroExtend);
511 let sig = ctx.call_sig(insn).unwrap();
512 assert_eq!(inputs.len() - 1, sig.params.len());
513 assert_eq!(outputs.len(), sig.returns.len());
514 (
515 Arm32ABICaller::from_ptr(sig, ptr, op, caller_conv, flags)?,
516 &inputs[1..],
517 )
518 }
519 _ => unreachable!(),
520 };
521 assert_eq!(inputs.len(), abi.num_args());
522 for (i, input) in inputs.iter().enumerate().filter(|(i, _)| *i <= 3) {
523 let arg_reg = input_to_reg(ctx, *input, NarrowValueMode::None);
524 abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg));
525 }
526 abi.emit_call(ctx);
527 for (i, output) in outputs.iter().enumerate() {
528 let retval_reg = output_to_reg(ctx, *output);
529 abi.emit_copy_retval_to_regs(ctx, i, ValueRegs::one(retval_reg));
530 }
531 }
532 _ => panic!("lowering {} unimplemented!", op),
533 }
534
535 Ok(())
536 }
lower_branch<C: LowerCtx<I = Inst>>( ctx: &mut C, branches: &[IRInst], targets: &[MachLabel], ) -> CodegenResult<()>538 pub(crate) fn lower_branch<C: LowerCtx<I = Inst>>(
539 ctx: &mut C,
540 branches: &[IRInst],
541 targets: &[MachLabel],
542 ) -> CodegenResult<()> {
543 // A block should end with at most two branches. The first may be a
544 // conditional branch; a conditional branch can be followed only by an
545 // unconditional branch or fallthrough. Otherwise, if only one branch,
546 // it may be an unconditional branch, a fallthrough, a return, or a
547 // trap. These conditions are verified by `is_ebb_basic()` during the
548 // verifier pass.
549 assert!(branches.len() <= 2);
550
551 if branches.len() == 2 {
552 // Must be a conditional branch followed by an unconditional branch.
553 let op0 = ctx.data(branches[0]).opcode();
554 let op1 = ctx.data(branches[1]).opcode();
555
556 assert!(op1 == Opcode::Jump || op1 == Opcode::Fallthrough);
557 let taken = BranchTarget::Label(targets[0]);
558 let not_taken = BranchTarget::Label(targets[1]);
559
560 match op0 {
561 Opcode::Brz | Opcode::Brnz => {
562 let rn = input_to_reg(
563 ctx,
564 InsnInput {
565 insn: branches[0],
566 input: 0,
567 },
568 NarrowValueMode::ZeroExtend,
569 );
570 let cond = if op0 == Opcode::Brz {
571 Cond::Eq
572 } else {
573 Cond::Ne
574 };
575
576 ctx.emit(Inst::CmpImm8 { rn, imm8: 0 });
577 ctx.emit(Inst::CondBr {
578 taken,
579 not_taken,
580 cond,
581 });
582 }
583 _ => unimplemented!(),
584 }
585 } else {
586 // Must be an unconditional branch or an indirect branch.
587 let op = ctx.data(branches[0]).opcode();
588 match op {
589 Opcode::Jump | Opcode::Fallthrough => {
590 assert_eq!(branches.len(), 1);
591 // In the Fallthrough case, the machine-independent driver
592 // fills in `targets[0]` with our fallthrough block, so this
593 // is valid for both Jump and Fallthrough.
594 ctx.emit(Inst::Jump {
595 dest: BranchTarget::Label(targets[0]),
596 });
597 }
598 _ => unimplemented!(),
599 }
600 }
601
602 Ok(())
603 }