1 //! This module defines s390x-specific machine instruction types.
2
3 // Some variants are not constructed, but we still want them as options in the future.
4 #![allow(dead_code)]
5
6 use crate::binemit::CodeOffset;
7 use crate::ir::{types, ExternalName, Opcode, TrapCode, Type, ValueLabel};
8 use crate::isa::unwind::UnwindInst;
9 use crate::machinst::*;
10 use crate::{settings, CodegenError, CodegenResult};
11
12 use regalloc::{PrettyPrint, RegUsageCollector, RegUsageMapper};
13 use regalloc::{RealRegUniverse, Reg, RegClass, SpillSlot, VirtualReg, Writable};
14
15 use alloc::boxed::Box;
16 use alloc::vec::Vec;
17 use core::convert::TryFrom;
18 use smallvec::{smallvec, SmallVec};
19 use std::string::{String, ToString};
20
21 pub mod regs;
22 pub use self::regs::*;
23 pub mod imms;
24 pub use self::imms::*;
25 pub mod args;
26 pub use self::args::*;
27 pub mod emit;
28 pub use self::emit::*;
29 pub mod unwind;
30
31 #[cfg(test)]
32 mod emit_tests;
33
34 //=============================================================================
35 // Instructions (top level): definition
36
/// Supported instruction sets.
///
/// Each `Inst` (or, for some formats, each opcode) maps to the minimum
/// facility level required to encode it; see `Inst::available_in_isa`.
// Variant names mirror facility abbreviations, hence the non-camel-case allowance.
#[allow(non_camel_case_types)]
#[derive(Debug)]
pub(crate) enum InstructionSet {
    /// Baseline ISA for cranelift is z14.
    Base,
    /// Miscellaneous-Instruction-Extensions Facility 2 (z15)
    MIE2,
    /// Vector-Enhancements Facility 2 (z15)
    VXRS_EXT2,
}
48
/// An ALU operation. This can be paired with several instruction formats
/// below (see `Inst`) in any combination.
///
/// Naming convention: a trailing `32`/`64` gives the operation width; an
/// additional `Ext16`/`Ext32` suffix marks forms whose second operand is a
/// narrower (16- or 32-bit) value widened before the operation — the exact
/// extension semantics are determined by the emission code (see `emit`).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ALUOp {
    Add32,
    Add32Ext16,
    Add64,
    Add64Ext16,
    Add64Ext32,
    Sub32,
    Sub32Ext16,
    Sub64,
    Sub64Ext16,
    Sub64Ext32,
    Mul32,
    Mul32Ext16,
    Mul64,
    Mul64Ext16,
    Mul64Ext32,
    And32,
    And64,
    Orr32,
    Orr64,
    Xor32,
    Xor64,
    /// NAND
    AndNot32,
    AndNot64,
    /// NOR
    OrrNot32,
    OrrNot64,
    /// XNOR
    XorNot32,
    XorNot64,
}
84
85 impl ALUOp {
available_from(&self) -> InstructionSet86 pub(crate) fn available_from(&self) -> InstructionSet {
87 match self {
88 ALUOp::AndNot32 | ALUOp::AndNot64 => InstructionSet::MIE2,
89 ALUOp::OrrNot32 | ALUOp::OrrNot64 => InstructionSet::MIE2,
90 ALUOp::XorNot32 | ALUOp::XorNot64 => InstructionSet::MIE2,
91 _ => InstructionSet::Base,
92 }
93 }
94 }
95
/// A one-operand (unary) integer operation.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum UnaryOp {
    Abs32,
    Abs64,
    Abs64Ext32,
    Neg32,
    Neg64,
    Neg64Ext32,
    /// Population count computed per byte.
    PopcntByte,
    /// Population count over the full register (requires MIE2; see `available_from`).
    PopcntReg,
}
107
108 impl UnaryOp {
available_from(&self) -> InstructionSet109 pub(crate) fn available_from(&self) -> InstructionSet {
110 match self {
111 UnaryOp::PopcntReg => InstructionSet::MIE2,
112 _ => InstructionSet::Base,
113 }
114 }
115 }
116
/// A shift or rotate operation: rotate left (`RotL`), logical shift
/// left/right (`LShL`/`LShR`), arithmetic shift right (`AShR`), each in
/// 32- and 64-bit widths.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShiftOp {
    RotL32,
    RotL64,
    LShL32,
    LShL64,
    LShR32,
    LShR64,
    AShR32,
    AShR64,
}
128
/// An integer comparison operation.
///
/// `CmpS*` are signed compares, `CmpL*` are logical (unsigned) compares,
/// following the s390x mnemonic convention (C vs. CL). `Ext16`/`Ext32`
/// suffixes mark forms comparing against a narrower, widened operand.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum CmpOp {
    CmpS32,
    CmpS32Ext16,
    CmpS64,
    CmpS64Ext16,
    CmpS64Ext32,
    CmpL32,
    CmpL32Ext16,
    CmpL64,
    CmpL64Ext16,
    CmpL64Ext32,
}
143
/// A floating-point unit (FPU) operation with one arg.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FPUOp1 {
    Abs32,
    Abs64,
    Neg32,
    Neg64,
    /// Negated absolute value (load negative).
    NegAbs32,
    NegAbs64,
    Sqrt32,
    Sqrt64,
    /// Precision widening: f32 -> f64.
    Cvt32To64,
    /// Precision narrowing: f64 -> f32.
    Cvt64To32,
}
158
/// A floating-point unit (FPU) operation with two args.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FPUOp2 {
    Add32,
    Add64,
    Sub32,
    Sub64,
    Mul32,
    Mul64,
    Div32,
    Div64,
    Max32,
    Max64,
    Min32,
    Min64,
}
175
/// A floating-point unit (FPU) operation with three args:
/// fused multiply-add (`MAdd`) and multiply-subtract (`MSub`).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FPUOp3 {
    MAdd32,
    MAdd64,
    MSub32,
    MSub64,
}
184
/// A conversion from an FP to an integer value.
/// Variants are named `<source float>To<U=unsigned/I=signed integer>`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FpuToIntOp {
    F32ToU32,
    F32ToI32,
    F32ToU64,
    F32ToI64,
    F64ToU32,
    F64ToI32,
    F64ToU64,
    F64ToI64,
}
197
/// A conversion from an integer to an FP value.
/// Variants are named `<U=unsigned/I=signed integer>To<dest float>`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum IntToFpuOp {
    U32ToF32,
    I32ToF32,
    U32ToF64,
    I32ToF64,
    U64ToF32,
    I64ToF32,
    U64ToF64,
    I64ToF64,
}
210
/// Modes for FP rounding ops: round down (floor) or up (ceil), or toward zero (trunc), or to
/// nearest, and for 32- or 64-bit FP values.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum FpuRoundMode {
    /// Round toward minus infinity (floor).
    Minus32,
    Minus64,
    /// Round toward plus infinity (ceil).
    Plus32,
    Plus64,
    /// Round toward zero (trunc).
    Zero32,
    Zero64,
    /// Round to nearest.
    Nearest32,
    Nearest64,
}
224
/// Additional information for (direct) Call instructions, left out of line to lower the size of
/// the Inst enum.
#[derive(Clone, Debug)]
pub struct CallInfo {
    /// Symbolic name of the call target.
    pub dest: ExternalName,
    /// Registers read by the call (typically argument registers).
    pub uses: Vec<Reg>,
    /// Registers written/clobbered by the call (typically return/clobbered registers).
    pub defs: Vec<Writable<Reg>>,
    /// The CLIF opcode this call was lowered from.
    pub opcode: Opcode,
}
234
/// Additional information for CallInd instructions, left out of line to lower the size of the Inst
/// enum.
#[derive(Clone, Debug)]
pub struct CallIndInfo {
    /// Register holding the address of the call target.
    pub rn: Reg,
    /// Registers read by the call (typically argument registers).
    pub uses: Vec<Reg>,
    /// Registers written/clobbered by the call (typically return/clobbered registers).
    pub defs: Vec<Writable<Reg>>,
    /// The CLIF opcode this call was lowered from.
    pub opcode: Opcode,
}
244
/// Additional information for JTSequence instructions, left out of line to lower the size of the Inst
/// enum.
#[derive(Clone, Debug)]
pub struct JTSequenceInfo {
    /// Branch target when the index is out of range of `targets`.
    pub default_target: BranchTarget,
    /// Jump-table entries, indexed by the (in-range) index value.
    pub targets: Vec<BranchTarget>,
    pub targets_for_term: Vec<MachLabel>, // needed for MachTerminator.
}
253
/// Instruction formats.
#[derive(Clone, Debug)]
pub enum Inst {
    /// A no-op of zero size.
    Nop0,

    /// A no-op of size two bytes.
    Nop2,

    /// An ALU operation with two register sources and a register destination.
    AluRRR {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        rn: Reg,
        rm: Reg,
    },
    /// An ALU operation with a register source and a signed 16-bit
    /// immediate source, and a separate register destination.
    AluRRSImm16 {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        rn: Reg,
        imm: i16,
    },
    /// An ALU operation with a register in-/out operand and
    /// a second register source.
    AluRR {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        rm: Reg,
    },
    /// An ALU operation with a register in-/out operand and
    /// a memory source.
    AluRX {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// An ALU operation with a register in-/out operand and a signed 16-bit
    /// immediate source.
    AluRSImm16 {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        imm: i16,
    },
    /// An ALU operation with a register in-/out operand and a signed 32-bit
    /// immediate source.
    AluRSImm32 {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        imm: i32,
    },
    /// An ALU operation with a register in-/out operand and an unsigned 32-bit
    /// immediate source.
    AluRUImm32 {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        imm: u32,
    },
    /// An ALU operation with a register in-/out operand and a shifted 16-bit
    /// immediate source.
    AluRUImm16Shifted {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        imm: UImm16Shifted,
    },
    /// An ALU operation with a register in-/out operand and a shifted 32-bit
    /// immediate source.
    AluRUImm32Shifted {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        imm: UImm32Shifted,
    },
    /// A multiply operation with two register sources and a register pair destination.
    /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs.
    SMulWide {
        rn: Reg,
        rm: Reg,
    },
    /// A multiply operation with an in/out register pair, and an extra register source.
    /// Only the lower half of the register pair is used as input.
    /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs.
    UMulWide {
        rn: Reg,
    },
    /// A divide operation with an in/out register pair, and an extra register source.
    /// Only the lower half of the register pair is used as input.
    /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs.
    SDivMod32 {
        rn: Reg,
    },
    /// Same as `SDivMod32`, for 64-bit operands.
    SDivMod64 {
        rn: Reg,
    },
    /// A divide operation with an in/out register pair, and an extra register source.
    /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs.
    UDivMod32 {
        rn: Reg,
    },
    /// Same as `UDivMod32`, for 64-bit operands.
    UDivMod64 {
        rn: Reg,
    },
    /// A FLOGR operation with a register source and a register pair destination.
    /// FIXME: The pair is hard-coded as %r0/%r1 because regalloc cannot handle pairs.
    Flogr {
        rn: Reg,
    },

    /// A shift instruction with a register source, a register destination,
    /// and an immediate plus an optional register as shift count.
    ShiftRR {
        shift_op: ShiftOp,
        rd: Writable<Reg>,
        rn: Reg,
        shift_imm: SImm20,
        shift_reg: Option<Reg>,
    },

    /// An unary operation with a register source and a register destination.
    UnaryRR {
        op: UnaryOp,
        rd: Writable<Reg>,
        rn: Reg,
    },

    /// A compare operation with two register sources.
    CmpRR {
        op: CmpOp,
        rn: Reg,
        rm: Reg,
    },
    /// A compare operation with a register source and a memory source.
    CmpRX {
        op: CmpOp,
        rn: Reg,
        mem: MemArg,
    },
    /// A compare operation with a register source and a signed 16-bit
    /// immediate source.
    CmpRSImm16 {
        op: CmpOp,
        rn: Reg,
        imm: i16,
    },
    /// A compare operation with a register source and a signed 32-bit
    /// immediate source.
    CmpRSImm32 {
        op: CmpOp,
        rn: Reg,
        imm: i32,
    },
    /// A compare operation with a register source and an unsigned 32-bit
    /// immediate source.
    CmpRUImm32 {
        op: CmpOp,
        rn: Reg,
        imm: u32,
    },
    /// A compare-and-trap instruction with two register sources.
    CmpTrapRR {
        op: CmpOp,
        rn: Reg,
        rm: Reg,
        cond: Cond,
        trap_code: TrapCode,
    },
    /// A compare-and-trap operation with a register source and a signed 16-bit
    /// immediate source.
    CmpTrapRSImm16 {
        op: CmpOp,
        rn: Reg,
        imm: i16,
        cond: Cond,
        trap_code: TrapCode,
    },
    /// A compare-and-trap operation with a register source and an unsigned 16-bit
    /// immediate source.
    CmpTrapRUImm16 {
        op: CmpOp,
        rn: Reg,
        imm: u16,
        cond: Cond,
        trap_code: TrapCode,
    },

    /// An atomic read-modify-write operation with a memory in-/out operand,
    /// a register destination, and a register source.
    AtomicRmw {
        alu_op: ALUOp,
        rd: Writable<Reg>,
        rn: Reg,
        mem: MemArg,
    },
    /// A 32-bit atomic compare-and-swap operation.
    AtomicCas32 {
        rd: Writable<Reg>,
        rn: Reg,
        mem: MemArg,
    },
    /// A 64-bit atomic compare-and-swap operation.
    AtomicCas64 {
        rd: Writable<Reg>,
        rn: Reg,
        mem: MemArg,
    },
    /// A memory fence operation.
    Fence,

    /// A 32-bit load.
    Load32 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// An unsigned (zero-extending) 8-bit to 32-bit load.
    Load32ZExt8 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// A signed (sign-extending) 8-bit to 32-bit load.
    Load32SExt8 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// An unsigned (zero-extending) 16-bit to 32-bit load.
    Load32ZExt16 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// A signed (sign-extending) 16-bit to 32-bit load.
    Load32SExt16 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// A 64-bit load.
    Load64 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// An unsigned (zero-extending) 8-bit to 64-bit load.
    Load64ZExt8 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// A signed (sign-extending) 8-bit to 64-bit load.
    Load64SExt8 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// An unsigned (zero-extending) 16-bit to 64-bit load.
    Load64ZExt16 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// A signed (sign-extending) 16-bit to 64-bit load.
    Load64SExt16 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// An unsigned (zero-extending) 32-bit to 64-bit load.
    Load64ZExt32 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// A signed (sign-extending) 32-bit to 64-bit load.
    Load64SExt32 {
        rd: Writable<Reg>,
        mem: MemArg,
    },

    /// A 16-bit byte-reversed load.
    LoadRev16 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// A 32-bit byte-reversed load.
    LoadRev32 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// A 64-bit byte-reversed load.
    LoadRev64 {
        rd: Writable<Reg>,
        mem: MemArg,
    },

    /// An 8-bit store.
    Store8 {
        rd: Reg,
        mem: MemArg,
    },
    /// A 16-bit store.
    Store16 {
        rd: Reg,
        mem: MemArg,
    },
    /// A 32-bit store.
    Store32 {
        rd: Reg,
        mem: MemArg,
    },
    /// A 64-bit store.
    Store64 {
        rd: Reg,
        mem: MemArg,
    },
    /// An 8-bit store of an immediate.
    StoreImm8 {
        imm: u8,
        mem: MemArg,
    },
    /// A 16-bit store of an immediate.
    StoreImm16 {
        imm: i16,
        mem: MemArg,
    },
    /// A 32-bit store of a sign-extended 16-bit immediate.
    StoreImm32SExt16 {
        imm: i16,
        mem: MemArg,
    },
    /// A 64-bit store of a sign-extended 16-bit immediate.
    StoreImm64SExt16 {
        imm: i16,
        mem: MemArg,
    },

    /// A 16-bit byte-reversed store.
    StoreRev16 {
        rd: Reg,
        mem: MemArg,
    },
    /// A 32-bit byte-reversed store.
    StoreRev32 {
        rd: Reg,
        mem: MemArg,
    },
    /// A 64-bit byte-reversed store.
    StoreRev64 {
        rd: Reg,
        mem: MemArg,
    },

    /// A load-multiple instruction.
    LoadMultiple64 {
        rt: Writable<Reg>,
        rt2: Writable<Reg>,
        addr_reg: Reg,
        addr_off: SImm20,
    },
    /// A store-multiple instruction.
    StoreMultiple64 {
        rt: Reg,
        rt2: Reg,
        addr_reg: Reg,
        addr_off: SImm20,
    },

    /// A 32-bit move instruction.
    Mov32 {
        rd: Writable<Reg>,
        rm: Reg,
    },
    /// A 64-bit move instruction.
    Mov64 {
        rd: Writable<Reg>,
        rm: Reg,
    },
    /// A 32-bit move instruction with a full 32-bit immediate.
    Mov32Imm {
        rd: Writable<Reg>,
        imm: u32,
    },
    /// A 32-bit move instruction with a 16-bit signed immediate.
    Mov32SImm16 {
        rd: Writable<Reg>,
        imm: i16,
    },
    /// A 64-bit move instruction with a 16-bit signed immediate.
    Mov64SImm16 {
        rd: Writable<Reg>,
        imm: i16,
    },
    /// A 64-bit move instruction with a 32-bit signed immediate.
    Mov64SImm32 {
        rd: Writable<Reg>,
        imm: i32,
    },
    /// A 64-bit move instruction with a shifted 16-bit immediate.
    Mov64UImm16Shifted {
        rd: Writable<Reg>,
        imm: UImm16Shifted,
    },
    /// A 64-bit move instruction with a shifted 32-bit immediate.
    Mov64UImm32Shifted {
        rd: Writable<Reg>,
        imm: UImm32Shifted,
    },

    /// A 64-bit insert instruction with a shifted 16-bit immediate.
    Insert64UImm16Shifted {
        rd: Writable<Reg>,
        imm: UImm16Shifted,
    },
    /// A 64-bit insert instruction with a shifted 32-bit immediate.
    Insert64UImm32Shifted {
        rd: Writable<Reg>,
        imm: UImm32Shifted,
    },

    /// A sign- or zero-extend operation.
    Extend {
        rd: Writable<Reg>,
        rn: Reg,
        signed: bool,
        from_bits: u8,
        to_bits: u8,
    },

    /// A 32-bit conditional move instruction.
    CMov32 {
        rd: Writable<Reg>,
        cond: Cond,
        rm: Reg,
    },
    /// A 64-bit conditional move instruction.
    CMov64 {
        rd: Writable<Reg>,
        cond: Cond,
        rm: Reg,
    },
    /// A 32-bit conditional move instruction with a 16-bit signed immediate.
    CMov32SImm16 {
        rd: Writable<Reg>,
        cond: Cond,
        imm: i16,
    },
    /// A 64-bit conditional move instruction with a 16-bit signed immediate.
    CMov64SImm16 {
        rd: Writable<Reg>,
        cond: Cond,
        imm: i16,
    },

    /// 32-bit FPU move.
    FpuMove32 {
        rd: Writable<Reg>,
        rn: Reg,
    },
    /// 64-bit FPU move.
    FpuMove64 {
        rd: Writable<Reg>,
        rn: Reg,
    },

    /// A 32-bit conditional move FPU instruction.
    FpuCMov32 {
        rd: Writable<Reg>,
        cond: Cond,
        rm: Reg,
    },
    /// A 64-bit conditional move FPU instruction.
    FpuCMov64 {
        rd: Writable<Reg>,
        cond: Cond,
        rm: Reg,
    },

    /// A 64-bit move instruction from GPR to FPR.
    MovToFpr {
        rd: Writable<Reg>,
        rn: Reg,
    },
    /// A 64-bit move instruction from FPR to GPR.
    MovFromFpr {
        rd: Writable<Reg>,
        rn: Reg,
    },

    /// 1-op FPU instruction.
    FpuRR {
        fpu_op: FPUOp1,
        rd: Writable<Reg>,
        rn: Reg,
    },

    /// 2-op FPU instruction; `rd` is an in-/out operand.
    FpuRRR {
        fpu_op: FPUOp2,
        rd: Writable<Reg>,
        rm: Reg,
    },

    /// 3-op FPU instruction.
    FpuRRRR {
        fpu_op: FPUOp3,
        rd: Writable<Reg>,
        rn: Reg,
        rm: Reg,
    },

    /// FPU copy sign instruction.
    FpuCopysign {
        rd: Writable<Reg>,
        rn: Reg,
        rm: Reg,
    },

    /// FPU comparison, single-precision (32 bit).
    FpuCmp32 {
        rn: Reg,
        rm: Reg,
    },

    /// FPU comparison, double-precision (64 bit).
    FpuCmp64 {
        rn: Reg,
        rm: Reg,
    },

    /// Floating-point load, single-precision (32 bit).
    FpuLoad32 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// Floating-point store, single-precision (32 bit).
    FpuStore32 {
        rd: Reg,
        mem: MemArg,
    },
    /// Floating-point load, double-precision (64 bit).
    FpuLoad64 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// Floating-point store, double-precision (64 bit).
    FpuStore64 {
        rd: Reg,
        mem: MemArg,
    },
    /// Floating-point byte-reversed load, single-precision (32 bit).
    FpuLoadRev32 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// Floating-point byte-reversed store, single-precision (32 bit).
    FpuStoreRev32 {
        rd: Reg,
        mem: MemArg,
    },
    /// Floating-point byte-reversed load, double-precision (64 bit).
    FpuLoadRev64 {
        rd: Writable<Reg>,
        mem: MemArg,
    },
    /// Floating-point byte-reversed store, double-precision (64 bit).
    FpuStoreRev64 {
        rd: Reg,
        mem: MemArg,
    },

    /// Load an inline single-precision (32 bit) floating-point constant.
    LoadFpuConst32 {
        rd: Writable<Reg>,
        const_data: f32,
    },

    /// Load an inline double-precision (64 bit) floating-point constant.
    LoadFpuConst64 {
        rd: Writable<Reg>,
        const_data: f64,
    },

    /// Conversion: FP -> integer.
    FpuToInt {
        op: FpuToIntOp,
        rd: Writable<Reg>,
        rn: Reg,
    },

    /// Conversion: integer -> FP.
    IntToFpu {
        op: IntToFpuOp,
        rd: Writable<Reg>,
        rn: Reg,
    },

    /// Round to integer.
    FpuRound {
        op: FpuRoundMode,
        rd: Writable<Reg>,
        rn: Reg,
    },

    /// 2-op FPU instruction implemented as vector instruction with the W bit.
    FpuVecRRR {
        fpu_op: FPUOp2,
        rd: Writable<Reg>,
        rn: Reg,
        rm: Reg,
    },

    /// A machine call instruction.
    Call {
        link: Writable<Reg>,
        info: Box<CallInfo>,
    },
    /// A machine indirect-call instruction.
    CallInd {
        link: Writable<Reg>,
        info: Box<CallIndInfo>,
    },

    // ---- branches (exactly one must appear at end of BB) ----
    /// A machine return instruction.
    Ret {
        link: Reg,
    },

    /// A placeholder instruction, generating no code, meaning that a function epilogue must be
    /// inserted there.
    EpiloguePlaceholder,

    /// An unconditional branch.
    Jump {
        dest: BranchTarget,
    },

    /// A conditional branch. Contains two targets; at emission time, both are emitted, but
    /// the MachBuffer knows to truncate the trailing branch if fallthrough. We optimize the
    /// choice of taken/not_taken (inverting the branch polarity as needed) based on the
    /// fallthrough at the time of lowering.
    CondBr {
        taken: BranchTarget,
        not_taken: BranchTarget,
        cond: Cond,
    },

    /// A conditional trap: execute a `Trap` if the condition is true. This is
    /// one VCode instruction because it uses embedded control flow; it is
    /// logically a single-in, single-out region, but needs to appear as one
    /// unit to the register allocator.
    ///
    /// The `Cond` gives the conditional-branch condition that will
    /// *execute* the embedded `Trap`. (In the emitted code, we use the inverse
    /// of this condition in a branch that skips the trap instruction.)
    TrapIf {
        cond: Cond,
        trap_code: TrapCode,
    },

    /// A one-way conditional branch, invisible to the CFG processing; used *only* as part of
    /// straight-line sequences in code to be emitted.
    ///
    /// In more detail:
    /// - This branch is lowered to a branch at the machine-code level, but does not end a basic
    ///   block, and does not create edges in the CFG seen by regalloc.
    /// - Thus, it is *only* valid to use as part of a single-in, single-out sequence that is
    ///   lowered from a single CLIF instruction. For example, certain arithmetic operations may
    ///   use these branches to handle certain conditions, such as overflows, traps, etc.
    ///
    /// See, e.g., the lowering of `trapif` (conditional trap) for an example.
    OneWayCondBr {
        target: BranchTarget,
        cond: Cond,
    },

    /// An indirect branch through a register, augmented with set of all
    /// possible successors.
    IndirectBr {
        rn: Reg,
        targets: Vec<MachLabel>,
    },

    /// A "debugtrap" instruction, used for e.g. traps and debug breakpoints.
    Debugtrap,

    /// An instruction guaranteed to always be undefined and to trigger an illegal instruction at
    /// runtime.
    Trap {
        trap_code: TrapCode,
    },

    /// Jump-table sequence, as one compound instruction (see note in lower.rs
    /// for rationale).
    JTSequence {
        info: Box<JTSequenceInfo>,
        ridx: Reg,
        rtmp1: Writable<Reg>,
        rtmp2: Writable<Reg>,
    },

    /// Load an inline symbol reference with RelocDistance::Far.
    LoadExtNameFar {
        rd: Writable<Reg>,
        name: Box<ExternalName>,
        offset: i64,
    },

    /// Load address referenced by `mem` into `rd`.
    LoadAddr {
        rd: Writable<Reg>,
        mem: MemArg,
    },

    /// Marker, no-op in generated code: SP "virtual offset" is adjusted. This
    /// controls how MemArg::NominalSPOffset args are lowered.
    VirtualSPOffsetAdj {
        offset: i64,
    },

    /// A definition of a value label.
    ValueLabelMarker {
        reg: Reg,
        label: ValueLabel,
    },

    /// An unwind pseudoinstruction describing the state of the
    /// machine at this program point.
    Unwind {
        inst: UnwindInst,
    },
}
975
#[test]
fn inst_size_test() {
    // Guard against unintentionally growing the size of the Inst enum:
    // large payloads are boxed (see CallInfo/CallIndInfo/JTSequenceInfo)
    // precisely to keep each variant within this budget.
    let expected_bytes = 32;
    assert_eq!(std::mem::size_of::<Inst>(), expected_bytes);
}
982
impl Inst {
    /// Retrieve the ISA feature set in which the instruction is available.
    fn available_in_isa(&self) -> InstructionSet {
        match self {
            // These instructions are part of the baseline ISA for cranelift (z14)
            Inst::Nop0
            | Inst::Nop2
            | Inst::AluRRSImm16 { .. }
            | Inst::AluRR { .. }
            | Inst::AluRX { .. }
            | Inst::AluRSImm16 { .. }
            | Inst::AluRSImm32 { .. }
            | Inst::AluRUImm32 { .. }
            | Inst::AluRUImm16Shifted { .. }
            | Inst::AluRUImm32Shifted { .. }
            | Inst::ShiftRR { .. }
            | Inst::SMulWide { .. }
            | Inst::UMulWide { .. }
            | Inst::SDivMod32 { .. }
            | Inst::SDivMod64 { .. }
            | Inst::UDivMod32 { .. }
            | Inst::UDivMod64 { .. }
            | Inst::Flogr { .. }
            | Inst::CmpRR { .. }
            | Inst::CmpRX { .. }
            | Inst::CmpRSImm16 { .. }
            | Inst::CmpRSImm32 { .. }
            | Inst::CmpRUImm32 { .. }
            | Inst::CmpTrapRR { .. }
            | Inst::CmpTrapRSImm16 { .. }
            | Inst::CmpTrapRUImm16 { .. }
            | Inst::AtomicRmw { .. }
            | Inst::AtomicCas32 { .. }
            | Inst::AtomicCas64 { .. }
            | Inst::Fence
            | Inst::Load32 { .. }
            | Inst::Load32ZExt8 { .. }
            | Inst::Load32SExt8 { .. }
            | Inst::Load32ZExt16 { .. }
            | Inst::Load32SExt16 { .. }
            | Inst::Load64 { .. }
            | Inst::Load64ZExt8 { .. }
            | Inst::Load64SExt8 { .. }
            | Inst::Load64ZExt16 { .. }
            | Inst::Load64SExt16 { .. }
            | Inst::Load64ZExt32 { .. }
            | Inst::Load64SExt32 { .. }
            | Inst::LoadRev16 { .. }
            | Inst::LoadRev32 { .. }
            | Inst::LoadRev64 { .. }
            | Inst::Store8 { .. }
            | Inst::Store16 { .. }
            | Inst::Store32 { .. }
            | Inst::Store64 { .. }
            | Inst::StoreImm8 { .. }
            | Inst::StoreImm16 { .. }
            | Inst::StoreImm32SExt16 { .. }
            | Inst::StoreImm64SExt16 { .. }
            | Inst::StoreRev16 { .. }
            | Inst::StoreRev32 { .. }
            | Inst::StoreRev64 { .. }
            | Inst::LoadMultiple64 { .. }
            | Inst::StoreMultiple64 { .. }
            | Inst::Mov32 { .. }
            | Inst::Mov64 { .. }
            | Inst::Mov32Imm { .. }
            | Inst::Mov32SImm16 { .. }
            | Inst::Mov64SImm16 { .. }
            | Inst::Mov64SImm32 { .. }
            | Inst::Mov64UImm16Shifted { .. }
            | Inst::Mov64UImm32Shifted { .. }
            | Inst::Insert64UImm16Shifted { .. }
            | Inst::Insert64UImm32Shifted { .. }
            | Inst::Extend { .. }
            | Inst::CMov32 { .. }
            | Inst::CMov64 { .. }
            | Inst::CMov32SImm16 { .. }
            | Inst::CMov64SImm16 { .. }
            | Inst::FpuMove32 { .. }
            | Inst::FpuMove64 { .. }
            | Inst::FpuCMov32 { .. }
            | Inst::FpuCMov64 { .. }
            | Inst::MovToFpr { .. }
            | Inst::MovFromFpr { .. }
            | Inst::FpuRR { .. }
            | Inst::FpuRRR { .. }
            | Inst::FpuRRRR { .. }
            | Inst::FpuCopysign { .. }
            | Inst::FpuCmp32 { .. }
            | Inst::FpuCmp64 { .. }
            | Inst::FpuLoad32 { .. }
            | Inst::FpuStore32 { .. }
            | Inst::FpuLoad64 { .. }
            | Inst::FpuStore64 { .. }
            | Inst::LoadFpuConst32 { .. }
            | Inst::LoadFpuConst64 { .. }
            | Inst::FpuToInt { .. }
            | Inst::IntToFpu { .. }
            | Inst::FpuRound { .. }
            | Inst::FpuVecRRR { .. }
            | Inst::Call { .. }
            | Inst::CallInd { .. }
            | Inst::Ret { .. }
            | Inst::EpiloguePlaceholder
            | Inst::Jump { .. }
            | Inst::CondBr { .. }
            | Inst::TrapIf { .. }
            | Inst::OneWayCondBr { .. }
            | Inst::IndirectBr { .. }
            | Inst::Debugtrap
            | Inst::Trap { .. }
            | Inst::JTSequence { .. }
            | Inst::LoadExtNameFar { .. }
            | Inst::LoadAddr { .. }
            | Inst::VirtualSPOffsetAdj { .. }
            | Inst::ValueLabelMarker { .. }
            | Inst::Unwind { .. } => InstructionSet::Base,

            // These depend on the opcode
            Inst::AluRRR { alu_op, .. } => alu_op.available_from(),
            Inst::UnaryRR { op, .. } => op.available_from(),

            // These are all part of VXRS_EXT2
            Inst::FpuLoadRev32 { .. }
            | Inst::FpuStoreRev32 { .. }
            | Inst::FpuLoadRev64 { .. }
            | Inst::FpuStoreRev64 { .. } => InstructionSet::VXRS_EXT2,
        }
    }

    /// Create a 64-bit move instruction, selecting GPR or FPR move based on
    /// the register class of the operands (which must match).
    pub fn mov64(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
        assert!(to_reg.to_reg().get_class() == from_reg.get_class());
        if from_reg.get_class() == RegClass::I64 {
            Inst::Mov64 {
                rd: to_reg,
                rm: from_reg,
            }
        } else {
            Inst::FpuMove64 {
                rd: to_reg,
                rn: from_reg,
            }
        }
    }

    /// Create a 32-bit move instruction, selecting GPR or FPR move based on
    /// the register class of the source register.
    pub fn mov32(to_reg: Writable<Reg>, from_reg: Reg) -> Inst {
        if from_reg.get_class() == RegClass::I64 {
            Inst::Mov32 {
                rd: to_reg,
                rm: from_reg,
            }
        } else {
            Inst::FpuMove32 {
                rd: to_reg,
                rn: from_reg,
            }
        }
    }

    /// Create an instruction that loads a 64-bit integer constant.
    ///
    /// Tries progressively more general single-instruction encodings, and
    /// falls back to a two-instruction (move-high + insert-low) sequence for
    /// arbitrary 64-bit values.
    pub fn load_constant64(rd: Writable<Reg>, value: u64) -> SmallVec<[Inst; 4]> {
        if let Ok(imm) = i16::try_from(value as i64) {
            // 16-bit signed immediate
            smallvec![Inst::Mov64SImm16 { rd, imm }]
        } else if let Ok(imm) = i32::try_from(value as i64) {
            // 32-bit signed immediate
            smallvec![Inst::Mov64SImm32 { rd, imm }]
        } else if let Some(imm) = UImm16Shifted::maybe_from_u64(value) {
            // 16-bit shifted immediate
            smallvec![Inst::Mov64UImm16Shifted { rd, imm }]
        } else if let Some(imm) = UImm32Shifted::maybe_from_u64(value) {
            // 32-bit shifted immediate
            smallvec![Inst::Mov64UImm32Shifted { rd, imm }]
        } else {
            // General case: materialize the high half with a move, then
            // insert the low half without disturbing the high bits.
            let mut insts = smallvec![];
            let hi = value & 0xffff_ffff_0000_0000u64;
            let lo = value & 0x0000_0000_ffff_ffffu64;

            if let Some(imm) = UImm16Shifted::maybe_from_u64(hi) {
                // 16-bit shifted immediate
                insts.push(Inst::Mov64UImm16Shifted { rd, imm });
            } else if let Some(imm) = UImm32Shifted::maybe_from_u64(hi) {
                // 32-bit shifted immediate
                insts.push(Inst::Mov64UImm32Shifted { rd, imm });
            } else {
                // `hi` has only its upper 32 bits set, so one of the shifted
                // forms above must match.
                unreachable!();
            }

            if let Some(imm) = UImm16Shifted::maybe_from_u64(lo) {
                // 16-bit shifted immediate
                insts.push(Inst::Insert64UImm16Shifted { rd, imm });
            } else if let Some(imm) = UImm32Shifted::maybe_from_u64(lo) {
                // 32-bit shifted immediate
                insts.push(Inst::Insert64UImm32Shifted { rd, imm });
            } else {
                // `lo` has only its lower 32 bits set, so one of the shifted
                // forms above must match.
                unreachable!();
            }

            insts
        }
    }

    /// Create an instruction that loads a 32-bit integer constant.
    pub fn load_constant32(rd: Writable<Reg>, value: u32) -> SmallVec<[Inst; 4]> {
        if let Ok(imm) = i16::try_from(value as i32) {
            // 16-bit signed immediate
            smallvec![Inst::Mov32SImm16 { rd, imm }]
        } else {
            // 32-bit full immediate
            smallvec![Inst::Mov32Imm { rd, imm: value }]
        }
    }

    /// Create an instruction that loads a 32-bit floating-point constant.
    pub fn load_fp_constant32(rd: Writable<Reg>, value: f32) -> Inst {
        // TODO: use LZER to load 0.0
        Inst::LoadFpuConst32 {
            rd,
            const_data: value,
        }
    }

    /// Create an instruction that loads a 64-bit floating-point constant.
    pub fn load_fp_constant64(rd: Writable<Reg>, value: f64) -> Inst {
        // TODO: use LZDR to load 0.0
        Inst::LoadFpuConst64 {
            rd,
            const_data: value,
        }
    }

    /// Generic constructor for a load (zero-extending where appropriate).
    ///
    /// Panics (`unimplemented!`) for types without a load form here.
    pub fn gen_load(into_reg: Writable<Reg>, mem: MemArg, ty: Type) -> Inst {
        match ty {
            types::B1 | types::B8 | types::I8 => Inst::Load64ZExt8 { rd: into_reg, mem },
            types::B16 | types::I16 => Inst::Load64ZExt16 { rd: into_reg, mem },
            types::B32 | types::I32 => Inst::Load64ZExt32 { rd: into_reg, mem },
            types::B64 | types::I64 | types::R64 => Inst::Load64 { rd: into_reg, mem },
            types::F32 => Inst::FpuLoad32 { rd: into_reg, mem },
            types::F64 => Inst::FpuLoad64 { rd: into_reg, mem },
            _ => unimplemented!("gen_load({})", ty),
        }
    }

    /// Generic constructor for a store.
    ///
    /// Panics (`unimplemented!`) for types without a store form here.
    pub fn gen_store(mem: MemArg, from_reg: Reg, ty: Type) -> Inst {
        match ty {
            types::B1 | types::B8 | types::I8 => Inst::Store8 { rd: from_reg, mem },
            types::B16 | types::I16 => Inst::Store16 { rd: from_reg, mem },
            types::B32 | types::I32 => Inst::Store32 { rd: from_reg, mem },
            types::B64 | types::I64 | types::R64 => Inst::Store64 { rd: from_reg, mem },
            types::F32 => Inst::FpuStore32 { rd: from_reg, mem },
            types::F64 => Inst::FpuStore64 { rd: from_reg, mem },
            _ => unimplemented!("gen_store({})", ty),
        }
    }
}
1242
1243 //=============================================================================
1244 // Instructions: get_regs
1245
memarg_regs(memarg: &MemArg, collector: &mut RegUsageCollector)1246 fn memarg_regs(memarg: &MemArg, collector: &mut RegUsageCollector) {
1247 match memarg {
1248 &MemArg::BXD12 { base, index, .. } | &MemArg::BXD20 { base, index, .. } => {
1249 if base != zero_reg() {
1250 collector.add_use(base);
1251 }
1252 if index != zero_reg() {
1253 collector.add_use(index);
1254 }
1255 }
1256 &MemArg::Label { .. } | &MemArg::Symbol { .. } => {}
1257 &MemArg::RegOffset { reg, .. } => {
1258 collector.add_use(reg);
1259 }
1260 &MemArg::InitialSPOffset { .. } | &MemArg::NominalSPOffset { .. } => {
1261 collector.add_use(stack_reg());
1262 }
1263 }
1264 }
1265
s390x_get_regs(inst: &Inst, collector: &mut RegUsageCollector)1266 fn s390x_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
1267 match inst {
1268 &Inst::AluRRR { rd, rn, rm, .. } => {
1269 collector.add_def(rd);
1270 collector.add_use(rn);
1271 collector.add_use(rm);
1272 }
1273 &Inst::AluRRSImm16 { rd, rn, .. } => {
1274 collector.add_def(rd);
1275 collector.add_use(rn);
1276 }
1277 &Inst::AluRR { rd, rm, .. } => {
1278 collector.add_mod(rd);
1279 collector.add_use(rm);
1280 }
1281 &Inst::AluRX { rd, ref mem, .. } => {
1282 collector.add_mod(rd);
1283 memarg_regs(mem, collector);
1284 }
1285 &Inst::AluRSImm16 { rd, .. } => {
1286 collector.add_mod(rd);
1287 }
1288 &Inst::AluRSImm32 { rd, .. } => {
1289 collector.add_mod(rd);
1290 }
1291 &Inst::AluRUImm32 { rd, .. } => {
1292 collector.add_mod(rd);
1293 }
1294 &Inst::AluRUImm16Shifted { rd, .. } => {
1295 collector.add_mod(rd);
1296 }
1297 &Inst::AluRUImm32Shifted { rd, .. } => {
1298 collector.add_mod(rd);
1299 }
1300 &Inst::SMulWide { rn, rm, .. } => {
1301 collector.add_def(writable_gpr(0));
1302 collector.add_def(writable_gpr(1));
1303 collector.add_use(rn);
1304 collector.add_use(rm);
1305 }
1306 &Inst::UMulWide { rn, .. } => {
1307 collector.add_def(writable_gpr(0));
1308 collector.add_mod(writable_gpr(1));
1309 collector.add_use(rn);
1310 }
1311 &Inst::SDivMod32 { rn, .. } | &Inst::SDivMod64 { rn, .. } => {
1312 collector.add_def(writable_gpr(0));
1313 collector.add_mod(writable_gpr(1));
1314 collector.add_use(rn);
1315 }
1316 &Inst::UDivMod32 { rn, .. } | &Inst::UDivMod64 { rn, .. } => {
1317 collector.add_mod(writable_gpr(0));
1318 collector.add_mod(writable_gpr(1));
1319 collector.add_use(rn);
1320 }
1321 &Inst::Flogr { rn, .. } => {
1322 collector.add_def(writable_gpr(0));
1323 collector.add_def(writable_gpr(1));
1324 collector.add_use(rn);
1325 }
1326 &Inst::ShiftRR {
1327 rd, rn, shift_reg, ..
1328 } => {
1329 collector.add_def(rd);
1330 collector.add_use(rn);
1331 if let Some(reg) = shift_reg {
1332 collector.add_use(reg);
1333 }
1334 }
1335 &Inst::UnaryRR { rd, rn, .. } => {
1336 collector.add_def(rd);
1337 collector.add_use(rn);
1338 }
1339 &Inst::CmpRR { rn, rm, .. } => {
1340 collector.add_use(rn);
1341 collector.add_use(rm);
1342 }
1343 &Inst::CmpRX { rn, ref mem, .. } => {
1344 collector.add_use(rn);
1345 memarg_regs(mem, collector);
1346 }
1347 &Inst::CmpRSImm16 { rn, .. } => {
1348 collector.add_use(rn);
1349 }
1350 &Inst::CmpRSImm32 { rn, .. } => {
1351 collector.add_use(rn);
1352 }
1353 &Inst::CmpRUImm32 { rn, .. } => {
1354 collector.add_use(rn);
1355 }
1356 &Inst::CmpTrapRR { rn, rm, .. } => {
1357 collector.add_use(rn);
1358 collector.add_use(rm);
1359 }
1360 &Inst::CmpTrapRSImm16 { rn, .. } => {
1361 collector.add_use(rn);
1362 }
1363 &Inst::CmpTrapRUImm16 { rn, .. } => {
1364 collector.add_use(rn);
1365 }
1366 &Inst::AtomicRmw {
1367 rd, rn, ref mem, ..
1368 } => {
1369 collector.add_def(rd);
1370 collector.add_use(rn);
1371 memarg_regs(mem, collector);
1372 }
1373 &Inst::AtomicCas32 {
1374 rd, rn, ref mem, ..
1375 }
1376 | &Inst::AtomicCas64 {
1377 rd, rn, ref mem, ..
1378 } => {
1379 collector.add_mod(rd);
1380 collector.add_use(rn);
1381 memarg_regs(mem, collector);
1382 }
1383 &Inst::Fence => {}
1384 &Inst::Load32 { rd, ref mem, .. }
1385 | &Inst::Load32ZExt8 { rd, ref mem, .. }
1386 | &Inst::Load32SExt8 { rd, ref mem, .. }
1387 | &Inst::Load32ZExt16 { rd, ref mem, .. }
1388 | &Inst::Load32SExt16 { rd, ref mem, .. }
1389 | &Inst::Load64 { rd, ref mem, .. }
1390 | &Inst::Load64ZExt8 { rd, ref mem, .. }
1391 | &Inst::Load64SExt8 { rd, ref mem, .. }
1392 | &Inst::Load64ZExt16 { rd, ref mem, .. }
1393 | &Inst::Load64SExt16 { rd, ref mem, .. }
1394 | &Inst::Load64ZExt32 { rd, ref mem, .. }
1395 | &Inst::Load64SExt32 { rd, ref mem, .. }
1396 | &Inst::LoadRev16 { rd, ref mem, .. }
1397 | &Inst::LoadRev32 { rd, ref mem, .. }
1398 | &Inst::LoadRev64 { rd, ref mem, .. } => {
1399 collector.add_def(rd);
1400 memarg_regs(mem, collector);
1401 }
1402 &Inst::Store8 { rd, ref mem, .. }
1403 | &Inst::Store16 { rd, ref mem, .. }
1404 | &Inst::Store32 { rd, ref mem, .. }
1405 | &Inst::Store64 { rd, ref mem, .. }
1406 | &Inst::StoreRev16 { rd, ref mem, .. }
1407 | &Inst::StoreRev32 { rd, ref mem, .. }
1408 | &Inst::StoreRev64 { rd, ref mem, .. } => {
1409 collector.add_use(rd);
1410 memarg_regs(mem, collector);
1411 }
1412 &Inst::StoreImm8 { ref mem, .. }
1413 | &Inst::StoreImm16 { ref mem, .. }
1414 | &Inst::StoreImm32SExt16 { ref mem, .. }
1415 | &Inst::StoreImm64SExt16 { ref mem, .. } => {
1416 memarg_regs(mem, collector);
1417 }
1418 &Inst::LoadMultiple64 {
1419 rt, rt2, addr_reg, ..
1420 } => {
1421 let first_regnum = rt.to_reg().get_hw_encoding();
1422 let last_regnum = rt2.to_reg().get_hw_encoding();
1423 for regnum in first_regnum..last_regnum + 1 {
1424 collector.add_def(writable_gpr(regnum));
1425 }
1426 collector.add_use(addr_reg);
1427 }
1428 &Inst::StoreMultiple64 {
1429 rt, rt2, addr_reg, ..
1430 } => {
1431 let first_regnum = rt.get_hw_encoding();
1432 let last_regnum = rt2.get_hw_encoding();
1433 for regnum in first_regnum..last_regnum + 1 {
1434 collector.add_use(gpr(regnum));
1435 }
1436 collector.add_use(addr_reg);
1437 }
1438 &Inst::Mov64 { rd, rm } => {
1439 collector.add_def(rd);
1440 collector.add_use(rm);
1441 }
1442 &Inst::Mov32 { rd, rm } => {
1443 collector.add_def(rd);
1444 collector.add_use(rm);
1445 }
1446 &Inst::Mov32Imm { rd, .. }
1447 | &Inst::Mov32SImm16 { rd, .. }
1448 | &Inst::Mov64SImm16 { rd, .. }
1449 | &Inst::Mov64SImm32 { rd, .. }
1450 | &Inst::Mov64UImm16Shifted { rd, .. }
1451 | &Inst::Mov64UImm32Shifted { rd, .. } => {
1452 collector.add_def(rd);
1453 }
1454 &Inst::CMov32 { rd, rm, .. } | &Inst::CMov64 { rd, rm, .. } => {
1455 collector.add_mod(rd);
1456 collector.add_use(rm);
1457 }
1458 &Inst::CMov32SImm16 { rd, .. } | &Inst::CMov64SImm16 { rd, .. } => {
1459 collector.add_mod(rd);
1460 }
1461 &Inst::Insert64UImm16Shifted { rd, .. } | &Inst::Insert64UImm32Shifted { rd, .. } => {
1462 collector.add_mod(rd);
1463 }
1464 &Inst::FpuMove32 { rd, rn } | &Inst::FpuMove64 { rd, rn } => {
1465 collector.add_def(rd);
1466 collector.add_use(rn);
1467 }
1468 &Inst::FpuCMov32 { rd, rm, .. } | &Inst::FpuCMov64 { rd, rm, .. } => {
1469 collector.add_mod(rd);
1470 collector.add_use(rm);
1471 }
1472 &Inst::MovToFpr { rd, rn } | &Inst::MovFromFpr { rd, rn } => {
1473 collector.add_def(rd);
1474 collector.add_use(rn);
1475 }
1476 &Inst::FpuRR { rd, rn, .. } => {
1477 collector.add_def(rd);
1478 collector.add_use(rn);
1479 }
1480 &Inst::FpuRRR { rd, rm, .. } => {
1481 collector.add_mod(rd);
1482 collector.add_use(rm);
1483 }
1484 &Inst::FpuRRRR { rd, rn, rm, .. } => {
1485 collector.add_mod(rd);
1486 collector.add_use(rn);
1487 collector.add_use(rm);
1488 }
1489 &Inst::FpuCopysign { rd, rn, rm, .. } => {
1490 collector.add_def(rd);
1491 collector.add_use(rn);
1492 collector.add_use(rm);
1493 }
1494 &Inst::FpuCmp32 { rn, rm } | &Inst::FpuCmp64 { rn, rm } => {
1495 collector.add_use(rn);
1496 collector.add_use(rm);
1497 }
1498 &Inst::FpuLoad32 { rd, ref mem, .. } => {
1499 collector.add_def(rd);
1500 memarg_regs(mem, collector);
1501 }
1502 &Inst::FpuLoad64 { rd, ref mem, .. } => {
1503 collector.add_def(rd);
1504 memarg_regs(mem, collector);
1505 }
1506 &Inst::FpuStore32 { rd, ref mem, .. } => {
1507 collector.add_use(rd);
1508 memarg_regs(mem, collector);
1509 }
1510 &Inst::FpuStore64 { rd, ref mem, .. } => {
1511 collector.add_use(rd);
1512 memarg_regs(mem, collector);
1513 }
1514 &Inst::FpuLoadRev32 { rd, ref mem, .. } => {
1515 collector.add_def(rd);
1516 memarg_regs(mem, collector);
1517 }
1518 &Inst::FpuLoadRev64 { rd, ref mem, .. } => {
1519 collector.add_def(rd);
1520 memarg_regs(mem, collector);
1521 }
1522 &Inst::FpuStoreRev32 { rd, ref mem, .. } => {
1523 collector.add_use(rd);
1524 memarg_regs(mem, collector);
1525 }
1526 &Inst::FpuStoreRev64 { rd, ref mem, .. } => {
1527 collector.add_use(rd);
1528 memarg_regs(mem, collector);
1529 }
1530 &Inst::LoadFpuConst32 { rd, .. } | &Inst::LoadFpuConst64 { rd, .. } => {
1531 collector.add_def(rd);
1532 }
1533 &Inst::FpuToInt { rd, rn, .. } => {
1534 collector.add_def(rd);
1535 collector.add_use(rn);
1536 }
1537 &Inst::IntToFpu { rd, rn, .. } => {
1538 collector.add_def(rd);
1539 collector.add_use(rn);
1540 }
1541 &Inst::FpuRound { rd, rn, .. } => {
1542 collector.add_def(rd);
1543 collector.add_use(rn);
1544 }
1545 &Inst::FpuVecRRR { rd, rn, rm, .. } => {
1546 collector.add_def(rd);
1547 collector.add_use(rn);
1548 collector.add_use(rm);
1549 }
1550 &Inst::Extend { rd, rn, .. } => {
1551 collector.add_def(rd);
1552 collector.add_use(rn);
1553 }
1554 &Inst::Call { link, ref info } => {
1555 collector.add_def(link);
1556 collector.add_uses(&*info.uses);
1557 collector.add_defs(&*info.defs);
1558 }
1559 &Inst::CallInd { link, ref info } => {
1560 collector.add_def(link);
1561 collector.add_uses(&*info.uses);
1562 collector.add_defs(&*info.defs);
1563 collector.add_use(info.rn);
1564 }
1565 &Inst::Ret { .. } => {}
1566 &Inst::Jump { .. } | &Inst::EpiloguePlaceholder => {}
1567 &Inst::IndirectBr { rn, .. } => {
1568 collector.add_use(rn);
1569 }
1570 &Inst::CondBr { .. } | &Inst::OneWayCondBr { .. } => {}
1571 &Inst::Nop0 | Inst::Nop2 => {}
1572 &Inst::Debugtrap => {}
1573 &Inst::Trap { .. } => {}
1574 &Inst::TrapIf { .. } => {}
1575 &Inst::JTSequence {
1576 ridx, rtmp1, rtmp2, ..
1577 } => {
1578 collector.add_use(ridx);
1579 collector.add_def(rtmp1);
1580 collector.add_def(rtmp2);
1581 }
1582 &Inst::LoadExtNameFar { rd, .. } => {
1583 collector.add_def(rd);
1584 }
1585 &Inst::LoadAddr { rd, ref mem } => {
1586 collector.add_def(rd);
1587 memarg_regs(mem, collector);
1588 }
1589 &Inst::VirtualSPOffsetAdj { .. } => {}
1590 &Inst::ValueLabelMarker { reg, .. } => {
1591 collector.add_use(reg);
1592 }
1593 &Inst::Unwind { .. } => {}
1594 }
1595 }
1596
1597 //=============================================================================
1598 // Instructions: map_regs
1599
s390x_map_regs<RUM: RegUsageMapper>(inst: &mut Inst, mapper: &RUM)1600 fn s390x_map_regs<RUM: RegUsageMapper>(inst: &mut Inst, mapper: &RUM) {
1601 fn map_use<RUM: RegUsageMapper>(m: &RUM, r: &mut Reg) {
1602 if r.is_virtual() {
1603 let new = m.get_use(r.to_virtual_reg()).unwrap().to_reg();
1604 *r = new;
1605 }
1606 }
1607
1608 fn map_def<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>) {
1609 if r.to_reg().is_virtual() {
1610 let new = m.get_def(r.to_reg().to_virtual_reg()).unwrap().to_reg();
1611 *r = Writable::from_reg(new);
1612 }
1613 }
1614
1615 fn map_mod<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>) {
1616 if r.to_reg().is_virtual() {
1617 let new = m.get_mod(r.to_reg().to_virtual_reg()).unwrap().to_reg();
1618 *r = Writable::from_reg(new);
1619 }
1620 }
1621
1622 fn map_mem<RUM: RegUsageMapper>(m: &RUM, mem: &mut MemArg) {
1623 match mem {
1624 &mut MemArg::BXD12 {
1625 ref mut base,
1626 ref mut index,
1627 ..
1628 }
1629 | &mut MemArg::BXD20 {
1630 ref mut base,
1631 ref mut index,
1632 ..
1633 } => {
1634 if *base != zero_reg() {
1635 map_use(m, base);
1636 }
1637 if *index != zero_reg() {
1638 map_use(m, index);
1639 }
1640 }
1641 &mut MemArg::Label { .. } | &mut MemArg::Symbol { .. } => {}
1642 &mut MemArg::RegOffset { ref mut reg, .. } => map_use(m, reg),
1643 &mut MemArg::InitialSPOffset { .. } | &mut MemArg::NominalSPOffset { .. } => {}
1644 };
1645 }
1646
1647 match inst {
1648 &mut Inst::AluRRR {
1649 ref mut rd,
1650 ref mut rn,
1651 ref mut rm,
1652 ..
1653 } => {
1654 map_def(mapper, rd);
1655 map_use(mapper, rn);
1656 map_use(mapper, rm);
1657 }
1658 &mut Inst::AluRRSImm16 {
1659 ref mut rd,
1660 ref mut rn,
1661 ..
1662 } => {
1663 map_def(mapper, rd);
1664 map_use(mapper, rn);
1665 }
1666 &mut Inst::AluRX {
1667 ref mut rd,
1668 ref mut mem,
1669 ..
1670 } => {
1671 map_mod(mapper, rd);
1672 map_mem(mapper, mem);
1673 }
1674 &mut Inst::AluRR {
1675 ref mut rd,
1676 ref mut rm,
1677 ..
1678 } => {
1679 map_mod(mapper, rd);
1680 map_use(mapper, rm);
1681 }
1682 &mut Inst::AluRSImm16 { ref mut rd, .. } => {
1683 map_mod(mapper, rd);
1684 }
1685 &mut Inst::AluRSImm32 { ref mut rd, .. } => {
1686 map_mod(mapper, rd);
1687 }
1688 &mut Inst::AluRUImm32 { ref mut rd, .. } => {
1689 map_mod(mapper, rd);
1690 }
1691 &mut Inst::AluRUImm16Shifted { ref mut rd, .. } => {
1692 map_mod(mapper, rd);
1693 }
1694 &mut Inst::AluRUImm32Shifted { ref mut rd, .. } => {
1695 map_mod(mapper, rd);
1696 }
1697 &mut Inst::SMulWide {
1698 ref mut rn,
1699 ref mut rm,
1700 ..
1701 } => {
1702 map_use(mapper, rn);
1703 map_use(mapper, rm);
1704 }
1705 &mut Inst::UMulWide { ref mut rn, .. } => {
1706 map_use(mapper, rn);
1707 }
1708 &mut Inst::SDivMod32 { ref mut rn, .. } => {
1709 map_use(mapper, rn);
1710 }
1711 &mut Inst::SDivMod64 { ref mut rn, .. } => {
1712 map_use(mapper, rn);
1713 }
1714 &mut Inst::UDivMod32 { ref mut rn, .. } => {
1715 map_use(mapper, rn);
1716 }
1717 &mut Inst::UDivMod64 { ref mut rn, .. } => {
1718 map_use(mapper, rn);
1719 }
1720 &mut Inst::Flogr { ref mut rn, .. } => {
1721 map_use(mapper, rn);
1722 }
1723 &mut Inst::ShiftRR {
1724 ref mut rd,
1725 ref mut rn,
1726 ref mut shift_reg,
1727 ..
1728 } => {
1729 map_def(mapper, rd);
1730 map_use(mapper, rn);
1731 if let Some(reg) = shift_reg {
1732 map_use(mapper, reg);
1733 }
1734 }
1735 &mut Inst::UnaryRR {
1736 ref mut rd,
1737 ref mut rn,
1738 ..
1739 } => {
1740 map_def(mapper, rd);
1741 map_use(mapper, rn);
1742 }
1743 &mut Inst::CmpRR {
1744 ref mut rn,
1745 ref mut rm,
1746 ..
1747 } => {
1748 map_use(mapper, rn);
1749 map_use(mapper, rm);
1750 }
1751 &mut Inst::CmpRX {
1752 ref mut rn,
1753 ref mut mem,
1754 ..
1755 } => {
1756 map_use(mapper, rn);
1757 map_mem(mapper, mem);
1758 }
1759 &mut Inst::CmpRSImm16 { ref mut rn, .. } => {
1760 map_use(mapper, rn);
1761 }
1762 &mut Inst::CmpRSImm32 { ref mut rn, .. } => {
1763 map_use(mapper, rn);
1764 }
1765 &mut Inst::CmpRUImm32 { ref mut rn, .. } => {
1766 map_use(mapper, rn);
1767 }
1768 &mut Inst::CmpTrapRR {
1769 ref mut rn,
1770 ref mut rm,
1771 ..
1772 } => {
1773 map_use(mapper, rn);
1774 map_use(mapper, rm);
1775 }
1776 &mut Inst::CmpTrapRSImm16 { ref mut rn, .. } => {
1777 map_use(mapper, rn);
1778 }
1779 &mut Inst::CmpTrapRUImm16 { ref mut rn, .. } => {
1780 map_use(mapper, rn);
1781 }
1782
1783 &mut Inst::AtomicRmw {
1784 ref mut rd,
1785 ref mut rn,
1786 ref mut mem,
1787 ..
1788 } => {
1789 map_def(mapper, rd);
1790 map_use(mapper, rn);
1791 map_mem(mapper, mem);
1792 }
1793 &mut Inst::AtomicCas32 {
1794 ref mut rd,
1795 ref mut rn,
1796 ref mut mem,
1797 ..
1798 } => {
1799 map_mod(mapper, rd);
1800 map_use(mapper, rn);
1801 map_mem(mapper, mem);
1802 }
1803 &mut Inst::AtomicCas64 {
1804 ref mut rd,
1805 ref mut rn,
1806 ref mut mem,
1807 ..
1808 } => {
1809 map_mod(mapper, rd);
1810 map_use(mapper, rn);
1811 map_mem(mapper, mem);
1812 }
1813 &mut Inst::Fence => {}
1814
1815 &mut Inst::Load32 {
1816 ref mut rd,
1817 ref mut mem,
1818 ..
1819 } => {
1820 map_def(mapper, rd);
1821 map_mem(mapper, mem);
1822 }
1823 &mut Inst::Load32ZExt8 {
1824 ref mut rd,
1825 ref mut mem,
1826 ..
1827 } => {
1828 map_def(mapper, rd);
1829 map_mem(mapper, mem);
1830 }
1831 &mut Inst::Load32SExt8 {
1832 ref mut rd,
1833 ref mut mem,
1834 ..
1835 } => {
1836 map_def(mapper, rd);
1837 map_mem(mapper, mem);
1838 }
1839 &mut Inst::Load32ZExt16 {
1840 ref mut rd,
1841 ref mut mem,
1842 ..
1843 } => {
1844 map_def(mapper, rd);
1845 map_mem(mapper, mem);
1846 }
1847 &mut Inst::Load32SExt16 {
1848 ref mut rd,
1849 ref mut mem,
1850 ..
1851 } => {
1852 map_def(mapper, rd);
1853 map_mem(mapper, mem);
1854 }
1855 &mut Inst::Load64 {
1856 ref mut rd,
1857 ref mut mem,
1858 ..
1859 } => {
1860 map_def(mapper, rd);
1861 map_mem(mapper, mem);
1862 }
1863 &mut Inst::Load64ZExt8 {
1864 ref mut rd,
1865 ref mut mem,
1866 ..
1867 } => {
1868 map_def(mapper, rd);
1869 map_mem(mapper, mem);
1870 }
1871 &mut Inst::Load64SExt8 {
1872 ref mut rd,
1873 ref mut mem,
1874 ..
1875 } => {
1876 map_def(mapper, rd);
1877 map_mem(mapper, mem);
1878 }
1879 &mut Inst::Load64ZExt16 {
1880 ref mut rd,
1881 ref mut mem,
1882 ..
1883 } => {
1884 map_def(mapper, rd);
1885 map_mem(mapper, mem);
1886 }
1887 &mut Inst::Load64SExt16 {
1888 ref mut rd,
1889 ref mut mem,
1890 ..
1891 } => {
1892 map_def(mapper, rd);
1893 map_mem(mapper, mem);
1894 }
1895 &mut Inst::Load64ZExt32 {
1896 ref mut rd,
1897 ref mut mem,
1898 ..
1899 } => {
1900 map_def(mapper, rd);
1901 map_mem(mapper, mem);
1902 }
1903 &mut Inst::Load64SExt32 {
1904 ref mut rd,
1905 ref mut mem,
1906 ..
1907 } => {
1908 map_def(mapper, rd);
1909 map_mem(mapper, mem);
1910 }
1911 &mut Inst::LoadRev16 {
1912 ref mut rd,
1913 ref mut mem,
1914 ..
1915 } => {
1916 map_def(mapper, rd);
1917 map_mem(mapper, mem);
1918 }
1919 &mut Inst::LoadRev32 {
1920 ref mut rd,
1921 ref mut mem,
1922 ..
1923 } => {
1924 map_def(mapper, rd);
1925 map_mem(mapper, mem);
1926 }
1927 &mut Inst::LoadRev64 {
1928 ref mut rd,
1929 ref mut mem,
1930 ..
1931 } => {
1932 map_def(mapper, rd);
1933 map_mem(mapper, mem);
1934 }
1935
1936 &mut Inst::Store8 {
1937 ref mut rd,
1938 ref mut mem,
1939 ..
1940 } => {
1941 map_use(mapper, rd);
1942 map_mem(mapper, mem);
1943 }
1944 &mut Inst::Store16 {
1945 ref mut rd,
1946 ref mut mem,
1947 ..
1948 } => {
1949 map_use(mapper, rd);
1950 map_mem(mapper, mem);
1951 }
1952 &mut Inst::Store32 {
1953 ref mut rd,
1954 ref mut mem,
1955 ..
1956 } => {
1957 map_use(mapper, rd);
1958 map_mem(mapper, mem);
1959 }
1960 &mut Inst::Store64 {
1961 ref mut rd,
1962 ref mut mem,
1963 ..
1964 } => {
1965 map_use(mapper, rd);
1966 map_mem(mapper, mem);
1967 }
1968 &mut Inst::StoreImm8 { ref mut mem, .. } => {
1969 map_mem(mapper, mem);
1970 }
1971 &mut Inst::StoreImm16 { ref mut mem, .. } => {
1972 map_mem(mapper, mem);
1973 }
1974 &mut Inst::StoreImm32SExt16 { ref mut mem, .. } => {
1975 map_mem(mapper, mem);
1976 }
1977 &mut Inst::StoreImm64SExt16 { ref mut mem, .. } => {
1978 map_mem(mapper, mem);
1979 }
1980 &mut Inst::StoreRev16 {
1981 ref mut rd,
1982 ref mut mem,
1983 ..
1984 } => {
1985 map_use(mapper, rd);
1986 map_mem(mapper, mem);
1987 }
1988 &mut Inst::StoreRev32 {
1989 ref mut rd,
1990 ref mut mem,
1991 ..
1992 } => {
1993 map_use(mapper, rd);
1994 map_mem(mapper, mem);
1995 }
1996 &mut Inst::StoreRev64 {
1997 ref mut rd,
1998 ref mut mem,
1999 ..
2000 } => {
2001 map_use(mapper, rd);
2002 map_mem(mapper, mem);
2003 }
2004 &mut Inst::LoadMultiple64 { .. } => {
2005 // This instruction accesses all registers between rt and rt2,
2006 // so it cannot be remapped. But this does not matter since
2007 // the instruction is only ever used after register allocation.
2008 unreachable!();
2009 }
2010 &mut Inst::StoreMultiple64 { .. } => {
2011 // This instruction accesses all registers between rt and rt2,
2012 // so it cannot be remapped. But this does not matter since
2013 // the instruction is only ever used after register allocation.
2014 unreachable!();
2015 }
2016
2017 &mut Inst::Mov64 {
2018 ref mut rd,
2019 ref mut rm,
2020 } => {
2021 map_def(mapper, rd);
2022 map_use(mapper, rm);
2023 }
2024 &mut Inst::Mov32 {
2025 ref mut rd,
2026 ref mut rm,
2027 } => {
2028 map_def(mapper, rd);
2029 map_use(mapper, rm);
2030 }
2031 &mut Inst::Mov32Imm { ref mut rd, .. } => {
2032 map_def(mapper, rd);
2033 }
2034 &mut Inst::Mov32SImm16 { ref mut rd, .. } => {
2035 map_def(mapper, rd);
2036 }
2037 &mut Inst::Mov64SImm16 { ref mut rd, .. } => {
2038 map_def(mapper, rd);
2039 }
2040 &mut Inst::Mov64SImm32 { ref mut rd, .. } => {
2041 map_def(mapper, rd);
2042 }
2043 &mut Inst::Mov64UImm16Shifted { ref mut rd, .. } => {
2044 map_def(mapper, rd);
2045 }
2046 &mut Inst::Mov64UImm32Shifted { ref mut rd, .. } => {
2047 map_def(mapper, rd);
2048 }
2049 &mut Inst::Insert64UImm16Shifted { ref mut rd, .. } => {
2050 map_mod(mapper, rd);
2051 }
2052 &mut Inst::Insert64UImm32Shifted { ref mut rd, .. } => {
2053 map_mod(mapper, rd);
2054 }
2055 &mut Inst::CMov64 {
2056 ref mut rd,
2057 ref mut rm,
2058 ..
2059 } => {
2060 map_mod(mapper, rd);
2061 map_use(mapper, rm);
2062 }
2063 &mut Inst::CMov32 {
2064 ref mut rd,
2065 ref mut rm,
2066 ..
2067 } => {
2068 map_mod(mapper, rd);
2069 map_use(mapper, rm);
2070 }
2071 &mut Inst::CMov32SImm16 { ref mut rd, .. } => {
2072 map_mod(mapper, rd);
2073 }
2074 &mut Inst::CMov64SImm16 { ref mut rd, .. } => {
2075 map_mod(mapper, rd);
2076 }
2077 &mut Inst::FpuMove32 {
2078 ref mut rd,
2079 ref mut rn,
2080 } => {
2081 map_def(mapper, rd);
2082 map_use(mapper, rn);
2083 }
2084 &mut Inst::FpuMove64 {
2085 ref mut rd,
2086 ref mut rn,
2087 } => {
2088 map_def(mapper, rd);
2089 map_use(mapper, rn);
2090 }
2091 &mut Inst::FpuCMov64 {
2092 ref mut rd,
2093 ref mut rm,
2094 ..
2095 } => {
2096 map_mod(mapper, rd);
2097 map_use(mapper, rm);
2098 }
2099 &mut Inst::FpuCMov32 {
2100 ref mut rd,
2101 ref mut rm,
2102 ..
2103 } => {
2104 map_mod(mapper, rd);
2105 map_use(mapper, rm);
2106 }
2107 &mut Inst::MovToFpr {
2108 ref mut rd,
2109 ref mut rn,
2110 } => {
2111 map_def(mapper, rd);
2112 map_use(mapper, rn);
2113 }
2114 &mut Inst::MovFromFpr {
2115 ref mut rd,
2116 ref mut rn,
2117 } => {
2118 map_def(mapper, rd);
2119 map_use(mapper, rn);
2120 }
2121 &mut Inst::FpuRR {
2122 ref mut rd,
2123 ref mut rn,
2124 ..
2125 } => {
2126 map_def(mapper, rd);
2127 map_use(mapper, rn);
2128 }
2129 &mut Inst::FpuRRR {
2130 ref mut rd,
2131 ref mut rm,
2132 ..
2133 } => {
2134 map_mod(mapper, rd);
2135 map_use(mapper, rm);
2136 }
2137 &mut Inst::FpuRRRR {
2138 ref mut rd,
2139 ref mut rn,
2140 ref mut rm,
2141 ..
2142 } => {
2143 map_mod(mapper, rd);
2144 map_use(mapper, rn);
2145 map_use(mapper, rm);
2146 }
2147 &mut Inst::FpuCopysign {
2148 ref mut rd,
2149 ref mut rn,
2150 ref mut rm,
2151 ..
2152 } => {
2153 map_def(mapper, rd);
2154 map_use(mapper, rn);
2155 map_use(mapper, rm);
2156 }
2157 &mut Inst::FpuCmp32 {
2158 ref mut rn,
2159 ref mut rm,
2160 } => {
2161 map_use(mapper, rn);
2162 map_use(mapper, rm);
2163 }
2164 &mut Inst::FpuCmp64 {
2165 ref mut rn,
2166 ref mut rm,
2167 } => {
2168 map_use(mapper, rn);
2169 map_use(mapper, rm);
2170 }
2171 &mut Inst::FpuLoad32 {
2172 ref mut rd,
2173 ref mut mem,
2174 ..
2175 } => {
2176 map_def(mapper, rd);
2177 map_mem(mapper, mem);
2178 }
2179 &mut Inst::FpuLoad64 {
2180 ref mut rd,
2181 ref mut mem,
2182 ..
2183 } => {
2184 map_def(mapper, rd);
2185 map_mem(mapper, mem);
2186 }
2187 &mut Inst::FpuStore32 {
2188 ref mut rd,
2189 ref mut mem,
2190 ..
2191 } => {
2192 map_use(mapper, rd);
2193 map_mem(mapper, mem);
2194 }
2195 &mut Inst::FpuStore64 {
2196 ref mut rd,
2197 ref mut mem,
2198 ..
2199 } => {
2200 map_use(mapper, rd);
2201 map_mem(mapper, mem);
2202 }
2203 &mut Inst::FpuLoadRev32 {
2204 ref mut rd,
2205 ref mut mem,
2206 ..
2207 } => {
2208 map_def(mapper, rd);
2209 map_mem(mapper, mem);
2210 }
2211 &mut Inst::FpuLoadRev64 {
2212 ref mut rd,
2213 ref mut mem,
2214 ..
2215 } => {
2216 map_def(mapper, rd);
2217 map_mem(mapper, mem);
2218 }
2219 &mut Inst::FpuStoreRev32 {
2220 ref mut rd,
2221 ref mut mem,
2222 ..
2223 } => {
2224 map_use(mapper, rd);
2225 map_mem(mapper, mem);
2226 }
2227 &mut Inst::FpuStoreRev64 {
2228 ref mut rd,
2229 ref mut mem,
2230 ..
2231 } => {
2232 map_use(mapper, rd);
2233 map_mem(mapper, mem);
2234 }
2235 &mut Inst::LoadFpuConst32 { ref mut rd, .. } => {
2236 map_def(mapper, rd);
2237 }
2238 &mut Inst::LoadFpuConst64 { ref mut rd, .. } => {
2239 map_def(mapper, rd);
2240 }
2241 &mut Inst::FpuToInt {
2242 ref mut rd,
2243 ref mut rn,
2244 ..
2245 } => {
2246 map_def(mapper, rd);
2247 map_use(mapper, rn);
2248 }
2249 &mut Inst::IntToFpu {
2250 ref mut rd,
2251 ref mut rn,
2252 ..
2253 } => {
2254 map_def(mapper, rd);
2255 map_use(mapper, rn);
2256 }
2257 &mut Inst::FpuRound {
2258 ref mut rd,
2259 ref mut rn,
2260 ..
2261 } => {
2262 map_def(mapper, rd);
2263 map_use(mapper, rn);
2264 }
2265 &mut Inst::FpuVecRRR {
2266 ref mut rd,
2267 ref mut rn,
2268 ref mut rm,
2269 ..
2270 } => {
2271 map_def(mapper, rd);
2272 map_use(mapper, rn);
2273 map_use(mapper, rm);
2274 }
2275 &mut Inst::Extend {
2276 ref mut rd,
2277 ref mut rn,
2278 ..
2279 } => {
2280 map_def(mapper, rd);
2281 map_use(mapper, rn);
2282 }
2283 &mut Inst::Call {
2284 ref mut link,
2285 ref mut info,
2286 } => {
2287 map_def(mapper, link);
2288 for r in info.uses.iter_mut() {
2289 map_use(mapper, r);
2290 }
2291 for r in info.defs.iter_mut() {
2292 map_def(mapper, r);
2293 }
2294 }
2295 &mut Inst::CallInd {
2296 ref mut link,
2297 ref mut info,
2298 ..
2299 } => {
2300 map_def(mapper, link);
2301 for r in info.uses.iter_mut() {
2302 map_use(mapper, r);
2303 }
2304 for r in info.defs.iter_mut() {
2305 map_def(mapper, r);
2306 }
2307 map_use(mapper, &mut info.rn);
2308 }
2309 &mut Inst::Ret { .. } => {}
2310 &mut Inst::EpiloguePlaceholder => {}
2311 &mut Inst::Jump { .. } => {}
2312 &mut Inst::IndirectBr { ref mut rn, .. } => {
2313 map_use(mapper, rn);
2314 }
2315 &mut Inst::CondBr { .. } | &mut Inst::OneWayCondBr { .. } => {}
2316 &mut Inst::Debugtrap | &mut Inst::Trap { .. } | &mut Inst::TrapIf { .. } => {}
2317 &mut Inst::Nop0 | &mut Inst::Nop2 => {}
2318 &mut Inst::JTSequence {
2319 ref mut ridx,
2320 ref mut rtmp1,
2321 ref mut rtmp2,
2322 ..
2323 } => {
2324 map_use(mapper, ridx);
2325 map_def(mapper, rtmp1);
2326 map_def(mapper, rtmp2);
2327 }
2328 &mut Inst::LoadExtNameFar { ref mut rd, .. } => {
2329 map_def(mapper, rd);
2330 }
2331 &mut Inst::LoadAddr {
2332 ref mut rd,
2333 ref mut mem,
2334 } => {
2335 map_def(mapper, rd);
2336 map_mem(mapper, mem);
2337 }
2338 &mut Inst::VirtualSPOffsetAdj { .. } => {}
2339 &mut Inst::ValueLabelMarker { ref mut reg, .. } => {
2340 map_use(mapper, reg);
2341 }
2342 &mut Inst::Unwind { .. } => {}
2343 }
2344 }
2345
2346 //=============================================================================
2347 // Instructions: misc functions and external interface
2348
2349 impl MachInst for Inst {
2350 type LabelUse = LabelUse;
2351
get_regs(&self, collector: &mut RegUsageCollector)2352 fn get_regs(&self, collector: &mut RegUsageCollector) {
2353 s390x_get_regs(self, collector)
2354 }
2355
map_regs<RUM: RegUsageMapper>(&mut self, mapper: &RUM)2356 fn map_regs<RUM: RegUsageMapper>(&mut self, mapper: &RUM) {
2357 s390x_map_regs(self, mapper);
2358 }
2359
is_move(&self) -> Option<(Writable<Reg>, Reg)>2360 fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
2361 match self {
2362 &Inst::Mov32 { rd, rm } => Some((rd, rm)),
2363 &Inst::Mov64 { rd, rm } => Some((rd, rm)),
2364 &Inst::FpuMove32 { rd, rn } => Some((rd, rn)),
2365 &Inst::FpuMove64 { rd, rn } => Some((rd, rn)),
2366 _ => None,
2367 }
2368 }
2369
is_epilogue_placeholder(&self) -> bool2370 fn is_epilogue_placeholder(&self) -> bool {
2371 if let Inst::EpiloguePlaceholder = self {
2372 true
2373 } else {
2374 false
2375 }
2376 }
2377
is_term<'a>(&'a self) -> MachTerminator<'a>2378 fn is_term<'a>(&'a self) -> MachTerminator<'a> {
2379 match self {
2380 &Inst::Ret { .. } | &Inst::EpiloguePlaceholder => MachTerminator::Ret,
2381 &Inst::Jump { dest } => MachTerminator::Uncond(dest.as_label().unwrap()),
2382 &Inst::CondBr {
2383 taken, not_taken, ..
2384 } => MachTerminator::Cond(taken.as_label().unwrap(), not_taken.as_label().unwrap()),
2385 &Inst::OneWayCondBr { .. } => {
2386 // Explicitly invisible to CFG processing.
2387 MachTerminator::None
2388 }
2389 &Inst::IndirectBr { ref targets, .. } => MachTerminator::Indirect(&targets[..]),
2390 &Inst::JTSequence { ref info, .. } => {
2391 MachTerminator::Indirect(&info.targets_for_term[..])
2392 }
2393 _ => MachTerminator::None,
2394 }
2395 }
2396
stack_op_info(&self) -> Option<MachInstStackOpInfo>2397 fn stack_op_info(&self) -> Option<MachInstStackOpInfo> {
2398 match self {
2399 &Inst::VirtualSPOffsetAdj { offset } => Some(MachInstStackOpInfo::NomSPAdj(offset)),
2400 &Inst::Store64 {
2401 rd,
2402 mem: MemArg::NominalSPOffset { off },
2403 } => Some(MachInstStackOpInfo::StoreNomSPOff(rd, off)),
2404 &Inst::Load64 {
2405 rd,
2406 mem: MemArg::NominalSPOffset { off },
2407 } => Some(MachInstStackOpInfo::LoadNomSPOff(rd.to_reg(), off)),
2408 _ => None,
2409 }
2410 }
2411
gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst2412 fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst {
2413 assert!(ty.bits() <= 64);
2414 if ty.bits() <= 32 {
2415 Inst::mov32(to_reg, from_reg)
2416 } else {
2417 Inst::mov64(to_reg, from_reg)
2418 }
2419 }
2420
gen_constant<F: FnMut(Type) -> Writable<Reg>>( to_regs: ValueRegs<Writable<Reg>>, value: u128, ty: Type, _alloc_tmp: F, ) -> SmallVec<[Inst; 4]>2421 fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
2422 to_regs: ValueRegs<Writable<Reg>>,
2423 value: u128,
2424 ty: Type,
2425 _alloc_tmp: F,
2426 ) -> SmallVec<[Inst; 4]> {
2427 let to_reg = to_regs
2428 .only_reg()
2429 .expect("multi-reg values not supported yet");
2430 let value = value as u64;
2431 match ty {
2432 types::F64 => {
2433 let mut ret = SmallVec::new();
2434 ret.push(Inst::load_fp_constant64(to_reg, f64::from_bits(value)));
2435 ret
2436 }
2437 types::F32 => {
2438 let mut ret = SmallVec::new();
2439 ret.push(Inst::load_fp_constant32(
2440 to_reg,
2441 f32::from_bits(value as u32),
2442 ));
2443 ret
2444 }
2445 types::I64 | types::B64 | types::R64 => Inst::load_constant64(to_reg, value),
2446 types::B1
2447 | types::I8
2448 | types::B8
2449 | types::I16
2450 | types::B16
2451 | types::I32
2452 | types::B32 => Inst::load_constant32(to_reg, value as u32),
2453 _ => unreachable!(),
2454 }
2455 }
2456
gen_nop(preferred_size: usize) -> Inst2457 fn gen_nop(preferred_size: usize) -> Inst {
2458 if preferred_size == 0 {
2459 Inst::Nop0
2460 } else {
2461 // We can't give a NOP (or any insn) < 2 bytes.
2462 assert!(preferred_size >= 2);
2463 Inst::Nop2
2464 }
2465 }
2466
maybe_direct_reload(&self, _reg: VirtualReg, _slot: SpillSlot) -> Option<Inst>2467 fn maybe_direct_reload(&self, _reg: VirtualReg, _slot: SpillSlot) -> Option<Inst> {
2468 None
2469 }
2470
rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])>2471 fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
2472 match ty {
2473 types::I8 => Ok((&[RegClass::I64], &[types::I8])),
2474 types::I16 => Ok((&[RegClass::I64], &[types::I16])),
2475 types::I32 => Ok((&[RegClass::I64], &[types::I32])),
2476 types::I64 => Ok((&[RegClass::I64], &[types::I64])),
2477 types::B1 => Ok((&[RegClass::I64], &[types::B1])),
2478 types::B8 => Ok((&[RegClass::I64], &[types::B8])),
2479 types::B16 => Ok((&[RegClass::I64], &[types::B16])),
2480 types::B32 => Ok((&[RegClass::I64], &[types::B32])),
2481 types::B64 => Ok((&[RegClass::I64], &[types::B64])),
2482 types::R32 => panic!("32-bit reftype pointer should never be seen on s390x"),
2483 types::R64 => Ok((&[RegClass::I64], &[types::R64])),
2484 types::F32 => Ok((&[RegClass::F64], &[types::F32])),
2485 types::F64 => Ok((&[RegClass::F64], &[types::F64])),
2486 types::I128 => Ok((&[RegClass::I64, RegClass::I64], &[types::I64, types::I64])),
2487 types::B128 => Ok((&[RegClass::I64, RegClass::I64], &[types::B64, types::B64])),
2488 // FIXME: We don't really have IFLAGS, but need to allow it here
2489 // for now to support the SelectifSpectreGuard instruction.
2490 types::IFLAGS => Ok((&[RegClass::I64], &[types::I64])),
2491 _ => Err(CodegenError::Unsupported(format!(
2492 "Unexpected SSA-value type: {}",
2493 ty
2494 ))),
2495 }
2496 }
2497
gen_jump(target: MachLabel) -> Inst2498 fn gen_jump(target: MachLabel) -> Inst {
2499 Inst::Jump {
2500 dest: BranchTarget::Label(target),
2501 }
2502 }
2503
    /// Return the set of allocatable real registers for this ISA,
    /// as configured by the given compilation `flags`.
    fn reg_universe(flags: &settings::Flags) -> RealRegUniverse {
        create_reg_universe(flags)
    }
2507
    /// Upper bound, in bytes, on the machine code emitted for any single
    /// `Inst`; used for island/branch-range planning.
    fn worst_case_size() -> CodeOffset {
        // The maximum size, in bytes, of any `Inst`'s emitted code. We have at least one case of
        // an 8-instruction sequence (saturating int-to-float conversions) with three embedded
        // 64-bit f64 constants.
        //
        // Note that inline jump-tables handle island/pool insertion separately, so we do not need
        // to account for them here (otherwise the worst case would be 2^31 * 4, clearly not
        // feasible for other reasons).
        44
    }
2518
    /// Register class used for reference-typed (GC pointer) values.
    /// On s390x these always live in general-purpose registers.
    fn ref_type_regclass(_: &settings::Flags) -> RegClass {
        RegClass::I64
    }
2522
    /// Create a marker pseudo-instruction recording that `reg` holds the
    /// value associated with debug-info `label` at this program point.
    fn gen_value_label_marker(label: ValueLabel, reg: Reg) -> Self {
        Inst::ValueLabelMarker { label, reg }
    }
2526
defines_value_label(&self) -> Option<(ValueLabel, Reg)>2527 fn defines_value_label(&self) -> Option<(ValueLabel, Reg)> {
2528 match self {
2529 Inst::ValueLabelMarker { label, reg } => Some((*label, *reg)),
2530 _ => None,
2531 }
2532 }
2533 }
2534
2535 //=============================================================================
2536 // Pretty-printing of instructions.
2537
mem_finalize_for_show( mem: &MemArg, mb_rru: Option<&RealRegUniverse>, state: &EmitState, have_d12: bool, have_d20: bool, have_pcrel: bool, have_index: bool, ) -> (String, MemArg)2538 fn mem_finalize_for_show(
2539 mem: &MemArg,
2540 mb_rru: Option<&RealRegUniverse>,
2541 state: &EmitState,
2542 have_d12: bool,
2543 have_d20: bool,
2544 have_pcrel: bool,
2545 have_index: bool,
2546 ) -> (String, MemArg) {
2547 let (mem_insts, mem) = mem_finalize(mem, state, have_d12, have_d20, have_pcrel, have_index);
2548 let mut mem_str = mem_insts
2549 .into_iter()
2550 .map(|inst| inst.show_rru(mb_rru))
2551 .collect::<Vec<_>>()
2552 .join(" ; ");
2553 if !mem_str.is_empty() {
2554 mem_str += " ; ";
2555 }
2556
2557 (mem_str, mem)
2558 }
2559
2560 impl PrettyPrint for Inst {
show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String2561 fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
2562 self.pretty_print(mb_rru, &mut EmitState::default())
2563 }
2564 }
2565
2566 impl Inst {
print_with_state(&self, mb_rru: Option<&RealRegUniverse>, state: &mut EmitState) -> String2567 fn print_with_state(&self, mb_rru: Option<&RealRegUniverse>, state: &mut EmitState) -> String {
2568 match self {
2569 &Inst::Nop0 => "nop-zero-len".to_string(),
2570 &Inst::Nop2 => "nop".to_string(),
2571 &Inst::AluRRR { alu_op, rd, rn, rm } => {
2572 let (op, have_rr) = match alu_op {
2573 ALUOp::Add32 => ("ark", true),
2574 ALUOp::Add64 => ("agrk", true),
2575 ALUOp::Sub32 => ("srk", true),
2576 ALUOp::Sub64 => ("sgrk", true),
2577 ALUOp::Mul32 => ("msrkc", true),
2578 ALUOp::Mul64 => ("msgrkc", true),
2579 ALUOp::And32 => ("nrk", true),
2580 ALUOp::And64 => ("ngrk", true),
2581 ALUOp::Orr32 => ("ork", true),
2582 ALUOp::Orr64 => ("ogrk", true),
2583 ALUOp::Xor32 => ("xrk", true),
2584 ALUOp::Xor64 => ("xgrk", true),
2585 ALUOp::AndNot32 => ("nnrk", false),
2586 ALUOp::AndNot64 => ("nngrk", false),
2587 ALUOp::OrrNot32 => ("nork", false),
2588 ALUOp::OrrNot64 => ("nogrk", false),
2589 ALUOp::XorNot32 => ("nxrk", false),
2590 ALUOp::XorNot64 => ("nxgrk", false),
2591 _ => unreachable!(),
2592 };
2593 if have_rr && rd.to_reg() == rn {
2594 let inst = Inst::AluRR { alu_op, rd, rm };
2595 return inst.print_with_state(mb_rru, state);
2596 }
2597 let rd = rd.to_reg().show_rru(mb_rru);
2598 let rn = rn.show_rru(mb_rru);
2599 let rm = rm.show_rru(mb_rru);
2600 format!("{} {}, {}, {}", op, rd, rn, rm)
2601 }
2602 &Inst::AluRRSImm16 {
2603 alu_op,
2604 rd,
2605 rn,
2606 imm,
2607 } => {
2608 if rd.to_reg() == rn {
2609 let inst = Inst::AluRSImm16 { alu_op, rd, imm };
2610 return inst.print_with_state(mb_rru, state);
2611 }
2612 let op = match alu_op {
2613 ALUOp::Add32 => "ahik",
2614 ALUOp::Add64 => "aghik",
2615 _ => unreachable!(),
2616 };
2617 let rd = rd.to_reg().show_rru(mb_rru);
2618 let rn = rn.show_rru(mb_rru);
2619 format!("{} {}, {}, {}", op, rd, rn, imm)
2620 }
2621 &Inst::AluRR { alu_op, rd, rm } => {
2622 let op = match alu_op {
2623 ALUOp::Add32 => "ar",
2624 ALUOp::Add64 => "agr",
2625 ALUOp::Add64Ext32 => "agfr",
2626 ALUOp::Sub32 => "sr",
2627 ALUOp::Sub64 => "sgr",
2628 ALUOp::Sub64Ext32 => "sgfr",
2629 ALUOp::Mul32 => "msr",
2630 ALUOp::Mul64 => "msgr",
2631 ALUOp::Mul64Ext32 => "msgfr",
2632 ALUOp::And32 => "nr",
2633 ALUOp::And64 => "ngr",
2634 ALUOp::Orr32 => "or",
2635 ALUOp::Orr64 => "ogr",
2636 ALUOp::Xor32 => "xr",
2637 ALUOp::Xor64 => "xgr",
2638 _ => unreachable!(),
2639 };
2640 let rd = rd.to_reg().show_rru(mb_rru);
2641 let rm = rm.show_rru(mb_rru);
2642 format!("{} {}, {}", op, rd, rm)
2643 }
2644 &Inst::AluRX {
2645 alu_op,
2646 rd,
2647 ref mem,
2648 } => {
2649 let (opcode_rx, opcode_rxy) = match alu_op {
2650 ALUOp::Add32 => (Some("a"), Some("ay")),
2651 ALUOp::Add32Ext16 => (Some("ah"), Some("ahy")),
2652 ALUOp::Add64 => (None, Some("ag")),
2653 ALUOp::Add64Ext16 => (None, Some("agh")),
2654 ALUOp::Add64Ext32 => (None, Some("agf")),
2655 ALUOp::Sub32 => (Some("s"), Some("sy")),
2656 ALUOp::Sub32Ext16 => (Some("sh"), Some("shy")),
2657 ALUOp::Sub64 => (None, Some("sg")),
2658 ALUOp::Sub64Ext16 => (None, Some("sgh")),
2659 ALUOp::Sub64Ext32 => (None, Some("sgf")),
2660 ALUOp::Mul32 => (Some("ms"), Some("msy")),
2661 ALUOp::Mul32Ext16 => (Some("mh"), Some("mhy")),
2662 ALUOp::Mul64 => (None, Some("msg")),
2663 ALUOp::Mul64Ext16 => (None, Some("mgh")),
2664 ALUOp::Mul64Ext32 => (None, Some("msgf")),
2665 ALUOp::And32 => (Some("n"), Some("ny")),
2666 ALUOp::And64 => (None, Some("ng")),
2667 ALUOp::Orr32 => (Some("o"), Some("oy")),
2668 ALUOp::Orr64 => (None, Some("og")),
2669 ALUOp::Xor32 => (Some("x"), Some("xy")),
2670 ALUOp::Xor64 => (None, Some("xg")),
2671 _ => unreachable!(),
2672 };
2673
2674 let (mem_str, mem) = mem_finalize_for_show(
2675 mem,
2676 mb_rru,
2677 state,
2678 opcode_rx.is_some(),
2679 opcode_rxy.is_some(),
2680 false,
2681 true,
2682 );
2683
2684 let op = match &mem {
2685 &MemArg::BXD12 { .. } => opcode_rx,
2686 &MemArg::BXD20 { .. } => opcode_rxy,
2687 _ => unreachable!(),
2688 };
2689
2690 let rd = rd.to_reg().show_rru(mb_rru);
2691 let mem = mem.show_rru(mb_rru);
2692 format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
2693 }
2694 &Inst::AluRSImm16 { alu_op, rd, imm } => {
2695 let op = match alu_op {
2696 ALUOp::Add32 => "ahi",
2697 ALUOp::Add64 => "aghi",
2698 ALUOp::Mul32 => "mhi",
2699 ALUOp::Mul64 => "mghi",
2700 _ => unreachable!(),
2701 };
2702 let rd = rd.to_reg().show_rru(mb_rru);
2703 format!("{} {}, {}", op, rd, imm)
2704 }
2705 &Inst::AluRSImm32 { alu_op, rd, imm } => {
2706 let op = match alu_op {
2707 ALUOp::Add32 => "afi",
2708 ALUOp::Add64 => "agfi",
2709 ALUOp::Mul32 => "msfi",
2710 ALUOp::Mul64 => "msgfi",
2711 _ => unreachable!(),
2712 };
2713 let rd = rd.to_reg().show_rru(mb_rru);
2714 format!("{} {}, {}", op, rd, imm)
2715 }
2716 &Inst::AluRUImm32 { alu_op, rd, imm } => {
2717 let op = match alu_op {
2718 ALUOp::Add32 => "alfi",
2719 ALUOp::Add64 => "algfi",
2720 ALUOp::Sub32 => "slfi",
2721 ALUOp::Sub64 => "slgfi",
2722 _ => unreachable!(),
2723 };
2724 let rd = rd.to_reg().show_rru(mb_rru);
2725 format!("{} {}, {}", op, rd, imm)
2726 }
2727 &Inst::AluRUImm16Shifted { alu_op, rd, imm } => {
2728 let op = match (alu_op, imm.shift) {
2729 (ALUOp::And32, 0) => "nill",
2730 (ALUOp::And32, 1) => "nilh",
2731 (ALUOp::And64, 0) => "nill",
2732 (ALUOp::And64, 1) => "nilh",
2733 (ALUOp::And64, 2) => "nihl",
2734 (ALUOp::And64, 3) => "nihh",
2735 (ALUOp::Orr32, 0) => "oill",
2736 (ALUOp::Orr32, 1) => "oilh",
2737 (ALUOp::Orr64, 0) => "oill",
2738 (ALUOp::Orr64, 1) => "oilh",
2739 (ALUOp::Orr64, 2) => "oihl",
2740 (ALUOp::Orr64, 3) => "oihh",
2741 _ => unreachable!(),
2742 };
2743 let rd = rd.to_reg().show_rru(mb_rru);
2744 format!("{} {}, {}", op, rd, imm.bits)
2745 }
2746 &Inst::AluRUImm32Shifted { alu_op, rd, imm } => {
2747 let op = match (alu_op, imm.shift) {
2748 (ALUOp::And32, 0) => "nilf",
2749 (ALUOp::And64, 0) => "nilf",
2750 (ALUOp::And64, 1) => "nihf",
2751 (ALUOp::Orr32, 0) => "oilf",
2752 (ALUOp::Orr64, 0) => "oilf",
2753 (ALUOp::Orr64, 1) => "oihf",
2754 (ALUOp::Xor32, 0) => "xilf",
2755 (ALUOp::Xor64, 0) => "xilf",
2756 (ALUOp::Xor64, 1) => "xihf",
2757 _ => unreachable!(),
2758 };
2759 let rd = rd.to_reg().show_rru(mb_rru);
2760 format!("{} {}, {}", op, rd, imm.bits)
2761 }
2762 &Inst::SMulWide { rn, rm } => {
2763 let op = "mgrk";
2764 let rd = gpr(0).show_rru(mb_rru);
2765 let rn = rn.show_rru(mb_rru);
2766 let rm = rm.show_rru(mb_rru);
2767 format!("{} {}, {}, {}", op, rd, rn, rm)
2768 }
2769 &Inst::UMulWide { rn } => {
2770 let op = "mlgr";
2771 let rd = gpr(0).show_rru(mb_rru);
2772 let rn = rn.show_rru(mb_rru);
2773 format!("{} {}, {}", op, rd, rn)
2774 }
2775 &Inst::SDivMod32 { rn, .. } => {
2776 let op = "dsgfr";
2777 let rd = gpr(0).show_rru(mb_rru);
2778 let rn = rn.show_rru(mb_rru);
2779 format!("{} {}, {}", op, rd, rn)
2780 }
2781 &Inst::SDivMod64 { rn, .. } => {
2782 let op = "dsgr";
2783 let rd = gpr(0).show_rru(mb_rru);
2784 let rn = rn.show_rru(mb_rru);
2785 format!("{} {}, {}", op, rd, rn)
2786 }
2787 &Inst::UDivMod32 { rn, .. } => {
2788 let op = "dlr";
2789 let rd = gpr(0).show_rru(mb_rru);
2790 let rn = rn.show_rru(mb_rru);
2791 format!("{} {}, {}", op, rd, rn)
2792 }
2793 &Inst::UDivMod64 { rn, .. } => {
2794 let op = "dlgr";
2795 let rd = gpr(0).show_rru(mb_rru);
2796 let rn = rn.show_rru(mb_rru);
2797 format!("{} {}, {}", op, rd, rn)
2798 }
2799 &Inst::Flogr { rn } => {
2800 let op = "flogr";
2801 let rd = gpr(0).show_rru(mb_rru);
2802 let rn = rn.show_rru(mb_rru);
2803 format!("{} {}, {}", op, rd, rn)
2804 }
2805 &Inst::ShiftRR {
2806 shift_op,
2807 rd,
2808 rn,
2809 shift_imm,
2810 ref shift_reg,
2811 } => {
2812 let op = match shift_op {
2813 ShiftOp::RotL32 => "rll",
2814 ShiftOp::RotL64 => "rllg",
2815 ShiftOp::LShL32 => "sllk",
2816 ShiftOp::LShL64 => "sllg",
2817 ShiftOp::LShR32 => "srlk",
2818 ShiftOp::LShR64 => "srlg",
2819 ShiftOp::AShR32 => "srak",
2820 ShiftOp::AShR64 => "srag",
2821 };
2822 let rd = rd.to_reg().show_rru(mb_rru);
2823 let rn = rn.show_rru(mb_rru);
2824 let shift_imm = shift_imm.show_rru(mb_rru);
2825 let shift_reg = match shift_reg {
2826 Some(reg) => format!("({})", reg.show_rru(mb_rru)),
2827 None => "".to_string(),
2828 };
2829 format!("{} {}, {}, {}{}", op, rd, rn, shift_imm, shift_reg)
2830 }
2831 &Inst::UnaryRR { op, rd, rn } => {
2832 let (op, extra) = match op {
2833 UnaryOp::Abs32 => ("lpr", ""),
2834 UnaryOp::Abs64 => ("lpgr", ""),
2835 UnaryOp::Abs64Ext32 => ("lpgfr", ""),
2836 UnaryOp::Neg32 => ("lcr", ""),
2837 UnaryOp::Neg64 => ("lcgr", ""),
2838 UnaryOp::Neg64Ext32 => ("lcgfr", ""),
2839 UnaryOp::PopcntByte => ("popcnt", ""),
2840 UnaryOp::PopcntReg => ("popcnt", ", 8"),
2841 };
2842 let rd = rd.to_reg().show_rru(mb_rru);
2843 let rn = rn.show_rru(mb_rru);
2844 format!("{} {}, {}{}", op, rd, rn, extra)
2845 }
2846 &Inst::CmpRR { op, rn, rm } => {
2847 let op = match op {
2848 CmpOp::CmpS32 => "cr",
2849 CmpOp::CmpS64 => "cgr",
2850 CmpOp::CmpS64Ext32 => "cgfr",
2851 CmpOp::CmpL32 => "clr",
2852 CmpOp::CmpL64 => "clgr",
2853 CmpOp::CmpL64Ext32 => "clgfr",
2854 _ => unreachable!(),
2855 };
2856 let rn = rn.show_rru(mb_rru);
2857 let rm = rm.show_rru(mb_rru);
2858 format!("{} {}, {}", op, rn, rm)
2859 }
2860 &Inst::CmpRX { op, rn, ref mem } => {
2861 let (opcode_rx, opcode_rxy, opcode_ril) = match op {
2862 CmpOp::CmpS32 => (Some("c"), Some("cy"), Some("crl")),
2863 CmpOp::CmpS32Ext16 => (Some("ch"), Some("chy"), Some("chrl")),
2864 CmpOp::CmpS64 => (None, Some("cg"), Some("cgrl")),
2865 CmpOp::CmpS64Ext16 => (None, Some("cgh"), Some("cghrl")),
2866 CmpOp::CmpS64Ext32 => (None, Some("cgf"), Some("cgfrl")),
2867 CmpOp::CmpL32 => (Some("cl"), Some("cly"), Some("clrl")),
2868 CmpOp::CmpL32Ext16 => (None, None, Some("clhrl")),
2869 CmpOp::CmpL64 => (None, Some("clg"), Some("clgrl")),
2870 CmpOp::CmpL64Ext16 => (None, None, Some("clghrl")),
2871 CmpOp::CmpL64Ext32 => (None, Some("clgf"), Some("clgfrl")),
2872 };
2873
2874 let (mem_str, mem) = mem_finalize_for_show(
2875 mem,
2876 mb_rru,
2877 state,
2878 opcode_rx.is_some(),
2879 opcode_rxy.is_some(),
2880 opcode_ril.is_some(),
2881 true,
2882 );
2883
2884 let op = match &mem {
2885 &MemArg::BXD12 { .. } => opcode_rx,
2886 &MemArg::BXD20 { .. } => opcode_rxy,
2887 &MemArg::Label { .. } | &MemArg::Symbol { .. } => opcode_ril,
2888 _ => unreachable!(),
2889 };
2890
2891 let rn = rn.show_rru(mb_rru);
2892 let mem = mem.show_rru(mb_rru);
2893 format!("{}{} {}, {}", mem_str, op.unwrap(), rn, mem)
2894 }
2895 &Inst::CmpRSImm16 { op, rn, imm } => {
2896 let op = match op {
2897 CmpOp::CmpS32 => "chi",
2898 CmpOp::CmpS64 => "cghi",
2899 _ => unreachable!(),
2900 };
2901 let rn = rn.show_rru(mb_rru);
2902 format!("{} {}, {}", op, rn, imm)
2903 }
2904 &Inst::CmpRSImm32 { op, rn, imm } => {
2905 let op = match op {
2906 CmpOp::CmpS32 => "cfi",
2907 CmpOp::CmpS64 => "cgfi",
2908 _ => unreachable!(),
2909 };
2910 let rn = rn.show_rru(mb_rru);
2911 format!("{} {}, {}", op, rn, imm)
2912 }
2913 &Inst::CmpRUImm32 { op, rn, imm } => {
2914 let op = match op {
2915 CmpOp::CmpL32 => "clfi",
2916 CmpOp::CmpL64 => "clgfi",
2917 _ => unreachable!(),
2918 };
2919 let rn = rn.show_rru(mb_rru);
2920 format!("{} {}, {}", op, rn, imm)
2921 }
2922 &Inst::CmpTrapRR {
2923 op, rn, rm, cond, ..
2924 } => {
2925 let op = match op {
2926 CmpOp::CmpS32 => "crt",
2927 CmpOp::CmpS64 => "cgrt",
2928 CmpOp::CmpL32 => "clrt",
2929 CmpOp::CmpL64 => "clgrt",
2930 _ => unreachable!(),
2931 };
2932 let rn = rn.show_rru(mb_rru);
2933 let rm = rm.show_rru(mb_rru);
2934 let cond = cond.show_rru(mb_rru);
2935 format!("{}{} {}, {}", op, cond, rn, rm)
2936 }
2937 &Inst::CmpTrapRSImm16 {
2938 op, rn, imm, cond, ..
2939 } => {
2940 let op = match op {
2941 CmpOp::CmpS32 => "cit",
2942 CmpOp::CmpS64 => "cgit",
2943 _ => unreachable!(),
2944 };
2945 let rn = rn.show_rru(mb_rru);
2946 let cond = cond.show_rru(mb_rru);
2947 format!("{}{} {}, {}", op, cond, rn, imm)
2948 }
2949 &Inst::CmpTrapRUImm16 {
2950 op, rn, imm, cond, ..
2951 } => {
2952 let op = match op {
2953 CmpOp::CmpL32 => "clfit",
2954 CmpOp::CmpL64 => "clgit",
2955 _ => unreachable!(),
2956 };
2957 let rn = rn.show_rru(mb_rru);
2958 let cond = cond.show_rru(mb_rru);
2959 format!("{}{} {}, {}", op, cond, rn, imm)
2960 }
2961 &Inst::AtomicRmw {
2962 alu_op,
2963 rd,
2964 rn,
2965 ref mem,
2966 } => {
2967 let op = match alu_op {
2968 ALUOp::Add32 => "laa",
2969 ALUOp::Add64 => "laag",
2970 ALUOp::And32 => "lan",
2971 ALUOp::And64 => "lang",
2972 ALUOp::Orr32 => "lao",
2973 ALUOp::Orr64 => "laog",
2974 ALUOp::Xor32 => "lax",
2975 ALUOp::Xor64 => "laxg",
2976 _ => unreachable!(),
2977 };
2978
2979 let (mem_str, mem) =
2980 mem_finalize_for_show(mem, mb_rru, state, false, true, false, false);
2981
2982 let rd = rd.to_reg().show_rru(mb_rru);
2983 let rn = rn.show_rru(mb_rru);
2984 let mem = mem.show_rru(mb_rru);
2985 format!("{}{} {}, {}, {}", mem_str, op, rd, rn, mem)
2986 }
2987 &Inst::AtomicCas32 { rd, rn, ref mem } | &Inst::AtomicCas64 { rd, rn, ref mem } => {
2988 let (opcode_rs, opcode_rsy) = match self {
2989 &Inst::AtomicCas32 { .. } => (Some("cs"), Some("csy")),
2990 &Inst::AtomicCas64 { .. } => (None, Some("csg")),
2991 _ => unreachable!(),
2992 };
2993
2994 let (mem_str, mem) = mem_finalize_for_show(
2995 mem,
2996 mb_rru,
2997 state,
2998 opcode_rs.is_some(),
2999 opcode_rsy.is_some(),
3000 false,
3001 false,
3002 );
3003
3004 let op = match &mem {
3005 &MemArg::BXD12 { .. } => opcode_rs,
3006 &MemArg::BXD20 { .. } => opcode_rsy,
3007 _ => unreachable!(),
3008 };
3009
3010 let rd = rd.to_reg().show_rru(mb_rru);
3011 let rn = rn.show_rru(mb_rru);
3012 let mem = mem.show_rru(mb_rru);
3013 format!("{}{} {}, {}, {}", mem_str, op.unwrap(), rd, rn, mem)
3014 }
3015 &Inst::Fence => "bcr 14, 0".to_string(),
3016 &Inst::Load32 { rd, ref mem }
3017 | &Inst::Load32ZExt8 { rd, ref mem }
3018 | &Inst::Load32SExt8 { rd, ref mem }
3019 | &Inst::Load32ZExt16 { rd, ref mem }
3020 | &Inst::Load32SExt16 { rd, ref mem }
3021 | &Inst::Load64 { rd, ref mem }
3022 | &Inst::Load64ZExt8 { rd, ref mem }
3023 | &Inst::Load64SExt8 { rd, ref mem }
3024 | &Inst::Load64ZExt16 { rd, ref mem }
3025 | &Inst::Load64SExt16 { rd, ref mem }
3026 | &Inst::Load64ZExt32 { rd, ref mem }
3027 | &Inst::Load64SExt32 { rd, ref mem }
3028 | &Inst::LoadRev16 { rd, ref mem }
3029 | &Inst::LoadRev32 { rd, ref mem }
3030 | &Inst::LoadRev64 { rd, ref mem }
3031 | &Inst::FpuLoad32 { rd, ref mem }
3032 | &Inst::FpuLoad64 { rd, ref mem } => {
3033 let (opcode_rx, opcode_rxy, opcode_ril) = match self {
3034 &Inst::Load32 { .. } => (Some("l"), Some("ly"), Some("lrl")),
3035 &Inst::Load32ZExt8 { .. } => (None, Some("llc"), None),
3036 &Inst::Load32SExt8 { .. } => (None, Some("lb"), None),
3037 &Inst::Load32ZExt16 { .. } => (None, Some("llh"), Some("llhrl")),
3038 &Inst::Load32SExt16 { .. } => (Some("lh"), Some("lhy"), Some("lhrl")),
3039 &Inst::Load64 { .. } => (None, Some("lg"), Some("lgrl")),
3040 &Inst::Load64ZExt8 { .. } => (None, Some("llgc"), None),
3041 &Inst::Load64SExt8 { .. } => (None, Some("lgb"), None),
3042 &Inst::Load64ZExt16 { .. } => (None, Some("llgh"), Some("llghrl")),
3043 &Inst::Load64SExt16 { .. } => (None, Some("lgh"), Some("lghrl")),
3044 &Inst::Load64ZExt32 { .. } => (None, Some("llgf"), Some("llgfrl")),
3045 &Inst::Load64SExt32 { .. } => (None, Some("lgf"), Some("lgfrl")),
3046 &Inst::LoadRev16 { .. } => (None, Some("lrvh"), None),
3047 &Inst::LoadRev32 { .. } => (None, Some("lrv"), None),
3048 &Inst::LoadRev64 { .. } => (None, Some("lrvg"), None),
3049 &Inst::FpuLoad32 { .. } => (Some("le"), Some("ley"), None),
3050 &Inst::FpuLoad64 { .. } => (Some("ld"), Some("ldy"), None),
3051 _ => unreachable!(),
3052 };
3053
3054 let (mem_str, mem) = mem_finalize_for_show(
3055 mem,
3056 mb_rru,
3057 state,
3058 opcode_rx.is_some(),
3059 opcode_rxy.is_some(),
3060 opcode_ril.is_some(),
3061 true,
3062 );
3063
3064 let op = match &mem {
3065 &MemArg::BXD12 { .. } => opcode_rx,
3066 &MemArg::BXD20 { .. } => opcode_rxy,
3067 &MemArg::Label { .. } | &MemArg::Symbol { .. } => opcode_ril,
3068 _ => unreachable!(),
3069 };
3070
3071 let rd = rd.to_reg().show_rru(mb_rru);
3072 let mem = mem.show_rru(mb_rru);
3073 format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
3074 }
3075 &Inst::FpuLoadRev32 { rd, ref mem } | &Inst::FpuLoadRev64 { rd, ref mem } => {
3076 let (mem_str, mem) =
3077 mem_finalize_for_show(mem, mb_rru, state, true, false, false, true);
3078
3079 let op = match self {
3080 &Inst::FpuLoadRev32 { .. } => "vlebrf",
3081 &Inst::FpuLoadRev64 { .. } => "vlebrg",
3082 _ => unreachable!(),
3083 };
3084 let rd = rd.to_reg().show_rru(mb_rru);
3085 let mem = mem.show_rru(mb_rru);
3086 format!("{}{} {}, {}, 0", mem_str, op, rd, mem)
3087 }
3088 &Inst::Store8 { rd, ref mem }
3089 | &Inst::Store16 { rd, ref mem }
3090 | &Inst::Store32 { rd, ref mem }
3091 | &Inst::Store64 { rd, ref mem }
3092 | &Inst::StoreRev16 { rd, ref mem }
3093 | &Inst::StoreRev32 { rd, ref mem }
3094 | &Inst::StoreRev64 { rd, ref mem }
3095 | &Inst::FpuStore32 { rd, ref mem }
3096 | &Inst::FpuStore64 { rd, ref mem } => {
3097 let (opcode_rx, opcode_rxy, opcode_ril) = match self {
3098 &Inst::Store8 { .. } => (Some("stc"), Some("stcy"), None),
3099 &Inst::Store16 { .. } => (Some("sth"), Some("sthy"), Some("sthrl")),
3100 &Inst::Store32 { .. } => (Some("st"), Some("sty"), Some("strl")),
3101 &Inst::Store64 { .. } => (None, Some("stg"), Some("stgrl")),
3102 &Inst::StoreRev16 { .. } => (None, Some("strvh"), None),
3103 &Inst::StoreRev32 { .. } => (None, Some("strv"), None),
3104 &Inst::StoreRev64 { .. } => (None, Some("strvg"), None),
3105 &Inst::FpuStore32 { .. } => (Some("ste"), Some("stey"), None),
3106 &Inst::FpuStore64 { .. } => (Some("std"), Some("stdy"), None),
3107 _ => unreachable!(),
3108 };
3109
3110 let (mem_str, mem) = mem_finalize_for_show(
3111 mem,
3112 mb_rru,
3113 state,
3114 opcode_rx.is_some(),
3115 opcode_rxy.is_some(),
3116 opcode_ril.is_some(),
3117 true,
3118 );
3119
3120 let op = match &mem {
3121 &MemArg::BXD12 { .. } => opcode_rx,
3122 &MemArg::BXD20 { .. } => opcode_rxy,
3123 &MemArg::Label { .. } | &MemArg::Symbol { .. } => opcode_ril,
3124 _ => unreachable!(),
3125 };
3126
3127 let rd = rd.show_rru(mb_rru);
3128 let mem = mem.show_rru(mb_rru);
3129 format!("{}{} {}, {}", mem_str, op.unwrap(), rd, mem)
3130 }
3131 &Inst::StoreImm8 { imm, ref mem } => {
3132 let (mem_str, mem) =
3133 mem_finalize_for_show(mem, mb_rru, state, true, true, false, false);
3134 let op = match &mem {
3135 &MemArg::BXD12 { .. } => "mvi",
3136 &MemArg::BXD20 { .. } => "mviy",
3137 _ => unreachable!(),
3138 };
3139
3140 let mem = mem.show_rru(mb_rru);
3141 format!("{}{} {}, {}", mem_str, op, mem, imm)
3142 }
3143 &Inst::StoreImm16 { imm, ref mem }
3144 | &Inst::StoreImm32SExt16 { imm, ref mem }
3145 | &Inst::StoreImm64SExt16 { imm, ref mem } => {
3146 let (mem_str, mem) =
3147 mem_finalize_for_show(mem, mb_rru, state, false, true, false, false);
3148 let op = match self {
3149 &Inst::StoreImm16 { .. } => "mvhhi",
3150 &Inst::StoreImm32SExt16 { .. } => "mvhi",
3151 &Inst::StoreImm64SExt16 { .. } => "mvghi",
3152 _ => unreachable!(),
3153 };
3154
3155 let mem = mem.show_rru(mb_rru);
3156 format!("{}{} {}, {}", mem_str, op, mem, imm)
3157 }
3158 &Inst::FpuStoreRev32 { rd, ref mem } | &Inst::FpuStoreRev64 { rd, ref mem } => {
3159 let (mem_str, mem) =
3160 mem_finalize_for_show(mem, mb_rru, state, true, false, false, true);
3161
3162 let op = match self {
3163 &Inst::FpuStoreRev32 { .. } => "vstebrf",
3164 &Inst::FpuStoreRev64 { .. } => "vstebrg",
3165 _ => unreachable!(),
3166 };
3167 let rd = rd.show_rru(mb_rru);
3168 let mem = mem.show_rru(mb_rru);
3169 format!("{}{} {}, {}, 0", mem_str, op, rd, mem)
3170 }
3171 &Inst::LoadMultiple64 {
3172 rt,
3173 rt2,
3174 addr_reg,
3175 addr_off,
3176 } => {
3177 let rt = rt.show_rru(mb_rru);
3178 let rt2 = rt2.show_rru(mb_rru);
3179 let addr_reg = addr_reg.show_rru(mb_rru);
3180 let addr_off = addr_off.show_rru(mb_rru);
3181 format!("lmg {}, {}, {}({})", rt, rt2, addr_off, addr_reg)
3182 }
3183 &Inst::StoreMultiple64 {
3184 rt,
3185 rt2,
3186 addr_reg,
3187 addr_off,
3188 } => {
3189 let rt = rt.show_rru(mb_rru);
3190 let rt2 = rt2.show_rru(mb_rru);
3191 let addr_reg = addr_reg.show_rru(mb_rru);
3192 let addr_off = addr_off.show_rru(mb_rru);
3193 format!("stmg {}, {}, {}({})", rt, rt2, addr_off, addr_reg)
3194 }
3195 &Inst::Mov64 { rd, rm } => {
3196 let rd = rd.to_reg().show_rru(mb_rru);
3197 let rm = rm.show_rru(mb_rru);
3198 format!("lgr {}, {}", rd, rm)
3199 }
3200 &Inst::Mov32 { rd, rm } => {
3201 let rd = rd.to_reg().show_rru(mb_rru);
3202 let rm = rm.show_rru(mb_rru);
3203 format!("lr {}, {}", rd, rm)
3204 }
3205 &Inst::Mov32Imm { rd, ref imm } => {
3206 let rd = rd.to_reg().show_rru(mb_rru);
3207 format!("iilf {}, {}", rd, imm)
3208 }
3209 &Inst::Mov32SImm16 { rd, ref imm } => {
3210 let rd = rd.to_reg().show_rru(mb_rru);
3211 format!("lhi {}, {}", rd, imm)
3212 }
3213 &Inst::Mov64SImm16 { rd, ref imm } => {
3214 let rd = rd.to_reg().show_rru(mb_rru);
3215 format!("lghi {}, {}", rd, imm)
3216 }
3217 &Inst::Mov64SImm32 { rd, ref imm } => {
3218 let rd = rd.to_reg().show_rru(mb_rru);
3219 format!("lgfi {}, {}", rd, imm)
3220 }
3221 &Inst::Mov64UImm16Shifted { rd, ref imm } => {
3222 let rd = rd.to_reg().show_rru(mb_rru);
3223 let op = match imm.shift {
3224 0 => "llill",
3225 1 => "llilh",
3226 2 => "llihl",
3227 3 => "llihh",
3228 _ => unreachable!(),
3229 };
3230 format!("{} {}, {}", op, rd, imm.bits)
3231 }
3232 &Inst::Mov64UImm32Shifted { rd, ref imm } => {
3233 let rd = rd.to_reg().show_rru(mb_rru);
3234 let op = match imm.shift {
3235 0 => "llilf",
3236 1 => "llihf",
3237 _ => unreachable!(),
3238 };
3239 format!("{} {}, {}", op, rd, imm.bits)
3240 }
3241 &Inst::Insert64UImm16Shifted { rd, ref imm } => {
3242 let rd = rd.to_reg().show_rru(mb_rru);
3243 let op = match imm.shift {
3244 0 => "iill",
3245 1 => "iilh",
3246 2 => "iihl",
3247 3 => "iihh",
3248 _ => unreachable!(),
3249 };
3250 format!("{} {}, {}", op, rd, imm.bits)
3251 }
3252 &Inst::Insert64UImm32Shifted { rd, ref imm } => {
3253 let rd = rd.to_reg().show_rru(mb_rru);
3254 let op = match imm.shift {
3255 0 => "iilf",
3256 1 => "iihf",
3257 _ => unreachable!(),
3258 };
3259 format!("{} {}, {}", op, rd, imm.bits)
3260 }
3261 &Inst::CMov32 { rd, cond, rm } => {
3262 let rd = rd.to_reg().show_rru(mb_rru);
3263 let rm = rm.show_rru(mb_rru);
3264 let cond = cond.show_rru(mb_rru);
3265 format!("locr{} {}, {}", cond, rd, rm)
3266 }
3267 &Inst::CMov64 { rd, cond, rm } => {
3268 let rd = rd.to_reg().show_rru(mb_rru);
3269 let rm = rm.show_rru(mb_rru);
3270 let cond = cond.show_rru(mb_rru);
3271 format!("locgr{} {}, {}", cond, rd, rm)
3272 }
3273 &Inst::CMov32SImm16 { rd, cond, ref imm } => {
3274 let rd = rd.to_reg().show_rru(mb_rru);
3275 let cond = cond.show_rru(mb_rru);
3276 format!("lochi{} {}, {}", cond, rd, imm)
3277 }
3278 &Inst::CMov64SImm16 { rd, cond, ref imm } => {
3279 let rd = rd.to_reg().show_rru(mb_rru);
3280 let cond = cond.show_rru(mb_rru);
3281 format!("locghi{} {}, {}", cond, rd, imm)
3282 }
3283 &Inst::FpuMove32 { rd, rn } => {
3284 let rd = rd.to_reg().show_rru(mb_rru);
3285 let rn = rn.show_rru(mb_rru);
3286 format!("ler {}, {}", rd, rn)
3287 }
3288 &Inst::FpuMove64 { rd, rn } => {
3289 let rd = rd.to_reg().show_rru(mb_rru);
3290 let rn = rn.show_rru(mb_rru);
3291 format!("ldr {}, {}", rd, rn)
3292 }
3293 &Inst::FpuCMov32 { rd, cond, rm } => {
3294 let rd = rd.to_reg().show_rru(mb_rru);
3295 let rm = rm.show_rru(mb_rru);
3296 let cond = cond.invert().show_rru(mb_rru);
3297 format!("j{} 6 ; ler {}, {}", cond, rd, rm)
3298 }
3299 &Inst::FpuCMov64 { rd, cond, rm } => {
3300 let rd = rd.to_reg().show_rru(mb_rru);
3301 let rm = rm.show_rru(mb_rru);
3302 let cond = cond.invert().show_rru(mb_rru);
3303 format!("j{} 6 ; ldr {}, {}", cond, rd, rm)
3304 }
3305 &Inst::MovToFpr { rd, rn } => {
3306 let rd = rd.to_reg().show_rru(mb_rru);
3307 let rn = rn.show_rru(mb_rru);
3308 format!("ldgr {}, {}", rd, rn)
3309 }
3310 &Inst::MovFromFpr { rd, rn } => {
3311 let rd = rd.to_reg().show_rru(mb_rru);
3312 let rn = rn.show_rru(mb_rru);
3313 format!("lgdr {}, {}", rd, rn)
3314 }
3315 &Inst::FpuRR { fpu_op, rd, rn } => {
3316 let op = match fpu_op {
3317 FPUOp1::Abs32 => "lpebr",
3318 FPUOp1::Abs64 => "lpdbr",
3319 FPUOp1::Neg32 => "lcebr",
3320 FPUOp1::Neg64 => "lcdbr",
3321 FPUOp1::NegAbs32 => "lnebr",
3322 FPUOp1::NegAbs64 => "lndbr",
3323 FPUOp1::Sqrt32 => "sqebr",
3324 FPUOp1::Sqrt64 => "sqdbr",
3325 FPUOp1::Cvt32To64 => "ldebr",
3326 FPUOp1::Cvt64To32 => "ledbr",
3327 };
3328 let rd = rd.to_reg().show_rru(mb_rru);
3329 let rn = rn.show_rru(mb_rru);
3330 format!("{} {}, {}", op, rd, rn)
3331 }
3332 &Inst::FpuRRR { fpu_op, rd, rm } => {
3333 let op = match fpu_op {
3334 FPUOp2::Add32 => "aebr",
3335 FPUOp2::Add64 => "adbr",
3336 FPUOp2::Sub32 => "sebr",
3337 FPUOp2::Sub64 => "sdbr",
3338 FPUOp2::Mul32 => "meebr",
3339 FPUOp2::Mul64 => "mdbr",
3340 FPUOp2::Div32 => "debr",
3341 FPUOp2::Div64 => "ddbr",
3342 _ => unimplemented!(),
3343 };
3344 let rd = rd.to_reg().show_rru(mb_rru);
3345 let rm = rm.show_rru(mb_rru);
3346 format!("{} {}, {}", op, rd, rm)
3347 }
3348 &Inst::FpuRRRR { fpu_op, rd, rn, rm } => {
3349 let op = match fpu_op {
3350 FPUOp3::MAdd32 => "maebr",
3351 FPUOp3::MAdd64 => "madbr",
3352 FPUOp3::MSub32 => "msebr",
3353 FPUOp3::MSub64 => "msdbr",
3354 };
3355 let rd = rd.to_reg().show_rru(mb_rru);
3356 let rn = rn.show_rru(mb_rru);
3357 let rm = rm.show_rru(mb_rru);
3358 format!("{} {}, {}, {}", op, rd, rn, rm)
3359 }
3360 &Inst::FpuCopysign { rd, rn, rm } => {
3361 let rd = rd.to_reg().show_rru(mb_rru);
3362 let rn = rn.show_rru(mb_rru);
3363 let rm = rm.show_rru(mb_rru);
3364 format!("cpsdr {}, {}, {}", rd, rm, rn)
3365 }
3366 &Inst::FpuCmp32 { rn, rm } => {
3367 let rn = rn.show_rru(mb_rru);
3368 let rm = rm.show_rru(mb_rru);
3369 format!("cebr {}, {}", rn, rm)
3370 }
3371 &Inst::FpuCmp64 { rn, rm } => {
3372 let rn = rn.show_rru(mb_rru);
3373 let rm = rm.show_rru(mb_rru);
3374 format!("cdbr {}, {}", rn, rm)
3375 }
3376 &Inst::LoadFpuConst32 { rd, const_data } => {
3377 let rd = rd.to_reg().show_rru(mb_rru);
3378 let tmp = writable_spilltmp_reg().to_reg().show_rru(mb_rru);
3379 format!(
3380 "bras {}, 8 ; data.f32 {} ; le {}, 0({})",
3381 tmp, const_data, rd, tmp
3382 )
3383 }
3384 &Inst::LoadFpuConst64 { rd, const_data } => {
3385 let rd = rd.to_reg().show_rru(mb_rru);
3386 let tmp = writable_spilltmp_reg().to_reg().show_rru(mb_rru);
3387 format!(
3388 "bras {}, 12 ; data.f64 {} ; ld {}, 0({})",
3389 tmp, const_data, rd, tmp
3390 )
3391 }
3392 &Inst::FpuToInt { op, rd, rn } => {
3393 let op = match op {
3394 FpuToIntOp::F32ToI32 => "cfebra",
3395 FpuToIntOp::F32ToU32 => "clfebr",
3396 FpuToIntOp::F32ToI64 => "cgebra",
3397 FpuToIntOp::F32ToU64 => "clgebr",
3398 FpuToIntOp::F64ToI32 => "cfdbra",
3399 FpuToIntOp::F64ToU32 => "clfdbr",
3400 FpuToIntOp::F64ToI64 => "cgdbra",
3401 FpuToIntOp::F64ToU64 => "clgdbr",
3402 };
3403 let rd = rd.to_reg().show_rru(mb_rru);
3404 let rn = rn.show_rru(mb_rru);
3405 format!("{} {}, 5, {}, 0", op, rd, rn)
3406 }
3407 &Inst::IntToFpu { op, rd, rn } => {
3408 let op = match op {
3409 IntToFpuOp::I32ToF32 => "cefbra",
3410 IntToFpuOp::U32ToF32 => "celfbr",
3411 IntToFpuOp::I64ToF32 => "cegbra",
3412 IntToFpuOp::U64ToF32 => "celgbr",
3413 IntToFpuOp::I32ToF64 => "cdfbra",
3414 IntToFpuOp::U32ToF64 => "cdlfbr",
3415 IntToFpuOp::I64ToF64 => "cdgbra",
3416 IntToFpuOp::U64ToF64 => "cdlgbr",
3417 };
3418 let rd = rd.to_reg().show_rru(mb_rru);
3419 let rn = rn.show_rru(mb_rru);
3420 format!("{} {}, 0, {}, 0", op, rd, rn)
3421 }
3422 &Inst::FpuRound { op, rd, rn } => {
3423 let (op, m3) = match op {
3424 FpuRoundMode::Minus32 => ("fiebr", 7),
3425 FpuRoundMode::Minus64 => ("fidbr", 7),
3426 FpuRoundMode::Plus32 => ("fiebr", 6),
3427 FpuRoundMode::Plus64 => ("fidbr", 6),
3428 FpuRoundMode::Zero32 => ("fiebr", 5),
3429 FpuRoundMode::Zero64 => ("fidbr", 5),
3430 FpuRoundMode::Nearest32 => ("fiebr", 4),
3431 FpuRoundMode::Nearest64 => ("fidbr", 4),
3432 };
3433 let rd = rd.to_reg().show_rru(mb_rru);
3434 let rn = rn.show_rru(mb_rru);
3435 format!("{} {}, {}, {}", op, rd, rn, m3)
3436 }
3437 &Inst::FpuVecRRR { fpu_op, rd, rn, rm } => {
3438 let op = match fpu_op {
3439 FPUOp2::Max32 => "wfmaxsb",
3440 FPUOp2::Max64 => "wfmaxdb",
3441 FPUOp2::Min32 => "wfminsb",
3442 FPUOp2::Min64 => "wfmindb",
3443 _ => unimplemented!(),
3444 };
3445 let rd = rd.to_reg().show_rru(mb_rru);
3446 let rn = rn.show_rru(mb_rru);
3447 let rm = rm.show_rru(mb_rru);
3448 format!("{} {}, {}, {}, 1", op, rd, rn, rm)
3449 }
3450 &Inst::Extend {
3451 rd,
3452 rn,
3453 signed,
3454 from_bits,
3455 to_bits,
3456 } => {
3457 let rd = rd.to_reg().show_rru(mb_rru);
3458 let rn = rn.show_rru(mb_rru);
3459 let op = match (signed, from_bits, to_bits) {
3460 (_, 1, 32) => "llcr",
3461 (_, 1, 64) => "llgcr",
3462 (false, 8, 32) => "llcr",
3463 (false, 8, 64) => "llgcr",
3464 (true, 8, 32) => "lbr",
3465 (true, 8, 64) => "lgbr",
3466 (false, 16, 32) => "llhr",
3467 (false, 16, 64) => "llghr",
3468 (true, 16, 32) => "lhr",
3469 (true, 16, 64) => "lghr",
3470 (false, 32, 64) => "llgfr",
3471 (true, 32, 64) => "lgfr",
3472 _ => panic!("Unsupported Extend case: {:?}", self),
3473 };
3474 format!("{} {}, {}", op, rd, rn)
3475 }
3476 &Inst::Call { link, ref info, .. } => {
3477 let link = link.show_rru(mb_rru);
3478 format!("brasl {}, {}", link, info.dest)
3479 }
3480 &Inst::CallInd { link, ref info, .. } => {
3481 let link = link.show_rru(mb_rru);
3482 let rn = info.rn.show_rru(mb_rru);
3483 format!("basr {}, {}", link, rn)
3484 }
3485 &Inst::Ret { link } => {
3486 let link = link.show_rru(mb_rru);
3487 format!("br {}", link)
3488 }
3489 &Inst::EpiloguePlaceholder => "epilogue placeholder".to_string(),
3490 &Inst::Jump { ref dest } => {
3491 let dest = dest.show_rru(mb_rru);
3492 format!("jg {}", dest)
3493 }
3494 &Inst::IndirectBr { rn, .. } => {
3495 let rn = rn.show_rru(mb_rru);
3496 format!("br {}", rn)
3497 }
3498 &Inst::CondBr {
3499 ref taken,
3500 ref not_taken,
3501 cond,
3502 } => {
3503 let taken = taken.show_rru(mb_rru);
3504 let not_taken = not_taken.show_rru(mb_rru);
3505 let cond = cond.show_rru(mb_rru);
3506 format!("jg{} {} ; jg {}", cond, taken, not_taken)
3507 }
3508 &Inst::OneWayCondBr { ref target, cond } => {
3509 let target = target.show_rru(mb_rru);
3510 let cond = cond.show_rru(mb_rru);
3511 format!("jg{} {}", cond, target)
3512 }
3513 &Inst::Debugtrap => "debugtrap".to_string(),
3514 &Inst::Trap { .. } => "trap".to_string(),
3515 &Inst::TrapIf { cond, .. } => {
3516 let cond = cond.invert().show_rru(mb_rru);
3517 format!("j{} 6 ; trap", cond)
3518 }
3519 &Inst::JTSequence {
3520 ref info,
3521 ridx,
3522 rtmp1,
3523 rtmp2,
3524 ..
3525 } => {
3526 let ridx = ridx.show_rru(mb_rru);
3527 let rtmp1 = rtmp1.show_rru(mb_rru);
3528 let rtmp2 = rtmp2.show_rru(mb_rru);
3529 let default_target = info.default_target.show_rru(mb_rru);
3530 format!(
3531 concat!(
3532 "clgfi {}, {} ; ",
3533 "jghe {} ; ",
3534 "sllg {}, {}, 2 ; ",
3535 "larl {}, 18 ; ",
3536 "lgf {}, 0({}, {}) ; ",
3537 "agrk {}, {}, {} ; ",
3538 "br {} ; ",
3539 "jt_entries {:?}"
3540 ),
3541 ridx,
3542 info.targets.len(),
3543 default_target,
3544 rtmp2,
3545 ridx,
3546 rtmp1,
3547 rtmp2,
3548 rtmp2,
3549 rtmp1,
3550 rtmp1,
3551 rtmp1,
3552 rtmp2,
3553 rtmp1,
3554 info.targets
3555 )
3556 }
3557 &Inst::LoadExtNameFar {
3558 rd,
3559 ref name,
3560 offset,
3561 } => {
3562 let rd = rd.show_rru(mb_rru);
3563 let tmp = writable_spilltmp_reg().to_reg().show_rru(mb_rru);
3564 format!(
3565 "bras {}, 12 ; data {} + {} ; lg {}, 0({})",
3566 tmp, name, offset, rd, tmp
3567 )
3568 }
3569 &Inst::LoadAddr { rd, ref mem } => {
3570 let (mem_str, mem) =
3571 mem_finalize_for_show(mem, mb_rru, state, true, true, true, true);
3572
3573 let op = match &mem {
3574 &MemArg::BXD12 { .. } => "la",
3575 &MemArg::BXD20 { .. } => "lay",
3576 &MemArg::Label { .. } | &MemArg::Symbol { .. } => "larl",
3577 _ => unreachable!(),
3578 };
3579 let rd = rd.show_rru(mb_rru);
3580 let mem = mem.show_rru(mb_rru);
3581 format!("{}{} {}, {}", mem_str, op, rd, mem)
3582 }
3583 &Inst::VirtualSPOffsetAdj { offset } => {
3584 state.virtual_sp_offset += offset;
3585 format!("virtual_sp_offset_adjust {}", offset)
3586 }
3587 &Inst::ValueLabelMarker { label, reg } => {
3588 format!("value_label {:?}, {}", label, reg.show_rru(mb_rru))
3589 }
3590 &Inst::Unwind { ref inst } => {
3591 format!("unwind {:?}", inst)
3592 }
3593 }
3594 }
3595 }
3596
3597 //=============================================================================
3598 // Label fixups and jump veneers.
3599
/// Different forms of label references for different instruction formats.
///
/// Each variant names the instruction format whose immediate field holds the
/// PC-relative displacement; `patch()` below writes the fixed-up offset into
/// the corresponding byte range of the instruction.
pub enum LabelUse {
    /// RI-format branch. 16-bit signed offset. PC-relative, offset is imm << 1.
    /// The immediate occupies bytes 2..4 of the 4-byte instruction.
    BranchRI,
    /// RIL-format branch. 32-bit signed offset. PC-relative, offset is imm << 1.
    /// The immediate occupies bytes 2..6 of the 6-byte instruction.
    BranchRIL,
    /// 32-bit PC relative constant offset (from address of constant itself),
    /// signed. Used in jump tables. Unlike the branch forms, this offset is
    /// a plain byte offset (not halfword-scaled).
    PCRel32,
}
3611
3612 impl MachInstLabelUse for LabelUse {
3613 /// Alignment for veneer code.
3614 const ALIGN: CodeOffset = 2;
3615
3616 /// Maximum PC-relative range (positive), inclusive.
max_pos_range(self) -> CodeOffset3617 fn max_pos_range(self) -> CodeOffset {
3618 match self {
3619 // 16-bit signed immediate, left-shifted by 1.
3620 LabelUse::BranchRI => (1 << 20) - 1,
3621 // This can address any valid CodeOffset.
3622 LabelUse::BranchRIL => 0x7fff_ffff,
3623 LabelUse::PCRel32 => 0x7fff_ffff,
3624 }
3625 }
3626
3627 /// Maximum PC-relative range (negative).
max_neg_range(self) -> CodeOffset3628 fn max_neg_range(self) -> CodeOffset {
3629 match self {
3630 // 16-bit signed immediate, left-shifted by 1.
3631 LabelUse::BranchRI => 1 << 20,
3632 // This can address any valid CodeOffset.
3633 LabelUse::BranchRIL => 0x8000_0000,
3634 LabelUse::PCRel32 => 0x8000_0000,
3635 }
3636 }
3637
3638 /// Size of window into code needed to do the patch.
patch_size(self) -> CodeOffset3639 fn patch_size(self) -> CodeOffset {
3640 match self {
3641 LabelUse::BranchRI => 4,
3642 LabelUse::BranchRIL => 6,
3643 LabelUse::PCRel32 => 4,
3644 }
3645 }
3646
3647 /// Perform the patch.
patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset)3648 fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
3649 let pc_rel = (label_offset as i64) - (use_offset as i64);
3650 debug_assert!(pc_rel <= self.max_pos_range() as i64);
3651 debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
3652 debug_assert!(pc_rel & 1 == 0);
3653 let pc_rel_shifted = pc_rel >> 1;
3654
3655 match self {
3656 LabelUse::BranchRI => {
3657 buffer[2..4].clone_from_slice(&u16::to_be_bytes(pc_rel_shifted as u16));
3658 }
3659 LabelUse::BranchRIL => {
3660 buffer[2..6].clone_from_slice(&u32::to_be_bytes(pc_rel_shifted as u32));
3661 }
3662 LabelUse::PCRel32 => {
3663 let insn_word = u32::from_be_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
3664 let insn_word = insn_word.wrapping_add(pc_rel as u32);
3665 buffer[0..4].clone_from_slice(&u32::to_be_bytes(insn_word));
3666 }
3667 }
3668 }
3669
3670 /// Is a veneer supported for this label reference type?
supports_veneer(self) -> bool3671 fn supports_veneer(self) -> bool {
3672 false
3673 }
3674
3675 /// How large is the veneer, if supported?
veneer_size(self) -> CodeOffset3676 fn veneer_size(self) -> CodeOffset {
3677 0
3678 }
3679
3680 /// Generate a veneer into the buffer, given that this veneer is at `veneer_offset`, and return
3681 /// an offset and label-use for the veneer's use of the original label.
generate_veneer( self, _buffer: &mut [u8], _veneer_offset: CodeOffset, ) -> (CodeOffset, LabelUse)3682 fn generate_veneer(
3683 self,
3684 _buffer: &mut [u8],
3685 _veneer_offset: CodeOffset,
3686 ) -> (CodeOffset, LabelUse) {
3687 unreachable!();
3688 }
3689 }
3690