1 //! This module defines x86_64-specific machine instruction types.
2 
3 use crate::binemit::{CodeOffset, StackMap};
4 use crate::ir::{types, ExternalName, Opcode, SourceLoc, TrapCode, Type, ValueLabel};
5 use crate::isa::unwind::UnwindInst;
6 use crate::isa::x64::abi::X64ABIMachineSpec;
7 use crate::isa::x64::settings as x64_settings;
8 use crate::isa::CallConv;
9 use crate::machinst::*;
10 use crate::{settings, settings::Flags, CodegenError, CodegenResult};
11 use alloc::boxed::Box;
12 use alloc::vec::Vec;
13 use regalloc::{
14     PrettyPrint, PrettyPrintSized, RealRegUniverse, Reg, RegClass, RegUsageCollector,
15     RegUsageMapper, SpillSlot, VirtualReg, Writable,
16 };
17 use smallvec::{smallvec, SmallVec};
18 use std::fmt;
19 use std::string::{String, ToString};
20 
21 pub mod args;
22 mod emit;
23 #[cfg(test)]
24 mod emit_tests;
25 pub mod regs;
26 pub mod unwind;
27 
28 use args::*;
29 use regs::{create_reg_universe_systemv, show_ireg_sized};
30 
31 //=============================================================================
32 // Instructions (top level): definition
33 
34 // Don't build these directly.  Instead use the Inst:: functions to create them.
35 
/// Instructions.  Destinations are on the RIGHT (a la AT&T syntax).
#[derive(Clone)]
pub enum Inst {
    /// Nops of various sizes, including zero.
    Nop { len: u8 },

    // =====================================
    // Integer instructions.
    /// Integer arithmetic/bit-twiddling: (add sub and or xor mul adc? sbb?) (32 64) (reg addr imm) reg
    AluRmiR {
        size: OperandSize, // 4 or 8
        op: AluRmiROpcode,
        src: RegMemImm,
        dst: Writable<Reg>,
    },

    /// Instructions on GPR that only read src and defines dst (dst is not modified): bsr, etc.
    UnaryRmR {
        size: OperandSize, // 2, 4 or 8
        op: UnaryRmROpcode,
        src: RegMem,
        dst: Writable<Reg>,
    },

    /// Bitwise not
    Not {
        size: OperandSize, // 1, 2, 4 or 8
        src: Writable<Reg>,
    },

    /// Integer negation
    Neg {
        size: OperandSize, // 1, 2, 4 or 8
        src: Writable<Reg>,
    },

    /// Integer quotient and remainder: (div idiv) $rax $rdx (reg addr)
    Div {
        size: OperandSize, // 1, 2, 4 or 8
        signed: bool,
        divisor: RegMem,
    },

    /// The high bits (RDX) of a (un)signed multiply: RDX:RAX := RAX * rhs.
    MulHi {
        size: OperandSize, // 2, 4, or 8
        signed: bool,
        rhs: RegMem,
    },

    /// A synthetic sequence to implement the right inline checks for remainder and division,
    /// assuming the dividend is in %rax.
    /// Puts the result back into %rax for a division, or into %rdx for a remainder, to mimic
    /// what the div instruction does.
    /// The generated code sequence is described in the emit's function match arm for this
    /// instruction.
    ///
    /// Note: %rdx is marked as modified by this instruction, to avoid an early clobber problem
    /// with the temporary and divisor registers. Make sure to zero %rdx right before this
    /// instruction, or you might run into regalloc failures where %rdx is live before its first
    /// def!
    CheckedDivOrRemSeq {
        kind: DivOrRemKind,
        size: OperandSize,
        /// The divisor operand. Note it's marked as modified so that it gets assigned a register
        /// different from the temporary.
        divisor: Writable<Reg>,
        tmp: Option<Writable<Reg>>,
    },

    /// Do a sign-extend based on the sign of the value in rax into rdx: (cwd cdq cqo)
    /// or al into ah: (cbw)
    SignExtendData {
        size: OperandSize, // 1, 2, 4 or 8
    },

    /// Constant materialization: (imm32 imm64) reg.
    /// Either: movl $imm32, %reg32 or movabsq $imm64, %reg64.
    Imm {
        dst_size: OperandSize, // 4 or 8
        simm64: u64,
        dst: Writable<Reg>,
    },

    /// GPR to GPR move: mov (64 32) reg reg.
    MovRR {
        size: OperandSize, // 4 or 8
        src: Reg,
        dst: Writable<Reg>,
    },

    /// Zero-extended loads, except for 64 bits: movz (bl bq wl wq lq) addr reg.
    /// Note that the lq variant doesn't really exist since the default zero-extend rule makes it
    /// unnecessary. For that case we emit the equivalent "movl AM, reg32".
    MovzxRmR {
        ext_mode: ExtMode,
        src: RegMem,
        dst: Writable<Reg>,
    },

    /// A plain 64-bit integer load, since MovZX_RM_R can't represent that.
    Mov64MR {
        src: SyntheticAmode,
        dst: Writable<Reg>,
    },

    /// Loads the memory address of addr into dst.
    LoadEffectiveAddress {
        addr: SyntheticAmode,
        dst: Writable<Reg>,
    },

    /// Sign-extended loads and moves: movs (bl bq wl wq lq) addr reg.
    MovsxRmR {
        ext_mode: ExtMode,
        src: RegMem,
        dst: Writable<Reg>,
    },

    /// Integer stores: mov (b w l q) reg addr.
    MovRM {
        size: OperandSize, // 1, 2, 4 or 8.
        src: Reg,
        dst: SyntheticAmode,
    },

    /// Arithmetic shifts: (shl shr sar) (b w l q) imm reg.
    ShiftR {
        size: OperandSize, // 1, 2, 4 or 8
        kind: ShiftKind,
        /// shift count: Some(0 .. #bits-in-type - 1), or None to mean "%cl".
        num_bits: Option<u8>,
        dst: Writable<Reg>,
    },

    /// Arithmetic SIMD shifts.
    XmmRmiReg {
        opcode: SseOpcode,
        src: RegMemImm,
        dst: Writable<Reg>,
    },

    /// Integer comparisons/tests: cmp or test (b w l q) (reg addr imm) reg.
    CmpRmiR {
        size: OperandSize, // 1, 2, 4 or 8
        opcode: CmpOpcode,
        src: RegMemImm,
        dst: Reg,
    },

    /// Materializes the requested condition code in the destination reg.
    Setcc { cc: CC, dst: Writable<Reg> },

    /// Integer conditional move.
    /// Overwrites the destination register.
    Cmove {
        size: OperandSize, // 2, 4, or 8
        cc: CC,
        src: RegMem,
        dst: Writable<Reg>,
    },

    // =====================================
    // Stack manipulation.
    /// pushq (reg addr imm)
    Push64 { src: RegMemImm },

    /// popq reg
    Pop64 { dst: Writable<Reg> },

    // =====================================
    // Floating-point operations.
    /// XMM (scalar or vector) binary op: (add sub and or xor mul adc? sbb?) (32 64) (reg addr) reg
    XmmRmR {
        op: SseOpcode,
        src: RegMem,
        dst: Writable<Reg>,
    },

    /// XMM (scalar or vector) binary op using an EVEX-encoded (AVX-512) opcode:
    /// (reg addr) reg reg.
    XmmRmREvex {
        op: Avx512Opcode,
        src1: RegMem,
        src2: Reg,
        dst: Writable<Reg>,
    },

    /// XMM (scalar or vector) unary op: mov between XMM registers (32 64) (reg addr) reg, sqrt,
    /// etc.
    ///
    /// This differs from XMM_RM_R in that the dst register of XmmUnaryRmR is not used in the
    /// computation of the instruction dst value and so does not have to be a previously valid
    /// value. This is characteristic of mov instructions.
    XmmUnaryRmR {
        op: SseOpcode,
        src: RegMem,
        dst: Writable<Reg>,
    },

    /// XMM (scalar or vector) unary op using an EVEX-encoded (AVX-512) opcode:
    /// (reg addr) reg. Like `XmmUnaryRmR`, the dst is fully defined (not read).
    XmmUnaryRmREvex {
        op: Avx512Opcode,
        src: RegMem,
        dst: Writable<Reg>,
    },

    /// XMM (scalar or vector) unary op (from xmm to reg/mem): stores, movd, movq
    XmmMovRM {
        op: SseOpcode,
        src: Reg,
        dst: SyntheticAmode,
    },

    /// XMM (vector) unary op (to move a constant value into an xmm register): movups
    XmmLoadConst {
        src: VCodeConstant,
        dst: Writable<Reg>,
        ty: Type,
    },

    /// XMM (scalar) unary op (from xmm to integer reg): movd, movq, cvtts{s,d}2si
    XmmToGpr {
        op: SseOpcode,
        src: Reg,
        dst: Writable<Reg>,
        dst_size: OperandSize,
    },

    /// XMM (scalar) unary op (from integer to float reg): movd, movq, cvtsi2s{s,d}
    GprToXmm {
        op: SseOpcode,
        src: RegMem,
        dst: Writable<Reg>,
        src_size: OperandSize,
    },

    /// Converts an unsigned int64 to a float32/float64.
    CvtUint64ToFloatSeq {
        dst_size: OperandSize, // 4 or 8
        /// A copy of the source register, fed by lowering. It is marked as modified during
        /// register allocation to make sure that the temporary registers differ from the src
        /// register, since both registers are live at the same time in the generated code
        /// sequence.
        src: Writable<Reg>,
        dst: Writable<Reg>,
        tmp_gpr1: Writable<Reg>,
        tmp_gpr2: Writable<Reg>,
    },

    /// Converts a scalar xmm to a signed int32/int64.
    CvtFloatToSintSeq {
        dst_size: OperandSize,
        src_size: OperandSize,
        is_saturating: bool,
        /// A copy of the source register, fed by lowering. It is marked as modified during
        /// register allocation to make sure that the temporary xmm register differs from the src
        /// register, since both registers are live at the same time in the generated code
        /// sequence.
        src: Writable<Reg>,
        dst: Writable<Reg>,
        tmp_gpr: Writable<Reg>,
        tmp_xmm: Writable<Reg>,
    },

    /// Converts a scalar xmm to an unsigned int32/int64.
    CvtFloatToUintSeq {
        src_size: OperandSize,
        dst_size: OperandSize,
        is_saturating: bool,
        /// A copy of the source register, fed by lowering, reused as a temporary. It is marked as
        /// modified during register allocation to make sure that the temporary xmm register
        /// differs from the src register, since both registers are live at the same time in the
        /// generated code sequence.
        src: Writable<Reg>,
        dst: Writable<Reg>,
        tmp_gpr: Writable<Reg>,
        tmp_xmm: Writable<Reg>,
    },

    /// A sequence to compute min/max with the proper NaN semantics for xmm registers.
    XmmMinMaxSeq {
        size: OperandSize,
        is_min: bool,
        lhs: Reg,
        rhs_dst: Writable<Reg>,
    },

    /// XMM (scalar) conditional move.
    /// Overwrites the destination register if cc is set.
    XmmCmove {
        size: OperandSize, // 4 or 8
        cc: CC,
        src: RegMem,
        dst: Writable<Reg>,
    },

    /// Float comparisons/tests: cmp (b w l q) (reg addr imm) reg.
    XmmCmpRmR {
        op: SseOpcode,
        src: RegMem,
        dst: Reg,
    },

    /// A binary XMM instruction with an 8-bit immediate: e.g. cmp (ps pd) imm (reg addr) reg
    XmmRmRImm {
        op: SseOpcode,
        src: RegMem,
        dst: Writable<Reg>,
        imm: u8,
        size: OperandSize, // 4 or 8
    },

    // =====================================
    // Control flow instructions.
    /// Direct call: call simm32.
    CallKnown {
        dest: ExternalName,
        uses: Vec<Reg>,
        defs: Vec<Writable<Reg>>,
        opcode: Opcode,
    },

    /// Indirect call: callq (reg mem).
    CallUnknown {
        dest: RegMem,
        uses: Vec<Reg>,
        defs: Vec<Writable<Reg>>,
        opcode: Opcode,
    },

    /// Return.
    Ret,

    /// A placeholder instruction, generating no code, meaning that a function epilogue must be
    /// inserted there.
    EpiloguePlaceholder,

    /// Jump to a known target: jmp simm32.
    JmpKnown { dst: MachLabel },

    /// One-way conditional branch: jcond cond target.
    ///
    /// This instruction is useful when we have conditional jumps depending on more than two
    /// conditions, see for instance the lowering of Brz/brnz with Fcmp inputs.
    ///
    /// A note of caution: in contexts where the branch target is another block, this has to be the
    /// same successor as the one specified in the terminator branch of the current block.
    /// Otherwise, this might confuse register allocation by creating new invisible edges.
    JmpIf { cc: CC, taken: MachLabel },

    /// Two-way conditional branch: jcond cond target target.
    /// Emitted as a compound sequence; the MachBuffer will shrink it as appropriate.
    JmpCond {
        cc: CC,
        taken: MachLabel,
        not_taken: MachLabel,
    },

    /// Jump-table sequence, as one compound instruction (see note in lower.rs for rationale).
    /// The generated code sequence is described in the emit's function match arm for this
    /// instruction.
    /// See comment in lowering about the temporaries signedness.
    JmpTableSeq {
        idx: Reg,
        tmp1: Writable<Reg>,
        tmp2: Writable<Reg>,
        default_target: MachLabel,
        targets: Vec<MachLabel>,
        targets_for_term: Vec<MachLabel>,
    },

    /// Indirect jump: jmpq (reg mem).
    JmpUnknown { target: RegMem },

    /// Traps if the condition code is set.
    TrapIf { cc: CC, trap_code: TrapCode },

    /// A debug trap.
    Hlt,

    /// An instruction that will always trigger the illegal instruction exception.
    Ud2 { trap_code: TrapCode },

    /// Loads an external symbol in a register, with a relocation:
    ///
    /// movq $name@GOTPCREL(%rip), dst    if PIC is enabled, or
    /// movabsq $name, dst                otherwise.
    LoadExtName {
        dst: Writable<Reg>,
        name: Box<ExternalName>,
        offset: i64,
    },

    // =====================================
    // Instructions pertaining to atomic memory accesses.
    /// A standard (native) `lock cmpxchg src, (amode)`, with register conventions:
    ///
    /// `dst`  (read) address
    /// `src`  (read) replacement value
    /// %rax   (modified) in: expected value, out: value that was actually at `dst`
    /// %rflags is written.  Do not assume anything about it after the instruction.
    ///
    /// The instruction "succeeded" iff the lowest `ty` bits of %rax afterwards are the same as
    /// they were before.
    LockCmpxchg {
        ty: Type, // I8, I16, I32 or I64
        src: Reg,
        dst: SyntheticAmode,
    },

    /// A synthetic instruction, based on a loop around a native `lock cmpxchg` instruction.
    /// This atomically modifies a value in memory and returns the old value.  The sequence
    /// consists of an initial "normal" load from `dst`, followed by a loop which computes the
    /// new value and tries to compare-and-swap ("CAS") it into `dst`, using the native
    /// instruction `lock cmpxchg{b,w,l,q}` .  The loop iterates until the CAS is successful.
    /// If there is no contention, there will be only one pass through the loop body.  The
    /// sequence does *not* perform any explicit memory fence instructions
    /// (mfence/sfence/lfence).
    ///
    /// Note that the transaction is atomic in the sense that, as observed by some other thread,
    /// `dst` either has the initial or final value, but no other.  It isn't atomic in the sense
    /// of guaranteeing that no other thread writes to `dst` in between the initial load and the
    /// CAS -- but that would cause the CAS to fail unless the other thread's last write before
    /// the CAS wrote the same value that was already there.  In other words, this
    /// implementation suffers (unavoidably) from the A-B-A problem.
    ///
    /// This instruction sequence has fixed register uses as follows:
    ///
    /// %r9   (read) address
    /// %r10  (read) second operand for `op`
    /// %r11  (written) scratch reg; value afterwards has no meaning
    /// %rax  (written) the old value at %r9
    /// %rflags is written.  Do not assume anything about it after the instruction.
    AtomicRmwSeq {
        ty: Type, // I8, I16, I32 or I64
        op: inst_common::AtomicRmwOp,
    },

    /// A memory fence (mfence, lfence or sfence).
    Fence { kind: FenceKind },

    // =====================================
    // Meta-instructions generating no code.
    /// Marker, no-op in generated code: SP "virtual offset" is adjusted. This
    /// controls how MemArg::NominalSPOffset args are lowered.
    VirtualSPOffsetAdj { offset: i64 },

    /// Provides a way to tell the register allocator that the upcoming sequence of instructions
    /// will overwrite `dst` so it should be considered as a `def`; use this with care.
    ///
    /// This is useful when we have a sequence of instructions whose register usages are nominally
    /// `mod`s, but such that the combination of operations creates a result that is independent of
    /// the initial register value. It's thus semantically a `def`, not a `mod`, when all the
    /// instructions are taken together, so we want to ensure the register is defined (its
    /// live-range starts) prior to the sequence to keep analyses happy.
    ///
    /// One alternative would be a compound instruction that somehow encapsulates the others and
    /// reports its own `def`s/`use`s/`mod`s; this adds complexity (the instruction list is no
    /// longer flat) and requires knowledge about semantics and initial-value independence anyway.
    XmmUninitializedValue { dst: Writable<Reg> },

    /// A call to the `ElfTlsGetAddr` libcall. Returns address
    /// of TLS symbol in rax.
    ElfTlsGetAddr { symbol: ExternalName },

    /// A Mach-O TLS symbol access. Returns address of the TLS
    /// symbol in rax.
    MachOTlsGetAddr { symbol: ExternalName },

    /// A definition of a value label.
    ValueLabelMarker { reg: Reg, label: ValueLabel },

    /// An unwind pseudoinstruction describing the state of the
    /// machine at this program point.
    Unwind { inst: UnwindInst },
}
510 
pub(crate) fn low32_will_sign_extend_to_64(x: u64) -> bool {
    // True iff sign-extending the low 32 bits of `x` reproduces all 64 bits,
    // i.e. the value is usable as a 32-bit sign-extended immediate.
    let low32_extended = x as u32 as i32 as i64;
    low32_extended == x as i64
}
515 
impl Inst {
    /// Retrieve a list of ISA feature sets in which the instruction is available. An empty list
    /// indicates that the instruction is available in the baseline feature set (i.e. SSE2 and
    /// below); one or more `InstructionSet`s in the list indicates that the instruction is
    /// present in *any* of the included ISA feature sets.
    fn available_in_any_isa(&self) -> SmallVec<[InstructionSet; 2]> {
        match self {
            // These instructions are part of SSE2, which is a basic requirement in Cranelift, and
            // don't have to be checked.
            Inst::AluRmiR { .. }
            | Inst::AtomicRmwSeq { .. }
            | Inst::CallKnown { .. }
            | Inst::CallUnknown { .. }
            | Inst::CheckedDivOrRemSeq { .. }
            | Inst::Cmove { .. }
            | Inst::CmpRmiR { .. }
            | Inst::CvtFloatToSintSeq { .. }
            | Inst::CvtFloatToUintSeq { .. }
            | Inst::CvtUint64ToFloatSeq { .. }
            | Inst::Div { .. }
            | Inst::EpiloguePlaceholder
            | Inst::Fence { .. }
            | Inst::Hlt
            | Inst::Imm { .. }
            | Inst::JmpCond { .. }
            | Inst::JmpIf { .. }
            | Inst::JmpKnown { .. }
            | Inst::JmpTableSeq { .. }
            | Inst::JmpUnknown { .. }
            | Inst::LoadEffectiveAddress { .. }
            | Inst::LoadExtName { .. }
            | Inst::LockCmpxchg { .. }
            | Inst::Mov64MR { .. }
            | Inst::MovRM { .. }
            | Inst::MovRR { .. }
            | Inst::MovsxRmR { .. }
            | Inst::MovzxRmR { .. }
            | Inst::MulHi { .. }
            | Inst::Neg { .. }
            | Inst::Not { .. }
            | Inst::Nop { .. }
            | Inst::Pop64 { .. }
            | Inst::Push64 { .. }
            | Inst::Ret
            | Inst::Setcc { .. }
            | Inst::ShiftR { .. }
            | Inst::SignExtendData { .. }
            | Inst::TrapIf { .. }
            | Inst::Ud2 { .. }
            | Inst::VirtualSPOffsetAdj { .. }
            | Inst::XmmCmove { .. }
            | Inst::XmmCmpRmR { .. }
            | Inst::XmmLoadConst { .. }
            | Inst::XmmMinMaxSeq { .. }
            | Inst::XmmUninitializedValue { .. }
            | Inst::ElfTlsGetAddr { .. }
            | Inst::MachOTlsGetAddr { .. }
            | Inst::ValueLabelMarker { .. }
            | Inst::Unwind { .. } => smallvec![],

            // Availability is determined by the particular unary opcode.
            Inst::UnaryRmR { op, .. } => op.available_from(),

            // These use dynamic SSE opcodes.
            Inst::GprToXmm { op, .. }
            | Inst::XmmMovRM { op, .. }
            | Inst::XmmRmiReg { opcode: op, .. }
            | Inst::XmmRmR { op, .. }
            | Inst::XmmRmRImm { op, .. }
            | Inst::XmmToGpr { op, .. }
            | Inst::XmmUnaryRmR { op, .. } => smallvec![op.available_from()],

            // EVEX-encoded instructions: availability is determined by the AVX-512 opcode.
            Inst::XmmUnaryRmREvex { op, .. } | Inst::XmmRmREvex { op, .. } => op.available_from(),
        }
    }
}
591 
592 // Handy constructors for Insts.
593 
594 impl Inst {
nop(len: u8) -> Self595     pub(crate) fn nop(len: u8) -> Self {
596         debug_assert!(len <= 15);
597         Self::Nop { len }
598     }
599 
alu_rmi_r( size: OperandSize, op: AluRmiROpcode, src: RegMemImm, dst: Writable<Reg>, ) -> Self600     pub(crate) fn alu_rmi_r(
601         size: OperandSize,
602         op: AluRmiROpcode,
603         src: RegMemImm,
604         dst: Writable<Reg>,
605     ) -> Self {
606         debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
607         src.assert_regclass_is(RegClass::I64);
608         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
609         Self::AluRmiR { size, op, src, dst }
610     }
611 
unary_rm_r( size: OperandSize, op: UnaryRmROpcode, src: RegMem, dst: Writable<Reg>, ) -> Self612     pub(crate) fn unary_rm_r(
613         size: OperandSize,
614         op: UnaryRmROpcode,
615         src: RegMem,
616         dst: Writable<Reg>,
617     ) -> Self {
618         src.assert_regclass_is(RegClass::I64);
619         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
620         debug_assert!(size.is_one_of(&[
621             OperandSize::Size16,
622             OperandSize::Size32,
623             OperandSize::Size64
624         ]));
625         Self::UnaryRmR { size, op, src, dst }
626     }
627 
not(size: OperandSize, src: Writable<Reg>) -> Inst628     pub(crate) fn not(size: OperandSize, src: Writable<Reg>) -> Inst {
629         debug_assert_eq!(src.to_reg().get_class(), RegClass::I64);
630         Inst::Not { size, src }
631     }
632 
neg(size: OperandSize, src: Writable<Reg>) -> Inst633     pub(crate) fn neg(size: OperandSize, src: Writable<Reg>) -> Inst {
634         debug_assert_eq!(src.to_reg().get_class(), RegClass::I64);
635         Inst::Neg { size, src }
636     }
637 
div(size: OperandSize, signed: bool, divisor: RegMem) -> Inst638     pub(crate) fn div(size: OperandSize, signed: bool, divisor: RegMem) -> Inst {
639         divisor.assert_regclass_is(RegClass::I64);
640         Inst::Div {
641             size,
642             signed,
643             divisor,
644         }
645     }
646 
mul_hi(size: OperandSize, signed: bool, rhs: RegMem) -> Inst647     pub(crate) fn mul_hi(size: OperandSize, signed: bool, rhs: RegMem) -> Inst {
648         debug_assert!(size.is_one_of(&[
649             OperandSize::Size16,
650             OperandSize::Size32,
651             OperandSize::Size64
652         ]));
653         rhs.assert_regclass_is(RegClass::I64);
654         Inst::MulHi { size, signed, rhs }
655     }
656 
checked_div_or_rem_seq( kind: DivOrRemKind, size: OperandSize, divisor: Writable<Reg>, tmp: Option<Writable<Reg>>, ) -> Inst657     pub(crate) fn checked_div_or_rem_seq(
658         kind: DivOrRemKind,
659         size: OperandSize,
660         divisor: Writable<Reg>,
661         tmp: Option<Writable<Reg>>,
662     ) -> Inst {
663         debug_assert!(divisor.to_reg().get_class() == RegClass::I64);
664         debug_assert!(tmp
665             .map(|tmp| tmp.to_reg().get_class() == RegClass::I64)
666             .unwrap_or(true));
667         Inst::CheckedDivOrRemSeq {
668             kind,
669             size,
670             divisor,
671             tmp,
672         }
673     }
674 
sign_extend_data(size: OperandSize) -> Inst675     pub(crate) fn sign_extend_data(size: OperandSize) -> Inst {
676         Inst::SignExtendData { size }
677     }
678 
imm(dst_size: OperandSize, simm64: u64, dst: Writable<Reg>) -> Inst679     pub(crate) fn imm(dst_size: OperandSize, simm64: u64, dst: Writable<Reg>) -> Inst {
680         debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
681         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
682         // Try to generate a 32-bit immediate when the upper high bits are zeroed (which matches
683         // the semantics of movl).
684         let dst_size = match dst_size {
685             OperandSize::Size64 if simm64 > u32::max_value() as u64 => OperandSize::Size64,
686             _ => OperandSize::Size32,
687         };
688         Inst::Imm {
689             dst_size,
690             simm64,
691             dst,
692         }
693     }
694 
mov_r_r(size: OperandSize, src: Reg, dst: Writable<Reg>) -> Inst695     pub(crate) fn mov_r_r(size: OperandSize, src: Reg, dst: Writable<Reg>) -> Inst {
696         debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
697         debug_assert!(src.get_class() == RegClass::I64);
698         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
699         Inst::MovRR { size, src, dst }
700     }
701 
702     // TODO Can be replaced by `Inst::move` (high-level) and `Inst::unary_rm_r` (low-level)
xmm_mov(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Inst703     pub(crate) fn xmm_mov(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Inst {
704         src.assert_regclass_is(RegClass::V128);
705         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
706         Inst::XmmUnaryRmR { op, src, dst }
707     }
708 
xmm_load_const(src: VCodeConstant, dst: Writable<Reg>, ty: Type) -> Inst709     pub(crate) fn xmm_load_const(src: VCodeConstant, dst: Writable<Reg>, ty: Type) -> Inst {
710         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
711         debug_assert!(ty.is_vector() && ty.bits() == 128);
712         Inst::XmmLoadConst { src, dst, ty }
713     }
714 
715     /// Convenient helper for unary float operations.
xmm_unary_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Inst716     pub(crate) fn xmm_unary_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Inst {
717         src.assert_regclass_is(RegClass::V128);
718         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
719         Inst::XmmUnaryRmR { op, src, dst }
720     }
721 
xmm_unary_rm_r_evex(op: Avx512Opcode, src: RegMem, dst: Writable<Reg>) -> Inst722     pub(crate) fn xmm_unary_rm_r_evex(op: Avx512Opcode, src: RegMem, dst: Writable<Reg>) -> Inst {
723         src.assert_regclass_is(RegClass::V128);
724         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
725         Inst::XmmUnaryRmREvex { op, src, dst }
726     }
727 
xmm_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Self728     pub(crate) fn xmm_rm_r(op: SseOpcode, src: RegMem, dst: Writable<Reg>) -> Self {
729         src.assert_regclass_is(RegClass::V128);
730         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
731         Inst::XmmRmR { op, src, dst }
732     }
733 
xmm_rm_r_evex( op: Avx512Opcode, src1: RegMem, src2: Reg, dst: Writable<Reg>, ) -> Self734     pub(crate) fn xmm_rm_r_evex(
735         op: Avx512Opcode,
736         src1: RegMem,
737         src2: Reg,
738         dst: Writable<Reg>,
739     ) -> Self {
740         src1.assert_regclass_is(RegClass::V128);
741         debug_assert!(src2.get_class() == RegClass::V128);
742         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
743         Inst::XmmRmREvex {
744             op,
745             src1,
746             src2,
747             dst,
748         }
749     }
750 
xmm_uninit_value(dst: Writable<Reg>) -> Self751     pub(crate) fn xmm_uninit_value(dst: Writable<Reg>) -> Self {
752         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
753         Inst::XmmUninitializedValue { dst }
754     }
755 
xmm_mov_r_m(op: SseOpcode, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst756     pub(crate) fn xmm_mov_r_m(op: SseOpcode, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
757         debug_assert!(src.get_class() == RegClass::V128);
758         Inst::XmmMovRM {
759             op,
760             src,
761             dst: dst.into(),
762         }
763     }
764 
xmm_to_gpr( op: SseOpcode, src: Reg, dst: Writable<Reg>, dst_size: OperandSize, ) -> Inst765     pub(crate) fn xmm_to_gpr(
766         op: SseOpcode,
767         src: Reg,
768         dst: Writable<Reg>,
769         dst_size: OperandSize,
770     ) -> Inst {
771         debug_assert!(src.get_class() == RegClass::V128);
772         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
773         debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
774         Inst::XmmToGpr {
775             op,
776             src,
777             dst,
778             dst_size,
779         }
780     }
781 
gpr_to_xmm( op: SseOpcode, src: RegMem, src_size: OperandSize, dst: Writable<Reg>, ) -> Inst782     pub(crate) fn gpr_to_xmm(
783         op: SseOpcode,
784         src: RegMem,
785         src_size: OperandSize,
786         dst: Writable<Reg>,
787     ) -> Inst {
788         src.assert_regclass_is(RegClass::I64);
789         debug_assert!(src_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
790         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
791         Inst::GprToXmm {
792             op,
793             src,
794             dst,
795             src_size,
796         }
797     }
798 
xmm_cmp_rm_r(op: SseOpcode, src: RegMem, dst: Reg) -> Inst799     pub(crate) fn xmm_cmp_rm_r(op: SseOpcode, src: RegMem, dst: Reg) -> Inst {
800         src.assert_regclass_is(RegClass::V128);
801         debug_assert!(dst.get_class() == RegClass::V128);
802         Inst::XmmCmpRmR { op, src, dst }
803     }
804 
cvt_u64_to_float_seq( dst_size: OperandSize, src: Writable<Reg>, tmp_gpr1: Writable<Reg>, tmp_gpr2: Writable<Reg>, dst: Writable<Reg>, ) -> Inst805     pub(crate) fn cvt_u64_to_float_seq(
806         dst_size: OperandSize,
807         src: Writable<Reg>,
808         tmp_gpr1: Writable<Reg>,
809         tmp_gpr2: Writable<Reg>,
810         dst: Writable<Reg>,
811     ) -> Inst {
812         debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
813         debug_assert!(src.to_reg().get_class() == RegClass::I64);
814         debug_assert!(tmp_gpr1.to_reg().get_class() == RegClass::I64);
815         debug_assert!(tmp_gpr2.to_reg().get_class() == RegClass::I64);
816         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
817         Inst::CvtUint64ToFloatSeq {
818             src,
819             dst,
820             tmp_gpr1,
821             tmp_gpr2,
822             dst_size,
823         }
824     }
825 
cvt_float_to_sint_seq( src_size: OperandSize, dst_size: OperandSize, is_saturating: bool, src: Writable<Reg>, dst: Writable<Reg>, tmp_gpr: Writable<Reg>, tmp_xmm: Writable<Reg>, ) -> Inst826     pub(crate) fn cvt_float_to_sint_seq(
827         src_size: OperandSize,
828         dst_size: OperandSize,
829         is_saturating: bool,
830         src: Writable<Reg>,
831         dst: Writable<Reg>,
832         tmp_gpr: Writable<Reg>,
833         tmp_xmm: Writable<Reg>,
834     ) -> Inst {
835         debug_assert!(src_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
836         debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
837         debug_assert!(src.to_reg().get_class() == RegClass::V128);
838         debug_assert!(tmp_xmm.to_reg().get_class() == RegClass::V128);
839         debug_assert!(tmp_gpr.to_reg().get_class() == RegClass::I64);
840         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
841         Inst::CvtFloatToSintSeq {
842             src_size,
843             dst_size,
844             is_saturating,
845             src,
846             dst,
847             tmp_gpr,
848             tmp_xmm,
849         }
850     }
851 
cvt_float_to_uint_seq( src_size: OperandSize, dst_size: OperandSize, is_saturating: bool, src: Writable<Reg>, dst: Writable<Reg>, tmp_gpr: Writable<Reg>, tmp_xmm: Writable<Reg>, ) -> Inst852     pub(crate) fn cvt_float_to_uint_seq(
853         src_size: OperandSize,
854         dst_size: OperandSize,
855         is_saturating: bool,
856         src: Writable<Reg>,
857         dst: Writable<Reg>,
858         tmp_gpr: Writable<Reg>,
859         tmp_xmm: Writable<Reg>,
860     ) -> Inst {
861         debug_assert!(src_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
862         debug_assert!(dst_size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
863         debug_assert!(src.to_reg().get_class() == RegClass::V128);
864         debug_assert!(tmp_xmm.to_reg().get_class() == RegClass::V128);
865         debug_assert!(tmp_gpr.to_reg().get_class() == RegClass::I64);
866         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
867         Inst::CvtFloatToUintSeq {
868             src_size,
869             dst_size,
870             is_saturating,
871             src,
872             dst,
873             tmp_gpr,
874             tmp_xmm,
875         }
876     }
877 
xmm_min_max_seq( size: OperandSize, is_min: bool, lhs: Reg, rhs_dst: Writable<Reg>, ) -> Inst878     pub(crate) fn xmm_min_max_seq(
879         size: OperandSize,
880         is_min: bool,
881         lhs: Reg,
882         rhs_dst: Writable<Reg>,
883     ) -> Inst {
884         debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
885         debug_assert_eq!(lhs.get_class(), RegClass::V128);
886         debug_assert_eq!(rhs_dst.to_reg().get_class(), RegClass::V128);
887         Inst::XmmMinMaxSeq {
888             size,
889             is_min,
890             lhs,
891             rhs_dst,
892         }
893     }
894 
    /// An XMM instruction that additionally takes an immediate byte operand
    /// (e.g. the `cmpps`/`cmppd` predicate byte used by [`Inst::equals`]).
    ///
    /// `size` must be 32 or 64 bits; Size64 is rendered with a `.w` suffix by the
    /// pretty-printer, presumably selecting a REX.W-prefixed encoding — confirm
    /// against the emitter.
    pub(crate) fn xmm_rm_r_imm(
        op: SseOpcode,
        src: RegMem,
        dst: Writable<Reg>,
        imm: u8,
        size: OperandSize,
    ) -> Inst {
        debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
        Inst::XmmRmRImm {
            op,
            src,
            dst,
            imm,
            size,
        }
    }
911 
movzx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst912     pub(crate) fn movzx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
913         src.assert_regclass_is(RegClass::I64);
914         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
915         Inst::MovzxRmR { ext_mode, src, dst }
916     }
917 
xmm_rmi_reg(opcode: SseOpcode, src: RegMemImm, dst: Writable<Reg>) -> Inst918     pub(crate) fn xmm_rmi_reg(opcode: SseOpcode, src: RegMemImm, dst: Writable<Reg>) -> Inst {
919         src.assert_regclass_is(RegClass::V128);
920         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
921         Inst::XmmRmiReg { opcode, src, dst }
922     }
923 
movsx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst924     pub(crate) fn movsx_rm_r(ext_mode: ExtMode, src: RegMem, dst: Writable<Reg>) -> Inst {
925         src.assert_regclass_is(RegClass::I64);
926         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
927         Inst::MovsxRmR { ext_mode, src, dst }
928     }
929 
mov64_m_r(src: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst930     pub(crate) fn mov64_m_r(src: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
931         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
932         Inst::Mov64MR {
933             src: src.into(),
934             dst,
935         }
936     }
937 
938     /// A convenience function to be able to use a RegMem as the source of a move.
mov64_rm_r(src: RegMem, dst: Writable<Reg>) -> Inst939     pub(crate) fn mov64_rm_r(src: RegMem, dst: Writable<Reg>) -> Inst {
940         src.assert_regclass_is(RegClass::I64);
941         match src {
942             RegMem::Reg { reg } => Self::mov_r_r(OperandSize::Size64, reg, dst),
943             RegMem::Mem { addr } => Self::mov64_m_r(addr, dst),
944         }
945     }
946 
mov_r_m(size: OperandSize, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst947     pub(crate) fn mov_r_m(size: OperandSize, src: Reg, dst: impl Into<SyntheticAmode>) -> Inst {
948         debug_assert!(src.get_class() == RegClass::I64);
949         Inst::MovRM {
950             size,
951             src,
952             dst: dst.into(),
953         }
954     }
955 
lea(addr: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst956     pub(crate) fn lea(addr: impl Into<SyntheticAmode>, dst: Writable<Reg>) -> Inst {
957         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
958         Inst::LoadEffectiveAddress {
959             addr: addr.into(),
960             dst,
961         }
962     }
963 
shift_r( size: OperandSize, kind: ShiftKind, num_bits: Option<u8>, dst: Writable<Reg>, ) -> Inst964     pub(crate) fn shift_r(
965         size: OperandSize,
966         kind: ShiftKind,
967         num_bits: Option<u8>,
968         dst: Writable<Reg>,
969     ) -> Inst {
970         debug_assert!(if let Some(num_bits) = num_bits {
971             num_bits < size.to_bits()
972         } else {
973             true
974         });
975         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
976         Inst::ShiftR {
977             size,
978             kind,
979             num_bits,
980             dst,
981         }
982     }
983 
984     /// Does a comparison of dst - src for operands of size `size`, as stated by the machine
985     /// instruction semantics. Be careful with the order of parameters!
cmp_rmi_r(size: OperandSize, src: RegMemImm, dst: Reg) -> Inst986     pub(crate) fn cmp_rmi_r(size: OperandSize, src: RegMemImm, dst: Reg) -> Inst {
987         src.assert_regclass_is(RegClass::I64);
988         debug_assert_eq!(dst.get_class(), RegClass::I64);
989         Inst::CmpRmiR {
990             size,
991             src,
992             dst,
993             opcode: CmpOpcode::Cmp,
994         }
995     }
996 
997     /// Does a comparison of dst & src for operands of size `size`.
test_rmi_r(size: OperandSize, src: RegMemImm, dst: Reg) -> Inst998     pub(crate) fn test_rmi_r(size: OperandSize, src: RegMemImm, dst: Reg) -> Inst {
999         src.assert_regclass_is(RegClass::I64);
1000         debug_assert_eq!(dst.get_class(), RegClass::I64);
1001         Inst::CmpRmiR {
1002             size,
1003             src,
1004             dst,
1005             opcode: CmpOpcode::Test,
1006         }
1007     }
1008 
trap(trap_code: TrapCode) -> Inst1009     pub(crate) fn trap(trap_code: TrapCode) -> Inst {
1010         Inst::Ud2 {
1011             trap_code: trap_code,
1012         }
1013     }
1014 
setcc(cc: CC, dst: Writable<Reg>) -> Inst1015     pub(crate) fn setcc(cc: CC, dst: Writable<Reg>) -> Inst {
1016         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
1017         Inst::Setcc { cc, dst }
1018     }
1019 
cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst1020     pub(crate) fn cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst {
1021         debug_assert!(size.is_one_of(&[
1022             OperandSize::Size16,
1023             OperandSize::Size32,
1024             OperandSize::Size64
1025         ]));
1026         debug_assert!(dst.to_reg().get_class() == RegClass::I64);
1027         Inst::Cmove { size, cc, src, dst }
1028     }
1029 
xmm_cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst1030     pub(crate) fn xmm_cmove(size: OperandSize, cc: CC, src: RegMem, dst: Writable<Reg>) -> Inst {
1031         debug_assert!(size.is_one_of(&[OperandSize::Size32, OperandSize::Size64]));
1032         src.assert_regclass_is(RegClass::V128);
1033         debug_assert!(dst.to_reg().get_class() == RegClass::V128);
1034         Inst::XmmCmove { size, cc, src, dst }
1035     }
1036 
    /// Push a 64-bit operand (register, memory, or immediate) onto the stack.
    pub(crate) fn push64(src: RegMemImm) -> Inst {
        src.assert_regclass_is(RegClass::I64);
        Inst::Push64 { src }
    }
1041 
    /// Pop a 64-bit value off the stack into a GPR.
    pub(crate) fn pop64(dst: Writable<Reg>) -> Inst {
        debug_assert!(dst.to_reg().get_class() == RegClass::I64);
        Inst::Pop64 { dst }
    }
1046 
    /// Direct call to a known external name.
    ///
    /// `uses`/`defs` are the use/def register sets recorded for the register
    /// allocator; `opcode` is the IR opcode associated with this call.
    pub(crate) fn call_known(
        dest: ExternalName,
        uses: Vec<Reg>,
        defs: Vec<Writable<Reg>>,
        opcode: Opcode,
    ) -> Inst {
        Inst::CallKnown {
            dest,
            uses,
            defs,
            opcode,
        }
    }
1060 
    /// Indirect call through a register or memory operand.
    ///
    /// `uses`/`defs` are the use/def register sets recorded for the register
    /// allocator; `opcode` is the IR opcode associated with this call.
    pub(crate) fn call_unknown(
        dest: RegMem,
        uses: Vec<Reg>,
        defs: Vec<Writable<Reg>>,
        opcode: Opcode,
    ) -> Inst {
        dest.assert_regclass_is(RegClass::I64);
        Inst::CallUnknown {
            dest,
            uses,
            defs,
            opcode,
        }
    }
1075 
    /// Return from a call (`ret`).
    pub(crate) fn ret() -> Inst {
        Inst::Ret
    }
1079 
    /// A placeholder standing in for the function epilogue, to be replaced later
    /// in compilation (presumably by the ABI implementation — confirm there).
    pub(crate) fn epilogue_placeholder() -> Inst {
        Inst::EpiloguePlaceholder
    }
1083 
    /// Unconditional jump to a known label.
    pub(crate) fn jmp_known(dst: MachLabel) -> Inst {
        Inst::JmpKnown { dst }
    }
1087 
    /// One-sided conditional jump: branch to `taken` if `cc` holds, else fall through.
    pub(crate) fn jmp_if(cc: CC, taken: MachLabel) -> Inst {
        Inst::JmpIf { cc, taken }
    }
1091 
jmp_cond(cc: CC, taken: MachLabel, not_taken: MachLabel) -> Inst1092     pub(crate) fn jmp_cond(cc: CC, taken: MachLabel, not_taken: MachLabel) -> Inst {
1093         Inst::JmpCond {
1094             cc,
1095             taken,
1096             not_taken,
1097         }
1098     }
1099 
    /// Indirect jump through a register or memory operand.
    pub(crate) fn jmp_unknown(target: RegMem) -> Inst {
        target.assert_regclass_is(RegClass::I64);
        Inst::JmpUnknown { target }
    }
1104 
    /// Trap with `trap_code` if condition code `cc` holds.
    pub(crate) fn trap_if(cc: CC, trap_code: TrapCode) -> Inst {
        Inst::TrapIf { cc, trap_code }
    }
1108 
1109     /// Choose which instruction to use for loading a register value from memory. For loads smaller
1110     /// than 64 bits, this method expects a way to extend the value (i.e. [ExtKind::SignExtend],
1111     /// [ExtKind::ZeroExtend]); loads with no extension necessary will ignore this.
load( ty: Type, from_addr: impl Into<SyntheticAmode>, to_reg: Writable<Reg>, ext_kind: ExtKind, ) -> Inst1112     pub(crate) fn load(
1113         ty: Type,
1114         from_addr: impl Into<SyntheticAmode>,
1115         to_reg: Writable<Reg>,
1116         ext_kind: ExtKind,
1117     ) -> Inst {
1118         let rc = to_reg.to_reg().get_class();
1119         match rc {
1120             RegClass::I64 => {
1121                 let ext_mode = match ty.bytes() {
1122                     1 => Some(ExtMode::BQ),
1123                     2 => Some(ExtMode::WQ),
1124                     4 => Some(ExtMode::LQ),
1125                     8 => None,
1126                     _ => unreachable!("the type should never use a scalar load: {}", ty),
1127                 };
1128                 if let Some(ext_mode) = ext_mode {
1129                     // Values smaller than 64 bits must be extended in some way.
1130                     match ext_kind {
1131                         ExtKind::SignExtend => {
1132                             Inst::movsx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
1133                         }
1134                         ExtKind::ZeroExtend => {
1135                             Inst::movzx_rm_r(ext_mode, RegMem::mem(from_addr), to_reg)
1136                         }
1137                         ExtKind::None => panic!(
1138                             "expected an extension kind for extension mode: {:?}",
1139                             ext_mode
1140                         ),
1141                     }
1142                 } else {
1143                     // 64-bit values can be moved directly.
1144                     Inst::mov64_m_r(from_addr, to_reg)
1145                 }
1146             }
1147             RegClass::V128 => {
1148                 let opcode = match ty {
1149                     types::F32 => SseOpcode::Movss,
1150                     types::F64 => SseOpcode::Movsd,
1151                     types::F32X4 => SseOpcode::Movups,
1152                     types::F64X2 => SseOpcode::Movupd,
1153                     _ if ty.is_vector() && ty.bits() == 128 => SseOpcode::Movdqu,
1154                     _ => unimplemented!("unable to load type: {}", ty),
1155                 };
1156                 Inst::xmm_unary_rm_r(opcode, RegMem::mem(from_addr), to_reg)
1157             }
1158             _ => panic!("unable to generate load for register class: {:?}", rc),
1159         }
1160     }
1161 
    /// Choose which instruction to use for storing a register value to memory.
    pub(crate) fn store(ty: Type, from_reg: Reg, to_addr: impl Into<SyntheticAmode>) -> Inst {
        let rc = from_reg.get_class();
        match rc {
            // Integer stores: width taken from the type.
            RegClass::I64 => Inst::mov_r_m(OperandSize::from_ty(ty), from_reg, to_addr),
            RegClass::V128 => {
                // Scalar floats use movss/movsd; 128-bit vectors use the matching
                // unaligned move (movups/movupd/movdqu).
                let opcode = match ty {
                    types::F32 => SseOpcode::Movss,
                    types::F64 => SseOpcode::Movsd,
                    types::F32X4 => SseOpcode::Movups,
                    types::F64X2 => SseOpcode::Movupd,
                    _ if ty.is_vector() && ty.bits() == 128 => SseOpcode::Movdqu,
                    _ => unimplemented!("unable to store type: {}", ty),
                };
                Inst::xmm_mov_r_m(opcode, from_reg, to_addr)
            }
            _ => panic!("unable to generate store for register class: {:?}", rc),
        }
    }
1181 }
1182 
1183 // Inst helpers.
1184 
1185 impl Inst {
1186     /// In certain cases, instructions of this format can act as a definition of an XMM register,
1187     /// producing a value that is independent of its initial value.
1188     ///
1189     /// For example, a vector equality comparison (`cmppd` or `cmpps`) that compares a register to
1190     /// itself will generate all ones as a result, regardless of its value. From the register
1191     /// allocator's point of view, we should (i) record the first register, which is normally a
1192     /// mod, as a def instead; and (ii) not record the second register as a use, because it is the
1193     /// same as the first register (already handled).
produces_const(&self) -> bool1194     fn produces_const(&self) -> bool {
1195         match self {
1196             Self::AluRmiR { op, src, dst, .. } => {
1197                 src.to_reg() == Some(dst.to_reg())
1198                     && (*op == AluRmiROpcode::Xor || *op == AluRmiROpcode::Sub)
1199             }
1200 
1201             Self::XmmRmR { op, src, dst, .. } => {
1202                 src.to_reg() == Some(dst.to_reg())
1203                     && (*op == SseOpcode::Xorps
1204                         || *op == SseOpcode::Xorpd
1205                         || *op == SseOpcode::Pxor
1206                         || *op == SseOpcode::Pcmpeqb
1207                         || *op == SseOpcode::Pcmpeqw
1208                         || *op == SseOpcode::Pcmpeqd
1209                         || *op == SseOpcode::Pcmpeqq)
1210             }
1211 
1212             Self::XmmRmRImm {
1213                 op, src, dst, imm, ..
1214             } => {
1215                 src.to_reg() == Some(dst.to_reg())
1216                     && (*op == SseOpcode::Cmppd || *op == SseOpcode::Cmpps)
1217                     && *imm == FcmpImm::Equal.encode()
1218             }
1219 
1220             _ => false,
1221         }
1222     }
1223 
1224     /// Choose which instruction to use for comparing two values for equality.
equals(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst1225     pub(crate) fn equals(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst {
1226         match ty {
1227             types::I8X16 | types::B8X16 => Inst::xmm_rm_r(SseOpcode::Pcmpeqb, from, to),
1228             types::I16X8 | types::B16X8 => Inst::xmm_rm_r(SseOpcode::Pcmpeqw, from, to),
1229             types::I32X4 | types::B32X4 => Inst::xmm_rm_r(SseOpcode::Pcmpeqd, from, to),
1230             types::I64X2 | types::B64X2 => Inst::xmm_rm_r(SseOpcode::Pcmpeqq, from, to),
1231             types::F32X4 => Inst::xmm_rm_r_imm(
1232                 SseOpcode::Cmpps,
1233                 from,
1234                 to,
1235                 FcmpImm::Equal.encode(),
1236                 OperandSize::Size32,
1237             ),
1238             types::F64X2 => Inst::xmm_rm_r_imm(
1239                 SseOpcode::Cmppd,
1240                 from,
1241                 to,
1242                 FcmpImm::Equal.encode(),
1243                 OperandSize::Size32,
1244             ),
1245             _ => unimplemented!("unimplemented type for Inst::equals: {}", ty),
1246         }
1247     }
1248 
1249     /// Choose which instruction to use for computing a bitwise AND on two values.
and(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst1250     pub(crate) fn and(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst {
1251         match ty {
1252             types::F32X4 => Inst::xmm_rm_r(SseOpcode::Andps, from, to),
1253             types::F64X2 => Inst::xmm_rm_r(SseOpcode::Andpd, from, to),
1254             _ if ty.is_vector() && ty.bits() == 128 => Inst::xmm_rm_r(SseOpcode::Pand, from, to),
1255             _ => unimplemented!("unimplemented type for Inst::and: {}", ty),
1256         }
1257     }
1258 
1259     /// Choose which instruction to use for computing a bitwise AND NOT on two values.
and_not(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst1260     pub(crate) fn and_not(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst {
1261         match ty {
1262             types::F32X4 => Inst::xmm_rm_r(SseOpcode::Andnps, from, to),
1263             types::F64X2 => Inst::xmm_rm_r(SseOpcode::Andnpd, from, to),
1264             _ if ty.is_vector() && ty.bits() == 128 => Inst::xmm_rm_r(SseOpcode::Pandn, from, to),
1265             _ => unimplemented!("unimplemented type for Inst::and_not: {}", ty),
1266         }
1267     }
1268 
1269     /// Choose which instruction to use for computing a bitwise OR on two values.
or(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst1270     pub(crate) fn or(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst {
1271         match ty {
1272             types::F32X4 => Inst::xmm_rm_r(SseOpcode::Orps, from, to),
1273             types::F64X2 => Inst::xmm_rm_r(SseOpcode::Orpd, from, to),
1274             _ if ty.is_vector() && ty.bits() == 128 => Inst::xmm_rm_r(SseOpcode::Por, from, to),
1275             _ => unimplemented!("unimplemented type for Inst::or: {}", ty),
1276         }
1277     }
1278 
1279     /// Choose which instruction to use for computing a bitwise XOR on two values.
xor(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst1280     pub(crate) fn xor(ty: Type, from: RegMem, to: Writable<Reg>) -> Inst {
1281         match ty {
1282             types::F32X4 => Inst::xmm_rm_r(SseOpcode::Xorps, from, to),
1283             types::F64X2 => Inst::xmm_rm_r(SseOpcode::Xorpd, from, to),
1284             _ if ty.is_vector() && ty.bits() == 128 => Inst::xmm_rm_r(SseOpcode::Pxor, from, to),
1285             _ => unimplemented!("unimplemented type for Inst::xor: {}", ty),
1286         }
1287     }
1288 }
1289 
1290 //=============================================================================
1291 // Instructions: printing
1292 
1293 impl PrettyPrint for Inst {
show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String1294     fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
1295         fn ljustify(s: String) -> String {
1296             let w = 7;
1297             if s.len() >= w {
1298                 s
1299             } else {
1300                 let need = usize::min(w, w - s.len());
1301                 s + &format!("{nil: <width$}", nil = "", width = need)
1302             }
1303         }
1304 
1305         fn ljustify2(s1: String, s2: String) -> String {
1306             ljustify(s1 + &s2)
1307         }
1308 
1309         fn suffix_lq(size: OperandSize) -> String {
1310             match size {
1311                 OperandSize::Size32 => "l",
1312                 OperandSize::Size64 => "q",
1313                 _ => unreachable!(),
1314             }
1315             .to_string()
1316         }
1317 
1318         fn suffix_lqb(size: OperandSize, is_8: bool) -> String {
1319             match (size, is_8) {
1320                 (_, true) => "b",
1321                 (OperandSize::Size32, false) => "l",
1322                 (OperandSize::Size64, false) => "q",
1323                 _ => unreachable!(),
1324             }
1325             .to_string()
1326         }
1327 
1328         fn size_lqb(size: OperandSize, is_8: bool) -> u8 {
1329             if is_8 {
1330                 return 1;
1331             }
1332             size.to_bytes()
1333         }
1334 
1335         fn suffix_bwlq(size: OperandSize) -> String {
1336             match size {
1337                 OperandSize::Size8 => "b".to_string(),
1338                 OperandSize::Size16 => "w".to_string(),
1339                 OperandSize::Size32 => "l".to_string(),
1340                 OperandSize::Size64 => "q".to_string(),
1341             }
1342         }
1343 
1344         match self {
1345             Inst::Nop { len } => format!("{} len={}", ljustify("nop".to_string()), len),
1346 
1347             Inst::AluRmiR { size, op, src, dst } => format!(
1348                 "{} {}, {}",
1349                 ljustify2(op.to_string(), suffix_lqb(*size, op.is_8bit())),
1350                 src.show_rru_sized(mb_rru, size_lqb(*size, op.is_8bit())),
1351                 show_ireg_sized(dst.to_reg(), mb_rru, size_lqb(*size, op.is_8bit())),
1352             ),
1353 
1354             Inst::UnaryRmR { src, dst, op, size } => format!(
1355                 "{} {}, {}",
1356                 ljustify2(op.to_string(), suffix_bwlq(*size)),
1357                 src.show_rru_sized(mb_rru, size.to_bytes()),
1358                 show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes()),
1359             ),
1360 
1361             Inst::Not { size, src } => format!(
1362                 "{} {}",
1363                 ljustify2("not".to_string(), suffix_bwlq(*size)),
1364                 show_ireg_sized(src.to_reg(), mb_rru, size.to_bytes())
1365             ),
1366 
1367             Inst::Neg { size, src } => format!(
1368                 "{} {}",
1369                 ljustify2("neg".to_string(), suffix_bwlq(*size)),
1370                 show_ireg_sized(src.to_reg(), mb_rru, size.to_bytes())
1371             ),
1372 
1373             Inst::Div {
1374                 size,
1375                 signed,
1376                 divisor,
1377                 ..
1378             } => format!(
1379                 "{} {}",
1380                 ljustify(if *signed {
1381                     "idiv".to_string()
1382                 } else {
1383                     "div".into()
1384                 }),
1385                 divisor.show_rru_sized(mb_rru, size.to_bytes())
1386             ),
1387 
1388             Inst::MulHi {
1389                 size, signed, rhs, ..
1390             } => format!(
1391                 "{} {}",
1392                 ljustify(if *signed {
1393                     "imul".to_string()
1394                 } else {
1395                     "mul".to_string()
1396                 }),
1397                 rhs.show_rru_sized(mb_rru, size.to_bytes())
1398             ),
1399 
1400             Inst::CheckedDivOrRemSeq {
1401                 kind,
1402                 size,
1403                 divisor,
1404                 ..
1405             } => format!(
1406                 "{} $rax:$rdx, {}",
1407                 match kind {
1408                     DivOrRemKind::SignedDiv => "sdiv",
1409                     DivOrRemKind::UnsignedDiv => "udiv",
1410                     DivOrRemKind::SignedRem => "srem",
1411                     DivOrRemKind::UnsignedRem => "urem",
1412                 },
1413                 show_ireg_sized(divisor.to_reg(), mb_rru, size.to_bytes()),
1414             ),
1415 
1416             Inst::SignExtendData { size } => match size {
1417                 OperandSize::Size8 => "cbw",
1418                 OperandSize::Size16 => "cwd",
1419                 OperandSize::Size32 => "cdq",
1420                 OperandSize::Size64 => "cqo",
1421             }
1422             .into(),
1423 
1424             Inst::XmmUnaryRmR { op, src, dst, .. } => format!(
1425                 "{} {}, {}",
1426                 ljustify(op.to_string()),
1427                 src.show_rru_sized(mb_rru, op.src_size()),
1428                 show_ireg_sized(dst.to_reg(), mb_rru, 8),
1429             ),
1430 
1431             Inst::XmmUnaryRmREvex { op, src, dst, .. } => format!(
1432                 "{} {}, {}",
1433                 ljustify(op.to_string()),
1434                 src.show_rru_sized(mb_rru, 8),
1435                 show_ireg_sized(dst.to_reg(), mb_rru, 8),
1436             ),
1437 
1438             Inst::XmmMovRM { op, src, dst, .. } => format!(
1439                 "{} {}, {}",
1440                 ljustify(op.to_string()),
1441                 show_ireg_sized(*src, mb_rru, 8),
1442                 dst.show_rru(mb_rru),
1443             ),
1444 
1445             Inst::XmmRmR { op, src, dst, .. } => format!(
1446                 "{} {}, {}",
1447                 ljustify(op.to_string()),
1448                 src.show_rru_sized(mb_rru, 8),
1449                 show_ireg_sized(dst.to_reg(), mb_rru, 8),
1450             ),
1451 
1452             Inst::XmmRmREvex {
1453                 op,
1454                 src1,
1455                 src2,
1456                 dst,
1457                 ..
1458             } => format!(
1459                 "{} {}, {}, {}",
1460                 ljustify(op.to_string()),
1461                 src1.show_rru_sized(mb_rru, 8),
1462                 show_ireg_sized(*src2, mb_rru, 8),
1463                 show_ireg_sized(dst.to_reg(), mb_rru, 8),
1464             ),
1465 
1466             Inst::XmmMinMaxSeq {
1467                 lhs,
1468                 rhs_dst,
1469                 is_min,
1470                 size,
1471             } => format!(
1472                 "{} {}, {}",
1473                 ljustify2(
1474                     if *is_min {
1475                         "xmm min seq ".to_string()
1476                     } else {
1477                         "xmm max seq ".to_string()
1478                     },
1479                     format!("f{}", size.to_bits())
1480                 ),
1481                 show_ireg_sized(*lhs, mb_rru, 8),
1482                 show_ireg_sized(rhs_dst.to_reg(), mb_rru, 8),
1483             ),
1484 
1485             Inst::XmmRmRImm {
1486                 op,
1487                 src,
1488                 dst,
1489                 imm,
1490                 size,
1491                 ..
1492             } => format!(
1493                 "{} ${}, {}, {}",
1494                 ljustify(format!(
1495                     "{}{}",
1496                     op.to_string(),
1497                     if *size == OperandSize::Size64 {
1498                         ".w"
1499                     } else {
1500                         ""
1501                     }
1502                 )),
1503                 imm,
1504                 src.show_rru(mb_rru),
1505                 dst.show_rru(mb_rru),
1506             ),
1507 
1508             Inst::XmmUninitializedValue { dst } => {
1509                 format!("{} {}", ljustify("uninit".into()), dst.show_rru(mb_rru),)
1510             }
1511 
1512             Inst::XmmLoadConst { src, dst, .. } => {
1513                 format!("load_const {:?}, {}", src, dst.show_rru(mb_rru),)
1514             }
1515 
1516             Inst::XmmToGpr {
1517                 op,
1518                 src,
1519                 dst,
1520                 dst_size,
1521             } => {
1522                 let dst_size = dst_size.to_bytes();
1523                 format!(
1524                     "{} {}, {}",
1525                     ljustify(op.to_string()),
1526                     src.show_rru(mb_rru),
1527                     show_ireg_sized(dst.to_reg(), mb_rru, dst_size),
1528                 )
1529             }
1530 
1531             Inst::GprToXmm {
1532                 op,
1533                 src,
1534                 src_size,
1535                 dst,
1536             } => format!(
1537                 "{} {}, {}",
1538                 ljustify(op.to_string()),
1539                 src.show_rru_sized(mb_rru, src_size.to_bytes()),
1540                 dst.show_rru(mb_rru)
1541             ),
1542 
1543             Inst::XmmCmpRmR { op, src, dst } => format!(
1544                 "{} {}, {}",
1545                 ljustify(op.to_string()),
1546                 src.show_rru_sized(mb_rru, 8),
1547                 show_ireg_sized(*dst, mb_rru, 8),
1548             ),
1549 
1550             Inst::CvtUint64ToFloatSeq {
1551                 src, dst, dst_size, ..
1552             } => format!(
1553                 "{} {}, {}",
1554                 ljustify(format!(
1555                     "u64_to_{}_seq",
1556                     if *dst_size == OperandSize::Size64 {
1557                         "f64"
1558                     } else {
1559                         "f32"
1560                     }
1561                 )),
1562                 show_ireg_sized(src.to_reg(), mb_rru, 8),
1563                 dst.show_rru(mb_rru),
1564             ),
1565 
1566             Inst::CvtFloatToSintSeq {
1567                 src,
1568                 dst,
1569                 src_size,
1570                 dst_size,
1571                 ..
1572             } => format!(
1573                 "{} {}, {}",
1574                 ljustify(format!(
1575                     "cvt_float{}_to_sint{}_seq",
1576                     src_size.to_bits(),
1577                     dst_size.to_bits()
1578                 )),
1579                 show_ireg_sized(src.to_reg(), mb_rru, 8),
1580                 show_ireg_sized(dst.to_reg(), mb_rru, dst_size.to_bytes()),
1581             ),
1582 
1583             Inst::CvtFloatToUintSeq {
1584                 src,
1585                 dst,
1586                 src_size,
1587                 dst_size,
1588                 ..
1589             } => format!(
1590                 "{} {}, {}",
1591                 ljustify(format!(
1592                     "cvt_float{}_to_uint{}_seq",
1593                     src_size.to_bits(),
1594                     dst_size.to_bits()
1595                 )),
1596                 show_ireg_sized(src.to_reg(), mb_rru, 8),
1597                 show_ireg_sized(dst.to_reg(), mb_rru, dst_size.to_bytes()),
1598             ),
1599 
1600             Inst::Imm {
1601                 dst_size,
1602                 simm64,
1603                 dst,
1604             } => {
1605                 if *dst_size == OperandSize::Size64 {
1606                     format!(
1607                         "{} ${}, {}",
1608                         ljustify("movabsq".to_string()),
1609                         *simm64 as i64,
1610                         show_ireg_sized(dst.to_reg(), mb_rru, 8)
1611                     )
1612                 } else {
1613                     format!(
1614                         "{} ${}, {}",
1615                         ljustify("movl".to_string()),
1616                         (*simm64 as u32) as i32,
1617                         show_ireg_sized(dst.to_reg(), mb_rru, 4)
1618                     )
1619                 }
1620             }
1621 
1622             Inst::MovRR { size, src, dst } => format!(
1623                 "{} {}, {}",
1624                 ljustify2("mov".to_string(), suffix_lq(*size)),
1625                 show_ireg_sized(*src, mb_rru, size.to_bytes()),
1626                 show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes())
1627             ),
1628 
1629             Inst::MovzxRmR {
1630                 ext_mode, src, dst, ..
1631             } => {
1632                 if *ext_mode == ExtMode::LQ {
1633                     format!(
1634                         "{} {}, {}",
1635                         ljustify("movl".to_string()),
1636                         src.show_rru_sized(mb_rru, ext_mode.src_size()),
1637                         show_ireg_sized(dst.to_reg(), mb_rru, 4)
1638                     )
1639                 } else {
1640                     format!(
1641                         "{} {}, {}",
1642                         ljustify2("movz".to_string(), ext_mode.to_string()),
1643                         src.show_rru_sized(mb_rru, ext_mode.src_size()),
1644                         show_ireg_sized(dst.to_reg(), mb_rru, ext_mode.dst_size())
1645                     )
1646                 }
1647             }
1648 
1649             Inst::Mov64MR { src, dst, .. } => format!(
1650                 "{} {}, {}",
1651                 ljustify("movq".to_string()),
1652                 src.show_rru(mb_rru),
1653                 dst.show_rru(mb_rru)
1654             ),
1655 
1656             Inst::LoadEffectiveAddress { addr, dst } => format!(
1657                 "{} {}, {}",
1658                 ljustify("lea".to_string()),
1659                 addr.show_rru(mb_rru),
1660                 dst.show_rru(mb_rru)
1661             ),
1662 
1663             Inst::MovsxRmR {
1664                 ext_mode, src, dst, ..
1665             } => format!(
1666                 "{} {}, {}",
1667                 ljustify2("movs".to_string(), ext_mode.to_string()),
1668                 src.show_rru_sized(mb_rru, ext_mode.src_size()),
1669                 show_ireg_sized(dst.to_reg(), mb_rru, ext_mode.dst_size())
1670             ),
1671 
1672             Inst::MovRM { size, src, dst, .. } => format!(
1673                 "{} {}, {}",
1674                 ljustify2("mov".to_string(), suffix_bwlq(*size)),
1675                 show_ireg_sized(*src, mb_rru, size.to_bytes()),
1676                 dst.show_rru(mb_rru)
1677             ),
1678 
1679             Inst::ShiftR {
1680                 size,
1681                 kind,
1682                 num_bits,
1683                 dst,
1684             } => match num_bits {
1685                 None => format!(
1686                     "{} %cl, {}",
1687                     ljustify2(kind.to_string(), suffix_bwlq(*size)),
1688                     show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes())
1689                 ),
1690 
1691                 Some(num_bits) => format!(
1692                     "{} ${}, {}",
1693                     ljustify2(kind.to_string(), suffix_bwlq(*size)),
1694                     num_bits,
1695                     show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes())
1696                 ),
1697             },
1698 
1699             Inst::XmmRmiReg { opcode, src, dst } => format!(
1700                 "{} {}, {}",
1701                 ljustify(opcode.to_string()),
1702                 src.show_rru(mb_rru),
1703                 dst.to_reg().show_rru(mb_rru)
1704             ),
1705 
1706             Inst::CmpRmiR {
1707                 size,
1708                 src,
1709                 dst,
1710                 opcode,
1711             } => {
1712                 let op = match opcode {
1713                     CmpOpcode::Cmp => "cmp",
1714                     CmpOpcode::Test => "test",
1715                 };
1716                 format!(
1717                     "{} {}, {}",
1718                     ljustify2(op.to_string(), suffix_bwlq(*size)),
1719                     src.show_rru_sized(mb_rru, size.to_bytes()),
1720                     show_ireg_sized(*dst, mb_rru, size.to_bytes())
1721                 )
1722             }
1723 
1724             Inst::Setcc { cc, dst } => format!(
1725                 "{} {}",
1726                 ljustify2("set".to_string(), cc.to_string()),
1727                 show_ireg_sized(dst.to_reg(), mb_rru, 1)
1728             ),
1729 
1730             Inst::Cmove { size, cc, src, dst } => format!(
1731                 "{} {}, {}",
1732                 ljustify(format!("cmov{}{}", cc.to_string(), suffix_bwlq(*size))),
1733                 src.show_rru_sized(mb_rru, size.to_bytes()),
1734                 show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes())
1735             ),
1736 
1737             Inst::XmmCmove { size, cc, src, dst } => {
1738                 format!(
1739                     "j{} $next; mov{} {}, {}; $next: ",
1740                     cc.invert().to_string(),
1741                     if *size == OperandSize::Size64 {
1742                         "sd"
1743                     } else {
1744                         "ss"
1745                     },
1746                     src.show_rru_sized(mb_rru, size.to_bytes()),
1747                     show_ireg_sized(dst.to_reg(), mb_rru, size.to_bytes())
1748                 )
1749             }
1750 
1751             Inst::Push64 { src } => {
1752                 format!("{} {}", ljustify("pushq".to_string()), src.show_rru(mb_rru))
1753             }
1754 
1755             Inst::Pop64 { dst } => {
1756                 format!("{} {}", ljustify("popq".to_string()), dst.show_rru(mb_rru))
1757             }
1758 
1759             Inst::CallKnown { dest, .. } => format!("{} {:?}", ljustify("call".to_string()), dest),
1760 
1761             Inst::CallUnknown { dest, .. } => format!(
1762                 "{} *{}",
1763                 ljustify("call".to_string()),
1764                 dest.show_rru(mb_rru)
1765             ),
1766 
1767             Inst::Ret => "ret".to_string(),
1768 
1769             Inst::EpiloguePlaceholder => "epilogue placeholder".to_string(),
1770 
1771             Inst::JmpKnown { dst } => {
1772                 format!("{} {}", ljustify("jmp".to_string()), dst.to_string())
1773             }
1774 
1775             Inst::JmpIf { cc, taken } => format!(
1776                 "{} {}",
1777                 ljustify2("j".to_string(), cc.to_string()),
1778                 taken.to_string(),
1779             ),
1780 
1781             Inst::JmpCond {
1782                 cc,
1783                 taken,
1784                 not_taken,
1785             } => format!(
1786                 "{} {}; j {}",
1787                 ljustify2("j".to_string(), cc.to_string()),
1788                 taken.to_string(),
1789                 not_taken.to_string()
1790             ),
1791 
1792             Inst::JmpTableSeq { idx, .. } => {
1793                 format!("{} {}", ljustify("br_table".into()), idx.show_rru(mb_rru))
1794             }
1795 
1796             Inst::JmpUnknown { target } => format!(
1797                 "{} *{}",
1798                 ljustify("jmp".to_string()),
1799                 target.show_rru(mb_rru)
1800             ),
1801 
1802             Inst::TrapIf { cc, trap_code, .. } => {
1803                 format!("j{} ; ud2 {} ;", cc.invert().to_string(), trap_code)
1804             }
1805 
1806             Inst::LoadExtName {
1807                 dst, name, offset, ..
1808             } => format!(
1809                 "{} {}+{}, {}",
1810                 ljustify("load_ext_name".into()),
1811                 name,
1812                 offset,
1813                 show_ireg_sized(dst.to_reg(), mb_rru, 8),
1814             ),
1815 
1816             Inst::LockCmpxchg { ty, src, dst, .. } => {
1817                 let size = ty.bytes() as u8;
1818                 format!(
1819                     "lock cmpxchg{} {}, {}",
1820                     suffix_bwlq(OperandSize::from_bytes(size as u32)),
1821                     show_ireg_sized(*src, mb_rru, size),
1822                     dst.show_rru(mb_rru)
1823                 )
1824             }
1825 
1826             Inst::AtomicRmwSeq { ty, op, .. } => {
1827                 format!(
1828                     "atomically {{ {}_bits_at_[%r9]) {:?}= %r10; %rax = old_value_at_[%r9]; %r11, %rflags = trash }}",
1829                     ty.bits(), op)
1830             }
1831 
1832             Inst::Fence { kind } => match kind {
1833                 FenceKind::MFence => "mfence".to_string(),
1834                 FenceKind::LFence => "lfence".to_string(),
1835                 FenceKind::SFence => "sfence".to_string(),
1836             },
1837 
1838             Inst::VirtualSPOffsetAdj { offset } => format!("virtual_sp_offset_adjust {}", offset),
1839 
1840             Inst::Hlt => "hlt".into(),
1841 
1842             Inst::Ud2 { trap_code } => format!("ud2 {}", trap_code),
1843 
1844             Inst::ElfTlsGetAddr { ref symbol } => {
1845                 format!("elf_tls_get_addr {:?}", symbol)
1846             }
1847 
1848             Inst::MachOTlsGetAddr { ref symbol } => {
1849                 format!("macho_tls_get_addr {:?}", symbol)
1850             }
1851 
1852             Inst::ValueLabelMarker { label, reg } => {
1853                 format!("value_label {:?}, {}", label, reg.show_rru(mb_rru))
1854             }
1855 
1856             Inst::Unwind { inst } => {
1857                 format!("unwind {:?}", inst)
1858             }
1859         }
1860     }
1861 }
1862 
1863 // Temp hook for legacy printing machinery
1864 impl fmt::Debug for Inst {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result1865     fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
1866         // Print the insn without a Universe :-(
1867         write!(fmt, "{}", self.show_rru(None))
1868     }
1869 }
1870 
/// Report the register-allocation view of `inst` into `collector`: every
/// register the instruction reads (`use`), writes (`def`), or both reads and
/// writes (`mod`).
///
/// NOTE(review): this must be kept in sync with the emission code and with
/// `x64_map_regs` below — a register reported incorrectly here produces a
/// miscompile, not an error.
fn x64_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
    // This is a bit subtle. If some register is in the modified set, then it may not be in either
    // the use or def sets. However, enforcing that directly is somewhat difficult. Instead,
    // regalloc.rs will "fix" this for us by removing the modified set from the use and def
    // sets.
    match inst {
        Inst::AluRmiR { src, dst, .. } => {
            if inst.produces_const() {
                // No need to account for src, since src == dst.
                collector.add_def(*dst);
            } else {
                src.get_regs_as_uses(collector);
                collector.add_mod(*dst);
            }
        }
        // `not` and `neg` read and rewrite their single operand in place.
        Inst::Not { src, .. } => {
            collector.add_mod(*src);
        }
        Inst::Neg { src, .. } => {
            collector.add_mod(*src);
        }
        Inst::Div { size, divisor, .. } => {
            // x86 division implicitly uses rax (and, for widths > 8 bits,
            // rdx as the high half of the dividend / the remainder).
            collector.add_mod(Writable::from_reg(regs::rax()));
            if *size == OperandSize::Size8 {
                // An 8-bit divide takes its whole dividend from AX and does
                // not read rdx, so rdx is reported write-only (clobbered).
                collector.add_def(Writable::from_reg(regs::rdx()));
            } else {
                collector.add_mod(Writable::from_reg(regs::rdx()));
            }
            divisor.get_regs_as_uses(collector);
        }
        Inst::MulHi { rhs, .. } => {
            // Widening multiply reads rax and writes the high half to rdx.
            collector.add_mod(Writable::from_reg(regs::rax()));
            collector.add_def(Writable::from_reg(regs::rdx()));
            rhs.get_regs_as_uses(collector);
        }
        Inst::CheckedDivOrRemSeq { divisor, tmp, .. } => {
            // Mark both fixed registers as mods, to avoid an early clobber problem in codegen
            // (i.e. the temporary is allocated one of the fixed registers). This requires writing
            // the rdx register *before* the instruction, which is not too bad.
            collector.add_mod(Writable::from_reg(regs::rax()));
            collector.add_mod(Writable::from_reg(regs::rdx()));
            collector.add_mod(*divisor);
            if let Some(tmp) = tmp {
                collector.add_def(*tmp);
            }
        }
        Inst::SignExtendData { size } => match size {
            // cbw sign-extends AL into AX: entirely within rax.
            OperandSize::Size8 => collector.add_mod(Writable::from_reg(regs::rax())),
            // cwd/cdq/cqo read rax and write the sign bits into rdx.
            _ => {
                collector.add_use(regs::rax());
                collector.add_def(Writable::from_reg(regs::rdx()));
            }
        },
        Inst::UnaryRmR { src, dst, .. }
        | Inst::XmmUnaryRmR { src, dst, .. }
        | Inst::XmmUnaryRmREvex { src, dst, .. } => {
            src.get_regs_as_uses(collector);
            collector.add_def(*dst);
        }
        Inst::XmmRmR { src, dst, op, .. } => {
            if inst.produces_const() {
                // No need to account for src, since src == dst.
                collector.add_def(*dst);
            } else {
                src.get_regs_as_uses(collector);
                collector.add_mod(*dst);
                // Some instructions have an implicit use of XMM0.
                if *op == SseOpcode::Blendvpd
                    || *op == SseOpcode::Blendvps
                    || *op == SseOpcode::Pblendvb
                {
                    collector.add_use(regs::xmm0());
                }
            }
        }
        Inst::XmmRmREvex {
            op,
            src1,
            src2,
            dst,
            ..
        } => {
            src1.get_regs_as_uses(collector);
            collector.add_use(*src2);
            match *op {
                // vpermi2b also reads its destination (the index operand).
                Avx512Opcode::Vpermi2b => collector.add_mod(*dst),
                _ => collector.add_def(*dst),
            }
        }
        Inst::XmmRmRImm { op, src, dst, .. } => {
            if inst.produces_const() {
                // No need to account for src, since src == dst.
                collector.add_def(*dst);
            } else if *op == SseOpcode::Pextrb
                || *op == SseOpcode::Pextrw
                || *op == SseOpcode::Pextrd
                || *op == SseOpcode::Pshufd
                || *op == SseOpcode::Roundss
                || *op == SseOpcode::Roundsd
                || *op == SseOpcode::Roundps
                || *op == SseOpcode::Roundpd
            {
                // These opcodes fully overwrite dst without reading its prior
                // value, so dst is a def rather than a mod.
                src.get_regs_as_uses(collector);
                collector.add_def(*dst);
            } else {
                src.get_regs_as_uses(collector);
                collector.add_mod(*dst);
            }
        }
        Inst::XmmUninitializedValue { dst } => collector.add_def(*dst),
        Inst::XmmLoadConst { dst, .. } => collector.add_def(*dst),
        Inst::XmmMinMaxSeq { lhs, rhs_dst, .. } => {
            collector.add_use(*lhs);
            collector.add_mod(*rhs_dst);
        }
        Inst::XmmRmiReg { src, dst, .. } => {
            src.get_regs_as_uses(collector);
            collector.add_mod(*dst);
        }
        Inst::XmmMovRM { src, dst, .. } => {
            // A store: src is read, and dst is an address (all uses).
            collector.add_use(*src);
            dst.get_regs_as_uses(collector);
        }
        Inst::XmmCmpRmR { src, dst, .. } => {
            // Comparison only reads both operands (result goes to rflags).
            src.get_regs_as_uses(collector);
            collector.add_use(*dst);
        }
        Inst::Imm { dst, .. } => {
            collector.add_def(*dst);
        }
        Inst::MovRR { src, dst, .. } | Inst::XmmToGpr { src, dst, .. } => {
            collector.add_use(*src);
            collector.add_def(*dst);
        }
        Inst::GprToXmm { src, dst, .. } => {
            src.get_regs_as_uses(collector);
            collector.add_def(*dst);
        }
        Inst::CvtUint64ToFloatSeq {
            src,
            dst,
            tmp_gpr1,
            tmp_gpr2,
            ..
        } => {
            // The emitted sequence clobbers its source, hence `mod`.
            collector.add_mod(*src);
            collector.add_def(*dst);
            collector.add_def(*tmp_gpr1);
            collector.add_def(*tmp_gpr2);
        }
        Inst::CvtFloatToSintSeq {
            src,
            dst,
            tmp_xmm,
            tmp_gpr,
            ..
        }
        | Inst::CvtFloatToUintSeq {
            src,
            dst,
            tmp_gpr,
            tmp_xmm,
            ..
        } => {
            collector.add_mod(*src);
            collector.add_def(*dst);
            collector.add_def(*tmp_gpr);
            collector.add_def(*tmp_xmm);
        }
        Inst::MovzxRmR { src, dst, .. } => {
            src.get_regs_as_uses(collector);
            collector.add_def(*dst);
        }
        Inst::Mov64MR { src, dst, .. } | Inst::LoadEffectiveAddress { addr: src, dst } => {
            src.get_regs_as_uses(collector);
            collector.add_def(*dst)
        }
        Inst::MovsxRmR { src, dst, .. } => {
            src.get_regs_as_uses(collector);
            collector.add_def(*dst);
        }
        Inst::MovRM { src, dst, .. } => {
            // A store: src is read, and dst is an address (all uses).
            collector.add_use(*src);
            dst.get_regs_as_uses(collector);
        }
        Inst::ShiftR { num_bits, dst, .. } => {
            if num_bits.is_none() {
                // A dynamic shift amount lives implicitly in CL.
                collector.add_use(regs::rcx());
            }
            collector.add_mod(*dst);
        }
        Inst::CmpRmiR { src, dst, .. } => {
            // cmp/test only read their operands; the result is in rflags.
            src.get_regs_as_uses(collector);
            collector.add_use(*dst); // yes, really `add_use`
        }
        Inst::Setcc { dst, .. } => {
            collector.add_def(*dst);
        }
        Inst::Cmove { src, dst, .. } | Inst::XmmCmove { src, dst, .. } => {
            // cmov leaves dst unchanged when the condition is false, so dst
            // is both read and written.
            src.get_regs_as_uses(collector);
            collector.add_mod(*dst);
        }
        Inst::Push64 { src } => {
            src.get_regs_as_uses(collector);
            collector.add_mod(Writable::from_reg(regs::rsp()));
        }
        Inst::Pop64 { dst } => {
            collector.add_def(*dst);
        }

        Inst::CallKnown {
            ref uses, ref defs, ..
        } => {
            // Argument registers are uses; clobbered/return registers are defs.
            collector.add_uses(uses);
            collector.add_defs(defs);
        }

        Inst::CallUnknown {
            ref uses,
            ref defs,
            dest,
            ..
        } => {
            collector.add_uses(uses);
            collector.add_defs(defs);
            dest.get_regs_as_uses(collector);
        }

        Inst::JmpTableSeq {
            ref idx,
            ref tmp1,
            ref tmp2,
            ..
        } => {
            collector.add_use(*idx);
            collector.add_def(*tmp1);
            collector.add_def(*tmp2);
        }

        Inst::JmpUnknown { target } => {
            target.get_regs_as_uses(collector);
        }

        Inst::LoadExtName { dst, .. } => {
            collector.add_def(*dst);
        }

        Inst::LockCmpxchg { src, dst, .. } => {
            // cmpxchg implicitly compares against rax and writes the old
            // memory value back into it.
            dst.get_regs_as_uses(collector);
            collector.add_use(*src);
            collector.add_mod(Writable::from_reg(regs::rax()));
        }

        Inst::AtomicRmwSeq { .. } => {
            // Fixed-register sequence (see the pretty-printer above):
            // r9 = address, r10 = second operand, r11 = scratch,
            // rax = old value at the address.
            collector.add_use(regs::r9());
            collector.add_use(regs::r10());
            collector.add_def(Writable::from_reg(regs::r11()));
            collector.add_def(Writable::from_reg(regs::rax()));
        }

        Inst::Ret
        | Inst::EpiloguePlaceholder
        | Inst::JmpKnown { .. }
        | Inst::JmpIf { .. }
        | Inst::JmpCond { .. }
        | Inst::Nop { .. }
        | Inst::TrapIf { .. }
        | Inst::VirtualSPOffsetAdj { .. }
        | Inst::Hlt
        | Inst::Ud2 { .. }
        | Inst::Fence { .. } => {
            // No registers are used.
        }

        Inst::ElfTlsGetAddr { .. } | Inst::MachOTlsGetAddr { .. } => {
            // All caller-saves are clobbered.
            //
            // We use the SysV calling convention here because the
            // pseudoinstruction (and relocation that it emits) is specific to
            // ELF systems; other x86-64 targets with other conventions (i.e.,
            // Windows) use different TLS strategies.
            for reg in X64ABIMachineSpec::get_regs_clobbered_by_call(CallConv::SystemV) {
                collector.add_def(reg);
            }
        }

        Inst::ValueLabelMarker { reg, .. } => {
            // Keeps the labelled value alive for debug-info purposes.
            collector.add_use(*reg);
        }

        Inst::Unwind { .. } => {}
    }
}
2164 
2165 //=============================================================================
2166 // Instructions and subcomponents: map_regs
2167 
map_use<RUM: RegUsageMapper>(m: &RUM, r: &mut Reg)2168 fn map_use<RUM: RegUsageMapper>(m: &RUM, r: &mut Reg) {
2169     if let Some(reg) = r.as_virtual_reg() {
2170         let new = m.get_use(reg).unwrap().to_reg();
2171         *r = new;
2172     }
2173 }
2174 
map_def<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>)2175 fn map_def<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>) {
2176     if let Some(reg) = r.to_reg().as_virtual_reg() {
2177         let new = m.get_def(reg).unwrap().to_reg();
2178         *r = Writable::from_reg(new);
2179     }
2180 }
2181 
map_mod<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>)2182 fn map_mod<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>) {
2183     if let Some(reg) = r.to_reg().as_virtual_reg() {
2184         let new = m.get_mod(reg).unwrap().to_reg();
2185         *r = Writable::from_reg(new);
2186     }
2187 }
2188 
2189 impl Amode {
map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM)2190     fn map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM) {
2191         match self {
2192             Amode::ImmReg { ref mut base, .. } => map_use(map, base),
2193             Amode::ImmRegRegShift {
2194                 ref mut base,
2195                 ref mut index,
2196                 ..
2197             } => {
2198                 map_use(map, base);
2199                 map_use(map, index);
2200             }
2201             Amode::RipRelative { .. } => {
2202                 // RIP isn't involved in regalloc.
2203             }
2204         }
2205     }
2206 
2207     /// Offset the amode by a fixed offset.
offset(&self, offset: u32) -> Self2208     pub(crate) fn offset(&self, offset: u32) -> Self {
2209         let mut ret = self.clone();
2210         match &mut ret {
2211             &mut Amode::ImmReg { ref mut simm32, .. } => *simm32 += offset,
2212             &mut Amode::ImmRegRegShift { ref mut simm32, .. } => *simm32 += offset,
2213             _ => panic!("Cannot offset amode: {:?}", self),
2214         }
2215         ret
2216     }
2217 }
2218 
2219 impl RegMemImm {
map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM)2220     fn map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM) {
2221         match self {
2222             RegMemImm::Reg { ref mut reg } => map_use(map, reg),
2223             RegMemImm::Mem { ref mut addr } => addr.map_uses(map),
2224             RegMemImm::Imm { .. } => {}
2225         }
2226     }
2227 
map_as_def<RUM: RegUsageMapper>(&mut self, mapper: &RUM)2228     fn map_as_def<RUM: RegUsageMapper>(&mut self, mapper: &RUM) {
2229         match self {
2230             Self::Reg { reg } => {
2231                 let mut writable_src = Writable::from_reg(*reg);
2232                 map_def(mapper, &mut writable_src);
2233                 *self = Self::reg(writable_src.to_reg());
2234             }
2235             _ => panic!("unexpected RegMemImm kind in map_src_reg_as_def"),
2236         }
2237     }
2238 }
2239 
2240 impl RegMem {
map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM)2241     fn map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM) {
2242         match self {
2243             RegMem::Reg { ref mut reg } => map_use(map, reg),
2244             RegMem::Mem { ref mut addr, .. } => addr.map_uses(map),
2245         }
2246     }
2247 
map_as_def<RUM: RegUsageMapper>(&mut self, mapper: &RUM)2248     fn map_as_def<RUM: RegUsageMapper>(&mut self, mapper: &RUM) {
2249         match self {
2250             Self::Reg { reg } => {
2251                 let mut writable_src = Writable::from_reg(*reg);
2252                 map_def(mapper, &mut writable_src);
2253                 *self = Self::reg(writable_src.to_reg());
2254             }
2255             _ => panic!("unexpected RegMem kind in map_src_reg_as_def"),
2256         }
2257     }
2258 }
2259 
x64_map_regs<RUM: RegUsageMapper>(inst: &mut Inst, mapper: &RUM)2260 fn x64_map_regs<RUM: RegUsageMapper>(inst: &mut Inst, mapper: &RUM) {
2261     // Note this must be carefully synchronized with x64_get_regs.
2262     let produces_const = inst.produces_const();
2263 
2264     match inst {
2265         // ** Nop
2266         Inst::AluRmiR {
2267             ref mut src,
2268             ref mut dst,
2269             ..
2270         } => {
2271             if produces_const {
2272                 src.map_as_def(mapper);
2273                 map_def(mapper, dst);
2274             } else {
2275                 src.map_uses(mapper);
2276                 map_mod(mapper, dst);
2277             }
2278         }
2279         Inst::Not { src, .. } | Inst::Neg { src, .. } => map_mod(mapper, src),
2280         Inst::Div { divisor, .. } => divisor.map_uses(mapper),
2281         Inst::MulHi { rhs, .. } => rhs.map_uses(mapper),
2282         Inst::CheckedDivOrRemSeq { divisor, tmp, .. } => {
2283             map_mod(mapper, divisor);
2284             if let Some(tmp) = tmp {
2285                 map_def(mapper, tmp)
2286             }
2287         }
2288         Inst::SignExtendData { .. } => {}
2289         Inst::XmmUnaryRmR {
2290             ref mut src,
2291             ref mut dst,
2292             ..
2293         }
2294         | Inst::XmmUnaryRmREvex {
2295             ref mut src,
2296             ref mut dst,
2297             ..
2298         }
2299         | Inst::UnaryRmR {
2300             ref mut src,
2301             ref mut dst,
2302             ..
2303         } => {
2304             src.map_uses(mapper);
2305             map_def(mapper, dst);
2306         }
2307         Inst::XmmRmRImm {
2308             ref op,
2309             ref mut src,
2310             ref mut dst,
2311             ..
2312         } => {
2313             if produces_const {
2314                 src.map_as_def(mapper);
2315                 map_def(mapper, dst);
2316             } else if *op == SseOpcode::Pextrb
2317                 || *op == SseOpcode::Pextrw
2318                 || *op == SseOpcode::Pextrd
2319                 || *op == SseOpcode::Pshufd
2320                 || *op == SseOpcode::Roundss
2321                 || *op == SseOpcode::Roundsd
2322                 || *op == SseOpcode::Roundps
2323                 || *op == SseOpcode::Roundpd
2324             {
2325                 src.map_uses(mapper);
2326                 map_def(mapper, dst);
2327             } else {
2328                 src.map_uses(mapper);
2329                 map_mod(mapper, dst);
2330             }
2331         }
2332         Inst::XmmRmR {
2333             ref mut src,
2334             ref mut dst,
2335             ..
2336         } => {
2337             if produces_const {
2338                 src.map_as_def(mapper);
2339                 map_def(mapper, dst);
2340             } else {
2341                 src.map_uses(mapper);
2342                 map_mod(mapper, dst);
2343             }
2344         }
2345         Inst::XmmRmREvex {
2346             op,
2347             ref mut src1,
2348             ref mut src2,
2349             ref mut dst,
2350             ..
2351         } => {
2352             src1.map_uses(mapper);
2353             map_use(mapper, src2);
2354             match *op {
2355                 Avx512Opcode::Vpermi2b => map_mod(mapper, dst),
2356                 _ => map_def(mapper, dst),
2357             }
2358         }
2359         Inst::XmmRmiReg {
2360             ref mut src,
2361             ref mut dst,
2362             ..
2363         } => {
2364             src.map_uses(mapper);
2365             map_mod(mapper, dst);
2366         }
2367         Inst::XmmUninitializedValue { ref mut dst, .. } => {
2368             map_def(mapper, dst);
2369         }
2370         Inst::XmmLoadConst { ref mut dst, .. } => {
2371             map_def(mapper, dst);
2372         }
2373         Inst::XmmMinMaxSeq {
2374             ref mut lhs,
2375             ref mut rhs_dst,
2376             ..
2377         } => {
2378             map_use(mapper, lhs);
2379             map_mod(mapper, rhs_dst);
2380         }
2381         Inst::XmmMovRM {
2382             ref mut src,
2383             ref mut dst,
2384             ..
2385         } => {
2386             map_use(mapper, src);
2387             dst.map_uses(mapper);
2388         }
2389         Inst::XmmCmpRmR {
2390             ref mut src,
2391             ref mut dst,
2392             ..
2393         } => {
2394             src.map_uses(mapper);
2395             map_use(mapper, dst);
2396         }
2397         Inst::Imm { ref mut dst, .. } => map_def(mapper, dst),
2398         Inst::MovRR {
2399             ref mut src,
2400             ref mut dst,
2401             ..
2402         }
2403         | Inst::XmmToGpr {
2404             ref mut src,
2405             ref mut dst,
2406             ..
2407         } => {
2408             map_use(mapper, src);
2409             map_def(mapper, dst);
2410         }
2411         Inst::GprToXmm {
2412             ref mut src,
2413             ref mut dst,
2414             ..
2415         } => {
2416             src.map_uses(mapper);
2417             map_def(mapper, dst);
2418         }
2419         Inst::CvtUint64ToFloatSeq {
2420             ref mut src,
2421             ref mut dst,
2422             ref mut tmp_gpr1,
2423             ref mut tmp_gpr2,
2424             ..
2425         } => {
2426             map_mod(mapper, src);
2427             map_def(mapper, dst);
2428             map_def(mapper, tmp_gpr1);
2429             map_def(mapper, tmp_gpr2);
2430         }
2431         Inst::CvtFloatToSintSeq {
2432             ref mut src,
2433             ref mut dst,
2434             ref mut tmp_xmm,
2435             ref mut tmp_gpr,
2436             ..
2437         }
2438         | Inst::CvtFloatToUintSeq {
2439             ref mut src,
2440             ref mut dst,
2441             ref mut tmp_gpr,
2442             ref mut tmp_xmm,
2443             ..
2444         } => {
2445             map_mod(mapper, src);
2446             map_def(mapper, dst);
2447             map_def(mapper, tmp_gpr);
2448             map_def(mapper, tmp_xmm);
2449         }
2450         Inst::MovzxRmR {
2451             ref mut src,
2452             ref mut dst,
2453             ..
2454         } => {
2455             src.map_uses(mapper);
2456             map_def(mapper, dst);
2457         }
2458         Inst::Mov64MR { src, dst, .. } | Inst::LoadEffectiveAddress { addr: src, dst } => {
2459             src.map_uses(mapper);
2460             map_def(mapper, dst);
2461         }
2462         Inst::MovsxRmR {
2463             ref mut src,
2464             ref mut dst,
2465             ..
2466         } => {
2467             src.map_uses(mapper);
2468             map_def(mapper, dst);
2469         }
2470         Inst::MovRM {
2471             ref mut src,
2472             ref mut dst,
2473             ..
2474         } => {
2475             map_use(mapper, src);
2476             dst.map_uses(mapper);
2477         }
2478         Inst::ShiftR { ref mut dst, .. } => {
2479             map_mod(mapper, dst);
2480         }
2481         Inst::CmpRmiR {
2482             ref mut src,
2483             ref mut dst,
2484             ..
2485         } => {
2486             src.map_uses(mapper);
2487             map_use(mapper, dst);
2488         }
2489         Inst::Setcc { ref mut dst, .. } => map_def(mapper, dst),
2490         Inst::Cmove {
2491             ref mut src,
2492             ref mut dst,
2493             ..
2494         }
2495         | Inst::XmmCmove {
2496             ref mut src,
2497             ref mut dst,
2498             ..
2499         } => {
2500             src.map_uses(mapper);
2501             map_mod(mapper, dst)
2502         }
2503         Inst::Push64 { ref mut src } => src.map_uses(mapper),
2504         Inst::Pop64 { ref mut dst } => {
2505             map_def(mapper, dst);
2506         }
2507 
2508         Inst::CallKnown {
2509             ref mut uses,
2510             ref mut defs,
2511             ..
2512         } => {
2513             for r in uses.iter_mut() {
2514                 map_use(mapper, r);
2515             }
2516             for r in defs.iter_mut() {
2517                 map_def(mapper, r);
2518             }
2519         }
2520 
2521         Inst::CallUnknown {
2522             ref mut uses,
2523             ref mut defs,
2524             ref mut dest,
2525             ..
2526         } => {
2527             for r in uses.iter_mut() {
2528                 map_use(mapper, r);
2529             }
2530             for r in defs.iter_mut() {
2531                 map_def(mapper, r);
2532             }
2533             dest.map_uses(mapper);
2534         }
2535 
2536         Inst::JmpTableSeq {
2537             ref mut idx,
2538             ref mut tmp1,
2539             ref mut tmp2,
2540             ..
2541         } => {
2542             map_use(mapper, idx);
2543             map_def(mapper, tmp1);
2544             map_def(mapper, tmp2);
2545         }
2546 
2547         Inst::JmpUnknown { ref mut target } => target.map_uses(mapper),
2548 
2549         Inst::LoadExtName { ref mut dst, .. } => map_def(mapper, dst),
2550 
2551         Inst::LockCmpxchg {
2552             ref mut src,
2553             ref mut dst,
2554             ..
2555         } => {
2556             map_use(mapper, src);
2557             dst.map_uses(mapper);
2558         }
2559 
2560         Inst::ValueLabelMarker { ref mut reg, .. } => map_use(mapper, reg),
2561 
2562         Inst::Ret
2563         | Inst::EpiloguePlaceholder
2564         | Inst::JmpKnown { .. }
2565         | Inst::JmpCond { .. }
2566         | Inst::JmpIf { .. }
2567         | Inst::Nop { .. }
2568         | Inst::TrapIf { .. }
2569         | Inst::VirtualSPOffsetAdj { .. }
2570         | Inst::Ud2 { .. }
2571         | Inst::Hlt
2572         | Inst::AtomicRmwSeq { .. }
2573         | Inst::ElfTlsGetAddr { .. }
2574         | Inst::MachOTlsGetAddr { .. }
2575         | Inst::Fence { .. }
2576         | Inst::Unwind { .. } => {
2577             // Instruction doesn't explicitly mention any regs, so it can't have any virtual
2578             // regs that we'd need to remap.  Hence no action required.
2579         }
2580     }
2581 }
2582 
2583 //=============================================================================
2584 // Instructions: misc functions and external interface
2585 
2586 impl MachInst for Inst {
get_regs(&self, collector: &mut RegUsageCollector)2587     fn get_regs(&self, collector: &mut RegUsageCollector) {
2588         x64_get_regs(&self, collector)
2589     }
2590 
map_regs<RUM: RegUsageMapper>(&mut self, mapper: &RUM)2591     fn map_regs<RUM: RegUsageMapper>(&mut self, mapper: &RUM) {
2592         x64_map_regs(self, mapper);
2593     }
2594 
is_move(&self) -> Option<(Writable<Reg>, Reg)>2595     fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
2596         match self {
2597             // Note (carefully!) that a 32-bit mov *isn't* a no-op since it zeroes
2598             // out the upper 32 bits of the destination.  For example, we could
2599             // conceivably use `movl %reg, %reg` to zero out the top 32 bits of
2600             // %reg.
2601             Self::MovRR { size, src, dst, .. } if *size == OperandSize::Size64 => {
2602                 Some((*dst, *src))
2603             }
2604             // Note as well that MOVS[S|D] when used in the `XmmUnaryRmR` context are pure moves of
2605             // scalar floating-point values (and annotate `dst` as `def`s to the register allocator)
2606             // whereas the same operation in a packed context, e.g. `XMM_RM_R`, is used to merge a
2607             // value into the lowest lane of a vector (not a move).
2608             Self::XmmUnaryRmR { op, src, dst, .. }
2609                 if *op == SseOpcode::Movss
2610                     || *op == SseOpcode::Movsd
2611                     || *op == SseOpcode::Movaps
2612                     || *op == SseOpcode::Movapd
2613                     || *op == SseOpcode::Movups
2614                     || *op == SseOpcode::Movupd
2615                     || *op == SseOpcode::Movdqa
2616                     || *op == SseOpcode::Movdqu =>
2617             {
2618                 if let RegMem::Reg { reg } = src {
2619                     Some((*dst, *reg))
2620                 } else {
2621                     None
2622                 }
2623             }
2624             _ => None,
2625         }
2626     }
2627 
is_epilogue_placeholder(&self) -> bool2628     fn is_epilogue_placeholder(&self) -> bool {
2629         if let Self::EpiloguePlaceholder = self {
2630             true
2631         } else {
2632             false
2633         }
2634     }
2635 
is_term<'a>(&'a self) -> MachTerminator<'a>2636     fn is_term<'a>(&'a self) -> MachTerminator<'a> {
2637         match self {
2638             // Interesting cases.
2639             &Self::Ret | &Self::EpiloguePlaceholder => MachTerminator::Ret,
2640             &Self::JmpKnown { dst } => MachTerminator::Uncond(dst),
2641             &Self::JmpCond {
2642                 taken, not_taken, ..
2643             } => MachTerminator::Cond(taken, not_taken),
2644             &Self::JmpTableSeq {
2645                 ref targets_for_term,
2646                 ..
2647             } => MachTerminator::Indirect(&targets_for_term[..]),
2648             // All other cases are boring.
2649             _ => MachTerminator::None,
2650         }
2651     }
2652 
stack_op_info(&self) -> Option<MachInstStackOpInfo>2653     fn stack_op_info(&self) -> Option<MachInstStackOpInfo> {
2654         match self {
2655             Self::VirtualSPOffsetAdj { offset } => Some(MachInstStackOpInfo::NomSPAdj(*offset)),
2656             Self::MovRM {
2657                 size: OperandSize::Size8,
2658                 src,
2659                 dst: SyntheticAmode::NominalSPOffset { simm32 },
2660             } => Some(MachInstStackOpInfo::StoreNomSPOff(*src, *simm32 as i64)),
2661             Self::Mov64MR {
2662                 src: SyntheticAmode::NominalSPOffset { simm32 },
2663                 dst,
2664             } => Some(MachInstStackOpInfo::LoadNomSPOff(
2665                 dst.to_reg(),
2666                 *simm32 as i64,
2667             )),
2668             _ => None,
2669         }
2670     }
2671 
gen_move(dst_reg: Writable<Reg>, src_reg: Reg, ty: Type) -> Inst2672     fn gen_move(dst_reg: Writable<Reg>, src_reg: Reg, ty: Type) -> Inst {
2673         let rc_dst = dst_reg.to_reg().get_class();
2674         let rc_src = src_reg.get_class();
2675         // If this isn't true, we have gone way off the rails.
2676         debug_assert!(rc_dst == rc_src);
2677         match rc_dst {
2678             RegClass::I64 => Inst::mov_r_r(OperandSize::Size64, src_reg, dst_reg),
2679             RegClass::V128 => {
2680                 // The Intel optimization manual, in "3.5.1.13 Zero-Latency MOV Instructions",
2681                 // doesn't include MOVSS/MOVSD as instructions with zero-latency. Use movaps for
2682                 // those, which may write more lanes that we need, but are specified to have
2683                 // zero-latency.
2684                 let opcode = match ty {
2685                     types::F32 | types::F64 | types::F32X4 => SseOpcode::Movaps,
2686                     types::F64X2 => SseOpcode::Movapd,
2687                     _ if ty.is_vector() && ty.bits() == 128 => SseOpcode::Movdqa,
2688                     _ => unimplemented!("unable to move type: {}", ty),
2689                 };
2690                 Inst::xmm_unary_rm_r(opcode, RegMem::reg(src_reg), dst_reg)
2691             }
2692             _ => panic!("gen_move(x64): unhandled regclass {:?}", rc_dst),
2693         }
2694     }
2695 
gen_nop(preferred_size: usize) -> Inst2696     fn gen_nop(preferred_size: usize) -> Inst {
2697         Inst::nop(std::cmp::min(preferred_size, 15) as u8)
2698     }
2699 
maybe_direct_reload(&self, _reg: VirtualReg, _slot: SpillSlot) -> Option<Inst>2700     fn maybe_direct_reload(&self, _reg: VirtualReg, _slot: SpillSlot) -> Option<Inst> {
2701         None
2702     }
2703 
rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])>2704     fn rc_for_type(ty: Type) -> CodegenResult<(&'static [RegClass], &'static [Type])> {
2705         match ty {
2706             types::I8 => Ok((&[RegClass::I64], &[types::I8])),
2707             types::I16 => Ok((&[RegClass::I64], &[types::I16])),
2708             types::I32 => Ok((&[RegClass::I64], &[types::I32])),
2709             types::I64 => Ok((&[RegClass::I64], &[types::I64])),
2710             types::B1 => Ok((&[RegClass::I64], &[types::B1])),
2711             types::B8 => Ok((&[RegClass::I64], &[types::B8])),
2712             types::B16 => Ok((&[RegClass::I64], &[types::B16])),
2713             types::B32 => Ok((&[RegClass::I64], &[types::B32])),
2714             types::B64 => Ok((&[RegClass::I64], &[types::B64])),
2715             types::R32 => panic!("32-bit reftype pointer should never be seen on x86-64"),
2716             types::R64 => Ok((&[RegClass::I64], &[types::R64])),
2717             types::F32 => Ok((&[RegClass::V128], &[types::F32])),
2718             types::F64 => Ok((&[RegClass::V128], &[types::F64])),
2719             types::I128 => Ok((&[RegClass::I64, RegClass::I64], &[types::I64, types::I64])),
2720             types::B128 => Ok((&[RegClass::I64, RegClass::I64], &[types::B64, types::B64])),
2721             _ if ty.is_vector() => {
2722                 assert!(ty.bits() <= 128);
2723                 Ok((&[RegClass::V128], &[types::I8X16]))
2724             }
2725             types::IFLAGS | types::FFLAGS => Ok((&[RegClass::I64], &[types::I64])),
2726             _ => Err(CodegenError::Unsupported(format!(
2727                 "Unexpected SSA-value type: {}",
2728                 ty
2729             ))),
2730         }
2731     }
2732 
gen_jump(label: MachLabel) -> Inst2733     fn gen_jump(label: MachLabel) -> Inst {
2734         Inst::jmp_known(label)
2735     }
2736 
gen_constant<F: FnMut(Type) -> Writable<Reg>>( to_regs: ValueRegs<Writable<Reg>>, value: u128, ty: Type, mut alloc_tmp: F, ) -> SmallVec<[Self; 4]>2737     fn gen_constant<F: FnMut(Type) -> Writable<Reg>>(
2738         to_regs: ValueRegs<Writable<Reg>>,
2739         value: u128,
2740         ty: Type,
2741         mut alloc_tmp: F,
2742     ) -> SmallVec<[Self; 4]> {
2743         let mut ret = SmallVec::new();
2744         if ty == types::I128 {
2745             ret.push(Inst::imm(
2746                 OperandSize::Size64,
2747                 value as u64,
2748                 to_regs.regs()[0],
2749             ));
2750             ret.push(Inst::imm(
2751                 OperandSize::Size64,
2752                 (value >> 64) as u64,
2753                 to_regs.regs()[1],
2754             ));
2755         } else {
2756             let to_reg = to_regs
2757                 .only_reg()
2758                 .expect("multi-reg values not supported on x64");
2759             if ty == types::F32 {
2760                 if value == 0 {
2761                     ret.push(Inst::xmm_rm_r(
2762                         SseOpcode::Xorps,
2763                         RegMem::reg(to_reg.to_reg()),
2764                         to_reg,
2765                     ));
2766                 } else {
2767                     let tmp = alloc_tmp(types::I32);
2768                     ret.push(Inst::imm(OperandSize::Size32, value as u64, tmp));
2769 
2770                     ret.push(Inst::gpr_to_xmm(
2771                         SseOpcode::Movd,
2772                         RegMem::reg(tmp.to_reg()),
2773                         OperandSize::Size32,
2774                         to_reg,
2775                     ));
2776                 }
2777             } else if ty == types::F64 {
2778                 if value == 0 {
2779                     ret.push(Inst::xmm_rm_r(
2780                         SseOpcode::Xorpd,
2781                         RegMem::reg(to_reg.to_reg()),
2782                         to_reg,
2783                     ));
2784                 } else {
2785                     let tmp = alloc_tmp(types::I64);
2786                     ret.push(Inst::imm(OperandSize::Size64, value as u64, tmp));
2787 
2788                     ret.push(Inst::gpr_to_xmm(
2789                         SseOpcode::Movq,
2790                         RegMem::reg(tmp.to_reg()),
2791                         OperandSize::Size64,
2792                         to_reg,
2793                     ));
2794                 }
2795             } else {
2796                 // Must be an integer type.
2797                 debug_assert!(
2798                     ty == types::B1
2799                         || ty == types::I8
2800                         || ty == types::B8
2801                         || ty == types::I16
2802                         || ty == types::B16
2803                         || ty == types::I32
2804                         || ty == types::B32
2805                         || ty == types::I64
2806                         || ty == types::B64
2807                         || ty == types::R32
2808                         || ty == types::R64
2809                 );
2810                 // Immediates must be 32 or 64 bits.
2811                 // Smaller types are widened.
2812                 let size = match OperandSize::from_ty(ty) {
2813                     OperandSize::Size64 => OperandSize::Size64,
2814                     _ => OperandSize::Size32,
2815                 };
2816                 if value == 0 {
2817                     ret.push(Inst::alu_rmi_r(
2818                         size,
2819                         AluRmiROpcode::Xor,
2820                         RegMemImm::reg(to_reg.to_reg()),
2821                         to_reg,
2822                     ));
2823                 } else {
2824                     let value = value as u64;
2825                     ret.push(Inst::imm(size, value.into(), to_reg));
2826                 }
2827             }
2828         }
2829         ret
2830     }
2831 
reg_universe(flags: &Flags) -> RealRegUniverse2832     fn reg_universe(flags: &Flags) -> RealRegUniverse {
2833         create_reg_universe_systemv(flags)
2834     }
2835 
worst_case_size() -> CodeOffset2836     fn worst_case_size() -> CodeOffset {
2837         15
2838     }
2839 
ref_type_regclass(_: &settings::Flags) -> RegClass2840     fn ref_type_regclass(_: &settings::Flags) -> RegClass {
2841         RegClass::I64
2842     }
2843 
gen_value_label_marker(label: ValueLabel, reg: Reg) -> Self2844     fn gen_value_label_marker(label: ValueLabel, reg: Reg) -> Self {
2845         Inst::ValueLabelMarker { label, reg }
2846     }
2847 
defines_value_label(&self) -> Option<(ValueLabel, Reg)>2848     fn defines_value_label(&self) -> Option<(ValueLabel, Reg)> {
2849         match self {
2850             Inst::ValueLabelMarker { label, reg } => Some((*label, *reg)),
2851             _ => None,
2852         }
2853     }
2854 
2855     type LabelUse = LabelUse;
2856 }
2857 
/// State carried between emissions of a sequence of instructions.
#[derive(Default, Clone, Debug)]
pub struct EmitState {
    /// Addend to convert nominal-SP offsets to real-SP offsets at the current
    /// program point.
    pub(crate) virtual_sp_offset: i64,
    /// Offset of FP from nominal-SP.
    pub(crate) nominal_sp_to_fp: i64,
    /// Safepoint stack map for upcoming instruction, as provided to `pre_safepoint()`.
    /// Consumed via `take_stack_map()` and cleared after each instruction.
    stack_map: Option<StackMap>,
    /// Current source location, as provided to `pre_sourceloc()`.
    cur_srcloc: SourceLoc,
}
2871 
/// Constant state used during emissions of a sequence of instructions.
pub struct EmitInfo {
    /// Shared (target-independent) compilation flags.
    flags: settings::Flags,
    /// x64-specific compilation flags.
    isa_flags: x64_settings::Flags,
}
2877 
2878 impl EmitInfo {
new(flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self2879     pub(crate) fn new(flags: settings::Flags, isa_flags: x64_settings::Flags) -> Self {
2880         Self { flags, isa_flags }
2881     }
2882 }
2883 
impl MachInstEmitInfo for EmitInfo {
    // Expose the shared (target-independent) flags to the emission framework.
    fn flags(&self) -> &Flags {
        &self.flags
    }
}
2889 
impl MachInstEmit for Inst {
    type State = EmitState;
    type Info = EmitInfo;

    // Actual encoding logic lives in the `emit` submodule; this is a thin
    // dispatch point required by the trait.
    fn emit(&self, sink: &mut MachBuffer<Inst>, info: &Self::Info, state: &mut Self::State) {
        emit::emit(self, sink, info, state);
    }

    // Rendering reuses the `PrettyPrint` (`show_rru`) implementation; the
    // emission state is not needed to print an instruction.
    fn pretty_print(&self, mb_rru: Option<&RealRegUniverse>, _: &mut Self::State) -> String {
        self.show_rru(mb_rru)
    }
}
2902 
2903 impl MachInstEmitState<Inst> for EmitState {
new(abi: &dyn ABICallee<I = Inst>) -> Self2904     fn new(abi: &dyn ABICallee<I = Inst>) -> Self {
2905         EmitState {
2906             virtual_sp_offset: 0,
2907             nominal_sp_to_fp: abi.frame_size() as i64,
2908             stack_map: None,
2909             cur_srcloc: SourceLoc::default(),
2910         }
2911     }
2912 
pre_safepoint(&mut self, stack_map: StackMap)2913     fn pre_safepoint(&mut self, stack_map: StackMap) {
2914         self.stack_map = Some(stack_map);
2915     }
2916 
pre_sourceloc(&mut self, srcloc: SourceLoc)2917     fn pre_sourceloc(&mut self, srcloc: SourceLoc) {
2918         self.cur_srcloc = srcloc;
2919     }
2920 }
2921 
impl EmitState {
    /// Take (and clear) the stack map registered for the upcoming safepoint
    /// instruction, if any.
    fn take_stack_map(&mut self) -> Option<StackMap> {
        self.stack_map.take()
    }

    /// Reset per-instruction state after emission so that a pending stack map
    /// cannot leak onto a following instruction.
    fn clear_post_insn(&mut self) {
        self.stack_map = None;
    }

    /// Source location associated with the instruction currently being emitted.
    pub(crate) fn cur_srcloc(&self) -> SourceLoc {
        self.cur_srcloc
    }
}
2935 
/// A label-use (internal relocation) in generated code.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LabelUse {
    /// A 32-bit offset from location of relocation itself, added to the existing value at that
    /// location. Used for control flow instructions which consider an offset from the start of the
    /// next instruction (so the size of the payload -- 4 bytes -- is subtracted from the payload).
    JmpRel32,

    /// A 32-bit offset from location of relocation itself, added to the existing value at that
    /// location. Unlike `JmpRel32`, no implicit adjustment is applied when patching.
    PCRel32,
}
2948 
2949 impl MachInstLabelUse for LabelUse {
2950     const ALIGN: CodeOffset = 1;
2951 
max_pos_range(self) -> CodeOffset2952     fn max_pos_range(self) -> CodeOffset {
2953         match self {
2954             LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x7fff_ffff,
2955         }
2956     }
2957 
max_neg_range(self) -> CodeOffset2958     fn max_neg_range(self) -> CodeOffset {
2959         match self {
2960             LabelUse::JmpRel32 | LabelUse::PCRel32 => 0x8000_0000,
2961         }
2962     }
2963 
patch_size(self) -> CodeOffset2964     fn patch_size(self) -> CodeOffset {
2965         match self {
2966             LabelUse::JmpRel32 | LabelUse::PCRel32 => 4,
2967         }
2968     }
2969 
patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset)2970     fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
2971         let pc_rel = (label_offset as i64) - (use_offset as i64);
2972         debug_assert!(pc_rel <= self.max_pos_range() as i64);
2973         debug_assert!(pc_rel >= -(self.max_neg_range() as i64));
2974         let pc_rel = pc_rel as u32;
2975         match self {
2976             LabelUse::JmpRel32 => {
2977                 let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
2978                 let value = pc_rel.wrapping_add(addend).wrapping_sub(4);
2979                 buffer.copy_from_slice(&value.to_le_bytes()[..]);
2980             }
2981             LabelUse::PCRel32 => {
2982                 let addend = u32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
2983                 let value = pc_rel.wrapping_add(addend);
2984                 buffer.copy_from_slice(&value.to_le_bytes()[..]);
2985             }
2986         }
2987     }
2988 
supports_veneer(self) -> bool2989     fn supports_veneer(self) -> bool {
2990         match self {
2991             LabelUse::JmpRel32 | LabelUse::PCRel32 => false,
2992         }
2993     }
2994 
veneer_size(self) -> CodeOffset2995     fn veneer_size(self) -> CodeOffset {
2996         match self {
2997             LabelUse::JmpRel32 | LabelUse::PCRel32 => 0,
2998         }
2999     }
3000 
generate_veneer(self, _: &mut [u8], _: CodeOffset) -> (CodeOffset, LabelUse)3001     fn generate_veneer(self, _: &mut [u8], _: CodeOffset) -> (CodeOffset, LabelUse) {
3002         match self {
3003             LabelUse::JmpRel32 | LabelUse::PCRel32 => {
3004                 panic!("Veneer not supported for JumpRel32 label-use.");
3005             }
3006         }
3007     }
3008 }
3009