//! AArch64 ISA definitions: instruction arguments.

// Some variants are never constructed, but we still want them as options in the future.
#![allow(dead_code)]

use crate::ir::types::*;
use crate::ir::Type;
use crate::isa::aarch64::inst::*;
use crate::machinst::{ty_bits, MachLabel};

use regalloc::{PrettyPrint, RealRegUniverse, Reg, Writable};

use core::convert::Into;
use std::string::String;

//=============================================================================
// Instruction sub-components: shift and extend descriptors

/// A shift operator for a register or immediate.
#[derive(Clone, Copy, Debug)]
#[repr(u8)]
pub enum ShiftOp {
    LSL = 0b00,
    LSR = 0b01,
    ASR = 0b10,
    ROR = 0b11,
}

impl ShiftOp {
    /// Get the encoding of this shift op.
    pub fn bits(self) -> u8 {
        self as u8
    }
}

/// A shift operator amount.
#[derive(Clone, Copy, Debug)]
pub struct ShiftOpShiftImm(u8);

impl ShiftOpShiftImm {
    /// Maximum shift for shifted-register operands.
    pub const MAX_SHIFT: u64 = 63;

    /// Create a new shift-op shift amount, if possible.
    pub fn maybe_from_shift(shift: u64) -> Option<ShiftOpShiftImm> {
        if shift <= Self::MAX_SHIFT {
            Some(ShiftOpShiftImm(shift as u8))
        } else {
            None
        }
    }

    /// Return the shift amount.
    pub fn value(self) -> u8 {
        self.0
    }

    /// Mask down to a given number of bits.
    pub fn mask(self, bits: u8) -> ShiftOpShiftImm {
        ShiftOpShiftImm(self.0 & (bits - 1))
    }
}
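
// A minimal illustrative check of the shift-amount helpers above; the
// specific values are editorial assumptions, not requirements of the ISA
// definitions.
#[cfg(test)]
mod shift_imm_example_tests {
    use super::*;

    #[test]
    fn shift_amounts_are_range_checked_and_maskable() {
        // Amounts up to MAX_SHIFT are accepted; anything larger is rejected.
        assert!(ShiftOpShiftImm::maybe_from_shift(63).is_some());
        assert!(ShiftOpShiftImm::maybe_from_shift(64).is_none());
        // Masking to a 32-bit shift keeps only the low five bits: 33 & 31 == 1.
        let amt = ShiftOpShiftImm::maybe_from_shift(33).unwrap();
        assert_eq!(amt.mask(32).value(), 1);
    }
}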

/// A shift operator with an amount, guaranteed to be within range.
#[derive(Clone, Debug)]
pub struct ShiftOpAndAmt {
    op: ShiftOp,
    shift: ShiftOpShiftImm,
}

impl ShiftOpAndAmt {
    /// Create a new shift operator with an amount.
    pub fn new(op: ShiftOp, shift: ShiftOpShiftImm) -> ShiftOpAndAmt {
        ShiftOpAndAmt { op, shift }
    }

    /// Get the shift op.
    pub fn op(&self) -> ShiftOp {
        self.op
    }

    /// Get the shift amount.
    pub fn amt(&self) -> ShiftOpShiftImm {
        self.shift
    }
}

/// An extend operator for a register.
#[derive(Clone, Copy, Debug)]
#[repr(u8)]
pub enum ExtendOp {
    UXTB = 0b000,
    UXTH = 0b001,
    UXTW = 0b010,
    UXTX = 0b011,
    SXTB = 0b100,
    SXTH = 0b101,
    SXTW = 0b110,
    SXTX = 0b111,
}

impl ExtendOp {
    /// Encoding of this op.
    pub fn bits(self) -> u8 {
        self as u8
    }
}

//=============================================================================
// Instruction sub-components (memory addresses): definitions

/// A reference to some memory address.
#[derive(Clone, Debug)]
pub enum MemLabel {
    /// An address in the code (a constant pool or jump table), with a
    /// relative offset from this instruction. This form must be used at
    /// emission time; see `memlabel_finalize()` for how other forms are
    /// lowered to this one.
    PCRel(i32),
}

/// An addressing mode specified for a load/store operation.
#[derive(Clone, Debug)]
pub enum AMode {
    //
    // Real ARM64 addressing modes:
    //
    /// "post-indexed" mode as per AArch64 docs: postincrement reg after address computation.
    PostIndexed(Writable<Reg>, SImm9),
    /// "pre-indexed" mode as per AArch64 docs: preincrement reg before address computation.
    PreIndexed(Writable<Reg>, SImm9),

    // N.B.: RegReg, RegScaled, and RegScaledExtended all correspond to
    // what the ISA calls the "register offset" addressing mode. We split out
    // several options here for more ergonomic codegen.
    /// Register plus register offset.
    RegReg(Reg, Reg),

    /// Register plus register offset, scaled by type's size.
    RegScaled(Reg, Reg, Type),

    /// Register plus register offset, scaled by type's size, with index sign- or zero-extended
    /// first.
    RegScaledExtended(Reg, Reg, Type, ExtendOp),

    /// Register plus register offset, with index sign- or zero-extended first.
    RegExtended(Reg, Reg, ExtendOp),

    /// Unscaled signed 9-bit immediate offset from reg.
    Unscaled(Reg, SImm9),

    /// Scaled (by size of a type) unsigned 12-bit immediate offset from reg.
    UnsignedOffset(Reg, UImm12Scaled),

    //
    // Virtual addressing modes that are lowered at emission time:
    //
    /// Reference to a "label": e.g., a symbol.
    Label(MemLabel),

    /// Arbitrary offset from a register. Converted to generation of large
    /// offsets with multiple instructions as necessary during code emission.
    RegOffset(Reg, i64, Type),

    /// Offset from the stack pointer.
    SPOffset(i64, Type),

    /// Offset from the frame pointer.
    FPOffset(i64, Type),

    /// Offset from the "nominal stack pointer", which is where the real SP is
    /// just after stack and spill slots are allocated in the function prologue.
    /// At emission time, this is converted to `SPOffset` with a fixup added to
    /// the offset constant. The fixup is a running value that is tracked as
    /// emission iterates through instructions in linear order, and can be
    /// adjusted up and down with [Inst::VirtualSPOffsetAdj].
    ///
    /// The standard ABI is in charge of handling this (by emitting the
    /// adjustment meta-instructions). It maintains the invariant that "nominal
    /// SP" is where the actual SP is after the function prologue and before
    /// clobber pushes. See the diagram in the documentation for
    /// [the ABI module](crate::isa::aarch64::abi) for more details.
    NominalSPOffset(i64, Type),
}

impl AMode {
    /// Memory reference using an address in a register.
    pub fn reg(reg: Reg) -> AMode {
        // Use UnsignedOffset rather than Unscaled to use ldr rather than ldur.
        // This also does not use PostIndexed / PreIndexed as they update the register.
        AMode::UnsignedOffset(reg, UImm12Scaled::zero(I64))
    }

    /// Memory reference using the sum of two registers as an address.
    pub fn reg_plus_reg(reg1: Reg, reg2: Reg) -> AMode {
        AMode::RegReg(reg1, reg2)
    }

    /// Memory reference using `reg1 + sizeof(ty) * reg2` as an address.
    pub fn reg_plus_reg_scaled(reg1: Reg, reg2: Reg, ty: Type) -> AMode {
        AMode::RegScaled(reg1, reg2, ty)
    }

    /// Memory reference using `reg1 + sizeof(ty) * reg2` as an address, with `reg2` sign- or
    /// zero-extended as per `op`.
    pub fn reg_plus_reg_scaled_extended(reg1: Reg, reg2: Reg, ty: Type, op: ExtendOp) -> AMode {
        AMode::RegScaledExtended(reg1, reg2, ty, op)
    }

    /// Memory reference to a label: a global function or value, or data in the constant pool.
    pub fn label(label: MemLabel) -> AMode {
        AMode::Label(label)
    }

    /// Does the address resolve to just a register value, with no offset or
    /// other computation?
    pub fn is_reg(&self) -> Option<Reg> {
        match self {
            &AMode::UnsignedOffset(r, uimm12) if uimm12.value() == 0 => Some(r),
            &AMode::Unscaled(r, imm9) if imm9.value() == 0 => Some(r),
            &AMode::RegOffset(r, off, _) if off == 0 => Some(r),
            &AMode::FPOffset(off, _) if off == 0 => Some(fp_reg()),
            &AMode::SPOffset(off, _) if off == 0 => Some(stack_reg()),
            _ => None,
        }
    }
}
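
// A small sketch of the `AMode` constructors and `is_reg`; it assumes the
// `xreg` helper from the register definitions is in scope via the glob
// import at the top of this file.
#[cfg(test)]
mod amode_example_tests {
    use super::*;

    #[test]
    fn bare_register_addresses_are_recognized() {
        let base = xreg(5);
        // `AMode::reg` yields an `UnsignedOffset` with a zero immediate,
        // which `is_reg` reports as a plain register address.
        assert_eq!(AMode::reg(base).is_reg(), Some(base));
        // A register-plus-register mode involves extra computation.
        assert!(AMode::reg_plus_reg(base, xreg(6)).is_reg().is_none());
    }
}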

/// A memory argument to a load/store-pair.
#[derive(Clone, Debug)]
pub enum PairAMode {
    /// Signed, scaled 7-bit immediate offset from a register.
    SignedOffset(Reg, SImm7Scaled),
    /// Pre-indexed: the register is updated before the access.
    PreIndexed(Writable<Reg>, SImm7Scaled),
    /// Post-indexed: the register is updated after the access.
    PostIndexed(Writable<Reg>, SImm7Scaled),
}

//=============================================================================
// Instruction sub-components (conditions, branches and branch targets):
// definitions

/// Condition for conditional branches.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum Cond {
    Eq = 0,
    Ne = 1,
    Hs = 2,
    Lo = 3,
    Mi = 4,
    Pl = 5,
    Vs = 6,
    Vc = 7,
    Hi = 8,
    Ls = 9,
    Ge = 10,
    Lt = 11,
    Gt = 12,
    Le = 13,
    Al = 14,
    Nv = 15,
}

impl Cond {
    /// Return the inverted condition.
    pub fn invert(self) -> Cond {
        match self {
            Cond::Eq => Cond::Ne,
            Cond::Ne => Cond::Eq,

            Cond::Hs => Cond::Lo,
            Cond::Lo => Cond::Hs,

            Cond::Mi => Cond::Pl,
            Cond::Pl => Cond::Mi,

            Cond::Vs => Cond::Vc,
            Cond::Vc => Cond::Vs,

            Cond::Hi => Cond::Ls,
            Cond::Ls => Cond::Hi,

            Cond::Ge => Cond::Lt,
            Cond::Lt => Cond::Ge,

            Cond::Gt => Cond::Le,
            Cond::Le => Cond::Gt,

            Cond::Al => Cond::Nv,
            Cond::Nv => Cond::Al,
        }
    }

    /// Return the machine encoding of this condition.
    pub fn bits(self) -> u32 {
        self as u32
    }
}
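
// The condition encodings above pair each condition with its inverse at an
// adjacent code; a brief illustrative check of that property.
#[cfg(test)]
mod cond_example_tests {
    use super::*;

    #[test]
    fn invert_flips_the_low_encoding_bit() {
        assert_eq!(Cond::Eq.invert(), Cond::Ne);
        // Every condition's inverse differs only in the low bit.
        assert_eq!(Cond::Hs.invert().bits(), Cond::Hs.bits() ^ 1);
    }
}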

/// The kind of conditional branch: the common-case-optimized "reg-is-zero" /
/// "reg-is-nonzero" variants, or the generic one that tests the machine
/// condition codes.
#[derive(Clone, Copy, Debug)]
pub enum CondBrKind {
    /// Condition: given register is zero.
    Zero(Reg),
    /// Condition: given register is nonzero.
    NotZero(Reg),
    /// Condition: the given condition-code test is true.
    Cond(Cond),
}

impl CondBrKind {
    /// Return the inverted branch condition.
    pub fn invert(self) -> CondBrKind {
        match self {
            CondBrKind::Zero(reg) => CondBrKind::NotZero(reg),
            CondBrKind::NotZero(reg) => CondBrKind::Zero(reg),
            CondBrKind::Cond(c) => CondBrKind::Cond(c.invert()),
        }
    }
}

/// A branch target. Either unresolved (basic-block index) or resolved (offset
/// from end of current instruction).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BranchTarget {
    /// An unresolved reference to a Label, as passed into
    /// `lower_branch_group()`.
    Label(MachLabel),
    /// A fixed PC offset.
    ResolvedOffset(i32),
}

impl BranchTarget {
    /// Return the target's label, if it is a label-based target.
    pub fn as_label(self) -> Option<MachLabel> {
        match self {
            BranchTarget::Label(l) => Some(l),
            _ => None,
        }
    }

    /// Return the target's offset as a word-scaled 19-bit field, if resolved,
    /// or zero if label-based.
    pub fn as_offset19_or_zero(self) -> u32 {
        let off = match self {
            BranchTarget::ResolvedOffset(off) => off >> 2,
            _ => 0,
        };
        assert!(off <= 0x3ffff);
        assert!(off >= -0x40000);
        (off as u32) & 0x7ffff
    }

    /// Return the target's offset as a word-scaled 26-bit field, if resolved,
    /// or zero if label-based.
    pub fn as_offset26_or_zero(self) -> u32 {
        let off = match self {
            BranchTarget::ResolvedOffset(off) => off >> 2,
            _ => 0,
        };
        assert!(off <= 0x1ffffff);
        assert!(off >= -0x2000000);
        (off as u32) & 0x3ffffff
    }
}
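
// A worked example of the branch-offset encodings above: offsets are byte
// distances, shifted right by 2 (AArch64 instructions are 4 bytes) and
// masked into the 19- or 26-bit field.
#[cfg(test)]
mod branch_target_example_tests {
    use super::*;

    #[test]
    fn resolved_offsets_are_word_scaled_and_masked() {
        // A byte offset of 8 is two instructions forward.
        assert_eq!(BranchTarget::ResolvedOffset(8).as_offset19_or_zero(), 2);
        // A negative offset lands in the field as two's complement:
        // -4 bytes => -1 words => 0x7ffff in the 19-bit field.
        assert_eq!(
            BranchTarget::ResolvedOffset(-4).as_offset19_or_zero(),
            0x7ffff
        );
    }
}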

impl PrettyPrint for ShiftOpAndAmt {
    fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
        format!("{:?} {}", self.op(), self.amt().value())
    }
}

impl PrettyPrint for ExtendOp {
    fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
        format!("{:?}", self)
    }
}

impl PrettyPrint for MemLabel {
    fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
        match self {
            &MemLabel::PCRel(off) => format!("pc+{}", off),
        }
    }
}

/// Return the shift amount (log2 of the type's size in bytes) used when a
/// register offset is scaled by the access type.
fn shift_for_type(ty: Type) -> usize {
    match ty.bytes() {
        1 => 0,
        2 => 1,
        4 => 2,
        8 => 3,
        16 => 4,
        _ => panic!("unknown type: {}", ty),
    }
}

impl PrettyPrint for AMode {
    fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
        match self {
            &AMode::Unscaled(reg, simm9) => {
                if simm9.value != 0 {
                    format!("[{}, {}]", reg.show_rru(mb_rru), simm9.show_rru(mb_rru))
                } else {
                    format!("[{}]", reg.show_rru(mb_rru))
                }
            }
            &AMode::UnsignedOffset(reg, uimm12) => {
                if uimm12.value != 0 {
                    format!("[{}, {}]", reg.show_rru(mb_rru), uimm12.show_rru(mb_rru))
                } else {
                    format!("[{}]", reg.show_rru(mb_rru))
                }
            }
            &AMode::RegReg(r1, r2) => {
                format!("[{}, {}]", r1.show_rru(mb_rru), r2.show_rru(mb_rru))
            }
            &AMode::RegScaled(r1, r2, ty) => {
                let shift = shift_for_type(ty);
                format!(
                    "[{}, {}, LSL #{}]",
                    r1.show_rru(mb_rru),
                    r2.show_rru(mb_rru),
                    shift,
                )
            }
            &AMode::RegScaledExtended(r1, r2, ty, op) => {
                let shift = shift_for_type(ty);
                let size = match op {
                    ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32,
                    _ => OperandSize::Size64,
                };
                let op = op.show_rru(mb_rru);
                format!(
                    "[{}, {}, {} #{}]",
                    r1.show_rru(mb_rru),
                    show_ireg_sized(r2, mb_rru, size),
                    op,
                    shift
                )
            }
            &AMode::RegExtended(r1, r2, op) => {
                let size = match op {
                    ExtendOp::SXTW | ExtendOp::UXTW => OperandSize::Size32,
                    _ => OperandSize::Size64,
                };
                let op = op.show_rru(mb_rru);
                format!(
                    "[{}, {}, {}]",
                    r1.show_rru(mb_rru),
                    show_ireg_sized(r2, mb_rru, size),
                    op,
                )
            }
            &AMode::Label(ref label) => label.show_rru(mb_rru),
            &AMode::PreIndexed(r, simm9) => format!(
                "[{}, {}]!",
                r.to_reg().show_rru(mb_rru),
                simm9.show_rru(mb_rru)
            ),
            &AMode::PostIndexed(r, simm9) => format!(
                "[{}], {}",
                r.to_reg().show_rru(mb_rru),
                simm9.show_rru(mb_rru)
            ),
            // Eliminated by `mem_finalize()`.
            &AMode::SPOffset(..)
            | &AMode::FPOffset(..)
            | &AMode::NominalSPOffset(..)
            | &AMode::RegOffset(..) => {
                panic!("Unexpected pseudo mem-arg mode (stack-offset or generic reg-offset)!")
            }
        }
    }
}

impl PrettyPrint for PairAMode {
    fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
        match self {
            &PairAMode::SignedOffset(reg, simm7) => {
                if simm7.value != 0 {
                    format!("[{}, {}]", reg.show_rru(mb_rru), simm7.show_rru(mb_rru))
                } else {
                    format!("[{}]", reg.show_rru(mb_rru))
                }
            }
            &PairAMode::PreIndexed(reg, simm7) => format!(
                "[{}, {}]!",
                reg.to_reg().show_rru(mb_rru),
                simm7.show_rru(mb_rru)
            ),
            &PairAMode::PostIndexed(reg, simm7) => format!(
                "[{}], {}",
                reg.to_reg().show_rru(mb_rru),
                simm7.show_rru(mb_rru)
            ),
        }
    }
}

impl PrettyPrint for Cond {
    fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
        let mut s = format!("{:?}", self);
        s.make_ascii_lowercase();
        s
    }
}

impl PrettyPrint for BranchTarget {
    fn show_rru(&self, _mb_rru: Option<&RealRegUniverse>) -> String {
        match self {
            &BranchTarget::Label(label) => format!("label{:?}", label.get()),
            &BranchTarget::ResolvedOffset(off) => format!("{}", off),
        }
    }
}

/// Type used to communicate the operand size of a machine instruction, as AArch64 has 32- and
/// 64-bit variants of many instructions (and integer registers).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum OperandSize {
    Size32,
    Size64,
}

impl OperandSize {
    /// 32-bit case?
    pub fn is32(self) -> bool {
        self == OperandSize::Size32
    }
    /// 64-bit case?
    pub fn is64(self) -> bool {
        self == OperandSize::Size64
    }
    /// Convert from an `is32` boolean flag to an `OperandSize`.
    pub fn from_is32(is32: bool) -> OperandSize {
        if is32 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        }
    }
    /// Convert from a needed width to the smallest size that fits.
    pub fn from_bits<I: Into<usize>>(bits: I) -> OperandSize {
        let bits: usize = bits.into();
        assert!(bits <= 64);
        if bits <= 32 {
            OperandSize::Size32
        } else {
            OperandSize::Size64
        }
    }

    /// Convert from an integer type into the smallest size that fits.
    pub fn from_ty(ty: Type) -> OperandSize {
        Self::from_bits(ty_bits(ty))
    }

    /// Convert to the corresponding integer type: I32 or I64.
    pub fn to_ty(self) -> Type {
        match self {
            OperandSize::Size32 => I32,
            OperandSize::Size64 => I64,
        }
    }

    /// Return the "sf" (size flag) bit used in the encodings of many integer
    /// instructions: 0 for the 32-bit form, 1 for the 64-bit form.
    pub fn sf_bit(&self) -> u32 {
        match self {
            OperandSize::Size32 => 0,
            OperandSize::Size64 => 1,
        }
    }
}
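
// Illustrative checks for `OperandSize::from_bits`: widths up to 32 bits
// round up to the 32-bit size, and wider widths (up to 64) to the 64-bit
// size.
#[cfg(test)]
mod operand_size_example_tests {
    use super::*;

    #[test]
    fn from_bits_picks_the_smallest_fitting_size() {
        assert_eq!(OperandSize::from_bits(1usize), OperandSize::Size32);
        assert_eq!(OperandSize::from_bits(32usize), OperandSize::Size32);
        assert_eq!(OperandSize::from_bits(33usize), OperandSize::Size64);
    }
}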

/// Type used to communicate the size of a scalar SIMD & FP operand.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ScalarSize {
    Size8,
    Size16,
    Size32,
    Size64,
    Size128,
}

impl ScalarSize {
    /// Convert from a needed width to the smallest size that fits.
    pub fn from_bits<I: Into<usize>>(bits: I) -> ScalarSize {
        match bits.into().next_power_of_two() {
            8 => ScalarSize::Size8,
            16 => ScalarSize::Size16,
            32 => ScalarSize::Size32,
            64 => ScalarSize::Size64,
            128 => ScalarSize::Size128,
            w => panic!("Unexpected type width: {}", w),
        }
    }

    /// Convert to an integer operand size.
    pub fn operand_size(&self) -> OperandSize {
        match self {
            ScalarSize::Size32 => OperandSize::Size32,
            ScalarSize::Size64 => OperandSize::Size64,
            _ => panic!("Unexpected operand_size request for: {:?}", self),
        }
    }

    /// Convert from an integer operand size.
    pub fn from_operand_size(size: OperandSize) -> ScalarSize {
        match size {
            OperandSize::Size32 => ScalarSize::Size32,
            OperandSize::Size64 => ScalarSize::Size64,
        }
    }

    /// Convert from a type into the smallest size that fits.
    pub fn from_ty(ty: Type) -> ScalarSize {
        Self::from_bits(ty_bits(ty))
    }

    /// Return the encoding bits that are used by some scalar FP instructions
    /// for a particular operand size.
    pub fn ftype(&self) -> u32 {
        match self {
            ScalarSize::Size16 => 0b11,
            ScalarSize::Size32 => 0b00,
            ScalarSize::Size64 => 0b01,
            _ => panic!("Unexpected scalar FP operand size: {:?}", self),
        }
    }
}
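
// `ScalarSize::from_bits` rounds the requested width up to the next power
// of two before matching; a brief sketch of that behavior.
#[cfg(test)]
mod scalar_size_example_tests {
    use super::*;

    #[test]
    fn from_bits_rounds_up_to_a_power_of_two() {
        // 12 bits round up to a 16-bit scalar.
        assert_eq!(ScalarSize::from_bits(12usize), ScalarSize::Size16);
        // Exact powers of two are preserved.
        assert_eq!(ScalarSize::from_bits(64usize), ScalarSize::Size64);
    }
}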

/// Type used to communicate the size of a vector operand.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VectorSize {
    Size8x8,
    Size8x16,
    Size16x4,
    Size16x8,
    Size32x2,
    Size32x4,
    Size64x2,
}

impl VectorSize {
    /// Get the vector operand size with the given scalar size as lane size.
    pub fn from_lane_size(size: ScalarSize, is_128bit: bool) -> VectorSize {
        match (size, is_128bit) {
            (ScalarSize::Size8, false) => VectorSize::Size8x8,
            (ScalarSize::Size8, true) => VectorSize::Size8x16,
            (ScalarSize::Size16, false) => VectorSize::Size16x4,
            (ScalarSize::Size16, true) => VectorSize::Size16x8,
            (ScalarSize::Size32, false) => VectorSize::Size32x2,
            (ScalarSize::Size32, true) => VectorSize::Size32x4,
            (ScalarSize::Size64, true) => VectorSize::Size64x2,
            _ => panic!("Unexpected lane size and vector width: {:?}, {}", size, is_128bit),
        }
    }

    /// Convert from a type into a vector operand size.
    pub fn from_ty(ty: Type) -> VectorSize {
        match ty {
            B8X16 => VectorSize::Size8x16,
            B16X8 => VectorSize::Size16x8,
            B32X4 => VectorSize::Size32x4,
            B64X2 => VectorSize::Size64x2,
            F32X2 => VectorSize::Size32x2,
            F32X4 => VectorSize::Size32x4,
            F64X2 => VectorSize::Size64x2,
            I8X8 => VectorSize::Size8x8,
            I8X16 => VectorSize::Size8x16,
            I16X4 => VectorSize::Size16x4,
            I16X8 => VectorSize::Size16x8,
            I32X2 => VectorSize::Size32x2,
            I32X4 => VectorSize::Size32x4,
            I64X2 => VectorSize::Size64x2,
            _ => unimplemented!("Unsupported type: {}", ty),
        }
    }

    /// Get the integer operand size that corresponds to a lane of a vector with a certain size.
    pub fn operand_size(&self) -> OperandSize {
        match self {
            VectorSize::Size64x2 => OperandSize::Size64,
            _ => OperandSize::Size32,
        }
    }

    /// Get the scalar operand size that corresponds to a lane of a vector with a certain size.
    pub fn lane_size(&self) -> ScalarSize {
        match self {
            VectorSize::Size8x8 => ScalarSize::Size8,
            VectorSize::Size8x16 => ScalarSize::Size8,
            VectorSize::Size16x4 => ScalarSize::Size16,
            VectorSize::Size16x8 => ScalarSize::Size16,
            VectorSize::Size32x2 => ScalarSize::Size32,
            VectorSize::Size32x4 => ScalarSize::Size32,
            VectorSize::Size64x2 => ScalarSize::Size64,
        }
    }

    /// Is the vector 128 bits wide (as opposed to 64)?
    pub fn is_128bits(&self) -> bool {
        match self {
            VectorSize::Size8x8 => false,
            VectorSize::Size8x16 => true,
            VectorSize::Size16x4 => false,
            VectorSize::Size16x8 => true,
            VectorSize::Size32x2 => false,
            VectorSize::Size32x4 => true,
            VectorSize::Size64x2 => true,
        }
    }

    /// Produces a `VectorSize` with lanes twice as wide.  Note that if the resulting
    /// size would exceed 128 bits, then the number of lanes is also halved, so as to
    /// ensure that the result size is at most 128 bits.
    pub fn widen(&self) -> VectorSize {
        match self {
            VectorSize::Size8x8 => VectorSize::Size16x8,
            VectorSize::Size8x16 => VectorSize::Size16x8,
            VectorSize::Size16x4 => VectorSize::Size32x4,
            VectorSize::Size16x8 => VectorSize::Size32x4,
            VectorSize::Size32x2 => VectorSize::Size64x2,
            VectorSize::Size32x4 => VectorSize::Size64x2,
            VectorSize::Size64x2 => unreachable!(),
        }
    }

    /// Produces a `VectorSize` that has the same lane width, but half as many lanes.
    pub fn halve(&self) -> VectorSize {
        match self {
            VectorSize::Size8x16 => VectorSize::Size8x8,
            VectorSize::Size16x8 => VectorSize::Size16x4,
            VectorSize::Size32x4 => VectorSize::Size32x2,
            _ => *self,
        }
    }

    /// Return the encoding bits (the `Q` bit and the two-bit size field)
    /// that are used by some SIMD instructions for a particular operand size.
    pub fn enc_size(&self) -> (u32, u32) {
        let q = self.is_128bits() as u32;
        let size = match self.lane_size() {
            ScalarSize::Size8 => 0b00,
            ScalarSize::Size16 => 0b01,
            ScalarSize::Size32 => 0b10,
            ScalarSize::Size64 => 0b11,
            _ => unreachable!(),
        };

        (q, size)
    }
}
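
// A brief sketch of the lane-size arithmetic above: `widen` doubles the
// lane width while capping the total at 128 bits, and `halve` keeps the
// lane width but halves the lane count.
#[cfg(test)]
mod vector_size_example_tests {
    use super::*;

    #[test]
    fn widen_and_halve_respect_the_128_bit_cap() {
        // 8x16 (128 bits) widens to 16x8, still 128 bits.
        assert_eq!(VectorSize::Size8x16.widen(), VectorSize::Size16x8);
        // 16x8 halves to 16x4: same lane width, half the lanes.
        assert_eq!(VectorSize::Size16x8.halve(), VectorSize::Size16x4);
        // `enc_size` reports (Q, size): a 64-bit vector of 16-bit lanes.
        assert_eq!(VectorSize::Size16x4.enc_size(), (0, 0b01));
    }
}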