//! Intermediate representation for instructions.
//!
//! The goal is to match wasm instructions as closely as possible, but translate
//! the stack machine into an instruction tree. Additionally all control frames
//! are represented as `Block`s.
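//!
//! For example (an illustrative sketch, not a literal dump of the IR), a wasm
//! `block` such as
//!
//! ```ignore
//! (block (result i32)
//!   i32.const 1)
//! ```
//!
//! becomes an `Instr::Block { seq }` whose `seq` id refers to an `InstrSeq`
//! holding the `Const` instruction, rather than a raw run of stack-machine
//! opcodes.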

mod traversals;
pub use self::traversals::*;

use crate::encode::Encoder;
use crate::{
    DataId, ElementId, FunctionId, GlobalId, LocalFunction, MemoryId, ModuleTypes, TableId, TypeId,
    ValType,
};
use id_arena::Id;
use std::fmt;
use std::ops::{Deref, DerefMut};
use walrus_macro::walrus_instr;

/// The id of a local.
pub type LocalId = Id<Local>;

/// A local variable or parameter.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Local {
    id: LocalId,
    ty: ValType,
    /// A human-readable name for this local, often useful when debugging
    pub name: Option<String>,
}

impl Local {
    /// Construct a new local from the given id and type.
    pub fn new(id: LocalId, ty: ValType) -> Local {
        Local { id, ty, name: None }
    }

    /// Get this local's id that is unique across the whole module.
    pub fn id(&self) -> LocalId {
        self.id
    }

    /// Get this local's type.
    pub fn ty(&self) -> ValType {
        self.ty
    }
}

/// The identifier for an `InstrSeq` within some `LocalFunction`.
pub type InstrSeqId = Id<InstrSeq>;

/// The type of an instruction sequence.
///
// NB: We purposefully match the encoding for block types here, with MVP Wasm
// types inlined and multi-value types outlined. If we tried to simplify this
// type representation by always using `TypeId`, then the `used` pass would
// think that a bunch of types that are only internally used by `InstrSeq`s are
// generally used, and we would emit them in the module's "Types" section. We
// don't want to bloat the modules we emit, nor do we want to make the used/GC
// passes convoluted, so we intentionally let the shape of this type guide us.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum InstrSeqType {
    /// MVP Wasm blocks/loops/ifs can only push zero or one resulting value onto
    /// the stack. They cannot take parameters on the stack.
    Simple(Option<ValType>),
    /// The multi-value extension to Wasm allows arbitrary stack parameters and
    /// results, which are expressed via the same mechanism as function types.
    MultiValue(TypeId),
}

impl InstrSeqType {
    /// Construct a new `InstrSeqType` of the correct form for the given
    /// parameter and result types.
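    ///
    /// For example (an illustrative sketch; `types` is assumed to be the
    /// module's `ModuleTypes`):
    ///
    /// ```ignore
    /// // No parameters and no results stays in the simple, MVP-style form.
    /// let empty = InstrSeqType::new(types, &[], &[]);
    /// assert!(matches!(empty, InstrSeqType::Simple(None)));
    ///
    /// // A single result is also representable without a function type.
    /// let one = InstrSeqType::new(types, &[], &[ValType::I32]);
    /// assert!(matches!(one, InstrSeqType::Simple(Some(ValType::I32))));
    ///
    /// // Anything else is outlined into a multi-value type in `ModuleTypes`.
    /// let multi = InstrSeqType::new(types, &[ValType::I32], &[ValType::I32]);
    /// assert!(matches!(multi, InstrSeqType::MultiValue(_)));
    /// ```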
    pub fn new(types: &mut ModuleTypes, params: &[ValType], results: &[ValType]) -> InstrSeqType {
        match (params.len(), results.len()) {
            (0, 0) => InstrSeqType::Simple(None),
            (0, 1) => InstrSeqType::Simple(Some(results[0])),
            _ => InstrSeqType::MultiValue(types.add(params, results)),
        }
    }

    /// Construct an `InstrSeqType` with a signature that is known to either be
    /// `Simple` or uses a `Type` that has already been inserted into the
    /// `ModuleTypes`.
    ///
    /// Returns `None` if this is an instruction sequence signature that
    /// requires multi-value and `ModuleTypes` does not already have a `Type`
    /// for it.
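    ///
    /// For example (illustrative; assumes `types` does not yet contain an
    /// `[i32 i32] -> [i32]` function type):
    ///
    /// ```ignore
    /// // Simple signatures never need a pre-existing `Type`.
    /// assert!(InstrSeqType::existing(&types, &[], &[ValType::I32]).is_some());
    ///
    /// // Multi-value signatures are only found if already in `ModuleTypes`.
    /// let params = [ValType::I32, ValType::I32];
    /// assert!(InstrSeqType::existing(&types, &params, &[ValType::I32]).is_none());
    /// ```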
    pub fn existing(
        types: &ModuleTypes,
        params: &[ValType],
        results: &[ValType],
    ) -> Option<InstrSeqType> {
        Some(match (params.len(), results.len()) {
            (0, 0) => InstrSeqType::Simple(None),
            (0, 1) => InstrSeqType::Simple(Some(results[0])),
            _ => InstrSeqType::MultiValue(types.find(params, results)?),
        })
    }
}

impl From<Option<ValType>> for InstrSeqType {
    #[inline]
    fn from(x: Option<ValType>) -> InstrSeqType {
        InstrSeqType::Simple(x)
    }
}

impl From<ValType> for InstrSeqType {
    #[inline]
    fn from(x: ValType) -> InstrSeqType {
        InstrSeqType::Simple(Some(x))
    }
}

impl From<TypeId> for InstrSeqType {
    #[inline]
    fn from(x: TypeId) -> InstrSeqType {
        InstrSeqType::MultiValue(x)
    }
}

/// A symbolic source location of an original wasm operator.
#[derive(Debug, Copy, Clone)]
pub struct InstrLocId(u32);

const DEFAULT_INSTR_LOC_ID: u32 = 0xffff_ffff;

impl InstrLocId {
    /// Create an `InstrLocId` from the provided data. Normally the data is a
    /// wasm bytecode offset (0xffff_ffff is reserved for the default value).
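    ///
    /// For example (illustrative):
    ///
    /// ```ignore
    /// let loc = InstrLocId::new(0x42);
    /// assert!(!loc.is_default());
    /// assert_eq!(loc.data(), 0x42);
    /// ```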
    pub fn new(data: u32) -> Self {
        assert!(data != DEFAULT_INSTR_LOC_ID);
        InstrLocId(data)
    }

    /// Check whether this is the default value.
    pub fn is_default(&self) -> bool {
        self.0 == DEFAULT_INSTR_LOC_ID
    }

    /// The underlying data (normally a wasm bytecode offset).
    pub fn data(&self) -> u32 {
        assert!(self.0 != DEFAULT_INSTR_LOC_ID);
        self.0
    }
}

impl Default for InstrLocId {
    fn default() -> Self {
        InstrLocId(DEFAULT_INSTR_LOC_ID)
    }
}

/// A sequence of instructions.
#[derive(Debug)]
pub struct InstrSeq {
    id: InstrSeqId,

    /// This block's type: the types of values that are expected on the stack
    /// when entering this instruction sequence and the types that are left on
    /// the stack afterwards.
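    ///
    /// For example (illustrative), a block that takes no stack parameters and
    /// pushes a single `i32` has the type:
    ///
    /// ```ignore
    /// InstrSeqType::Simple(Some(ValType::I32))
    /// ```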
    pub ty: InstrSeqType,

    /// The instructions that make up the body of this block.
    pub instrs: Vec<(Instr, InstrLocId)>,
}

impl Deref for InstrSeq {
    type Target = Vec<(Instr, InstrLocId)>;

    #[inline]
    fn deref(&self) -> &Vec<(Instr, InstrLocId)> {
        &self.instrs
    }
}

impl DerefMut for InstrSeq {
    #[inline]
    fn deref_mut(&mut self) -> &mut Vec<(Instr, InstrLocId)> {
        &mut self.instrs
    }
}

impl InstrSeq {
    /// Construct a new instruction sequence.
    pub(crate) fn new(id: InstrSeqId, ty: InstrSeqType) -> InstrSeq {
        let instrs = vec![];
        InstrSeq { id, ty, instrs }
    }

    /// Get the id of this instruction sequence.
    #[inline]
    pub fn id(&self) -> InstrSeqId {
        self.id
    }
}

/// Different kinds of blocks.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) enum BlockKind {
    /// A `block` block.
    Block,

    /// A `loop` block.
    Loop,

    /// An `if` block.
    If,

    /// An `else` block.
    Else,

    /// The entry to a function.
    FunctionEntry,
}

/// An enum of all the different kinds of wasm instructions.
///
/// Note that the `#[walrus_instr]` macro rewrites this enum's variants from
///
/// ```ignore
/// enum Instr {
///     Variant { field: Ty, .. },
///     ...
/// }
/// ```
///
/// into
///
/// ```ignore
/// enum Instr {
///     Variant(Variant),
///     ...
/// }
///
/// struct Variant {
///     field: Ty,
///     ...
/// }
/// ```
#[walrus_instr]
#[derive(Clone, Debug)]
pub enum Instr {
    /// `block ... end`
    #[walrus(skip_builder)]
    Block {
        /// The id of this `block` instruction's inner `InstrSeq`.
        seq: InstrSeqId,
    },

    /// `loop ... end`
    #[walrus(skip_builder)]
    Loop {
        /// The id of this `loop` instruction's inner `InstrSeq`.
        seq: InstrSeqId,
    },

    /// `call`
    Call {
        /// The function being invoked.
        func: FunctionId,
    },

    /// `call_indirect`
    CallIndirect {
        /// The type signature of the function we're calling
        ty: TypeId,
        /// The table which this indirect call indexes into
        table: TableId,
    },

    /// `local.get n`
    LocalGet {
        /// The local being got.
        local: LocalId,
    },

    /// `local.set n`
    LocalSet {
        /// The local being set.
        local: LocalId,
    },

    /// `local.tee n`
    LocalTee {
        /// The local being set.
        local: LocalId,
    },

    /// `global.get n`
    GlobalGet {
        /// The global being got.
        global: GlobalId,
    },

    /// `global.set n`
    GlobalSet {
        /// The global being set.
        global: GlobalId,
    },

    /// `*.const`
    Const {
        /// The constant value.
        value: Value,
    },

    /// Binary operations, those requiring two operands
    Binop {
        /// The operation being performed
        #[walrus(skip_visit)]
        op: BinaryOp,
    },

    /// Unary operations, those requiring one operand
    Unop {
        /// The operation being performed
        #[walrus(skip_visit)]
        op: UnaryOp,
    },

    /// `select`
    Select {
        /// Optionally listed type that the `select` instruction is expected to
        /// produce, used in subtyping relations with the gc proposal.
        #[walrus(skip_visit)]
        ty: Option<ValType>,
    },

    /// `unreachable`
    Unreachable {},

    /// `br`
    Br {
        /// The target block to branch to.
        #[walrus(skip_visit)] // should have already been visited
        block: InstrSeqId,
    },

    /// `br_if`
    BrIf {
        /// The target block to branch to when the condition is met.
        #[walrus(skip_visit)] // should have already been visited
        block: InstrSeqId,
    },

    /// `if <consequent> else <alternative> end`
    #[walrus(skip_builder)]
    IfElse {
        /// The block to execute when the condition is true.
        consequent: InstrSeqId,
        /// The block to execute when the condition is false.
        alternative: InstrSeqId,
    },

    /// `br_table`
    BrTable {
        /// The table of target blocks.
        #[walrus(skip_visit)] // should have already been visited
        blocks: Box<[InstrSeqId]>,
        /// The block that is branched to by default when the branch index is
        /// out of the table's bounds.
        #[walrus(skip_visit)] // should have already been visited
        default: InstrSeqId,
    },

    /// `drop`
    Drop {},

    /// `return`
    Return {},

    /// `memory.size`
    MemorySize {
        /// The memory we're fetching the current size of.
        memory: MemoryId,
    },

    /// `memory.grow`
    MemoryGrow {
        /// The memory we're growing.
        memory: MemoryId,
    },

    /// `memory.init`
    MemoryInit {
        /// The memory we're copying data into.
        memory: MemoryId,
        /// The data to copy in
        data: DataId,
    },

    /// `data.drop`
    DataDrop {
        /// The data to drop
        data: DataId,
    },

    /// `memory.copy`
    MemoryCopy {
        /// The source memory
        src: MemoryId,
        /// The destination memory
        dst: MemoryId,
    },

    /// `memory.fill`
    MemoryFill {
        /// The memory to fill
        memory: MemoryId,
    },

    /// `*.load`
    ///
    /// Loading a value from memory.
    Load {
        /// The memory we're loading from.
        memory: MemoryId,
        /// The kind of memory load this is performing
        #[walrus(skip_visit)]
        kind: LoadKind,
        /// The alignment and offset of this memory load
        #[walrus(skip_visit)]
        arg: MemArg,
    },

    /// `*.store`
    ///
    /// Storing a value to memory.
    Store {
        /// The memory we're storing to
        memory: MemoryId,
        /// The kind of memory store this is performing
        #[walrus(skip_visit)]
        kind: StoreKind,
        /// The alignment and offset of this memory store
        #[walrus(skip_visit)]
        arg: MemArg,
    },

    /// An atomic read/modify/write operation.
    AtomicRmw {
        /// The memory we're modifying
        memory: MemoryId,
        /// The atomic operation being performed
        #[walrus(skip_visit)]
        op: AtomicOp,
        /// The width of the atomic operation being performed
        #[walrus(skip_visit)]
        width: AtomicWidth,
        /// The alignment and offset from the base address
        #[walrus(skip_visit)]
        arg: MemArg,
    },

    /// An atomic compare-and-exchange operation.
    Cmpxchg {
        /// The memory we're modifying
        memory: MemoryId,
        /// The width of the atomic operation being performed
        #[walrus(skip_visit)]
        width: AtomicWidth,
        /// The alignment and offset from the base address
        #[walrus(skip_visit)]
        arg: MemArg,
    },

    /// The `atomic.notify` instruction to wake up threads.
    AtomicNotify {
        /// The memory we're notifying through
        memory: MemoryId,
        /// The alignment and offset from the base address
        #[walrus(skip_visit)]
        arg: MemArg,
    },

    /// The `*.atomic.wait` instruction to block threads.
    AtomicWait {
        /// The memory we're waiting through.
        memory: MemoryId,
        /// The alignment and offset from the base address.
        #[walrus(skip_visit)]
        arg: MemArg,
        /// Whether or not this is an `i32` or `i64` wait.
        #[walrus(skip_visit)]
        sixty_four: bool,
    },

    /// The `atomic.fence` instruction
    AtomicFence {},

    /// `table.get`
    TableGet {
        /// The table we're fetching from.
        table: TableId,
    },

    /// `table.set`
    TableSet {
        /// The table we're storing to.
        table: TableId,
    },

    /// `table.grow`
    TableGrow {
        /// The table we're growing
        table: TableId,
    },

    /// `table.size`
    TableSize {
        /// The table we're getting the size of
        table: TableId,
    },

    /// `table.fill`
    TableFill {
        /// The table we're filling
        table: TableId,
    },

    /// `ref.null $ty`
    RefNull {
        /// The type of null that we're producing
        #[walrus(skip_visit)]
        ty: ValType,
    },

    /// `ref.is_null`
    RefIsNull {},

    /// `ref.func`
    RefFunc {
        /// The function that this instruction is referencing
        func: FunctionId,
    },

    /// `v128.bitselect`
    V128Bitselect {},

    /// `i8x16.swizzle`
    I8x16Swizzle {},

    /// `i8x16.shuffle`
    I8x16Shuffle {
        /// The indices that are used to create the final vector of this
        /// instruction
        #[walrus(skip_visit)]
        indices: ShuffleIndices,
    },

    /// Various instructions to load a simd vector from memory
    LoadSimd {
        /// The memory we're loading from.
        memory: MemoryId,
        /// The size of load this is performing
        #[walrus(skip_visit)]
        kind: LoadSimdKind,
        /// The alignment and offset of this memory load
        #[walrus(skip_visit)]
        arg: MemArg,
    },

    /// `table.init`
    TableInit {
        /// The table we're copying into.
        table: TableId,
        /// The element we're getting items from.
        elem: ElementId,
    },

    /// `elem.drop`
    ElemDrop {
        /// The elem segment to drop
        elem: ElementId,
    },

    /// `table.copy`
    TableCopy {
        /// The source table
        src: TableId,
        /// The destination table
        dst: TableId,
    },
}

/// Argument in `I8x16Shuffle` of lane indices to select
pub type ShuffleIndices = [u8; 16];

/// Constant values that can show up in WebAssembly
#[derive(Debug, Clone, Copy)]
pub enum Value {
    /// A constant 32-bit integer
    I32(i32),
    /// A constant 64-bit integer
    I64(i64),
    /// A constant 32-bit float
    F32(f32),
    /// A constant 64-bit float
    F64(f64),
    /// A constant 128-bit vector register
    V128(u128),
}

impl Value {
    pub(crate) fn emit(&self, encoder: &mut Encoder) {
        match *self {
            Value::I32(n) => {
                encoder.byte(0x41); // i32.const
                encoder.i32(n);
            }
            Value::I64(n) => {
                encoder.byte(0x42); // i64.const
                encoder.i64(n);
            }
            Value::F32(n) => {
                encoder.byte(0x43); // f32.const
                encoder.f32(n);
            }
            Value::F64(n) => {
                encoder.byte(0x44); // f64.const
                encoder.f64(n);
            }
            Value::V128(n) => {
                encoder.raw(&[0xfd, 0x0c]); // v128.const
                for i in 0..16 {
                    encoder.byte((n >> (i * 8)) as u8);
                }
            }
        }
    }
}

impl fmt::Display for Value {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Value::I32(i) => i.fmt(f),
            Value::I64(i) => i.fmt(f),
            Value::F32(i) => i.fmt(f),
            Value::F64(i) => i.fmt(f),
            Value::V128(i) => i.fmt(f),
        }
    }
}

/// Possible binary operations in wasm
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug)]
pub enum BinaryOp {
    I32Eq,
    I32Ne,
    I32LtS,
    I32LtU,
    I32GtS,
    I32GtU,
    I32LeS,
    I32LeU,
    I32GeS,
    I32GeU,

    I64Eq,
    I64Ne,
    I64LtS,
    I64LtU,
    I64GtS,
    I64GtU,
    I64LeS,
    I64LeU,
    I64GeS,
    I64GeU,

    F32Eq,
    F32Ne,
    F32Lt,
    F32Gt,
    F32Le,
    F32Ge,

    F64Eq,
    F64Ne,
    F64Lt,
    F64Gt,
    F64Le,
    F64Ge,

    I32Add,
    I32Sub,
    I32Mul,
    I32DivS,
    I32DivU,
    I32RemS,
    I32RemU,
    I32And,
    I32Or,
    I32Xor,
    I32Shl,
    I32ShrS,
    I32ShrU,
    I32Rotl,
    I32Rotr,

    I64Add,
    I64Sub,
    I64Mul,
    I64DivS,
    I64DivU,
    I64RemS,
    I64RemU,
    I64And,
    I64Or,
    I64Xor,
    I64Shl,
    I64ShrS,
    I64ShrU,
    I64Rotl,
    I64Rotr,

    F32Add,
    F32Sub,
    F32Mul,
    F32Div,
    F32Min,
    F32Max,
    F32Copysign,

    F64Add,
    F64Sub,
    F64Mul,
    F64Div,
    F64Min,
    F64Max,
    F64Copysign,

    I8x16ReplaceLane { idx: u8 },
    I16x8ReplaceLane { idx: u8 },
    I32x4ReplaceLane { idx: u8 },
    I64x2ReplaceLane { idx: u8 },
    F32x4ReplaceLane { idx: u8 },
    F64x2ReplaceLane { idx: u8 },

    I8x16Eq,
    I8x16Ne,
    I8x16LtS,
    I8x16LtU,
    I8x16GtS,
    I8x16GtU,
    I8x16LeS,
    I8x16LeU,
    I8x16GeS,
    I8x16GeU,

    I16x8Eq,
    I16x8Ne,
    I16x8LtS,
    I16x8LtU,
    I16x8GtS,
    I16x8GtU,
    I16x8LeS,
    I16x8LeU,
    I16x8GeS,
    I16x8GeU,

    I32x4Eq,
    I32x4Ne,
    I32x4LtS,
    I32x4LtU,
    I32x4GtS,
    I32x4GtU,
    I32x4LeS,
    I32x4LeU,
    I32x4GeS,
    I32x4GeU,

    I64x2Eq,
    I64x2Ne,
    I64x2LtS,
    I64x2GtS,
    I64x2LeS,
    I64x2GeS,

    F32x4Eq,
    F32x4Ne,
    F32x4Lt,
    F32x4Gt,
    F32x4Le,
    F32x4Ge,

    F64x2Eq,
    F64x2Ne,
    F64x2Lt,
    F64x2Gt,
    F64x2Le,
    F64x2Ge,

    V128And,
    V128Or,
    V128Xor,
    V128AndNot,

    I8x16Shl,
    I8x16ShrS,
    I8x16ShrU,
    I8x16Add,
    I8x16AddSatS,
    I8x16AddSatU,
    I8x16Sub,
    I8x16SubSatS,
    I8x16SubSatU,
    I16x8Shl,
    I16x8ShrS,
    I16x8ShrU,
    I16x8Add,
    I16x8AddSatS,
    I16x8AddSatU,
    I16x8Sub,
    I16x8SubSatS,
    I16x8SubSatU,
    I16x8Mul,
    I32x4Shl,
    I32x4ShrS,
    I32x4ShrU,
    I32x4Add,
    I32x4Sub,
    I32x4Mul,
    I64x2Shl,
    I64x2ShrS,
    I64x2ShrU,
    I64x2Add,
    I64x2Sub,
    I64x2Mul,

    F32x4Add,
    F32x4Sub,
    F32x4Mul,
    F32x4Div,
    F32x4Min,
    F32x4Max,
    F32x4PMin,
    F32x4PMax,
    F64x2Add,
    F64x2Sub,
    F64x2Mul,
    F64x2Div,
    F64x2Min,
    F64x2Max,
    F64x2PMin,
    F64x2PMax,

    I8x16NarrowI16x8S,
    I8x16NarrowI16x8U,
    I16x8NarrowI32x4S,
    I16x8NarrowI32x4U,
    I8x16RoundingAverageU,
    I16x8RoundingAverageU,

    I8x16MinS,
    I8x16MinU,
    I8x16MaxS,
    I8x16MaxU,
    I16x8MinS,
    I16x8MinU,
    I16x8MaxS,
    I16x8MaxU,
    I32x4MinS,
    I32x4MinU,
    I32x4MaxS,
    I32x4MaxU,

    I32x4DotI16x8S,

    I16x8Q15MulrSatS,
    I16x8ExtMulLowI8x16S,
    I16x8ExtMulHighI8x16S,
    I16x8ExtMulLowI8x16U,
    I16x8ExtMulHighI8x16U,
    I32x4ExtMulLowI16x8S,
    I32x4ExtMulHighI16x8S,
    I32x4ExtMulLowI16x8U,
    I32x4ExtMulHighI16x8U,
    I64x2ExtMulLowI32x4S,
    I64x2ExtMulHighI32x4S,
    I64x2ExtMulLowI32x4U,
    I64x2ExtMulHighI32x4U,
}

/// Possible unary operations in wasm
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug)]
pub enum UnaryOp {
    I32Eqz,
    I32Clz,
    I32Ctz,
    I32Popcnt,

    I64Eqz,
    I64Clz,
    I64Ctz,
    I64Popcnt,

    F32Abs,
    F32Neg,
    F32Ceil,
    F32Floor,
    F32Trunc,
    F32Nearest,
    F32Sqrt,

    F64Abs,
    F64Neg,
    F64Ceil,
    F64Floor,
    F64Trunc,
    F64Nearest,
    F64Sqrt,

    I32WrapI64,
    I32TruncSF32,
    I32TruncUF32,
    I32TruncSF64,
    I32TruncUF64,
    I64ExtendSI32,
    I64ExtendUI32,
    I64TruncSF32,
    I64TruncUF32,
    I64TruncSF64,
    I64TruncUF64,

    F32ConvertSI32,
    F32ConvertUI32,
    F32ConvertSI64,
    F32ConvertUI64,
    F32DemoteF64,
    F64ConvertSI32,
    F64ConvertUI32,
    F64ConvertSI64,
    F64ConvertUI64,
    F64PromoteF32,

    I32ReinterpretF32,
    I64ReinterpretF64,
    F32ReinterpretI32,
    F64ReinterpretI64,

    I32Extend8S,
    I32Extend16S,
    I64Extend8S,
    I64Extend16S,
    I64Extend32S,

    I8x16Splat,
    I8x16ExtractLaneS { idx: u8 },
    I8x16ExtractLaneU { idx: u8 },
    I16x8Splat,
    I16x8ExtractLaneS { idx: u8 },
    I16x8ExtractLaneU { idx: u8 },
    I32x4Splat,
    I32x4ExtractLane { idx: u8 },
    I64x2Splat,
    I64x2ExtractLane { idx: u8 },
    F32x4Splat,
    F32x4ExtractLane { idx: u8 },
    F64x2Splat,
    F64x2ExtractLane { idx: u8 },

    V128Not,
    V128AnyTrue,

    I8x16Abs,
    I8x16Popcnt,
    I8x16Neg,
    I8x16AllTrue,
    I8x16Bitmask,
    I16x8Abs,
    I16x8Neg,
    I16x8AllTrue,
    I16x8Bitmask,
    I32x4Abs,
    I32x4Neg,
    I32x4AllTrue,
    I32x4Bitmask,
    I64x2Abs,
    I64x2Neg,
    I64x2AllTrue,
    I64x2Bitmask,

    F32x4Abs,
    F32x4Neg,
    F32x4Sqrt,
    F32x4Ceil,
    F32x4Floor,
    F32x4Trunc,
    F32x4Nearest,
    F64x2Abs,
    F64x2Neg,
    F64x2Sqrt,
    F64x2Ceil,
    F64x2Floor,
    F64x2Trunc,
    F64x2Nearest,

    I16x8ExtAddPairwiseI8x16S,
    I16x8ExtAddPairwiseI8x16U,
    I32x4ExtAddPairwiseI16x8S,
    I32x4ExtAddPairwiseI16x8U,
    I64x2ExtendLowI32x4S,
    I64x2ExtendHighI32x4S,
    I64x2ExtendLowI32x4U,
    I64x2ExtendHighI32x4U,
    I32x4TruncSatF64x2SZero,
    I32x4TruncSatF64x2UZero,
    F64x2ConvertLowI32x4S,
    F64x2ConvertLowI32x4U,
    F32x4DemoteF64x2Zero,
    F64x2PromoteLowF32x4,

    I32x4TruncSatF32x4S,
    I32x4TruncSatF32x4U,
    F32x4ConvertI32x4S,
    F32x4ConvertI32x4U,

    I32TruncSSatF32,
    I32TruncUSatF32,
    I32TruncSSatF64,
    I32TruncUSatF64,
    I64TruncSSatF32,
    I64TruncUSatF32,
    I64TruncSSatF64,
    I64TruncUSatF64,

    I16x8WidenLowI8x16S,
    I16x8WidenLowI8x16U,
    I16x8WidenHighI8x16S,
    I16x8WidenHighI8x16U,
    I32x4WidenLowI16x8S,
    I32x4WidenLowI16x8U,
    I32x4WidenHighI16x8S,
    I32x4WidenHighI16x8U,
}

/// The different kinds of load instructions that are part of a `Load` IR node
#[derive(Debug, Copy, Clone)]
#[allow(missing_docs)]
pub enum LoadKind {
    // TODO: much of this is probably redundant with type information already
    // ambiently available, we probably want to trim this down to just "value"
    // and then maybe some sign extensions. We'd then use the type of the node
    // to figure out what kind of load it actually is.
    I32 { atomic: bool },
    I64 { atomic: bool },
    F32,
    F64,
    V128,
    I32_8 { kind: ExtendedLoad },
    I32_16 { kind: ExtendedLoad },
    I64_8 { kind: ExtendedLoad },
    I64_16 { kind: ExtendedLoad },
    I64_32 { kind: ExtendedLoad },
}

/// The different kinds of load instructions that are part of a `LoadSimd` IR node
#[derive(Debug, Copy, Clone)]
#[allow(missing_docs)]
pub enum LoadSimdKind {
    Splat8,
    Splat16,
    Splat32,
    Splat64,

    V128Load8x8S,
    V128Load8x8U,
    V128Load16x4S,
    V128Load16x4U,
    V128Load32x2S,
    V128Load32x2U,
    V128Load32Zero,
    V128Load64Zero,

    V128Load8Lane(u8),
    V128Load16Lane(u8),
    V128Load32Lane(u8),
    V128Load64Lane(u8),
    V128Store8Lane(u8),
    V128Store16Lane(u8),
    V128Store32Lane(u8),
    V128Store64Lane(u8),
}

/// The kinds of extended loads which can happen
#[derive(Debug, Copy, Clone)]
#[allow(missing_docs)]
pub enum ExtendedLoad {
    SignExtend,
    ZeroExtend,
    ZeroExtendAtomic,
}

impl LoadKind {
    /// Returns the number of bytes loaded
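    ///
    /// For example (illustrative):
    ///
    /// ```ignore
    /// assert_eq!(LoadKind::I32_8 { kind: ExtendedLoad::ZeroExtend }.width(), 1);
    /// assert_eq!(LoadKind::V128.width(), 16);
    /// ```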
    pub fn width(&self) -> u32 {
        use self::LoadKind::*;
        match self {
            I32_8 { .. } | I64_8 { .. } => 1,
            I32_16 { .. } | I64_16 { .. } => 2,
            I32 { .. } | F32 | I64_32 { .. } => 4,
            I64 { .. } | F64 => 8,
            V128 => 16,
        }
    }

    /// Returns whether this is an atomic load
    pub fn atomic(&self) -> bool {
        use self::LoadKind::*;
        match self {
            I32_8 { kind }
            | I32_16 { kind }
            | I64_8 { kind }
            | I64_16 { kind }
            | I64_32 { kind } => kind.atomic(),
            I32 { atomic } | I64 { atomic } => *atomic,
            F32 | F64 | V128 => false,
        }
    }
}

impl ExtendedLoad {
    /// Returns whether this is an atomic extended load
    pub fn atomic(&self) -> bool {
        match self {
            ExtendedLoad::SignExtend | ExtendedLoad::ZeroExtend => false,
            ExtendedLoad::ZeroExtendAtomic => true,
        }
    }
}

/// The different kinds of store instructions that are part of a `Store` IR node
#[derive(Debug, Copy, Clone)]
#[allow(missing_docs)]
pub enum StoreKind {
    I32 { atomic: bool },
    I64 { atomic: bool },
    F32,
    F64,
    V128,
    I32_8 { atomic: bool },
    I32_16 { atomic: bool },
    I64_8 { atomic: bool },
    I64_16 { atomic: bool },
    I64_32 { atomic: bool },
}

impl StoreKind {
    /// Returns the number of bytes stored
    pub fn width(&self) -> u32 {
        use self::StoreKind::*;
        match self {
            I32_8 { .. } | I64_8 { .. } => 1,
            I32_16 { .. } | I64_16 { .. } => 2,
            I32 { .. } | F32 | I64_32 { .. } => 4,
            I64 { .. } | F64 => 8,
            V128 => 16,
        }
    }

    /// Returns whether this is an atomic store
    pub fn atomic(&self) -> bool {
        use self::StoreKind::*;

        match self {
            I32 { atomic }
            | I64 { atomic }
            | I32_8 { atomic }
            | I32_16 { atomic }
            | I64_8 { atomic }
            | I64_16 { atomic }
            | I64_32 { atomic } => *atomic,
            F32 | F64 | V128 => false,
        }
    }
}

/// Arguments to memory operations, containing a constant offset from a dynamic
/// address as well as a predicted alignment.
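///
/// For example (illustrative), a plain `i32.load offset=8` with natural 4-byte
/// alignment would be described as:
///
/// ```ignore
/// MemArg { align: 4, offset: 8 }
/// ```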
#[derive(Debug, Copy, Clone)]
pub struct MemArg {
    /// The alignment of the memory operation, must be a power of two
    pub align: u32,
    /// The offset of the memory operation, in bytes from the source address
    pub offset: u32,
}

/// The different kinds of atomic rmw operations
#[derive(Debug, Copy, Clone)]
#[allow(missing_docs)]
pub enum AtomicOp {
    Add,
    Sub,
    And,
    Or,
    Xor,
    Xchg,
}

/// The different widths of atomic operations
#[derive(Debug, Copy, Clone)]
#[allow(missing_docs)]
pub enum AtomicWidth {
    I32,
    I32_8,
    I32_16,
    I64,
    I64_8,
    I64_16,
    I64_32,
}

impl AtomicWidth {
    /// Returns the size, in bytes, of this atomic operation
    pub fn bytes(&self) -> u32 {
        use self::AtomicWidth::*;
        match self {
            I32_8 | I64_8 => 1,
            I32_16 | I64_16 => 2,
            I32 | I64_32 => 4,
            I64 => 8,
        }
    }
}

impl Instr {
    /// Are any instructions that follow this instruction (within the current
    /// block) unreachable?
    ///
    /// Returns `true` for unconditional branches (`br`, `return`, etc...) and
    /// `unreachable`. Returns `false` for all other "normal" instructions
    /// (`i32.add`, etc...).
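    ///
    /// For example (illustrative; `Return` and `Drop` are the structs that
    /// `#[walrus_instr]` generates for those variants):
    ///
    /// ```ignore
    /// assert!(Instr::Return(Return {}).following_instructions_are_unreachable());
    /// assert!(!Instr::Drop(Drop {}).following_instructions_are_unreachable());
    /// ```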
    pub fn following_instructions_are_unreachable(&self) -> bool {
        match *self {
            Instr::Unreachable(..) | Instr::Br(..) | Instr::BrTable(..) | Instr::Return(..) => true,

            // No `_` arm to make sure that we properly update this function as
            // we add support for new instructions.
            Instr::Block(..)
            | Instr::Loop(..)
            | Instr::Call(..)
            | Instr::LocalGet(..)
            | Instr::LocalSet(..)
            | Instr::LocalTee(..)
            | Instr::GlobalGet(..)
            | Instr::GlobalSet(..)
            | Instr::Const(..)
            | Instr::Binop(..)
            | Instr::Unop(..)
            | Instr::Select(..)
            | Instr::BrIf(..)
            | Instr::IfElse(..)
            | Instr::MemorySize(..)
            | Instr::MemoryGrow(..)
            | Instr::MemoryInit(..)
            | Instr::DataDrop(..)
            | Instr::MemoryCopy(..)
            | Instr::MemoryFill(..)
            | Instr::CallIndirect(..)
            | Instr::Load(..)
            | Instr::Store(..)
            | Instr::AtomicRmw(..)
            | Instr::Cmpxchg(..)
            | Instr::AtomicNotify(..)
            | Instr::AtomicWait(..)
            | Instr::TableGet(..)
            | Instr::TableSet(..)
            | Instr::TableGrow(..)
            | Instr::TableSize(..)
            | Instr::TableFill(..)
            | Instr::RefNull(..)
            | Instr::RefIsNull(..)
            | Instr::RefFunc(..)
            | Instr::V128Bitselect(..)
            | Instr::I8x16Swizzle(..)
            | Instr::I8x16Shuffle(..)
            | Instr::LoadSimd(..)
            | Instr::AtomicFence(..)
            | Instr::TableInit(..)
            | Instr::TableCopy(..)
            | Instr::ElemDrop(..)
            | Instr::Drop(..) => false,
        }
    }
}

/// Anything that can be visited by a `Visitor`.
pub(crate) trait Visit<'instr> {
    /// Visit this thing with the given visitor.
    fn visit<V>(&self, visitor: &mut V)
    where
        V: Visitor<'instr>;
}

/// Anything that can be mutably visited by a `VisitorMut`.
pub(crate) trait VisitMut {
    /// Visit this thing with the given visitor.
    fn visit_mut<V>(&mut self, visitor: &mut V)
    where
        V: VisitorMut;
}

impl<'instr> Visit<'instr> for InstrSeq {
    fn visit<V>(&self, visitor: &mut V)
    where
        V: Visitor<'instr>,
    {
        if let InstrSeqType::MultiValue(ref ty) = self.ty {
            visitor.visit_type_id(ty);
        }
    }
}

impl VisitMut for InstrSeq {
    fn visit_mut<V>(&mut self, visitor: &mut V)
    where
        V: VisitorMut,
    {
        if let InstrSeqType::MultiValue(ref mut ty) = self.ty {
            visitor.visit_type_id_mut(ty);
        }
    }
}