1 //! This module contains the bulk of the interesting code performing the translation between
2 //! WebAssembly and Cranelift IR.
3 //!
4 //! The translation is done in one pass, opcode by opcode. Two main data structures are used during
5 //! code translation: the value stack and the control stack. The value stack mimics the execution
6 //! of the WebAssembly stack machine: each instruction result is pushed onto the stack and
7 //! instruction arguments are popped off the stack. Similarly, when encountering a control flow
8 //! block, it is pushed onto the control stack and popped off when encountering the corresponding
9 //! `End`.
10 //!
11 //! Another data structure, the translation state, records whether the current code is reachable
12 //! and whether a return needs to be inserted at the end of the function.
13 //!
14 //! Some of the WebAssembly instructions need information about the environment for which they
15 //! are being translated:
16 //!
17 //! - the loads and stores need the memory base address;
18 //! - the `get_global` and `set_global` instructions depend on how the globals are implemented;
19 //! - `memory.size` and `memory.grow` are runtime functions;
20 //! - `call_indirect` has to translate the function index into the address of where this
21 //!    function is;
22 //!
23 //! That is why `translate_function_body` takes an object implementing the `FuncEnvironment`
24 //! trait as argument.
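//!
//! As a minimal sketch of how this module is driven (a hypothetical driver loop, not the actual
//! code in `translate_function_body`; `ops`, `builder`, `state` and `environ` are assumed to be
//! set up by the caller):
//!
//! ```ignore
//! // `ops` yields the `wasmparser::Operator`s of one function body, `builder` is a
//! // `cranelift_frontend::FunctionBuilder` positioned in the entry block, `state` is a fresh
//! // `TranslationState`, and `environ` implements `FuncEnvironment`.
//! for op in ops {
//!     translate_operator(&op, &mut builder, &mut state, &mut environ)?;
//! }
//! // For `i32.const 1; i32.const 2; i32.add`, the value stack evolves as
//! // [] -> [v1] -> [v1, v2] -> [v3], where `v3 = iadd v1, v2` in the generated IR.
//! ```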
25 use super::{hash_map, HashMap};
26 use crate::environ::{FuncEnvironment, GlobalVariable, ReturnMode, WasmResult};
27 use crate::state::{ControlStackFrame, TranslationState};
28 use crate::translation_utils::{
29     blocktype_to_type, f32_translation, f64_translation, num_return_values,
30 };
31 use crate::translation_utils::{FuncIndex, MemoryIndex, SignatureIndex, TableIndex};
32 use crate::wasm_unsupported;
33 use core::{i32, u32};
34 use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
35 use cranelift_codegen::ir::types::*;
36 use cranelift_codegen::ir::{self, InstBuilder, JumpTableData, MemFlags, Value, ValueLabel};
37 use cranelift_codegen::packed_option::ReservedValue;
38 use cranelift_frontend::{FunctionBuilder, Variable};
39 use wasmparser::{MemoryImmediate, Operator};
40 
41 // Clippy warns about "flags: _" but it's important to document that the flags field is ignored
42 #[cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))]
43 /// Translates a single wasm operator into Cranelift IR instructions, updating the value and
44 /// control stacks as needed.
45 pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
46     op: &Operator,
47     builder: &mut FunctionBuilder,
48     state: &mut TranslationState,
49     environ: &mut FE,
50 ) -> WasmResult<()> {
51     if !state.reachable {
52         translate_unreachable_operator(&op, builder, state);
53         return Ok(());
54     }
55 
56     // This big match treats all Wasm code operators.
57     match op {
58         /********************************** Locals ****************************************
59          *  `get_local` and `set_local` access non-SSA variables and will completely
60          *  disappear from the generated Cranelift code.
61          ***********************************************************************************/
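        // For example, `get_local 0; get_local 0; i32.add; set_local 1` ends up as plain SSA such
        // as `v1 = iadd v0, v0` (an illustrative sketch, not exact output); the locals themselves
        // are resolved away by the `FunctionBuilder`'s `use_var`/`def_var` SSA construction.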
62         Operator::GetLocal { local_index } => {
63             let val = builder.use_var(Variable::with_u32(*local_index));
64             state.push1(val);
65             let label = ValueLabel::from_u32(*local_index);
66             builder.set_val_label(val, label);
67         }
68         Operator::SetLocal { local_index } => {
69             let val = state.pop1();
70             builder.def_var(Variable::with_u32(*local_index), val);
71             let label = ValueLabel::from_u32(*local_index);
72             builder.set_val_label(val, label);
73         }
74         Operator::TeeLocal { local_index } => {
75             let val = state.peek1();
76             builder.def_var(Variable::with_u32(*local_index), val);
77             let label = ValueLabel::from_u32(*local_index);
78             builder.set_val_label(val, label);
79         }
80         /********************************** Globals ****************************************
81          *  `get_global` and `set_global` are handled by the environment.
82          ***********************************************************************************/
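        // For a memory-backed global, the environment describes it as a base `GlobalValue` plus an
        // offset, so a `get_global` lowers to roughly `vaddr = global_value gv` followed by
        // `v = load.i32 notrap aligned vaddr+offset` (for an `i32` global; an illustrative sketch,
        // since the exact IR depends on the `FuncEnvironment` implementation).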
83         Operator::GetGlobal { global_index } => {
84             let val = match state.get_global(builder.func, *global_index, environ)? {
85                 GlobalVariable::Const(val) => val,
86                 GlobalVariable::Memory { gv, offset, ty } => {
87                     let addr = builder.ins().global_value(environ.pointer_type(), gv);
88                     let flags = ir::MemFlags::trusted();
89                     builder.ins().load(ty, flags, addr, offset)
90                 }
91             };
92             state.push1(val);
93         }
94         Operator::SetGlobal { global_index } => {
95             match state.get_global(builder.func, *global_index, environ)? {
96                 GlobalVariable::Const(_) => panic!("global #{} is a constant", *global_index),
97                 GlobalVariable::Memory { gv, offset, ty } => {
98                     let addr = builder.ins().global_value(environ.pointer_type(), gv);
99                     let flags = ir::MemFlags::trusted();
100                     let val = state.pop1();
101                     debug_assert_eq!(ty, builder.func.dfg.value_type(val));
102                     builder.ins().store(flags, val, addr, offset);
103                 }
104             }
105         }
106         /********************************* Stack misc ***************************************
107          *  `drop`, `nop`, `unreachable` and `select`.
108          ***********************************************************************************/
109         Operator::Drop => {
110             state.pop1();
111         }
112         Operator::Select => {
113             let (arg1, arg2, cond) = state.pop3();
114             state.push1(builder.ins().select(cond, arg1, arg2));
115         }
116         Operator::Nop => {
117             // We do nothing
118         }
119         Operator::Unreachable => {
120             builder.ins().trap(ir::TrapCode::UnreachableCodeReached);
121             state.reachable = false;
122         }
123         /***************************** Control flow blocks **********************************
124          *  When starting a control flow block, we create a new `Ebb` that will hold the code
125          *  after the block, and we push a frame on the control stack. Depending on the type
126          *  of block, we create a new `Ebb` for the body of the block with an associated
127          *  jump instruction.
128          *
129          *  The `End` instruction pops the last control frame from the control stack, seals
130          *  the destination block (since `br` instructions targeting it only appear inside the
131          *  block and have already been translated) and modifies the value stack to use the
132          *  destination `Ebb`'s possible argument values.
133          ***********************************************************************************/
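        // For example, a Wasm `block (result i32) ... end` creates a continuation `Ebb` with one
        // `i32` parameter; the `End` emits `jump ebb_next(vN)`, switches to and seals `ebb_next`,
        // and leaves its parameter on the value stack as the block's result (an illustrative
        // sketch, not exact CLIF output).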
134         Operator::Block { ty } => {
135             let next = builder.create_ebb();
136             if let Some(ty_cre) = blocktype_to_type(*ty)? {
137                 builder.append_ebb_param(next, ty_cre);
138             }
139             state.push_block(next, num_return_values(*ty)?);
140         }
141         Operator::Loop { ty } => {
142             let loop_body = builder.create_ebb();
143             let next = builder.create_ebb();
144             if let Some(ty_cre) = blocktype_to_type(*ty)? {
145                 builder.append_ebb_param(next, ty_cre);
146             }
147             builder.ins().jump(loop_body, &[]);
148             state.push_loop(loop_body, next, num_return_values(*ty)?);
149             builder.switch_to_block(loop_body);
150             environ.translate_loop_header(builder.cursor())?;
151         }
152         Operator::If { ty } => {
153             let val = state.pop1();
154             let if_not = builder.create_ebb();
155             let jump_inst = builder.ins().brz(val, if_not, &[]);
156 
157             #[cfg(feature = "basic-blocks")]
158             {
159                 let next_ebb = builder.create_ebb();
160                 builder.ins().jump(next_ebb, &[]);
161                 builder.seal_block(next_ebb); // Only predecessor is the current block.
162                 builder.switch_to_block(next_ebb);
163             }
164 
165             // Here we append an argument to an Ebb targeted by an argumentless jump instruction,
166             // but in fact there are two cases:
167             // - either the If does not have an Else clause, in which case ty = EmptyBlock
168             //   and we add nothing;
169             // - or the If has an Else clause, in which case the destination of this jump
170             //   instruction will be changed later when we translate the Else operator.
171             if let Some(ty_cre) = blocktype_to_type(*ty)? {
172                 builder.append_ebb_param(if_not, ty_cre);
173             }
174             state.push_if(jump_inst, if_not, num_return_values(*ty)?);
175         }
176         Operator::Else => {
177             // We take the control frame pushed by the If, jump to its destination with the
178             // current return values (this ends the then clause), then create a new ebb for the
179             // else body and retarget the If's conditional branch to it.
180             let i = state.control_stack.len() - 1;
181             let (destination, return_count, branch_inst, ref mut reachable_from_top) =
182                 match state.control_stack[i] {
183                     ControlStackFrame::If {
184                         destination,
185                         num_return_values,
186                         branch_inst,
187                         reachable_from_top,
188                         ..
189                     } => (
190                         destination,
191                         num_return_values,
192                         branch_inst,
193                         reachable_from_top,
194                     ),
195                     _ => panic!("should not happen"),
196                 };
197             // The if has an else, so there's no branch to the end from the top.
198             *reachable_from_top = false;
199             builder.ins().jump(destination, state.peekn(return_count));
200             state.popn(return_count);
201             // We change the target of the branch instruction
202             let else_ebb = builder.create_ebb();
203             builder.change_jump_destination(branch_inst, else_ebb);
204             builder.seal_block(else_ebb);
205             builder.switch_to_block(else_ebb);
206         }
207         Operator::End => {
208             let frame = state.control_stack.pop().unwrap();
209             if !builder.is_unreachable() || !builder.is_pristine() {
210                 let return_count = frame.num_return_values();
211                 builder
212                     .ins()
213                     .jump(frame.following_code(), state.peekn(return_count));
214             }
215             builder.switch_to_block(frame.following_code());
216             builder.seal_block(frame.following_code());
217             // If it is a loop we also have to seal the body loop block
218             if let ControlStackFrame::Loop { header, .. } = frame {
219                 builder.seal_block(header)
220             }
221             state.stack.truncate(frame.original_stack_size());
222             state
223                 .stack
224                 .extend_from_slice(builder.ebb_params(frame.following_code()));
225         }
226         /**************************** Branch instructions *********************************
227          * The branch instructions all have as arguments a target nesting level, which
228          * corresponds to how many control stack frames we have to pop to get the
229          * destination `Ebb`.
230          *
231          * Once the destination `Ebb` is found, we sometimes have to declare a certain depth
232          * of the stack unreachable, because some branch instructions are terminators.
233          *
234          * The `br_table` case is much more complicated because Cranelift's `br_table` instruction
235          * does not support jump arguments like all the other branch instructions. That is why,
236          * in the cases where we would otherwise use jump arguments, we
237          * need to split the critical edges leaving the `br_table` by creating one `Ebb` per
238          * table destination; the `br_table` will point to these newly created `Ebb`s and these
239          * `Ebb`s contain only a jump instruction pointing to the final destination, this time with
240          * jump arguments.
241          *
242          * This system is also implemented in Cranelift's SSA construction algorithm, because
243          * `use_var` located in a destination `Ebb` of a `br_table` might trigger the addition
244          * of jump arguments in each predecessor branch instruction, one of which might be a
245          * `br_table`.
246          ***********************************************************************************/
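        // An illustrative sketch of the edge splitting described above: with one jump argument, a
        // `br_table` becomes `br_table v, ebb_default, jt` where every jump-table entry is a small
        // trampoline `Ebb` whose only instruction is `jump real_destination(vN)`, carrying the
        // argument that `br_table` itself cannot.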
247         Operator::Br { relative_depth } => {
248             let i = state.control_stack.len() - 1 - (*relative_depth as usize);
249             let (return_count, br_destination) = {
250                 let frame = &mut state.control_stack[i];
251                 // We signal that all the code that follows until the next End is unreachable
252                 frame.set_branched_to_exit();
253                 let return_count = if frame.is_loop() {
254                     0
255                 } else {
256                     frame.num_return_values()
257                 };
258                 (return_count, frame.br_destination())
259             };
260             builder
261                 .ins()
262                 .jump(br_destination, state.peekn(return_count));
263             state.popn(return_count);
264             state.reachable = false;
265         }
266         Operator::BrIf { relative_depth } => translate_br_if(*relative_depth, builder, state),
267         Operator::BrTable { table } => {
268             let (depths, default) = table.read_table()?;
269             let mut min_depth = default;
270             for depth in &*depths {
271                 if *depth < min_depth {
272                     min_depth = *depth;
273                 }
274             }
275             let jump_args_count = {
276                 let i = state.control_stack.len() - 1 - (min_depth as usize);
277                 let min_depth_frame = &state.control_stack[i];
278                 if min_depth_frame.is_loop() {
279                     0
280                 } else {
281                     min_depth_frame.num_return_values()
282                 }
283             };
284             let val = state.pop1();
285             let mut data = JumpTableData::with_capacity(depths.len());
286             if jump_args_count == 0 {
287                 // No jump arguments
288                 for depth in &*depths {
289                     let ebb = {
290                         let i = state.control_stack.len() - 1 - (*depth as usize);
291                         let frame = &mut state.control_stack[i];
292                         frame.set_branched_to_exit();
293                         frame.br_destination()
294                     };
295                     data.push_entry(ebb);
296                 }
297                 let jt = builder.create_jump_table(data);
298                 let ebb = {
299                     let i = state.control_stack.len() - 1 - (default as usize);
300                     let frame = &mut state.control_stack[i];
301                     frame.set_branched_to_exit();
302                     frame.br_destination()
303                 };
304                 builder.ins().br_table(val, ebb, jt);
305             } else {
306                 // Here we have jump arguments, but Cranelift's br_table doesn't support them,
307                 // so we proceed to split the edges going out of the br_table.
308                 let return_count = jump_args_count;
309                 let mut dest_ebb_sequence = vec![];
310                 let mut dest_ebb_map = HashMap::new();
311                 for depth in &*depths {
312                     let branch_ebb = match dest_ebb_map.entry(*depth as usize) {
313                         hash_map::Entry::Occupied(entry) => *entry.get(),
314                         hash_map::Entry::Vacant(entry) => {
315                             let ebb = builder.create_ebb();
316                             dest_ebb_sequence.push((*depth as usize, ebb));
317                             *entry.insert(ebb)
318                         }
319                     };
320                     data.push_entry(branch_ebb);
321                 }
322                 let default_branch_ebb = match dest_ebb_map.entry(default as usize) {
323                     hash_map::Entry::Occupied(entry) => *entry.get(),
324                     hash_map::Entry::Vacant(entry) => {
325                         let ebb = builder.create_ebb();
326                         dest_ebb_sequence.push((default as usize, ebb));
327                         *entry.insert(ebb)
328                     }
329                 };
330                 let jt = builder.create_jump_table(data);
331                 builder.ins().br_table(val, default_branch_ebb, jt);
332                 for (depth, dest_ebb) in dest_ebb_sequence {
333                     builder.switch_to_block(dest_ebb);
334                     builder.seal_block(dest_ebb);
335                     let real_dest_ebb = {
336                         let i = state.control_stack.len() - 1 - depth;
337                         let frame = &mut state.control_stack[i];
338                         frame.set_branched_to_exit();
339                         frame.br_destination()
340                     };
341                     builder.ins().jump(real_dest_ebb, state.peekn(return_count));
342                 }
343                 state.popn(return_count);
344             }
345             state.reachable = false;
346         }
347         Operator::Return => {
348             let (return_count, br_destination) = {
349                 let frame = &mut state.control_stack[0];
350                 frame.set_branched_to_exit();
351                 let return_count = frame.num_return_values();
352                 (return_count, frame.br_destination())
353             };
354             {
355                 let args = state.peekn(return_count);
356                 match environ.return_mode() {
357                     ReturnMode::NormalReturns => builder.ins().return_(args),
358                     ReturnMode::FallthroughReturn => builder.ins().jump(br_destination, args),
359                 };
360             }
361             state.popn(return_count);
362             state.reachable = false;
363         }
364         /************************************ Calls ****************************************
365          * The call instructions pop off their arguments from the stack and append their
366          * return values to it. `call_indirect` needs environment support because there is an
367          * argument referring to an index in the external functions table of the module.
368          ************************************************************************************/
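        // Roughly (an illustrative sketch): for a direct `call $f`, the arguments are peeked from
        // the value stack, the environment emits something like `v2 = call fn0(v0, v1)`, and the
        // call's results then replace those arguments on the stack.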
369         Operator::Call { function_index } => {
370             let (fref, num_args) = state.get_direct_func(builder.func, *function_index, environ)?;
371             let call = environ.translate_call(
372                 builder.cursor(),
373                 FuncIndex::from_u32(*function_index),
374                 fref,
375                 state.peekn(num_args),
376             )?;
377             let inst_results = builder.inst_results(call);
378             debug_assert_eq!(
379                 inst_results.len(),
380                 builder.func.dfg.signatures[builder.func.dfg.ext_funcs[fref].signature]
381                     .returns
382                     .len(),
383                 "translate_call results should match the call signature"
384             );
385             state.popn(num_args);
386             state.pushn(inst_results);
387         }
388         Operator::CallIndirect { index, table_index } => {
389             // `index` is the index of the function's signature and `table_index` is the index of
390             // the table to search the function in.
391             let (sigref, num_args) = state.get_indirect_sig(builder.func, *index, environ)?;
392             let table = state.get_table(builder.func, *table_index, environ)?;
393             let callee = state.pop1();
394             let call = environ.translate_call_indirect(
395                 builder.cursor(),
396                 TableIndex::from_u32(*table_index),
397                 table,
398                 SignatureIndex::from_u32(*index),
399                 sigref,
400                 callee,
401                 state.peekn(num_args),
402             )?;
403             let inst_results = builder.inst_results(call);
404             debug_assert_eq!(
405                 inst_results.len(),
406                 builder.func.dfg.signatures[sigref].returns.len(),
407                 "translate_call_indirect results should match the call signature"
408             );
409             state.popn(num_args);
410             state.pushn(inst_results);
411         }
412         /******************************* Memory management ***********************************
413          * Memory management is handled by environment. It is usually translated into calls to
414          * special functions.
415          ************************************************************************************/
416         Operator::MemoryGrow { reserved } => {
417             // The WebAssembly MVP only supports one linear memory, but we expect the reserved
418             // argument to be a memory index.
419             let heap_index = MemoryIndex::from_u32(*reserved);
420             let heap = state.get_heap(builder.func, *reserved, environ)?;
421             let val = state.pop1();
422             state.push1(environ.translate_memory_grow(builder.cursor(), heap_index, heap, val)?)
423         }
424         Operator::MemorySize { reserved } => {
425             let heap_index = MemoryIndex::from_u32(*reserved);
426             let heap = state.get_heap(builder.func, *reserved, environ)?;
427             state.push1(environ.translate_memory_size(builder.cursor(), heap_index, heap)?);
428         }
429         /******************************* Load instructions ***********************************
430          * Wasm specifies an integer alignment flag but we drop it in Cranelift.
431          * The memory base address is provided by the environment.
432          ************************************************************************************/
433         Operator::I32Load8U {
434             memarg: MemoryImmediate { flags: _, offset },
435         } => {
436             translate_load(*offset, ir::Opcode::Uload8, I32, builder, state, environ)?;
437         }
438         Operator::I32Load16U {
439             memarg: MemoryImmediate { flags: _, offset },
440         } => {
441             translate_load(*offset, ir::Opcode::Uload16, I32, builder, state, environ)?;
442         }
443         Operator::I32Load8S {
444             memarg: MemoryImmediate { flags: _, offset },
445         } => {
446             translate_load(*offset, ir::Opcode::Sload8, I32, builder, state, environ)?;
447         }
448         Operator::I32Load16S {
449             memarg: MemoryImmediate { flags: _, offset },
450         } => {
451             translate_load(*offset, ir::Opcode::Sload16, I32, builder, state, environ)?;
452         }
453         Operator::I64Load8U {
454             memarg: MemoryImmediate { flags: _, offset },
455         } => {
456             translate_load(*offset, ir::Opcode::Uload8, I64, builder, state, environ)?;
457         }
458         Operator::I64Load16U {
459             memarg: MemoryImmediate { flags: _, offset },
460         } => {
461             translate_load(*offset, ir::Opcode::Uload16, I64, builder, state, environ)?;
462         }
463         Operator::I64Load8S {
464             memarg: MemoryImmediate { flags: _, offset },
465         } => {
466             translate_load(*offset, ir::Opcode::Sload8, I64, builder, state, environ)?;
467         }
468         Operator::I64Load16S {
469             memarg: MemoryImmediate { flags: _, offset },
470         } => {
471             translate_load(*offset, ir::Opcode::Sload16, I64, builder, state, environ)?;
472         }
473         Operator::I64Load32S {
474             memarg: MemoryImmediate { flags: _, offset },
475         } => {
476             translate_load(*offset, ir::Opcode::Sload32, I64, builder, state, environ)?;
477         }
478         Operator::I64Load32U {
479             memarg: MemoryImmediate { flags: _, offset },
480         } => {
481             translate_load(*offset, ir::Opcode::Uload32, I64, builder, state, environ)?;
482         }
483         Operator::I32Load {
484             memarg: MemoryImmediate { flags: _, offset },
485         } => {
486             translate_load(*offset, ir::Opcode::Load, I32, builder, state, environ)?;
487         }
488         Operator::F32Load {
489             memarg: MemoryImmediate { flags: _, offset },
490         } => {
491             translate_load(*offset, ir::Opcode::Load, F32, builder, state, environ)?;
492         }
493         Operator::I64Load {
494             memarg: MemoryImmediate { flags: _, offset },
495         } => {
496             translate_load(*offset, ir::Opcode::Load, I64, builder, state, environ)?;
497         }
498         Operator::F64Load {
499             memarg: MemoryImmediate { flags: _, offset },
500         } => {
501             translate_load(*offset, ir::Opcode::Load, F64, builder, state, environ)?;
502         }
503         /****************************** Store instructions ***********************************
504          * Wasm specifies an integer alignment flag but we drop it in Cranelift.
505          * The memory base address is provided by the environment.
506          ************************************************************************************/
507         Operator::I32Store {
508             memarg: MemoryImmediate { flags: _, offset },
509         }
510         | Operator::I64Store {
511             memarg: MemoryImmediate { flags: _, offset },
512         }
513         | Operator::F32Store {
514             memarg: MemoryImmediate { flags: _, offset },
515         }
516         | Operator::F64Store {
517             memarg: MemoryImmediate { flags: _, offset },
518         } => {
519             translate_store(*offset, ir::Opcode::Store, builder, state, environ)?;
520         }
521         Operator::I32Store8 {
522             memarg: MemoryImmediate { flags: _, offset },
523         }
524         | Operator::I64Store8 {
525             memarg: MemoryImmediate { flags: _, offset },
526         } => {
527             translate_store(*offset, ir::Opcode::Istore8, builder, state, environ)?;
528         }
529         Operator::I32Store16 {
530             memarg: MemoryImmediate { flags: _, offset },
531         }
532         | Operator::I64Store16 {
533             memarg: MemoryImmediate { flags: _, offset },
534         } => {
535             translate_store(*offset, ir::Opcode::Istore16, builder, state, environ)?;
536         }
537         Operator::I64Store32 {
538             memarg: MemoryImmediate { flags: _, offset },
539         } => {
540             translate_store(*offset, ir::Opcode::Istore32, builder, state, environ)?;
541         }
542         /****************************** Nullary Operators ************************************/
543         Operator::I32Const { value } => state.push1(builder.ins().iconst(I32, i64::from(*value))),
544         Operator::I64Const { value } => state.push1(builder.ins().iconst(I64, *value)),
545         Operator::F32Const { value } => {
546             state.push1(builder.ins().f32const(f32_translation(*value)));
547         }
548         Operator::F64Const { value } => {
549             state.push1(builder.ins().f64const(f64_translation(*value)));
550         }
551         /******************************* Unary Operators *************************************/
552         Operator::I32Clz | Operator::I64Clz => {
553             let arg = state.pop1();
554             state.push1(builder.ins().clz(arg));
555         }
556         Operator::I32Ctz | Operator::I64Ctz => {
557             let arg = state.pop1();
558             state.push1(builder.ins().ctz(arg));
559         }
560         Operator::I32Popcnt | Operator::I64Popcnt => {
561             let arg = state.pop1();
562             state.push1(builder.ins().popcnt(arg));
563         }
564         Operator::I64ExtendSI32 => {
565             let val = state.pop1();
566             state.push1(builder.ins().sextend(I64, val));
567         }
568         Operator::I64ExtendUI32 => {
569             let val = state.pop1();
570             state.push1(builder.ins().uextend(I64, val));
571         }
572         Operator::I32WrapI64 => {
573             let val = state.pop1();
574             state.push1(builder.ins().ireduce(I32, val));
575         }
576         Operator::F32Sqrt | Operator::F64Sqrt => {
577             let arg = state.pop1();
578             state.push1(builder.ins().sqrt(arg));
579         }
580         Operator::F32Ceil | Operator::F64Ceil => {
581             let arg = state.pop1();
582             state.push1(builder.ins().ceil(arg));
583         }
584         Operator::F32Floor | Operator::F64Floor => {
585             let arg = state.pop1();
586             state.push1(builder.ins().floor(arg));
587         }
588         Operator::F32Trunc | Operator::F64Trunc => {
589             let arg = state.pop1();
590             state.push1(builder.ins().trunc(arg));
591         }
592         Operator::F32Nearest | Operator::F64Nearest => {
593             let arg = state.pop1();
594             state.push1(builder.ins().nearest(arg));
595         }
596         Operator::F32Abs | Operator::F64Abs => {
597             let val = state.pop1();
598             state.push1(builder.ins().fabs(val));
599         }
600         Operator::F32Neg | Operator::F64Neg => {
601             let arg = state.pop1();
602             state.push1(builder.ins().fneg(arg));
603         }
604         Operator::F64ConvertUI64 | Operator::F64ConvertUI32 => {
605             let val = state.pop1();
606             state.push1(builder.ins().fcvt_from_uint(F64, val));
607         }
608         Operator::F64ConvertSI64 | Operator::F64ConvertSI32 => {
609             let val = state.pop1();
610             state.push1(builder.ins().fcvt_from_sint(F64, val));
611         }
612         Operator::F32ConvertSI64 | Operator::F32ConvertSI32 => {
613             let val = state.pop1();
614             state.push1(builder.ins().fcvt_from_sint(F32, val));
615         }
616         Operator::F32ConvertUI64 | Operator::F32ConvertUI32 => {
617             let val = state.pop1();
618             state.push1(builder.ins().fcvt_from_uint(F32, val));
619         }
620         Operator::F64PromoteF32 => {
621             let val = state.pop1();
622             state.push1(builder.ins().fpromote(F64, val));
623         }
624         Operator::F32DemoteF64 => {
625             let val = state.pop1();
626             state.push1(builder.ins().fdemote(F32, val));
627         }
628         Operator::I64TruncSF64 | Operator::I64TruncSF32 => {
629             let val = state.pop1();
630             state.push1(builder.ins().fcvt_to_sint(I64, val));
631         }
632         Operator::I32TruncSF64 | Operator::I32TruncSF32 => {
633             let val = state.pop1();
634             state.push1(builder.ins().fcvt_to_sint(I32, val));
635         }
636         Operator::I64TruncUF64 | Operator::I64TruncUF32 => {
637             let val = state.pop1();
638             state.push1(builder.ins().fcvt_to_uint(I64, val));
639         }
640         Operator::I32TruncUF64 | Operator::I32TruncUF32 => {
641             let val = state.pop1();
642             state.push1(builder.ins().fcvt_to_uint(I32, val));
643         }
644         Operator::I64TruncSSatF64 | Operator::I64TruncSSatF32 => {
645             let val = state.pop1();
646             state.push1(builder.ins().fcvt_to_sint_sat(I64, val));
647         }
648         Operator::I32TruncSSatF64 | Operator::I32TruncSSatF32 => {
649             let val = state.pop1();
650             state.push1(builder.ins().fcvt_to_sint_sat(I32, val));
651         }
652         Operator::I64TruncUSatF64 | Operator::I64TruncUSatF32 => {
653             let val = state.pop1();
654             state.push1(builder.ins().fcvt_to_uint_sat(I64, val));
655         }
656         Operator::I32TruncUSatF64 | Operator::I32TruncUSatF32 => {
657             let val = state.pop1();
658             state.push1(builder.ins().fcvt_to_uint_sat(I32, val));
659         }
660         Operator::F32ReinterpretI32 => {
661             let val = state.pop1();
662             state.push1(builder.ins().bitcast(F32, val));
663         }
664         Operator::F64ReinterpretI64 => {
665             let val = state.pop1();
666             state.push1(builder.ins().bitcast(F64, val));
667         }
668         Operator::I32ReinterpretF32 => {
669             let val = state.pop1();
670             state.push1(builder.ins().bitcast(I32, val));
671         }
672         Operator::I64ReinterpretF64 => {
673             let val = state.pop1();
674             state.push1(builder.ins().bitcast(I64, val));
675         }
676         Operator::I32Extend8S => {
677             let val = state.pop1();
678             state.push1(builder.ins().ireduce(I8, val));
679             let val = state.pop1();
680             state.push1(builder.ins().sextend(I32, val));
681         }
682         Operator::I32Extend16S => {
683             let val = state.pop1();
684             state.push1(builder.ins().ireduce(I16, val));
685             let val = state.pop1();
686             state.push1(builder.ins().sextend(I32, val));
687         }
688         Operator::I64Extend8S => {
689             let val = state.pop1();
690             state.push1(builder.ins().ireduce(I8, val));
691             let val = state.pop1();
692             state.push1(builder.ins().sextend(I64, val));
693         }
694         Operator::I64Extend16S => {
695             let val = state.pop1();
696             state.push1(builder.ins().ireduce(I16, val));
697             let val = state.pop1();
698             state.push1(builder.ins().sextend(I64, val));
699         }
700         Operator::I64Extend32S => {
701             let val = state.pop1();
702             state.push1(builder.ins().ireduce(I32, val));
703             let val = state.pop1();
704             state.push1(builder.ins().sextend(I64, val));
705         }
706         /****************************** Binary Operators ************************************/
707         Operator::I32Add | Operator::I64Add => {
708             let (arg1, arg2) = state.pop2();
709             state.push1(builder.ins().iadd(arg1, arg2));
710         }
711         Operator::I32And | Operator::I64And => {
712             let (arg1, arg2) = state.pop2();
713             state.push1(builder.ins().band(arg1, arg2));
714         }
715         Operator::I32Or | Operator::I64Or => {
716             let (arg1, arg2) = state.pop2();
717             state.push1(builder.ins().bor(arg1, arg2));
718         }
719         Operator::I32Xor | Operator::I64Xor => {
720             let (arg1, arg2) = state.pop2();
721             state.push1(builder.ins().bxor(arg1, arg2));
722         }
723         Operator::I32Shl | Operator::I64Shl => {
724             let (arg1, arg2) = state.pop2();
725             state.push1(builder.ins().ishl(arg1, arg2));
726         }
727         Operator::I32ShrS | Operator::I64ShrS => {
728             let (arg1, arg2) = state.pop2();
729             state.push1(builder.ins().sshr(arg1, arg2));
730         }
731         Operator::I32ShrU | Operator::I64ShrU => {
732             let (arg1, arg2) = state.pop2();
733             state.push1(builder.ins().ushr(arg1, arg2));
734         }
735         Operator::I32Rotl | Operator::I64Rotl => {
736             let (arg1, arg2) = state.pop2();
737             state.push1(builder.ins().rotl(arg1, arg2));
738         }
739         Operator::I32Rotr | Operator::I64Rotr => {
740             let (arg1, arg2) = state.pop2();
741             state.push1(builder.ins().rotr(arg1, arg2));
742         }
743         Operator::F32Add | Operator::F64Add => {
744             let (arg1, arg2) = state.pop2();
745             state.push1(builder.ins().fadd(arg1, arg2));
746         }
747         Operator::I32Sub | Operator::I64Sub => {
748             let (arg1, arg2) = state.pop2();
749             state.push1(builder.ins().isub(arg1, arg2));
750         }
751         Operator::F32Sub | Operator::F64Sub => {
752             let (arg1, arg2) = state.pop2();
753             state.push1(builder.ins().fsub(arg1, arg2));
754         }
755         Operator::I32Mul | Operator::I64Mul => {
756             let (arg1, arg2) = state.pop2();
757             state.push1(builder.ins().imul(arg1, arg2));
758         }
759         Operator::F32Mul | Operator::F64Mul => {
760             let (arg1, arg2) = state.pop2();
761             state.push1(builder.ins().fmul(arg1, arg2));
762         }
763         Operator::F32Div | Operator::F64Div => {
764             let (arg1, arg2) = state.pop2();
765             state.push1(builder.ins().fdiv(arg1, arg2));
766         }
767         Operator::I32DivS | Operator::I64DivS => {
768             let (arg1, arg2) = state.pop2();
769             state.push1(builder.ins().sdiv(arg1, arg2));
770         }
771         Operator::I32DivU | Operator::I64DivU => {
772             let (arg1, arg2) = state.pop2();
773             state.push1(builder.ins().udiv(arg1, arg2));
774         }
775         Operator::I32RemS | Operator::I64RemS => {
776             let (arg1, arg2) = state.pop2();
777             state.push1(builder.ins().srem(arg1, arg2));
778         }
779         Operator::I32RemU | Operator::I64RemU => {
780             let (arg1, arg2) = state.pop2();
781             state.push1(builder.ins().urem(arg1, arg2));
782         }
783         Operator::F32Min | Operator::F64Min => {
784             let (arg1, arg2) = state.pop2();
785             state.push1(builder.ins().fmin(arg1, arg2));
786         }
787         Operator::F32Max | Operator::F64Max => {
788             let (arg1, arg2) = state.pop2();
789             state.push1(builder.ins().fmax(arg1, arg2));
790         }
791         Operator::F32Copysign | Operator::F64Copysign => {
792             let (arg1, arg2) = state.pop2();
793             state.push1(builder.ins().fcopysign(arg1, arg2));
794         }
795         /**************************** Comparison Operators **********************************/
796         Operator::I32LtS | Operator::I64LtS => {
797             translate_icmp(IntCC::SignedLessThan, builder, state)
798         }
799         Operator::I32LtU | Operator::I64LtU => {
800             translate_icmp(IntCC::UnsignedLessThan, builder, state)
801         }
802         Operator::I32LeS | Operator::I64LeS => {
803             translate_icmp(IntCC::SignedLessThanOrEqual, builder, state)
804         }
805         Operator::I32LeU | Operator::I64LeU => {
806             translate_icmp(IntCC::UnsignedLessThanOrEqual, builder, state)
807         }
808         Operator::I32GtS | Operator::I64GtS => {
809             translate_icmp(IntCC::SignedGreaterThan, builder, state)
810         }
811         Operator::I32GtU | Operator::I64GtU => {
812             translate_icmp(IntCC::UnsignedGreaterThan, builder, state)
813         }
814         Operator::I32GeS | Operator::I64GeS => {
815             translate_icmp(IntCC::SignedGreaterThanOrEqual, builder, state)
816         }
817         Operator::I32GeU | Operator::I64GeU => {
818             translate_icmp(IntCC::UnsignedGreaterThanOrEqual, builder, state)
819         }
820         Operator::I32Eqz | Operator::I64Eqz => {
821             let arg = state.pop1();
822             let val = builder.ins().icmp_imm(IntCC::Equal, arg, 0);
823             state.push1(builder.ins().bint(I32, val));
824         }
825         Operator::I32Eq | Operator::I64Eq => translate_icmp(IntCC::Equal, builder, state),
826         Operator::F32Eq | Operator::F64Eq => translate_fcmp(FloatCC::Equal, builder, state),
827         Operator::I32Ne | Operator::I64Ne => translate_icmp(IntCC::NotEqual, builder, state),
828         Operator::F32Ne | Operator::F64Ne => translate_fcmp(FloatCC::NotEqual, builder, state),
829         Operator::F32Gt | Operator::F64Gt => translate_fcmp(FloatCC::GreaterThan, builder, state),
830         Operator::F32Ge | Operator::F64Ge => {
831             translate_fcmp(FloatCC::GreaterThanOrEqual, builder, state)
832         }
833         Operator::F32Lt | Operator::F64Lt => translate_fcmp(FloatCC::LessThan, builder, state),
834         Operator::F32Le | Operator::F64Le => {
835             translate_fcmp(FloatCC::LessThanOrEqual, builder, state)
836         }
837         Operator::RefNull => state.push1(builder.ins().null(environ.reference_type())),
838         Operator::RefIsNull => {
839             let arg = state.pop1();
840             let val = builder.ins().is_null(arg);
841             state.push1(val);
842         }
843         Operator::Wake { .. }
844         | Operator::I32Wait { .. }
845         | Operator::I64Wait { .. }
846         | Operator::I32AtomicLoad { .. }
847         | Operator::I64AtomicLoad { .. }
848         | Operator::I32AtomicLoad8U { .. }
849         | Operator::I32AtomicLoad16U { .. }
850         | Operator::I64AtomicLoad8U { .. }
851         | Operator::I64AtomicLoad16U { .. }
852         | Operator::I64AtomicLoad32U { .. }
853         | Operator::I32AtomicStore { .. }
854         | Operator::I64AtomicStore { .. }
855         | Operator::I32AtomicStore8 { .. }
856         | Operator::I32AtomicStore16 { .. }
857         | Operator::I64AtomicStore8 { .. }
858         | Operator::I64AtomicStore16 { .. }
859         | Operator::I64AtomicStore32 { .. }
860         | Operator::I32AtomicRmwAdd { .. }
861         | Operator::I64AtomicRmwAdd { .. }
862         | Operator::I32AtomicRmw8UAdd { .. }
863         | Operator::I32AtomicRmw16UAdd { .. }
864         | Operator::I64AtomicRmw8UAdd { .. }
865         | Operator::I64AtomicRmw16UAdd { .. }
866         | Operator::I64AtomicRmw32UAdd { .. }
867         | Operator::I32AtomicRmwSub { .. }
868         | Operator::I64AtomicRmwSub { .. }
869         | Operator::I32AtomicRmw8USub { .. }
870         | Operator::I32AtomicRmw16USub { .. }
871         | Operator::I64AtomicRmw8USub { .. }
872         | Operator::I64AtomicRmw16USub { .. }
873         | Operator::I64AtomicRmw32USub { .. }
874         | Operator::I32AtomicRmwAnd { .. }
875         | Operator::I64AtomicRmwAnd { .. }
876         | Operator::I32AtomicRmw8UAnd { .. }
877         | Operator::I32AtomicRmw16UAnd { .. }
878         | Operator::I64AtomicRmw8UAnd { .. }
879         | Operator::I64AtomicRmw16UAnd { .. }
880         | Operator::I64AtomicRmw32UAnd { .. }
881         | Operator::I32AtomicRmwOr { .. }
882         | Operator::I64AtomicRmwOr { .. }
883         | Operator::I32AtomicRmw8UOr { .. }
884         | Operator::I32AtomicRmw16UOr { .. }
885         | Operator::I64AtomicRmw8UOr { .. }
886         | Operator::I64AtomicRmw16UOr { .. }
887         | Operator::I64AtomicRmw32UOr { .. }
888         | Operator::I32AtomicRmwXor { .. }
889         | Operator::I64AtomicRmwXor { .. }
890         | Operator::I32AtomicRmw8UXor { .. }
891         | Operator::I32AtomicRmw16UXor { .. }
892         | Operator::I64AtomicRmw8UXor { .. }
893         | Operator::I64AtomicRmw16UXor { .. }
894         | Operator::I64AtomicRmw32UXor { .. }
895         | Operator::I32AtomicRmwXchg { .. }
896         | Operator::I64AtomicRmwXchg { .. }
897         | Operator::I32AtomicRmw8UXchg { .. }
898         | Operator::I32AtomicRmw16UXchg { .. }
899         | Operator::I64AtomicRmw8UXchg { .. }
900         | Operator::I64AtomicRmw16UXchg { .. }
901         | Operator::I64AtomicRmw32UXchg { .. }
902         | Operator::I32AtomicRmwCmpxchg { .. }
903         | Operator::I64AtomicRmwCmpxchg { .. }
904         | Operator::I32AtomicRmw8UCmpxchg { .. }
905         | Operator::I32AtomicRmw16UCmpxchg { .. }
906         | Operator::I64AtomicRmw8UCmpxchg { .. }
907         | Operator::I64AtomicRmw16UCmpxchg { .. }
908         | Operator::I64AtomicRmw32UCmpxchg { .. }
909         | Operator::Fence { .. } => {
910             return Err(wasm_unsupported!("proposed thread operator {:?}", op));
911         }
912         Operator::MemoryInit { .. }
913         | Operator::DataDrop { .. }
914         | Operator::MemoryCopy
915         | Operator::MemoryFill
916         | Operator::TableInit { .. }
917         | Operator::ElemDrop { .. }
918         | Operator::TableCopy
919         | Operator::TableGet { .. }
920         | Operator::TableSet { .. }
921         | Operator::TableGrow { .. }
922         | Operator::TableSize { .. } => {
923             return Err(wasm_unsupported!("proposed bulk memory operator {:?}", op));
924         }
925         Operator::V128Const { value } => {
926             let handle = builder.func.dfg.constants.insert(value.bytes().to_vec());
927             let value = builder.ins().vconst(I8X16, handle);
928             // the v128.const is typed in CLIF as an I8X16 but may be raw_bitcast to a different type before use
929             state.push1(value)
930         }
931         Operator::I8x16Splat
932         | Operator::I16x8Splat
933         | Operator::I32x4Splat
934         | Operator::I64x2Splat
935         | Operator::F32x4Splat
936         | Operator::F64x2Splat => {
937             let value_to_splat = state.pop1();
938             let ty = type_of(op);
939             let splatted = builder.ins().splat(ty, value_to_splat);
940             state.push1(splatted)
941         }
942         Operator::I8x16ExtractLaneS { lane } | Operator::I16x8ExtractLaneS { lane } => {
943             let vector = optionally_bitcast_vector(state.pop1(), type_of(op), builder);
944             let extracted = builder.ins().extractlane(vector, lane.clone());
945             state.push1(builder.ins().sextend(I32, extracted))
946         }
947         Operator::I8x16ExtractLaneU { lane } | Operator::I16x8ExtractLaneU { lane } => {
948             let vector = optionally_bitcast_vector(state.pop1(), type_of(op), builder);
949             state.push1(builder.ins().extractlane(vector, lane.clone()));
950             // on x86, PEXTRB zeroes the upper bits of the destination register of extractlane so uextend is elided; of course, this depends on extractlane being legalized to a PEXTRB
951         }
952         Operator::I32x4ExtractLane { lane }
953         | Operator::I64x2ExtractLane { lane }
954         | Operator::F32x4ExtractLane { lane }
955         | Operator::F64x2ExtractLane { lane } => {
956             let vector = optionally_bitcast_vector(state.pop1(), type_of(op), builder);
957             state.push1(builder.ins().extractlane(vector, lane.clone()))
958         }
959         Operator::I8x16ReplaceLane { lane }
960         | Operator::I16x8ReplaceLane { lane }
961         | Operator::I32x4ReplaceLane { lane }
962         | Operator::I64x2ReplaceLane { lane }
963         | Operator::F32x4ReplaceLane { lane }
964         | Operator::F64x2ReplaceLane { lane } => {
965             let (vector, replacement_value) = state.pop2();
966             let original_vector_type = builder.func.dfg.value_type(vector);
967             let vector = optionally_bitcast_vector(vector, type_of(op), builder);
968             let replaced_vector = builder
969                 .ins()
970                 .insertlane(vector, lane.clone(), replacement_value);
971             state.push1(optionally_bitcast_vector(
972                 replaced_vector,
973                 original_vector_type,
974                 builder,
975             ))
976         }
977         Operator::V8x16Shuffle { lanes, .. } => {
978             let (vector_a, vector_b) = state.pop2();
979             let a = optionally_bitcast_vector(vector_a, I8X16, builder);
980             let b = optionally_bitcast_vector(vector_b, I8X16, builder);
981             let mask = builder.func.dfg.immediates.push(lanes.to_vec());
982             let shuffled = builder.ins().shuffle(a, b, mask);
983             state.push1(shuffled)
984             // At this point the original types of a and b are lost; users of this value (i.e. this
985             // WASM-to-CLIF translator) may need to raw_bitcast for type-correctness. This is due
986             // to WASM using the less specific v128 type for certain operations and more specific
987             // types (e.g. i8x16) for others.
988         }
989         Operator::I8x16Add | Operator::I16x8Add | Operator::I32x4Add | Operator::I64x2Add => {
990             let (a, b) = state.pop2();
991             state.push1(builder.ins().iadd(a, b))
992         }
993         Operator::V128Load { .. }
994         | Operator::V128Store { .. }
995         | Operator::I8x16Eq
996         | Operator::I8x16Ne
997         | Operator::I8x16LtS
998         | Operator::I8x16LtU
999         | Operator::I8x16GtS
1000         | Operator::I8x16GtU
1001         | Operator::I8x16LeS
1002         | Operator::I8x16LeU
1003         | Operator::I8x16GeS
1004         | Operator::I8x16GeU
1005         | Operator::I16x8Eq
1006         | Operator::I16x8Ne
1007         | Operator::I16x8LtS
1008         | Operator::I16x8LtU
1009         | Operator::I16x8GtS
1010         | Operator::I16x8GtU
1011         | Operator::I16x8LeS
1012         | Operator::I16x8LeU
1013         | Operator::I16x8GeS
1014         | Operator::I16x8GeU
1015         | Operator::I32x4Eq
1016         | Operator::I32x4Ne
1017         | Operator::I32x4LtS
1018         | Operator::I32x4LtU
1019         | Operator::I32x4GtS
1020         | Operator::I32x4GtU
1021         | Operator::I32x4LeS
1022         | Operator::I32x4LeU
1023         | Operator::I32x4GeS
1024         | Operator::I32x4GeU
1025         | Operator::F32x4Eq
1026         | Operator::F32x4Ne
1027         | Operator::F32x4Lt
1028         | Operator::F32x4Gt
1029         | Operator::F32x4Le
1030         | Operator::F32x4Ge
1031         | Operator::F64x2Eq
1032         | Operator::F64x2Ne
1033         | Operator::F64x2Lt
1034         | Operator::F64x2Gt
1035         | Operator::F64x2Le
1036         | Operator::F64x2Ge
1037         | Operator::V128Not
1038         | Operator::V128And
1039         | Operator::V128Or
1040         | Operator::V128Xor
1041         | Operator::V128Bitselect
1042         | Operator::I8x16Neg
1043         | Operator::I8x16AnyTrue
1044         | Operator::I8x16AllTrue
1045         | Operator::I8x16Shl
1046         | Operator::I8x16ShrS
1047         | Operator::I8x16ShrU
1048         | Operator::I8x16AddSaturateS
1049         | Operator::I8x16AddSaturateU
1050         | Operator::I8x16Sub
1051         | Operator::I8x16SubSaturateS
1052         | Operator::I8x16SubSaturateU
1053         | Operator::I8x16Mul
1054         | Operator::I16x8Neg
1055         | Operator::I16x8AnyTrue
1056         | Operator::I16x8AllTrue
1057         | Operator::I16x8Shl
1058         | Operator::I16x8ShrS
1059         | Operator::I16x8ShrU
1060         | Operator::I16x8AddSaturateS
1061         | Operator::I16x8AddSaturateU
1062         | Operator::I16x8Sub
1063         | Operator::I16x8SubSaturateS
1064         | Operator::I16x8SubSaturateU
1065         | Operator::I16x8Mul
1066         | Operator::I32x4Neg
1067         | Operator::I32x4AnyTrue
1068         | Operator::I32x4AllTrue
1069         | Operator::I32x4Shl
1070         | Operator::I32x4ShrS
1071         | Operator::I32x4ShrU
1072         | Operator::I32x4Sub
1073         | Operator::I32x4Mul
1074         | Operator::I64x2Neg
1075         | Operator::I64x2AnyTrue
1076         | Operator::I64x2AllTrue
1077         | Operator::I64x2Shl
1078         | Operator::I64x2ShrS
1079         | Operator::I64x2ShrU
1080         | Operator::I64x2Sub
1081         | Operator::F32x4Abs
1082         | Operator::F32x4Neg
1083         | Operator::F32x4Sqrt
1084         | Operator::F32x4Add
1085         | Operator::F32x4Sub
1086         | Operator::F32x4Mul
1087         | Operator::F32x4Div
1088         | Operator::F32x4Min
1089         | Operator::F32x4Max
1090         | Operator::F64x2Abs
1091         | Operator::F64x2Neg
1092         | Operator::F64x2Sqrt
1093         | Operator::F64x2Add
1094         | Operator::F64x2Sub
1095         | Operator::F64x2Mul
1096         | Operator::F64x2Div
1097         | Operator::F64x2Min
1098         | Operator::F64x2Max
1099         | Operator::I32x4TruncSF32x4Sat
1100         | Operator::I32x4TruncUF32x4Sat
1101         | Operator::I64x2TruncSF64x2Sat
1102         | Operator::I64x2TruncUF64x2Sat
1103         | Operator::F32x4ConvertSI32x4
1104         | Operator::F32x4ConvertUI32x4
1105         | Operator::F64x2ConvertSI64x2
1106         | Operator::F64x2ConvertUI64x2 { .. }
1107         | Operator::V8x16Swizzle
1108         | Operator::I8x16LoadSplat { .. }
1109         | Operator::I16x8LoadSplat { .. }
1110         | Operator::I32x4LoadSplat { .. }
1111         | Operator::I64x2LoadSplat { .. } => {
1112             return Err(wasm_unsupported!("proposed SIMD operator {:?}", op));
1113         }
1114     };
1115     Ok(())
1116 }
1117 
1118 // Clippy warns us of some fields we are deliberately ignoring
1119 #[cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))]
1120 /// Deals with a Wasm instruction located in an unreachable portion of the code. Most of them
1121 /// are dropped but special ones like `End` or `Else` signal the potential end of the unreachable
1122 /// portion so the translation state must be updated accordingly.
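/// For example, in an unreachable `i32.const 1 ; drop ; end` sequence, the `i32.const` and `drop`
/// are simply skipped, while the `End` may restore reachability if the enclosing frame's exit was
/// branched to.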
1123 fn translate_unreachable_operator(
1124     op: &Operator,
1125     builder: &mut FunctionBuilder,
1126     state: &mut TranslationState,
1127 ) {
1128     match *op {
1129         Operator::If { ty: _ } => {
1130             // Push a placeholder control stack entry. The if isn't reachable,
1131             // so we don't have any branches anywhere.
1132             state.push_if(ir::Inst::reserved_value(), ir::Ebb::reserved_value(), 0);
1133         }
1134         Operator::Loop { ty: _ } | Operator::Block { ty: _ } => {
1135             state.push_block(ir::Ebb::reserved_value(), 0);
1136         }
1137         Operator::Else => {
1138             let i = state.control_stack.len() - 1;
1139             if let ControlStackFrame::If {
1140                 branch_inst,
1141                 ref mut reachable_from_top,
1142                 ..
1143             } = state.control_stack[i]
1144             {
1145                 if *reachable_from_top {
1146                     // We have a branch from the top of the if to the else.
1147                     state.reachable = true;
1148                     // And because there's an else, there can no longer be a
1149                     // branch from the top directly to the end.
1150                     *reachable_from_top = false;
1151 
1152                     // We change the target of the branch instruction
1153                     let else_ebb = builder.create_ebb();
1154                     builder.change_jump_destination(branch_inst, else_ebb);
1155                     builder.seal_block(else_ebb);
1156                     builder.switch_to_block(else_ebb);
                }
            }
        }
        Operator::End => {
            let stack = &mut state.stack;
            let control_stack = &mut state.control_stack;
            let frame = control_stack.pop().unwrap();

            // Now we have to drop from the value stack the values that were left there
            // for the unreachable code that hasn't been translated.
            stack.truncate(frame.original_stack_size());

            let reachable_anyway = match frame {
                // If it is a loop we also have to seal the body loop block
                ControlStackFrame::Loop { header, .. } => {
                    builder.seal_block(header);
                    // And loops can't have branches to the end.
                    false
                }
                ControlStackFrame::If {
                    reachable_from_top, ..
                } => {
                    // A reachable if without an else has a branch from the top
                    // directly to the bottom.
                    reachable_from_top
                }
                // All other control constructs are already handled.
                _ => false,
            };

            if frame.exit_is_branched_to() || reachable_anyway {
                builder.switch_to_block(frame.following_code());
                builder.seal_block(frame.following_code());

                // And add the return values of the block, but only if the next block is
                // reachable (which corresponds to testing if the stack depth is 1).
                stack.extend_from_slice(builder.ebb_params(frame.following_code()));
                state.reachable = true;
            }
        }
        _ => {
            // We don't translate because this is unreachable code
        }
    }
}

/// Get the address+offset to use for a heap access.
fn get_heap_addr(
    heap: ir::Heap,
    addr32: ir::Value,
    offset: u32,
    addr_ty: Type,
    builder: &mut FunctionBuilder,
) -> (ir::Value, i32) {
    use core::cmp::min;

    let mut adjusted_offset = u64::from(offset);
    let offset_guard_size: u64 = builder.func.heaps[heap].offset_guard_size.into();

    // Generate `heap_addr` instructions that are friendly to CSE by checking offsets that are
    // multiples of the offset-guard size. Add one to make sure that we check the pointer itself
    // is in bounds.
    if offset_guard_size != 0 {
        adjusted_offset = adjusted_offset / offset_guard_size * offset_guard_size;
    }
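    // For example (illustrative numbers only): with a 2 MiB (0x20_0000) offset-guard
    // region, static offsets 0x20_0000 through 0x3f_ffff all round down to 0x20_0000,
    // so accesses in that window can share a single CSE'd `heap_addr` bounds check.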

    // For accesses on the outskirts of the offset-guard pages, we expect that we get a trap
    // even if the access goes beyond the offset-guard pages. This is because the first byte
    // pointed to is inside the offset-guard pages.
    let check_size = min(u64::from(u32::MAX), 1 + adjusted_offset) as u32;
    let base = builder.ins().heap_addr(addr_ty, heap, addr32, check_size);

    // Native load/store instructions take a signed `Offset32` immediate, so adjust the base
    // pointer if necessary.
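    // For example (illustrative numbers only): a static offset of 0x9000_0000 exceeds
    // `i32::MAX`, so 0x8000_0000 is folded into the base with `iadd_imm` and the
    // remaining 0x1000_0000 becomes the load/store immediate.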
    if offset > i32::MAX as u32 {
        // Offset doesn't fit in the load/store instruction.
        let adj = builder.ins().iadd_imm(base, i64::from(i32::MAX) + 1);
        (adj, (offset - (i32::MAX as u32 + 1)) as i32)
    } else {
        (base, offset as i32)
    }
}

/// Translate a load instruction.
fn translate_load<FE: FuncEnvironment + ?Sized>(
    offset: u32,
    opcode: ir::Opcode,
    result_ty: Type,
    builder: &mut FunctionBuilder,
    state: &mut TranslationState,
    environ: &mut FE,
) -> WasmResult<()> {
    let addr32 = state.pop1();
    // We don't yet support multiple linear memories.
    let heap = state.get_heap(builder.func, 0, environ)?;
    let (base, offset) = get_heap_addr(heap, addr32, offset, environ.pointer_type(), builder);
    // Note that we don't set `is_aligned` here, even if the load instruction's
    // alignment immediate says it's aligned, because WebAssembly's immediate
    // field is just a hint, while Cranelift's aligned flag needs a guarantee.
    let flags = MemFlags::new();
    let (load, dfg) = builder
        .ins()
        .Load(opcode, result_ty, flags, offset.into(), base);
    state.push1(dfg.first_result(load));
    Ok(())
}

/// Translate a store instruction.
fn translate_store<FE: FuncEnvironment + ?Sized>(
    offset: u32,
    opcode: ir::Opcode,
    builder: &mut FunctionBuilder,
    state: &mut TranslationState,
    environ: &mut FE,
) -> WasmResult<()> {
    let (addr32, val) = state.pop2();
    let val_ty = builder.func.dfg.value_type(val);

    // We don't yet support multiple linear memories.
    let heap = state.get_heap(builder.func, 0, environ)?;
    let (base, offset) = get_heap_addr(heap, addr32, offset, environ.pointer_type(), builder);
    // See the comments in `translate_load` about the flags.
    let flags = MemFlags::new();
    builder
        .ins()
        .Store(opcode, val_ty, flags, offset.into(), val, base);
    Ok(())
}

fn translate_icmp(cc: IntCC, builder: &mut FunctionBuilder, state: &mut TranslationState) {
    let (arg0, arg1) = state.pop2();
    let val = builder.ins().icmp(cc, arg0, arg1);
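    // `icmp` produces a boolean (`b1`) value, while Wasm comparison operators return an
    // `i32`, so widen the result with `bint` before pushing it onto the value stack.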
    state.push1(builder.ins().bint(I32, val));
}

fn translate_fcmp(cc: FloatCC, builder: &mut FunctionBuilder, state: &mut TranslationState) {
    let (arg0, arg1) = state.pop2();
    let val = builder.ins().fcmp(cc, arg0, arg1);
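    // As in `translate_icmp` above, widen the boolean `fcmp` result to the `i32` that
    // Wasm expects from comparison operators.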
    state.push1(builder.ins().bint(I32, val));
}

fn translate_br_if(
    relative_depth: u32,
    builder: &mut FunctionBuilder,
    state: &mut TranslationState,
) {
    let val = state.pop1();
    let (br_destination, inputs) = translate_br_if_args(relative_depth, state);
    builder.ins().brnz(val, br_destination, inputs);

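    // With the `basic-blocks` feature, a conditional branch may not be followed by further
    // instructions in the same block, so translation continues in a fresh EBB that the
    // fall-through path jumps to.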
    #[cfg(feature = "basic-blocks")]
    {
        let next_ebb = builder.create_ebb();
        builder.ins().jump(next_ebb, &[]);
        builder.seal_block(next_ebb); // The only predecessor is the current block.
        builder.switch_to_block(next_ebb);
    }
}

fn translate_br_if_args(
    relative_depth: u32,
    state: &mut TranslationState,
) -> (ir::Ebb, &[ir::Value]) {
    let i = state.control_stack.len() - 1 - (relative_depth as usize);
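    // `relative_depth` counts outward from the innermost frame: depth 0 is the last
    // entry of `control_stack`, and larger depths refer to enclosing constructs.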
    let (return_count, br_destination) = {
        let frame = &mut state.control_stack[i];
        // The values returned by the branch are still available for the reachable
        // code that comes after it
        frame.set_branched_to_exit();
        let return_count = if frame.is_loop() {
            0
        } else {
            frame.num_return_values()
        };
        (return_count, frame.br_destination())
    };
    let inputs = state.peekn(return_count);
    (br_destination, inputs)
}

/// Determine the type of the value returned by a WebAssembly operator.
fn type_of(operator: &Operator) -> Type {
    match operator {
        Operator::V128Load { .. }
        | Operator::V128Store { .. }
        | Operator::V128Const { .. }
        | Operator::V128Not
        | Operator::V128And
        | Operator::V128Or
        | Operator::V128Xor
        | Operator::V128Bitselect => I8X16, // default type representing V128

        Operator::V8x16Shuffle { .. }
        | Operator::I8x16Splat
        | Operator::I8x16ExtractLaneS { .. }
        | Operator::I8x16ExtractLaneU { .. }
        | Operator::I8x16ReplaceLane { .. }
        | Operator::I8x16Eq
        | Operator::I8x16Ne
        | Operator::I8x16LtS
        | Operator::I8x16LtU
        | Operator::I8x16GtS
        | Operator::I8x16GtU
        | Operator::I8x16LeS
        | Operator::I8x16LeU
        | Operator::I8x16GeS
        | Operator::I8x16GeU
        | Operator::I8x16Neg
        | Operator::I8x16AnyTrue
        | Operator::I8x16AllTrue
        | Operator::I8x16Shl
        | Operator::I8x16ShrS
        | Operator::I8x16ShrU
        | Operator::I8x16Add
        | Operator::I8x16AddSaturateS
        | Operator::I8x16AddSaturateU
        | Operator::I8x16Sub
        | Operator::I8x16SubSaturateS
        | Operator::I8x16SubSaturateU
        | Operator::I8x16Mul => I8X16,

        Operator::I16x8Splat
        | Operator::I16x8ExtractLaneS { .. }
        | Operator::I16x8ExtractLaneU { .. }
        | Operator::I16x8ReplaceLane { .. }
        | Operator::I16x8Eq
        | Operator::I16x8Ne
        | Operator::I16x8LtS
        | Operator::I16x8LtU
        | Operator::I16x8GtS
        | Operator::I16x8GtU
        | Operator::I16x8LeS
        | Operator::I16x8LeU
        | Operator::I16x8GeS
        | Operator::I16x8GeU
        | Operator::I16x8Neg
        | Operator::I16x8AnyTrue
        | Operator::I16x8AllTrue
        | Operator::I16x8Shl
        | Operator::I16x8ShrS
        | Operator::I16x8ShrU
        | Operator::I16x8Add
        | Operator::I16x8AddSaturateS
        | Operator::I16x8AddSaturateU
        | Operator::I16x8Sub
        | Operator::I16x8SubSaturateS
        | Operator::I16x8SubSaturateU
        | Operator::I16x8Mul => I16X8,

        Operator::I32x4Splat
        | Operator::I32x4ExtractLane { .. }
        | Operator::I32x4ReplaceLane { .. }
        | Operator::I32x4Eq
        | Operator::I32x4Ne
        | Operator::I32x4LtS
        | Operator::I32x4LtU
        | Operator::I32x4GtS
        | Operator::I32x4GtU
        | Operator::I32x4LeS
        | Operator::I32x4LeU
        | Operator::I32x4GeS
        | Operator::I32x4GeU
        | Operator::I32x4Neg
        | Operator::I32x4AnyTrue
        | Operator::I32x4AllTrue
        | Operator::I32x4Shl
        | Operator::I32x4ShrS
        | Operator::I32x4ShrU
        | Operator::I32x4Add
        | Operator::I32x4Sub
        | Operator::I32x4Mul
        | Operator::F32x4ConvertSI32x4
        | Operator::F32x4ConvertUI32x4 => I32X4,

        Operator::I64x2Splat
        | Operator::I64x2ExtractLane { .. }
        | Operator::I64x2ReplaceLane { .. }
        | Operator::I64x2Neg
        | Operator::I64x2AnyTrue
        | Operator::I64x2AllTrue
        | Operator::I64x2Shl
        | Operator::I64x2ShrS
        | Operator::I64x2ShrU
        | Operator::I64x2Add
        | Operator::I64x2Sub
        | Operator::F64x2ConvertSI64x2
        | Operator::F64x2ConvertUI64x2 => I64X2,

        Operator::F32x4Splat
        | Operator::F32x4ExtractLane { .. }
        | Operator::F32x4ReplaceLane { .. }
        | Operator::F32x4Eq
        | Operator::F32x4Ne
        | Operator::F32x4Lt
        | Operator::F32x4Gt
        | Operator::F32x4Le
        | Operator::F32x4Ge
        | Operator::F32x4Abs
        | Operator::F32x4Neg
        | Operator::F32x4Sqrt
        | Operator::F32x4Add
        | Operator::F32x4Sub
        | Operator::F32x4Mul
        | Operator::F32x4Div
        | Operator::F32x4Min
        | Operator::F32x4Max
        | Operator::I32x4TruncSF32x4Sat
        | Operator::I32x4TruncUF32x4Sat => F32X4,

        Operator::F64x2Splat
        | Operator::F64x2ExtractLane { .. }
        | Operator::F64x2ReplaceLane { .. }
        | Operator::F64x2Eq
        | Operator::F64x2Ne
        | Operator::F64x2Lt
        | Operator::F64x2Gt
        | Operator::F64x2Le
        | Operator::F64x2Ge
        | Operator::F64x2Abs
        | Operator::F64x2Neg
        | Operator::F64x2Sqrt
        | Operator::F64x2Add
        | Operator::F64x2Sub
        | Operator::F64x2Mul
        | Operator::F64x2Div
        | Operator::F64x2Min
        | Operator::F64x2Max
        | Operator::I64x2TruncSF64x2Sat
        | Operator::I64x2TruncUF64x2Sat => F64X2,

        _ => unimplemented!(
            "Currently only the SIMD instructions are translated to their return type: {:?}",
            operator
        ),
    }
}

/// Some SIMD operations only operate on I8X16 in CLIF; this converts a value to the type
/// the instruction needs by inserting a `raw_bitcast` if necessary.
fn optionally_bitcast_vector(
    value: Value,
    needed_type: Type,
    builder: &mut FunctionBuilder,
) -> Value {
    if builder.func.dfg.value_type(value) != needed_type {
        builder.ins().raw_bitcast(needed_type, value)
    } else {
        value
    }
}
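
// A hedged usage sketch for `optionally_bitcast_vector` (not necessarily a verbatim call
// site from this module): a value that was produced as, say, `I32X4` gets reinterpreted
// as `I8X16` before an instruction this translator only emits on that type:
//
//     let arg = optionally_bitcast_vector(state.pop1(), I8X16, builder);
//     state.push1(builder.ins().bnot(arg));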