1 //! This module contains the bulk of the interesting code performing the translation between
2 //! WebAssembly and Cranelift IR.
3 //!
4 //! The translation is done in one pass, opcode by opcode. Two main data structures are used during
5 //! code translation: the value stack and the control stack. The value stack mimics the execution
6 //! of the WebAssembly stack machine: each instruction result is pushed onto the stack and
7 //! instruction arguments are popped off the stack. Similarly, when encountering a control flow
8 //! block, it is pushed onto the control stack and popped off when encountering the corresponding
9 //! `End`.
10 //!
11 //! Another data structure, the translation state, records information concerning unreachable code
12 //! status and whether a return needs to be inserted at the end of the function.
13 //!
14 //! Some of the WebAssembly instructions need information about the environment for which they
15 //! are being translated:
16 //!
17 //! - the loads and stores need the memory base address;
18 //! - the `get_global` and `set_global` instructions depend on how the globals are implemented;
19 //! - `memory.size` and `memory.grow` are translated into calls to runtime functions;
20 //! - `call_indirect` has to translate the function index into the address of the function
21 //!   it refers to;
22 //!
23 //! That is why `translate_function_body` takes an object implementing the `FuncEnvironment`
24 //! trait as argument.
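//!
//! As a quick illustration of the value-stack model (a sketch, not taken from any particular
//! test case), the body of a function that computes `local 0 + 1` translates as:
//!
//! ```text
//! local.get 0   ;; push the value bound to local 0            value stack: [v0]
//! i32.const 1   ;; push an `iconst`                           value stack: [v0, v1]
//! i32.add       ;; pop two operands, push the `iadd` result   value stack: [v2]
//! end           ;; jump to the following block with the remaining value
//! ```
//!
//! Each of these steps corresponds to one arm of the big `match` in `translate_operator` below.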
25 use super::{hash_map, HashMap};
26 use crate::environ::{FuncEnvironment, GlobalVariable, ReturnMode, WasmResult};
27 use crate::state::{ControlStackFrame, ElseData, FuncTranslationState, ModuleTranslationState};
28 use crate::translation_utils::{
29 block_with_params, blocktype_params_results, f32_translation, f64_translation,
30 };
31 use crate::translation_utils::{FuncIndex, GlobalIndex, MemoryIndex, SignatureIndex, TableIndex};
32 use crate::wasm_unsupported;
33 use core::{i32, u32};
34 use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
35 use cranelift_codegen::ir::immediates::Offset32;
36 use cranelift_codegen::ir::types::*;
37 use cranelift_codegen::ir::{
38 self, ConstantData, InstBuilder, JumpTableData, MemFlags, Value, ValueLabel,
39 };
40 use cranelift_codegen::packed_option::ReservedValue;
41 use cranelift_frontend::{FunctionBuilder, Variable};
42 use std::cmp;
43 use std::convert::TryFrom;
44 use std::vec::Vec;
45 use wasmparser::{MemoryImmediate, Operator};
46
47 // Clippy warns about "flags: _" but it's important to document that the flags field is ignored
48 #[cfg_attr(
49 feature = "cargo-clippy",
50 allow(clippy::unneeded_field_pattern, clippy::cognitive_complexity)
51 )]
52 /// Translates a single wasm operator into Cranelift IR instructions, updating the value and
53 /// control stacks in `state` as it goes.
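///
/// A rough sketch of how the rest of the crate drives this function (the surrounding loop is
/// illustrative, not a verbatim copy of the actual caller):
///
/// ```ignore
/// while !reader.eof() {
///     let op = reader.read_operator()?;
///     translate_operator(module_translation_state, &op, builder, state, environ)?;
/// }
/// ```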
54 pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
55 module_translation_state: &ModuleTranslationState,
56 op: &Operator,
57 builder: &mut FunctionBuilder,
58 state: &mut FuncTranslationState,
59 environ: &mut FE,
60 ) -> WasmResult<()> {
61 if !state.reachable {
62 translate_unreachable_operator(module_translation_state, &op, builder, state, environ)?;
63 return Ok(());
64 }
65
66 // This big match treats all Wasm code operators.
67 match op {
68 /********************************** Locals ****************************************
69 * The `get_local` and `set_local` instructions are handled as accesses to non-SSA variables
70 * that will completely disappear in the Cranelift code
71 ***********************************************************************************/
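// Roughly speaking, `local.get 0` below becomes `builder.use_var(Variable::with_u32(0))` and
// `local.set 0` becomes `builder.def_var(Variable::with_u32(0), val)`; the `FunctionBuilder`
// then performs SSA construction, introducing block parameters wherever a local's value has
// to flow between blocks.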
72 Operator::LocalGet { local_index } => {
73 let val = builder.use_var(Variable::with_u32(*local_index));
74 state.push1(val);
75 let label = ValueLabel::from_u32(*local_index);
76 builder.set_val_label(val, label);
77 }
78 Operator::LocalSet { local_index } => {
79 let mut val = state.pop1();
80
81 // Ensure SIMD values are cast to their default Cranelift type, I8X16.
82 let ty = builder.func.dfg.value_type(val);
83 if ty.is_vector() {
84 val = optionally_bitcast_vector(val, I8X16, builder);
85 }
86
87 builder.def_var(Variable::with_u32(*local_index), val);
88 let label = ValueLabel::from_u32(*local_index);
89 builder.set_val_label(val, label);
90 }
91 Operator::LocalTee { local_index } => {
92 let mut val = state.peek1();
93
94 // Ensure SIMD values are cast to their default Cranelift type, I8X16.
95 let ty = builder.func.dfg.value_type(val);
96 if ty.is_vector() {
97 val = optionally_bitcast_vector(val, I8X16, builder);
98 }
99
100 builder.def_var(Variable::with_u32(*local_index), val);
101 let label = ValueLabel::from_u32(*local_index);
102 builder.set_val_label(val, label);
103 }
104 /********************************** Globals ****************************************
105 * `get_global` and `set_global` are handled by the environment.
106 ***********************************************************************************/
107 Operator::GlobalGet { global_index } => {
108 let val = match state.get_global(builder.func, *global_index, environ)? {
109 GlobalVariable::Const(val) => val,
110 GlobalVariable::Memory { gv, offset, ty } => {
111 let addr = builder.ins().global_value(environ.pointer_type(), gv);
112 let flags = ir::MemFlags::trusted();
113 builder.ins().load(ty, flags, addr, offset)
114 }
115 GlobalVariable::Custom => environ.translate_custom_global_get(
116 builder.cursor(),
117 GlobalIndex::from_u32(*global_index),
118 )?,
119 };
120 state.push1(val);
121 }
122 Operator::GlobalSet { global_index } => {
123 match state.get_global(builder.func, *global_index, environ)? {
124 GlobalVariable::Const(_) => panic!("global #{} is a constant", *global_index),
125 GlobalVariable::Memory { gv, offset, ty } => {
126 let addr = builder.ins().global_value(environ.pointer_type(), gv);
127 let flags = ir::MemFlags::trusted();
128 let val = state.pop1();
129 debug_assert_eq!(ty, builder.func.dfg.value_type(val));
130 builder.ins().store(flags, val, addr, offset);
131 }
132 GlobalVariable::Custom => {
133 let val = state.pop1();
134 environ.translate_custom_global_set(
135 builder.cursor(),
136 GlobalIndex::from_u32(*global_index),
137 val,
138 )?;
139 }
140 }
141 }
142 /********************************* Stack misc ***************************************
143 * `drop`, `nop`, `unreachable` and `select`.
144 ***********************************************************************************/
145 Operator::Drop => {
146 state.pop1();
147 }
148 Operator::Select => {
149 let (arg1, arg2, cond) = state.pop3();
150 state.push1(builder.ins().select(cond, arg1, arg2));
151 }
152 Operator::TypedSelect { ty: _ } => {
153 // We ignore the explicit type parameter as it is only needed for
154 // validation, which we require to have been performed before
155 // translation.
156 let (arg1, arg2, cond) = state.pop3();
157 state.push1(builder.ins().select(cond, arg1, arg2));
158 }
159 Operator::Nop => {
160 // We do nothing
161 }
162 Operator::Unreachable => {
163 builder.ins().trap(ir::TrapCode::UnreachableCodeReached);
164 state.reachable = false;
165 }
166 /***************************** Control flow blocks **********************************
167 * When starting a control flow block, we create a new `Block` that will hold the code
168 * after the block, and we push a frame on the control stack. Depending on the type
169 * of block, we create a new `Block` for the body of the block with an associated
170 * jump instruction.
171 *
172 * The `End` instruction pops the last control frame from the control stack, seals
173 * the destination block (since `br` instructions targeting it only appear inside the
174 * block and have already been translated) and modifies the value stack to use the
175 * `Block`'s argument values, if it has any.
176 ***********************************************************************************/
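// As a sketch of what this means for a simple `block (result i32) ... end`: at `Block` we
// create the `Block` that will hold the code following `end` (with a single i32 parameter)
// and push a control frame recording it; at `End` we jump to that `Block` with the value left
// on the stack, seal it, and continue translating there.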
177 Operator::Block { ty } => {
178 let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
179 let next = block_with_params(builder, results, environ)?;
180 state.push_block(next, params.len(), results.len());
181 }
182 Operator::Loop { ty } => {
183 let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
184 let loop_body = block_with_params(builder, params, environ)?;
185 let next = block_with_params(builder, results, environ)?;
186 builder.ins().jump(loop_body, state.peekn(params.len()));
187 state.push_loop(loop_body, next, params.len(), results.len());
188
189 // Pop the initial `Block` actuals and replace them with the `Block`'s
190 // params since control flow joins at the top of the loop.
191 state.popn(params.len());
192 state
193 .stack
194 .extend_from_slice(builder.block_params(loop_body));
195
196 builder.switch_to_block(loop_body);
197 environ.translate_loop_header(builder.cursor())?;
198 }
199 Operator::If { ty } => {
200 let val = state.pop1();
201
202 let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
203 let (destination, else_data) = if params == results {
204 // It is possible there is no `else` block, so we will only
205 // allocate a block for it if/when we find the `else`. For now,
206 // if the condition isn't true, we jump directly to the
207 // destination block following the whole `if...end`. If we do end
208 // up discovering an `else`, then we will allocate a block for it
209 // and go back and patch the jump.
210 let destination = block_with_params(builder, results, environ)?;
211 let branch_inst = builder
212 .ins()
213 .brz(val, destination, state.peekn(params.len()));
214 (destination, ElseData::NoElse { branch_inst })
215 } else {
216 // The `if` type signature is not valid without an `else` block,
217 // so we eagerly allocate the `else` block here.
218 let destination = block_with_params(builder, results, environ)?;
219 let else_block = block_with_params(builder, params, environ)?;
220 builder
221 .ins()
222 .brz(val, else_block, state.peekn(params.len()));
223 builder.seal_block(else_block);
224 (destination, ElseData::WithElse { else_block })
225 };
226
227 let next_block = builder.create_block();
228 builder.ins().jump(next_block, &[]);
229 builder.seal_block(next_block); // Only predecessor is the current block.
230 builder.switch_to_block(next_block);
231
232 // Here we append an argument to a Block targeted by an argumentless jump instruction,
233 // but in fact there are two cases:
234 // - either the If does not have an Else clause, in which case ty = EmptyBlock
235 // and we add nothing;
236 // - or the If has an Else clause, in which case the destination of this jump
237 // instruction will be changed later when we translate the Else operator.
238 state.push_if(destination, else_data, params.len(), results.len(), *ty);
239 }
240 Operator::Else => {
241 let i = state.control_stack.len() - 1;
242 match state.control_stack[i] {
243 ControlStackFrame::If {
244 ref else_data,
245 head_is_reachable,
246 ref mut consequent_ends_reachable,
247 num_return_values,
248 blocktype,
249 destination,
250 ..
251 } => {
252 // We finished the consequent, so record its final
253 // reachability state.
254 debug_assert!(consequent_ends_reachable.is_none());
255 *consequent_ends_reachable = Some(state.reachable);
256
257 if head_is_reachable {
258 // We have a branch from the head of the `if` to the `else`.
259 state.reachable = true;
260
261 // Ensure we have a block for the `else` block (it may have
262 // already been pre-allocated, see `ElseData` for details).
263 let else_block = match *else_data {
264 ElseData::NoElse { branch_inst } => {
265 let (params, _results) =
266 blocktype_params_results(module_translation_state, blocktype)?;
267 debug_assert_eq!(params.len(), num_return_values);
268 let else_block = block_with_params(builder, params, environ)?;
269 builder.ins().jump(destination, state.peekn(params.len()));
270 state.popn(params.len());
271
272 builder.change_jump_destination(branch_inst, else_block);
273 builder.seal_block(else_block);
274 else_block
275 }
276 ElseData::WithElse { else_block } => {
277 builder
278 .ins()
279 .jump(destination, state.peekn(num_return_values));
280 state.popn(num_return_values);
281 else_block
282 }
283 };
284
285 // You might be expecting that we push the parameters for this
286 // `else` block here, something like this:
287 //
288 // state.pushn(&control_stack_frame.params);
289 //
290 // We don't do that because they are already on the top of the stack
291 // for us: we pushed the parameters twice when we saw the initial
292 // `if` so that we wouldn't have to save the parameters in the
293 // `ControlStackFrame` as another `Vec` allocation.
294
295 builder.switch_to_block(else_block);
296
297 // We don't bother updating the control frame's `ElseData`
298 // to `WithElse` because nothing else will read it.
299 }
300 }
301 _ => unreachable!(),
302 }
303 }
304 Operator::End => {
305 let frame = state.control_stack.pop().unwrap();
306 let next_block = frame.following_code();
307
308 if !builder.is_unreachable() || !builder.is_pristine() {
309 let return_count = frame.num_return_values();
310 let return_args = state.peekn_mut(return_count);
311 let next_block_types = builder.func.dfg.block_param_types(next_block);
312 bitcast_arguments(return_args, &next_block_types, builder);
313 builder.ins().jump(frame.following_code(), return_args);
314 // You might expect that if we just finished an `if` block that
315 // didn't have a corresponding `else` block, then we would clean
316 // up our duplicate set of parameters that we pushed earlier
317 // right here. However, we don't have to explicitly do that,
318 // since we truncate the stack back to the original height
319 // below.
320 }
321 builder.switch_to_block(next_block);
322 builder.seal_block(next_block);
323 // If it is a loop we also have to seal the body loop block
324 if let ControlStackFrame::Loop { header, .. } = frame {
325 builder.seal_block(header)
326 }
327 state.stack.truncate(frame.original_stack_size());
328 state
329 .stack
330 .extend_from_slice(builder.block_params(next_block));
331 }
332 /**************************** Branch instructions *********************************
333 * The branch instructions all take a target nesting level as argument, which
334 * corresponds to how many control stack frames we have to pop to reach the
335 * destination `Block`.
336 *
337 * Once the destination `Block` is found, we sometimes have to declare a certain depth
338 * of the stack unreachable, because some branch instructions are terminators.
339 *
340 * The `br_table` case is more complicated because Cranelift's `br_table` instruction
341 * does not support jump arguments like the other branch instructions do. So, in the cases
342 * where we would pass jump arguments to any other branch instruction, we instead split the
343 * critical edges leaving the `br_table` by creating one `Block` per table destination;
344 * the `br_table` points to these newly created `Block`s, and each of them contains only
345 * a jump instruction to the final destination, this time with jump
346 * arguments.
347 *
348 * This system is also implemented in Cranelift's SSA construction algorithm, because
349 * `use_var` located in a destination `Block` of a `br_table` might trigger the addition
350 * of jump arguments in each predecessor branch instruction, one of which might be a
351 * `br_table`.
352 ***********************************************************************************/
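// As a concrete sketch of the `br_table` edge splitting below: for `br_table 0 1` where the
// targeted frames expect one return value, we create one intermediate `Block` per distinct
// depth, emit `br_table val, default_block, jt` over those intermediate blocks, and each of
// them then emits `jump real_destination(args)` with the (possibly bitcast) jump arguments.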
353 Operator::Br { relative_depth } => {
354 let i = state.control_stack.len() - 1 - (*relative_depth as usize);
355 let (return_count, br_destination) = {
356 let frame = &mut state.control_stack[i];
357 // We signal that all the code that follows until the next End is unreachable
358 frame.set_branched_to_exit();
359 let return_count = if frame.is_loop() {
360 0
361 } else {
362 frame.num_return_values()
363 };
364 (return_count, frame.br_destination())
365 };
366
367 // Bitcast any vector arguments to their default type, I8X16, before jumping.
368 let destination_args = state.peekn_mut(return_count);
369 let destination_types = builder.func.dfg.block_param_types(br_destination);
370 bitcast_arguments(
371 destination_args,
372 &destination_types[..return_count],
373 builder,
374 );
375
376 builder.ins().jump(br_destination, destination_args);
377 state.popn(return_count);
378 state.reachable = false;
379 }
380 Operator::BrIf { relative_depth } => translate_br_if(*relative_depth, builder, state),
381 Operator::BrTable { table } => {
382 let (depths, default) = table.read_table()?;
383 let mut min_depth = default;
384 for depth in &*depths {
385 if *depth < min_depth {
386 min_depth = *depth;
387 }
388 }
389 let jump_args_count = {
390 let i = state.control_stack.len() - 1 - (min_depth as usize);
391 let min_depth_frame = &state.control_stack[i];
392 if min_depth_frame.is_loop() {
393 0
394 } else {
395 min_depth_frame.num_return_values()
396 }
397 };
398 let val = state.pop1();
399 let mut data = JumpTableData::with_capacity(depths.len());
400 if jump_args_count == 0 {
401 // No jump arguments
402 for depth in &*depths {
403 let block = {
404 let i = state.control_stack.len() - 1 - (*depth as usize);
405 let frame = &mut state.control_stack[i];
406 frame.set_branched_to_exit();
407 frame.br_destination()
408 };
409 data.push_entry(block);
410 }
411 let jt = builder.create_jump_table(data);
412 let block = {
413 let i = state.control_stack.len() - 1 - (default as usize);
414 let frame = &mut state.control_stack[i];
415 frame.set_branched_to_exit();
416 frame.br_destination()
417 };
418 builder.ins().br_table(val, block, jt);
419 } else {
420 // Here we have jump arguments, but Cranelift's br_table doesn't support them
421 // We then proceed to split the edges going out of the br_table
422 let return_count = jump_args_count;
423 let mut dest_block_sequence = vec![];
424 let mut dest_block_map = HashMap::new();
425 for depth in &*depths {
426 let branch_block = match dest_block_map.entry(*depth as usize) {
427 hash_map::Entry::Occupied(entry) => *entry.get(),
428 hash_map::Entry::Vacant(entry) => {
429 let block = builder.create_block();
430 dest_block_sequence.push((*depth as usize, block));
431 *entry.insert(block)
432 }
433 };
434 data.push_entry(branch_block);
435 }
436 let default_branch_block = match dest_block_map.entry(default as usize) {
437 hash_map::Entry::Occupied(entry) => *entry.get(),
438 hash_map::Entry::Vacant(entry) => {
439 let block = builder.create_block();
440 dest_block_sequence.push((default as usize, block));
441 *entry.insert(block)
442 }
443 };
444 let jt = builder.create_jump_table(data);
445 builder.ins().br_table(val, default_branch_block, jt);
446 for (depth, dest_block) in dest_block_sequence {
447 builder.switch_to_block(dest_block);
448 builder.seal_block(dest_block);
449 let real_dest_block = {
450 let i = state.control_stack.len() - 1 - depth;
451 let frame = &mut state.control_stack[i];
452 frame.set_branched_to_exit();
453 frame.br_destination()
454 };
455
456 // Bitcast any vector arguments to their default type, I8X16, before jumping.
457 let destination_args = state.peekn_mut(return_count);
458 let destination_types = builder.func.dfg.block_param_types(real_dest_block);
459 bitcast_arguments(
460 destination_args,
461 &destination_types[..return_count],
462 builder,
463 );
464
465 builder.ins().jump(real_dest_block, destination_args);
466 }
467 state.popn(return_count);
468 }
469 state.reachable = false;
470 }
471 Operator::Return => {
472 let (return_count, br_destination) = {
473 let frame = &mut state.control_stack[0];
474 frame.set_branched_to_exit();
475 let return_count = frame.num_return_values();
476 (return_count, frame.br_destination())
477 };
478 {
479 let return_args = state.peekn_mut(return_count);
480 let return_types = wasm_param_types(&builder.func.signature.returns, |i| {
481 environ.is_wasm_return(&builder.func.signature, i)
482 });
483 bitcast_arguments(return_args, &return_types, builder);
484 match environ.return_mode() {
485 ReturnMode::NormalReturns => builder.ins().return_(return_args),
486 ReturnMode::FallthroughReturn => {
487 builder.ins().jump(br_destination, return_args)
488 }
489 };
490 }
491 state.popn(return_count);
492 state.reachable = false;
493 }
494 /************************************ Calls ****************************************
495 * The call instructions pop off their arguments from the stack and append their
496 * return values to it. `call_indirect` needs environment support because there is an
497 * argument referring to an index in the external functions table of the module.
498 ************************************************************************************/
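// Both call cases below follow the same pattern: peek the arguments off the value stack,
// bitcast any v128 arguments to I8X16 so they match the CLIF signature, let the environment
// emit the actual call instruction, then pop the arguments and push the call's results.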
499 Operator::Call { function_index } => {
500 let (fref, num_args) = state.get_direct_func(builder.func, *function_index, environ)?;
501
502 // Bitcast any vector arguments to their default type, I8X16, before calling.
503 let callee_signature =
504 &builder.func.dfg.signatures[builder.func.dfg.ext_funcs[fref].signature];
505 let args = state.peekn_mut(num_args);
506 let types = wasm_param_types(&callee_signature.params, |i| {
507 environ.is_wasm_parameter(&callee_signature, i)
508 });
509 bitcast_arguments(args, &types, builder);
510
511 let call = environ.translate_call(
512 builder.cursor(),
513 FuncIndex::from_u32(*function_index),
514 fref,
515 args,
516 )?;
517 let inst_results = builder.inst_results(call);
518 debug_assert_eq!(
519 inst_results.len(),
520 builder.func.dfg.signatures[builder.func.dfg.ext_funcs[fref].signature]
521 .returns
522 .len(),
523 "translate_call results should match the call signature"
524 );
525 state.popn(num_args);
526 state.pushn(inst_results);
527 }
528 Operator::CallIndirect { index, table_index } => {
529 // `index` is the index of the function's signature and `table_index` is the index of
530 // the table to search the function in.
531 let (sigref, num_args) = state.get_indirect_sig(builder.func, *index, environ)?;
532 let table = state.get_table(builder.func, *table_index, environ)?;
533 let callee = state.pop1();
534
535 // Bitcast any vector arguments to their default type, I8X16, before calling.
536 let callee_signature = &builder.func.dfg.signatures[sigref];
537 let args = state.peekn_mut(num_args);
538 let types = wasm_param_types(&callee_signature.params, |i| {
539 environ.is_wasm_parameter(&callee_signature, i)
540 });
541 bitcast_arguments(args, &types, builder);
542
543 let call = environ.translate_call_indirect(
544 builder.cursor(),
545 TableIndex::from_u32(*table_index),
546 table,
547 SignatureIndex::from_u32(*index),
548 sigref,
549 callee,
550 state.peekn(num_args),
551 )?;
552 let inst_results = builder.inst_results(call);
553 debug_assert_eq!(
554 inst_results.len(),
555 builder.func.dfg.signatures[sigref].returns.len(),
556 "translate_call_indirect results should match the call signature"
557 );
558 state.popn(num_args);
559 state.pushn(inst_results);
560 }
561 /******************************* Memory management ***********************************
562 * Memory management is handled by the environment. It is usually translated into calls to
563 * special functions.
564 ************************************************************************************/
565 Operator::MemoryGrow { reserved } => {
566 // The WebAssembly MVP only supports one linear memory, but we expect the reserved
567 // argument to be a memory index.
568 let heap_index = MemoryIndex::from_u32(*reserved);
569 let heap = state.get_heap(builder.func, *reserved, environ)?;
570 let val = state.pop1();
571 state.push1(environ.translate_memory_grow(builder.cursor(), heap_index, heap, val)?)
572 }
573 Operator::MemorySize { reserved } => {
574 let heap_index = MemoryIndex::from_u32(*reserved);
575 let heap = state.get_heap(builder.func, *reserved, environ)?;
576 state.push1(environ.translate_memory_size(builder.cursor(), heap_index, heap)?);
577 }
578 /******************************* Load instructions ***********************************
579 * Wasm specifies an integer alignment flag but we drop it in Cranelift.
580 * The memory base address is provided by the environment.
581 ************************************************************************************/
582 Operator::I32Load8U {
583 memarg: MemoryImmediate { flags: _, offset },
584 } => {
585 translate_load(*offset, ir::Opcode::Uload8, I32, builder, state, environ)?;
586 }
587 Operator::I32Load16U {
588 memarg: MemoryImmediate { flags: _, offset },
589 } => {
590 translate_load(*offset, ir::Opcode::Uload16, I32, builder, state, environ)?;
591 }
592 Operator::I32Load8S {
593 memarg: MemoryImmediate { flags: _, offset },
594 } => {
595 translate_load(*offset, ir::Opcode::Sload8, I32, builder, state, environ)?;
596 }
597 Operator::I32Load16S {
598 memarg: MemoryImmediate { flags: _, offset },
599 } => {
600 translate_load(*offset, ir::Opcode::Sload16, I32, builder, state, environ)?;
601 }
602 Operator::I64Load8U {
603 memarg: MemoryImmediate { flags: _, offset },
604 } => {
605 translate_load(*offset, ir::Opcode::Uload8, I64, builder, state, environ)?;
606 }
607 Operator::I64Load16U {
608 memarg: MemoryImmediate { flags: _, offset },
609 } => {
610 translate_load(*offset, ir::Opcode::Uload16, I64, builder, state, environ)?;
611 }
612 Operator::I64Load8S {
613 memarg: MemoryImmediate { flags: _, offset },
614 } => {
615 translate_load(*offset, ir::Opcode::Sload8, I64, builder, state, environ)?;
616 }
617 Operator::I64Load16S {
618 memarg: MemoryImmediate { flags: _, offset },
619 } => {
620 translate_load(*offset, ir::Opcode::Sload16, I64, builder, state, environ)?;
621 }
622 Operator::I64Load32S {
623 memarg: MemoryImmediate { flags: _, offset },
624 } => {
625 translate_load(*offset, ir::Opcode::Sload32, I64, builder, state, environ)?;
626 }
627 Operator::I64Load32U {
628 memarg: MemoryImmediate { flags: _, offset },
629 } => {
630 translate_load(*offset, ir::Opcode::Uload32, I64, builder, state, environ)?;
631 }
632 Operator::I32Load {
633 memarg: MemoryImmediate { flags: _, offset },
634 } => {
635 translate_load(*offset, ir::Opcode::Load, I32, builder, state, environ)?;
636 }
637 Operator::F32Load {
638 memarg: MemoryImmediate { flags: _, offset },
639 } => {
640 translate_load(*offset, ir::Opcode::Load, F32, builder, state, environ)?;
641 }
642 Operator::I64Load {
643 memarg: MemoryImmediate { flags: _, offset },
644 } => {
645 translate_load(*offset, ir::Opcode::Load, I64, builder, state, environ)?;
646 }
647 Operator::F64Load {
648 memarg: MemoryImmediate { flags: _, offset },
649 } => {
650 translate_load(*offset, ir::Opcode::Load, F64, builder, state, environ)?;
651 }
652 Operator::V128Load {
653 memarg: MemoryImmediate { flags: _, offset },
654 } => {
655 translate_load(*offset, ir::Opcode::Load, I8X16, builder, state, environ)?;
656 }
657 Operator::I16x8Load8x8S {
658 memarg: MemoryImmediate { flags: _, offset },
659 } => {
660 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
661 let loaded = builder.ins().sload8x8(flags, base, offset);
662 state.push1(loaded);
663 }
664 Operator::I16x8Load8x8U {
665 memarg: MemoryImmediate { flags: _, offset },
666 } => {
667 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
668 let loaded = builder.ins().uload8x8(flags, base, offset);
669 state.push1(loaded);
670 }
671 Operator::I32x4Load16x4S {
672 memarg: MemoryImmediate { flags: _, offset },
673 } => {
674 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
675 let loaded = builder.ins().sload16x4(flags, base, offset);
676 state.push1(loaded);
677 }
678 Operator::I32x4Load16x4U {
679 memarg: MemoryImmediate { flags: _, offset },
680 } => {
681 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
682 let loaded = builder.ins().uload16x4(flags, base, offset);
683 state.push1(loaded);
684 }
685 Operator::I64x2Load32x2S {
686 memarg: MemoryImmediate { flags: _, offset },
687 } => {
688 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
689 let loaded = builder.ins().sload32x2(flags, base, offset);
690 state.push1(loaded);
691 }
692 Operator::I64x2Load32x2U {
693 memarg: MemoryImmediate { flags: _, offset },
694 } => {
695 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
696 let loaded = builder.ins().uload32x2(flags, base, offset);
697 state.push1(loaded);
698 }
699 /****************************** Store instructions ***********************************
700 * Wasm specifies an integer alignment flag but we drop it in Cranelift.
701 * The memory base address is provided by the environment.
702 ************************************************************************************/
703 Operator::I32Store {
704 memarg: MemoryImmediate { flags: _, offset },
705 }
706 | Operator::I64Store {
707 memarg: MemoryImmediate { flags: _, offset },
708 }
709 | Operator::F32Store {
710 memarg: MemoryImmediate { flags: _, offset },
711 }
712 | Operator::F64Store {
713 memarg: MemoryImmediate { flags: _, offset },
714 } => {
715 translate_store(*offset, ir::Opcode::Store, builder, state, environ)?;
716 }
717 Operator::I32Store8 {
718 memarg: MemoryImmediate { flags: _, offset },
719 }
720 | Operator::I64Store8 {
721 memarg: MemoryImmediate { flags: _, offset },
722 } => {
723 translate_store(*offset, ir::Opcode::Istore8, builder, state, environ)?;
724 }
725 Operator::I32Store16 {
726 memarg: MemoryImmediate { flags: _, offset },
727 }
728 | Operator::I64Store16 {
729 memarg: MemoryImmediate { flags: _, offset },
730 } => {
731 translate_store(*offset, ir::Opcode::Istore16, builder, state, environ)?;
732 }
733 Operator::I64Store32 {
734 memarg: MemoryImmediate { flags: _, offset },
735 } => {
736 translate_store(*offset, ir::Opcode::Istore32, builder, state, environ)?;
737 }
738 Operator::V128Store {
739 memarg: MemoryImmediate { flags: _, offset },
740 } => {
741 translate_store(*offset, ir::Opcode::Store, builder, state, environ)?;
742 }
743 /****************************** Nullary Operators ************************************/
744 Operator::I32Const { value } => state.push1(builder.ins().iconst(I32, i64::from(*value))),
745 Operator::I64Const { value } => state.push1(builder.ins().iconst(I64, *value)),
746 Operator::F32Const { value } => {
747 state.push1(builder.ins().f32const(f32_translation(*value)));
748 }
749 Operator::F64Const { value } => {
750 state.push1(builder.ins().f64const(f64_translation(*value)));
751 }
752 /******************************* Unary Operators *************************************/
753 Operator::I32Clz | Operator::I64Clz => {
754 let arg = state.pop1();
755 state.push1(builder.ins().clz(arg));
756 }
757 Operator::I32Ctz | Operator::I64Ctz => {
758 let arg = state.pop1();
759 state.push1(builder.ins().ctz(arg));
760 }
761 Operator::I32Popcnt | Operator::I64Popcnt => {
762 let arg = state.pop1();
763 state.push1(builder.ins().popcnt(arg));
764 }
765 Operator::I64ExtendI32S => {
766 let val = state.pop1();
767 state.push1(builder.ins().sextend(I64, val));
768 }
769 Operator::I64ExtendI32U => {
770 let val = state.pop1();
771 state.push1(builder.ins().uextend(I64, val));
772 }
773 Operator::I32WrapI64 => {
774 let val = state.pop1();
775 state.push1(builder.ins().ireduce(I32, val));
776 }
777 Operator::F32Sqrt | Operator::F64Sqrt => {
778 let arg = state.pop1();
779 state.push1(builder.ins().sqrt(arg));
780 }
781 Operator::F32Ceil | Operator::F64Ceil => {
782 let arg = state.pop1();
783 state.push1(builder.ins().ceil(arg));
784 }
785 Operator::F32Floor | Operator::F64Floor => {
786 let arg = state.pop1();
787 state.push1(builder.ins().floor(arg));
788 }
789 Operator::F32Trunc | Operator::F64Trunc => {
790 let arg = state.pop1();
791 state.push1(builder.ins().trunc(arg));
792 }
793 Operator::F32Nearest | Operator::F64Nearest => {
794 let arg = state.pop1();
795 state.push1(builder.ins().nearest(arg));
796 }
797 Operator::F32Abs | Operator::F64Abs => {
798 let val = state.pop1();
799 state.push1(builder.ins().fabs(val));
800 }
801 Operator::F32Neg | Operator::F64Neg => {
802 let arg = state.pop1();
803 state.push1(builder.ins().fneg(arg));
804 }
805 Operator::F64ConvertI64U | Operator::F64ConvertI32U => {
806 let val = state.pop1();
807 state.push1(builder.ins().fcvt_from_uint(F64, val));
808 }
809 Operator::F64ConvertI64S | Operator::F64ConvertI32S => {
810 let val = state.pop1();
811 state.push1(builder.ins().fcvt_from_sint(F64, val));
812 }
813 Operator::F32ConvertI64S | Operator::F32ConvertI32S => {
814 let val = state.pop1();
815 state.push1(builder.ins().fcvt_from_sint(F32, val));
816 }
817 Operator::F32ConvertI64U | Operator::F32ConvertI32U => {
818 let val = state.pop1();
819 state.push1(builder.ins().fcvt_from_uint(F32, val));
820 }
821 Operator::F64PromoteF32 => {
822 let val = state.pop1();
823 state.push1(builder.ins().fpromote(F64, val));
824 }
825 Operator::F32DemoteF64 => {
826 let val = state.pop1();
827 state.push1(builder.ins().fdemote(F32, val));
828 }
829 Operator::I64TruncF64S | Operator::I64TruncF32S => {
830 let val = state.pop1();
831 state.push1(builder.ins().fcvt_to_sint(I64, val));
832 }
833 Operator::I32TruncF64S | Operator::I32TruncF32S => {
834 let val = state.pop1();
835 state.push1(builder.ins().fcvt_to_sint(I32, val));
836 }
837 Operator::I64TruncF64U | Operator::I64TruncF32U => {
838 let val = state.pop1();
839 state.push1(builder.ins().fcvt_to_uint(I64, val));
840 }
841 Operator::I32TruncF64U | Operator::I32TruncF32U => {
842 let val = state.pop1();
843 state.push1(builder.ins().fcvt_to_uint(I32, val));
844 }
845 Operator::I64TruncSatF64S | Operator::I64TruncSatF32S => {
846 let val = state.pop1();
847 state.push1(builder.ins().fcvt_to_sint_sat(I64, val));
848 }
849 Operator::I32TruncSatF64S | Operator::I32TruncSatF32S => {
850 let val = state.pop1();
851 state.push1(builder.ins().fcvt_to_sint_sat(I32, val));
852 }
853 Operator::I64TruncSatF64U | Operator::I64TruncSatF32U => {
854 let val = state.pop1();
855 state.push1(builder.ins().fcvt_to_uint_sat(I64, val));
856 }
857 Operator::I32TruncSatF64U | Operator::I32TruncSatF32U => {
858 let val = state.pop1();
859 state.push1(builder.ins().fcvt_to_uint_sat(I32, val));
860 }
861 Operator::F32ReinterpretI32 => {
862 let val = state.pop1();
863 state.push1(builder.ins().bitcast(F32, val));
864 }
865 Operator::F64ReinterpretI64 => {
866 let val = state.pop1();
867 state.push1(builder.ins().bitcast(F64, val));
868 }
869 Operator::I32ReinterpretF32 => {
870 let val = state.pop1();
871 state.push1(builder.ins().bitcast(I32, val));
872 }
873 Operator::I64ReinterpretF64 => {
874 let val = state.pop1();
875 state.push1(builder.ins().bitcast(I64, val));
876 }
877 Operator::I32Extend8S => {
878 let val = state.pop1();
879 state.push1(builder.ins().ireduce(I8, val));
880 let val = state.pop1();
881 state.push1(builder.ins().sextend(I32, val));
882 }
883 Operator::I32Extend16S => {
884 let val = state.pop1();
885 state.push1(builder.ins().ireduce(I16, val));
886 let val = state.pop1();
887 state.push1(builder.ins().sextend(I32, val));
888 }
889 Operator::I64Extend8S => {
890 let val = state.pop1();
891 state.push1(builder.ins().ireduce(I8, val));
892 let val = state.pop1();
893 state.push1(builder.ins().sextend(I64, val));
894 }
895 Operator::I64Extend16S => {
896 let val = state.pop1();
897 state.push1(builder.ins().ireduce(I16, val));
898 let val = state.pop1();
899 state.push1(builder.ins().sextend(I64, val));
900 }
901 Operator::I64Extend32S => {
902 let val = state.pop1();
903 state.push1(builder.ins().ireduce(I32, val));
904 let val = state.pop1();
905 state.push1(builder.ins().sextend(I64, val));
906 }
907 /****************************** Binary Operators ************************************/
908 Operator::I32Add | Operator::I64Add => {
909 let (arg1, arg2) = state.pop2();
910 state.push1(builder.ins().iadd(arg1, arg2));
911 }
912 Operator::I32And | Operator::I64And => {
913 let (arg1, arg2) = state.pop2();
914 state.push1(builder.ins().band(arg1, arg2));
915 }
916 Operator::I32Or | Operator::I64Or => {
917 let (arg1, arg2) = state.pop2();
918 state.push1(builder.ins().bor(arg1, arg2));
919 }
920 Operator::I32Xor | Operator::I64Xor => {
921 let (arg1, arg2) = state.pop2();
922 state.push1(builder.ins().bxor(arg1, arg2));
923 }
924 Operator::I32Shl | Operator::I64Shl => {
925 let (arg1, arg2) = state.pop2();
926 state.push1(builder.ins().ishl(arg1, arg2));
927 }
928 Operator::I32ShrS | Operator::I64ShrS => {
929 let (arg1, arg2) = state.pop2();
930 state.push1(builder.ins().sshr(arg1, arg2));
931 }
932 Operator::I32ShrU | Operator::I64ShrU => {
933 let (arg1, arg2) = state.pop2();
934 state.push1(builder.ins().ushr(arg1, arg2));
935 }
936 Operator::I32Rotl | Operator::I64Rotl => {
937 let (arg1, arg2) = state.pop2();
938 state.push1(builder.ins().rotl(arg1, arg2));
939 }
940 Operator::I32Rotr | Operator::I64Rotr => {
941 let (arg1, arg2) = state.pop2();
942 state.push1(builder.ins().rotr(arg1, arg2));
943 }
944 Operator::F32Add | Operator::F64Add => {
945 let (arg1, arg2) = state.pop2();
946 state.push1(builder.ins().fadd(arg1, arg2));
947 }
948 Operator::I32Sub | Operator::I64Sub => {
949 let (arg1, arg2) = state.pop2();
950 state.push1(builder.ins().isub(arg1, arg2));
951 }
952 Operator::F32Sub | Operator::F64Sub => {
953 let (arg1, arg2) = state.pop2();
954 state.push1(builder.ins().fsub(arg1, arg2));
955 }
956 Operator::I32Mul | Operator::I64Mul => {
957 let (arg1, arg2) = state.pop2();
958 state.push1(builder.ins().imul(arg1, arg2));
959 }
960 Operator::F32Mul | Operator::F64Mul => {
961 let (arg1, arg2) = state.pop2();
962 state.push1(builder.ins().fmul(arg1, arg2));
963 }
964 Operator::F32Div | Operator::F64Div => {
965 let (arg1, arg2) = state.pop2();
966 state.push1(builder.ins().fdiv(arg1, arg2));
967 }
968 Operator::I32DivS | Operator::I64DivS => {
969 let (arg1, arg2) = state.pop2();
970 state.push1(builder.ins().sdiv(arg1, arg2));
971 }
972 Operator::I32DivU | Operator::I64DivU => {
973 let (arg1, arg2) = state.pop2();
974 state.push1(builder.ins().udiv(arg1, arg2));
975 }
976 Operator::I32RemS | Operator::I64RemS => {
977 let (arg1, arg2) = state.pop2();
978 state.push1(builder.ins().srem(arg1, arg2));
979 }
980 Operator::I32RemU | Operator::I64RemU => {
981 let (arg1, arg2) = state.pop2();
982 state.push1(builder.ins().urem(arg1, arg2));
983 }
984 Operator::F32Min | Operator::F64Min => {
985 let (arg1, arg2) = state.pop2();
986 state.push1(builder.ins().fmin(arg1, arg2));
987 }
988 Operator::F32Max | Operator::F64Max => {
989 let (arg1, arg2) = state.pop2();
990 state.push1(builder.ins().fmax(arg1, arg2));
991 }
992 Operator::F32Copysign | Operator::F64Copysign => {
993 let (arg1, arg2) = state.pop2();
994 state.push1(builder.ins().fcopysign(arg1, arg2));
995 }
996 /**************************** Comparison Operators **********************************/
997 Operator::I32LtS | Operator::I64LtS => {
998 translate_icmp(IntCC::SignedLessThan, builder, state)
999 }
1000 Operator::I32LtU | Operator::I64LtU => {
1001 translate_icmp(IntCC::UnsignedLessThan, builder, state)
1002 }
1003 Operator::I32LeS | Operator::I64LeS => {
1004 translate_icmp(IntCC::SignedLessThanOrEqual, builder, state)
1005 }
1006 Operator::I32LeU | Operator::I64LeU => {
1007 translate_icmp(IntCC::UnsignedLessThanOrEqual, builder, state)
1008 }
1009 Operator::I32GtS | Operator::I64GtS => {
1010 translate_icmp(IntCC::SignedGreaterThan, builder, state)
1011 }
1012 Operator::I32GtU | Operator::I64GtU => {
1013 translate_icmp(IntCC::UnsignedGreaterThan, builder, state)
1014 }
1015 Operator::I32GeS | Operator::I64GeS => {
1016 translate_icmp(IntCC::SignedGreaterThanOrEqual, builder, state)
1017 }
1018 Operator::I32GeU | Operator::I64GeU => {
1019 translate_icmp(IntCC::UnsignedGreaterThanOrEqual, builder, state)
1020 }
1021 Operator::I32Eqz | Operator::I64Eqz => {
1022 let arg = state.pop1();
1023 let val = builder.ins().icmp_imm(IntCC::Equal, arg, 0);
1024 state.push1(builder.ins().bint(I32, val));
1025 }
1026 Operator::I32Eq | Operator::I64Eq => translate_icmp(IntCC::Equal, builder, state),
1027 Operator::F32Eq | Operator::F64Eq => translate_fcmp(FloatCC::Equal, builder, state),
1028 Operator::I32Ne | Operator::I64Ne => translate_icmp(IntCC::NotEqual, builder, state),
1029 Operator::F32Ne | Operator::F64Ne => translate_fcmp(FloatCC::NotEqual, builder, state),
1030 Operator::F32Gt | Operator::F64Gt => translate_fcmp(FloatCC::GreaterThan, builder, state),
1031 Operator::F32Ge | Operator::F64Ge => {
1032 translate_fcmp(FloatCC::GreaterThanOrEqual, builder, state)
1033 }
1034 Operator::F32Lt | Operator::F64Lt => translate_fcmp(FloatCC::LessThan, builder, state),
1035 Operator::F32Le | Operator::F64Le => {
1036 translate_fcmp(FloatCC::LessThanOrEqual, builder, state)
1037 }
1038 Operator::RefNull => state.push1(builder.ins().null(environ.reference_type())),
1039 Operator::RefIsNull => {
1040 let arg = state.pop1();
1041 let val = builder.ins().is_null(arg);
1042 let val_int = builder.ins().bint(I32, val);
1043 state.push1(val_int);
1044 }
1045 Operator::RefFunc { function_index } => {
1046 state.push1(environ.translate_ref_func(builder.cursor(), *function_index)?);
1047 }
1048 Operator::AtomicNotify { .. }
1049 | Operator::I32AtomicWait { .. }
1050 | Operator::I64AtomicWait { .. }
1051 | Operator::I32AtomicLoad { .. }
1052 | Operator::I64AtomicLoad { .. }
1053 | Operator::I32AtomicLoad8U { .. }
1054 | Operator::I32AtomicLoad16U { .. }
1055 | Operator::I64AtomicLoad8U { .. }
1056 | Operator::I64AtomicLoad16U { .. }
1057 | Operator::I64AtomicLoad32U { .. }
1058 | Operator::I32AtomicStore { .. }
1059 | Operator::I64AtomicStore { .. }
1060 | Operator::I32AtomicStore8 { .. }
1061 | Operator::I32AtomicStore16 { .. }
1062 | Operator::I64AtomicStore8 { .. }
1063 | Operator::I64AtomicStore16 { .. }
1064 | Operator::I64AtomicStore32 { .. }
1065 | Operator::I32AtomicRmwAdd { .. }
1066 | Operator::I64AtomicRmwAdd { .. }
1067 | Operator::I32AtomicRmw8AddU { .. }
1068 | Operator::I32AtomicRmw16AddU { .. }
1069 | Operator::I64AtomicRmw8AddU { .. }
1070 | Operator::I64AtomicRmw16AddU { .. }
1071 | Operator::I64AtomicRmw32AddU { .. }
1072 | Operator::I32AtomicRmwSub { .. }
1073 | Operator::I64AtomicRmwSub { .. }
1074 | Operator::I32AtomicRmw8SubU { .. }
1075 | Operator::I32AtomicRmw16SubU { .. }
1076 | Operator::I64AtomicRmw8SubU { .. }
1077 | Operator::I64AtomicRmw16SubU { .. }
1078 | Operator::I64AtomicRmw32SubU { .. }
1079 | Operator::I32AtomicRmwAnd { .. }
1080 | Operator::I64AtomicRmwAnd { .. }
1081 | Operator::I32AtomicRmw8AndU { .. }
1082 | Operator::I32AtomicRmw16AndU { .. }
1083 | Operator::I64AtomicRmw8AndU { .. }
1084 | Operator::I64AtomicRmw16AndU { .. }
1085 | Operator::I64AtomicRmw32AndU { .. }
1086 | Operator::I32AtomicRmwOr { .. }
1087 | Operator::I64AtomicRmwOr { .. }
1088 | Operator::I32AtomicRmw8OrU { .. }
1089 | Operator::I32AtomicRmw16OrU { .. }
1090 | Operator::I64AtomicRmw8OrU { .. }
1091 | Operator::I64AtomicRmw16OrU { .. }
1092 | Operator::I64AtomicRmw32OrU { .. }
1093 | Operator::I32AtomicRmwXor { .. }
1094 | Operator::I64AtomicRmwXor { .. }
1095 | Operator::I32AtomicRmw8XorU { .. }
1096 | Operator::I32AtomicRmw16XorU { .. }
1097 | Operator::I64AtomicRmw8XorU { .. }
1098 | Operator::I64AtomicRmw16XorU { .. }
1099 | Operator::I64AtomicRmw32XorU { .. }
1100 | Operator::I32AtomicRmwXchg { .. }
1101 | Operator::I64AtomicRmwXchg { .. }
1102 | Operator::I32AtomicRmw8XchgU { .. }
1103 | Operator::I32AtomicRmw16XchgU { .. }
1104 | Operator::I64AtomicRmw8XchgU { .. }
1105 | Operator::I64AtomicRmw16XchgU { .. }
1106 | Operator::I64AtomicRmw32XchgU { .. }
1107 | Operator::I32AtomicRmwCmpxchg { .. }
1108 | Operator::I64AtomicRmwCmpxchg { .. }
1109 | Operator::I32AtomicRmw8CmpxchgU { .. }
1110 | Operator::I32AtomicRmw16CmpxchgU { .. }
1111 | Operator::I64AtomicRmw8CmpxchgU { .. }
1112 | Operator::I64AtomicRmw16CmpxchgU { .. }
1113 | Operator::I64AtomicRmw32CmpxchgU { .. }
1114 | Operator::AtomicFence { .. } => {
1115 return Err(wasm_unsupported!("proposed thread operator {:?}", op));
1116 }
1117 Operator::MemoryCopy => {
1118 // The WebAssembly MVP only supports one linear memory and
1119 // wasmparser will ensure that the memory indices specified are
1120 // zero.
1121 let heap_index = MemoryIndex::from_u32(0);
1122 let heap = state.get_heap(builder.func, 0, environ)?;
1123 let len = state.pop1();
1124 let src = state.pop1();
1125 let dest = state.pop1();
1126 environ.translate_memory_copy(builder.cursor(), heap_index, heap, dest, src, len)?;
1127 }
1128 Operator::MemoryFill => {
1129 // The WebAssembly MVP only supports one linear memory and
1130 // wasmparser will ensure that the memory index specified is
1131 // zero.
1132 let heap_index = MemoryIndex::from_u32(0);
1133 let heap = state.get_heap(builder.func, 0, environ)?;
1134 let len = state.pop1();
1135 let val = state.pop1();
1136 let dest = state.pop1();
1137 environ.translate_memory_fill(builder.cursor(), heap_index, heap, dest, val, len)?;
1138 }
1139 Operator::MemoryInit { segment } => {
1140 // The WebAssembly MVP only supports one linear memory and
1141 // wasmparser will ensure that the memory index specified is
1142 // zero.
1143 let heap_index = MemoryIndex::from_u32(0);
1144 let heap = state.get_heap(builder.func, 0, environ)?;
1145 let len = state.pop1();
1146 let src = state.pop1();
1147 let dest = state.pop1();
1148 environ.translate_memory_init(
1149 builder.cursor(),
1150 heap_index,
1151 heap,
1152 *segment,
1153 dest,
1154 src,
1155 len,
1156 )?;
1157 }
1158 Operator::DataDrop { segment } => {
1159 environ.translate_data_drop(builder.cursor(), *segment)?;
1160 }
1161 Operator::TableSize { table: index } => {
1162 let table = state.get_table(builder.func, *index, environ)?;
1163 state.push1(environ.translate_table_size(
1164 builder.cursor(),
1165 TableIndex::from_u32(*index),
1166 table,
1167 )?);
1168 }
1169 Operator::TableGrow { table } => {
1170 let delta = state.pop1();
1171 let init_value = state.pop1();
1172 state.push1(environ.translate_table_grow(
1173 builder.cursor(),
1174 *table,
1175 delta,
1176 init_value,
1177 )?);
1178 }
1179 Operator::TableGet { table } => {
1180 let index = state.pop1();
1181 state.push1(environ.translate_table_get(builder.cursor(), *table, index)?);
1182 }
1183 Operator::TableSet { table } => {
1184 let value = state.pop1();
1185 let index = state.pop1();
1186 environ.translate_table_set(builder.cursor(), *table, value, index)?;
1187 }
1188 Operator::TableCopy {
1189 dst_table: dst_table_index,
1190 src_table: src_table_index,
1191 } => {
1192 let dst_table = state.get_table(builder.func, *dst_table_index, environ)?;
1193 let src_table = state.get_table(builder.func, *src_table_index, environ)?;
1194 let len = state.pop1();
1195 let src = state.pop1();
1196 let dest = state.pop1();
1197 environ.translate_table_copy(
1198 builder.cursor(),
1199 TableIndex::from_u32(*dst_table_index),
1200 dst_table,
1201 TableIndex::from_u32(*src_table_index),
1202 src_table,
1203 dest,
1204 src,
1205 len,
1206 )?;
1207 }
1208 Operator::TableFill { table } => {
1209 let len = state.pop1();
1210 let val = state.pop1();
1211 let dest = state.pop1();
1212 environ.translate_table_fill(builder.cursor(), *table, dest, val, len)?;
1213 }
1214 Operator::TableInit {
1215 segment,
1216 table: table_index,
1217 } => {
1218 let table = state.get_table(builder.func, *table_index, environ)?;
1219 let len = state.pop1();
1220 let src = state.pop1();
1221 let dest = state.pop1();
1222 environ.translate_table_init(
1223 builder.cursor(),
1224 *segment,
1225 TableIndex::from_u32(*table_index),
1226 table,
1227 dest,
1228 src,
1229 len,
1230 )?;
1231 }
1232 Operator::ElemDrop { segment } => {
1233 environ.translate_elem_drop(builder.cursor(), *segment)?;
1234 }
1235 Operator::V128Const { value } => {
1236 let data = value.bytes().to_vec().into();
1237 let handle = builder.func.dfg.constants.insert(data);
1238 let value = builder.ins().vconst(I8X16, handle);
1239 // the v128.const is typed in CLIF as an I8X16 but may be raw_bitcast to a different type before use
1240 state.push1(value)
1241 }
1242 Operator::I8x16Splat | Operator::I16x8Splat => {
1243 let reduced = builder.ins().ireduce(type_of(op).lane_type(), state.pop1());
1244 let splatted = builder.ins().splat(type_of(op), reduced);
1245 state.push1(splatted)
1246 }
1247 Operator::I32x4Splat
1248 | Operator::I64x2Splat
1249 | Operator::F32x4Splat
1250 | Operator::F64x2Splat => {
1251 let splatted = builder.ins().splat(type_of(op), state.pop1());
1252 state.push1(splatted)
1253 }
1254 Operator::V8x16LoadSplat {
1255 memarg: MemoryImmediate { flags: _, offset },
1256 }
1257 | Operator::V16x8LoadSplat {
1258 memarg: MemoryImmediate { flags: _, offset },
1259 }
1260 | Operator::V32x4LoadSplat {
1261 memarg: MemoryImmediate { flags: _, offset },
1262 }
1263 | Operator::V64x2LoadSplat {
1264 memarg: MemoryImmediate { flags: _, offset },
1265 } => {
1266 // TODO: For spec compliance, this is initially implemented as a combination of `load +
1267 // splat` but could be implemented eventually as a single instruction (`load_splat`).
1268 // See https://github.com/bytecodealliance/wasmtime/issues/1175.
1269 translate_load(
1270 *offset,
1271 ir::Opcode::Load,
1272 type_of(op).lane_type(),
1273 builder,
1274 state,
1275 environ,
1276 )?;
1277 let splatted = builder.ins().splat(type_of(op), state.pop1());
1278 state.push1(splatted)
1279 }
1280 Operator::I8x16ExtractLaneS { lane } | Operator::I16x8ExtractLaneS { lane } => {
1281 let vector = pop1_with_bitcast(state, type_of(op), builder);
1282 let extracted = builder.ins().extractlane(vector, lane.clone());
1283 state.push1(builder.ins().sextend(I32, extracted))
1284 }
1285 Operator::I8x16ExtractLaneU { lane } | Operator::I16x8ExtractLaneU { lane } => {
1286 let vector = pop1_with_bitcast(state, type_of(op), builder);
1287 let extracted = builder.ins().extractlane(vector, lane.clone());
1288 state.push1(builder.ins().uextend(I32, extracted));
1289 // On x86, PEXTRB zeroes the upper bits of the destination register of extractlane so
1290 // uextend could be elided; for now, uextend is needed for Cranelift's type checks to
1291 // work.
1292 }
1293 Operator::I32x4ExtractLane { lane }
1294 | Operator::I64x2ExtractLane { lane }
1295 | Operator::F32x4ExtractLane { lane }
1296 | Operator::F64x2ExtractLane { lane } => {
1297 let vector = pop1_with_bitcast(state, type_of(op), builder);
1298 state.push1(builder.ins().extractlane(vector, lane.clone()))
1299 }
1300 Operator::I8x16ReplaceLane { lane } | Operator::I16x8ReplaceLane { lane } => {
1301 let (vector, replacement) = state.pop2();
1302 let ty = type_of(op);
1303 let reduced = builder.ins().ireduce(ty.lane_type(), replacement);
1304 let vector = optionally_bitcast_vector(vector, ty, builder);
1305 state.push1(builder.ins().insertlane(vector, *lane, reduced))
1306 }
1307 Operator::I32x4ReplaceLane { lane }
1308 | Operator::I64x2ReplaceLane { lane }
1309 | Operator::F32x4ReplaceLane { lane }
1310 | Operator::F64x2ReplaceLane { lane } => {
1311 let (vector, replacement) = state.pop2();
1312 let vector = optionally_bitcast_vector(vector, type_of(op), builder);
1313 state.push1(builder.ins().insertlane(vector, *lane, replacement))
1314 }
1315 Operator::V8x16Shuffle { lanes, .. } => {
1316 let (a, b) = pop2_with_bitcast(state, I8X16, builder);
1317 let lanes = ConstantData::from(lanes.as_ref());
1318 let mask = builder.func.dfg.immediates.push(lanes);
1319 let shuffled = builder.ins().shuffle(a, b, mask);
1320 state.push1(shuffled)
1321 // At this point the original types of a and b are lost; users of this value (i.e. this
1322 // WASM-to-CLIF translator) may need to raw_bitcast for type-correctness. This is due
1323 // to WASM using the less specific v128 type for certain operations and more specific
1324 // types (e.g. i8x16) for others.
1325 }
1326 Operator::V8x16Swizzle => {
1327 let (a, b) = pop2_with_bitcast(state, I8X16, builder);
1328 state.push1(builder.ins().swizzle(I8X16, a, b))
1329 }
1330 Operator::I8x16Add | Operator::I16x8Add | Operator::I32x4Add | Operator::I64x2Add => {
1331 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1332 state.push1(builder.ins().iadd(a, b))
1333 }
1334 Operator::I8x16AddSaturateS | Operator::I16x8AddSaturateS => {
1335 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1336 state.push1(builder.ins().sadd_sat(a, b))
1337 }
1338 Operator::I8x16AddSaturateU | Operator::I16x8AddSaturateU => {
1339 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1340 state.push1(builder.ins().uadd_sat(a, b))
1341 }
1342 Operator::I8x16Sub | Operator::I16x8Sub | Operator::I32x4Sub | Operator::I64x2Sub => {
1343 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1344 state.push1(builder.ins().isub(a, b))
1345 }
1346 Operator::I8x16SubSaturateS | Operator::I16x8SubSaturateS => {
1347 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1348 state.push1(builder.ins().ssub_sat(a, b))
1349 }
1350 Operator::I8x16SubSaturateU | Operator::I16x8SubSaturateU => {
1351 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1352 state.push1(builder.ins().usub_sat(a, b))
1353 }
1354 Operator::I8x16MinS | Operator::I16x8MinS | Operator::I32x4MinS => {
1355 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1356 state.push1(builder.ins().imin(a, b))
1357 }
1358 Operator::I8x16MinU | Operator::I16x8MinU | Operator::I32x4MinU => {
1359 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1360 state.push1(builder.ins().umin(a, b))
1361 }
1362 Operator::I8x16MaxS | Operator::I16x8MaxS | Operator::I32x4MaxS => {
1363 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1364 state.push1(builder.ins().imax(a, b))
1365 }
1366 Operator::I8x16MaxU | Operator::I16x8MaxU | Operator::I32x4MaxU => {
1367 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1368 state.push1(builder.ins().umax(a, b))
1369 }
1370 Operator::I8x16RoundingAverageU | Operator::I16x8RoundingAverageU => {
1371 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1372 state.push1(builder.ins().avg_round(a, b))
1373 }
1374 Operator::I8x16Neg | Operator::I16x8Neg | Operator::I32x4Neg | Operator::I64x2Neg => {
1375 let a = pop1_with_bitcast(state, type_of(op), builder);
1376 state.push1(builder.ins().ineg(a))
1377 }
1378 Operator::I16x8Mul | Operator::I32x4Mul => {
1379 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1380 state.push1(builder.ins().imul(a, b))
1381 }
1382 Operator::V128Or => {
1383 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1384 state.push1(builder.ins().bor(a, b))
1385 }
1386 Operator::V128Xor => {
1387 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1388 state.push1(builder.ins().bxor(a, b))
1389 }
1390 Operator::V128And => {
1391 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1392 state.push1(builder.ins().band(a, b))
1393 }
1394 Operator::V128AndNot => {
1395 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1396 state.push1(builder.ins().band_not(a, b))
1397 }
1398 Operator::V128Not => {
1399 let a = state.pop1();
1400 state.push1(builder.ins().bnot(a));
1401 }
1402 Operator::I8x16Shl | Operator::I16x8Shl | Operator::I32x4Shl | Operator::I64x2Shl => {
1403 let (a, b) = state.pop2();
1404 let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
1405 let bitwidth = i64::from(type_of(op).lane_bits());
1406 // The spec expects to shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width
1407 // we do `b AND 15`; this means fewer instructions than `iconst + urem`.
1408 let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1);
1409 state.push1(builder.ins().ishl(bitcast_a, b_mod_bitwidth))
1410 }
1411 Operator::I8x16ShrU | Operator::I16x8ShrU | Operator::I32x4ShrU | Operator::I64x2ShrU => {
1412 let (a, b) = state.pop2();
1413 let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
1414 let bitwidth = i64::from(type_of(op).lane_bits());
1415 // The spec expects to shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width
1416 // we do `b AND 15`; this means fewer instructions than `iconst + urem`.
1417 let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1);
1418 state.push1(builder.ins().ushr(bitcast_a, b_mod_bitwidth))
1419 }
1420 Operator::I8x16ShrS | Operator::I16x8ShrS | Operator::I32x4ShrS => {
1421 let (a, b) = state.pop2();
1422 let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
1423 let bitwidth = i64::from(type_of(op).lane_bits());
1424 // The spec expects to shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width
1425 // we do `b AND 15`; this means fewer instructions than `iconst + urem`.
1426 let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1);
1427 state.push1(builder.ins().sshr(bitcast_a, b_mod_bitwidth))
1428 }
1429 Operator::V128Bitselect => {
1430 let (a, b, c) = state.pop3();
1431 let bitcast_a = optionally_bitcast_vector(a, I8X16, builder);
1432 let bitcast_b = optionally_bitcast_vector(b, I8X16, builder);
1433 let bitcast_c = optionally_bitcast_vector(c, I8X16, builder);
1434 // CLIF's `bitselect` takes the controlling mask as its first operand (Wasm pops it
1435 // last), and the types of all three operands must match (hence the bitcasts).
1436 state.push1(builder.ins().bitselect(bitcast_c, bitcast_a, bitcast_b))
1437 }
1438 Operator::I8x16AnyTrue
1439 | Operator::I16x8AnyTrue
1440 | Operator::I32x4AnyTrue
1441 | Operator::I64x2AnyTrue => {
1442 let a = pop1_with_bitcast(state, type_of(op), builder);
1443 let bool_result = builder.ins().vany_true(a);
1444 state.push1(builder.ins().bint(I32, bool_result))
1445 }
1446 Operator::I8x16AllTrue
1447 | Operator::I16x8AllTrue
1448 | Operator::I32x4AllTrue
1449 | Operator::I64x2AllTrue => {
1450 let a = pop1_with_bitcast(state, type_of(op), builder);
1451 let bool_result = builder.ins().vall_true(a);
1452 state.push1(builder.ins().bint(I32, bool_result))
1453 }
1454 Operator::I8x16Eq | Operator::I16x8Eq | Operator::I32x4Eq => {
1455 translate_vector_icmp(IntCC::Equal, type_of(op), builder, state)
1456 }
1457 Operator::I8x16Ne | Operator::I16x8Ne | Operator::I32x4Ne => {
1458 translate_vector_icmp(IntCC::NotEqual, type_of(op), builder, state)
1459 }
1460 Operator::I8x16GtS | Operator::I16x8GtS | Operator::I32x4GtS => {
1461 translate_vector_icmp(IntCC::SignedGreaterThan, type_of(op), builder, state)
1462 }
1463 Operator::I8x16LtS | Operator::I16x8LtS | Operator::I32x4LtS => {
1464 translate_vector_icmp(IntCC::SignedLessThan, type_of(op), builder, state)
1465 }
1466 Operator::I8x16GtU | Operator::I16x8GtU | Operator::I32x4GtU => {
1467 translate_vector_icmp(IntCC::UnsignedGreaterThan, type_of(op), builder, state)
1468 }
1469 Operator::I8x16LtU | Operator::I16x8LtU | Operator::I32x4LtU => {
1470 translate_vector_icmp(IntCC::UnsignedLessThan, type_of(op), builder, state)
1471 }
1472 Operator::I8x16GeS | Operator::I16x8GeS | Operator::I32x4GeS => {
1473 translate_vector_icmp(IntCC::SignedGreaterThanOrEqual, type_of(op), builder, state)
1474 }
1475 Operator::I8x16LeS | Operator::I16x8LeS | Operator::I32x4LeS => {
1476 translate_vector_icmp(IntCC::SignedLessThanOrEqual, type_of(op), builder, state)
1477 }
1478 Operator::I8x16GeU | Operator::I16x8GeU | Operator::I32x4GeU => translate_vector_icmp(
1479 IntCC::UnsignedGreaterThanOrEqual,
1480 type_of(op),
1481 builder,
1482 state,
1483 ),
1484 Operator::I8x16LeU | Operator::I16x8LeU | Operator::I32x4LeU => {
1485 translate_vector_icmp(IntCC::UnsignedLessThanOrEqual, type_of(op), builder, state)
1486 }
1487 Operator::F32x4Eq | Operator::F64x2Eq => {
1488 translate_vector_fcmp(FloatCC::Equal, type_of(op), builder, state)
1489 }
1490 Operator::F32x4Ne | Operator::F64x2Ne => {
1491 translate_vector_fcmp(FloatCC::NotEqual, type_of(op), builder, state)
1492 }
1493 Operator::F32x4Lt | Operator::F64x2Lt => {
1494 translate_vector_fcmp(FloatCC::LessThan, type_of(op), builder, state)
1495 }
1496 Operator::F32x4Gt | Operator::F64x2Gt => {
1497 translate_vector_fcmp(FloatCC::GreaterThan, type_of(op), builder, state)
1498 }
1499 Operator::F32x4Le | Operator::F64x2Le => {
1500 translate_vector_fcmp(FloatCC::LessThanOrEqual, type_of(op), builder, state)
1501 }
1502 Operator::F32x4Ge | Operator::F64x2Ge => {
1503 translate_vector_fcmp(FloatCC::GreaterThanOrEqual, type_of(op), builder, state)
1504 }
1505 Operator::F32x4Add | Operator::F64x2Add => {
1506 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1507 state.push1(builder.ins().fadd(a, b))
1508 }
1509 Operator::F32x4Sub | Operator::F64x2Sub => {
1510 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1511 state.push1(builder.ins().fsub(a, b))
1512 }
1513 Operator::F32x4Mul | Operator::F64x2Mul => {
1514 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1515 state.push1(builder.ins().fmul(a, b))
1516 }
1517 Operator::F32x4Div | Operator::F64x2Div => {
1518 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1519 state.push1(builder.ins().fdiv(a, b))
1520 }
1521 Operator::F32x4Max | Operator::F64x2Max => {
1522 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1523 state.push1(builder.ins().fmax(a, b))
1524 }
1525 Operator::F32x4Min | Operator::F64x2Min => {
1526 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1527 state.push1(builder.ins().fmin(a, b))
1528 }
1529 Operator::F32x4Sqrt | Operator::F64x2Sqrt => {
1530 let a = pop1_with_bitcast(state, type_of(op), builder);
1531 state.push1(builder.ins().sqrt(a))
1532 }
1533 Operator::F32x4Neg | Operator::F64x2Neg => {
1534 let a = pop1_with_bitcast(state, type_of(op), builder);
1535 state.push1(builder.ins().fneg(a))
1536 }
1537 Operator::F32x4Abs | Operator::F64x2Abs => {
1538 let a = pop1_with_bitcast(state, type_of(op), builder);
1539 state.push1(builder.ins().fabs(a))
1540 }
1541 Operator::F32x4ConvertI32x4S => {
1542 let a = pop1_with_bitcast(state, I32X4, builder);
1543 state.push1(builder.ins().fcvt_from_sint(F32X4, a))
1544 }
1545 Operator::I8x16Mul
1546 | Operator::I64x2Mul
1547 | Operator::I64x2ShrS
1548 | Operator::I32x4TruncSatF32x4S
1549 | Operator::I32x4TruncSatF32x4U
1550 | Operator::I64x2TruncSatF64x2S
1551 | Operator::I64x2TruncSatF64x2U
1552 | Operator::F32x4ConvertI32x4U
1553 | Operator::F64x2ConvertI64x2S
1554 | Operator::F64x2ConvertI64x2U { .. }
1555 | Operator::I8x16NarrowI16x8S { .. }
1556 | Operator::I8x16NarrowI16x8U { .. }
1557 | Operator::I16x8NarrowI32x4S { .. }
1558 | Operator::I16x8NarrowI32x4U { .. }
1559 | Operator::I16x8WidenLowI8x16S { .. }
1560 | Operator::I16x8WidenHighI8x16S { .. }
1561 | Operator::I16x8WidenLowI8x16U { .. }
1562 | Operator::I16x8WidenHighI8x16U { .. }
1563 | Operator::I32x4WidenLowI16x8S { .. }
1564 | Operator::I32x4WidenHighI16x8S { .. }
1565 | Operator::I32x4WidenLowI16x8U { .. }
1566 | Operator::I32x4WidenHighI16x8U { .. } => {
1567 return Err(wasm_unsupported!("proposed SIMD operator {:?}", op));
1568 }
1569 };
1570 Ok(())
1571 }
1572
1573 // Clippy warns us of some fields we are deliberately ignoring
1574 #[cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))]
1575 /// Deals with a Wasm instruction located in an unreachable portion of the code. Most of them
1576 /// are dropped, but special ones like `End` or `Else` signal the potential end of the unreachable
1577 /// portion, so the translation state must be updated accordingly.
1578 fn translate_unreachable_operator<FE: FuncEnvironment + ?Sized>(
1579 module_translation_state: &ModuleTranslationState,
1580 op: &Operator,
1581 builder: &mut FunctionBuilder,
1582 state: &mut FuncTranslationState,
1583 environ: &mut FE,
1584 ) -> WasmResult<()> {
1585 debug_assert!(!state.reachable);
1586 match *op {
1587 Operator::If { ty } => {
1588 // Push a placeholder control stack entry. The if isn't reachable,
1589 // so we don't have any branches anywhere.
1590 state.push_if(
1591 ir::Block::reserved_value(),
1592 ElseData::NoElse {
1593 branch_inst: ir::Inst::reserved_value(),
1594 },
1595 0,
1596 0,
1597 ty,
1598 );
1599 }
1600 Operator::Loop { ty: _ } | Operator::Block { ty: _ } => {
1601 state.push_block(ir::Block::reserved_value(), 0, 0);
1602 }
1603 Operator::Else => {
1604 let i = state.control_stack.len() - 1;
1605 match state.control_stack[i] {
1606 ControlStackFrame::If {
1607 ref else_data,
1608 head_is_reachable,
1609 ref mut consequent_ends_reachable,
1610 blocktype,
1611 ..
1612 } => {
1613 debug_assert!(consequent_ends_reachable.is_none());
1614 *consequent_ends_reachable = Some(state.reachable);
1615
1616 if head_is_reachable {
1617 // We have a branch from the head of the `if` to the `else`.
1618 state.reachable = true;
1619
1620 let else_block = match *else_data {
1621 ElseData::NoElse { branch_inst } => {
1622 let (params, _results) =
1623 blocktype_params_results(module_translation_state, blocktype)?;
1624 let else_block = block_with_params(builder, params, environ)?;
1625
1626 // We change the target of the branch instruction.
1627 builder.change_jump_destination(branch_inst, else_block);
1628 builder.seal_block(else_block);
1629 else_block
1630 }
1631 ElseData::WithElse { else_block } => else_block,
1632 };
1633
1634 builder.switch_to_block(else_block);
1635
1636 // Again, no need to push the parameters for the `else`,
1637 // since we already did when we saw the original `if`. See
1638 // the comment for translating `Operator::Else` in
1639 // `translate_operator` for details.
1640 }
1641 }
1642 _ => unreachable!(),
1643 }
1644 }
1645 Operator::End => {
1646 let stack = &mut state.stack;
1647 let control_stack = &mut state.control_stack;
1648 let frame = control_stack.pop().unwrap();
1649
1650 // Now we have to truncate the value stack back to the size it had when we
1651 // entered this frame, dropping any values pushed by untranslated unreachable code.
1652 stack.truncate(frame.original_stack_size());
1653
1654 let reachable_anyway = match frame {
1655 // If it is a loop we also have to seal the body loop block
1656 ControlStackFrame::Loop { header, .. } => {
1657 builder.seal_block(header);
1658 // And loops can't have branches to the end.
1659 false
1660 }
1661 // If we never set `consequent_ends_reachable` then that means
1662 // we are finishing the consequent now, and there was no
1663 // `else`. Whether the following block is reachable depends only
1664 // on if the head was reachable.
1665 ControlStackFrame::If {
1666 head_is_reachable,
1667 consequent_ends_reachable: None,
1668 ..
1669 } => head_is_reachable,
1670 // Since we are only in this function when in unreachable code,
1671 // we know that the alternative just ended unreachable. Whether
1672 // the following block is reachable depends on if the consequent
1673 // ended reachable or not.
1674 ControlStackFrame::If {
1675 head_is_reachable,
1676 consequent_ends_reachable: Some(consequent_ends_reachable),
1677 ..
1678 } => head_is_reachable && consequent_ends_reachable,
1679 // All other control constructs are already handled.
1680 _ => false,
1681 };
1682
1683 if frame.exit_is_branched_to() || reachable_anyway {
1684 builder.switch_to_block(frame.following_code());
1685 builder.seal_block(frame.following_code());
1686
1687 // And add the return values of the block, now that we know the
1688 // following block is reachable.
1689 stack.extend_from_slice(builder.block_params(frame.following_code()));
1690 state.reachable = true;
1691 }
1692 }
1693 _ => {
1694 // We don't translate because this is unreachable code
1695 }
1696 }
1697
1698 Ok(())
1699 }
1700
1701 /// Get the address+offset to use for a heap access.
1702 fn get_heap_addr(
1703 heap: ir::Heap,
1704 addr32: ir::Value,
1705 offset: u32,
1706 width: u32,
1707 addr_ty: Type,
1708 builder: &mut FunctionBuilder,
1709 ) -> (ir::Value, i32) {
1710 let offset_guard_size: u64 = builder.func.heaps[heap].offset_guard_size.into();
1711
1712 // How exactly the bounds check is performed here and what it's performed
1713 // on is a bit tricky. Generally we want to rely on access violations (e.g.
1714 // segfaults) to generate traps since that means we don't have to bounds
1715 // check anything explicitly.
1716 //
1717 // If we don't have a guard page of unmapped memory, though, then we can't
1718 // rely on this trapping behavior through segfaults. Instead we need to
1719 // bounds-check the entire memory access here which is everything from
1720 // `addr32 + offset` to `addr32 + offset + width` (not inclusive). In this
1721 // scenario our adjusted offset that we're checking is `offset + width`.
1722 //
1723 // If we have a guard page, however, then we can perform a further
1724 // optimization of the generated code by only checking multiples of the
1725 // offset-guard size to be more CSE-friendly. Knowing that we have at least
1726 // one page of guard region, we can disregard the `width` since we know it's
1727 // always less than one page. The bounds check then covers only the first
1728 // checked byte: either it passes, in which case the actual access is
1729 // guaranteed to be in bounds or to fault in the guard page, or the bounds
1730 // check itself fails and traps. For now we assert that the width is
1731 // reasonably small; this assumption can be revisited if we get larger widths.
1732 //
1733 // Put another way we can say, where `y < offset_guard_size`:
1734 //
1735 // n * offset_guard_size + y = offset
1736 //
1737 // We'll then pass `n * offset_guard_size` as the bounds check value. If
1738 // this traps then our `offset` would have trapped anyway. If this check
1739 // passes we know
1740 //
1741 // addr32 + n * offset_guard_size < bound
1742 //
1743 // which means
1744 //
1745 // addr32 + n * offset_guard_size + y < bound + offset_guard_size
1746 //
1747 // because `y < offset_guard_size`, which then means:
1748 //
1749 // addr32 + offset < bound + offset_guard_size
1750 //
1751 // Since we know that the guard-size bytes are all unmapped, we're
1752 // guaranteed that `offset` and the `width` bytes after it are either
1753 // in-bounds or will hit the guard page, meaning we'll get the desired
1754 // semantics we want.
1755 //
1756 // As one final comment on the bits with the guard size here, another goal
1757 // of this is to hit an optimization in `heap_addr` where if the heap size
1758 // minus the offset is >= 4GB then bounds checks are 100% eliminated. This
1759 // means that with huge guard regions (e.g. our 2GB default) most adjusted
1760 // offsets we're checking here are zero. This means that we'll hit the fast
1761 // path and emit zero conditional traps for bounds checks.
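// As a concrete illustration of the guard-page case below (hypothetical numbers): with an
// offset-guard size of 0x1000 and a Wasm offset immediate of 0x1234, the adjusted offset is
// 0x1234 / 0x1000 * 0x1000 = 0x1000, so `heap_addr` only has to verify that
// `addr32 + 0x1000` is in bounds; the remaining `y = 0x234` bytes (plus the access width)
// are covered by the guard region.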
1762 let adjusted_offset = if offset_guard_size == 0 {
1763 u64::from(offset) + u64::from(width)
1764 } else {
1765 assert!(width < 1024);
1766 cmp::max(u64::from(offset) / offset_guard_size * offset_guard_size, 1)
1767 };
1768 debug_assert!(adjusted_offset > 0); // want to bounds check at least 1 byte
1769 let check_size = u32::try_from(adjusted_offset).unwrap_or(u32::MAX);
1770 let base = builder.ins().heap_addr(addr_ty, heap, addr32, check_size);
1771
1772 // Native load/store instructions take a signed `Offset32` immediate, so adjust the base
1773 // pointer if necessary.
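// For example (hypothetical value): a Wasm offset of 0x8000_0000 doesn't fit in an
// `Offset32`, so `i32::MAX + 1 = 0x8000_0000` is folded into the base pointer and the
// residual immediate becomes 0.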
1774 if offset > i32::MAX as u32 {
1775 // Offset doesn't fit in the load/store instruction.
1776 let adj = builder.ins().iadd_imm(base, i64::from(i32::MAX) + 1);
1777 (adj, (offset - (i32::MAX as u32 + 1)) as i32)
1778 } else {
1779 (base, offset as i32)
1780 }
1781 }
1782
1783 /// Prepare for a load; factors out common functionality between load and load_extend operations.
1784 fn prepare_load<FE: FuncEnvironment + ?Sized>(
1785 offset: u32,
1786 loaded_bytes: u32,
1787 builder: &mut FunctionBuilder,
1788 state: &mut FuncTranslationState,
1789 environ: &mut FE,
1790 ) -> WasmResult<(MemFlags, Value, Offset32)> {
1791 let addr32 = state.pop1();
1792
1793 // We don't yet support multiple linear memories.
1794 let heap = state.get_heap(builder.func, 0, environ)?;
1795 let (base, offset) = get_heap_addr(
1796 heap,
1797 addr32,
1798 offset,
1799 loaded_bytes,
1800 environ.pointer_type(),
1801 builder,
1802 );
1803
1804 // Note that we don't set `is_aligned` here, even if the load instruction's
1805 // alignment immediate says it's aligned, because WebAssembly's immediate
1806 // field is just a hint, while Cranelift's aligned flag needs a guarantee.
1807 let flags = MemFlags::new();
1808
1809 Ok((flags, base, offset.into()))
1810 }
1811
1812 /// Translate a load instruction.
1813 fn translate_load<FE: FuncEnvironment + ?Sized>(
1814 offset: u32,
1815 opcode: ir::Opcode,
1816 result_ty: Type,
1817 builder: &mut FunctionBuilder,
1818 state: &mut FuncTranslationState,
1819 environ: &mut FE,
1820 ) -> WasmResult<()> {
1821 let (flags, base, offset) = prepare_load(
1822 offset,
1823 mem_op_size(opcode, result_ty),
1824 builder,
1825 state,
1826 environ,
1827 )?;
1828 let (load, dfg) = builder.ins().Load(opcode, result_ty, flags, offset, base);
1829 state.push1(dfg.first_result(load));
1830 Ok(())
1831 }
1832
1833 /// Translate a store instruction.
1834 fn translate_store<FE: FuncEnvironment + ?Sized>(
1835 offset: u32,
1836 opcode: ir::Opcode,
1837 builder: &mut FunctionBuilder,
1838 state: &mut FuncTranslationState,
1839 environ: &mut FE,
1840 ) -> WasmResult<()> {
1841 let (addr32, val) = state.pop2();
1842 let val_ty = builder.func.dfg.value_type(val);
1843
1844 // We don't yet support multiple linear memories.
1845 let heap = state.get_heap(builder.func, 0, environ)?;
1846 let (base, offset) = get_heap_addr(
1847 heap,
1848 addr32,
1849 offset,
1850 mem_op_size(opcode, val_ty),
1851 environ.pointer_type(),
1852 builder,
1853 );
1854 // See the comments in `translate_load` about the flags.
1855 let flags = MemFlags::new();
1856 builder
1857 .ins()
1858 .Store(opcode, val_ty, flags, offset.into(), val, base);
1859 Ok(())
1860 }
1861
1862 fn mem_op_size(opcode: ir::Opcode, ty: Type) -> u32 {
1863 match opcode {
1864 ir::Opcode::Istore8 | ir::Opcode::Sload8 | ir::Opcode::Uload8 => 1,
1865 ir::Opcode::Istore16 | ir::Opcode::Sload16 | ir::Opcode::Uload16 => 2,
1866 ir::Opcode::Istore32 | ir::Opcode::Sload32 | ir::Opcode::Uload32 => 4,
1867 ir::Opcode::Store | ir::Opcode::Load => ty.bytes(),
1868 _ => panic!("unknown size of mem op for {:?}", opcode),
1869 }
1870 }
1871
1872 fn translate_icmp(cc: IntCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) {
1873 let (arg0, arg1) = state.pop2();
1874 let val = builder.ins().icmp(cc, arg0, arg1);
1875 state.push1(builder.ins().bint(I32, val));
1876 }
1877
1878 fn translate_vector_icmp(
1879 cc: IntCC,
1880 needed_type: Type,
1881 builder: &mut FunctionBuilder,
1882 state: &mut FuncTranslationState,
1883 ) {
1884 let (a, b) = state.pop2();
1885 let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
1886 let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
1887 state.push1(builder.ins().icmp(cc, bitcast_a, bitcast_b))
1888 }
1889
1890 fn translate_fcmp(cc: FloatCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) {
1891 let (arg0, arg1) = state.pop2();
1892 let val = builder.ins().fcmp(cc, arg0, arg1);
1893 state.push1(builder.ins().bint(I32, val));
1894 }
1895
1896 fn translate_vector_fcmp(
1897 cc: FloatCC,
1898 needed_type: Type,
1899 builder: &mut FunctionBuilder,
1900 state: &mut FuncTranslationState,
1901 ) {
1902 let (a, b) = state.pop2();
1903 let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
1904 let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
1905 state.push1(builder.ins().fcmp(cc, bitcast_a, bitcast_b))
1906 }
1907
1908 fn translate_br_if(
1909 relative_depth: u32,
1910 builder: &mut FunctionBuilder,
1911 state: &mut FuncTranslationState,
1912 ) {
1913 let val = state.pop1();
1914 let (br_destination, inputs) = translate_br_if_args(relative_depth, state);
1915
1916 // Bitcast any vector arguments to their default type, I8X16, before jumping.
1917 let destination_types = builder.func.dfg.block_param_types(br_destination);
1918 bitcast_arguments(inputs, &destination_types[..inputs.len()], builder);
1919
1920 builder.ins().brnz(val, br_destination, inputs);
1921
1922 let next_block = builder.create_block();
1923 builder.ins().jump(next_block, &[]);
1924 builder.seal_block(next_block); // The only predecessor is the current block.
1925 builder.switch_to_block(next_block);
1926 }
1927
1928 fn translate_br_if_args(
1929 relative_depth: u32,
1930 state: &mut FuncTranslationState,
1931 ) -> (ir::Block, &mut [ir::Value]) {
1932 let i = state.control_stack.len() - 1 - (relative_depth as usize);
1933 let (return_count, br_destination) = {
1934 let frame = &mut state.control_stack[i];
1935 // The values returned by the branch are still available for the reachable
1936 // code that comes after it
1937 frame.set_branched_to_exit();
1938 let return_count = if frame.is_loop() {
1939 frame.num_param_values()
1940 } else {
1941 frame.num_return_values()
1942 };
1943 (return_count, frame.br_destination())
1944 };
1945 let inputs = state.peekn_mut(return_count);
1946 (br_destination, inputs)
1947 }
1948
1949 /// Determine the returned value type of a WebAssembly operator
1950 fn type_of(operator: &Operator) -> Type {
1951 match operator {
1952 Operator::V128Load { .. }
1953 | Operator::V128Store { .. }
1954 | Operator::V128Const { .. }
1955 | Operator::V128Not
1956 | Operator::V128And
1957 | Operator::V128AndNot
1958 | Operator::V128Or
1959 | Operator::V128Xor
1960 | Operator::V128Bitselect => I8X16, // default type representing V128
1961
1962 Operator::V8x16Shuffle { .. }
1963 | Operator::I8x16Splat
1964 | Operator::V8x16LoadSplat { .. }
1965 | Operator::I8x16ExtractLaneS { .. }
1966 | Operator::I8x16ExtractLaneU { .. }
1967 | Operator::I8x16ReplaceLane { .. }
1968 | Operator::I8x16Eq
1969 | Operator::I8x16Ne
1970 | Operator::I8x16LtS
1971 | Operator::I8x16LtU
1972 | Operator::I8x16GtS
1973 | Operator::I8x16GtU
1974 | Operator::I8x16LeS
1975 | Operator::I8x16LeU
1976 | Operator::I8x16GeS
1977 | Operator::I8x16GeU
1978 | Operator::I8x16Neg
1979 | Operator::I8x16AnyTrue
1980 | Operator::I8x16AllTrue
1981 | Operator::I8x16Shl
1982 | Operator::I8x16ShrS
1983 | Operator::I8x16ShrU
1984 | Operator::I8x16Add
1985 | Operator::I8x16AddSaturateS
1986 | Operator::I8x16AddSaturateU
1987 | Operator::I8x16Sub
1988 | Operator::I8x16SubSaturateS
1989 | Operator::I8x16SubSaturateU
1990 | Operator::I8x16MinS
1991 | Operator::I8x16MinU
1992 | Operator::I8x16MaxS
1993 | Operator::I8x16MaxU
1994 | Operator::I8x16RoundingAverageU
1995 | Operator::I8x16Mul => I8X16,
1996
1997 Operator::I16x8Splat
1998 | Operator::V16x8LoadSplat { .. }
1999 | Operator::I16x8ExtractLaneS { .. }
2000 | Operator::I16x8ExtractLaneU { .. }
2001 | Operator::I16x8ReplaceLane { .. }
2002 | Operator::I16x8Eq
2003 | Operator::I16x8Ne
2004 | Operator::I16x8LtS
2005 | Operator::I16x8LtU
2006 | Operator::I16x8GtS
2007 | Operator::I16x8GtU
2008 | Operator::I16x8LeS
2009 | Operator::I16x8LeU
2010 | Operator::I16x8GeS
2011 | Operator::I16x8GeU
2012 | Operator::I16x8Neg
2013 | Operator::I16x8AnyTrue
2014 | Operator::I16x8AllTrue
2015 | Operator::I16x8Shl
2016 | Operator::I16x8ShrS
2017 | Operator::I16x8ShrU
2018 | Operator::I16x8Add
2019 | Operator::I16x8AddSaturateS
2020 | Operator::I16x8AddSaturateU
2021 | Operator::I16x8Sub
2022 | Operator::I16x8SubSaturateS
2023 | Operator::I16x8SubSaturateU
2024 | Operator::I16x8MinS
2025 | Operator::I16x8MinU
2026 | Operator::I16x8MaxS
2027 | Operator::I16x8MaxU
2028 | Operator::I16x8RoundingAverageU
2029 | Operator::I16x8Mul => I16X8,
2030
2031 Operator::I32x4Splat
2032 | Operator::V32x4LoadSplat { .. }
2033 | Operator::I32x4ExtractLane { .. }
2034 | Operator::I32x4ReplaceLane { .. }
2035 | Operator::I32x4Eq
2036 | Operator::I32x4Ne
2037 | Operator::I32x4LtS
2038 | Operator::I32x4LtU
2039 | Operator::I32x4GtS
2040 | Operator::I32x4GtU
2041 | Operator::I32x4LeS
2042 | Operator::I32x4LeU
2043 | Operator::I32x4GeS
2044 | Operator::I32x4GeU
2045 | Operator::I32x4Neg
2046 | Operator::I32x4AnyTrue
2047 | Operator::I32x4AllTrue
2048 | Operator::I32x4Shl
2049 | Operator::I32x4ShrS
2050 | Operator::I32x4ShrU
2051 | Operator::I32x4Add
2052 | Operator::I32x4Sub
2053 | Operator::I32x4Mul
2054 | Operator::I32x4MinS
2055 | Operator::I32x4MinU
2056 | Operator::I32x4MaxS
2057 | Operator::I32x4MaxU
2058 | Operator::F32x4ConvertI32x4S
2059 | Operator::F32x4ConvertI32x4U => I32X4,
2060
2061 Operator::I64x2Splat
2062 | Operator::V64x2LoadSplat { .. }
2063 | Operator::I64x2ExtractLane { .. }
2064 | Operator::I64x2ReplaceLane { .. }
2065 | Operator::I64x2Neg
2066 | Operator::I64x2AnyTrue
2067 | Operator::I64x2AllTrue
2068 | Operator::I64x2Shl
2069 | Operator::I64x2ShrS
2070 | Operator::I64x2ShrU
2071 | Operator::I64x2Add
2072 | Operator::I64x2Sub
2073 | Operator::F64x2ConvertI64x2S
2074 | Operator::F64x2ConvertI64x2U => I64X2,
2075
2076 Operator::F32x4Splat
2077 | Operator::F32x4ExtractLane { .. }
2078 | Operator::F32x4ReplaceLane { .. }
2079 | Operator::F32x4Eq
2080 | Operator::F32x4Ne
2081 | Operator::F32x4Lt
2082 | Operator::F32x4Gt
2083 | Operator::F32x4Le
2084 | Operator::F32x4Ge
2085 | Operator::F32x4Abs
2086 | Operator::F32x4Neg
2087 | Operator::F32x4Sqrt
2088 | Operator::F32x4Add
2089 | Operator::F32x4Sub
2090 | Operator::F32x4Mul
2091 | Operator::F32x4Div
2092 | Operator::F32x4Min
2093 | Operator::F32x4Max
2094 | Operator::I32x4TruncSatF32x4S
2095 | Operator::I32x4TruncSatF32x4U => F32X4,
2096
2097 Operator::F64x2Splat
2098 | Operator::F64x2ExtractLane { .. }
2099 | Operator::F64x2ReplaceLane { .. }
2100 | Operator::F64x2Eq
2101 | Operator::F64x2Ne
2102 | Operator::F64x2Lt
2103 | Operator::F64x2Gt
2104 | Operator::F64x2Le
2105 | Operator::F64x2Ge
2106 | Operator::F64x2Abs
2107 | Operator::F64x2Neg
2108 | Operator::F64x2Sqrt
2109 | Operator::F64x2Add
2110 | Operator::F64x2Sub
2111 | Operator::F64x2Mul
2112 | Operator::F64x2Div
2113 | Operator::F64x2Min
2114 | Operator::F64x2Max
2115 | Operator::I64x2TruncSatF64x2S
2116 | Operator::I64x2TruncSatF64x2U => F64X2,
2117
2118 _ => unimplemented!(
2119 "Currently only SIMD instructions are mapped to their return type; the \
2120 following instruction is not mapped: {:?}",
2121 operator
2122 ),
2123 }
2124 }
2125
2126 /// Some SIMD operations only operate on I8X16 in CLIF; this will convert them to that type by
2127 /// adding a raw_bitcast if necessary.
2128 pub fn optionally_bitcast_vector(
2129 value: Value,
2130 needed_type: Type,
2131 builder: &mut FunctionBuilder,
2132 ) -> Value {
2133 if builder.func.dfg.value_type(value) != needed_type {
2134 builder.ins().raw_bitcast(needed_type, value)
2135 } else {
2136 value
2137 }
2138 }
2139
2140 /// A helper for popping and bitcasting a single value; since SIMD values can lose their type by
2141 /// using v128 (i.e. CLIF's I8x16) we must re-type the values using a bitcast to avoid CLIF
2142 /// typing issues.
2143 fn pop1_with_bitcast(
2144 state: &mut FuncTranslationState,
2145 needed_type: Type,
2146 builder: &mut FunctionBuilder,
2147 ) -> Value {
2148 optionally_bitcast_vector(state.pop1(), needed_type, builder)
2149 }
2150
2151 /// A helper for popping and bitcasting two values; since SIMD values can lose their type by
2152 /// using v128 (i.e. CLIF's I8x16) we must re-type the values using a bitcast to avoid CLIF
2153 /// typing issues.
2154 fn pop2_with_bitcast(
2155 state: &mut FuncTranslationState,
2156 needed_type: Type,
2157 builder: &mut FunctionBuilder,
2158 ) -> (Value, Value) {
2159 let (a, b) = state.pop2();
2160 let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
2161 let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
2162 (bitcast_a, bitcast_b)
2163 }
2164
2165 /// A helper for bitcasting a sequence of values (e.g. function arguments). If a value is a
2166 /// vector type that does not match its expected type, this will modify the value in place to point
2167 /// to the result of a `raw_bitcast`. This conversion is necessary to translate Wasm code that
2168 /// uses `V128` as function parameters (or implicitly in block parameters) and still use specific
2169 /// CLIF types (e.g. `I32X4`) in the function body.
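/// For example, a hypothetical block whose Wasm parameter is `V128` gets the default CLIF
/// parameter type `I8X16`, so an `F32X4` argument must be `raw_bitcast` to `I8X16` before
/// the jump.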
2170 pub fn bitcast_arguments(
2171 arguments: &mut [Value],
2172 expected_types: &[Type],
2173 builder: &mut FunctionBuilder,
2174 ) {
2175 assert_eq!(arguments.len(), expected_types.len());
2176 for (i, t) in expected_types.iter().enumerate() {
2177 if t.is_vector() {
2178 assert!(
2179 builder.func.dfg.value_type(arguments[i]).is_vector(),
2180 "unexpected type mismatch: expected {}, argument {} was actually of type {}",
2181 t,
2182 arguments[i],
2183 builder.func.dfg.value_type(arguments[i])
2184 );
2185 arguments[i] = optionally_bitcast_vector(arguments[i], *t, builder)
2186 }
2187 }
2188 }
2189
2190 /// A helper to extract the `Type` of each parameter in `params`, but only for
2191 /// the parameters for which `is_wasm` returns true; typically paired with
2192 /// `is_wasm_return` or `is_wasm_parameter`.
2193 pub fn wasm_param_types(params: &[ir::AbiParam], is_wasm: impl Fn(usize) -> bool) -> Vec<Type> {
2194 let mut ret = Vec::with_capacity(params.len());
2195 for (i, param) in params.iter().enumerate() {
2196 if is_wasm(i) {
2197 ret.push(param.value_type);
2198 }
2199 }
2200 ret
2201 }
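
#[cfg(test)]
mod tests {
    use super::mem_op_size;
    use cranelift_codegen::ir::{self, types::*};

    // A minimal sketch of a unit test for the private `mem_op_size` helper above: extending
    // loads and truncating stores report their narrowed access width, while plain loads and
    // stores report the full width of the accessed type. The opcode/type pairings here are
    // illustrative only, not taken from any particular Wasm input.
    #[test]
    fn mem_op_size_reports_access_width() {
        assert_eq!(mem_op_size(ir::Opcode::Sload8, I32), 1);
        assert_eq!(mem_op_size(ir::Opcode::Istore16, I64), 2);
        assert_eq!(mem_op_size(ir::Opcode::Uload32, I64), 4);
        assert_eq!(mem_op_size(ir::Opcode::Load, F64), 8);
        assert_eq!(mem_op_size(ir::Opcode::Store, I8X16), 16);
    }
}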
2202