1 //! This module contains the bulk of the interesting code performing the translation between
2 //! WebAssembly and Cranelift IR.
3 //!
4 //! The translation is done in one pass, opcode by opcode. Two main data structures are used during
5 //! code translation: the value stack and the control stack. The value stack mimics the execution
6 //! of the WebAssembly stack machine: each instruction result is pushed onto the stack and
7 //! instruction arguments are popped off the stack. Similarly, when encountering a control flow
8 //! block, it is pushed onto the control stack and popped off when encountering the corresponding
9 //! `End`.
10 //!
11 //! Another data structure, the translation state, records whether the code currently being
12 //! translated is reachable and whether a return must be inserted at the end of the function.
13 //!
14 //! Some of the WebAssembly instructions need information about the environment for which they
15 //! are being translated:
16 //!
17 //! - the loads and stores need the memory base address;
18 //! - the `get_global` and `set_global` instructions depend on how the globals are implemented;
19 //! - `memory.size` and `memory.grow` are runtime functions;
20 //! - `call_indirect` has to translate the function index into the address of the function to
21 //! call.
22 //!
23 //! That is why `translate_function_body` takes an object implementing the `FuncEnvironment`
24 //! trait as argument.
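//!
//! As a minimal illustration of how the value stack evolves (a sketch of the input, not actual
//! compiler output), a body such as:
//!
//! ```text
//! (func (param i32 i32) (result i32)
//!   local.get 0   ;; push the first parameter onto the value stack
//!   local.get 1   ;; push the second parameter
//!   i32.add)      ;; pop both operands, push the result of the corresponding `iadd`
//! ```
//!
//! is translated operator by operator, popping and pushing exactly as the stack machine would.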
25 use super::{hash_map, HashMap};
26 use crate::environ::{FuncEnvironment, GlobalVariable, ReturnMode, WasmResult};
27 use crate::state::{ControlStackFrame, ElseData, FuncTranslationState, ModuleTranslationState};
28 use crate::translation_utils::{
29 block_with_params, blocktype_params_results, f32_translation, f64_translation,
30 };
31 use crate::translation_utils::{FuncIndex, GlobalIndex, MemoryIndex, SignatureIndex, TableIndex};
32 use crate::wasm_unsupported;
33 use core::{i32, u32};
34 use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
35 use cranelift_codegen::ir::immediates::Offset32;
36 use cranelift_codegen::ir::types::*;
37 use cranelift_codegen::ir::{
38 self, ConstantData, InstBuilder, JumpTableData, MemFlags, Value, ValueLabel,
39 };
40 use cranelift_codegen::packed_option::ReservedValue;
41 use cranelift_frontend::{FunctionBuilder, Variable};
42 use std::cmp;
43 use std::convert::TryFrom;
44 use std::vec::Vec;
45 use wasmparser::{MemoryImmediate, Operator};
46
47 // Clippy warns about "flags: _" but it's important to document that the flags field is ignored.
48 #[cfg_attr(
49 feature = "cargo-clippy",
50 allow(clippy::unneeded_field_pattern, clippy::cognitive_complexity)
51 )]
52 /// Translates a single wasm operator into Cranelift IR instructions, updating the value and
53 /// control stacks in `state` as it goes.
54 pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
55 module_translation_state: &ModuleTranslationState,
56 op: &Operator,
57 builder: &mut FunctionBuilder,
58 state: &mut FuncTranslationState,
59 environ: &mut FE,
60 ) -> WasmResult<()> {
61 if !state.reachable {
62 translate_unreachable_operator(module_translation_state, &op, builder, state, environ)?;
63 return Ok(());
64 }
65
66 // This big match treats all Wasm code operators.
67 match op {
68 /********************************** Locals ****************************************
69         * `get_local` and `set_local` operate on locals treated as non-SSA variables; they
70         * disappear completely from the Cranelift IR thanks to the SSA builder.
71 ***********************************************************************************/
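        // As a sketch of the mapping (not literal output): `local.get 0` becomes a `use_var` of
        // the Cranelift variable allocated for local 0, and `local.set 0` becomes a `def_var`;
        // the SSA builder then resolves these into plain values and block parameters.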
72 Operator::LocalGet { local_index } => {
73 let val = builder.use_var(Variable::with_u32(*local_index));
74 state.push1(val);
75 let label = ValueLabel::from_u32(*local_index);
76 builder.set_val_label(val, label);
77 }
78 Operator::LocalSet { local_index } => {
79 let mut val = state.pop1();
80
81 // Ensure SIMD values are cast to their default Cranelift type, I8x16.
82 let ty = builder.func.dfg.value_type(val);
83 if ty.is_vector() {
84 val = optionally_bitcast_vector(val, I8X16, builder);
85 }
86
87 builder.def_var(Variable::with_u32(*local_index), val);
88 let label = ValueLabel::from_u32(*local_index);
89 builder.set_val_label(val, label);
90 }
91 Operator::LocalTee { local_index } => {
92 let mut val = state.peek1();
93
94 // Ensure SIMD values are cast to their default Cranelift type, I8x16.
95 let ty = builder.func.dfg.value_type(val);
96 if ty.is_vector() {
97 val = optionally_bitcast_vector(val, I8X16, builder);
98 }
99
100 builder.def_var(Variable::with_u32(*local_index), val);
101 let label = ValueLabel::from_u32(*local_index);
102 builder.set_val_label(val, label);
103 }
104 /********************************** Globals ****************************************
105 * `get_global` and `set_global` are handled by the environment.
106 ***********************************************************************************/
107 Operator::GlobalGet { global_index } => {
108 let val = match state.get_global(builder.func, *global_index, environ)? {
109 GlobalVariable::Const(val) => val,
110 GlobalVariable::Memory { gv, offset, ty } => {
111 let addr = builder.ins().global_value(environ.pointer_type(), gv);
112 let flags = ir::MemFlags::trusted();
113 builder.ins().load(ty, flags, addr, offset)
114 }
115 GlobalVariable::Custom => environ.translate_custom_global_get(
116 builder.cursor(),
117 GlobalIndex::from_u32(*global_index),
118 )?,
119 };
120 state.push1(val);
121 }
122 Operator::GlobalSet { global_index } => {
123 match state.get_global(builder.func, *global_index, environ)? {
124 GlobalVariable::Const(_) => panic!("global #{} is a constant", *global_index),
125 GlobalVariable::Memory { gv, offset, ty } => {
126 let addr = builder.ins().global_value(environ.pointer_type(), gv);
127 let flags = ir::MemFlags::trusted();
128 let mut val = state.pop1();
129 // Ensure SIMD values are cast to their default Cranelift type, I8x16.
130 if ty.is_vector() {
131 val = optionally_bitcast_vector(val, I8X16, builder);
132 }
133 debug_assert_eq!(ty, builder.func.dfg.value_type(val));
134 builder.ins().store(flags, val, addr, offset);
135 }
136 GlobalVariable::Custom => {
137 let val = state.pop1();
138 environ.translate_custom_global_set(
139 builder.cursor(),
140 GlobalIndex::from_u32(*global_index),
141 val,
142 )?;
143 }
144 }
145 }
146 /********************************* Stack misc ***************************************
147 * `drop`, `nop`, `unreachable` and `select`.
148 ***********************************************************************************/
149 Operator::Drop => {
150 state.pop1();
151 }
152 Operator::Select => {
153 let (arg1, arg2, cond) = state.pop3();
154 state.push1(builder.ins().select(cond, arg1, arg2));
155 }
156 Operator::TypedSelect { ty: _ } => {
157 // We ignore the explicit type parameter as it is only needed for
158 // validation, which we require to have been performed before
159 // translation.
160 let (arg1, arg2, cond) = state.pop3();
161 state.push1(builder.ins().select(cond, arg1, arg2));
162 }
163 Operator::Nop => {
164 // We do nothing
165 }
166 Operator::Unreachable => {
167 builder.ins().trap(ir::TrapCode::UnreachableCodeReached);
168 state.reachable = false;
169 }
170 /***************************** Control flow blocks **********************************
171 * When starting a control flow block, we create a new `Block` that will hold the code
172 * after the block, and we push a frame on the control stack. Depending on the type
173 * of block, we create a new `Block` for the body of the block with an associated
174 * jump instruction.
175 *
176 * The `End` instruction pops the last control frame from the control stack, seals
177 * the destination block (since `br` instructions targeting it only appear inside the
178         * block and have already been translated) and modifies the value stack to use the
179         * destination `Block`'s parameters, if any.
180 ***********************************************************************************/
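        // A rough sketch of the structure produced for `(block (result i32) ... br 0 ... end)`
        // (illustrative, not literal CLIF): on `block` we create a `Block` with one `i32`
        // parameter for the code following `end` and push a control frame; the `br 0` becomes a
        // `jump` to that `Block` carrying the result value; at `end` we jump to it, seal it
        // (every branch to it has now been seen) and keep emitting code there.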
181 Operator::Block { ty } => {
182 let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
183 let next = block_with_params(builder, results, environ)?;
184 state.push_block(next, params.len(), results.len());
185 }
186 Operator::Loop { ty } => {
187 let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
188 let loop_body = block_with_params(builder, params, environ)?;
189 let next = block_with_params(builder, results, environ)?;
190 builder.ins().jump(loop_body, state.peekn(params.len()));
191 state.push_loop(loop_body, next, params.len(), results.len());
192
193 // Pop the initial `Block` actuals and replace them with the `Block`'s
194 // params since control flow joins at the top of the loop.
195 state.popn(params.len());
196 state
197 .stack
198 .extend_from_slice(builder.block_params(loop_body));
199
200 builder.switch_to_block(loop_body);
201 environ.translate_loop_header(builder.cursor())?;
202 }
203 Operator::If { ty } => {
204 let val = state.pop1();
205
206 let (params, results) = blocktype_params_results(module_translation_state, *ty)?;
207 let (destination, else_data) = if params == results {
208 // It is possible there is no `else` block, so we will only
209 // allocate a block for it if/when we find the `else`. For now,
210                // if the condition isn't true, we jump directly to the
211 // destination block following the whole `if...end`. If we do end
212 // up discovering an `else`, then we will allocate a block for it
213 // and go back and patch the jump.
214 let destination = block_with_params(builder, results, environ)?;
215 let branch_inst = builder
216 .ins()
217 .brz(val, destination, state.peekn(params.len()));
218 (destination, ElseData::NoElse { branch_inst })
219 } else {
220 // The `if` type signature is not valid without an `else` block,
221 // so we eagerly allocate the `else` block here.
222 let destination = block_with_params(builder, results, environ)?;
223 let else_block = block_with_params(builder, params, environ)?;
224 builder
225 .ins()
226 .brz(val, else_block, state.peekn(params.len()));
227 builder.seal_block(else_block);
228 (destination, ElseData::WithElse { else_block })
229 };
230
231 let next_block = builder.create_block();
232 builder.ins().jump(next_block, &[]);
233 builder.seal_block(next_block); // Only predecessor is the current block.
234 builder.switch_to_block(next_block);
235
236            // Here we append an argument to a block targeted by an argumentless jump instruction.
237            // There are two cases:
238            // - either the `if` does not have an `else` clause, in which case ty = EmptyBlock
239            //   and we add nothing;
240            // - or the `if` has an `else` clause, in which case the destination of this jump
241            //   instruction will be changed later when we translate the `else` operator.
242 state.push_if(destination, else_data, params.len(), results.len(), *ty);
243 }
244 Operator::Else => {
245 let i = state.control_stack.len() - 1;
246 match state.control_stack[i] {
247 ControlStackFrame::If {
248 ref else_data,
249 head_is_reachable,
250 ref mut consequent_ends_reachable,
251 num_return_values,
252 blocktype,
253 destination,
254 ..
255 } => {
256 // We finished the consequent, so record its final
257 // reachability state.
258 debug_assert!(consequent_ends_reachable.is_none());
259 *consequent_ends_reachable = Some(state.reachable);
260
261 if head_is_reachable {
262 // We have a branch from the head of the `if` to the `else`.
263 state.reachable = true;
264
265 // Ensure we have a block for the `else` block (it may have
266 // already been pre-allocated, see `ElseData` for details).
267 let else_block = match *else_data {
268 ElseData::NoElse { branch_inst } => {
269 let (params, _results) =
270 blocktype_params_results(module_translation_state, blocktype)?;
271 debug_assert_eq!(params.len(), num_return_values);
272 let else_block = block_with_params(builder, params, environ)?;
273 builder.ins().jump(destination, state.peekn(params.len()));
274 state.popn(params.len());
275
276 builder.change_jump_destination(branch_inst, else_block);
277 builder.seal_block(else_block);
278 else_block
279 }
280 ElseData::WithElse { else_block } => {
281 builder
282 .ins()
283 .jump(destination, state.peekn(num_return_values));
284 state.popn(num_return_values);
285 else_block
286 }
287 };
288
289 // You might be expecting that we push the parameters for this
290 // `else` block here, something like this:
291 //
292 // state.pushn(&control_stack_frame.params);
293 //
294 // We don't do that because they are already on the top of the stack
295 // for us: we pushed the parameters twice when we saw the initial
296 // `if` so that we wouldn't have to save the parameters in the
297 // `ControlStackFrame` as another `Vec` allocation.
298
299 builder.switch_to_block(else_block);
300
301 // We don't bother updating the control frame's `ElseData`
302 // to `WithElse` because nothing else will read it.
303 }
304 }
305 _ => unreachable!(),
306 }
307 }
308 Operator::End => {
309 let frame = state.control_stack.pop().unwrap();
310 let next_block = frame.following_code();
311
312 if !builder.is_unreachable() || !builder.is_pristine() {
313 let return_count = frame.num_return_values();
314 let return_args = state.peekn_mut(return_count);
315 let next_block_types = builder.func.dfg.block_param_types(next_block);
316 bitcast_arguments(return_args, &next_block_types, builder);
317 builder.ins().jump(frame.following_code(), return_args);
318 // You might expect that if we just finished an `if` block that
319 // didn't have a corresponding `else` block, then we would clean
320 // up our duplicate set of parameters that we pushed earlier
321 // right here. However, we don't have to explicitly do that,
322 // since we truncate the stack back to the original height
323 // below.
324 }
325 builder.switch_to_block(next_block);
326 builder.seal_block(next_block);
327 // If it is a loop we also have to seal the body loop block
328 if let ControlStackFrame::Loop { header, .. } = frame {
329 builder.seal_block(header)
330 }
331 state.stack.truncate(frame.original_stack_size());
332 state
333 .stack
334 .extend_from_slice(builder.block_params(next_block));
335 }
336 /**************************** Branch instructions *********************************
337         * The branch instructions all take a target nesting level as argument, which corresponds
338         * to how many control stack frames we have to pop to reach the destination `Block`.
339         *
340         * Once the destination `Block` is found, we sometimes have to mark the code that follows
341         * as unreachable, because some branch instructions are terminators.
342         *
343         * The `br_table` case is much more complicated because Cranelift's `br_table`
344         * instruction does not support jump arguments, unlike all the other branch
345         * instructions. That is why, in the cases where we would otherwise pass jump
346         * arguments, we need to split the critical edges leaving the `br_table` by creating
347         * one `Block` per table destination; the `br_table` then points to these newly
348         * created `Block`s, and each of them contains only a jump instruction pointing to
349         * the final destination, this time carrying the jump arguments that `br_table`
350         * itself cannot.
351 *
352 * This system is also implemented in Cranelift's SSA construction algorithm, because
353 * `use_var` located in a destination `Block` of a `br_table` might trigger the addition
354 * of jump arguments in each predecessor branch instruction, one of which might be a
355 * `br_table`.
356 ***********************************************************************************/
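        // A sketch of the edge-splitting shape for the jump-argument case (illustrative, not
        // literal CLIF): for a `br_table` whose targets each expect one value we emit roughly
        //
        //   br_table index, default_edge_block, [edge_block_0, edge_block_1, ...]
        //   edge_block_0:  jump real_dest_0(arg)
        //   edge_block_1:  jump real_dest_1(arg)
        //
        // where each `edge_block_*` exists only to forward the jump arguments that `br_table`
        // itself cannot carry.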
357 Operator::Br { relative_depth } => {
358 let i = state.control_stack.len() - 1 - (*relative_depth as usize);
359 let (return_count, br_destination) = {
360 let frame = &mut state.control_stack[i];
361 // We signal that all the code that follows until the next End is unreachable
362 frame.set_branched_to_exit();
363 let return_count = if frame.is_loop() {
364 0
365 } else {
366 frame.num_return_values()
367 };
368 (return_count, frame.br_destination())
369 };
370
371 // Bitcast any vector arguments to their default type, I8X16, before jumping.
372 let destination_args = state.peekn_mut(return_count);
373 let destination_types = builder.func.dfg.block_param_types(br_destination);
374 bitcast_arguments(
375 destination_args,
376 &destination_types[..return_count],
377 builder,
378 );
379
380 builder.ins().jump(br_destination, destination_args);
381 state.popn(return_count);
382 state.reachable = false;
383 }
384 Operator::BrIf { relative_depth } => translate_br_if(*relative_depth, builder, state),
385 Operator::BrTable { table } => {
386 let (depths, default) = table.read_table()?;
387 let mut min_depth = default;
388 for depth in &*depths {
389 if *depth < min_depth {
390 min_depth = *depth;
391 }
392 }
393 let jump_args_count = {
394 let i = state.control_stack.len() - 1 - (min_depth as usize);
395 let min_depth_frame = &state.control_stack[i];
396 if min_depth_frame.is_loop() {
397 0
398 } else {
399 min_depth_frame.num_return_values()
400 }
401 };
402 let val = state.pop1();
403 let mut data = JumpTableData::with_capacity(depths.len());
404 if jump_args_count == 0 {
405 // No jump arguments
406 for depth in &*depths {
407 let block = {
408 let i = state.control_stack.len() - 1 - (*depth as usize);
409 let frame = &mut state.control_stack[i];
410 frame.set_branched_to_exit();
411 frame.br_destination()
412 };
413 data.push_entry(block);
414 }
415 let jt = builder.create_jump_table(data);
416 let block = {
417 let i = state.control_stack.len() - 1 - (default as usize);
418 let frame = &mut state.control_stack[i];
419 frame.set_branched_to_exit();
420 frame.br_destination()
421 };
422 builder.ins().br_table(val, block, jt);
423 } else {
424                // Here we have jump arguments, but Cranelift's `br_table` doesn't support them.
425                // We then proceed to split the edges going out of the `br_table`.
426 let return_count = jump_args_count;
427 let mut dest_block_sequence = vec![];
428 let mut dest_block_map = HashMap::new();
429 for depth in &*depths {
430 let branch_block = match dest_block_map.entry(*depth as usize) {
431 hash_map::Entry::Occupied(entry) => *entry.get(),
432 hash_map::Entry::Vacant(entry) => {
433 let block = builder.create_block();
434 dest_block_sequence.push((*depth as usize, block));
435 *entry.insert(block)
436 }
437 };
438 data.push_entry(branch_block);
439 }
440 let default_branch_block = match dest_block_map.entry(default as usize) {
441 hash_map::Entry::Occupied(entry) => *entry.get(),
442 hash_map::Entry::Vacant(entry) => {
443 let block = builder.create_block();
444 dest_block_sequence.push((default as usize, block));
445 *entry.insert(block)
446 }
447 };
448 let jt = builder.create_jump_table(data);
449 builder.ins().br_table(val, default_branch_block, jt);
450 for (depth, dest_block) in dest_block_sequence {
451 builder.switch_to_block(dest_block);
452 builder.seal_block(dest_block);
453 let real_dest_block = {
454 let i = state.control_stack.len() - 1 - depth;
455 let frame = &mut state.control_stack[i];
456 frame.set_branched_to_exit();
457 frame.br_destination()
458 };
459
460 // Bitcast any vector arguments to their default type, I8X16, before jumping.
461 let destination_args = state.peekn_mut(return_count);
462 let destination_types = builder.func.dfg.block_param_types(real_dest_block);
463 bitcast_arguments(
464 destination_args,
465 &destination_types[..return_count],
466 builder,
467 );
468
469 builder.ins().jump(real_dest_block, destination_args);
470 }
471 state.popn(return_count);
472 }
473 state.reachable = false;
474 }
475 Operator::Return => {
476 let (return_count, br_destination) = {
477 let frame = &mut state.control_stack[0];
478 frame.set_branched_to_exit();
479 let return_count = frame.num_return_values();
480 (return_count, frame.br_destination())
481 };
482 {
483 let return_args = state.peekn_mut(return_count);
484 let return_types = wasm_param_types(&builder.func.signature.returns, |i| {
485 environ.is_wasm_return(&builder.func.signature, i)
486 });
487 bitcast_arguments(return_args, &return_types, builder);
488 match environ.return_mode() {
489 ReturnMode::NormalReturns => builder.ins().return_(return_args),
490 ReturnMode::FallthroughReturn => {
491 builder.ins().jump(br_destination, return_args)
492 }
493 };
494 }
495 state.popn(return_count);
496 state.reachable = false;
497 }
498 /************************************ Calls ****************************************
499 * The call instructions pop off their arguments from the stack and append their
500         * return values to it. `call_indirect` needs environment support because its callee
501         * is given as an index into one of the module's tables of function references.
502 ************************************************************************************/
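        // For example (a sketch of the division of labor, not literal output): `call_indirect`
        // pops the callee's table index from the value stack, and `environ.translate_call_indirect`
        // is expected to emit the table bounds check, the signature check and the indirect call.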
503 Operator::Call { function_index } => {
504 let (fref, num_args) = state.get_direct_func(builder.func, *function_index, environ)?;
505
506 // Bitcast any vector arguments to their default type, I8X16, before calling.
507 let callee_signature =
508 &builder.func.dfg.signatures[builder.func.dfg.ext_funcs[fref].signature];
509 let args = state.peekn_mut(num_args);
510 let types = wasm_param_types(&callee_signature.params, |i| {
511 environ.is_wasm_parameter(&callee_signature, i)
512 });
513 bitcast_arguments(args, &types, builder);
514
515 let call = environ.translate_call(
516 builder.cursor(),
517 FuncIndex::from_u32(*function_index),
518 fref,
519 args,
520 )?;
521 let inst_results = builder.inst_results(call);
522 debug_assert_eq!(
523 inst_results.len(),
524 builder.func.dfg.signatures[builder.func.dfg.ext_funcs[fref].signature]
525 .returns
526 .len(),
527 "translate_call results should match the call signature"
528 );
529 state.popn(num_args);
530 state.pushn(inst_results);
531 }
532 Operator::CallIndirect { index, table_index } => {
533 // `index` is the index of the function's signature and `table_index` is the index of
534 // the table to search the function in.
535 let (sigref, num_args) = state.get_indirect_sig(builder.func, *index, environ)?;
536 let table = state.get_table(builder.func, *table_index, environ)?;
537 let callee = state.pop1();
538
539 // Bitcast any vector arguments to their default type, I8X16, before calling.
540 let callee_signature = &builder.func.dfg.signatures[sigref];
541 let args = state.peekn_mut(num_args);
542 let types = wasm_param_types(&callee_signature.params, |i| {
543 environ.is_wasm_parameter(&callee_signature, i)
544 });
545 bitcast_arguments(args, &types, builder);
546
547 let call = environ.translate_call_indirect(
548 builder.cursor(),
549 TableIndex::from_u32(*table_index),
550 table,
551 SignatureIndex::from_u32(*index),
552 sigref,
553 callee,
554 state.peekn(num_args),
555 )?;
556 let inst_results = builder.inst_results(call);
557 debug_assert_eq!(
558 inst_results.len(),
559 builder.func.dfg.signatures[sigref].returns.len(),
560 "translate_call_indirect results should match the call signature"
561 );
562 state.popn(num_args);
563 state.pushn(inst_results);
564 }
565 /******************************* Memory management ***********************************
566         * Memory management is handled by the environment. It is usually translated into calls
567         * to special functions.
568 ************************************************************************************/
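        // For instance, depending on the environment, `memory.grow` typically lowers to a call to
        // a runtime helper that tries to extend the linear memory and returns the previous size in
        // pages, or -1 on failure, matching the wasm semantics.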
569 Operator::MemoryGrow { reserved } => {
570 // The WebAssembly MVP only supports one linear memory, but we expect the reserved
571 // argument to be a memory index.
572 let heap_index = MemoryIndex::from_u32(*reserved);
573 let heap = state.get_heap(builder.func, *reserved, environ)?;
574 let val = state.pop1();
575 state.push1(environ.translate_memory_grow(builder.cursor(), heap_index, heap, val)?)
576 }
577 Operator::MemorySize { reserved } => {
578 let heap_index = MemoryIndex::from_u32(*reserved);
579 let heap = state.get_heap(builder.func, *reserved, environ)?;
580 state.push1(environ.translate_memory_size(builder.cursor(), heap_index, heap)?);
581 }
582 /******************************* Load instructions ***********************************
583 * Wasm specifies an integer alignment flag but we drop it in Cranelift.
584 * The memory base address is provided by the environment.
585 ************************************************************************************/
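        // Roughly speaking (a sketch; see the `translate_load`/`prepare_load` helpers), an access
        // such as `i32.load offset=8` pops the address operand, resolves the heap base through the
        // environment, adds the static offset, and emits a `load.i32` with suitable `MemFlags`.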
586 Operator::I32Load8U {
587 memarg: MemoryImmediate { flags: _, offset },
588 } => {
589 translate_load(*offset, ir::Opcode::Uload8, I32, builder, state, environ)?;
590 }
591 Operator::I32Load16U {
592 memarg: MemoryImmediate { flags: _, offset },
593 } => {
594 translate_load(*offset, ir::Opcode::Uload16, I32, builder, state, environ)?;
595 }
596 Operator::I32Load8S {
597 memarg: MemoryImmediate { flags: _, offset },
598 } => {
599 translate_load(*offset, ir::Opcode::Sload8, I32, builder, state, environ)?;
600 }
601 Operator::I32Load16S {
602 memarg: MemoryImmediate { flags: _, offset },
603 } => {
604 translate_load(*offset, ir::Opcode::Sload16, I32, builder, state, environ)?;
605 }
606 Operator::I64Load8U {
607 memarg: MemoryImmediate { flags: _, offset },
608 } => {
609 translate_load(*offset, ir::Opcode::Uload8, I64, builder, state, environ)?;
610 }
611 Operator::I64Load16U {
612 memarg: MemoryImmediate { flags: _, offset },
613 } => {
614 translate_load(*offset, ir::Opcode::Uload16, I64, builder, state, environ)?;
615 }
616 Operator::I64Load8S {
617 memarg: MemoryImmediate { flags: _, offset },
618 } => {
619 translate_load(*offset, ir::Opcode::Sload8, I64, builder, state, environ)?;
620 }
621 Operator::I64Load16S {
622 memarg: MemoryImmediate { flags: _, offset },
623 } => {
624 translate_load(*offset, ir::Opcode::Sload16, I64, builder, state, environ)?;
625 }
626 Operator::I64Load32S {
627 memarg: MemoryImmediate { flags: _, offset },
628 } => {
629 translate_load(*offset, ir::Opcode::Sload32, I64, builder, state, environ)?;
630 }
631 Operator::I64Load32U {
632 memarg: MemoryImmediate { flags: _, offset },
633 } => {
634 translate_load(*offset, ir::Opcode::Uload32, I64, builder, state, environ)?;
635 }
636 Operator::I32Load {
637 memarg: MemoryImmediate { flags: _, offset },
638 } => {
639 translate_load(*offset, ir::Opcode::Load, I32, builder, state, environ)?;
640 }
641 Operator::F32Load {
642 memarg: MemoryImmediate { flags: _, offset },
643 } => {
644 translate_load(*offset, ir::Opcode::Load, F32, builder, state, environ)?;
645 }
646 Operator::I64Load {
647 memarg: MemoryImmediate { flags: _, offset },
648 } => {
649 translate_load(*offset, ir::Opcode::Load, I64, builder, state, environ)?;
650 }
651 Operator::F64Load {
652 memarg: MemoryImmediate { flags: _, offset },
653 } => {
654 translate_load(*offset, ir::Opcode::Load, F64, builder, state, environ)?;
655 }
656 Operator::V128Load {
657 memarg: MemoryImmediate { flags: _, offset },
658 } => {
659 translate_load(*offset, ir::Opcode::Load, I8X16, builder, state, environ)?;
660 }
661 Operator::I16x8Load8x8S {
662 memarg: MemoryImmediate { flags: _, offset },
663 } => {
664 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
665 let loaded = builder.ins().sload8x8(flags, base, offset);
666 state.push1(loaded);
667 }
668 Operator::I16x8Load8x8U {
669 memarg: MemoryImmediate { flags: _, offset },
670 } => {
671 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
672 let loaded = builder.ins().uload8x8(flags, base, offset);
673 state.push1(loaded);
674 }
675 Operator::I32x4Load16x4S {
676 memarg: MemoryImmediate { flags: _, offset },
677 } => {
678 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
679 let loaded = builder.ins().sload16x4(flags, base, offset);
680 state.push1(loaded);
681 }
682 Operator::I32x4Load16x4U {
683 memarg: MemoryImmediate { flags: _, offset },
684 } => {
685 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
686 let loaded = builder.ins().uload16x4(flags, base, offset);
687 state.push1(loaded);
688 }
689 Operator::I64x2Load32x2S {
690 memarg: MemoryImmediate { flags: _, offset },
691 } => {
692 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
693 let loaded = builder.ins().sload32x2(flags, base, offset);
694 state.push1(loaded);
695 }
696 Operator::I64x2Load32x2U {
697 memarg: MemoryImmediate { flags: _, offset },
698 } => {
699 let (flags, base, offset) = prepare_load(*offset, 8, builder, state, environ)?;
700 let loaded = builder.ins().uload32x2(flags, base, offset);
701 state.push1(loaded);
702 }
703 /****************************** Store instructions ***********************************
704 * Wasm specifies an integer alignment flag but we drop it in Cranelift.
705 * The memory base address is provided by the environment.
706 ************************************************************************************/
707 Operator::I32Store {
708 memarg: MemoryImmediate { flags: _, offset },
709 }
710 | Operator::I64Store {
711 memarg: MemoryImmediate { flags: _, offset },
712 }
713 | Operator::F32Store {
714 memarg: MemoryImmediate { flags: _, offset },
715 }
716 | Operator::F64Store {
717 memarg: MemoryImmediate { flags: _, offset },
718 } => {
719 translate_store(*offset, ir::Opcode::Store, builder, state, environ)?;
720 }
721 Operator::I32Store8 {
722 memarg: MemoryImmediate { flags: _, offset },
723 }
724 | Operator::I64Store8 {
725 memarg: MemoryImmediate { flags: _, offset },
726 } => {
727 translate_store(*offset, ir::Opcode::Istore8, builder, state, environ)?;
728 }
729 Operator::I32Store16 {
730 memarg: MemoryImmediate { flags: _, offset },
731 }
732 | Operator::I64Store16 {
733 memarg: MemoryImmediate { flags: _, offset },
734 } => {
735 translate_store(*offset, ir::Opcode::Istore16, builder, state, environ)?;
736 }
737 Operator::I64Store32 {
738 memarg: MemoryImmediate { flags: _, offset },
739 } => {
740 translate_store(*offset, ir::Opcode::Istore32, builder, state, environ)?;
741 }
742 Operator::V128Store {
743 memarg: MemoryImmediate { flags: _, offset },
744 } => {
745 translate_store(*offset, ir::Opcode::Store, builder, state, environ)?;
746 }
747 /****************************** Nullary Operators ************************************/
748 Operator::I32Const { value } => state.push1(builder.ins().iconst(I32, i64::from(*value))),
749 Operator::I64Const { value } => state.push1(builder.ins().iconst(I64, *value)),
750 Operator::F32Const { value } => {
751 state.push1(builder.ins().f32const(f32_translation(*value)));
752 }
753 Operator::F64Const { value } => {
754 state.push1(builder.ins().f64const(f64_translation(*value)));
755 }
756 /******************************* Unary Operators *************************************/
757 Operator::I32Clz | Operator::I64Clz => {
758 let arg = state.pop1();
759 state.push1(builder.ins().clz(arg));
760 }
761 Operator::I32Ctz | Operator::I64Ctz => {
762 let arg = state.pop1();
763 state.push1(builder.ins().ctz(arg));
764 }
765 Operator::I32Popcnt | Operator::I64Popcnt => {
766 let arg = state.pop1();
767 state.push1(builder.ins().popcnt(arg));
768 }
769 Operator::I64ExtendI32S => {
770 let val = state.pop1();
771 state.push1(builder.ins().sextend(I64, val));
772 }
773 Operator::I64ExtendI32U => {
774 let val = state.pop1();
775 state.push1(builder.ins().uextend(I64, val));
776 }
777 Operator::I32WrapI64 => {
778 let val = state.pop1();
779 state.push1(builder.ins().ireduce(I32, val));
780 }
781 Operator::F32Sqrt | Operator::F64Sqrt => {
782 let arg = state.pop1();
783 state.push1(builder.ins().sqrt(arg));
784 }
785 Operator::F32Ceil | Operator::F64Ceil => {
786 let arg = state.pop1();
787 state.push1(builder.ins().ceil(arg));
788 }
789 Operator::F32Floor | Operator::F64Floor => {
790 let arg = state.pop1();
791 state.push1(builder.ins().floor(arg));
792 }
793 Operator::F32Trunc | Operator::F64Trunc => {
794 let arg = state.pop1();
795 state.push1(builder.ins().trunc(arg));
796 }
797 Operator::F32Nearest | Operator::F64Nearest => {
798 let arg = state.pop1();
799 state.push1(builder.ins().nearest(arg));
800 }
801 Operator::F32Abs | Operator::F64Abs => {
802 let val = state.pop1();
803 state.push1(builder.ins().fabs(val));
804 }
805 Operator::F32Neg | Operator::F64Neg => {
806 let arg = state.pop1();
807 state.push1(builder.ins().fneg(arg));
808 }
809 Operator::F64ConvertI64U | Operator::F64ConvertI32U => {
810 let val = state.pop1();
811 state.push1(builder.ins().fcvt_from_uint(F64, val));
812 }
813 Operator::F64ConvertI64S | Operator::F64ConvertI32S => {
814 let val = state.pop1();
815 state.push1(builder.ins().fcvt_from_sint(F64, val));
816 }
817 Operator::F32ConvertI64S | Operator::F32ConvertI32S => {
818 let val = state.pop1();
819 state.push1(builder.ins().fcvt_from_sint(F32, val));
820 }
821 Operator::F32ConvertI64U | Operator::F32ConvertI32U => {
822 let val = state.pop1();
823 state.push1(builder.ins().fcvt_from_uint(F32, val));
824 }
825 Operator::F64PromoteF32 => {
826 let val = state.pop1();
827 state.push1(builder.ins().fpromote(F64, val));
828 }
829 Operator::F32DemoteF64 => {
830 let val = state.pop1();
831 state.push1(builder.ins().fdemote(F32, val));
832 }
833 Operator::I64TruncF64S | Operator::I64TruncF32S => {
834 let val = state.pop1();
835 state.push1(builder.ins().fcvt_to_sint(I64, val));
836 }
837 Operator::I32TruncF64S | Operator::I32TruncF32S => {
838 let val = state.pop1();
839 state.push1(builder.ins().fcvt_to_sint(I32, val));
840 }
841 Operator::I64TruncF64U | Operator::I64TruncF32U => {
842 let val = state.pop1();
843 state.push1(builder.ins().fcvt_to_uint(I64, val));
844 }
845 Operator::I32TruncF64U | Operator::I32TruncF32U => {
846 let val = state.pop1();
847 state.push1(builder.ins().fcvt_to_uint(I32, val));
848 }
849 Operator::I64TruncSatF64S | Operator::I64TruncSatF32S => {
850 let val = state.pop1();
851 state.push1(builder.ins().fcvt_to_sint_sat(I64, val));
852 }
853 Operator::I32TruncSatF64S | Operator::I32TruncSatF32S => {
854 let val = state.pop1();
855 state.push1(builder.ins().fcvt_to_sint_sat(I32, val));
856 }
857 Operator::I64TruncSatF64U | Operator::I64TruncSatF32U => {
858 let val = state.pop1();
859 state.push1(builder.ins().fcvt_to_uint_sat(I64, val));
860 }
861 Operator::I32TruncSatF64U | Operator::I32TruncSatF32U => {
862 let val = state.pop1();
863 state.push1(builder.ins().fcvt_to_uint_sat(I32, val));
864 }
865 Operator::F32ReinterpretI32 => {
866 let val = state.pop1();
867 state.push1(builder.ins().bitcast(F32, val));
868 }
869 Operator::F64ReinterpretI64 => {
870 let val = state.pop1();
871 state.push1(builder.ins().bitcast(F64, val));
872 }
873 Operator::I32ReinterpretF32 => {
874 let val = state.pop1();
875 state.push1(builder.ins().bitcast(I32, val));
876 }
877 Operator::I64ReinterpretF64 => {
878 let val = state.pop1();
879 state.push1(builder.ins().bitcast(I64, val));
880 }
881        Operator::I32Extend8S => {
882            let val = state.pop1();
883            let reduced = builder.ins().ireduce(I8, val);
884            let extended = builder.ins().sextend(I32, reduced);
885            state.push1(extended);
886        }
887        Operator::I32Extend16S => {
888            let val = state.pop1();
889            let reduced = builder.ins().ireduce(I16, val);
890            let extended = builder.ins().sextend(I32, reduced);
891            state.push1(extended);
892        }
893        Operator::I64Extend8S => {
894            let val = state.pop1();
895            let reduced = builder.ins().ireduce(I8, val);
896            let extended = builder.ins().sextend(I64, reduced);
897            state.push1(extended);
898        }
899        Operator::I64Extend16S => {
900            let val = state.pop1();
901            let reduced = builder.ins().ireduce(I16, val);
902            let extended = builder.ins().sextend(I64, reduced);
903            state.push1(extended);
904        }
905        Operator::I64Extend32S => {
906            let val = state.pop1();
907            let reduced = builder.ins().ireduce(I32, val);
908            let extended = builder.ins().sextend(I64, reduced);
909            state.push1(extended);
910        }
911 /****************************** Binary Operators ************************************/
912 Operator::I32Add | Operator::I64Add => {
913 let (arg1, arg2) = state.pop2();
914 state.push1(builder.ins().iadd(arg1, arg2));
915 }
916 Operator::I32And | Operator::I64And => {
917 let (arg1, arg2) = state.pop2();
918 state.push1(builder.ins().band(arg1, arg2));
919 }
920 Operator::I32Or | Operator::I64Or => {
921 let (arg1, arg2) = state.pop2();
922 state.push1(builder.ins().bor(arg1, arg2));
923 }
924 Operator::I32Xor | Operator::I64Xor => {
925 let (arg1, arg2) = state.pop2();
926 state.push1(builder.ins().bxor(arg1, arg2));
927 }
928 Operator::I32Shl | Operator::I64Shl => {
929 let (arg1, arg2) = state.pop2();
930 state.push1(builder.ins().ishl(arg1, arg2));
931 }
932 Operator::I32ShrS | Operator::I64ShrS => {
933 let (arg1, arg2) = state.pop2();
934 state.push1(builder.ins().sshr(arg1, arg2));
935 }
936 Operator::I32ShrU | Operator::I64ShrU => {
937 let (arg1, arg2) = state.pop2();
938 state.push1(builder.ins().ushr(arg1, arg2));
939 }
940 Operator::I32Rotl | Operator::I64Rotl => {
941 let (arg1, arg2) = state.pop2();
942 state.push1(builder.ins().rotl(arg1, arg2));
943 }
944 Operator::I32Rotr | Operator::I64Rotr => {
945 let (arg1, arg2) = state.pop2();
946 state.push1(builder.ins().rotr(arg1, arg2));
947 }
948 Operator::F32Add | Operator::F64Add => {
949 let (arg1, arg2) = state.pop2();
950 state.push1(builder.ins().fadd(arg1, arg2));
951 }
952 Operator::I32Sub | Operator::I64Sub => {
953 let (arg1, arg2) = state.pop2();
954 state.push1(builder.ins().isub(arg1, arg2));
955 }
956 Operator::F32Sub | Operator::F64Sub => {
957 let (arg1, arg2) = state.pop2();
958 state.push1(builder.ins().fsub(arg1, arg2));
959 }
960 Operator::I32Mul | Operator::I64Mul => {
961 let (arg1, arg2) = state.pop2();
962 state.push1(builder.ins().imul(arg1, arg2));
963 }
964 Operator::F32Mul | Operator::F64Mul => {
965 let (arg1, arg2) = state.pop2();
966 state.push1(builder.ins().fmul(arg1, arg2));
967 }
968 Operator::F32Div | Operator::F64Div => {
969 let (arg1, arg2) = state.pop2();
970 state.push1(builder.ins().fdiv(arg1, arg2));
971 }
972 Operator::I32DivS | Operator::I64DivS => {
973 let (arg1, arg2) = state.pop2();
974 state.push1(builder.ins().sdiv(arg1, arg2));
975 }
976 Operator::I32DivU | Operator::I64DivU => {
977 let (arg1, arg2) = state.pop2();
978 state.push1(builder.ins().udiv(arg1, arg2));
979 }
980 Operator::I32RemS | Operator::I64RemS => {
981 let (arg1, arg2) = state.pop2();
982 state.push1(builder.ins().srem(arg1, arg2));
983 }
984 Operator::I32RemU | Operator::I64RemU => {
985 let (arg1, arg2) = state.pop2();
986 state.push1(builder.ins().urem(arg1, arg2));
987 }
988 Operator::F32Min | Operator::F64Min => {
989 let (arg1, arg2) = state.pop2();
990 state.push1(builder.ins().fmin(arg1, arg2));
991 }
992 Operator::F32Max | Operator::F64Max => {
993 let (arg1, arg2) = state.pop2();
994 state.push1(builder.ins().fmax(arg1, arg2));
995 }
996 Operator::F32Copysign | Operator::F64Copysign => {
997 let (arg1, arg2) = state.pop2();
998 state.push1(builder.ins().fcopysign(arg1, arg2));
999 }
1000 /**************************** Comparison Operators **********************************/
1001 Operator::I32LtS | Operator::I64LtS => {
1002 translate_icmp(IntCC::SignedLessThan, builder, state)
1003 }
1004 Operator::I32LtU | Operator::I64LtU => {
1005 translate_icmp(IntCC::UnsignedLessThan, builder, state)
1006 }
1007 Operator::I32LeS | Operator::I64LeS => {
1008 translate_icmp(IntCC::SignedLessThanOrEqual, builder, state)
1009 }
1010 Operator::I32LeU | Operator::I64LeU => {
1011 translate_icmp(IntCC::UnsignedLessThanOrEqual, builder, state)
1012 }
1013 Operator::I32GtS | Operator::I64GtS => {
1014 translate_icmp(IntCC::SignedGreaterThan, builder, state)
1015 }
1016 Operator::I32GtU | Operator::I64GtU => {
1017 translate_icmp(IntCC::UnsignedGreaterThan, builder, state)
1018 }
1019 Operator::I32GeS | Operator::I64GeS => {
1020 translate_icmp(IntCC::SignedGreaterThanOrEqual, builder, state)
1021 }
1022 Operator::I32GeU | Operator::I64GeU => {
1023 translate_icmp(IntCC::UnsignedGreaterThanOrEqual, builder, state)
1024 }
1025 Operator::I32Eqz | Operator::I64Eqz => {
1026 let arg = state.pop1();
1027 let val = builder.ins().icmp_imm(IntCC::Equal, arg, 0);
1028 state.push1(builder.ins().bint(I32, val));
1029 }
1030 Operator::I32Eq | Operator::I64Eq => translate_icmp(IntCC::Equal, builder, state),
1031 Operator::F32Eq | Operator::F64Eq => translate_fcmp(FloatCC::Equal, builder, state),
1032 Operator::I32Ne | Operator::I64Ne => translate_icmp(IntCC::NotEqual, builder, state),
1033 Operator::F32Ne | Operator::F64Ne => translate_fcmp(FloatCC::NotEqual, builder, state),
1034 Operator::F32Gt | Operator::F64Gt => translate_fcmp(FloatCC::GreaterThan, builder, state),
1035 Operator::F32Ge | Operator::F64Ge => {
1036 translate_fcmp(FloatCC::GreaterThanOrEqual, builder, state)
1037 }
1038 Operator::F32Lt | Operator::F64Lt => translate_fcmp(FloatCC::LessThan, builder, state),
1039 Operator::F32Le | Operator::F64Le => {
1040 translate_fcmp(FloatCC::LessThanOrEqual, builder, state)
1041 }
1042 Operator::RefNull { ty: _ } => state.push1(builder.ins().null(environ.reference_type())),
1043 Operator::RefIsNull { ty: _ } => {
1044 let arg = state.pop1();
1045 let val = builder.ins().is_null(arg);
1046 let val_int = builder.ins().bint(I32, val);
1047 state.push1(val_int);
1048 }
1049 Operator::RefFunc { function_index } => {
1050 state.push1(environ.translate_ref_func(builder.cursor(), *function_index)?);
1051 }
1052 Operator::AtomicNotify { .. }
1053 | Operator::I32AtomicWait { .. }
1054 | Operator::I64AtomicWait { .. }
1055 | Operator::I32AtomicLoad { .. }
1056 | Operator::I64AtomicLoad { .. }
1057 | Operator::I32AtomicLoad8U { .. }
1058 | Operator::I32AtomicLoad16U { .. }
1059 | Operator::I64AtomicLoad8U { .. }
1060 | Operator::I64AtomicLoad16U { .. }
1061 | Operator::I64AtomicLoad32U { .. }
1062 | Operator::I32AtomicStore { .. }
1063 | Operator::I64AtomicStore { .. }
1064 | Operator::I32AtomicStore8 { .. }
1065 | Operator::I32AtomicStore16 { .. }
1066 | Operator::I64AtomicStore8 { .. }
1067 | Operator::I64AtomicStore16 { .. }
1068 | Operator::I64AtomicStore32 { .. }
1069 | Operator::I32AtomicRmwAdd { .. }
1070 | Operator::I64AtomicRmwAdd { .. }
1071 | Operator::I32AtomicRmw8AddU { .. }
1072 | Operator::I32AtomicRmw16AddU { .. }
1073 | Operator::I64AtomicRmw8AddU { .. }
1074 | Operator::I64AtomicRmw16AddU { .. }
1075 | Operator::I64AtomicRmw32AddU { .. }
1076 | Operator::I32AtomicRmwSub { .. }
1077 | Operator::I64AtomicRmwSub { .. }
1078 | Operator::I32AtomicRmw8SubU { .. }
1079 | Operator::I32AtomicRmw16SubU { .. }
1080 | Operator::I64AtomicRmw8SubU { .. }
1081 | Operator::I64AtomicRmw16SubU { .. }
1082 | Operator::I64AtomicRmw32SubU { .. }
1083 | Operator::I32AtomicRmwAnd { .. }
1084 | Operator::I64AtomicRmwAnd { .. }
1085 | Operator::I32AtomicRmw8AndU { .. }
1086 | Operator::I32AtomicRmw16AndU { .. }
1087 | Operator::I64AtomicRmw8AndU { .. }
1088 | Operator::I64AtomicRmw16AndU { .. }
1089 | Operator::I64AtomicRmw32AndU { .. }
1090 | Operator::I32AtomicRmwOr { .. }
1091 | Operator::I64AtomicRmwOr { .. }
1092 | Operator::I32AtomicRmw8OrU { .. }
1093 | Operator::I32AtomicRmw16OrU { .. }
1094 | Operator::I64AtomicRmw8OrU { .. }
1095 | Operator::I64AtomicRmw16OrU { .. }
1096 | Operator::I64AtomicRmw32OrU { .. }
1097 | Operator::I32AtomicRmwXor { .. }
1098 | Operator::I64AtomicRmwXor { .. }
1099 | Operator::I32AtomicRmw8XorU { .. }
1100 | Operator::I32AtomicRmw16XorU { .. }
1101 | Operator::I64AtomicRmw8XorU { .. }
1102 | Operator::I64AtomicRmw16XorU { .. }
1103 | Operator::I64AtomicRmw32XorU { .. }
1104 | Operator::I32AtomicRmwXchg { .. }
1105 | Operator::I64AtomicRmwXchg { .. }
1106 | Operator::I32AtomicRmw8XchgU { .. }
1107 | Operator::I32AtomicRmw16XchgU { .. }
1108 | Operator::I64AtomicRmw8XchgU { .. }
1109 | Operator::I64AtomicRmw16XchgU { .. }
1110 | Operator::I64AtomicRmw32XchgU { .. }
1111 | Operator::I32AtomicRmwCmpxchg { .. }
1112 | Operator::I64AtomicRmwCmpxchg { .. }
1113 | Operator::I32AtomicRmw8CmpxchgU { .. }
1114 | Operator::I32AtomicRmw16CmpxchgU { .. }
1115 | Operator::I64AtomicRmw8CmpxchgU { .. }
1116 | Operator::I64AtomicRmw16CmpxchgU { .. }
1117 | Operator::I64AtomicRmw32CmpxchgU { .. }
1118 | Operator::AtomicFence { .. } => {
1119 return Err(wasm_unsupported!("proposed thread operator {:?}", op));
1120 }
1121 Operator::MemoryCopy => {
1122 // The WebAssembly MVP only supports one linear memory and
1123 // wasmparser will ensure that the memory indices specified are
1124 // zero.
1125 let heap_index = MemoryIndex::from_u32(0);
1126 let heap = state.get_heap(builder.func, 0, environ)?;
1127 let len = state.pop1();
1128 let src = state.pop1();
1129 let dest = state.pop1();
1130 environ.translate_memory_copy(builder.cursor(), heap_index, heap, dest, src, len)?;
1131 }
1132 Operator::MemoryFill => {
1133 // The WebAssembly MVP only supports one linear memory and
1134 // wasmparser will ensure that the memory index specified is
1135 // zero.
1136 let heap_index = MemoryIndex::from_u32(0);
1137 let heap = state.get_heap(builder.func, 0, environ)?;
1138 let len = state.pop1();
1139 let val = state.pop1();
1140 let dest = state.pop1();
1141 environ.translate_memory_fill(builder.cursor(), heap_index, heap, dest, val, len)?;
1142 }
1143 Operator::MemoryInit { segment } => {
1144 // The WebAssembly MVP only supports one linear memory and
1145 // wasmparser will ensure that the memory index specified is
1146 // zero.
1147 let heap_index = MemoryIndex::from_u32(0);
1148 let heap = state.get_heap(builder.func, 0, environ)?;
1149 let len = state.pop1();
1150 let src = state.pop1();
1151 let dest = state.pop1();
1152 environ.translate_memory_init(
1153 builder.cursor(),
1154 heap_index,
1155 heap,
1156 *segment,
1157 dest,
1158 src,
1159 len,
1160 )?;
1161 }
1162 Operator::DataDrop { segment } => {
1163 environ.translate_data_drop(builder.cursor(), *segment)?;
1164 }
1165 Operator::TableSize { table: index } => {
1166 let table = state.get_table(builder.func, *index, environ)?;
1167 state.push1(environ.translate_table_size(
1168 builder.cursor(),
1169 TableIndex::from_u32(*index),
1170 table,
1171 )?);
1172 }
1173 Operator::TableGrow { table } => {
1174 let table_index = TableIndex::from_u32(*table);
1175 let delta = state.pop1();
1176 let init_value = state.pop1();
1177 state.push1(environ.translate_table_grow(
1178 builder.cursor(),
1179 table_index,
1180 delta,
1181 init_value,
1182 )?);
1183 }
1184 Operator::TableGet { table } => {
1185 let table_index = TableIndex::from_u32(*table);
1186 let index = state.pop1();
1187 state.push1(environ.translate_table_get(builder.cursor(), table_index, index)?);
1188 }
1189 Operator::TableSet { table } => {
1190 let table_index = TableIndex::from_u32(*table);
1191 let value = state.pop1();
1192 let index = state.pop1();
1193 environ.translate_table_set(builder.cursor(), table_index, value, index)?;
1194 }
1195 Operator::TableCopy {
1196 dst_table: dst_table_index,
1197 src_table: src_table_index,
1198 } => {
1199 let dst_table = state.get_table(builder.func, *dst_table_index, environ)?;
1200 let src_table = state.get_table(builder.func, *src_table_index, environ)?;
1201 let len = state.pop1();
1202 let src = state.pop1();
1203 let dest = state.pop1();
1204 environ.translate_table_copy(
1205 builder.cursor(),
1206 TableIndex::from_u32(*dst_table_index),
1207 dst_table,
1208 TableIndex::from_u32(*src_table_index),
1209 src_table,
1210 dest,
1211 src,
1212 len,
1213 )?;
1214 }
1215 Operator::TableFill { table } => {
1216 let table_index = TableIndex::from_u32(*table);
1217 let len = state.pop1();
1218 let val = state.pop1();
1219 let dest = state.pop1();
1220 environ.translate_table_fill(builder.cursor(), table_index, dest, val, len)?;
1221 }
1222 Operator::TableInit {
1223 segment,
1224 table: table_index,
1225 } => {
1226 let table = state.get_table(builder.func, *table_index, environ)?;
1227 let len = state.pop1();
1228 let src = state.pop1();
1229 let dest = state.pop1();
1230 environ.translate_table_init(
1231 builder.cursor(),
1232 *segment,
1233 TableIndex::from_u32(*table_index),
1234 table,
1235 dest,
1236 src,
1237 len,
1238 )?;
1239 }
1240 Operator::ElemDrop { segment } => {
1241 environ.translate_elem_drop(builder.cursor(), *segment)?;
1242 }
1243 Operator::V128Const { value } => {
1244 let data = value.bytes().to_vec().into();
1245 let handle = builder.func.dfg.constants.insert(data);
1246 let value = builder.ins().vconst(I8X16, handle);
1247            // The v128.const is typed in CLIF as an I8X16 but may be raw_bitcast to a different type before use.
1248 state.push1(value)
1249 }
1250 Operator::I8x16Splat | Operator::I16x8Splat => {
1251 let reduced = builder.ins().ireduce(type_of(op).lane_type(), state.pop1());
1252 let splatted = builder.ins().splat(type_of(op), reduced);
1253 state.push1(splatted)
1254 }
1255 Operator::I32x4Splat
1256 | Operator::I64x2Splat
1257 | Operator::F32x4Splat
1258 | Operator::F64x2Splat => {
1259 let splatted = builder.ins().splat(type_of(op), state.pop1());
1260 state.push1(splatted)
1261 }
1262 Operator::V8x16LoadSplat {
1263 memarg: MemoryImmediate { flags: _, offset },
1264 }
1265 | Operator::V16x8LoadSplat {
1266 memarg: MemoryImmediate { flags: _, offset },
1267 }
1268 | Operator::V32x4LoadSplat {
1269 memarg: MemoryImmediate { flags: _, offset },
1270 }
1271 | Operator::V64x2LoadSplat {
1272 memarg: MemoryImmediate { flags: _, offset },
1273 } => {
1274 // TODO: For spec compliance, this is initially implemented as a combination of `load +
1275 // splat` but could be implemented eventually as a single instruction (`load_splat`).
1276 // See https://github.com/bytecodealliance/wasmtime/issues/1175.
1277 translate_load(
1278 *offset,
1279 ir::Opcode::Load,
1280 type_of(op).lane_type(),
1281 builder,
1282 state,
1283 environ,
1284 )?;
1285 let splatted = builder.ins().splat(type_of(op), state.pop1());
1286 state.push1(splatted)
1287 }
1288 Operator::I8x16ExtractLaneS { lane } | Operator::I16x8ExtractLaneS { lane } => {
1289 let vector = pop1_with_bitcast(state, type_of(op), builder);
1290 let extracted = builder.ins().extractlane(vector, lane.clone());
1291 state.push1(builder.ins().sextend(I32, extracted))
1292 }
1293 Operator::I8x16ExtractLaneU { lane } | Operator::I16x8ExtractLaneU { lane } => {
1294 let vector = pop1_with_bitcast(state, type_of(op), builder);
1295 let extracted = builder.ins().extractlane(vector, lane.clone());
1296 state.push1(builder.ins().uextend(I32, extracted));
1297 // On x86, PEXTRB zeroes the upper bits of the destination register of extractlane so
1298 // uextend could be elided; for now, uextend is needed for Cranelift's type checks to
1299 // work.
1300 }
1301 Operator::I32x4ExtractLane { lane }
1302 | Operator::I64x2ExtractLane { lane }
1303 | Operator::F32x4ExtractLane { lane }
1304 | Operator::F64x2ExtractLane { lane } => {
1305 let vector = pop1_with_bitcast(state, type_of(op), builder);
1306 state.push1(builder.ins().extractlane(vector, lane.clone()))
1307 }
1308 Operator::I8x16ReplaceLane { lane } | Operator::I16x8ReplaceLane { lane } => {
1309 let (vector, replacement) = state.pop2();
1310 let ty = type_of(op);
1311 let reduced = builder.ins().ireduce(ty.lane_type(), replacement);
1312 let vector = optionally_bitcast_vector(vector, ty, builder);
1313 state.push1(builder.ins().insertlane(vector, reduced, *lane))
1314 }
1315 Operator::I32x4ReplaceLane { lane }
1316 | Operator::I64x2ReplaceLane { lane }
1317 | Operator::F32x4ReplaceLane { lane }
1318 | Operator::F64x2ReplaceLane { lane } => {
1319 let (vector, replacement) = state.pop2();
1320 let vector = optionally_bitcast_vector(vector, type_of(op), builder);
1321 state.push1(builder.ins().insertlane(vector, replacement, *lane))
1322 }
1323 Operator::V8x16Shuffle { lanes, .. } => {
1324 let (a, b) = pop2_with_bitcast(state, I8X16, builder);
1325 let lanes = ConstantData::from(lanes.as_ref());
1326 let mask = builder.func.dfg.immediates.push(lanes);
1327 let shuffled = builder.ins().shuffle(a, b, mask);
1328 state.push1(shuffled)
1329 // At this point the original types of a and b are lost; users of this value (i.e. this
1330 // WASM-to-CLIF translator) may need to raw_bitcast for type-correctness. This is due
1331 // to WASM using the less specific v128 type for certain operations and more specific
1332 // types (e.g. i8x16) for others.
1333 }
1334 Operator::V8x16Swizzle => {
1335 let (a, b) = pop2_with_bitcast(state, I8X16, builder);
1336 state.push1(builder.ins().swizzle(I8X16, a, b))
1337 }
1338 Operator::I8x16Add | Operator::I16x8Add | Operator::I32x4Add | Operator::I64x2Add => {
1339 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1340 state.push1(builder.ins().iadd(a, b))
1341 }
1342 Operator::I8x16AddSaturateS | Operator::I16x8AddSaturateS => {
1343 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1344 state.push1(builder.ins().sadd_sat(a, b))
1345 }
1346 Operator::I8x16AddSaturateU | Operator::I16x8AddSaturateU => {
1347 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1348 state.push1(builder.ins().uadd_sat(a, b))
1349 }
1350 Operator::I8x16Sub | Operator::I16x8Sub | Operator::I32x4Sub | Operator::I64x2Sub => {
1351 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1352 state.push1(builder.ins().isub(a, b))
1353 }
1354 Operator::I8x16SubSaturateS | Operator::I16x8SubSaturateS => {
1355 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1356 state.push1(builder.ins().ssub_sat(a, b))
1357 }
1358 Operator::I8x16SubSaturateU | Operator::I16x8SubSaturateU => {
1359 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1360 state.push1(builder.ins().usub_sat(a, b))
1361 }
1362 Operator::I8x16MinS | Operator::I16x8MinS | Operator::I32x4MinS => {
1363 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1364 state.push1(builder.ins().imin(a, b))
1365 }
1366 Operator::I8x16MinU | Operator::I16x8MinU | Operator::I32x4MinU => {
1367 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1368 state.push1(builder.ins().umin(a, b))
1369 }
1370 Operator::I8x16MaxS | Operator::I16x8MaxS | Operator::I32x4MaxS => {
1371 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1372 state.push1(builder.ins().imax(a, b))
1373 }
1374 Operator::I8x16MaxU | Operator::I16x8MaxU | Operator::I32x4MaxU => {
1375 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1376 state.push1(builder.ins().umax(a, b))
1377 }
1378 Operator::I8x16RoundingAverageU | Operator::I16x8RoundingAverageU => {
1379 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1380 state.push1(builder.ins().avg_round(a, b))
1381 }
1382 Operator::I8x16Neg | Operator::I16x8Neg | Operator::I32x4Neg | Operator::I64x2Neg => {
1383 let a = pop1_with_bitcast(state, type_of(op), builder);
1384 state.push1(builder.ins().ineg(a))
1385 }
1386 Operator::I16x8Mul | Operator::I32x4Mul | Operator::I64x2Mul => {
1387 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1388 state.push1(builder.ins().imul(a, b))
1389 }
1390 Operator::V128Or => {
1391 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1392 state.push1(builder.ins().bor(a, b))
1393 }
1394 Operator::V128Xor => {
1395 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1396 state.push1(builder.ins().bxor(a, b))
1397 }
1398 Operator::V128And => {
1399 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1400 state.push1(builder.ins().band(a, b))
1401 }
1402 Operator::V128AndNot => {
1403 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1404 state.push1(builder.ins().band_not(a, b))
1405 }
1406 Operator::V128Not => {
1407 let a = state.pop1();
1408 state.push1(builder.ins().bnot(a));
1409 }
1410 Operator::I8x16Shl | Operator::I16x8Shl | Operator::I32x4Shl | Operator::I64x2Shl => {
1411 let (a, b) = state.pop2();
1412 let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
1413 let bitwidth = i64::from(type_of(op).lane_bits());
1414 // The spec expects to shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width
1415 // we do `b AND 15`; this means fewer instructions than `iconst + urem`.
1416 let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1);
1417 state.push1(builder.ins().ishl(bitcast_a, b_mod_bitwidth))
1418 }
1419 Operator::I8x16ShrU | Operator::I16x8ShrU | Operator::I32x4ShrU | Operator::I64x2ShrU => {
1420 let (a, b) = state.pop2();
1421 let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
1422 let bitwidth = i64::from(type_of(op).lane_bits());
1423 // The spec expects to shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width
1424 // we do `b AND 15`; this means fewer instructions than `iconst + urem`.
1425 let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1);
1426 state.push1(builder.ins().ushr(bitcast_a, b_mod_bitwidth))
1427 }
1428 Operator::I8x16ShrS | Operator::I16x8ShrS | Operator::I32x4ShrS | Operator::I64x2ShrS => {
1429 let (a, b) = state.pop2();
1430 let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
1431 let bitwidth = i64::from(type_of(op).lane_bits());
1432 // The spec expects to shift with `b mod lanewidth`; so, e.g., for 16 bit lane-width
1433 // we do `b AND 15`; this means fewer instructions than `iconst + urem`.
1434 let b_mod_bitwidth = builder.ins().band_imm(b, bitwidth - 1);
1435 state.push1(builder.ins().sshr(bitcast_a, b_mod_bitwidth))
1436 }
1437 Operator::V128Bitselect => {
1438 let (a, b, c) = state.pop3();
1439 let bitcast_a = optionally_bitcast_vector(a, I8X16, builder);
1440 let bitcast_b = optionally_bitcast_vector(b, I8X16, builder);
1441 let bitcast_c = optionally_bitcast_vector(c, I8X16, builder);
1442 // The CLIF operand ordering is slightly different and the types of all three
1443 // operands must match (hence the bitcast).
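// Concretely, Wasm's `v128.bitselect` pops the mask last and computes
// `(a & mask) | (b & !mask)`, while CLIF's `bitselect` takes the mask as its
// first operand, hence the `(c, a, b)` ordering below.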
1444 state.push1(builder.ins().bitselect(bitcast_c, bitcast_a, bitcast_b))
1445 }
1446 Operator::I8x16AnyTrue | Operator::I16x8AnyTrue | Operator::I32x4AnyTrue => {
1447 let a = pop1_with_bitcast(state, type_of(op), builder);
1448 let bool_result = builder.ins().vany_true(a);
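// Wasm's `any_true`/`all_true` return an i32 (0 or 1) while `vany_true` and
// `vall_true` produce a boolean value, hence the `bint` conversion here and in
// the `AllTrue` arm below.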
1449 state.push1(builder.ins().bint(I32, bool_result))
1450 }
1451 Operator::I8x16AllTrue | Operator::I16x8AllTrue | Operator::I32x4AllTrue => {
1452 let a = pop1_with_bitcast(state, type_of(op), builder);
1453 let bool_result = builder.ins().vall_true(a);
1454 state.push1(builder.ins().bint(I32, bool_result))
1455 }
1456 Operator::I8x16Eq | Operator::I16x8Eq | Operator::I32x4Eq => {
1457 translate_vector_icmp(IntCC::Equal, type_of(op), builder, state)
1458 }
1459 Operator::I8x16Ne | Operator::I16x8Ne | Operator::I32x4Ne => {
1460 translate_vector_icmp(IntCC::NotEqual, type_of(op), builder, state)
1461 }
1462 Operator::I8x16GtS | Operator::I16x8GtS | Operator::I32x4GtS => {
1463 translate_vector_icmp(IntCC::SignedGreaterThan, type_of(op), builder, state)
1464 }
1465 Operator::I8x16LtS | Operator::I16x8LtS | Operator::I32x4LtS => {
1466 translate_vector_icmp(IntCC::SignedLessThan, type_of(op), builder, state)
1467 }
1468 Operator::I8x16GtU | Operator::I16x8GtU | Operator::I32x4GtU => {
1469 translate_vector_icmp(IntCC::UnsignedGreaterThan, type_of(op), builder, state)
1470 }
1471 Operator::I8x16LtU | Operator::I16x8LtU | Operator::I32x4LtU => {
1472 translate_vector_icmp(IntCC::UnsignedLessThan, type_of(op), builder, state)
1473 }
1474 Operator::I8x16GeS | Operator::I16x8GeS | Operator::I32x4GeS => {
1475 translate_vector_icmp(IntCC::SignedGreaterThanOrEqual, type_of(op), builder, state)
1476 }
1477 Operator::I8x16LeS | Operator::I16x8LeS | Operator::I32x4LeS => {
1478 translate_vector_icmp(IntCC::SignedLessThanOrEqual, type_of(op), builder, state)
1479 }
1480 Operator::I8x16GeU | Operator::I16x8GeU | Operator::I32x4GeU => translate_vector_icmp(
1481 IntCC::UnsignedGreaterThanOrEqual,
1482 type_of(op),
1483 builder,
1484 state,
1485 ),
1486 Operator::I8x16LeU | Operator::I16x8LeU | Operator::I32x4LeU => {
1487 translate_vector_icmp(IntCC::UnsignedLessThanOrEqual, type_of(op), builder, state)
1488 }
1489 Operator::F32x4Eq | Operator::F64x2Eq => {
1490 translate_vector_fcmp(FloatCC::Equal, type_of(op), builder, state)
1491 }
1492 Operator::F32x4Ne | Operator::F64x2Ne => {
1493 translate_vector_fcmp(FloatCC::NotEqual, type_of(op), builder, state)
1494 }
1495 Operator::F32x4Lt | Operator::F64x2Lt => {
1496 translate_vector_fcmp(FloatCC::LessThan, type_of(op), builder, state)
1497 }
1498 Operator::F32x4Gt | Operator::F64x2Gt => {
1499 translate_vector_fcmp(FloatCC::GreaterThan, type_of(op), builder, state)
1500 }
1501 Operator::F32x4Le | Operator::F64x2Le => {
1502 translate_vector_fcmp(FloatCC::LessThanOrEqual, type_of(op), builder, state)
1503 }
1504 Operator::F32x4Ge | Operator::F64x2Ge => {
1505 translate_vector_fcmp(FloatCC::GreaterThanOrEqual, type_of(op), builder, state)
1506 }
1507 Operator::F32x4Add | Operator::F64x2Add => {
1508 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1509 state.push1(builder.ins().fadd(a, b))
1510 }
1511 Operator::F32x4Sub | Operator::F64x2Sub => {
1512 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1513 state.push1(builder.ins().fsub(a, b))
1514 }
1515 Operator::F32x4Mul | Operator::F64x2Mul => {
1516 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1517 state.push1(builder.ins().fmul(a, b))
1518 }
1519 Operator::F32x4Div | Operator::F64x2Div => {
1520 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1521 state.push1(builder.ins().fdiv(a, b))
1522 }
1523 Operator::F32x4Max | Operator::F64x2Max => {
1524 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1525 state.push1(builder.ins().fmax(a, b))
1526 }
1527 Operator::F32x4Min | Operator::F64x2Min => {
1528 let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
1529 state.push1(builder.ins().fmin(a, b))
1530 }
1531 Operator::F32x4Sqrt | Operator::F64x2Sqrt => {
1532 let a = pop1_with_bitcast(state, type_of(op), builder);
1533 state.push1(builder.ins().sqrt(a))
1534 }
1535 Operator::F32x4Neg | Operator::F64x2Neg => {
1536 let a = pop1_with_bitcast(state, type_of(op), builder);
1537 state.push1(builder.ins().fneg(a))
1538 }
1539 Operator::F32x4Abs | Operator::F64x2Abs => {
1540 let a = pop1_with_bitcast(state, type_of(op), builder);
1541 state.push1(builder.ins().fabs(a))
1542 }
1543 Operator::F32x4ConvertI32x4S => {
1544 let a = pop1_with_bitcast(state, I32X4, builder);
1545 state.push1(builder.ins().fcvt_from_sint(F32X4, a))
1546 }
1547 Operator::I32x4TruncSatF32x4S
1548 | Operator::I32x4TruncSatF32x4U
1549 | Operator::F32x4ConvertI32x4U
1550 | Operator::I8x16Abs
1551 | Operator::I16x8Abs
1552 | Operator::I32x4Abs
1553 | Operator::I8x16NarrowI16x8S { .. }
1554 | Operator::I8x16NarrowI16x8U { .. }
1555 | Operator::I16x8NarrowI32x4S { .. }
1556 | Operator::I16x8NarrowI32x4U { .. }
1557 | Operator::I16x8WidenLowI8x16S { .. }
1558 | Operator::I16x8WidenHighI8x16S { .. }
1559 | Operator::I16x8WidenLowI8x16U { .. }
1560 | Operator::I16x8WidenHighI8x16U { .. }
1561 | Operator::I32x4WidenLowI16x8S { .. }
1562 | Operator::I32x4WidenHighI16x8S { .. }
1563 | Operator::I32x4WidenLowI16x8U { .. }
1564 | Operator::I32x4WidenHighI16x8U { .. } => {
1565 return Err(wasm_unsupported!("proposed SIMD operator {:?}", op));
1566 }
1567
1568 Operator::ReturnCall { .. } | Operator::ReturnCallIndirect { .. } => {
1569 return Err(wasm_unsupported!("proposed tail-call operator {:?}", op));
1570 }
1571 };
1572 Ok(())
1573 }
1574
1575 // Clippy warns us of some fields we are deliberately ignoring
1576 #[cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))]
1577 /// Deals with a Wasm instruction located in an unreachable portion of the code. Most of them
1578 /// are dropped but special ones like `End` or `Else` signal the potential end of the unreachable
1579 /// portion so the translation state must be updated accordingly.
1580 fn translate_unreachable_operator<FE: FuncEnvironment + ?Sized>(
1581 module_translation_state: &ModuleTranslationState,
1582 op: &Operator,
1583 builder: &mut FunctionBuilder,
1584 state: &mut FuncTranslationState,
1585 environ: &mut FE,
1586 ) -> WasmResult<()> {
1587 debug_assert!(!state.reachable);
1588 match *op {
1589 Operator::If { ty } => {
1590 // Push a placeholder control stack entry. The if isn't reachable,
1591 // so we don't have any branches anywhere.
1592 state.push_if(
1593 ir::Block::reserved_value(),
1594 ElseData::NoElse {
1595 branch_inst: ir::Inst::reserved_value(),
1596 },
1597 0,
1598 0,
1599 ty,
1600 );
1601 }
1602 Operator::Loop { ty: _ } | Operator::Block { ty: _ } => {
1603 state.push_block(ir::Block::reserved_value(), 0, 0);
1604 }
1605 Operator::Else => {
1606 let i = state.control_stack.len() - 1;
1607 match state.control_stack[i] {
1608 ControlStackFrame::If {
1609 ref else_data,
1610 head_is_reachable,
1611 ref mut consequent_ends_reachable,
1612 blocktype,
1613 ..
1614 } => {
1615 debug_assert!(consequent_ends_reachable.is_none());
1616 *consequent_ends_reachable = Some(state.reachable);
1617
1618 if head_is_reachable {
1619 // We have a branch from the head of the `if` to the `else`.
1620 state.reachable = true;
1621
1622 let else_block = match *else_data {
1623 ElseData::NoElse { branch_inst } => {
1624 let (params, _results) =
1625 blocktype_params_results(module_translation_state, blocktype)?;
1626 let else_block = block_with_params(builder, params, environ)?;
1627
1628 // We change the target of the branch instruction.
1629 builder.change_jump_destination(branch_inst, else_block);
1630 builder.seal_block(else_block);
1631 else_block
1632 }
1633 ElseData::WithElse { else_block } => else_block,
1634 };
1635
1636 builder.switch_to_block(else_block);
1637
1638 // Again, no need to push the parameters for the `else`,
1639 // since we already did when we saw the original `if`. See
1640 // the comment for translating `Operator::Else` in
1641 // `translate_operator` for details.
1642 }
1643 }
1644 _ => unreachable!(),
1645 }
1646 }
1647 Operator::End => {
1648 let stack = &mut state.stack;
1649 let control_stack = &mut state.control_stack;
1650 let frame = control_stack.pop().unwrap();
1651
1652 // Now we have to discard from the value stack any values left there
1653 // by the unreachable code that was not translated.
1654 stack.truncate(frame.original_stack_size());
1655
1656 let reachable_anyway = match frame {
1657 // If it is a loop we also have to seal the loop's header block.
1658 ControlStackFrame::Loop { header, .. } => {
1659 builder.seal_block(header);
1660 // And loops can't have branches to the end.
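// (A `br` targeting a `loop` label jumps back to the header, so the code
// following the loop is only reachable by falling through, and here the
// fall-through is unreachable.)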
1661 false
1662 }
1663 // If we never set `consequent_ends_reachable` then that means
1664 // we are finishing the consequent now, and there was no
1665 // `else`. Whether the following block is reachable depends only
1666 // on if the head was reachable.
1667 ControlStackFrame::If {
1668 head_is_reachable,
1669 consequent_ends_reachable: None,
1670 ..
1671 } => head_is_reachable,
1672 // Since we are only in this function when in unreachable code,
1673 // we know that the alternative just ended unreachable. Whether
1674 // the following block is reachable depends on if the consequent
1675 // ended reachable or not.
1676 ControlStackFrame::If {
1677 head_is_reachable,
1678 consequent_ends_reachable: Some(consequent_ends_reachable),
1679 ..
1680 } => head_is_reachable && consequent_ends_reachable,
1681 // All other control constructs are already handled.
1682 _ => false,
1683 };
1684
1685 if frame.exit_is_branched_to() || reachable_anyway {
1686 builder.switch_to_block(frame.following_code());
1687 builder.seal_block(frame.following_code());
1688
1689 // And add the return values of the block but only if the next block is reachable
1690 // (which corresponds to testing if the stack depth is 1)
1691 stack.extend_from_slice(builder.block_params(frame.following_code()));
1692 state.reachable = true;
1693 }
1694 }
1695 _ => {
1696 // We don't translate because this is unreachable code
1697 }
1698 }
1699
1700 Ok(())
1701 }
1702
1703 /// Get the address+offset to use for a heap access.
1704 fn get_heap_addr(
1705 heap: ir::Heap,
1706 addr32: ir::Value,
1707 offset: u32,
1708 width: u32,
1709 addr_ty: Type,
1710 builder: &mut FunctionBuilder,
1711 ) -> (ir::Value, i32) {
1712 let offset_guard_size: u64 = builder.func.heaps[heap].offset_guard_size.into();
1713
1714 // How exactly the bounds check is performed here and what it's performed
1715 // on is a bit tricky. Generally we want to rely on access violations (e.g.
1716 // segfaults) to generate traps since that means we don't have to bounds
1717 // check anything explicitly.
1718 //
1719 // If we don't have a guard page of unmapped memory, though, then we can't
1720 // rely on this trapping behavior through segfaults. Instead we need to
1721 // bounds-check the entire memory access here which is everything from
1722 // `addr32 + offset` to `addr32 + offset + width` (not inclusive). In this
1723 // scenario our adjusted offset that we're checking is `offset + width`.
1724 //
1725 // If we have a guard page, however, then we can perform a further
1726 // optimization of the generated code by only checking multiples of the
1727 // offset-guard size to be more CSE-friendly. Knowing that we have at least
1728 // one page of guard region, we can disregard the `width` since we know it is
1729 // always less than one page. Our bounds check covers only the first byte of the
1730 // access: either the check passes and an access that is actually out of bounds
1731 // is guaranteed to fault in the guard region, or the bounds check itself fails. In any case
1732 // we assert that the width is reasonably small for now so this assumption
1733 // can be adjusted in the future if we get larger widths.
1734 //
1735 // Put another way we can say, where `y < offset_guard_size`:
1736 //
1737 // n * offset_guard_size + y = offset
1738 //
1739 // We'll then pass `n * offset_guard_size` as the bounds check value. If
1740 // this traps then our `offset` would have trapped anyway. If this check
1741 // passes we know
1742 //
1743 // addr32 + n * offset_guard_size < bound
1744 //
1745 // which means
1746 //
1747 // addr32 + n * offset_guard_size + y < bound + offset_guard_size
1748 //
1749 // because `y < offset_guard_size`, which then means:
1750 //
1751 // addr32 + offset < bound + offset_guard_size
1752 //
1753 // Since we know that the guard-size bytes are all unmapped we're
1754 // guaranteed that `offset` and the `width` bytes after it are either
1755 // in-bounds or will hit the guard page, meaning we'll get the desired
1756 // semantics we want.
1757 //
1758 // As one final comment on the bits with the guard size here, another goal
1759 // of this is to hit an optimization in `heap_addr` where if the heap size
1760 // minus the offset is >= 4GB then bounds checks are 100% eliminated. This
1761 // means that with huge guard regions (e.g. our 2GB default) most adjusted
1762 // offsets we're checking here are zero. This means that we'll hit the fast
1763 // path and emit zero conditional traps for bounds checks
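// A worked example with hypothetical numbers: given a 4 KiB guard region
// (`offset_guard_size == 0x1000`) and a static `offset` of 0x1805, the adjusted
// offset below is `0x1805 / 0x1000 * 0x1000 == 0x1000`, so accesses at offsets
// 0x1805 and 0x1c00 share an identical bounds check and can be CSE'd. With no
// guard region the adjusted offset is simply `offset + width`.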
1764 let adjusted_offset = if offset_guard_size == 0 {
1765 u64::from(offset) + u64::from(width)
1766 } else {
1767 assert!(width < 1024);
1768 cmp::max(u64::from(offset) / offset_guard_size * offset_guard_size, 1)
1769 };
1770 debug_assert!(adjusted_offset > 0); // want to bounds check at least 1 byte
1771 let check_size = u32::try_from(adjusted_offset).unwrap_or(u32::MAX);
1772 let base = builder.ins().heap_addr(addr_ty, heap, addr32, check_size);
1773
1774 // Native load/store instructions take a signed `Offset32` immediate, so adjust the base
1775 // pointer if necessary.
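// For example (hypothetical value): an `offset` of 0x8000_0010 does not fit in a
// signed `Offset32`, so the base is advanced by 0x8000_0000 and the remaining
// immediate becomes 0x10.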
1776 if offset > i32::MAX as u32 {
1777 // Offset doesn't fit in the load/store instruction.
1778 let adj = builder.ins().iadd_imm(base, i64::from(i32::MAX) + 1);
1779 (adj, (offset - (i32::MAX as u32 + 1)) as i32)
1780 } else {
1781 (base, offset as i32)
1782 }
1783 }
1784
1785 /// Prepare for a load; factors out common functionality between load and load_extend operations.
1786 fn prepare_load<FE: FuncEnvironment + ?Sized>(
1787 offset: u32,
1788 loaded_bytes: u32,
1789 builder: &mut FunctionBuilder,
1790 state: &mut FuncTranslationState,
1791 environ: &mut FE,
1792 ) -> WasmResult<(MemFlags, Value, Offset32)> {
1793 let addr32 = state.pop1();
1794
1795 // We don't yet support multiple linear memories.
1796 let heap = state.get_heap(builder.func, 0, environ)?;
1797 let (base, offset) = get_heap_addr(
1798 heap,
1799 addr32,
1800 offset,
1801 loaded_bytes,
1802 environ.pointer_type(),
1803 builder,
1804 );
1805
1806 // Note that we don't set `is_aligned` here, even if the load instruction's
1807 // alignment immediate says it's aligned, because WebAssembly's immediate
1808 // field is just a hint, while Cranelift's aligned flag needs a guarantee.
1809 let flags = MemFlags::new();
1810
1811 Ok((flags, base, offset.into()))
1812 }
1813
1814 /// Translate a load instruction.
1815 fn translate_load<FE: FuncEnvironment + ?Sized>(
1816 offset: u32,
1817 opcode: ir::Opcode,
1818 result_ty: Type,
1819 builder: &mut FunctionBuilder,
1820 state: &mut FuncTranslationState,
1821 environ: &mut FE,
1822 ) -> WasmResult<()> {
1823 let (flags, base, offset) = prepare_load(
1824 offset,
1825 mem_op_size(opcode, result_ty),
1826 builder,
1827 state,
1828 environ,
1829 )?;
1830 let (load, dfg) = builder.ins().Load(opcode, result_ty, flags, offset, base);
1831 state.push1(dfg.first_result(load));
1832 Ok(())
1833 }
1834
1835 /// Translate a store instruction.
1836 fn translate_store<FE: FuncEnvironment + ?Sized>(
1837 offset: u32,
1838 opcode: ir::Opcode,
1839 builder: &mut FunctionBuilder,
1840 state: &mut FuncTranslationState,
1841 environ: &mut FE,
1842 ) -> WasmResult<()> {
1843 let (addr32, val) = state.pop2();
1844 let val_ty = builder.func.dfg.value_type(val);
1845
1846 // We don't yet support multiple linear memories.
1847 let heap = state.get_heap(builder.func, 0, environ)?;
1848 let (base, offset) = get_heap_addr(
1849 heap,
1850 addr32,
1851 offset,
1852 mem_op_size(opcode, val_ty),
1853 environ.pointer_type(),
1854 builder,
1855 );
1856 // See the comments in `translate_load` about the flags.
1857 let flags = MemFlags::new();
1858 builder
1859 .ins()
1860 .Store(opcode, val_ty, flags, offset.into(), val, base);
1861 Ok(())
1862 }
1863
1864 fn mem_op_size(opcode: ir::Opcode, ty: Type) -> u32 {
1865 match opcode {
1866 ir::Opcode::Istore8 | ir::Opcode::Sload8 | ir::Opcode::Uload8 => 1,
1867 ir::Opcode::Istore16 | ir::Opcode::Sload16 | ir::Opcode::Uload16 => 2,
1868 ir::Opcode::Istore32 | ir::Opcode::Sload32 | ir::Opcode::Uload32 => 4,
1869 ir::Opcode::Store | ir::Opcode::Load => ty.bytes(),
1870 _ => panic!("unknown size of mem op for {:?}", opcode),
1871 }
1872 }
1873
1874 fn translate_icmp(cc: IntCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) {
1875 let (arg0, arg1) = state.pop2();
1876 let val = builder.ins().icmp(cc, arg0, arg1);
1877 state.push1(builder.ins().bint(I32, val));
1878 }
1879
1880 fn translate_vector_icmp(
1881 cc: IntCC,
1882 needed_type: Type,
1883 builder: &mut FunctionBuilder,
1884 state: &mut FuncTranslationState,
1885 ) {
1886 let (a, b) = state.pop2();
1887 let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
1888 let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
1889 state.push1(builder.ins().icmp(cc, bitcast_a, bitcast_b))
1890 }
1891
1892 fn translate_fcmp(cc: FloatCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) {
1893 let (arg0, arg1) = state.pop2();
1894 let val = builder.ins().fcmp(cc, arg0, arg1);
1895 state.push1(builder.ins().bint(I32, val));
1896 }
1897
1898 fn translate_vector_fcmp(
1899 cc: FloatCC,
1900 needed_type: Type,
1901 builder: &mut FunctionBuilder,
1902 state: &mut FuncTranslationState,
1903 ) {
1904 let (a, b) = state.pop2();
1905 let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
1906 let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
1907 state.push1(builder.ins().fcmp(cc, bitcast_a, bitcast_b))
1908 }
1909
1910 fn translate_br_if(
1911 relative_depth: u32,
1912 builder: &mut FunctionBuilder,
1913 state: &mut FuncTranslationState,
1914 ) {
1915 let val = state.pop1();
1916 let (br_destination, inputs) = translate_br_if_args(relative_depth, state);
1917
1918 // Bitcast any vector arguments to their default type, I8X16, before jumping.
1919 let destination_types = builder.func.dfg.block_param_types(br_destination);
1920 bitcast_arguments(inputs, &destination_types[..inputs.len()], builder);
1921
1922 builder.ins().brnz(val, br_destination, inputs);
1923
1924 let next_block = builder.create_block();
1925 builder.ins().jump(next_block, &[]);
1926 builder.seal_block(next_block); // The only predecessor is the current block.
1927 builder.switch_to_block(next_block);
1928 }
1929
1930 fn translate_br_if_args(
1931 relative_depth: u32,
1932 state: &mut FuncTranslationState,
1933 ) -> (ir::Block, &mut [ir::Value]) {
1934 let i = state.control_stack.len() - 1 - (relative_depth as usize);
1935 let (return_count, br_destination) = {
1936 let frame = &mut state.control_stack[i];
1937 // The values returned by the branch are still available for the reachable
1938 // code that comes after it
1939 frame.set_branched_to_exit();
1940 let return_count = if frame.is_loop() {
1941 frame.num_param_values()
1942 } else {
1943 frame.num_return_values()
1944 };
1945 (return_count, frame.br_destination())
1946 };
1947 let inputs = state.peekn_mut(return_count);
1948 (br_destination, inputs)
1949 }
1950
1951 /// Determine the returned value type of a WebAssembly operator
1952 fn type_of(operator: &Operator) -> Type {
1953 match operator {
1954 Operator::V128Load { .. }
1955 | Operator::V128Store { .. }
1956 | Operator::V128Const { .. }
1957 | Operator::V128Not
1958 | Operator::V128And
1959 | Operator::V128AndNot
1960 | Operator::V128Or
1961 | Operator::V128Xor
1962 | Operator::V128Bitselect => I8X16, // default type representing V128
1963
1964 Operator::V8x16Shuffle { .. }
1965 | Operator::I8x16Splat
1966 | Operator::V8x16LoadSplat { .. }
1967 | Operator::I8x16ExtractLaneS { .. }
1968 | Operator::I8x16ExtractLaneU { .. }
1969 | Operator::I8x16ReplaceLane { .. }
1970 | Operator::I8x16Eq
1971 | Operator::I8x16Ne
1972 | Operator::I8x16LtS
1973 | Operator::I8x16LtU
1974 | Operator::I8x16GtS
1975 | Operator::I8x16GtU
1976 | Operator::I8x16LeS
1977 | Operator::I8x16LeU
1978 | Operator::I8x16GeS
1979 | Operator::I8x16GeU
1980 | Operator::I8x16Neg
1981 | Operator::I8x16AnyTrue
1982 | Operator::I8x16AllTrue
1983 | Operator::I8x16Shl
1984 | Operator::I8x16ShrS
1985 | Operator::I8x16ShrU
1986 | Operator::I8x16Add
1987 | Operator::I8x16AddSaturateS
1988 | Operator::I8x16AddSaturateU
1989 | Operator::I8x16Sub
1990 | Operator::I8x16SubSaturateS
1991 | Operator::I8x16SubSaturateU
1992 | Operator::I8x16MinS
1993 | Operator::I8x16MinU
1994 | Operator::I8x16MaxS
1995 | Operator::I8x16MaxU
1996 | Operator::I8x16RoundingAverageU => I8X16,
1997
1998 Operator::I16x8Splat
1999 | Operator::V16x8LoadSplat { .. }
2000 | Operator::I16x8ExtractLaneS { .. }
2001 | Operator::I16x8ExtractLaneU { .. }
2002 | Operator::I16x8ReplaceLane { .. }
2003 | Operator::I16x8Eq
2004 | Operator::I16x8Ne
2005 | Operator::I16x8LtS
2006 | Operator::I16x8LtU
2007 | Operator::I16x8GtS
2008 | Operator::I16x8GtU
2009 | Operator::I16x8LeS
2010 | Operator::I16x8LeU
2011 | Operator::I16x8GeS
2012 | Operator::I16x8GeU
2013 | Operator::I16x8Neg
2014 | Operator::I16x8AnyTrue
2015 | Operator::I16x8AllTrue
2016 | Operator::I16x8Shl
2017 | Operator::I16x8ShrS
2018 | Operator::I16x8ShrU
2019 | Operator::I16x8Add
2020 | Operator::I16x8AddSaturateS
2021 | Operator::I16x8AddSaturateU
2022 | Operator::I16x8Sub
2023 | Operator::I16x8SubSaturateS
2024 | Operator::I16x8SubSaturateU
2025 | Operator::I16x8MinS
2026 | Operator::I16x8MinU
2027 | Operator::I16x8MaxS
2028 | Operator::I16x8MaxU
2029 | Operator::I16x8RoundingAverageU
2030 | Operator::I16x8Mul => I16X8,
2031
2032 Operator::I32x4Splat
2033 | Operator::V32x4LoadSplat { .. }
2034 | Operator::I32x4ExtractLane { .. }
2035 | Operator::I32x4ReplaceLane { .. }
2036 | Operator::I32x4Eq
2037 | Operator::I32x4Ne
2038 | Operator::I32x4LtS
2039 | Operator::I32x4LtU
2040 | Operator::I32x4GtS
2041 | Operator::I32x4GtU
2042 | Operator::I32x4LeS
2043 | Operator::I32x4LeU
2044 | Operator::I32x4GeS
2045 | Operator::I32x4GeU
2046 | Operator::I32x4Neg
2047 | Operator::I32x4AnyTrue
2048 | Operator::I32x4AllTrue
2049 | Operator::I32x4Shl
2050 | Operator::I32x4ShrS
2051 | Operator::I32x4ShrU
2052 | Operator::I32x4Add
2053 | Operator::I32x4Sub
2054 | Operator::I32x4Mul
2055 | Operator::I32x4MinS
2056 | Operator::I32x4MinU
2057 | Operator::I32x4MaxS
2058 | Operator::I32x4MaxU
2059 | Operator::F32x4ConvertI32x4S
2060 | Operator::F32x4ConvertI32x4U => I32X4,
2061
2062 Operator::I64x2Splat
2063 | Operator::V64x2LoadSplat { .. }
2064 | Operator::I64x2ExtractLane { .. }
2065 | Operator::I64x2ReplaceLane { .. }
2066 | Operator::I64x2Neg
2067 | Operator::I64x2Shl
2068 | Operator::I64x2ShrS
2069 | Operator::I64x2ShrU
2070 | Operator::I64x2Add
2071 | Operator::I64x2Sub
2072 | Operator::I64x2Mul => I64X2,
2073
2074 Operator::F32x4Splat
2075 | Operator::F32x4ExtractLane { .. }
2076 | Operator::F32x4ReplaceLane { .. }
2077 | Operator::F32x4Eq
2078 | Operator::F32x4Ne
2079 | Operator::F32x4Lt
2080 | Operator::F32x4Gt
2081 | Operator::F32x4Le
2082 | Operator::F32x4Ge
2083 | Operator::F32x4Abs
2084 | Operator::F32x4Neg
2085 | Operator::F32x4Sqrt
2086 | Operator::F32x4Add
2087 | Operator::F32x4Sub
2088 | Operator::F32x4Mul
2089 | Operator::F32x4Div
2090 | Operator::F32x4Min
2091 | Operator::F32x4Max
2092 | Operator::I32x4TruncSatF32x4S
2093 | Operator::I32x4TruncSatF32x4U => F32X4,
2094
2095 Operator::F64x2Splat
2096 | Operator::F64x2ExtractLane { .. }
2097 | Operator::F64x2ReplaceLane { .. }
2098 | Operator::F64x2Eq
2099 | Operator::F64x2Ne
2100 | Operator::F64x2Lt
2101 | Operator::F64x2Gt
2102 | Operator::F64x2Le
2103 | Operator::F64x2Ge
2104 | Operator::F64x2Abs
2105 | Operator::F64x2Neg
2106 | Operator::F64x2Sqrt
2107 | Operator::F64x2Add
2108 | Operator::F64x2Sub
2109 | Operator::F64x2Mul
2110 | Operator::F64x2Div
2111 | Operator::F64x2Min
2112 | Operator::F64x2Max => F64X2,
2113
2114 _ => unimplemented!(
2115 "Currently only SIMD instructions are mapped to their return type; the \
2116 following instruction is not mapped: {:?}",
2117 operator
2118 ),
2119 }
2120 }
2121
2122 /// Some SIMD operations need their operands in a specific CLIF type (often the default `I8X16`);
2123 /// this converts `value` to `needed_type` by adding a `raw_bitcast` if necessary.
2124 pub fn optionally_bitcast_vector(
2125 value: Value,
2126 needed_type: Type,
2127 builder: &mut FunctionBuilder,
2128 ) -> Value {
2129 if builder.func.dfg.value_type(value) != needed_type {
2130 builder.ins().raw_bitcast(needed_type, value)
2131 } else {
2132 value
2133 }
2134 }
2135
2136 /// A helper for popping and bitcasting a single value; since SIMD values can lose their type by
2137 /// using v128 (i.e. CLIF's I8x16) we must re-type the values using a bitcast to avoid CLIF
2138 /// typing issues.
2139 fn pop1_with_bitcast(
2140 state: &mut FuncTranslationState,
2141 needed_type: Type,
2142 builder: &mut FunctionBuilder,
2143 ) -> Value {
2144 optionally_bitcast_vector(state.pop1(), needed_type, builder)
2145 }
2146
2147 /// A helper for popping and bitcasting two values; since SIMD values can lose their type by
2148 /// using v128 (i.e. CLIF's I8x16) we must re-type the values using a bitcast to avoid CLIF
2149 /// typing issues.
2150 fn pop2_with_bitcast(
2151 state: &mut FuncTranslationState,
2152 needed_type: Type,
2153 builder: &mut FunctionBuilder,
2154 ) -> (Value, Value) {
2155 let (a, b) = state.pop2();
2156 let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
2157 let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
2158 (bitcast_a, bitcast_b)
2159 }
2160
2161 /// A helper for bitcasting a sequence of values (e.g. function arguments). If a value is a
2162 /// vector type that does not match its expected type, this will modify the value in place to point
2163 /// to the result of a `raw_bitcast`. This conversion is necessary to translate Wasm code that
2164 /// uses `V128` as function parameters (or implicitly in block parameters) and still use specific
2165 /// CLIF types (e.g. `I32X4`) in the function body.
2166 pub fn bitcast_arguments(
2167 arguments: &mut [Value],
2168 expected_types: &[Type],
2169 builder: &mut FunctionBuilder,
2170 ) {
2171 assert_eq!(arguments.len(), expected_types.len());
2172 for (i, t) in expected_types.iter().enumerate() {
2173 if t.is_vector() {
2174 assert!(
2175 builder.func.dfg.value_type(arguments[i]).is_vector(),
2176 "unexpected type mismatch: expected {}, argument {} was actually of type {}",
2177 t,
2178 arguments[i],
2179 builder.func.dfg.value_type(arguments[i])
2180 );
2181 arguments[i] = optionally_bitcast_vector(arguments[i], *t, builder)
2182 }
2183 }
2184 }
2185
2186 /// A helper to extract the `Type` of each parameter in `params`, keeping
2187 /// only the parameters for which `is_wasm` returns true; typically paired with
2188 /// `is_wasm_return` or `is_wasm_parameter`.
2189 pub fn wasm_param_types(params: &[ir::AbiParam], is_wasm: impl Fn(usize) -> bool) -> Vec<Type> {
2190 let mut ret = Vec::with_capacity(params.len());
2191 for (i, param) in params.iter().enumerate() {
2192 if is_wasm(i) {
2193 ret.push(param.value_type);
2194 }
2195 }
2196 ret
2197 }
2198