// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/baseline/liftoff-compiler.h"

#include "src/base/enum-set.h"
#include "src/base/optional.h"
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler-inl.h"
// TODO(clemensb): Remove dependences on compiler stuff.
#include "src/codegen/external-reference.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/smi.h"
#include "src/tracing/trace-event.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/object-access.h"
#include "src/wasm/simd-shuffle.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes-inl.h"

namespace v8 {
namespace internal {
namespace wasm {

constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
constexpr auto kIntConst = LiftoffAssembler::VarState::kIntConst;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;

namespace {

#define __ asm_.

#define TRACE(...)                                            \
  do {                                                        \
    if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
  } while (false)

#define WASM_INSTANCE_OBJECT_FIELD_OFFSET(name) \
  ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)

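// Compile-time check that a WasmInstanceObject field has the expected size;
// {size} is used by the LOAD_INSTANCE_FIELD macro below.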
template <int expected_size, int actual_size>
struct assert_field_size {
  static_assert(expected_size == actual_size,
                "field in WasmInstance does not have the expected size");
  static constexpr int size = actual_size;
};

#define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
  FIELD_SIZE(WasmInstanceObject::k##name##Offset)

#define LOAD_INSTANCE_FIELD(dst, name, load_size, pinned)                      \
  __ LoadFromInstance(dst, LoadInstanceIntoRegister(pinned, dst),              \
                      WASM_INSTANCE_OBJECT_FIELD_OFFSET(name),                 \
                      assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \
                                        load_size>::size);

#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name, pinned)                      \
  static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize,          \
                "field in WasmInstance does not have the expected size");      \
  __ LoadTaggedPointerFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
                                   WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));

#ifdef V8_CODE_COMMENTS
#define CODE_COMMENT(str)  \
  do {                     \
    __ RecordComment(str); \
  } while (false)
#else
#define CODE_COMMENT(str) ((void)0)
#endif

constexpr LoadType::LoadTypeValue kPointerLoadType =
    kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;

constexpr ValueKind kPointerKind = LiftoffAssembler::kPointerKind;
constexpr ValueKind kSmiKind = LiftoffAssembler::kSmiKind;
constexpr ValueKind kTaggedKind = LiftoffAssembler::kTaggedKind;

// Used to construct fixed-size signatures: MakeSig::Returns(...).Params(...);
using MakeSig = FixedSizeSignature<ValueKind>;

#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
// branches to distant targets. Moving labels would confuse the Assembler,
// thus store the label on the heap and keep a unique_ptr.
class MovableLabel {
 public:
  MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
  MovableLabel() : label_(new Label()) {}

  Label* get() { return label_.get(); }

 private:
  std::unique_ptr<Label> label_;
};
#else
// On all other platforms, just store the Label directly.
class MovableLabel {
 public:
  MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);

  Label* get() { return &label_; }

 private:
  Label label_;
};
#endif

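// On 32-bit platforms, i64 values are passed as two i32 values, so the call
// descriptor has to be lowered; on 64-bit platforms it is used unchanged.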
compiler::CallDescriptor* GetLoweredCallDescriptor(
    Zone* zone, compiler::CallDescriptor* call_desc) {
  return kSystemPointerSize == 4
             ? compiler::GetI32WasmCallDescriptor(zone, call_desc)
             : call_desc;
}

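// Maps an i32 comparison opcode to the corresponding Liftoff condition.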
constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
  switch (opcode) {
    case kExprI32Eq:
      return kEqual;
    case kExprI32Ne:
      return kUnequal;
    case kExprI32LtS:
      return kSignedLessThan;
    case kExprI32LtU:
      return kUnsignedLessThan;
    case kExprI32GtS:
      return kSignedGreaterThan;
    case kExprI32GtU:
      return kUnsignedGreaterThan;
    case kExprI32LeS:
      return kSignedLessEqual;
    case kExprI32LeU:
      return kUnsignedLessEqual;
    case kExprI32GeS:
      return kSignedGreaterEqual;
    case kExprI32GeU:
      return kUnsignedGreaterEqual;
    default:
      UNREACHABLE();
  }
}

// Builds a {DebugSideTable}.
class DebugSideTableBuilder {
  using Entry = DebugSideTable::Entry;
  using Value = Entry::Value;

 public:
  enum AssumeSpilling {
    // All register values will be spilled before the pc covered by the debug
    // side table entry. Register slots will be marked as stack slots in the
    // generated debug side table entry.
    kAssumeSpilling,
    // Register slots will be written out as they are.
    kAllowRegisters,
    // Register slots cannot appear since we already spilled.
    kDidSpill
  };

  class EntryBuilder {
   public:
    explicit EntryBuilder(int pc_offset, int stack_height,
                          std::vector<Value> changed_values)
        : pc_offset_(pc_offset),
          stack_height_(stack_height),
          changed_values_(std::move(changed_values)) {}

    Entry ToTableEntry() {
      return Entry{pc_offset_, stack_height_, std::move(changed_values_)};
    }

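    // Removes the values that are identical to {last_values}, so that only
    // the delta to the previous entry remains.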
    void MinimizeBasedOnPreviousStack(const std::vector<Value>& last_values) {
      auto dst = changed_values_.begin();
      auto end = changed_values_.end();
      for (auto src = dst; src != end; ++src) {
        if (src->index < static_cast<int>(last_values.size()) &&
            *src == last_values[src->index]) {
          continue;
        }
        if (dst != src) *dst = *src;
        ++dst;
      }
      changed_values_.erase(dst, end);
    }

    int pc_offset() const { return pc_offset_; }
    void set_pc_offset(int new_pc_offset) { pc_offset_ = new_pc_offset; }

   private:
    int pc_offset_;
    int stack_height_;
    std::vector<Value> changed_values_;
  };

  // Adds a new entry in regular code.
  void NewEntry(int pc_offset,
                base::Vector<DebugSideTable::Entry::Value> values) {
    entries_.emplace_back(pc_offset, static_cast<int>(values.size()),
                          GetChangedStackValues(last_values_, values));
  }

  // Adds a new entry for OOL code, and returns a pointer to a builder for
  // modifying that entry.
  EntryBuilder* NewOOLEntry(base::Vector<DebugSideTable::Entry::Value> values) {
    constexpr int kNoPcOffsetYet = -1;
    ool_entries_.emplace_back(kNoPcOffsetYet, static_cast<int>(values.size()),
                              GetChangedStackValues(last_ool_values_, values));
    return &ool_entries_.back();
  }

  void SetNumLocals(int num_locals) {
    DCHECK_EQ(-1, num_locals_);
    DCHECK_LE(0, num_locals);
    num_locals_ = num_locals;
  }

  std::unique_ptr<DebugSideTable> GenerateDebugSideTable() {
    DCHECK_LE(0, num_locals_);

    // Connect {entries_} and {ool_entries_} by removing redundant stack
    // information from the first {ool_entries_} entry (based on
    // {last_values_}).
    if (!entries_.empty() && !ool_entries_.empty()) {
      ool_entries_.front().MinimizeBasedOnPreviousStack(last_values_);
    }

    std::vector<Entry> entries;
    entries.reserve(entries_.size() + ool_entries_.size());
    for (auto& entry : entries_) entries.push_back(entry.ToTableEntry());
    for (auto& entry : ool_entries_) entries.push_back(entry.ToTableEntry());
    DCHECK(std::is_sorted(
        entries.begin(), entries.end(),
        [](Entry& a, Entry& b) { return a.pc_offset() < b.pc_offset(); }));
    return std::make_unique<DebugSideTable>(num_locals_, std::move(entries));
  }

 private:
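  // Returns the stack values that changed compared to {last_values}, and
  // updates {last_values} to the new state.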
  static std::vector<Value> GetChangedStackValues(
      std::vector<Value>& last_values,
      base::Vector<DebugSideTable::Entry::Value> values) {
    std::vector<Value> changed_values;
    int old_stack_size = static_cast<int>(last_values.size());
    last_values.resize(values.size());

    int index = 0;
    for (const auto& value : values) {
      if (index >= old_stack_size || last_values[index] != value) {
        changed_values.push_back(value);
        last_values[index] = value;
      }
      ++index;
    }
    return changed_values;
  }

  int num_locals_ = -1;
  // Keep a snapshot of the stack of the last entry, to generate a delta to the
  // next entry.
  std::vector<Value> last_values_;
  std::vector<EntryBuilder> entries_;
  // Keep OOL code entries separate so we can do proper delta-encoding (more
  // entries might be added between the existing {entries_} and the
  // {ool_entries_}). Store the entries in a list so the pointer is not
  // invalidated by adding more entries.
  std::vector<Value> last_ool_values_;
  std::list<EntryBuilder> ool_entries_;
};

void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
                         const CompilationEnv* env) {
  // Decode errors are ok.
  if (reason == kDecodeError) return;

  // Missing CPU features are also generally OK for now.
  if (reason == kMissingCPUFeature) return;

  // --liftoff-only ensures that tests actually exercise the Liftoff path
  // without bailing out. Bailing out due to (simulated) lack of CPU support
  // is okay though (see above).
  if (FLAG_liftoff_only) {
    FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s", detail);
  }

  // If --enable-testing-opcode-in-wasm is set, we are expected to bailout with
  // "testing opcode".
  if (FLAG_enable_testing_opcode_in_wasm &&
      strcmp(detail, "testing opcode") == 0) {
    return;
  }

  // Some externally maintained architectures don't fully implement Liftoff yet.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
  return;
#endif

#define LIST_FEATURE(name, ...) kFeature_##name,
  constexpr WasmFeatures kExperimentalFeatures{
      FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
#undef LIST_FEATURE

  // Bailout is allowed if any experimental feature is enabled.
  if (env->enabled_features.contains_any(kExperimentalFeatures)) return;

  // Otherwise, bailout is not allowed.
  FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
}

class LiftoffCompiler {
 public:
  // TODO(clemensb): Make this a template parameter.
  static constexpr Decoder::ValidateFlag validate = Decoder::kBooleanValidation;

  using Value = ValueBase<validate>;

  struct ElseState {
    MovableLabel label;
    LiftoffAssembler::CacheState state;
  };

  struct TryInfo {
    TryInfo() = default;
    LiftoffAssembler::CacheState catch_state;
    Label catch_label;
    bool catch_reached = false;
    bool in_handler = false;
  };

  struct Control : public ControlBase<Value, validate> {
    std::unique_ptr<ElseState> else_state;
    LiftoffAssembler::CacheState label_state;
    MovableLabel label;
    std::unique_ptr<TryInfo> try_info;
    // Number of exceptions on the stack below this control.
    int num_exceptions = 0;

    MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);

    template <typename... Args>
    explicit Control(Args&&... args) V8_NOEXCEPT
        : ControlBase(std::forward<Args>(args)...) {}
  };

  using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
  using ValueKindSig = LiftoffAssembler::ValueKindSig;

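  // A {Signature<ValueKind>} that keeps the kinds in inline storage for small
  // signatures, avoiding a zone allocation in the common case.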
  class MostlySmallValueKindSig : public Signature<ValueKind> {
   public:
    MostlySmallValueKindSig(Zone* zone, const FunctionSig* sig)
        : Signature<ValueKind>(sig->return_count(), sig->parameter_count(),
                               MakeKinds(inline_storage_, zone, sig)) {}

   private:
    static constexpr size_t kInlineStorage = 8;

    static ValueKind* MakeKinds(ValueKind* storage, Zone* zone,
                                const FunctionSig* sig) {
      const size_t size = sig->parameter_count() + sig->return_count();
      if (V8_UNLIKELY(size > kInlineStorage)) {
        storage = zone->NewArray<ValueKind>(size);
      }
      std::transform(sig->all().begin(), sig->all().end(), storage,
                     [](ValueType type) { return type.kind(); });
      return storage;
    }

    ValueKind inline_storage_[kInlineStorage];
  };

  // For debugging, we need to spill registers before a trap or a stack check
  // to be able to inspect them.
  struct SpilledRegistersForInspection : public ZoneObject {
    struct Entry {
      int offset;
      LiftoffRegister reg;
      ValueKind kind;
    };
    ZoneVector<Entry> entries;

    explicit SpilledRegistersForInspection(Zone* zone) : entries(zone) {}
  };

  struct OutOfLineSafepointInfo {
    ZoneVector<int> slots;
    LiftoffRegList spills;

    explicit OutOfLineSafepointInfo(Zone* zone) : slots(zone) {}
  };

  struct OutOfLineCode {
    MovableLabel label;
    MovableLabel continuation;
    WasmCode::RuntimeStubId stub;
    WasmCodePosition position;
    LiftoffRegList regs_to_save;
    Register cached_instance;
    OutOfLineSafepointInfo* safepoint_info;
    uint32_t pc;  // for trap handler.
    // These two pointers will only be used for debug code:
    SpilledRegistersForInspection* spilled_registers;
    DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder;

    // Named constructors:
    static OutOfLineCode Trap(
        WasmCode::RuntimeStubId s, WasmCodePosition pos,
        SpilledRegistersForInspection* spilled_registers,
        OutOfLineSafepointInfo* safepoint_info, uint32_t pc,
        DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
      DCHECK_LT(0, pos);
      return {
          {},                            // label
          {},                            // continuation
          s,                             // stub
          pos,                           // position
          {},                            // regs_to_save
          no_reg,                        // cached_instance
          safepoint_info,                // safepoint_info
          pc,                            // pc
          spilled_registers,             // spilled_registers
          debug_sidetable_entry_builder  // debug_side_table_entry_builder
      };
    }
    static OutOfLineCode StackCheck(
        WasmCodePosition pos, LiftoffRegList regs_to_save,
        Register cached_instance, SpilledRegistersForInspection* spilled_regs,
        OutOfLineSafepointInfo* safepoint_info,
        DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
      return {
          {},                            // label
          {},                            // continuation
          WasmCode::kWasmStackGuard,     // stub
          pos,                           // position
          regs_to_save,                  // regs_to_save
          cached_instance,               // cached_instance
          safepoint_info,                // safepoint_info
          0,                             // pc
          spilled_regs,                  // spilled_registers
          debug_sidetable_entry_builder  // debug_side_table_entry_builder
      };
    }
  };

  LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
                  CompilationEnv* env, Zone* compilation_zone,
                  std::unique_ptr<AssemblerBuffer> buffer,
                  DebugSideTableBuilder* debug_sidetable_builder,
                  ForDebugging for_debugging, int func_index,
                  base::Vector<const int> breakpoints = {},
                  int dead_breakpoint = 0, int32_t* max_steps = nullptr,
                  int32_t* nondeterminism = nullptr)
      : asm_(std::move(buffer)),
        descriptor_(
            GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
        env_(env),
        debug_sidetable_builder_(debug_sidetable_builder),
        for_debugging_(for_debugging),
        func_index_(func_index),
        out_of_line_code_(compilation_zone),
        source_position_table_builder_(compilation_zone),
        protected_instructions_(compilation_zone),
        compilation_zone_(compilation_zone),
        safepoint_table_builder_(compilation_zone_),
        next_breakpoint_ptr_(breakpoints.begin()),
        next_breakpoint_end_(breakpoints.end()),
        dead_breakpoint_(dead_breakpoint),
        handlers_(compilation_zone),
        max_steps_(max_steps),
        nondeterminism_(nondeterminism) {
    if (breakpoints.empty()) {
      next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
    }
  }

  bool did_bailout() const { return bailout_reason_ != kSuccess; }
  LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }

  void GetCode(CodeDesc* desc) {
    asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
                 handler_table_offset_);
  }

  std::unique_ptr<AssemblerBuffer> ReleaseBuffer() {
    return asm_.ReleaseBuffer();
  }

  base::OwnedVector<uint8_t> GetSourcePositionTable() {
    return source_position_table_builder_.ToSourcePositionTableVector();
  }

  base::OwnedVector<uint8_t> GetProtectedInstructionsData() const {
    return base::OwnedVector<uint8_t>::Of(base::Vector<const uint8_t>::cast(
        base::VectorOf(protected_instructions_)));
  }

  uint32_t GetTotalFrameSlotCountForGC() const {
    return __ GetTotalFrameSlotCountForGC();
  }

  void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
                   const char* detail) {
    DCHECK_NE(kSuccess, reason);
    if (did_bailout()) return;
    bailout_reason_ = reason;
    TRACE("unsupported: %s\n", detail);
    decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
                    detail);
    UnuseLabels(decoder);
    CheckBailoutAllowed(reason, detail, env_);
  }

  bool DidAssemblerBailout(FullDecoder* decoder) {
    if (decoder->failed() || !__ did_bailout()) return false;
    unsupported(decoder, __ bailout_reason(), __ bailout_detail());
    return true;
  }

  V8_INLINE bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
                                    const char* context) {
    if (V8_LIKELY(supported_types_.contains(kind))) return true;
    return MaybeBailoutForUnsupportedType(decoder, kind, context);
  }

  V8_NOINLINE bool MaybeBailoutForUnsupportedType(FullDecoder* decoder,
                                                  ValueKind kind,
                                                  const char* context) {
    DCHECK(!supported_types_.contains(kind));

    // Lazily update {supported_types_}; then check again.
    if (CpuFeatures::SupportsWasmSimd128()) supported_types_.Add(kS128);
    if (supported_types_.contains(kind)) return true;

    LiftoffBailoutReason bailout_reason;
    switch (kind) {
      case kS128:
        bailout_reason = kMissingCPUFeature;
        break;
      case kRef:
      case kOptRef:
      case kRtt:
      case kRttWithDepth:
      case kI8:
      case kI16:
        bailout_reason = kRefTypes;
        break;
      default:
        UNREACHABLE();
    }
    base::EmbeddedVector<char, 128> buffer;
    SNPrintF(buffer, "%s %s", name(kind), context);
    unsupported(decoder, bailout_reason, buffer.begin());
    return false;
  }

  void UnuseLabels(FullDecoder* decoder) {
#ifdef DEBUG
    auto Unuse = [](Label* label) {
      label->Unuse();
      label->UnuseNear();
    };
    // Unuse all labels now, otherwise their destructor will fire a DCHECK
    // error if they were referenced before.
    uint32_t control_depth = decoder ? decoder->control_depth() : 0;
    for (uint32_t i = 0; i < control_depth; ++i) {
      Control* c = decoder->control_at(i);
      Unuse(c->label.get());
      if (c->else_state) Unuse(c->else_state->label.get());
      if (c->try_info != nullptr) Unuse(&c->try_info->catch_label);
    }
    for (auto& ool : out_of_line_code_) Unuse(ool.label.get());
#endif
  }

  void StartFunction(FullDecoder* decoder) {
    if (FLAG_trace_liftoff && !FLAG_trace_wasm_decoder) {
      StdoutStream{} << "hint: add --trace-wasm-decoder to also see the wasm "
                        "instructions being decoded\n";
    }
    int num_locals = decoder->num_locals();
    __ set_num_locals(num_locals);
    for (int i = 0; i < num_locals; ++i) {
      ValueKind kind = decoder->local_type(i).kind();
      __ set_local_kind(i, kind);
    }
  }

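  // Returns the set of GP cache registers that are not used for parameter
  // passing.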
  constexpr static LiftoffRegList RegsUnusedByParams() {
    LiftoffRegList regs = kGpCacheRegList;
    for (auto reg : kGpParamRegisters) {
      regs.clear(reg);
    }
    return regs;
  }

  // Returns the number of inputs processed (1 or 2).
  uint32_t ProcessParameter(ValueKind kind, uint32_t input_idx) {
    const bool needs_pair = needs_gp_reg_pair(kind);
    const ValueKind reg_kind = needs_pair ? kI32 : kind;
    const RegClass rc = reg_class_for(reg_kind);

    auto LoadToReg = [this, reg_kind, rc](compiler::LinkageLocation location,
                                          LiftoffRegList pinned) {
      if (location.IsRegister()) {
        DCHECK(!location.IsAnyRegister());
        return LiftoffRegister::from_external_code(rc, reg_kind,
                                                   location.AsRegister());
      }
      DCHECK(location.IsCallerFrameSlot());
      // For reference type parameters we have to use registers that were not
      // used for parameters because some reference type stack parameters may
      // get processed before some value type register parameters.
      static constexpr auto kRegsUnusedByParams = RegsUnusedByParams();
      LiftoffRegister reg = is_reference(reg_kind)
                                ? __ GetUnusedRegister(kRegsUnusedByParams)
                                : __ GetUnusedRegister(rc, pinned);
      __ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
      return reg;
    };

    LiftoffRegister reg =
        LoadToReg(descriptor_->GetInputLocation(input_idx), {});
    if (needs_pair) {
      LiftoffRegister reg2 =
          LoadToReg(descriptor_->GetInputLocation(input_idx + 1),
                    LiftoffRegList::ForRegs(reg));
      reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp());
    }
    __ PushRegister(kind, reg);

    return needs_pair ? 2 : 1;
  }

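  // Emits a stack check against the limit stored in the instance; the slow
  // path calling the stack guard is emitted as out-of-line code.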
  void StackCheck(FullDecoder* decoder, WasmCodePosition position) {
    CODE_COMMENT("stack check");
    if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) return;

    // Loading the limit address can change the stack state, hence do this
    // before storing information about registers.
    Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
    LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize,
                        {});

    LiftoffRegList regs_to_save = __ cache_state()->used_registers;
    // The cached instance will be reloaded separately.
    if (__ cache_state()->cached_instance != no_reg) {
      DCHECK(regs_to_save.has(__ cache_state()->cached_instance));
      regs_to_save.clear(__ cache_state()->cached_instance);
    }
    SpilledRegistersForInspection* spilled_regs = nullptr;

    OutOfLineSafepointInfo* safepoint_info =
        compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
    __ cache_state()->GetTaggedSlotsForOOLCode(
        &safepoint_info->slots, &safepoint_info->spills,
        for_debugging_
            ? LiftoffAssembler::CacheState::SpillLocation::kStackSlots
            : LiftoffAssembler::CacheState::SpillLocation::kTopOfStack);
    if (V8_UNLIKELY(for_debugging_)) {
      // When debugging, we do not just push all registers to the stack, but we
      // spill them to their proper stack locations such that we can inspect
      // them.
      // The only exception is the cached memory start, which we just push
      // before the stack check and pop afterwards.
      regs_to_save = {};
      if (__ cache_state()->cached_mem_start != no_reg) {
        regs_to_save.set(__ cache_state()->cached_mem_start);
      }
      spilled_regs = GetSpilledRegistersForInspection();
    }
    out_of_line_code_.push_back(OutOfLineCode::StackCheck(
        position, regs_to_save, __ cache_state()->cached_instance, spilled_regs,
        safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
    OutOfLineCode& ool = out_of_line_code_.back();
    __ StackCheck(ool.label.get(), limit_address);
    __ bind(ool.continuation.get());
  }

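  // Returns whether locals beyond the parameters should be initialized by
  // zero-filling their stack slots (instead of pushing zero constants).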
  bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
    int actual_locals = __ num_locals() - num_params;
    DCHECK_LE(0, actual_locals);
    constexpr int kNumCacheRegisters = NumRegs(kLiftoffAssemblerGpCacheRegs);
    // If we have many locals, we put them on the stack initially. This avoids
    // having to spill them on merge points. Use of these initial values should
    // be rare anyway.
    if (actual_locals > kNumCacheRegisters / 2) return true;
    // If there are locals which are not i32 or i64, we also spill all locals,
    // because other types cannot be initialized to constants.
    for (uint32_t param_idx = num_params; param_idx < __ num_locals();
         ++param_idx) {
      ValueKind kind = __ local_kind(param_idx);
      if (kind != kI32 && kind != kI64) return true;
    }
    return false;
  }

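  // Calls the runtime stub that triggers tier-up (optimizing recompilation)
  // of the current function.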
  void TierUpFunction(FullDecoder* decoder) {
    __ CallRuntimeStub(WasmCode::kWasmTriggerTierUp);
    DefineSafepoint();
  }

  void TraceFunctionEntry(FullDecoder* decoder) {
    CODE_COMMENT("trace function entry");
    __ SpillAllRegisters();
    source_position_table_builder_.AddPosition(
        __ pc_offset(), SourcePosition(decoder->position()), false);
    __ CallRuntimeStub(WasmCode::kWasmTraceEnter);
    DefineSafepoint();
  }

  void StartFunctionBody(FullDecoder* decoder, Control* block) {
    for (uint32_t i = 0; i < __ num_locals(); ++i) {
      if (!CheckSupportedType(decoder, __ local_kind(i), "param")) return;
    }

    // Parameter 0 is the instance parameter.
    uint32_t num_params =
        static_cast<uint32_t>(decoder->sig_->parameter_count());

    __ CodeEntry();

    __ EnterFrame(StackFrame::WASM);
    __ set_has_frame(true);
    pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
    // {PrepareStackFrame} is the first platform-specific assembler method.
    // If this failed, we can bail out immediately, avoiding runtime overhead
    // and potential failures because of other unimplemented methods.
    // A platform implementing {PrepareStackFrame} must ensure that we can
    // finish compilation without errors even if we hit unimplemented
    // LiftoffAssembler methods.
    if (DidAssemblerBailout(decoder)) return;

    // Input 0 is the call target, the instance is at 1.
    constexpr int kInstanceParameterIndex = 1;
    // Check that {kWasmInstanceRegister} matches our call descriptor.
    DCHECK_EQ(kWasmInstanceRegister,
              Register::from_code(
                  descriptor_->GetInputLocation(kInstanceParameterIndex)
                      .AsRegister()));
    __ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
    if (for_debugging_) __ ResetOSRTarget();

    // Process parameters.
    if (num_params) CODE_COMMENT("process parameters");
    // Input 0 is the code target, 1 is the instance. First parameter at 2.
    uint32_t input_idx = kInstanceParameterIndex + 1;
    for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
      input_idx += ProcessParameter(__ local_kind(param_idx), input_idx);
    }
    int params_size = __ TopSpillOffset();
    DCHECK_EQ(input_idx, descriptor_->InputCount());

    // Initialize locals beyond parameters.
    if (num_params < __ num_locals()) CODE_COMMENT("init locals");
    if (SpillLocalsInitially(decoder, num_params)) {
      bool has_refs = false;
      for (uint32_t param_idx = num_params; param_idx < __ num_locals();
           ++param_idx) {
        ValueKind kind = __ local_kind(param_idx);
        has_refs |= is_reference(kind);
        __ PushStack(kind);
      }
      int spill_size = __ TopSpillOffset() - params_size;
      __ FillStackSlotsWithZero(params_size, spill_size);

      // Initialize all reference type locals with ref.null.
      if (has_refs) {
        Register null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
        LoadNullValue(null_ref_reg, {});
        for (uint32_t local_index = num_params; local_index < __ num_locals();
             ++local_index) {
          ValueKind kind = __ local_kind(local_index);
          if (is_reference(kind)) {
            __ Spill(__ cache_state()->stack_state[local_index].offset(),
                     LiftoffRegister(null_ref_reg), kind);
          }
        }
      }
    } else {
      for (uint32_t param_idx = num_params; param_idx < __ num_locals();
           ++param_idx) {
        ValueKind kind = __ local_kind(param_idx);
        // Anything which is not i32 or i64 requires spilling.
        DCHECK(kind == kI32 || kind == kI64);
        __ PushConstant(kind, int32_t{0});
      }
    }

    DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());

    if (V8_UNLIKELY(debug_sidetable_builder_)) {
      debug_sidetable_builder_->SetNumLocals(__ num_locals());
    }

    // The function-prologue stack check is associated with position 0, which
    // is never a position of any instruction in the function.
    StackCheck(decoder, 0);

    if (env_->dynamic_tiering == DynamicTiering::kEnabled) {
      // TODO(arobin): Avoid spilling registers unconditionally.
      __ SpillAllRegisters();
      CODE_COMMENT("dynamic tiering");
      LiftoffRegList pinned;

      // Get the number of calls array address.
      LiftoffRegister array_address =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      LOAD_INSTANCE_FIELD(array_address.gp(), NumLiftoffFunctionCallsArray,
                          kSystemPointerSize, pinned);

      // Compute the correct offset in the array.
      uint32_t offset =
          kInt32Size * declared_function_index(env_->module, func_index_);

      // Get the number of calls and update it.
      LiftoffRegister old_number_of_calls =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      LiftoffRegister new_number_of_calls =
          pinned.set(__ GetUnusedRegister(kGpReg, pinned));
      __ Load(old_number_of_calls, array_address.gp(), no_reg, offset,
              LoadType::kI32Load, pinned);
      __ emit_i32_addi(new_number_of_calls.gp(), old_number_of_calls.gp(), 1);
      __ Store(array_address.gp(), no_reg, offset, new_number_of_calls,
               StoreType::kI32Store, pinned);

      // Emit the runtime call if necessary.
      Label no_tierup;
      // Check if the number of calls is a power of 2.
      __ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
                      new_number_of_calls.gp());
      __ emit_cond_jump(kNotEqualZero, &no_tierup, kI32,
                        old_number_of_calls.gp());
      TierUpFunction(decoder);
      // After the runtime call, the instance cache register is clobbered (we
      // reset it already in {SpillAllRegisters} above, but then we still
      // access the instance afterwards).
      __ cache_state()->ClearCachedInstanceRegister();
      __ bind(&no_tierup);
    }

    if (FLAG_trace_wasm) TraceFunctionEntry(decoder);
  }

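  // Emits one block of deferred code (a trap or a stack check) that was
  // registered during regular code generation.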
  void GenerateOutOfLineCode(OutOfLineCode* ool) {
    CODE_COMMENT(
        (std::string("OOL: ") + GetRuntimeStubName(ool->stub)).c_str());
    __ bind(ool->label.get());
    const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;

    // Only memory OOB traps need a {pc}, but not unconditionally. Static OOB
    // accesses do not need protected instruction information, hence they also
    // do not set {pc}.
    DCHECK_IMPLIES(ool->stub != WasmCode::kThrowWasmTrapMemOutOfBounds,
                   ool->pc == 0);

    if (env_->bounds_checks == kTrapHandler && ool->pc != 0) {
      uint32_t pc = static_cast<uint32_t>(__ pc_offset());
      DCHECK_EQ(pc, __ pc_offset());
      protected_instructions_.emplace_back(
          trap_handler::ProtectedInstructionData{ool->pc, pc});
    }

    if (!env_->runtime_exception_support) {
      // We cannot test calls to the runtime in cctest/test-run-wasm.
      // Therefore we emit a call to C here instead of a call to the runtime.
      // In this mode, we never generate stack checks.
      DCHECK(!is_stack_check);
      __ CallTrapCallbackForTesting();
      __ LeaveFrame(StackFrame::WASM);
      __ DropStackSlotsAndRet(
          static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
      return;
    }

    if (!ool->regs_to_save.is_empty()) {
      __ PushRegisters(ool->regs_to_save);
    }
    if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
      for (auto& entry : ool->spilled_registers->entries) {
        // We should not push and spill the same register.
        DCHECK(!ool->regs_to_save.has(entry.reg));
        __ Spill(entry.offset, entry.reg, entry.kind);
      }
    }

    source_position_table_builder_.AddPosition(
        __ pc_offset(), SourcePosition(ool->position), true);
    __ CallRuntimeStub(ool->stub);
    Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);

    if (ool->safepoint_info) {
      for (auto index : ool->safepoint_info->slots) {
        safepoint.DefinePointerSlot(index);
      }

      int total_frame_size = __ GetTotalFrameSize();
      LiftoffRegList gp_regs = ool->regs_to_save & kGpCacheRegList;
      // {total_frame_size} is the highest offset from the FP that is used to
      // store a value. The offset of the first spill slot should therefore be
      // {(total_frame_size / kSystemPointerSize) + 1}. However, spill slots
      // don't start at offset '0' but at offset '-1' (or
      // {-kSystemPointerSize}). Therefore we have to add another '+ 1' to the
      // index of the first spill slot.
      int index = (total_frame_size / kSystemPointerSize) + 2;

      __ RecordSpillsInSafepoint(safepoint, gp_regs,
                                 ool->safepoint_info->spills, index);
    }

    DCHECK_EQ(!debug_sidetable_builder_, !ool->debug_sidetable_entry_builder);
    if (V8_UNLIKELY(ool->debug_sidetable_entry_builder)) {
      ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset());
    }
    DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
    if (is_stack_check) {
      MaybeOSR();
    }
    if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
    if (is_stack_check) {
      if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
        DCHECK(for_debugging_);
        for (auto& entry : ool->spilled_registers->entries) {
          __ Fill(entry.reg, entry.offset, entry.kind);
        }
      }
      if (ool->cached_instance != no_reg) {
        __ LoadInstanceFromFrame(ool->cached_instance);
      }
      __ emit_jump(ool->continuation.get());
    } else {
      __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
    }
  }

  void FinishFunction(FullDecoder* decoder) {
    if (DidAssemblerBailout(decoder)) return;
    __ AlignFrameSize();
#if DEBUG
    int frame_size = __ GetTotalFrameSize();
#endif
    for (OutOfLineCode& ool : out_of_line_code_) {
      GenerateOutOfLineCode(&ool);
    }
    DCHECK_EQ(frame_size, __ GetTotalFrameSize());
    __ PatchPrepareStackFrame(pc_offset_stack_frame_construction_,
                              &safepoint_table_builder_);
    __ FinishCode();
    safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC());
    // Emit the handler table.
    if (!handlers_.empty()) {
      handler_table_offset_ = HandlerTable::EmitReturnTableStart(&asm_);
      for (auto& handler : handlers_) {
        HandlerTable::EmitReturnEntry(&asm_, handler.pc_offset,
                                      handler.handler.get()->pos());
      }
    }
    __ MaybeEmitOutOfLineConstantPool();
    // The previous calls may have also generated a bailout.
    DidAssemblerBailout(decoder);
    DCHECK_EQ(num_exceptions_, 0);
  }

  void OnFirstError(FullDecoder* decoder) {
    if (!did_bailout()) bailout_reason_ = kDecodeError;
    UnuseLabels(decoder);
    asm_.AbortCompilation();
  }

  V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
    DCHECK(for_debugging_);
    if (!WasmOpcodes::IsBreakable(opcode)) return;
    bool has_breakpoint = false;
    if (next_breakpoint_ptr_) {
      if (*next_breakpoint_ptr_ == 0) {
        // A single breakpoint at offset 0 indicates stepping.
        DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
        has_breakpoint = true;
      } else {
        while (next_breakpoint_ptr_ != next_breakpoint_end_ &&
               *next_breakpoint_ptr_ < decoder->position()) {
          // Skip unreachable breakpoints.
          ++next_breakpoint_ptr_;
        }
        if (next_breakpoint_ptr_ == next_breakpoint_end_) {
          next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
        } else if (*next_breakpoint_ptr_ == decoder->position()) {
          has_breakpoint = true;
        }
      }
    }
    if (has_breakpoint) {
      CODE_COMMENT("breakpoint");
      EmitBreakpoint(decoder);
      // Once we emitted an unconditional breakpoint, we don't need to check
      // function entry breaks any more.
      did_function_entry_break_checks_ = true;
    } else if (!did_function_entry_break_checks_) {
      did_function_entry_break_checks_ = true;
      CODE_COMMENT("check function entry break");
      Label do_break;
      Label no_break;
      Register flag = __ GetUnusedRegister(kGpReg, {}).gp();

      // Check the "hook on function call" flag. If set, trigger a break.
      LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize,
                          {});
      __ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
      __ emit_cond_jump(kNotEqualZero, &do_break, kI32, flag);

      // Check if we should stop on "script entry".
      LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
      __ emit_cond_jump(kEqualZero, &no_break, kI32, flag);

      __ bind(&do_break);
      EmitBreakpoint(decoder);
      __ bind(&no_break);
    } else if (dead_breakpoint_ == decoder->position()) {
      DCHECK(!next_breakpoint_ptr_ ||
             *next_breakpoint_ptr_ != dead_breakpoint_);
      // The top frame is paused at this position, but the breakpoint was
      // removed. Adding a dead breakpoint here ensures that the source
      // position exists, and that the offset to the return address is the
      // same as in the old code.
      CODE_COMMENT("dead breakpoint");
      Label cont;
      __ emit_jump(&cont);
      EmitBreakpoint(decoder);
      __ bind(&cont);
    }
    if (V8_UNLIKELY(max_steps_ != nullptr)) {
      CODE_COMMENT("check max steps");
      LiftoffRegList pinned;
      LiftoffRegister max_steps = __ GetUnusedRegister(kGpReg, {});
      pinned.set(max_steps);
      LiftoffRegister max_steps_addr = __ GetUnusedRegister(kGpReg, pinned);
      pinned.set(max_steps_addr);
      __ LoadConstant(
          max_steps_addr,
          WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(max_steps_)));
      __ Load(max_steps, max_steps_addr.gp(), no_reg, 0, LoadType::kI32Load,
              pinned);
      Label cont;
      __ emit_i32_cond_jumpi(kUnequal, &cont, max_steps.gp(), 0);
      // Abort.
      Trap(decoder, kTrapUnreachable);
      __ bind(&cont);
      __ emit_i32_subi(max_steps.gp(), max_steps.gp(), 1);
      __ Store(max_steps_addr.gp(), no_reg, 0, max_steps, StoreType::kI32Store,
               pinned);
    }
  }

  void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
    // Add a single check, so that the fast path can be inlined while
    // {EmitDebuggingInfo} stays outlined.
    if (V8_UNLIKELY(for_debugging_)) EmitDebuggingInfo(decoder, opcode);
    TraceCacheState(decoder);
    SLOW_DCHECK(__ ValidateCacheState());
    CODE_COMMENT(WasmOpcodes::OpcodeName(
        WasmOpcodes::IsPrefixOpcode(opcode)
            ? decoder->read_prefixed_opcode<Decoder::kFullValidation>(
                  decoder->pc())
            : opcode));
  }

  void EmitBreakpoint(FullDecoder* decoder) {
    DCHECK(for_debugging_);
    source_position_table_builder_.AddPosition(
        __ pc_offset(), SourcePosition(decoder->position()), true);
    __ CallRuntimeStub(WasmCode::kWasmDebugBreak);
    DefineSafepointWithCalleeSavedRegisters();
    RegisterDebugSideTableEntry(decoder,
                                DebugSideTableBuilder::kAllowRegisters);
    MaybeOSR();
  }

  void PushControl(Control* block) {
    // The Liftoff stack includes implicit exception refs stored for catch
    // blocks, so that they can be rethrown.
    block->num_exceptions = num_exceptions_;
  }

  void Block(FullDecoder* decoder, Control* block) { PushControl(block); }

  void Loop(FullDecoder* decoder, Control* loop) {
    // Before entering a loop, spill all locals to the stack, in order to free
    // the cache registers, and to avoid unnecessarily reloading stack values
    // into registers at branches.
    // TODO(clemensb): Come up with a better strategy here, involving
    // pre-analysis of the function.
    __ SpillLocals();

    __ PrepareLoopArgs(loop->start_merge.arity);

    // Loop labels bind at the beginning of the block.
    __ bind(loop->label.get());

    // Save the current cache state for the merge when jumping to this loop.
    loop->label_state.Split(*__ cache_state());

    PushControl(loop);

    // Execute a stack check in the loop header.
    StackCheck(decoder, decoder->position());
  }

  void Try(FullDecoder* decoder, Control* block) {
    block->try_info = std::make_unique<TryInfo>();
    PushControl(block);
  }

  // Load the property in {kReturnRegister0}.
  LiftoffRegister GetExceptionProperty(LiftoffAssembler::VarState& exception,
                                       RootIndex root_index) {
    DCHECK(root_index == RootIndex::kwasm_exception_tag_symbol ||
           root_index == RootIndex::kwasm_exception_values_symbol);

    LiftoffRegList pinned;
    LiftoffRegister tag_symbol_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LoadExceptionSymbol(tag_symbol_reg.gp(), pinned, root_index);
    LiftoffRegister context_reg =
        pinned.set(__ GetUnusedRegister(kGpReg, pinned));
    LOAD_TAGGED_PTR_INSTANCE_FIELD(context_reg.gp(), NativeContext, pinned);

    LiftoffAssembler::VarState tag_symbol(kPointerKind, tag_symbol_reg, 0);
    LiftoffAssembler::VarState context(kPointerKind, context_reg, 0);

    CallRuntimeStub(WasmCode::kWasmGetOwnProperty,
                    MakeSig::Returns(kPointerKind)
                        .Params(kPointerKind, kPointerKind, kPointerKind),
                    {exception, tag_symbol, context}, kNoSourcePosition);

    return LiftoffRegister(kReturnRegister0);
  }

  void CatchException(FullDecoder* decoder,
                      const TagIndexImmediate<validate>& imm, Control* block,
                      base::Vector<Value> values) {
    DCHECK(block->is_try_catch());
    __ emit_jump(block->label.get());

    // The catch block is unreachable if no possible throws in the try block
    // exist. We only build a landing pad if some node in the try block can
    // (possibly) throw. Otherwise the catch environments remain empty.
    if (!block->try_info->catch_reached) {
      block->reachability = kSpecOnlyReachable;
      return;
    }

    // This is the last use of this label. Re-use the field for the label of
    // the next catch block, and jump there if the tag does not match.
    __ bind(&block->try_info->catch_label);
    new (&block->try_info->catch_label) Label();

    __ cache_state()->Split(block->try_info->catch_state);

    CODE_COMMENT("load caught exception tag");
    DCHECK_EQ(__ cache_state()->stack_state.back().kind(), kRef);
    LiftoffRegister caught_tag =
        GetExceptionProperty(__ cache_state()->stack_state.back(),
                             RootIndex::kwasm_exception_tag_symbol);
    LiftoffRegList pinned;
    pinned.set(caught_tag);

    CODE_COMMENT("load expected exception tag");
    Register imm_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
    LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, TagsTable, pinned);
    __ LoadTaggedPointer(
        imm_tag, imm_tag, no_reg,
        wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});

    CODE_COMMENT("compare tags");
    Label caught;
    __ emit_cond_jump(kEqual, &caught, kI32, imm_tag, caught_tag.gp());
    // The tags don't match, merge the current state into the catch state and
    // jump to the next handler.
    __ MergeFullStackWith(block->try_info->catch_state, *__ cache_state());
    __ emit_jump(&block->try_info->catch_label);

    __ bind(&caught);
    if (!block->try_info->in_handler) {
      block->try_info->in_handler = true;
      num_exceptions_++;
    }
    GetExceptionValues(decoder, __ cache_state()->stack_state.back(), imm.tag);
  }

  void Rethrow(FullDecoder* decoder,
               const LiftoffAssembler::VarState& exception) {
    DCHECK_EQ(exception.kind(), kRef);
    CallRuntimeStub(WasmCode::kWasmRethrow, MakeSig::Params(kPointerKind),
                    {exception}, decoder->position());
  }

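  // Forwards an exception caught in this try block to the handler of the
  // try block at {depth}, or rethrows it to the caller if {depth} refers to
  // the function itself.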
  void Delegate(FullDecoder* decoder, uint32_t depth, Control* block) {
    DCHECK_EQ(block, decoder->control_at(0));
    Control* target = decoder->control_at(depth);
    DCHECK(block->is_incomplete_try());
    __ bind(&block->try_info->catch_label);
    if (block->try_info->catch_reached) {
      __ cache_state()->Steal(block->try_info->catch_state);
      if (depth == decoder->control_depth() - 1) {
        // Delegate to the caller, do not emit a landing pad.
        Rethrow(decoder, __ cache_state()->stack_state.back());
        MaybeOSR();
      } else {
        DCHECK(target->is_incomplete_try());
        if (!target->try_info->catch_reached) {
          target->try_info->catch_state.InitMerge(
              *__ cache_state(), __ num_locals(), 1,
              target->stack_depth + target->num_exceptions);
          target->try_info->catch_reached = true;
        }
        __ MergeStackWith(target->try_info->catch_state, 1,
                          LiftoffAssembler::kForwardJump);
        __ emit_jump(&target->try_info->catch_label);
      }
    }
  }

  void Rethrow(FullDecoder* decoder, Control* try_block) {
    int index = try_block->try_info->catch_state.stack_height() - 1;
    auto& exception = __ cache_state()->stack_state[index];
    Rethrow(decoder, exception);
    int pc_offset = __ pc_offset();
    MaybeOSR();
    EmitLandingPad(decoder, pc_offset);
  }

  void CatchAll(FullDecoder* decoder, Control* block) {
    DCHECK(block->is_try_catchall() || block->is_try_catch());
    DCHECK_EQ(decoder->control_at(0), block);

    // The catch block is unreachable if no possible throws in the try block
    // exist. We only build a landing pad if some node in the try block can
    // (possibly) throw. Otherwise the catch environments remain empty.
    if (!block->try_info->catch_reached) {
      decoder->SetSucceedingCodeDynamicallyUnreachable();
      return;
    }

    __ bind(&block->try_info->catch_label);
    __ cache_state()->Steal(block->try_info->catch_state);
    if (!block->try_info->in_handler) {
      block->try_info->in_handler = true;
      num_exceptions_++;
    }
  }

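  // Emits a conditional jump to {false_dst} if the value on top of the stack
  // is zero, fusing a preceding i32 comparison into the jump if possible.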
  void JumpIfFalse(FullDecoder* decoder, Label* false_dst) {
    LiftoffCondition cond =
        test_and_reset_outstanding_op(kExprI32Eqz) ? kNotEqualZero : kEqualZero;

    if (!has_outstanding_op()) {
      // Unary comparison.
      Register value = __ PopToRegister().gp();
      __ emit_cond_jump(cond, false_dst, kI32, value);
      return;
    }

    // Binary comparison of i32 values.
    cond = Negate(GetCompareCondition(outstanding_op_));
    outstanding_op_ = kNoOutstandingOp;
    LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
    if (rhs_slot.is_const()) {
      // Compare to a constant.
      int32_t rhs_imm = rhs_slot.i32_const();
      __ cache_state()->stack_state.pop_back();
      Register lhs = __ PopToRegister().gp();
      __ emit_i32_cond_jumpi(cond, false_dst, lhs, rhs_imm);
      return;
    }

    Register rhs = __ PopToRegister().gp();
    LiftoffAssembler::VarState lhs_slot = __ cache_state()->stack_state.back();
    if (lhs_slot.is_const()) {
      // Compare a constant to an arbitrary value.
      int32_t lhs_imm = lhs_slot.i32_const();
      __ cache_state()->stack_state.pop_back();
      // Flip the condition, because {lhs} and {rhs} are swapped.
      __ emit_i32_cond_jumpi(Flip(cond), false_dst, rhs, lhs_imm);
      return;
    }

    // Compare two arbitrary values.
    Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
    __ emit_cond_jump(cond, false_dst, kI32, lhs, rhs);
  }

  void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
    DCHECK_EQ(if_block, decoder->control_at(0));
    DCHECK(if_block->is_if());

    // Allocate the else state.
    if_block->else_state = std::make_unique<ElseState>();

    // Test the condition on the value stack, jump to else if zero.
    JumpIfFalse(decoder, if_block->else_state->label.get());

    // Store the state (after popping the value) for executing the else branch.
    if_block->else_state->state.Split(*__ cache_state());

    PushControl(if_block);
  }

  void FallThruTo(FullDecoder* decoder, Control* c) {
    if (!c->end_merge.reached) {
      c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
                               c->end_merge.arity,
                               c->stack_depth + c->num_exceptions);
    }
    DCHECK(!c->is_try_catchall());
    if (c->is_try_catch()) {
      // Drop the implicit exception ref if any. There may be none if this is a
      // catch-less try block.
      __ MergeStackWith(c->label_state, c->br_merge()->arity,
                        LiftoffAssembler::kForwardJump);
    } else {
      __ MergeFullStackWith(c->label_state, *__ cache_state());
    }
    __ emit_jump(c->label.get());
    TraceCacheState(decoder);
  }

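  // Emits the end-of-block merge code for an if that has no else branch.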
  void FinishOneArmedIf(FullDecoder* decoder, Control* c) {
    DCHECK(c->is_onearmed_if());
    if (c->end_merge.reached) {
      // Someone already merged to the end of the if. Merge both arms into
      // that.
      if (c->reachable()) {
        // Merge the if state into the end state.
        __ MergeFullStackWith(c->label_state, *__ cache_state());
        __ emit_jump(c->label.get());
      }
      // Merge the else state into the end state.
      __ bind(c->else_state->label.get());
      __ MergeFullStackWith(c->label_state, c->else_state->state);
      __ cache_state()->Steal(c->label_state);
    } else if (c->reachable()) {
      // No merge yet at the end of the if, but we need to create a merge for
      // both arms of this if. Thus init the merge point from the else state,
      // then merge the if state into that.
      DCHECK_EQ(c->start_merge.arity, c->end_merge.arity);
      c->label_state.InitMerge(c->else_state->state, __ num_locals(),
                               c->start_merge.arity,
                               c->stack_depth + c->num_exceptions);
      __ MergeFullStackWith(c->label_state, *__ cache_state());
      __ emit_jump(c->label.get());
      // Merge the else state into the end state.
      __ bind(c->else_state->label.get());
      __ MergeFullStackWith(c->label_state, c->else_state->state);
      __ cache_state()->Steal(c->label_state);
    } else {
      // No merge needed, just continue with the else state.
      __ bind(c->else_state->label.get());
      __ cache_state()->Steal(c->else_state->state);
    }
  }

  void FinishTry(FullDecoder* decoder, Control* c) {
    DCHECK(c->is_try_catch() || c->is_try_catchall());
    if (!c->end_merge.reached) {
      if (c->try_info->catch_reached) {
        // Drop the implicit exception ref.
        __ DropValue(__ num_locals() + c->stack_depth + c->num_exceptions);
      }
      // Else we did not enter the catch state, continue with the current
      // state.
    } else {
      if (c->reachable()) {
        __ MergeStackWith(c->label_state, c->br_merge()->arity,
                          LiftoffAssembler::kForwardJump);
      }
      __ cache_state()->Steal(c->label_state);
    }
    if (c->try_info->catch_reached) {
      num_exceptions_--;
    }
  }

  void PopControl(FullDecoder* decoder, Control* c) {
    if (c->is_loop()) return;  // A loop just falls through.
    if (c->is_onearmed_if()) {
      // Special handling for one-armed ifs.
      FinishOneArmedIf(decoder, c);
    } else if (c->is_try_catch() || c->is_try_catchall()) {
      FinishTry(decoder, c);
    } else if (c->end_merge.reached) {
      // There is a merge already. Merge our state into that, then continue
      // with that state.
      if (c->reachable()) {
        __ MergeFullStackWith(c->label_state, *__ cache_state());
      }
      __ cache_state()->Steal(c->label_state);
    } else {
      // No merge, just continue with our current state.
    }

    if (!c->label.get()->is_bound()) __ bind(c->label.get());
  }

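  // Spills all cache registers and emits a call to the C function {ext_ref};
  // arguments are passed via stack slots reserved for {CallC}.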
  void GenerateCCall(const LiftoffRegister* result_regs,
                     const ValueKindSig* sig, ValueKind out_argument_kind,
                     const LiftoffRegister* arg_regs,
                     ExternalReference ext_ref) {
    // Before making a call, spill all cache registers.
    __ SpillAllRegisters();

    // Store arguments on our stack, then align the stack for calling to C.
    int param_bytes = 0;
    for (ValueKind param_kind : sig->parameters()) {
      param_bytes += element_size_bytes(param_kind);
    }
    int out_arg_bytes =
        out_argument_kind == kVoid ? 0 : element_size_bytes(out_argument_kind);
    int stack_bytes = std::max(param_bytes, out_arg_bytes);
    __ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes,
             ext_ref);
  }

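  // Helpers to invoke an emit function, which can be either a free function /
  // lambda or a {LiftoffAssembler} member function (register arguments are
  // converted as needed).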
1434 template <typename EmitFn, typename... Args>
1435 typename std::enable_if<!std::is_member_function_pointer<EmitFn>::value>::type
CallEmitFn(EmitFn fn,Args...args)1436 CallEmitFn(EmitFn fn, Args... args) {
1437 fn(args...);
1438 }
1439
1440 template <typename EmitFn, typename... Args>
1441 typename std::enable_if<std::is_member_function_pointer<EmitFn>::value>::type
1442   CallEmitFn(EmitFn fn, Args... args) {
1443 (asm_.*fn)(ConvertAssemblerArg(args)...);
1444 }
1445
1446 // Wrap a {LiftoffRegister} with implicit conversions to {Register} and
1447 // {DoubleRegister}.
1448 struct AssemblerRegisterConverter {
1449 LiftoffRegister reg;
1450     operator LiftoffRegister() { return reg; }
1451     operator Register() { return reg.gp(); }
1452     operator DoubleRegister() { return reg.fp(); }
1453 };
1454
1455 // Convert {LiftoffRegister} to {AssemblerRegisterConverter}, other types stay
1456 // unchanged.
1457 template <typename T>
1458 typename std::conditional<std::is_same<LiftoffRegister, T>::value,
1459 AssemblerRegisterConverter, T>::type
1460   ConvertAssemblerArg(T t) {
1461 return {t};
1462 }
1463
1464 template <typename EmitFn, typename ArgType>
1465 struct EmitFnWithFirstArg {
1466 EmitFn fn;
1467 ArgType first_arg;
1468 };
1469
1470 template <typename EmitFn, typename ArgType>
1471   EmitFnWithFirstArg<EmitFn, ArgType> BindFirst(EmitFn fn, ArgType arg) {
1472 return {fn, arg};
1473 }
1474
1475 template <typename EmitFn, typename T, typename... Args>
1476   void CallEmitFn(EmitFnWithFirstArg<EmitFn, T> bound_fn, Args... args) {
1477 CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
1478 }
1479
1480 template <ValueKind src_kind, ValueKind result_kind,
1481 ValueKind result_lane_kind = kVoid, class EmitFn>
1482   void EmitUnOp(EmitFn fn) {
1483 constexpr RegClass src_rc = reg_class_for(src_kind);
1484 constexpr RegClass result_rc = reg_class_for(result_kind);
1485 LiftoffRegister src = __ PopToRegister();
1486 LiftoffRegister dst = src_rc == result_rc
1487 ? __ GetUnusedRegister(result_rc, {src}, {})
1488 : __ GetUnusedRegister(result_rc, {});
1489 CallEmitFn(fn, dst, src);
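    // If nondeterminism tracking is enabled (used for fuzzing, see the
    // static_assert message in {EmitBinOpImm}), flag NaN results, since their
    // bit patterns are a source of nondeterminism.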
1490 if (V8_UNLIKELY(nondeterminism_)) {
1491 auto pinned = LiftoffRegList::ForRegs(dst);
1492 if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
1493 CheckNan(dst, pinned, result_kind);
1494 } else if (result_kind == ValueKind::kS128 &&
1495 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
1496 CheckS128Nan(dst, pinned, result_lane_kind);
1497 }
1498 }
1499 __ PushRegister(result_kind, dst);
1500 }
1501
1502 template <ValueKind kind>
1503   void EmitFloatUnOpWithCFallback(
1504 bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
1505 ExternalReference (*fallback_fn)()) {
1506 auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
1507 if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
1508 ExternalReference ext_ref = fallback_fn();
1509 auto sig = MakeSig::Params(kind);
1510 GenerateCCall(&dst, &sig, kind, &src, ext_ref);
1511 };
1512 EmitUnOp<kind, kind>(emit_with_c_fallback);
1513 }
1514
1515 enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
1516 template <ValueKind dst_kind, ValueKind src_kind,
1517 TypeConversionTrapping can_trap>
1518   void EmitTypeConversion(FullDecoder* decoder, WasmOpcode opcode,
1519 ExternalReference (*fallback_fn)()) {
1520 static constexpr RegClass src_rc = reg_class_for(src_kind);
1521 static constexpr RegClass dst_rc = reg_class_for(dst_kind);
1522 LiftoffRegister src = __ PopToRegister();
1523 LiftoffRegister dst = src_rc == dst_rc
1524 ? __ GetUnusedRegister(dst_rc, {src}, {})
1525 : __ GetUnusedRegister(dst_rc, {});
1526 Label* trap =
1527 can_trap ? AddOutOfLineTrap(
1528 decoder, WasmCode::kThrowWasmTrapFloatUnrepresentable)
1529 : nullptr;
1530 if (!__ emit_type_conversion(opcode, dst, src, trap)) {
1531 DCHECK_NOT_NULL(fallback_fn);
1532 ExternalReference ext_ref = fallback_fn();
1533 if (can_trap) {
1534 // External references for potentially trapping conversions return int.
1535 auto sig = MakeSig::Returns(kI32).Params(src_kind);
1536 LiftoffRegister ret_reg =
1537 __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
1538 LiftoffRegister dst_regs[] = {ret_reg, dst};
1539 GenerateCCall(dst_regs, &sig, dst_kind, &src, ext_ref);
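        // A zero return value means the input was not representable in the
        // target type, so jump to the trap in that case.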
1540 __ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
1541 } else {
1542 ValueKind sig_kinds[] = {src_kind};
1543 ValueKindSig sig(0, 1, sig_kinds);
1544 GenerateCCall(&dst, &sig, dst_kind, &src, ext_ref);
1545 }
1546 }
1547 __ PushRegister(dst_kind, dst);
1548 }
1549
1550   void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
1551 Value* result) {
1552 #define CASE_I32_UNOP(opcode, fn) \
1553 case kExpr##opcode: \
1554 return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_##fn);
1555 #define CASE_I64_UNOP(opcode, fn) \
1556 case kExpr##opcode: \
1557 return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn);
1558 #define CASE_FLOAT_UNOP(opcode, kind, fn) \
1559 case kExpr##opcode: \
1560 return EmitUnOp<k##kind, k##kind>(&LiftoffAssembler::emit_##fn);
1561 #define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, kind, fn) \
1562 case kExpr##opcode: \
1563 return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
1564 &ExternalReference::wasm_##fn);
1565 #define CASE_TYPE_CONVERSION(opcode, dst_kind, src_kind, ext_ref, can_trap) \
1566 case kExpr##opcode: \
1567 return EmitTypeConversion<k##dst_kind, k##src_kind, can_trap>( \
1568 decoder, kExpr##opcode, ext_ref);
1569 switch (opcode) {
1570 CASE_I32_UNOP(I32Clz, i32_clz)
1571 CASE_I32_UNOP(I32Ctz, i32_ctz)
1572 CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
1573 CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
1574 CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Ceil, F32, f32_ceil)
1575 CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Floor, F32, f32_floor)
1576 CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Trunc, F32, f32_trunc)
1577 CASE_FLOAT_UNOP_WITH_CFALLBACK(F32NearestInt, F32, f32_nearest_int)
1578 CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
1579 CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
1580 CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
1581 CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Ceil, F64, f64_ceil)
1582 CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Floor, F64, f64_floor)
1583 CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Trunc, F64, f64_trunc)
1584 CASE_FLOAT_UNOP_WITH_CFALLBACK(F64NearestInt, F64, f64_nearest_int)
1585 CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
1586 CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap)
1587 CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap)
1588 CASE_TYPE_CONVERSION(I32UConvertF32, I32, F32, nullptr, kCanTrap)
1589 CASE_TYPE_CONVERSION(I32SConvertF64, I32, F64, nullptr, kCanTrap)
1590 CASE_TYPE_CONVERSION(I32UConvertF64, I32, F64, nullptr, kCanTrap)
1591 CASE_TYPE_CONVERSION(I32ReinterpretF32, I32, F32, nullptr, kNoTrap)
1592 CASE_TYPE_CONVERSION(I64SConvertI32, I64, I32, nullptr, kNoTrap)
1593 CASE_TYPE_CONVERSION(I64UConvertI32, I64, I32, nullptr, kNoTrap)
1594 CASE_TYPE_CONVERSION(I64SConvertF32, I64, F32,
1595 &ExternalReference::wasm_float32_to_int64, kCanTrap)
1596 CASE_TYPE_CONVERSION(I64UConvertF32, I64, F32,
1597 &ExternalReference::wasm_float32_to_uint64, kCanTrap)
1598 CASE_TYPE_CONVERSION(I64SConvertF64, I64, F64,
1599 &ExternalReference::wasm_float64_to_int64, kCanTrap)
1600 CASE_TYPE_CONVERSION(I64UConvertF64, I64, F64,
1601 &ExternalReference::wasm_float64_to_uint64, kCanTrap)
1602 CASE_TYPE_CONVERSION(I64ReinterpretF64, I64, F64, nullptr, kNoTrap)
1603 CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr, kNoTrap)
1604 CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr, kNoTrap)
1605 CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64,
1606 &ExternalReference::wasm_int64_to_float32, kNoTrap)
1607 CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64,
1608 &ExternalReference::wasm_uint64_to_float32, kNoTrap)
1609 CASE_TYPE_CONVERSION(F32ConvertF64, F32, F64, nullptr, kNoTrap)
1610 CASE_TYPE_CONVERSION(F32ReinterpretI32, F32, I32, nullptr, kNoTrap)
1611 CASE_TYPE_CONVERSION(F64SConvertI32, F64, I32, nullptr, kNoTrap)
1612 CASE_TYPE_CONVERSION(F64UConvertI32, F64, I32, nullptr, kNoTrap)
1613 CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64,
1614 &ExternalReference::wasm_int64_to_float64, kNoTrap)
1615 CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64,
1616 &ExternalReference::wasm_uint64_to_float64, kNoTrap)
1617 CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr, kNoTrap)
1618 CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr, kNoTrap)
1619 CASE_I32_UNOP(I32SExtendI8, i32_signextend_i8)
1620 CASE_I32_UNOP(I32SExtendI16, i32_signextend_i16)
1621 CASE_I64_UNOP(I64SExtendI8, i64_signextend_i8)
1622 CASE_I64_UNOP(I64SExtendI16, i64_signextend_i16)
1623 CASE_I64_UNOP(I64SExtendI32, i64_signextend_i32)
1624 CASE_I64_UNOP(I64Clz, i64_clz)
1625 CASE_I64_UNOP(I64Ctz, i64_ctz)
1626 CASE_TYPE_CONVERSION(I32SConvertSatF32, I32, F32, nullptr, kNoTrap)
1627 CASE_TYPE_CONVERSION(I32UConvertSatF32, I32, F32, nullptr, kNoTrap)
1628 CASE_TYPE_CONVERSION(I32SConvertSatF64, I32, F64, nullptr, kNoTrap)
1629 CASE_TYPE_CONVERSION(I32UConvertSatF64, I32, F64, nullptr, kNoTrap)
1630 CASE_TYPE_CONVERSION(I64SConvertSatF32, I64, F32,
1631 &ExternalReference::wasm_float32_to_int64_sat,
1632 kNoTrap)
1633 CASE_TYPE_CONVERSION(I64UConvertSatF32, I64, F32,
1634 &ExternalReference::wasm_float32_to_uint64_sat,
1635 kNoTrap)
1636 CASE_TYPE_CONVERSION(I64SConvertSatF64, I64, F64,
1637 &ExternalReference::wasm_float64_to_int64_sat,
1638 kNoTrap)
1639 CASE_TYPE_CONVERSION(I64UConvertSatF64, I64, F64,
1640 &ExternalReference::wasm_float64_to_uint64_sat,
1641 kNoTrap)
1642 case kExprI32Eqz:
1643 DCHECK(decoder->lookahead(0, kExprI32Eqz));
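        // If the eqz is immediately followed by a br_if or if, fuse it into
        // the conditional branch instead of materializing the boolean result.
        // This is skipped when generating debug code.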
1644 if ((decoder->lookahead(1, kExprBrIf) ||
1645 decoder->lookahead(1, kExprIf)) &&
1646 !for_debugging_) {
1647 DCHECK(!has_outstanding_op());
1648 outstanding_op_ = kExprI32Eqz;
1649 break;
1650 }
1651 return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_i32_eqz);
1652 case kExprI64Eqz:
1653 return EmitUnOp<kI64, kI32>(&LiftoffAssembler::emit_i64_eqz);
1654 case kExprI32Popcnt:
1655 return EmitUnOp<kI32, kI32>(
1656 [=](LiftoffRegister dst, LiftoffRegister src) {
1657 if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
1658 auto sig = MakeSig::Returns(kI32).Params(kI32);
1659 GenerateCCall(&dst, &sig, kVoid, &src,
1660 ExternalReference::wasm_word32_popcnt());
1661 });
1662 case kExprI64Popcnt:
1663 return EmitUnOp<kI64, kI64>(
1664 [=](LiftoffRegister dst, LiftoffRegister src) {
1665 if (__ emit_i64_popcnt(dst, src)) return;
1666             // The C function returns i32. We will zero-extend later.
1667 auto sig = MakeSig::Returns(kI32).Params(kI64);
1668 LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst;
1669 GenerateCCall(&c_call_dst, &sig, kVoid, &src,
1670 ExternalReference::wasm_word64_popcnt());
1671 // Now zero-extend the result to i64.
1672 __ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
1673 nullptr);
1674 });
1675 case kExprRefIsNull: {
1676 LiftoffRegList pinned;
1677 LiftoffRegister ref = pinned.set(__ PopToRegister());
1678 LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
1679 LoadNullValue(null.gp(), pinned);
1680 // Prefer to overwrite one of the input registers with the result
1681 // of the comparison.
1682 LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
1683 __ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
1684 __ PushRegister(kI32, dst);
1685 return;
1686 }
1687 default:
1688 UNREACHABLE();
1689 }
1690 #undef CASE_I32_UNOP
1691 #undef CASE_I64_UNOP
1692 #undef CASE_FLOAT_UNOP
1693 #undef CASE_FLOAT_UNOP_WITH_CFALLBACK
1694 #undef CASE_TYPE_CONVERSION
1695 }
1696
1697 template <ValueKind src_kind, ValueKind result_kind, typename EmitFn,
1698 typename EmitFnImm>
1699   void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
1700 static constexpr RegClass src_rc = reg_class_for(src_kind);
1701 static constexpr RegClass result_rc = reg_class_for(result_kind);
1702
1703 LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
1704 // Check if the RHS is an immediate.
1705 if (rhs_slot.is_const()) {
1706 __ cache_state()->stack_state.pop_back();
1707 int32_t imm = rhs_slot.i32_const();
1708
1709 LiftoffRegister lhs = __ PopToRegister();
1710 // Either reuse {lhs} for {dst}, or choose a register (pair) which does
1711 // not overlap, for easier code generation.
1712 LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs);
1713 LiftoffRegister dst = src_rc == result_rc
1714 ? __ GetUnusedRegister(result_rc, {lhs}, pinned)
1715 : __ GetUnusedRegister(result_rc, pinned);
1716
1717 CallEmitFn(fnImm, dst, lhs, imm);
1718 static_assert(result_kind != kF32 && result_kind != kF64,
1719 "Unhandled nondeterminism for fuzzing.");
1720 __ PushRegister(result_kind, dst);
1721 } else {
1722 // The RHS was not an immediate.
1723 EmitBinOp<src_kind, result_kind>(fn);
1724 }
1725 }
1726
1727 template <ValueKind src_kind, ValueKind result_kind,
1728 bool swap_lhs_rhs = false, ValueKind result_lane_kind = kVoid,
1729 typename EmitFn>
1730   void EmitBinOp(EmitFn fn) {
1731 static constexpr RegClass src_rc = reg_class_for(src_kind);
1732 static constexpr RegClass result_rc = reg_class_for(result_kind);
1733 LiftoffRegister rhs = __ PopToRegister();
1734 LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
1735 LiftoffRegister dst = src_rc == result_rc
1736 ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
1737 : __ GetUnusedRegister(result_rc, {});
1738
1739 if (swap_lhs_rhs) std::swap(lhs, rhs);
1740
1741 CallEmitFn(fn, dst, lhs, rhs);
1742 if (V8_UNLIKELY(nondeterminism_)) {
1743 auto pinned = LiftoffRegList::ForRegs(dst);
1744 if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
1745 CheckNan(dst, pinned, result_kind);
1746 } else if (result_kind == ValueKind::kS128 &&
1747 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
1748 CheckS128Nan(dst, pinned, result_lane_kind);
1749 }
1750 }
1751 __ PushRegister(result_kind, dst);
1752 }
1753
1754   void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
1755 LiftoffRegister rhs, ExternalReference ext_ref,
1756 Label* trap_by_zero,
1757 Label* trap_unrepresentable = nullptr) {
1758 // Cannot emit native instructions, build C call.
1759 LiftoffRegister ret =
1760 __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
1761 LiftoffRegister tmp =
1762 __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
1763 LiftoffRegister arg_regs[] = {lhs, rhs};
1764 LiftoffRegister result_regs[] = {ret, dst};
1765 auto sig = MakeSig::Returns(kI32).Params(kI64, kI64);
1766 GenerateCCall(result_regs, &sig, kI64, arg_regs, ext_ref);
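    // The C function returns a status in {ret}: 0 indicates division by zero,
    // -1 indicates an unrepresentable result (e.g. INT64_MIN / -1).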
1767 __ LoadConstant(tmp, WasmValue(int32_t{0}));
1768 __ emit_cond_jump(kEqual, trap_by_zero, kI32, ret.gp(), tmp.gp());
1769 if (trap_unrepresentable) {
1770 __ LoadConstant(tmp, WasmValue(int32_t{-1}));
1771 __ emit_cond_jump(kEqual, trap_unrepresentable, kI32, ret.gp(), tmp.gp());
1772 }
1773 }
1774
1775 template <WasmOpcode opcode>
1776   void EmitI32CmpOp(FullDecoder* decoder) {
1777 DCHECK(decoder->lookahead(0, opcode));
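    // Fuse the comparison with an immediately following br_if or if by
    // recording it as an outstanding op which the branch will consume. This is
    // skipped when generating debug code.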
1778 if ((decoder->lookahead(1, kExprBrIf) || decoder->lookahead(1, kExprIf)) &&
1779 !for_debugging_) {
1780 DCHECK(!has_outstanding_op());
1781 outstanding_op_ = opcode;
1782 return;
1783 }
1784 return EmitBinOp<kI32, kI32>(BindFirst(&LiftoffAssembler::emit_i32_set_cond,
1785 GetCompareCondition(opcode)));
1786 }
1787
1788   void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
1789 const Value& rhs, Value* result) {
1790 #define CASE_I64_SHIFTOP(opcode, fn) \
1791 case kExpr##opcode: \
1792 return EmitBinOpImm<kI64, kI64>( \
1793 [=](LiftoffRegister dst, LiftoffRegister src, \
1794 LiftoffRegister amount) { \
1795 __ emit_##fn(dst, src, \
1796 amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \
1797 }, \
1798 &LiftoffAssembler::emit_##fn##i);
1799 #define CASE_CCALL_BINOP(opcode, kind, ext_ref_fn) \
1800 case kExpr##opcode: \
1801 return EmitBinOp<k##kind, k##kind>( \
1802 [=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
1803 LiftoffRegister args[] = {lhs, rhs}; \
1804 auto ext_ref = ExternalReference::ext_ref_fn(); \
1805 ValueKind sig_kinds[] = {k##kind, k##kind, k##kind}; \
1806 const bool out_via_stack = k##kind == kI64; \
1807 ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_kinds); \
1808 ValueKind out_arg_kind = out_via_stack ? kI64 : kVoid; \
1809 GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref); \
1810 });
1811 switch (opcode) {
1812 case kExprI32Add:
1813 return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_add,
1814 &LiftoffAssembler::emit_i32_addi);
1815 case kExprI32Sub:
1816 return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_sub);
1817 case kExprI32Mul:
1818 return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_mul);
1819 case kExprI32And:
1820 return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_and,
1821 &LiftoffAssembler::emit_i32_andi);
1822 case kExprI32Ior:
1823 return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_or,
1824 &LiftoffAssembler::emit_i32_ori);
1825 case kExprI32Xor:
1826 return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_xor,
1827 &LiftoffAssembler::emit_i32_xori);
1828 case kExprI32Eq:
1829 return EmitI32CmpOp<kExprI32Eq>(decoder);
1830 case kExprI32Ne:
1831 return EmitI32CmpOp<kExprI32Ne>(decoder);
1832 case kExprI32LtS:
1833 return EmitI32CmpOp<kExprI32LtS>(decoder);
1834 case kExprI32LtU:
1835 return EmitI32CmpOp<kExprI32LtU>(decoder);
1836 case kExprI32GtS:
1837 return EmitI32CmpOp<kExprI32GtS>(decoder);
1838 case kExprI32GtU:
1839 return EmitI32CmpOp<kExprI32GtU>(decoder);
1840 case kExprI32LeS:
1841 return EmitI32CmpOp<kExprI32LeS>(decoder);
1842 case kExprI32LeU:
1843 return EmitI32CmpOp<kExprI32LeU>(decoder);
1844 case kExprI32GeS:
1845 return EmitI32CmpOp<kExprI32GeS>(decoder);
1846 case kExprI32GeU:
1847 return EmitI32CmpOp<kExprI32GeU>(decoder);
1848 case kExprI64Add:
1849 return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_add,
1850 &LiftoffAssembler::emit_i64_addi);
1851 case kExprI64Sub:
1852 return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_sub);
1853 case kExprI64Mul:
1854 return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_mul);
1855 case kExprI64And:
1856 return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_and,
1857 &LiftoffAssembler::emit_i64_andi);
1858 case kExprI64Ior:
1859 return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_or,
1860 &LiftoffAssembler::emit_i64_ori);
1861 case kExprI64Xor:
1862 return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_xor,
1863 &LiftoffAssembler::emit_i64_xori);
1864 case kExprI64Eq:
1865 return EmitBinOp<kI64, kI32>(
1866 BindFirst(&LiftoffAssembler::emit_i64_set_cond, kEqual));
1867 case kExprI64Ne:
1868 return EmitBinOp<kI64, kI32>(
1869 BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnequal));
1870 case kExprI64LtS:
1871 return EmitBinOp<kI64, kI32>(
1872 BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessThan));
1873 case kExprI64LtU:
1874 return EmitBinOp<kI64, kI32>(
1875 BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnsignedLessThan));
1876 case kExprI64GtS:
1877 return EmitBinOp<kI64, kI32>(BindFirst(
1878 &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterThan));
1879 case kExprI64GtU:
1880 return EmitBinOp<kI64, kI32>(BindFirst(
1881 &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterThan));
1882 case kExprI64LeS:
1883 return EmitBinOp<kI64, kI32>(
1884 BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessEqual));
1885 case kExprI64LeU:
1886 return EmitBinOp<kI64, kI32>(BindFirst(
1887 &LiftoffAssembler::emit_i64_set_cond, kUnsignedLessEqual));
1888 case kExprI64GeS:
1889 return EmitBinOp<kI64, kI32>(BindFirst(
1890 &LiftoffAssembler::emit_i64_set_cond, kSignedGreaterEqual));
1891 case kExprI64GeU:
1892 return EmitBinOp<kI64, kI32>(BindFirst(
1893 &LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterEqual));
1894 case kExprF32Eq:
1895 return EmitBinOp<kF32, kI32>(
1896 BindFirst(&LiftoffAssembler::emit_f32_set_cond, kEqual));
1897 case kExprF32Ne:
1898 return EmitBinOp<kF32, kI32>(
1899 BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnequal));
1900 case kExprF32Lt:
1901 return EmitBinOp<kF32, kI32>(
1902 BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnsignedLessThan));
1903 case kExprF32Gt:
1904 return EmitBinOp<kF32, kI32>(BindFirst(
1905 &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterThan));
1906 case kExprF32Le:
1907 return EmitBinOp<kF32, kI32>(BindFirst(
1908 &LiftoffAssembler::emit_f32_set_cond, kUnsignedLessEqual));
1909 case kExprF32Ge:
1910 return EmitBinOp<kF32, kI32>(BindFirst(
1911 &LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterEqual));
1912 case kExprF64Eq:
1913 return EmitBinOp<kF64, kI32>(
1914 BindFirst(&LiftoffAssembler::emit_f64_set_cond, kEqual));
1915 case kExprF64Ne:
1916 return EmitBinOp<kF64, kI32>(
1917 BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnequal));
1918 case kExprF64Lt:
1919 return EmitBinOp<kF64, kI32>(
1920 BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnsignedLessThan));
1921 case kExprF64Gt:
1922 return EmitBinOp<kF64, kI32>(BindFirst(
1923 &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterThan));
1924 case kExprF64Le:
1925 return EmitBinOp<kF64, kI32>(BindFirst(
1926 &LiftoffAssembler::emit_f64_set_cond, kUnsignedLessEqual));
1927 case kExprF64Ge:
1928 return EmitBinOp<kF64, kI32>(BindFirst(
1929 &LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterEqual));
1930 case kExprI32Shl:
1931 return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shl,
1932 &LiftoffAssembler::emit_i32_shli);
1933 case kExprI32ShrS:
1934 return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_sar,
1935 &LiftoffAssembler::emit_i32_sari);
1936 case kExprI32ShrU:
1937 return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shr,
1938 &LiftoffAssembler::emit_i32_shri);
1939 CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
1940 CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
1941 CASE_I64_SHIFTOP(I64Shl, i64_shl)
1942 CASE_I64_SHIFTOP(I64ShrS, i64_sar)
1943 CASE_I64_SHIFTOP(I64ShrU, i64_shr)
1944 CASE_CCALL_BINOP(I64Rol, I64, wasm_word64_rol)
1945 CASE_CCALL_BINOP(I64Ror, I64, wasm_word64_ror)
1946 case kExprF32Add:
1947 return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_add);
1948 case kExprF32Sub:
1949 return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_sub);
1950 case kExprF32Mul:
1951 return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_mul);
1952 case kExprF32Div:
1953 return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_div);
1954 case kExprF32Min:
1955 return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_min);
1956 case kExprF32Max:
1957 return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_max);
1958 case kExprF32CopySign:
1959 return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_copysign);
1960 case kExprF64Add:
1961 return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_add);
1962 case kExprF64Sub:
1963 return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_sub);
1964 case kExprF64Mul:
1965 return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_mul);
1966 case kExprF64Div:
1967 return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_div);
1968 case kExprF64Min:
1969 return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_min);
1970 case kExprF64Max:
1971 return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_max);
1972 case kExprF64CopySign:
1973 return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_copysign);
1974 case kExprI32DivS:
1975 return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
1976 LiftoffRegister lhs,
1977 LiftoffRegister rhs) {
1978 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
1979 // Adding the second trap might invalidate the pointer returned for
1980 // the first one, thus get both pointers afterwards.
1981 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivUnrepresentable);
1982 Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
1983 Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
1984 __ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
1985 div_unrepresentable);
1986 });
1987 case kExprI32DivU:
1988 return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
1989 LiftoffRegister lhs,
1990 LiftoffRegister rhs) {
1991 Label* div_by_zero =
1992 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
1993 __ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
1994 });
1995 case kExprI32RemS:
1996 return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
1997 LiftoffRegister lhs,
1998 LiftoffRegister rhs) {
1999 Label* rem_by_zero =
2000 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
2001 __ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
2002 });
2003 case kExprI32RemU:
2004 return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
2005 LiftoffRegister lhs,
2006 LiftoffRegister rhs) {
2007 Label* rem_by_zero =
2008 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
2009 __ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
2010 });
2011 case kExprI64DivS:
2012 return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
2013 LiftoffRegister lhs,
2014 LiftoffRegister rhs) {
2015 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
2016 // Adding the second trap might invalidate the pointer returned for
2017 // the first one, thus get both pointers afterwards.
2018 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivUnrepresentable);
2019 Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
2020 Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
2021 if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
2022 div_unrepresentable)) {
2023 ExternalReference ext_ref = ExternalReference::wasm_int64_div();
2024 EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero,
2025 div_unrepresentable);
2026 }
2027 });
2028 case kExprI64DivU:
2029 return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
2030 LiftoffRegister lhs,
2031 LiftoffRegister rhs) {
2032 Label* div_by_zero =
2033 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
2034 if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
2035 ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
2036 EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
2037 }
2038 });
2039 case kExprI64RemS:
2040 return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
2041 LiftoffRegister lhs,
2042 LiftoffRegister rhs) {
2043 Label* rem_by_zero =
2044 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
2045 if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
2046 ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
2047 EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
2048 }
2049 });
2050 case kExprI64RemU:
2051 return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
2052 LiftoffRegister lhs,
2053 LiftoffRegister rhs) {
2054 Label* rem_by_zero =
2055 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
2056 if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
2057 ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
2058 EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
2059 }
2060 });
2061 case kExprRefEq: {
2062 return EmitBinOp<kOptRef, kI32>(
2063 BindFirst(&LiftoffAssembler::emit_ptrsize_set_cond, kEqual));
2064 }
2065
2066 default:
2067 UNREACHABLE();
2068 }
2069 #undef CASE_I64_SHIFTOP
2070 #undef CASE_CCALL_BINOP
2071 }
2072
2073   void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
2074 __ PushConstant(kI32, value);
2075 }
2076
2077   void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
2078     // The {VarState} stores constant values as int32_t, thus we only store
2079     // 64-bit constants in this field if they fit in an int32_t. Larger values
2080     // cannot be used as immediates anyway, so we can also just put them in
2081     // a register immediately.
2082 int32_t value_i32 = static_cast<int32_t>(value);
2083 if (value_i32 == value) {
2084 __ PushConstant(kI64, value_i32);
2085 } else {
2086 LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kI64), {});
2087 __ LoadConstant(reg, WasmValue(value));
2088 __ PushRegister(kI64, reg);
2089 }
2090 }
2091
2092   void F32Const(FullDecoder* decoder, Value* result, float value) {
2093 LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
2094 __ LoadConstant(reg, WasmValue(value));
2095 __ PushRegister(kF32, reg);
2096 }
2097
2098   void F64Const(FullDecoder* decoder, Value* result, double value) {
2099 LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
2100 __ LoadConstant(reg, WasmValue(value));
2101 __ PushRegister(kF64, reg);
2102 }
2103
2104   void RefNull(FullDecoder* decoder, ValueType type, Value*) {
2105 LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
2106 LoadNullValue(null.gp(), {});
2107 __ PushRegister(type.kind(), null);
2108 }
2109
2110   void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
2111 LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {});
2112 __ LoadConstant(func_index_reg, WasmValue(function_index));
2113 LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0);
2114 CallRuntimeStub(WasmCode::kWasmRefFunc, MakeSig::Returns(kRef).Params(kI32),
2115 {func_index_var}, decoder->position());
2116 __ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
2117 }
2118
2119   void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
2120 LiftoffRegList pinned;
2121 LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
2122 MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
2123 __ PushRegister(kRef, obj);
2124 }
2125
2126   void Drop(FullDecoder* decoder) { __ DropValues(1); }
2127
2128   void TraceFunctionExit(FullDecoder* decoder) {
2129 CODE_COMMENT("trace function exit");
2130 // Before making the runtime call, spill all cache registers.
2131 __ SpillAllRegisters();
2132 LiftoffRegList pinned;
2133 // Get a register to hold the stack slot for the return value.
2134 LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
2135 __ AllocateStackSlot(info.gp(), sizeof(int64_t));
2136
2137 // Store the return value if there is exactly one. Multiple return values
2138 // are not handled yet.
2139 size_t num_returns = decoder->sig_->return_count();
2140 if (num_returns == 1) {
2141 ValueKind return_kind = decoder->sig_->GetReturn(0).kind();
2142 LiftoffRegister return_reg =
2143 __ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
2144 __ Store(info.gp(), no_reg, 0, return_reg,
2145 StoreType::ForValueKind(return_kind), pinned);
2146 }
2147 // Put the parameter in its place.
2148 WasmTraceExitDescriptor descriptor;
2149 DCHECK_EQ(0, descriptor.GetStackParameterCount());
2150 DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
2151 Register param_reg = descriptor.GetRegisterParameter(0);
2152 if (info.gp() != param_reg) {
2153 __ Move(param_reg, info.gp(), kPointerKind);
2154 }
2155
2156 source_position_table_builder_.AddPosition(
2157 __ pc_offset(), SourcePosition(decoder->position()), false);
2158 __ CallRuntimeStub(WasmCode::kWasmTraceExit);
2159 DefineSafepoint();
2160
2161 __ DeallocateStackSlot(sizeof(int64_t));
2162 }
2163
2164   void DoReturn(FullDecoder* decoder, uint32_t /* drop_values */) {
2165 if (FLAG_trace_wasm) TraceFunctionExit(decoder);
2166 size_t num_returns = decoder->sig_->return_count();
2167 if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
2168 __ LeaveFrame(StackFrame::WASM);
2169 __ DropStackSlotsAndRet(
2170 static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
2171 }
2172
2173   void LocalGet(FullDecoder* decoder, Value* result,
2174 const IndexImmediate<validate>& imm) {
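    // Push a copy of the local's slot onto the value stack: reuse its register
    // (incrementing the use count), copy a constant, or load a stack slot into
    // a fresh register.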
2175 auto local_slot = __ cache_state()->stack_state[imm.index];
2176 __ cache_state()->stack_state.emplace_back(
2177 local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
2178 auto* slot = &__ cache_state()->stack_state.back();
2179 if (local_slot.is_reg()) {
2180 __ cache_state()->inc_used(local_slot.reg());
2181 slot->MakeRegister(local_slot.reg());
2182 } else if (local_slot.is_const()) {
2183 slot->MakeConstant(local_slot.i32_const());
2184 } else {
2185 DCHECK(local_slot.is_stack());
2186 auto rc = reg_class_for(local_slot.kind());
2187 LiftoffRegister reg = __ GetUnusedRegister(rc, {});
2188 __ cache_state()->inc_used(reg);
2189 slot->MakeRegister(reg);
2190 __ Fill(reg, local_slot.offset(), local_slot.kind());
2191 }
2192 }
2193
2194   void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot,
2195 uint32_t local_index) {
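    // The source value lives in a stack slot. If the local's register has no
    // other uses, refill it in place; otherwise load the value into a fresh
    // register and make that the local's new location.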
2196 auto& state = *__ cache_state();
2197 auto& src_slot = state.stack_state.back();
2198 ValueKind kind = dst_slot->kind();
2199 if (dst_slot->is_reg()) {
2200 LiftoffRegister slot_reg = dst_slot->reg();
2201 if (state.get_use_count(slot_reg) == 1) {
2202 __ Fill(dst_slot->reg(), src_slot.offset(), kind);
2203 return;
2204 }
2205 state.dec_used(slot_reg);
2206 dst_slot->MakeStack();
2207 }
2208 DCHECK_EQ(kind, __ local_kind(local_index));
2209 RegClass rc = reg_class_for(kind);
2210 LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
2211 __ Fill(dst_reg, src_slot.offset(), kind);
2212 *dst_slot = LiftoffAssembler::VarState(kind, dst_reg, dst_slot->offset());
2213 __ cache_state()->inc_used(dst_reg);
2214 }
2215
2216   void LocalSet(uint32_t local_index, bool is_tee) {
2217 auto& state = *__ cache_state();
2218 auto& source_slot = state.stack_state.back();
2219 auto& target_slot = state.stack_state[local_index];
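    // Overwrite the local's slot with the value on top of the stack; the stack
    // value is popped below unless this is a local.tee.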
2220 switch (source_slot.loc()) {
2221 case kRegister:
2222 if (target_slot.is_reg()) state.dec_used(target_slot.reg());
2223 target_slot.Copy(source_slot);
2224 if (is_tee) state.inc_used(target_slot.reg());
2225 break;
2226 case kIntConst:
2227 if (target_slot.is_reg()) state.dec_used(target_slot.reg());
2228 target_slot.Copy(source_slot);
2229 break;
2230 case kStack:
2231 LocalSetFromStackSlot(&target_slot, local_index);
2232 break;
2233 }
2234 if (!is_tee) __ cache_state()->stack_state.pop_back();
2235 }
2236
2237   void LocalSet(FullDecoder* decoder, const Value& value,
2238 const IndexImmediate<validate>& imm) {
2239 LocalSet(imm.index, false);
2240 }
2241
2242   void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
2243 const IndexImmediate<validate>& imm) {
2244 LocalSet(imm.index, true);
2245 }
2246
2247   void AllocateLocals(FullDecoder* decoder, base::Vector<Value> local_values) {
2248 // TODO(7748): Introduce typed functions bailout reason
2249 unsupported(decoder, kGC, "let");
2250 }
2251
2252   void DeallocateLocals(FullDecoder* decoder, uint32_t count) {
2253 // TODO(7748): Introduce typed functions bailout reason
2254 unsupported(decoder, kGC, "let");
2255 }
2256
2257   Register GetGlobalBaseAndOffset(const WasmGlobal* global,
2258 LiftoffRegList* pinned, uint32_t* offset) {
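    // Imported mutable globals are accessed indirectly through the
    // ImportedMutableGlobals array; all other untagged globals live at a fixed
    // offset from the instance's globals area.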
2259 Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
2260 if (global->mutability && global->imported) {
2261 LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize,
2262 *pinned);
2263 __ Load(LiftoffRegister(addr), addr, no_reg,
2264 global->index * sizeof(Address), kPointerLoadType, *pinned);
2265 *offset = 0;
2266 } else {
2267 LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize, *pinned);
2268 *offset = global->offset;
2269 }
2270 return addr;
2271 }
2272
2273   void GetBaseAndOffsetForImportedMutableExternRefGlobal(
2274 const WasmGlobal* global, LiftoffRegList* pinned, Register* base,
2275 Register* offset) {
2276 Register globals_buffer =
2277 pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
2278 LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer,
2279 ImportedMutableGlobalsBuffers, *pinned);
2280 *base = globals_buffer;
2281 __ LoadTaggedPointer(
2282 *base, globals_buffer, no_reg,
2283 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global->offset),
2284 *pinned);
2285
2286 // For the offset we need the index of the global in the buffer, and
2287 // then calculate the actual offset from the index. Load the index from
2288 // the ImportedMutableGlobals array of the instance.
2289 Register imported_mutable_globals =
2290 pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
2291
2292 LOAD_INSTANCE_FIELD(imported_mutable_globals, ImportedMutableGlobals,
2293 kSystemPointerSize, *pinned);
2294 *offset = imported_mutable_globals;
2295 __ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg,
2296 global->index * sizeof(Address),
2297 kSystemPointerSize == 4 ? LoadType::kI32Load : LoadType::kI64Load,
2298 *pinned);
2299 __ emit_i32_shli(*offset, *offset, kTaggedSizeLog2);
2300 __ emit_i32_addi(*offset, *offset,
2301 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0));
2302 }
2303
2304   void GlobalGet(FullDecoder* decoder, Value* result,
2305 const GlobalIndexImmediate<validate>& imm) {
2306 const auto* global = &env_->module->globals[imm.index];
2307 ValueKind kind = global->type.kind();
2308 if (!CheckSupportedType(decoder, kind, "global")) {
2309 return;
2310 }
2311
2312 if (is_reference(kind)) {
2313 if (global->mutability && global->imported) {
2314 LiftoffRegList pinned;
2315 Register base = no_reg;
2316 Register offset = no_reg;
2317 GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
2318 &base, &offset);
2319 __ LoadTaggedPointer(base, base, offset, 0, pinned);
2320 __ PushRegister(kind, LiftoffRegister(base));
2321 return;
2322 }
2323
2324 LiftoffRegList pinned;
2325 Register globals_buffer =
2326 pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
2327 LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
2328 pinned);
2329 Register value = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
2330 __ LoadTaggedPointer(value, globals_buffer, no_reg,
2331 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
2332 imm.global->offset),
2333 pinned);
2334 __ PushRegister(kind, LiftoffRegister(value));
2335 return;
2336 }
2337 LiftoffRegList pinned;
2338 uint32_t offset = 0;
2339 Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
2340 LiftoffRegister value =
2341 pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned));
2342 LoadType type = LoadType::ForValueKind(kind);
2343 __ Load(value, addr, no_reg, offset, type, pinned, nullptr, false);
2344 __ PushRegister(kind, value);
2345 }
2346
2347   void GlobalSet(FullDecoder* decoder, const Value&,
2348 const GlobalIndexImmediate<validate>& imm) {
2349 auto* global = &env_->module->globals[imm.index];
2350 ValueKind kind = global->type.kind();
2351 if (!CheckSupportedType(decoder, kind, "global")) {
2352 return;
2353 }
2354
2355 if (is_reference(kind)) {
2356 if (global->mutability && global->imported) {
2357 LiftoffRegList pinned;
2358 LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
2359 Register base = no_reg;
2360 Register offset = no_reg;
2361 GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
2362 &base, &offset);
2363 __ StoreTaggedPointer(base, offset, 0, value, pinned);
2364 return;
2365 }
2366
2367 LiftoffRegList pinned;
2368 Register globals_buffer =
2369 pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
2370 LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
2371 pinned);
2372 LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
2373 __ StoreTaggedPointer(globals_buffer, no_reg,
2374 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
2375 imm.global->offset),
2376 value, pinned);
2377 return;
2378 }
2379 LiftoffRegList pinned;
2380 uint32_t offset = 0;
2381 Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
2382 LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
2383 StoreType type = StoreType::ForValueKind(kind);
2384 __ Store(addr, no_reg, offset, reg, type, {}, nullptr, false);
2385 }
2386
2387   void TableGet(FullDecoder* decoder, const Value&, Value*,
2388 const IndexImmediate<validate>& imm) {
2389 LiftoffRegList pinned;
2390
2391 LiftoffRegister table_index_reg =
2392 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
2393 __ LoadConstant(table_index_reg, WasmValue(imm.index));
2394 LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
2395
2396 LiftoffAssembler::VarState index = __ cache_state()->stack_state.back();
2397
2398 ValueKind result_kind = env_->module->tables[imm.index].type.kind();
2399 CallRuntimeStub(WasmCode::kWasmTableGet,
2400 MakeSig::Returns(result_kind).Params(kI32, kI32),
2401 {table_index, index}, decoder->position());
2402
2403 // Pop parameters from the value stack.
2404 __ cache_state()->stack_state.pop_back(1);
2405
2406 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
2407
2408 __ PushRegister(result_kind, LiftoffRegister(kReturnRegister0));
2409 }
2410
2411   void TableSet(FullDecoder* decoder, const Value&, const Value&,
2412 const IndexImmediate<validate>& imm) {
2413 LiftoffRegList pinned;
2414
2415 LiftoffRegister table_index_reg =
2416 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
2417 __ LoadConstant(table_index_reg, WasmValue(imm.index));
2418 LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
2419
2420 LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-1];
2421 LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2];
2422
2423 ValueKind table_kind = env_->module->tables[imm.index].type.kind();
2424
2425 CallRuntimeStub(WasmCode::kWasmTableSet,
2426 MakeSig::Params(kI32, kI32, table_kind),
2427 {table_index, index, value}, decoder->position());
2428
2429 // Pop parameters from the value stack.
2430 __ cache_state()->stack_state.pop_back(2);
2431
2432 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
2433 }
2434
2435   WasmCode::RuntimeStubId GetRuntimeStubIdForTrapReason(TrapReason reason) {
2436 switch (reason) {
2437 #define RUNTIME_STUB_FOR_TRAP(trap_reason) \
2438 case k##trap_reason: \
2439 return WasmCode::kThrowWasm##trap_reason;
2440
2441 FOREACH_WASM_TRAPREASON(RUNTIME_STUB_FOR_TRAP)
2442 #undef RUNTIME_STUB_FOR_TRAP
2443 default:
2444 UNREACHABLE();
2445 }
2446 }
2447
2448   void Trap(FullDecoder* decoder, TrapReason reason) {
2449 Label* trap_label =
2450 AddOutOfLineTrap(decoder, GetRuntimeStubIdForTrapReason(reason));
2451 __ emit_jump(trap_label);
2452 __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
2453 }
2454
2455   void AssertNull(FullDecoder* decoder, const Value& arg, Value* result) {
2456 LiftoffRegList pinned;
2457 LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
2458 Label* trap_label =
2459 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapNullDereference);
2460 LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
2461 LoadNullValue(null.gp(), pinned);
2462 __ emit_cond_jump(kUnequal, trap_label, kOptRef, obj.gp(), null.gp());
2463 __ PushRegister(kOptRef, obj);
2464 }
2465
2466   void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {
2467 unsupported(decoder, kOtherReason, "testing opcode");
2468 }
2469
2470   void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
2471 const Value& tval, Value* result) {
2472 LiftoffRegList pinned;
2473 Register condition = pinned.set(__ PopToRegister()).gp();
2474 ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
2475 DCHECK(CheckCompatibleStackSlotTypes(
2476 kind, __ cache_state()->stack_state.end()[-2].kind()));
2477 LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
2478 LiftoffRegister true_value = __ PopToRegister(pinned);
2479 LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
2480 {true_value, false_value}, {});
2481 if (!__ emit_select(dst, condition, true_value, false_value, kind)) {
2482 // Emit generic code (using branches) instead.
2483 Label cont;
2484 Label case_false;
2485 __ emit_cond_jump(kEqual, &case_false, kI32, condition);
2486 if (dst != true_value) __ Move(dst, true_value, kind);
2487 __ emit_jump(&cont);
2488
2489 __ bind(&case_false);
2490 if (dst != false_value) __ Move(dst, false_value, kind);
2491 __ bind(&cont);
2492 }
2493 __ PushRegister(kind, dst);
2494 }
2495
2496   void BrImpl(Control* target) {
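    // On the first branch to this target, initialize its merge state from the
    // current cache state; then transfer the current stack into that state and
    // jump to the target label.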
2497 if (!target->br_merge()->reached) {
2498 target->label_state.InitMerge(
2499 *__ cache_state(), __ num_locals(), target->br_merge()->arity,
2500 target->stack_depth + target->num_exceptions);
2501 }
2502 __ MergeStackWith(target->label_state, target->br_merge()->arity,
2503 target->is_loop() ? LiftoffAssembler::kBackwardJump
2504 : LiftoffAssembler::kForwardJump);
2505 __ jmp(target->label.get());
2506 }
2507
2508   void BrOrRet(FullDecoder* decoder, uint32_t depth,
2509 uint32_t /* drop_values */) {
2510 if (depth == decoder->control_depth() - 1) {
2511 DoReturn(decoder, 0);
2512 } else {
2513 BrImpl(decoder->control_at(depth));
2514 }
2515 }
2516
2517   void BrIf(FullDecoder* decoder, const Value& /* cond */, uint32_t depth) {
2518 // Before branching, materialize all constants. This avoids repeatedly
2519 // materializing them for each conditional branch.
2520 // TODO(clemensb): Do the same for br_table.
2521 if (depth != decoder->control_depth() - 1) {
2522 __ MaterializeMergedConstants(
2523 decoder->control_at(depth)->br_merge()->arity);
2524 }
2525
2526 Label cont_false;
2527
2528 // Test the condition on the value stack, jump to {cont_false} if zero.
2529 JumpIfFalse(decoder, &cont_false);
2530
2531 BrOrRet(decoder, depth, 0);
2532 __ bind(&cont_false);
2533 }
2534
2535 // Generate a branch table case, potentially reusing previously generated
2536 // stack transfer code.
2537   void GenerateBrCase(FullDecoder* decoder, uint32_t br_depth,
2538 std::map<uint32_t, MovableLabel>* br_targets) {
2539 MovableLabel& label = (*br_targets)[br_depth];
2540 if (label.get()->is_bound()) {
2541 __ jmp(label.get());
2542 } else {
2543 __ bind(label.get());
2544 BrOrRet(decoder, br_depth, 0);
2545 }
2546 }
2547
2548 // Generate a branch table for input in [min, max).
2549 // TODO(wasm): Generate a real branch table (like TF TableSwitch).
2550   void GenerateBrTable(FullDecoder* decoder, LiftoffRegister tmp,
2551 LiftoffRegister value, uint32_t min, uint32_t max,
2552 BranchTableIterator<validate>* table_iterator,
2553 std::map<uint32_t, MovableLabel>* br_targets) {
2554 DCHECK_LT(min, max);
2555 // Check base case.
2556 if (max == min + 1) {
2557 DCHECK_EQ(min, table_iterator->cur_index());
2558 GenerateBrCase(decoder, table_iterator->next(), br_targets);
2559 return;
2560 }
2561
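    // Binary search: jump to the upper half if {value} >= {split}, otherwise
    // fall through into the lower half.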
2562 uint32_t split = min + (max - min) / 2;
2563 Label upper_half;
2564 __ LoadConstant(tmp, WasmValue(split));
2565 __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kI32, value.gp(),
2566 tmp.gp());
2567 // Emit br table for lower half:
2568 GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
2569 br_targets);
2570 __ bind(&upper_half);
2571 // table_iterator will trigger a DCHECK if we don't stop decoding now.
2572 if (did_bailout()) return;
2573 // Emit br table for upper half:
2574 GenerateBrTable(decoder, tmp, value, split, max, table_iterator,
2575 br_targets);
2576 }
2577
2578   void BrTable(FullDecoder* decoder, const BranchTableImmediate<validate>& imm,
2579 const Value& key) {
2580 LiftoffRegList pinned;
2581 LiftoffRegister value = pinned.set(__ PopToRegister());
2582 BranchTableIterator<validate> table_iterator(decoder, imm);
2583 std::map<uint32_t, MovableLabel> br_targets;
2584
2585 if (imm.table_count > 0) {
2586 LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
2587 __ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
2588 Label case_default;
2589 __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kI32, value.gp(),
2590 tmp.gp());
2591
2592 GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
2593 &br_targets);
2594
2595 __ bind(&case_default);
2596 // table_iterator will trigger a DCHECK if we don't stop decoding now.
2597 if (did_bailout()) return;
2598 }
2599
2600 // Generate the default case.
2601 GenerateBrCase(decoder, table_iterator.next(), &br_targets);
2602 DCHECK(!table_iterator.has_next());
2603 }
2604
2605   void Else(FullDecoder* decoder, Control* c) {
2606 if (c->reachable()) {
2607 if (!c->end_merge.reached) {
2608 c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
2609 c->end_merge.arity,
2610 c->stack_depth + c->num_exceptions);
2611 }
2612 __ MergeFullStackWith(c->label_state, *__ cache_state());
2613 __ emit_jump(c->label.get());
2614 }
2615 __ bind(c->else_state->label.get());
2616 __ cache_state()->Steal(c->else_state->state);
2617 }
2618
2619   SpilledRegistersForInspection* GetSpilledRegistersForInspection() {
2620 DCHECK(for_debugging_);
2621 // If we are generating debugging code, we really need to spill all
2622 // registers to make them inspectable when stopping at the trap.
2623 auto* spilled = compilation_zone_->New<SpilledRegistersForInspection>(
2624 compilation_zone_);
2625 for (uint32_t i = 0, e = __ cache_state()->stack_height(); i < e; ++i) {
2626 auto& slot = __ cache_state()->stack_state[i];
2627 if (!slot.is_reg()) continue;
2628 spilled->entries.push_back(SpilledRegistersForInspection::Entry{
2629 slot.offset(), slot.reg(), slot.kind()});
2630 __ RecordUsedSpillOffset(slot.offset());
2631 }
2632 return spilled;
2633 }
2634
2635   Label* AddOutOfLineTrap(FullDecoder* decoder, WasmCode::RuntimeStubId stub,
2636 uint32_t pc = 0) {
2637 // Only memory OOB traps need a {pc}.
2638 DCHECK_IMPLIES(stub != WasmCode::kThrowWasmTrapMemOutOfBounds, pc == 0);
2639 DCHECK(FLAG_wasm_bounds_checks);
2640 OutOfLineSafepointInfo* safepoint_info = nullptr;
2641 if (V8_UNLIKELY(for_debugging_)) {
2642 // Execution does not return after a trap. Therefore we don't have to
2643 // define a safepoint for traps that would preserve references on the
2644 // stack. However, if this is debug code, then we have to preserve the
2645 // references so that they can be inspected.
2646 safepoint_info =
2647 compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
2648 __ cache_state()->GetTaggedSlotsForOOLCode(
2649 &safepoint_info->slots, &safepoint_info->spills,
2650 LiftoffAssembler::CacheState::SpillLocation::kStackSlots);
2651 }
2652 out_of_line_code_.push_back(OutOfLineCode::Trap(
2653 stub, decoder->position(),
2654 V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection()
2655 : nullptr,
2656 safepoint_info, pc, RegisterOOLDebugSideTableEntry(decoder)));
2657 return out_of_line_code_.back().label.get();
2658 }
2659
2660 enum ForceCheck : bool { kDoForceCheck = true, kDontForceCheck = false };
2661
2662   // Returns {no_reg} if the memory access is statically known to be out of
2663   // bounds (in which case a jump to the trap was already generated); returns
2664   // the GP {index} register otherwise (holding the ptrsized index).
2665   Register BoundsCheckMem(FullDecoder* decoder, uint32_t access_size,
2666 uint64_t offset, LiftoffRegister index,
2667 LiftoffRegList pinned, ForceCheck force_check) {
2668 const bool statically_oob =
2669 !base::IsInBounds<uintptr_t>(offset, access_size,
2670 env_->max_memory_size);
2671
2672 // After bounds checking, we know that the index must be ptrsize, hence only
2673 // look at the lower word on 32-bit systems (the high word is bounds-checked
2674 // further down).
2675 Register index_ptrsize =
2676 kNeedI64RegPair && index.is_gp_pair() ? index.low_gp() : index.gp();
2677
2678 // Without bounds checks (testing only), just return the ptrsize index.
2679 if (V8_UNLIKELY(env_->bounds_checks == kNoBoundsChecks)) {
2680 return index_ptrsize;
2681 }
2682
2683 // Early return for trap handler.
2684 DCHECK_IMPLIES(env_->module->is_memory64,
2685 env_->bounds_checks == kExplicitBoundsChecks);
2686 if (!force_check && !statically_oob &&
2687 env_->bounds_checks == kTrapHandler) {
2688 // With trap handlers we should not have a register pair as input (we
2689 // would only return the lower half).
2690 DCHECK(index.is_gp());
2691 return index_ptrsize;
2692 }
2693
2694 CODE_COMMENT("bounds check memory");
2695
2696 // Set {pc} of the OOL code to {0} to avoid generation of protected
2697     // instruction information (see {GenerateOutOfLineCode}).
2698 Label* trap_label =
2699 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds, 0);
2700
2701 if (V8_UNLIKELY(statically_oob)) {
2702 __ emit_jump(trap_label);
2703 decoder->SetSucceedingCodeDynamicallyUnreachable();
2704 return no_reg;
2705 }
2706
2707 // Convert the index to ptrsize, bounds-checking the high word on 32-bit
2708 // systems for memory64.
2709 if (!env_->module->is_memory64) {
2710 __ emit_u32_to_intptr(index_ptrsize, index_ptrsize);
2711 } else if (kSystemPointerSize == kInt32Size) {
2712 DCHECK_GE(kMaxUInt32, env_->max_memory_size);
2713 __ emit_cond_jump(kNotEqualZero, trap_label, kI32, index.high_gp());
2714 }
2715
2716 uintptr_t end_offset = offset + access_size - 1u;
2717
2718 pinned.set(index_ptrsize);
2719 LiftoffRegister end_offset_reg =
2720 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
2721 LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
2722 LOAD_INSTANCE_FIELD(mem_size.gp(), MemorySize, kSystemPointerSize, pinned);
2723
2724 __ LoadConstant(end_offset_reg, WasmValue::ForUintPtr(end_offset));
2725
2726 // If the end offset is larger than the smallest memory, dynamically check
2727 // the end offset against the actual memory size, which is not known at
2728 // compile time. Otherwise, only one check is required (see below).
2729 if (end_offset > env_->min_memory_size) {
2730 __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerKind,
2731 end_offset_reg.gp(), mem_size.gp());
2732 }
2733
2734 // Just reuse the end_offset register for computing the effective size
2735 // (which is >= 0 because of the check above).
2736 LiftoffRegister effective_size_reg = end_offset_reg;
2737 __ emit_ptrsize_sub(effective_size_reg.gp(), mem_size.gp(),
2738 end_offset_reg.gp());
2739
2740 __ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kPointerKind,
2741 index_ptrsize, effective_size_reg.gp());
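// If execution falls through the check above, index < mem_size - end_offset
// holds, so the whole access [index + offset, index + offset + access_size)
// lies within memory. For example, with offset 4 and access_size 8,
// end_offset is 11; for a 64 KiB memory the effective size is 65525, and any
// index below that keeps the last accessed byte within the 65536-byte range.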
2742 return index_ptrsize;
2743 }
2744
2745 void AlignmentCheckMem(FullDecoder* decoder, uint32_t access_size,
2746 uintptr_t offset, Register index,
2747 LiftoffRegList pinned) {
2748 Label* trap_label =
2749 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapUnalignedAccess, 0);
2750 Register address = __ GetUnusedRegister(kGpReg, pinned).gp();
2751
2752 const uint32_t align_mask = access_size - 1;
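// For example, a 4-byte access has align_mask 3 and is aligned iff
// ((index + offset) & 3) == 0. An aligned {offset} cannot change the low
// bits of that sum, so checking {index} alone suffices in that case.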
2753 if ((offset & align_mask) == 0) {
2754 // If {offset} is aligned, we can produce faster code.
2755
2756 // TODO(ahaas): On Intel, the "test" instruction implicitly computes the
2757 // AND of two operands. We could introduce a new variant of
2758 // {emit_cond_jump} to use the "test" instruction without the "and" here.
2759 // Then we can also avoid using the temp register here.
2760 __ emit_i32_andi(address, index, align_mask);
2761 __ emit_cond_jump(kUnequal, trap_label, kI32, address);
2762 } else {
2763 // For alignment checks we only look at the lower 32-bits in {offset}.
2764 __ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
2765 __ emit_i32_andi(address, address, align_mask);
2766 __ emit_cond_jump(kUnequal, trap_label, kI32, address);
2767 }
2768 }
2769
2770 void TraceMemoryOperation(bool is_store, MachineRepresentation rep,
2771 Register index, uintptr_t offset,
2772 WasmCodePosition position) {
2773 // Before making the runtime call, spill all cache registers.
2774 __ SpillAllRegisters();
2775
2776 LiftoffRegList pinned;
2777 if (index != no_reg) pinned.set(index);
2778 // Get one register for computing the effective offset (offset + index).
2779 LiftoffRegister effective_offset =
2780 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
2781 DCHECK_GE(kMaxUInt32, offset);
2782 __ LoadConstant(effective_offset, WasmValue(static_cast<uint32_t>(offset)));
2783 if (index != no_reg) {
2784 // TODO(clemensb): Do a 64-bit addition here if memory64 is used.
2785 __ emit_i32_add(effective_offset.gp(), effective_offset.gp(), index);
2786 }
2787
2788 // Get a register to hold the stack slot for MemoryTracingInfo.
2789 LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
2790 // Allocate stack slot for MemoryTracingInfo.
2791 __ AllocateStackSlot(info.gp(), sizeof(MemoryTracingInfo));
2792
2793 // Reuse the {effective_offset} register for all information to be stored in
2794 // the MemoryTracingInfo struct.
2795 LiftoffRegister data = effective_offset;
2796
2797 // Now store all information into the MemoryTracingInfo struct.
2798 if (kSystemPointerSize == 8) {
2799 // Zero-extend the effective offset to u64.
2800 CHECK(__ emit_type_conversion(kExprI64UConvertI32, data, effective_offset,
2801 nullptr));
2802 }
2803 __ Store(
2804 info.gp(), no_reg, offsetof(MemoryTracingInfo, offset), data,
2805 kSystemPointerSize == 8 ? StoreType::kI64Store : StoreType::kI32Store,
2806 pinned);
2807 __ LoadConstant(data, WasmValue(is_store ? 1 : 0));
2808 __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, is_store), data,
2809 StoreType::kI32Store8, pinned);
2810 __ LoadConstant(data, WasmValue(static_cast<int>(rep)));
2811 __ Store(info.gp(), no_reg, offsetof(MemoryTracingInfo, mem_rep), data,
2812 StoreType::kI32Store8, pinned);
2813
2814 WasmTraceMemoryDescriptor descriptor;
2815 DCHECK_EQ(0, descriptor.GetStackParameterCount());
2816 DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
2817 Register param_reg = descriptor.GetRegisterParameter(0);
2818 if (info.gp() != param_reg) {
2819 __ Move(param_reg, info.gp(), kPointerKind);
2820 }
2821
2822 source_position_table_builder_.AddPosition(__ pc_offset(),
2823 SourcePosition(position), false);
2824 __ CallRuntimeStub(WasmCode::kWasmTraceMemory);
2825 DefineSafepoint();
2826
2827 __ DeallocateStackSlot(sizeof(MemoryTracingInfo));
2828 }
2829
2830 bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
2831 int access_size, uintptr_t* offset) {
2832 if (!index_slot.is_const()) return false;
2833
2834 // Potentially zero extend index (which is a 32-bit constant).
2835 const uintptr_t index = static_cast<uint32_t>(index_slot.i32_const());
2836 const uintptr_t effective_offset = index + *offset;
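// For example, a constant index of 16 with *offset 8 gives an effective
// offset of 24; if 24 plus the access size does not exceed the minimum
// memory size, the access needs no runtime bounds check at all.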
2837
2838 if (effective_offset < index // overflow
2839 || !base::IsInBounds<uintptr_t>(effective_offset, access_size,
2840 env_->min_memory_size)) {
2841 return false;
2842 }
2843
2844 *offset = effective_offset;
2845 return true;
2846 }
2847
2848 Register GetMemoryStart(LiftoffRegList pinned) {
2849 Register memory_start = __ cache_state()->cached_mem_start;
2850 if (memory_start == no_reg) {
2851 memory_start = __ GetUnusedRegister(kGpReg, pinned).gp();
2852 LOAD_INSTANCE_FIELD(memory_start, MemoryStart, kSystemPointerSize,
2853 pinned);
2854 __ cache_state()->SetMemStartCacheRegister(memory_start);
2855 }
2856 return memory_start;
2857 }
2858
2859 void LoadMem(FullDecoder* decoder, LoadType type,
2860 const MemoryAccessImmediate<validate>& imm,
2861 const Value& index_val, Value* result) {
2862 ValueKind kind = type.value_type().kind();
2863 RegClass rc = reg_class_for(kind);
2864 if (!CheckSupportedType(decoder, kind, "load")) return;
2865
2866 uintptr_t offset = imm.offset;
2867 Register index = no_reg;
2868
2869 // Only look at the slot, do not pop it yet (will happen in PopToRegister
2870 // below, if this is not a statically-in-bounds index).
2871 auto& index_slot = __ cache_state()->stack_state.back();
2872 bool i64_offset = index_val.type == kWasmI64;
2873 if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
2874 __ cache_state()->stack_state.pop_back();
2875 CODE_COMMENT("load from memory (constant offset)");
2876 LiftoffRegList pinned;
2877 Register mem = pinned.set(GetMemoryStart(pinned));
2878 LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
2879 __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
2880 i64_offset);
2881 __ PushRegister(kind, value);
2882 } else {
2883 LiftoffRegister full_index = __ PopToRegister();
2884 index = BoundsCheckMem(decoder, type.size(), offset, full_index, {},
2885 kDontForceCheck);
2886 if (index == no_reg) return;
2887
2888 CODE_COMMENT("load from memory");
2889 LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
2890
2891 // Load the memory start address only now to reduce register pressure
2892 // (important on ia32).
2893 Register mem = pinned.set(GetMemoryStart(pinned));
2894 LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
2895
2896 uint32_t protected_load_pc = 0;
2897 __ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true,
2898 i64_offset);
2899 if (env_->bounds_checks == kTrapHandler) {
2900 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
2901 protected_load_pc);
2902 }
2903 __ PushRegister(kind, value);
2904 }
2905
2906 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
2907 TraceMemoryOperation(false, type.mem_type().representation(), index,
2908 offset, decoder->position());
2909 }
2910 }
2911
2912 void LoadTransform(FullDecoder* decoder, LoadType type,
2913 LoadTransformationKind transform,
2914 const MemoryAccessImmediate<validate>& imm,
2915 const Value& index_val, Value* result) {
2916 // LoadTransform requires SIMD support, so check for it here. If
2917 // unsupported, bail out and let TurboFan lower the code.
2918 if (!CheckSupportedType(decoder, kS128, "LoadTransform")) {
2919 return;
2920 }
2921
2922 LiftoffRegister full_index = __ PopToRegister();
2923 // For load splats and load zero, LoadType is the size of the load; for
2924 // load extends, LoadType is the size of the lane, and the access always
2925 // loads 8 bytes.
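// For example, v128.load8x8_s reads 8 bytes from memory and sign-extends
// each byte to a 16-bit lane, so the bounds check must cover 8 bytes.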
2926 uint32_t access_size =
2927 transform == LoadTransformationKind::kExtend ? 8 : type.size();
2928 Register index = BoundsCheckMem(decoder, access_size, imm.offset,
2929 full_index, {}, kDontForceCheck);
2930 if (index == no_reg) return;
2931
2932 uintptr_t offset = imm.offset;
2933 LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
2934 CODE_COMMENT("load with transformation");
2935 Register addr = GetMemoryStart(pinned);
2936 LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
2937 uint32_t protected_load_pc = 0;
2938 __ LoadTransform(value, addr, index, offset, type, transform,
2939 &protected_load_pc);
2940
2941 if (env_->bounds_checks == kTrapHandler) {
2942 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
2943 protected_load_pc);
2944 }
2945 __ PushRegister(kS128, value);
2946
2947 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
2948 // Again load extend is different.
2949 MachineRepresentation mem_rep =
2950 transform == LoadTransformationKind::kExtend
2951 ? MachineRepresentation::kWord64
2952 : type.mem_type().representation();
2953 TraceMemoryOperation(false, mem_rep, index, offset, decoder->position());
2954 }
2955 }
2956
2957 void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value,
2958 const Value& _index, const MemoryAccessImmediate<validate>& imm,
2959 const uint8_t laneidx, Value* _result) {
2960 if (!CheckSupportedType(decoder, kS128, "LoadLane")) {
2961 return;
2962 }
2963
2964 LiftoffRegList pinned;
2965 LiftoffRegister value = pinned.set(__ PopToRegister());
2966 LiftoffRegister full_index = __ PopToRegister();
2967 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
2968 full_index, pinned, kDontForceCheck);
2969 if (index == no_reg) return;
2970
2971 uintptr_t offset = imm.offset;
2972 pinned.set(index);
2973 CODE_COMMENT("load lane");
2974 Register addr = GetMemoryStart(pinned);
2975 LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
2976 uint32_t protected_load_pc = 0;
2977
2978 __ LoadLane(result, value, addr, index, offset, type, laneidx,
2979 &protected_load_pc);
2980 if (env_->bounds_checks == kTrapHandler) {
2981 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
2982 protected_load_pc);
2983 }
2984
2985 __ PushRegister(kS128, result);
2986
2987 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
2988 TraceMemoryOperation(false, type.mem_type().representation(), index,
2989 offset, decoder->position());
2990 }
2991 }
2992
2993 void StoreMem(FullDecoder* decoder, StoreType type,
2994 const MemoryAccessImmediate<validate>& imm,
2995 const Value& index_val, const Value& value_val) {
2996 ValueKind kind = type.value_type().kind();
2997 if (!CheckSupportedType(decoder, kind, "store")) return;
2998
2999 LiftoffRegList pinned;
3000 LiftoffRegister value = pinned.set(__ PopToRegister());
3001
3002 uintptr_t offset = imm.offset;
3003 Register index = no_reg;
3004
3005 auto& index_slot = __ cache_state()->stack_state.back();
3006 if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
3007 __ cache_state()->stack_state.pop_back();
3008 CODE_COMMENT("store to memory (constant offset)");
3009 Register mem = pinned.set(GetMemoryStart(pinned));
3010 __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true);
3011 } else {
3012 LiftoffRegister full_index = __ PopToRegister(pinned);
3013 index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index,
3014 pinned, kDontForceCheck);
3015 if (index == no_reg) return;
3016
3017 pinned.set(index);
3018 CODE_COMMENT("store to memory");
3019 uint32_t protected_store_pc = 0;
3020 // Load the memory start address only now to reduce register pressure
3021 // (important on ia32).
3022 Register mem = pinned.set(GetMemoryStart(pinned));
3023 LiftoffRegList outer_pinned;
3024 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
3025 __ Store(mem, index, offset, value, type, outer_pinned,
3026 &protected_store_pc, true);
3027 if (env_->bounds_checks == kTrapHandler) {
3028 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
3029 protected_store_pc);
3030 }
3031 }
3032
3033 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
3034 TraceMemoryOperation(true, type.mem_rep(), index, offset,
3035 decoder->position());
3036 }
3037 }
3038
3039 void StoreLane(FullDecoder* decoder, StoreType type,
3040 const MemoryAccessImmediate<validate>& imm,
3041 const Value& _index, const Value& _value, const uint8_t lane) {
3042 if (!CheckSupportedType(decoder, kS128, "StoreLane")) return;
3043 LiftoffRegList pinned;
3044 LiftoffRegister value = pinned.set(__ PopToRegister());
3045 LiftoffRegister full_index = __ PopToRegister(pinned);
3046 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
3047 full_index, pinned, kDontForceCheck);
3048 if (index == no_reg) return;
3049
3050 uintptr_t offset = imm.offset;
3051 pinned.set(index);
3052 CODE_COMMENT("store lane to memory");
3053 Register addr = pinned.set(GetMemoryStart(pinned));
3054 uint32_t protected_store_pc = 0;
3055 __ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc);
3056 if (env_->bounds_checks == kTrapHandler) {
3057 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
3058 protected_store_pc);
3059 }
3060 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
3061 TraceMemoryOperation(true, type.mem_rep(), index, offset,
3062 decoder->position());
3063 }
3064 }
3065
3066 void CurrentMemoryPages(FullDecoder* /* decoder */, Value* /* result */) {
3067 Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
3068 LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize, {});
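// Convert the memory size in bytes to the number of 64 KiB wasm pages.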
3069 __ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
3070 LiftoffRegister result{mem_size};
3071 if (env_->module->is_memory64 && kNeedI64RegPair) {
3072 LiftoffRegister high_word =
3073 __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(mem_size));
3074 // The high word is always 0 on 32-bit systems.
3075 __ LoadConstant(high_word, WasmValue{uint32_t{0}});
3076 result = LiftoffRegister::ForPair(mem_size, high_word.gp());
3077 }
3078 __ PushRegister(env_->module->is_memory64 ? kI64 : kI32, result);
3079 }
3080
3081 void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) {
3082 // Pop the input, then spill all cache registers to make the runtime call.
3083 LiftoffRegList pinned;
3084 LiftoffRegister input = pinned.set(__ PopToRegister());
3085 __ SpillAllRegisters();
3086
3087 LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
3088
3089 Label done;
3090
3091 if (env_->module->is_memory64) {
3092 // If the high word is not 0, this will always fail (would grow by
3093 // >=256TB). The int32_t value will be sign-extended below.
3094 __ LoadConstant(result, WasmValue(int32_t{-1}));
3095 if (kNeedI64RegPair) {
3096 __ emit_cond_jump(kUnequal /* neq */, &done, kI32, input.high_gp());
3097 input = input.low();
3098 } else {
3099 LiftoffRegister high_word = __ GetUnusedRegister(kGpReg, pinned);
3100 __ emit_i64_shri(high_word, input, 32);
3101 __ emit_cond_jump(kUnequal /* neq */, &done, kI32, high_word.gp());
3102 }
3103 }
3104
3105 WasmMemoryGrowDescriptor descriptor;
3106 DCHECK_EQ(0, descriptor.GetStackParameterCount());
3107 DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
3108 DCHECK_EQ(machine_type(kI32), descriptor.GetParameterType(0));
3109
3110 Register param_reg = descriptor.GetRegisterParameter(0);
3111 if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kI32);
3112
3113 __ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
3114 DefineSafepoint();
3115 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
3116
3117 if (kReturnRegister0 != result.gp()) {
3118 __ Move(result.gp(), kReturnRegister0, kI32);
3119 }
3120
3121 __ bind(&done);
3122
3123 if (env_->module->is_memory64) {
3124 LiftoffRegister result64 = result;
3125 if (kNeedI64RegPair) result64 = __ GetUnusedRegister(kGpRegPair, pinned);
3126 __ emit_type_conversion(kExprI64SConvertI32, result64, result, nullptr);
3127 __ PushRegister(kI64, result64);
3128 } else {
3129 __ PushRegister(kI32, result);
3130 }
3131 }
3132
3133 base::OwnedVector<DebugSideTable::Entry::Value>
3134 GetCurrentDebugSideTableEntries(
3135 FullDecoder* decoder,
3136 DebugSideTableBuilder::AssumeSpilling assume_spilling) {
3137 auto& stack_state = __ cache_state()->stack_state;
3138 auto values =
3139 base::OwnedVector<DebugSideTable::Entry::Value>::NewForOverwrite(
3140 stack_state.size());
3141
3142 // For function calls, the decoder still has the arguments on the stack, but
3143 // Liftoff already popped them. Hence {decoder->stack_size()} can be bigger
3144 // than expected. Just ignore that and use the lower part only.
3145 DCHECK_LE(stack_state.size() - num_exceptions_,
3146 decoder->num_locals() + decoder->stack_size());
3147 int index = 0;
3148 int decoder_stack_index = decoder->stack_size();
3149 // Iterate the operand stack control block by control block, so that we can
3150 // handle the implicit exception value for try blocks.
3151 for (int j = decoder->control_depth() - 1; j >= 0; j--) {
3152 Control* control = decoder->control_at(j);
3153 Control* next_control = j > 0 ? decoder->control_at(j - 1) : nullptr;
3154 int end_index = next_control
3155 ? next_control->stack_depth + __ num_locals() +
3156 next_control->num_exceptions
3157 : __ cache_state()->stack_height();
3158 bool exception = control->is_try_catch() || control->is_try_catchall();
3159 for (; index < end_index; ++index) {
3160 auto& slot = stack_state[index];
3161 auto& value = values[index];
3162 value.index = index;
3163 ValueType type =
3164 index < static_cast<int>(__ num_locals())
3165 ? decoder->local_type(index)
3166 : exception ? ValueType::Ref(HeapType::kExtern, kNonNullable)
3167 : decoder->stack_value(decoder_stack_index--)->type;
3168 DCHECK(CheckCompatibleStackSlotTypes(slot.kind(), type.kind()));
3169 value.type = type;
3170 switch (slot.loc()) {
3171 case kIntConst:
3172 value.storage = DebugSideTable::Entry::kConstant;
3173 value.i32_const = slot.i32_const();
3174 break;
3175 case kRegister:
3176 DCHECK_NE(DebugSideTableBuilder::kDidSpill, assume_spilling);
3177 if (assume_spilling == DebugSideTableBuilder::kAllowRegisters) {
3178 value.storage = DebugSideTable::Entry::kRegister;
3179 value.reg_code = slot.reg().liftoff_code();
3180 break;
3181 }
3182 DCHECK_EQ(DebugSideTableBuilder::kAssumeSpilling, assume_spilling);
3183 V8_FALLTHROUGH;
3184 case kStack:
3185 value.storage = DebugSideTable::Entry::kStack;
3186 value.stack_offset = slot.offset();
3187 break;
3188 }
3189 exception = false;
3190 }
3191 }
3192 DCHECK_EQ(values.size(), index);
3193 return values;
3194 }
3195
3196 void RegisterDebugSideTableEntry(
3197 FullDecoder* decoder,
3198 DebugSideTableBuilder::AssumeSpilling assume_spilling) {
3199 if (V8_LIKELY(!debug_sidetable_builder_)) return;
3200 debug_sidetable_builder_->NewEntry(
3201 __ pc_offset(),
3202 GetCurrentDebugSideTableEntries(decoder, assume_spilling).as_vector());
3203 }
3204
3205 DebugSideTableBuilder::EntryBuilder* RegisterOOLDebugSideTableEntry(
3206 FullDecoder* decoder) {
3207 if (V8_LIKELY(!debug_sidetable_builder_)) return nullptr;
3208 return debug_sidetable_builder_->NewOOLEntry(
3209 GetCurrentDebugSideTableEntries(decoder,
3210 DebugSideTableBuilder::kAssumeSpilling)
3211 .as_vector());
3212 }
3213
3214 enum TailCall : bool { kTailCall = true, kNoTailCall = false };
3215
3216 void CallDirect(FullDecoder* decoder,
3217 const CallFunctionImmediate<validate>& imm,
3218 const Value args[], Value[]) {
3219 CallDirect(decoder, imm, args, nullptr, kNoTailCall);
3220 }
3221
3222 void CallIndirect(FullDecoder* decoder, const Value& index_val,
3223 const CallIndirectImmediate<validate>& imm,
3224 const Value args[], Value returns[]) {
3225 CallIndirect(decoder, index_val, imm, kNoTailCall);
3226 }
3227
3228 void CallRef(FullDecoder* decoder, const Value& func_ref,
3229 const FunctionSig* sig, uint32_t sig_index, const Value args[],
3230 Value returns[]) {
3231 CallRef(decoder, func_ref.type, sig, kNoTailCall);
3232 }
3233
3234 void ReturnCall(FullDecoder* decoder,
3235 const CallFunctionImmediate<validate>& imm,
3236 const Value args[]) {
3237 CallDirect(decoder, imm, args, nullptr, kTailCall);
3238 }
3239
3240 void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
3241 const CallIndirectImmediate<validate>& imm,
3242 const Value args[]) {
3243 CallIndirect(decoder, index_val, imm, kTailCall);
3244 }
3245
3246 void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
3247 const FunctionSig* sig, uint32_t sig_index,
3248 const Value args[]) {
3249 CallRef(decoder, func_ref.type, sig, kTailCall);
3250 }
3251
3252 void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
3253 // Before branching, materialize all constants. This avoids repeatedly
3254 // materializing them for each conditional branch.
3255 if (depth != decoder->control_depth() - 1) {
3256 __ MaterializeMergedConstants(
3257 decoder->control_at(depth)->br_merge()->arity);
3258 }
3259
3260 Label cont_false;
3261 LiftoffRegList pinned;
3262 LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
3263 Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
3264 LoadNullValue(null, pinned);
3265 __ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(),
3266 null);
3267
3268 BrOrRet(decoder, depth, 0);
3269 __ bind(&cont_false);
3270 __ PushRegister(kRef, ref);
3271 }
3272
3273 void BrOnNonNull(FullDecoder* decoder, const Value& ref_object,
3274 uint32_t depth) {
3275 // Before branching, materialize all constants. This avoids repeatedly
3276 // materializing them for each conditional branch.
3277 if (depth != decoder->control_depth() - 1) {
3278 __ MaterializeMergedConstants(
3279 decoder->control_at(depth)->br_merge()->arity);
3280 }
3281
3282 Label cont_false;
3283 LiftoffRegList pinned;
3284 LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
3285 // Put the reference back onto the stack for the branch.
3286 __ PushRegister(kRef, ref);
3287
3288 Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
3289 LoadNullValue(null, pinned);
3290 __ emit_cond_jump(kEqual, &cont_false, ref_object.type.kind(), ref.gp(),
3291 null);
3292
3293 BrOrRet(decoder, depth, 0);
3294 // Drop the reference if we are not branching.
3295 __ DropValues(1);
3296 __ bind(&cont_false);
3297 }
3298
3299 template <ValueKind src_kind, ValueKind result_kind,
3300 ValueKind result_lane_kind = kVoid, typename EmitFn>
3301 void EmitTerOp(EmitFn fn) {
3302 static constexpr RegClass src_rc = reg_class_for(src_kind);
3303 static constexpr RegClass result_rc = reg_class_for(result_kind);
3304 LiftoffRegister src3 = __ PopToRegister();
3305 LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3));
3306 LiftoffRegister src1 =
3307 __ PopToRegister(LiftoffRegList::ForRegs(src3, src2));
3308 // Reusing src1 and src2 would complicate codegen for select on some
3309 // backends, so we only allow reusing src3 (the mask), and pin src1 and src2.
3310 LiftoffRegister dst =
3311 src_rc == result_rc
3312 ? __ GetUnusedRegister(result_rc, {src3},
3313 LiftoffRegList::ForRegs(src1, src2))
3314 : __ GetUnusedRegister(result_rc, {});
3315 CallEmitFn(fn, dst, src1, src2, src3);
3316 if (V8_UNLIKELY(nondeterminism_)) {
3317 auto pinned = LiftoffRegList::ForRegs(dst);
3318 if (result_kind == ValueKind::kF32 || result_kind == ValueKind::kF64) {
3319 CheckNan(dst, pinned, result_kind);
3320 } else if (result_kind == ValueKind::kS128 &&
3321 (result_lane_kind == kF32 || result_lane_kind == kF64)) {
3322 CheckS128Nan(dst, LiftoffRegList::ForRegs(src1, src2, src3, dst),
3323 result_lane_kind);
3324 }
3325 }
3326 __ PushRegister(result_kind, dst);
3327 }
3328
3329 template <typename EmitFn, typename EmitFnImm>
3330 void EmitSimdShiftOp(EmitFn fn, EmitFnImm fnImm) {
3331 static constexpr RegClass result_rc = reg_class_for(kS128);
3332
3333 LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
3334 // Check if the RHS is an immediate.
3335 if (rhs_slot.is_const()) {
3336 __ cache_state()->stack_state.pop_back();
3337 int32_t imm = rhs_slot.i32_const();
3338
3339 LiftoffRegister operand = __ PopToRegister();
3340 LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
3341
3342 CallEmitFn(fnImm, dst, operand, imm);
3343 __ PushRegister(kS128, dst);
3344 } else {
3345 LiftoffRegister count = __ PopToRegister();
3346 LiftoffRegister operand = __ PopToRegister();
3347 LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
3348
3349 CallEmitFn(fn, dst, operand, count);
3350 __ PushRegister(kS128, dst);
3351 }
3352 }
3353
3354 template <ValueKind result_lane_kind>
3355 void EmitSimdFloatRoundingOpWithCFallback(
3356 bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
3357 ExternalReference (*ext_ref)()) {
3358 static constexpr RegClass rc = reg_class_for(kS128);
3359 LiftoffRegister src = __ PopToRegister();
3360 LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {});
3361 if (!(asm_.*emit_fn)(dst, src)) {
3362 // Return v128 via stack for ARM.
3363 auto sig_v_s = MakeSig::Params(kS128);
3364 GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
3365 }
3366 if (V8_UNLIKELY(nondeterminism_)) {
3367 auto pinned = LiftoffRegList::ForRegs(dst);
3368 CheckS128Nan(dst, pinned, result_lane_kind);
3369 }
3370 __ PushRegister(kS128, dst);
3371 }
3372
3373 void SimdOp(FullDecoder* decoder, WasmOpcode opcode, base::Vector<Value> args,
3374 Value* result) {
3375 if (!CpuFeatures::SupportsWasmSimd128()) {
3376 return unsupported(decoder, kSimd, "simd");
3377 }
3378 switch (opcode) {
3379 case wasm::kExprI8x16Swizzle:
3380 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_swizzle);
3381 case wasm::kExprI8x16Popcnt:
3382 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_popcnt);
3383 case wasm::kExprI8x16Splat:
3384 return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat);
3385 case wasm::kExprI16x8Splat:
3386 return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i16x8_splat);
3387 case wasm::kExprI32x4Splat:
3388 return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i32x4_splat);
3389 case wasm::kExprI64x2Splat:
3390 return EmitUnOp<kI64, kS128>(&LiftoffAssembler::emit_i64x2_splat);
3391 case wasm::kExprF32x4Splat:
3392 return EmitUnOp<kF32, kS128, kF32>(&LiftoffAssembler::emit_f32x4_splat);
3393 case wasm::kExprF64x2Splat:
3394 return EmitUnOp<kF64, kS128, kF64>(&LiftoffAssembler::emit_f64x2_splat);
3395 case wasm::kExprI8x16Eq:
3396 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_eq);
3397 case wasm::kExprI8x16Ne:
3398 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ne);
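      // "Less than" / "less equal" comparisons are emitted as the mirrored
      // "greater than" / "greater equal" operation with swapped operands (the
      // {true} template argument of EmitBinOp), since a < b is b > a.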
3399 case wasm::kExprI8x16LtS:
3400 return EmitBinOp<kS128, kS128, true>(
3401 &LiftoffAssembler::emit_i8x16_gt_s);
3402 case wasm::kExprI8x16LtU:
3403 return EmitBinOp<kS128, kS128, true>(
3404 &LiftoffAssembler::emit_i8x16_gt_u);
3405 case wasm::kExprI8x16GtS:
3406 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_s);
3407 case wasm::kExprI8x16GtU:
3408 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_gt_u);
3409 case wasm::kExprI8x16LeS:
3410 return EmitBinOp<kS128, kS128, true>(
3411 &LiftoffAssembler::emit_i8x16_ge_s);
3412 case wasm::kExprI8x16LeU:
3413 return EmitBinOp<kS128, kS128, true>(
3414 &LiftoffAssembler::emit_i8x16_ge_u);
3415 case wasm::kExprI8x16GeS:
3416 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_s);
3417 case wasm::kExprI8x16GeU:
3418 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_ge_u);
3419 case wasm::kExprI16x8Eq:
3420 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_eq);
3421 case wasm::kExprI16x8Ne:
3422 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ne);
3423 case wasm::kExprI16x8LtS:
3424 return EmitBinOp<kS128, kS128, true>(
3425 &LiftoffAssembler::emit_i16x8_gt_s);
3426 case wasm::kExprI16x8LtU:
3427 return EmitBinOp<kS128, kS128, true>(
3428 &LiftoffAssembler::emit_i16x8_gt_u);
3429 case wasm::kExprI16x8GtS:
3430 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_s);
3431 case wasm::kExprI16x8GtU:
3432 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_gt_u);
3433 case wasm::kExprI16x8LeS:
3434 return EmitBinOp<kS128, kS128, true>(
3435 &LiftoffAssembler::emit_i16x8_ge_s);
3436 case wasm::kExprI16x8LeU:
3437 return EmitBinOp<kS128, kS128, true>(
3438 &LiftoffAssembler::emit_i16x8_ge_u);
3439 case wasm::kExprI16x8GeS:
3440 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_s);
3441 case wasm::kExprI16x8GeU:
3442 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_ge_u);
3443 case wasm::kExprI32x4Eq:
3444 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_eq);
3445 case wasm::kExprI32x4Ne:
3446 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ne);
3447 case wasm::kExprI32x4LtS:
3448 return EmitBinOp<kS128, kS128, true>(
3449 &LiftoffAssembler::emit_i32x4_gt_s);
3450 case wasm::kExprI32x4LtU:
3451 return EmitBinOp<kS128, kS128, true>(
3452 &LiftoffAssembler::emit_i32x4_gt_u);
3453 case wasm::kExprI32x4GtS:
3454 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_gt_s);
3455 case wasm::kExprI32x4GtU:
3456 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_gt_u);
3457 case wasm::kExprI32x4LeS:
3458 return EmitBinOp<kS128, kS128, true>(
3459 &LiftoffAssembler::emit_i32x4_ge_s);
3460 case wasm::kExprI32x4LeU:
3461 return EmitBinOp<kS128, kS128, true>(
3462 &LiftoffAssembler::emit_i32x4_ge_u);
3463 case wasm::kExprI32x4GeS:
3464 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_s);
3465 case wasm::kExprI32x4GeU:
3466 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_u);
3467 case wasm::kExprI64x2Eq:
3468 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_eq);
3469 case wasm::kExprI64x2Ne:
3470 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ne);
3471 case wasm::kExprI64x2LtS:
3472 return EmitBinOp<kS128, kS128, true>(
3473 &LiftoffAssembler::emit_i64x2_gt_s);
3474 case wasm::kExprI64x2GtS:
3475 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_gt_s);
3476 case wasm::kExprI64x2LeS:
3477 return EmitBinOp<kS128, kS128, true>(
3478 &LiftoffAssembler::emit_i64x2_ge_s);
3479 case wasm::kExprI64x2GeS:
3480 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ge_s);
3481 case wasm::kExprF32x4Eq:
3482 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_eq);
3483 case wasm::kExprF32x4Ne:
3484 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_ne);
3485 case wasm::kExprF32x4Lt:
3486 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_lt);
3487 case wasm::kExprF32x4Gt:
3488 return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f32x4_lt);
3489 case wasm::kExprF32x4Le:
3490 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_le);
3491 case wasm::kExprF32x4Ge:
3492 return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f32x4_le);
3493 case wasm::kExprF64x2Eq:
3494 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_eq);
3495 case wasm::kExprF64x2Ne:
3496 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_ne);
3497 case wasm::kExprF64x2Lt:
3498 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_lt);
3499 case wasm::kExprF64x2Gt:
3500 return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f64x2_lt);
3501 case wasm::kExprF64x2Le:
3502 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_le);
3503 case wasm::kExprF64x2Ge:
3504 return EmitBinOp<kS128, kS128, true>(&LiftoffAssembler::emit_f64x2_le);
3505 case wasm::kExprS128Not:
3506 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_s128_not);
3507 case wasm::kExprS128And:
3508 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and);
3509 case wasm::kExprS128Or:
3510 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_or);
3511 case wasm::kExprS128Xor:
3512 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_xor);
3513 case wasm::kExprS128Select:
3514 return EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_select);
3515 case wasm::kExprI8x16Neg:
3516 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
3517 case wasm::kExprV128AnyTrue:
3518 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v128_anytrue);
3519 case wasm::kExprI8x16AllTrue:
3520 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_alltrue);
3521 case wasm::kExprI8x16BitMask:
3522 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_bitmask);
3523 case wasm::kExprI8x16Shl:
3524 return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shl,
3525 &LiftoffAssembler::emit_i8x16_shli);
3526 case wasm::kExprI8x16ShrS:
3527 return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_s,
3528 &LiftoffAssembler::emit_i8x16_shri_s);
3529 case wasm::kExprI8x16ShrU:
3530 return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_u,
3531 &LiftoffAssembler::emit_i8x16_shri_u);
3532 case wasm::kExprI8x16Add:
3533 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add);
3534 case wasm::kExprI8x16AddSatS:
3535 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_s);
3536 case wasm::kExprI8x16AddSatU:
3537 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add_sat_u);
3538 case wasm::kExprI8x16Sub:
3539 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub);
3540 case wasm::kExprI8x16SubSatS:
3541 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_s);
3542 case wasm::kExprI8x16SubSatU:
3543 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_sub_sat_u);
3544 case wasm::kExprI8x16MinS:
3545 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_s);
3546 case wasm::kExprI8x16MinU:
3547 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_min_u);
3548 case wasm::kExprI8x16MaxS:
3549 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_s);
3550 case wasm::kExprI8x16MaxU:
3551 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u);
3552 case wasm::kExprI16x8Neg:
3553 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg);
3554 case wasm::kExprI16x8AllTrue:
3555 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_alltrue);
3556 case wasm::kExprI16x8BitMask:
3557 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_bitmask);
3558 case wasm::kExprI16x8Shl:
3559 return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shl,
3560 &LiftoffAssembler::emit_i16x8_shli);
3561 case wasm::kExprI16x8ShrS:
3562 return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_s,
3563 &LiftoffAssembler::emit_i16x8_shri_s);
3564 case wasm::kExprI16x8ShrU:
3565 return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_u,
3566 &LiftoffAssembler::emit_i16x8_shri_u);
3567 case wasm::kExprI16x8Add:
3568 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add);
3569 case wasm::kExprI16x8AddSatS:
3570 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_s);
3571 case wasm::kExprI16x8AddSatU:
3572 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add_sat_u);
3573 case wasm::kExprI16x8Sub:
3574 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub);
3575 case wasm::kExprI16x8SubSatS:
3576 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_s);
3577 case wasm::kExprI16x8SubSatU:
3578 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_sub_sat_u);
3579 case wasm::kExprI16x8Mul:
3580 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_mul);
3581 case wasm::kExprI16x8MinS:
3582 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_min_s);
3583 case wasm::kExprI16x8MinU:
3584 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_min_u);
3585 case wasm::kExprI16x8MaxS:
3586 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_s);
3587 case wasm::kExprI16x8MaxU:
3588 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u);
3589 case wasm::kExprI16x8ExtAddPairwiseI8x16S:
3590 return EmitUnOp<kS128, kS128>(
3591 &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s);
3592 case wasm::kExprI16x8ExtAddPairwiseI8x16U:
3593 return EmitUnOp<kS128, kS128>(
3594 &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u);
3595 case wasm::kExprI16x8ExtMulLowI8x16S:
3596 return EmitBinOp<kS128, kS128>(
3597 &LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s);
3598 case wasm::kExprI16x8ExtMulLowI8x16U:
3599 return EmitBinOp<kS128, kS128>(
3600 &LiftoffAssembler::emit_i16x8_extmul_low_i8x16_u);
3601 case wasm::kExprI16x8ExtMulHighI8x16S:
3602 return EmitBinOp<kS128, kS128>(
3603 &LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s);
3604 case wasm::kExprI16x8ExtMulHighI8x16U:
3605 return EmitBinOp<kS128, kS128>(
3606 &LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u);
3607 case wasm::kExprI16x8Q15MulRSatS:
3608 return EmitBinOp<kS128, kS128>(
3609 &LiftoffAssembler::emit_i16x8_q15mulr_sat_s);
3610 case wasm::kExprI32x4Neg:
3611 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
3612 case wasm::kExprI32x4AllTrue:
3613 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_alltrue);
3614 case wasm::kExprI32x4BitMask:
3615 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_bitmask);
3616 case wasm::kExprI32x4Shl:
3617 return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shl,
3618 &LiftoffAssembler::emit_i32x4_shli);
3619 case wasm::kExprI32x4ShrS:
3620 return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_s,
3621 &LiftoffAssembler::emit_i32x4_shri_s);
3622 case wasm::kExprI32x4ShrU:
3623 return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_u,
3624 &LiftoffAssembler::emit_i32x4_shri_u);
3625 case wasm::kExprI32x4Add:
3626 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_add);
3627 case wasm::kExprI32x4Sub:
3628 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_sub);
3629 case wasm::kExprI32x4Mul:
3630 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_mul);
3631 case wasm::kExprI32x4MinS:
3632 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_min_s);
3633 case wasm::kExprI32x4MinU:
3634 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_min_u);
3635 case wasm::kExprI32x4MaxS:
3636 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_s);
3637 case wasm::kExprI32x4MaxU:
3638 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_max_u);
3639 case wasm::kExprI32x4DotI16x8S:
3640 return EmitBinOp<kS128, kS128>(
3641 &LiftoffAssembler::emit_i32x4_dot_i16x8_s);
3642 case wasm::kExprI32x4ExtAddPairwiseI16x8S:
3643 return EmitUnOp<kS128, kS128>(
3644 &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s);
3645 case wasm::kExprI32x4ExtAddPairwiseI16x8U:
3646 return EmitUnOp<kS128, kS128>(
3647 &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u);
3648 case wasm::kExprI32x4ExtMulLowI16x8S:
3649 return EmitBinOp<kS128, kS128>(
3650 &LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s);
3651 case wasm::kExprI32x4ExtMulLowI16x8U:
3652 return EmitBinOp<kS128, kS128>(
3653 &LiftoffAssembler::emit_i32x4_extmul_low_i16x8_u);
3654 case wasm::kExprI32x4ExtMulHighI16x8S:
3655 return EmitBinOp<kS128, kS128>(
3656 &LiftoffAssembler::emit_i32x4_extmul_high_i16x8_s);
3657 case wasm::kExprI32x4ExtMulHighI16x8U:
3658 return EmitBinOp<kS128, kS128>(
3659 &LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u);
3660 case wasm::kExprI64x2Neg:
3661 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
3662 case wasm::kExprI64x2AllTrue:
3663 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_alltrue);
3664 case wasm::kExprI64x2Shl:
3665 return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl,
3666 &LiftoffAssembler::emit_i64x2_shli);
3667 case wasm::kExprI64x2ShrS:
3668 return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_s,
3669 &LiftoffAssembler::emit_i64x2_shri_s);
3670 case wasm::kExprI64x2ShrU:
3671 return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_u,
3672 &LiftoffAssembler::emit_i64x2_shri_u);
3673 case wasm::kExprI64x2Add:
3674 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_add);
3675 case wasm::kExprI64x2Sub:
3676 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_sub);
3677 case wasm::kExprI64x2Mul:
3678 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_mul);
3679 case wasm::kExprI64x2ExtMulLowI32x4S:
3680 return EmitBinOp<kS128, kS128>(
3681 &LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s);
3682 case wasm::kExprI64x2ExtMulLowI32x4U:
3683 return EmitBinOp<kS128, kS128>(
3684 &LiftoffAssembler::emit_i64x2_extmul_low_i32x4_u);
3685 case wasm::kExprI64x2ExtMulHighI32x4S:
3686 return EmitBinOp<kS128, kS128>(
3687 &LiftoffAssembler::emit_i64x2_extmul_high_i32x4_s);
3688 case wasm::kExprI64x2ExtMulHighI32x4U:
3689 return EmitBinOp<kS128, kS128>(
3690 &LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u);
3691 case wasm::kExprI64x2BitMask:
3692 return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_bitmask);
3693 case wasm::kExprI64x2SConvertI32x4Low:
3694 return EmitUnOp<kS128, kS128>(
3695 &LiftoffAssembler::emit_i64x2_sconvert_i32x4_low);
3696 case wasm::kExprI64x2SConvertI32x4High:
3697 return EmitUnOp<kS128, kS128>(
3698 &LiftoffAssembler::emit_i64x2_sconvert_i32x4_high);
3699 case wasm::kExprI64x2UConvertI32x4Low:
3700 return EmitUnOp<kS128, kS128>(
3701 &LiftoffAssembler::emit_i64x2_uconvert_i32x4_low);
3702 case wasm::kExprI64x2UConvertI32x4High:
3703 return EmitUnOp<kS128, kS128>(
3704 &LiftoffAssembler::emit_i64x2_uconvert_i32x4_high);
3705 case wasm::kExprF32x4Abs:
3706 return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_abs);
3707 case wasm::kExprF32x4Neg:
3708 return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_neg);
3709 case wasm::kExprF32x4Sqrt:
3710 return EmitUnOp<kS128, kS128, kF32>(&LiftoffAssembler::emit_f32x4_sqrt);
3711 case wasm::kExprF32x4Ceil:
3712 return EmitSimdFloatRoundingOpWithCFallback<kF32>(
3713 &LiftoffAssembler::emit_f32x4_ceil,
3714 &ExternalReference::wasm_f32x4_ceil);
3715 case wasm::kExprF32x4Floor:
3716 return EmitSimdFloatRoundingOpWithCFallback<kF32>(
3717 &LiftoffAssembler::emit_f32x4_floor,
3718 ExternalReference::wasm_f32x4_floor);
3719 case wasm::kExprF32x4Trunc:
3720 return EmitSimdFloatRoundingOpWithCFallback<kF32>(
3721 &LiftoffAssembler::emit_f32x4_trunc,
3722 ExternalReference::wasm_f32x4_trunc);
3723 case wasm::kExprF32x4NearestInt:
3724 return EmitSimdFloatRoundingOpWithCFallback<kF32>(
3725 &LiftoffAssembler::emit_f32x4_nearest_int,
3726 ExternalReference::wasm_f32x4_nearest_int);
3727 case wasm::kExprF32x4Add:
3728 return EmitBinOp<kS128, kS128, false, kF32>(
3729 &LiftoffAssembler::emit_f32x4_add);
3730 case wasm::kExprF32x4Sub:
3731 return EmitBinOp<kS128, kS128, false, kF32>(
3732 &LiftoffAssembler::emit_f32x4_sub);
3733 case wasm::kExprF32x4Mul:
3734 return EmitBinOp<kS128, kS128, false, kF32>(
3735 &LiftoffAssembler::emit_f32x4_mul);
3736 case wasm::kExprF32x4Div:
3737 return EmitBinOp<kS128, kS128, false, kF32>(
3738 &LiftoffAssembler::emit_f32x4_div);
3739 case wasm::kExprF32x4Min:
3740 return EmitBinOp<kS128, kS128, false, kF32>(
3741 &LiftoffAssembler::emit_f32x4_min);
3742 case wasm::kExprF32x4Max:
3743 return EmitBinOp<kS128, kS128, false, kF32>(
3744 &LiftoffAssembler::emit_f32x4_max);
3745 case wasm::kExprF32x4Pmin:
3746 return EmitBinOp<kS128, kS128, false, kF32>(
3747 &LiftoffAssembler::emit_f32x4_pmin);
3748 case wasm::kExprF32x4Pmax:
3749 return EmitBinOp<kS128, kS128, false, kF32>(
3750 &LiftoffAssembler::emit_f32x4_pmax);
3751 case wasm::kExprF64x2Abs:
3752 return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_abs);
3753 case wasm::kExprF64x2Neg:
3754 return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_neg);
3755 case wasm::kExprF64x2Sqrt:
3756 return EmitUnOp<kS128, kS128, kF64>(&LiftoffAssembler::emit_f64x2_sqrt);
3757 case wasm::kExprF64x2Ceil:
3758 return EmitSimdFloatRoundingOpWithCFallback<kF64>(
3759 &LiftoffAssembler::emit_f64x2_ceil,
3760 &ExternalReference::wasm_f64x2_ceil);
3761 case wasm::kExprF64x2Floor:
3762 return EmitSimdFloatRoundingOpWithCFallback<kF64>(
3763 &LiftoffAssembler::emit_f64x2_floor,
3764 ExternalReference::wasm_f64x2_floor);
3765 case wasm::kExprF64x2Trunc:
3766 return EmitSimdFloatRoundingOpWithCFallback<kF64>(
3767 &LiftoffAssembler::emit_f64x2_trunc,
3768 ExternalReference::wasm_f64x2_trunc);
3769 case wasm::kExprF64x2NearestInt:
3770 return EmitSimdFloatRoundingOpWithCFallback<kF64>(
3771 &LiftoffAssembler::emit_f64x2_nearest_int,
3772 ExternalReference::wasm_f64x2_nearest_int);
3773 case wasm::kExprF64x2Add:
3774 return EmitBinOp<kS128, kS128, false, kF64>(
3775 &LiftoffAssembler::emit_f64x2_add);
3776 case wasm::kExprF64x2Sub:
3777 return EmitBinOp<kS128, kS128, false, kF64>(
3778 &LiftoffAssembler::emit_f64x2_sub);
3779 case wasm::kExprF64x2Mul:
3780 return EmitBinOp<kS128, kS128, false, kF64>(
3781 &LiftoffAssembler::emit_f64x2_mul);
3782 case wasm::kExprF64x2Div:
3783 return EmitBinOp<kS128, kS128, false, kF64>(
3784 &LiftoffAssembler::emit_f64x2_div);
3785 case wasm::kExprF64x2Min:
3786 return EmitBinOp<kS128, kS128, false, kF64>(
3787 &LiftoffAssembler::emit_f64x2_min);
3788 case wasm::kExprF64x2Max:
3789 return EmitBinOp<kS128, kS128, false, kF64>(
3790 &LiftoffAssembler::emit_f64x2_max);
3791 case wasm::kExprF64x2Pmin:
3792 return EmitBinOp<kS128, kS128, false, kF64>(
3793 &LiftoffAssembler::emit_f64x2_pmin);
3794 case wasm::kExprF64x2Pmax:
3795 return EmitBinOp<kS128, kS128, false, kF64>(
3796 &LiftoffAssembler::emit_f64x2_pmax);
3797 case wasm::kExprI32x4SConvertF32x4:
3798 return EmitUnOp<kS128, kS128, kF32>(
3799 &LiftoffAssembler::emit_i32x4_sconvert_f32x4);
3800 case wasm::kExprI32x4UConvertF32x4:
3801 return EmitUnOp<kS128, kS128, kF32>(
3802 &LiftoffAssembler::emit_i32x4_uconvert_f32x4);
3803 case wasm::kExprF32x4SConvertI32x4:
3804 return EmitUnOp<kS128, kS128, kF32>(
3805 &LiftoffAssembler::emit_f32x4_sconvert_i32x4);
3806 case wasm::kExprF32x4UConvertI32x4:
3807 return EmitUnOp<kS128, kS128, kF32>(
3808 &LiftoffAssembler::emit_f32x4_uconvert_i32x4);
3809 case wasm::kExprI8x16SConvertI16x8:
3810 return EmitBinOp<kS128, kS128>(
3811 &LiftoffAssembler::emit_i8x16_sconvert_i16x8);
3812 case wasm::kExprI8x16UConvertI16x8:
3813 return EmitBinOp<kS128, kS128>(
3814 &LiftoffAssembler::emit_i8x16_uconvert_i16x8);
3815 case wasm::kExprI16x8SConvertI32x4:
3816 return EmitBinOp<kS128, kS128>(
3817 &LiftoffAssembler::emit_i16x8_sconvert_i32x4);
3818 case wasm::kExprI16x8UConvertI32x4:
3819 return EmitBinOp<kS128, kS128>(
3820 &LiftoffAssembler::emit_i16x8_uconvert_i32x4);
3821 case wasm::kExprI16x8SConvertI8x16Low:
3822 return EmitUnOp<kS128, kS128>(
3823 &LiftoffAssembler::emit_i16x8_sconvert_i8x16_low);
3824 case wasm::kExprI16x8SConvertI8x16High:
3825 return EmitUnOp<kS128, kS128>(
3826 &LiftoffAssembler::emit_i16x8_sconvert_i8x16_high);
3827 case wasm::kExprI16x8UConvertI8x16Low:
3828 return EmitUnOp<kS128, kS128>(
3829 &LiftoffAssembler::emit_i16x8_uconvert_i8x16_low);
3830 case wasm::kExprI16x8UConvertI8x16High:
3831 return EmitUnOp<kS128, kS128>(
3832 &LiftoffAssembler::emit_i16x8_uconvert_i8x16_high);
3833 case wasm::kExprI32x4SConvertI16x8Low:
3834 return EmitUnOp<kS128, kS128>(
3835 &LiftoffAssembler::emit_i32x4_sconvert_i16x8_low);
3836 case wasm::kExprI32x4SConvertI16x8High:
3837 return EmitUnOp<kS128, kS128>(
3838 &LiftoffAssembler::emit_i32x4_sconvert_i16x8_high);
3839 case wasm::kExprI32x4UConvertI16x8Low:
3840 return EmitUnOp<kS128, kS128>(
3841 &LiftoffAssembler::emit_i32x4_uconvert_i16x8_low);
3842 case wasm::kExprI32x4UConvertI16x8High:
3843 return EmitUnOp<kS128, kS128>(
3844 &LiftoffAssembler::emit_i32x4_uconvert_i16x8_high);
3845 case wasm::kExprS128AndNot:
3846 return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s128_and_not);
3847 case wasm::kExprI8x16RoundingAverageU:
3848 return EmitBinOp<kS128, kS128>(
3849 &LiftoffAssembler::emit_i8x16_rounding_average_u);
3850 case wasm::kExprI16x8RoundingAverageU:
3851 return EmitBinOp<kS128, kS128>(
3852 &LiftoffAssembler::emit_i16x8_rounding_average_u);
3853 case wasm::kExprI8x16Abs:
3854 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_abs);
3855 case wasm::kExprI16x8Abs:
3856 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_abs);
3857 case wasm::kExprI32x4Abs:
3858 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_abs);
3859 case wasm::kExprI64x2Abs:
3860 return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_abs);
3861 case wasm::kExprF64x2ConvertLowI32x4S:
3862 return EmitUnOp<kS128, kS128, kF64>(
3863 &LiftoffAssembler::emit_f64x2_convert_low_i32x4_s);
3864 case wasm::kExprF64x2ConvertLowI32x4U:
3865 return EmitUnOp<kS128, kS128, kF64>(
3866 &LiftoffAssembler::emit_f64x2_convert_low_i32x4_u);
3867 case wasm::kExprF64x2PromoteLowF32x4:
3868 return EmitUnOp<kS128, kS128, kF64>(
3869 &LiftoffAssembler::emit_f64x2_promote_low_f32x4);
3870 case wasm::kExprF32x4DemoteF64x2Zero:
3871 return EmitUnOp<kS128, kS128, kF32>(
3872 &LiftoffAssembler::emit_f32x4_demote_f64x2_zero);
3873 case wasm::kExprI32x4TruncSatF64x2SZero:
3874 return EmitUnOp<kS128, kS128>(
3875 &LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero);
3876 case wasm::kExprI32x4TruncSatF64x2UZero:
3877 return EmitUnOp<kS128, kS128>(
3878 &LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero);
3879 default:
3880 unsupported(decoder, kSimd, "simd");
3881 }
3882 }
3883
3884 template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
3885 void EmitSimdExtractLaneOp(EmitFn fn,
3886 const SimdLaneImmediate<validate>& imm) {
3887 static constexpr RegClass src_rc = reg_class_for(src_kind);
3888 static constexpr RegClass result_rc = reg_class_for(result_kind);
3889 LiftoffRegister lhs = __ PopToRegister();
3890 LiftoffRegister dst = src_rc == result_rc
3891 ? __ GetUnusedRegister(result_rc, {lhs}, {})
3892 : __ GetUnusedRegister(result_rc, {});
3893 fn(dst, lhs, imm.lane);
3894 __ PushRegister(result_kind, dst);
3895 }
3896
3897 template <ValueKind src2_kind, typename EmitFn>
3898 void EmitSimdReplaceLaneOp(EmitFn fn,
3899 const SimdLaneImmediate<validate>& imm) {
3900 static constexpr RegClass src1_rc = reg_class_for(kS128);
3901 static constexpr RegClass src2_rc = reg_class_for(src2_kind);
3902 static constexpr RegClass result_rc = reg_class_for(kS128);
3903 // On backends which need fp pair, src1_rc and result_rc end up being
3904 // kFpRegPair, which is != kFpReg, but we still want to pin src2 when it is
3905 // kFpReg, since it can overlap with those pairs.
3906 static constexpr bool pin_src2 = kNeedS128RegPair && src2_rc == kFpReg;
3907
3908 // Does not work for arm
3909 LiftoffRegister src2 = __ PopToRegister();
3910 LiftoffRegister src1 = (src1_rc == src2_rc || pin_src2)
3911 ? __ PopToRegister(LiftoffRegList::ForRegs(src2))
3912 : __
3913 PopToRegister();
3914 LiftoffRegister dst =
3915 (src2_rc == result_rc || pin_src2)
3916 ? __ GetUnusedRegister(result_rc, {src1},
3917 LiftoffRegList::ForRegs(src2))
3918 : __ GetUnusedRegister(result_rc, {src1}, {});
3919 fn(dst, src1, src2, imm.lane);
3920 __ PushRegister(kS128, dst);
3921 }
3922
3923 void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
3924 const SimdLaneImmediate<validate>& imm,
3925 const base::Vector<Value> inputs, Value* result) {
3926 if (!CpuFeatures::SupportsWasmSimd128()) {
3927 return unsupported(decoder, kSimd, "simd");
3928 }
3929 switch (opcode) {
3930 #define CASE_SIMD_EXTRACT_LANE_OP(opcode, kind, fn) \
3931 case wasm::kExpr##opcode: \
3932 EmitSimdExtractLaneOp<kS128, k##kind>( \
3933 [=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \
3934 __ emit_##fn(dst, lhs, imm_lane_idx); \
3935 }, \
3936 imm); \
3937 break;
3938 CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneS, I32, i8x16_extract_lane_s)
3939 CASE_SIMD_EXTRACT_LANE_OP(I8x16ExtractLaneU, I32, i8x16_extract_lane_u)
3940 CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneS, I32, i16x8_extract_lane_s)
3941 CASE_SIMD_EXTRACT_LANE_OP(I16x8ExtractLaneU, I32, i16x8_extract_lane_u)
3942 CASE_SIMD_EXTRACT_LANE_OP(I32x4ExtractLane, I32, i32x4_extract_lane)
3943 CASE_SIMD_EXTRACT_LANE_OP(I64x2ExtractLane, I64, i64x2_extract_lane)
3944 CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane)
3945 CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane)
3946 #undef CASE_SIMD_EXTRACT_LANE_OP
3947 #define CASE_SIMD_REPLACE_LANE_OP(opcode, kind, fn) \
3948 case wasm::kExpr##opcode: \
3949 EmitSimdReplaceLaneOp<k##kind>( \
3950 [=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
3951 uint8_t imm_lane_idx) { \
3952 __ emit_##fn(dst, src1, src2, imm_lane_idx); \
3953 }, \
3954 imm); \
3955 break;
3956 CASE_SIMD_REPLACE_LANE_OP(I8x16ReplaceLane, I32, i8x16_replace_lane)
3957 CASE_SIMD_REPLACE_LANE_OP(I16x8ReplaceLane, I32, i16x8_replace_lane)
3958 CASE_SIMD_REPLACE_LANE_OP(I32x4ReplaceLane, I32, i32x4_replace_lane)
3959 CASE_SIMD_REPLACE_LANE_OP(I64x2ReplaceLane, I64, i64x2_replace_lane)
3960 CASE_SIMD_REPLACE_LANE_OP(F32x4ReplaceLane, F32, f32x4_replace_lane)
3961 CASE_SIMD_REPLACE_LANE_OP(F64x2ReplaceLane, F64, f64x2_replace_lane)
3962 #undef CASE_SIMD_REPLACE_LANE_OP
3963 default:
3964 unsupported(decoder, kSimd, "simd");
3965 }
3966 }
3967
3968 void S128Const(FullDecoder* decoder, const Simd128Immediate<validate>& imm,
3969 Value* result) {
3970 if (!CpuFeatures::SupportsWasmSimd128()) {
3971 return unsupported(decoder, kSimd, "simd");
3972 }
3973 constexpr RegClass result_rc = reg_class_for(kS128);
3974 LiftoffRegister dst = __ GetUnusedRegister(result_rc, {});
3975 bool all_zeroes = std::all_of(std::begin(imm.value), std::end(imm.value),
3976 [](uint8_t v) { return v == 0; });
3977 bool all_ones = std::all_of(std::begin(imm.value), std::end(imm.value),
3978 [](uint8_t v) { return v == 0xff; });
3979 if (all_zeroes) {
3980 __ LiftoffAssembler::emit_s128_xor(dst, dst, dst);
3981 } else if (all_ones) {
3982 // Any SIMD eq will work, i32x4 is efficient on all archs.
3983 __ LiftoffAssembler::emit_i32x4_eq(dst, dst, dst);
3984 } else {
3985 __ LiftoffAssembler::emit_s128_const(dst, imm.value);
3986 }
3987 __ PushRegister(kS128, dst);
3988 }
3989
3990   void Simd8x16ShuffleOp(FullDecoder* decoder,
3991 const Simd128Immediate<validate>& imm,
3992 const Value& input0, const Value& input1,
3993 Value* result) {
3994 if (!CpuFeatures::SupportsWasmSimd128()) {
3995 return unsupported(decoder, kSimd, "simd");
3996 }
3997 static constexpr RegClass result_rc = reg_class_for(kS128);
3998 LiftoffRegister rhs = __ PopToRegister();
3999 LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
4000 LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
4001
4002 uint8_t shuffle[kSimd128Size];
4003 memcpy(shuffle, imm.value, sizeof(shuffle));
4004 bool is_swizzle;
4005 bool needs_swap;
4006 wasm::SimdShuffle::CanonicalizeShuffle(lhs == rhs, shuffle, &needs_swap,
4007 &is_swizzle);
4008 if (needs_swap) {
4009 std::swap(lhs, rhs);
4010 }
4011 __ LiftoffAssembler::emit_i8x16_shuffle(dst, lhs, rhs, shuffle, is_swizzle);
4012 __ PushRegister(kS128, dst);
4013 }
4014
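  // Tags the untagged integer in {reg} as a Smi in place by shifting in the
  // Smi tag bits. With pointer compression or on 32-bit targets a 32-bit
  // shift is sufficient; otherwise the full 64-bit register is shifted.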
4015   void ToSmi(Register reg) {
4016 if (COMPRESS_POINTERS_BOOL || kSystemPointerSize == 4) {
4017 __ emit_i32_shli(reg, reg, kSmiShiftSize + kSmiTagSize);
4018 } else {
4019 __ emit_i64_shli(LiftoffRegister{reg}, LiftoffRegister{reg},
4020 kSmiShiftSize + kSmiTagSize);
4021 }
4022 }
4023
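  // Encodes a 32-bit value into the exception values array as two 16-bit
  // half words, each stored as a Smi. {index_in_array} is decremented for
  // every slot written, so the array is filled back-to-front.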
4024   void Store32BitExceptionValue(Register values_array, int* index_in_array,
4025 Register value, LiftoffRegList pinned) {
4026 LiftoffRegister tmp_reg = __ GetUnusedRegister(kGpReg, pinned);
4027 // Get the lower half word into tmp_reg and extend to a Smi.
4028 --*index_in_array;
4029 __ emit_i32_andi(tmp_reg.gp(), value, 0xffff);
4030 ToSmi(tmp_reg.gp());
4031 __ StoreTaggedPointer(
4032 values_array, no_reg,
4033 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
4034 tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier);
4035
4036 // Get the upper half word into tmp_reg and extend to a Smi.
4037 --*index_in_array;
4038 __ emit_i32_shri(tmp_reg.gp(), value, 16);
4039 ToSmi(tmp_reg.gp());
4040 __ StoreTaggedPointer(
4041 values_array, no_reg,
4042 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
4043 tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier);
4044 }
4045
4046   void Store64BitExceptionValue(Register values_array, int* index_in_array,
4047 LiftoffRegister value, LiftoffRegList pinned) {
4048 if (kNeedI64RegPair) {
4049 Store32BitExceptionValue(values_array, index_in_array, value.low_gp(),
4050 pinned);
4051 Store32BitExceptionValue(values_array, index_in_array, value.high_gp(),
4052 pinned);
4053 } else {
4054 Store32BitExceptionValue(values_array, index_in_array, value.gp(),
4055 pinned);
4056 __ emit_i64_shri(value, value, 32);
4057 Store32BitExceptionValue(values_array, index_in_array, value.gp(),
4058 pinned);
4059 }
4060 }
4061
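  // The following Load*BitExceptionValue helpers reassemble values from the
  // 16-bit Smi-encoded chunks written above, advancing {index} per chunk.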
4062   void Load16BitExceptionValue(LiftoffRegister dst,
4063 LiftoffRegister values_array, uint32_t* index,
4064 LiftoffRegList pinned) {
4065 __ LoadSmiAsInt32(
4066 dst, values_array.gp(),
4067 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index), pinned);
4068 (*index)++;
4069 }
4070
4071   void Load32BitExceptionValue(Register dst, LiftoffRegister values_array,
4072 uint32_t* index, LiftoffRegList pinned) {
4073 LiftoffRegister upper = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4074 Load16BitExceptionValue(upper, values_array, index, pinned);
4075 __ emit_i32_shli(upper.gp(), upper.gp(), 16);
4076 Load16BitExceptionValue(LiftoffRegister(dst), values_array, index, pinned);
4077 __ emit_i32_or(dst, upper.gp(), dst);
4078 }
4079
4080   void Load64BitExceptionValue(LiftoffRegister dst,
4081 LiftoffRegister values_array, uint32_t* index,
4082 LiftoffRegList pinned) {
4083 if (kNeedI64RegPair) {
4084 Load32BitExceptionValue(dst.high_gp(), values_array, index, pinned);
4085 Load32BitExceptionValue(dst.low_gp(), values_array, index, pinned);
4086 } else {
4087 Load16BitExceptionValue(dst, values_array, index, pinned);
4088 __ emit_i64_shli(dst, dst, 48);
4089 LiftoffRegister tmp_reg =
4090 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4091 Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
4092 __ emit_i64_shli(tmp_reg, tmp_reg, 32);
4093 __ emit_i64_or(dst, tmp_reg, dst);
4094 Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
4095 __ emit_i64_shli(tmp_reg, tmp_reg, 16);
4096 __ emit_i64_or(dst, tmp_reg, dst);
4097 Load16BitExceptionValue(tmp_reg, values_array, index, pinned);
4098 __ emit_i64_or(dst, tmp_reg, dst);
4099 }
4100 }
4101
4102   void StoreExceptionValue(ValueType type, Register values_array,
4103 int* index_in_array, LiftoffRegList pinned) {
4104 LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
4105 switch (type.kind()) {
4106 case kI32:
4107 Store32BitExceptionValue(values_array, index_in_array, value.gp(),
4108 pinned);
4109 break;
4110 case kF32: {
4111 LiftoffRegister gp_reg =
4112 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4113 __ emit_type_conversion(kExprI32ReinterpretF32, gp_reg, value, nullptr);
4114 Store32BitExceptionValue(values_array, index_in_array, gp_reg.gp(),
4115 pinned);
4116 break;
4117 }
4118 case kI64:
4119 Store64BitExceptionValue(values_array, index_in_array, value, pinned);
4120 break;
4121 case kF64: {
4122 LiftoffRegister tmp_reg =
4123 pinned.set(__ GetUnusedRegister(reg_class_for(kI64), pinned));
4124 __ emit_type_conversion(kExprI64ReinterpretF64, tmp_reg, value,
4125 nullptr);
4126 Store64BitExceptionValue(values_array, index_in_array, tmp_reg, pinned);
4127 break;
4128 }
4129 case kS128: {
4130 LiftoffRegister tmp_reg =
4131 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4132 for (int i : {3, 2, 1, 0}) {
4133 __ emit_i32x4_extract_lane(tmp_reg, value, i);
4134 Store32BitExceptionValue(values_array, index_in_array, tmp_reg.gp(),
4135 pinned);
4136 }
4137 break;
4138 }
4139 case wasm::kRef:
4140 case wasm::kOptRef:
4141 case wasm::kRtt:
4142 case wasm::kRttWithDepth: {
4143 --(*index_in_array);
4144 __ StoreTaggedPointer(
4145 values_array, no_reg,
4146 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
4147 *index_in_array),
4148 value, pinned);
4149 break;
4150 }
4151 case wasm::kI8:
4152 case wasm::kI16:
4153 case wasm::kVoid:
4154 case wasm::kBottom:
4155 UNREACHABLE();
4156 }
4157 }
4158
4159   void LoadExceptionValue(ValueKind kind, LiftoffRegister values_array,
4160 uint32_t* index, LiftoffRegList pinned) {
4161 RegClass rc = reg_class_for(kind);
4162 LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
4163 switch (kind) {
4164 case kI32:
4165 Load32BitExceptionValue(value.gp(), values_array, index, pinned);
4166 break;
4167 case kF32: {
4168 LiftoffRegister tmp_reg =
4169 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4170 Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
4171 __ emit_type_conversion(kExprF32ReinterpretI32, value, tmp_reg,
4172 nullptr);
4173 break;
4174 }
4175 case kI64:
4176 Load64BitExceptionValue(value, values_array, index, pinned);
4177 break;
4178 case kF64: {
4179 RegClass rc_i64 = reg_class_for(kI64);
4180 LiftoffRegister tmp_reg =
4181 pinned.set(__ GetUnusedRegister(rc_i64, pinned));
4182 Load64BitExceptionValue(tmp_reg, values_array, index, pinned);
4183 __ emit_type_conversion(kExprF64ReinterpretI64, value, tmp_reg,
4184 nullptr);
4185 break;
4186 }
4187 case kS128: {
4188 LiftoffRegister tmp_reg =
4189 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4190 Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
4191 __ emit_i32x4_splat(value, tmp_reg);
4192 for (int lane : {1, 2, 3}) {
4193 Load32BitExceptionValue(tmp_reg.gp(), values_array, index, pinned);
4194 __ emit_i32x4_replace_lane(value, value, tmp_reg, lane);
4195 }
4196 break;
4197 }
4198 case wasm::kRef:
4199 case wasm::kOptRef:
4200 case wasm::kRtt:
4201 case wasm::kRttWithDepth: {
4202 __ LoadTaggedPointer(
4203 value.gp(), values_array.gp(), no_reg,
4204 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index),
4205 pinned);
4206 (*index)++;
4207 break;
4208 }
4209 case wasm::kI8:
4210 case wasm::kI16:
4211 case wasm::kVoid:
4212 case wasm::kBottom:
4213 UNREACHABLE();
4214 }
4215 __ PushRegister(kind, value);
4216 }
4217
4218   void GetExceptionValues(FullDecoder* decoder,
4219 LiftoffAssembler::VarState& exception_var,
4220 const WasmTag* tag) {
4221 LiftoffRegList pinned;
4222 CODE_COMMENT("get exception values");
4223 LiftoffRegister values_array = GetExceptionProperty(
4224 exception_var, RootIndex::kwasm_exception_values_symbol);
4225 pinned.set(values_array);
4226 uint32_t index = 0;
4227 const WasmTagSig* sig = tag->sig;
4228 for (ValueType param : sig->parameters()) {
4229 LoadExceptionValue(param.kind(), values_array, &index, pinned);
4230 }
4231 DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(tag));
4232 }
4233
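  // Emits the landing pad for a potentially throwing call inside a try block:
  // on the exceptional path the current state is merged into the catch state
  // of the innermost try and control jumps to its catch label; on the regular
  // path the exception slot is dropped again.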
4234   void EmitLandingPad(FullDecoder* decoder, int handler_offset) {
4235 if (decoder->current_catch() == -1) return;
4236 MovableLabel handler;
4237
4238 // If we return from the throwing code normally, just skip over the handler.
4239 Label skip_handler;
4240 __ emit_jump(&skip_handler);
4241
4242 // Handler: merge into the catch state, and jump to the catch body.
4243 CODE_COMMENT("-- landing pad --");
4244 __ bind(handler.get());
4245 __ ExceptionHandler();
4246 __ PushException();
4247 handlers_.push_back({std::move(handler), handler_offset});
4248 Control* current_try =
4249 decoder->control_at(decoder->control_depth_of_current_catch());
4250 DCHECK_NOT_NULL(current_try->try_info);
4251 if (!current_try->try_info->catch_reached) {
4252 current_try->try_info->catch_state.InitMerge(
4253 *__ cache_state(), __ num_locals(), 1,
4254 current_try->stack_depth + current_try->num_exceptions);
4255 current_try->try_info->catch_reached = true;
4256 }
4257 __ MergeStackWith(current_try->try_info->catch_state, 1,
4258 LiftoffAssembler::kForwardJump);
4259     __ emit_jump(&current_try->try_info->catch_label);
4260
4261 __ bind(&skip_handler);
4262 // Drop the exception.
4263 __ DropValues(1);
4264 }
4265
4266   void Throw(FullDecoder* decoder, const TagIndexImmediate<validate>& imm,
4267 const base::Vector<Value>& /* args */) {
4268 LiftoffRegList pinned;
4269
4270 // Load the encoded size in a register for the builtin call.
4271 int encoded_size = WasmExceptionPackage::GetEncodedSize(imm.tag);
4272 LiftoffRegister encoded_size_reg =
4273 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4274 __ LoadConstant(encoded_size_reg, WasmValue(encoded_size));
4275
4276 // Call the WasmAllocateFixedArray builtin to create the values array.
4277 CallRuntimeStub(WasmCode::kWasmAllocateFixedArray,
4278 MakeSig::Returns(kPointerKind).Params(kPointerKind),
4279 {LiftoffAssembler::VarState{
4280 kSmiKind, LiftoffRegister{encoded_size_reg}, 0}},
4281 decoder->position());
4282 MaybeOSR();
4283
4284 // The FixedArray for the exception values is now in the first gp return
4285 // register.
4286 LiftoffRegister values_array{kReturnRegister0};
4287 pinned.set(values_array);
4288
4289 // Now store the exception values in the FixedArray. Do this from last to
4290 // first value, such that we can just pop them from the value stack.
4291 CODE_COMMENT("fill values array");
4292 int index = encoded_size;
4293 auto* sig = imm.tag->sig;
4294 for (size_t param_idx = sig->parameter_count(); param_idx > 0;
4295 --param_idx) {
4296 ValueType type = sig->GetParam(param_idx - 1);
4297 StoreExceptionValue(type, values_array.gp(), &index, pinned);
4298 }
4299 DCHECK_EQ(0, index);
4300
4301 // Load the exception tag.
4302 CODE_COMMENT("load exception tag");
4303 LiftoffRegister exception_tag =
4304 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4305 LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag.gp(), TagsTable, pinned);
4306 __ LoadTaggedPointer(
4307 exception_tag.gp(), exception_tag.gp(), no_reg,
4308 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
4309
4310 // Finally, call WasmThrow.
4311 CallRuntimeStub(WasmCode::kWasmThrow,
4312 MakeSig::Params(kPointerKind, kPointerKind),
4313 {LiftoffAssembler::VarState{kPointerKind, exception_tag, 0},
4314 LiftoffAssembler::VarState{kPointerKind, values_array, 0}},
4315 decoder->position());
4316
4317 int pc_offset = __ pc_offset();
4318 MaybeOSR();
4319 EmitLandingPad(decoder, pc_offset);
4320 }
4321
4322   void AtomicStoreMem(FullDecoder* decoder, StoreType type,
4323 const MemoryAccessImmediate<validate>& imm) {
4324 LiftoffRegList pinned;
4325 LiftoffRegister value = pinned.set(__ PopToRegister());
4326 LiftoffRegister full_index = __ PopToRegister(pinned);
4327 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4328 full_index, pinned, kDoForceCheck);
4329 if (index == no_reg) return;
4330
4331 pinned.set(index);
4332 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4333 uintptr_t offset = imm.offset;
4334 CODE_COMMENT("atomic store to memory");
4335 Register addr = pinned.set(GetMemoryStart(pinned));
4336 LiftoffRegList outer_pinned;
4337 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
4338 __ AtomicStore(addr, index, offset, value, type, outer_pinned);
4339 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
4340 TraceMemoryOperation(true, type.mem_rep(), index, offset,
4341 decoder->position());
4342 }
4343 }
4344
4345   void AtomicLoadMem(FullDecoder* decoder, LoadType type,
4346 const MemoryAccessImmediate<validate>& imm) {
4347 ValueKind kind = type.value_type().kind();
4348 LiftoffRegister full_index = __ PopToRegister();
4349 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4350 full_index, {}, kDoForceCheck);
4351 if (index == no_reg) return;
4352
4353 LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
4354 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4355 uintptr_t offset = imm.offset;
4356 CODE_COMMENT("atomic load from memory");
4357 Register addr = pinned.set(GetMemoryStart(pinned));
4358 RegClass rc = reg_class_for(kind);
4359 LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
4360 __ AtomicLoad(value, addr, index, offset, type, pinned);
4361 __ PushRegister(kind, value);
4362
4363 if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
4364 TraceMemoryOperation(false, type.mem_type().representation(), index,
4365 offset, decoder->position());
4366 }
4367 }
4368
4369   void AtomicBinop(FullDecoder* decoder, StoreType type,
4370 const MemoryAccessImmediate<validate>& imm,
4371 void (LiftoffAssembler::*emit_fn)(Register, Register,
4372 uintptr_t, LiftoffRegister,
4373 LiftoffRegister,
4374 StoreType)) {
4375 ValueKind result_kind = type.value_type().kind();
4376 LiftoffRegList pinned;
4377 LiftoffRegister value = pinned.set(__ PopToRegister());
4378 #ifdef V8_TARGET_ARCH_IA32
4379 // We have to reuse the value register as the result register so that we
4380 // don't run out of registers on ia32. For this we use the value register
4381 // as the result register if it has no other uses. Otherwise we allocate
4382 // a new register and let go of the value register to get spilled.
4383 LiftoffRegister result = value;
4384 if (__ cache_state()->is_used(value)) {
4385 result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
4386 __ Move(result, value, result_kind);
4387 pinned.clear(value);
4388 value = result;
4389 }
4390 #else
4391 LiftoffRegister result =
4392 pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
4393 #endif
4394 LiftoffRegister full_index = __ PopToRegister(pinned);
4395 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4396 full_index, pinned, kDoForceCheck);
4397 if (index == no_reg) return;
4398
4399 pinned.set(index);
4400 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4401
4402 uintptr_t offset = imm.offset;
4403 Register addr = pinned.set(GetMemoryStart(pinned));
4404
4405 (asm_.*emit_fn)(addr, index, offset, value, result, type);
4406 __ PushRegister(result_kind, result);
4407 }
4408
4409   void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
4410 const MemoryAccessImmediate<validate>& imm) {
4411 #ifdef V8_TARGET_ARCH_IA32
4412 // On ia32 we don't have enough registers to first pop all the values off
4413 // the stack and then start with the code generation. Instead we do the
4414 // complete address calculation first, so that the address only needs a
4415 // single register. Afterwards we load all remaining values into the
4416 // other registers.
4417 LiftoffRegister full_index = __ PeekToRegister(2, {});
4418 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4419 full_index, {}, kDoForceCheck);
4420 if (index == no_reg) return;
4421 LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
4422 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4423
4424 uintptr_t offset = imm.offset;
4425 Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
4426 LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
4427 __ emit_i32_add(addr, addr, index);
4428 pinned.clear(LiftoffRegister(index));
4429 LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
4430 LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
4431
4432 // Pop the index from the stack.
4433 __ DropValues(1);
4434
4435 LiftoffRegister result = expected;
4436 if (__ cache_state()->is_used(result)) __ SpillRegister(result);
4437
4438 // We already added the index to addr, so we can just pass no_reg to the
4439 // assembler now.
4440 __ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
4441 type);
4442 __ PushRegister(type.value_type().kind(), result);
4443 return;
4444 #else
4445 ValueKind result_kind = type.value_type().kind();
4446 LiftoffRegList pinned;
4447 LiftoffRegister new_value = pinned.set(__ PopToRegister());
4448 LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
4449 LiftoffRegister full_index = __ PopToRegister(pinned);
4450 Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
4451 full_index, pinned, kDoForceCheck);
4452 if (index == no_reg) return;
4453 pinned.set(index);
4454 AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
4455
4456 uintptr_t offset = imm.offset;
4457 Register addr = pinned.set(GetMemoryStart(pinned));
4458 LiftoffRegister result =
4459 pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
4460
4461 __ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
4462 type);
4463 __ PushRegister(result_kind, result);
4464 #endif
4465 }
4466
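  // Shared helper to call a WebAssembly runtime stub (builtin): sets up the
  // stub call descriptor, records the source position (if any), and defines a
  // safepoint for the call.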
4467   void CallRuntimeStub(WasmCode::RuntimeStubId stub_id, const ValueKindSig& sig,
4468 std::initializer_list<LiftoffAssembler::VarState> params,
4469 int position) {
4470 CODE_COMMENT(
4471 (std::string{"call builtin: "} + GetRuntimeStubName(stub_id)).c_str());
4472 auto interface_descriptor = Builtins::CallInterfaceDescriptorFor(
4473 RuntimeStubIdToBuiltinName(stub_id));
4474 auto* call_descriptor = compiler::Linkage::GetStubCallDescriptor(
4475 compilation_zone_, // zone
4476 interface_descriptor, // descriptor
4477 interface_descriptor.GetStackParameterCount(), // stack parameter count
4478 compiler::CallDescriptor::kNoFlags, // flags
4479 compiler::Operator::kNoProperties, // properties
4480 StubCallMode::kCallWasmRuntimeStub); // stub call mode
4481
4482 __ PrepareBuiltinCall(&sig, call_descriptor, params);
4483 if (position != kNoSourcePosition) {
4484 source_position_table_builder_.AddPosition(
4485 __ pc_offset(), SourcePosition(position), true);
4486 }
4487 __ CallRuntimeStub(stub_id);
4488 DefineSafepoint();
4489 }
4490
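  // Implements the atomic wait instructions: bounds- and alignment-checks the
  // address, then calls the matching runtime stub with the index, expected
  // value, and timeout taken from the value stack.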
4491   void AtomicWait(FullDecoder* decoder, ValueKind kind,
4492 const MemoryAccessImmediate<validate>& imm) {
4493 LiftoffRegister full_index = __ PeekToRegister(2, {});
4494 Register index_reg =
4495 BoundsCheckMem(decoder, element_size_bytes(kind), imm.offset,
4496 full_index, {}, kDoForceCheck);
4497 if (index_reg == no_reg) return;
4498 LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
4499 AlignmentCheckMem(decoder, element_size_bytes(kind), imm.offset, index_reg,
4500 pinned);
4501
4502 uintptr_t offset = imm.offset;
4503 Register index_plus_offset =
4504 __ cache_state()->is_used(LiftoffRegister(index_reg))
4505 ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
4506 : index_reg;
4507 // TODO(clemensb): Skip this if memory is 64 bit.
4508 __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
4509 if (offset) {
4510 __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
4511 }
4512
4513 LiftoffAssembler::VarState timeout =
4514 __ cache_state()->stack_state.end()[-1];
4515 LiftoffAssembler::VarState expected_value =
4516 __ cache_state()->stack_state.end()[-2];
4517 LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
4518
4519 // We have to set the correct register for the index.
4520 index.MakeRegister(LiftoffRegister(index_plus_offset));
4521
4522 static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
4523 // 64 bit systems (kNeedI64RegPair == false):
4524 {WasmCode::kWasmI64AtomicWait64, WasmCode::kWasmI32AtomicWait64},
4525 // 32 bit systems (kNeedI64RegPair == true):
4526 {WasmCode::kWasmI64AtomicWait32, WasmCode::kWasmI32AtomicWait32}};
4527 auto target = kTargets[kNeedI64RegPair][kind == kI32];
4528
4529 CallRuntimeStub(target, MakeSig::Params(kPointerKind, kind, kI64),
4530 {index, expected_value, timeout}, decoder->position());
4531 // Pop parameters from the value stack.
4532 __ DropValues(3);
4533
4534 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
4535
4536 __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
4537 }
4538
4539   void AtomicNotify(FullDecoder* decoder,
4540 const MemoryAccessImmediate<validate>& imm) {
4541 LiftoffRegister full_index = __ PeekToRegister(1, {});
4542 Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
4543 full_index, {}, kDoForceCheck);
4544 if (index_reg == no_reg) return;
4545 LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
4546 AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
4547
4548 uintptr_t offset = imm.offset;
4549 Register index_plus_offset =
4550 __ cache_state()->is_used(LiftoffRegister(index_reg))
4551 ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
4552 : index_reg;
4553 // TODO(clemensb): Skip this if memory is 64 bit.
4554 __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
4555 if (offset) {
4556 __ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
4557 }
4558
4559 LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1];
4560 LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2];
4561 index.MakeRegister(LiftoffRegister(index_plus_offset));
4562
4563 CallRuntimeStub(WasmCode::kWasmAtomicNotify,
4564 MakeSig::Returns(kI32).Params(kPointerKind, kI32),
4565 {index, count}, decoder->position());
4566 // Pop parameters from the value stack.
4567 __ DropValues(2);
4568
4569 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
4570
4571 __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
4572 }
4573
4574 #define ATOMIC_STORE_LIST(V) \
4575 V(I32AtomicStore, kI32Store) \
4576 V(I64AtomicStore, kI64Store) \
4577 V(I32AtomicStore8U, kI32Store8) \
4578 V(I32AtomicStore16U, kI32Store16) \
4579 V(I64AtomicStore8U, kI64Store8) \
4580 V(I64AtomicStore16U, kI64Store16) \
4581 V(I64AtomicStore32U, kI64Store32)
4582
4583 #define ATOMIC_LOAD_LIST(V) \
4584 V(I32AtomicLoad, kI32Load) \
4585 V(I64AtomicLoad, kI64Load) \
4586 V(I32AtomicLoad8U, kI32Load8U) \
4587 V(I32AtomicLoad16U, kI32Load16U) \
4588 V(I64AtomicLoad8U, kI64Load8U) \
4589 V(I64AtomicLoad16U, kI64Load16U) \
4590 V(I64AtomicLoad32U, kI64Load32U)
4591
4592 #define ATOMIC_BINOP_INSTRUCTION_LIST(V) \
4593 V(Add, I32AtomicAdd, kI32Store) \
4594 V(Add, I64AtomicAdd, kI64Store) \
4595 V(Add, I32AtomicAdd8U, kI32Store8) \
4596 V(Add, I32AtomicAdd16U, kI32Store16) \
4597 V(Add, I64AtomicAdd8U, kI64Store8) \
4598 V(Add, I64AtomicAdd16U, kI64Store16) \
4599 V(Add, I64AtomicAdd32U, kI64Store32) \
4600 V(Sub, I32AtomicSub, kI32Store) \
4601 V(Sub, I64AtomicSub, kI64Store) \
4602 V(Sub, I32AtomicSub8U, kI32Store8) \
4603 V(Sub, I32AtomicSub16U, kI32Store16) \
4604 V(Sub, I64AtomicSub8U, kI64Store8) \
4605 V(Sub, I64AtomicSub16U, kI64Store16) \
4606 V(Sub, I64AtomicSub32U, kI64Store32) \
4607 V(And, I32AtomicAnd, kI32Store) \
4608 V(And, I64AtomicAnd, kI64Store) \
4609 V(And, I32AtomicAnd8U, kI32Store8) \
4610 V(And, I32AtomicAnd16U, kI32Store16) \
4611 V(And, I64AtomicAnd8U, kI64Store8) \
4612 V(And, I64AtomicAnd16U, kI64Store16) \
4613 V(And, I64AtomicAnd32U, kI64Store32) \
4614 V(Or, I32AtomicOr, kI32Store) \
4615 V(Or, I64AtomicOr, kI64Store) \
4616 V(Or, I32AtomicOr8U, kI32Store8) \
4617 V(Or, I32AtomicOr16U, kI32Store16) \
4618 V(Or, I64AtomicOr8U, kI64Store8) \
4619 V(Or, I64AtomicOr16U, kI64Store16) \
4620 V(Or, I64AtomicOr32U, kI64Store32) \
4621 V(Xor, I32AtomicXor, kI32Store) \
4622 V(Xor, I64AtomicXor, kI64Store) \
4623 V(Xor, I32AtomicXor8U, kI32Store8) \
4624 V(Xor, I32AtomicXor16U, kI32Store16) \
4625 V(Xor, I64AtomicXor8U, kI64Store8) \
4626 V(Xor, I64AtomicXor16U, kI64Store16) \
4627 V(Xor, I64AtomicXor32U, kI64Store32) \
4628 V(Exchange, I32AtomicExchange, kI32Store) \
4629 V(Exchange, I64AtomicExchange, kI64Store) \
4630 V(Exchange, I32AtomicExchange8U, kI32Store8) \
4631 V(Exchange, I32AtomicExchange16U, kI32Store16) \
4632 V(Exchange, I64AtomicExchange8U, kI64Store8) \
4633 V(Exchange, I64AtomicExchange16U, kI64Store16) \
4634 V(Exchange, I64AtomicExchange32U, kI64Store32)
4635
4636 #define ATOMIC_COMPARE_EXCHANGE_LIST(V) \
4637 V(I32AtomicCompareExchange, kI32Store) \
4638 V(I64AtomicCompareExchange, kI64Store) \
4639 V(I32AtomicCompareExchange8U, kI32Store8) \
4640 V(I32AtomicCompareExchange16U, kI32Store16) \
4641 V(I64AtomicCompareExchange8U, kI64Store8) \
4642 V(I64AtomicCompareExchange16U, kI64Store16) \
4643 V(I64AtomicCompareExchange32U, kI64Store32)
4644
4645   void AtomicOp(FullDecoder* decoder, WasmOpcode opcode,
4646 base::Vector<Value> args,
4647 const MemoryAccessImmediate<validate>& imm, Value* result) {
4648 switch (opcode) {
4649 #define ATOMIC_STORE_OP(name, type) \
4650 case wasm::kExpr##name: \
4651 AtomicStoreMem(decoder, StoreType::type, imm); \
4652 break;
4653
4654 ATOMIC_STORE_LIST(ATOMIC_STORE_OP)
4655 #undef ATOMIC_STORE_OP
4656
4657 #define ATOMIC_LOAD_OP(name, type) \
4658 case wasm::kExpr##name: \
4659 AtomicLoadMem(decoder, LoadType::type, imm); \
4660 break;
4661
4662 ATOMIC_LOAD_LIST(ATOMIC_LOAD_OP)
4663 #undef ATOMIC_LOAD_OP
4664
4665 #define ATOMIC_BINOP_OP(op, name, type) \
4666 case wasm::kExpr##name: \
4667 AtomicBinop(decoder, StoreType::type, imm, &LiftoffAssembler::Atomic##op); \
4668 break;
4669
4670 ATOMIC_BINOP_INSTRUCTION_LIST(ATOMIC_BINOP_OP)
4671 #undef ATOMIC_BINOP_OP
4672
4673 #define ATOMIC_COMPARE_EXCHANGE_OP(name, type) \
4674 case wasm::kExpr##name: \
4675 AtomicCompareExchange(decoder, StoreType::type, imm); \
4676 break;
4677
4678 ATOMIC_COMPARE_EXCHANGE_LIST(ATOMIC_COMPARE_EXCHANGE_OP)
4679 #undef ATOMIC_COMPARE_EXCHANGE_OP
4680
4681 case kExprI32AtomicWait:
4682 AtomicWait(decoder, kI32, imm);
4683 break;
4684 case kExprI64AtomicWait:
4685 AtomicWait(decoder, kI64, imm);
4686 break;
4687 case kExprAtomicNotify:
4688 AtomicNotify(decoder, imm);
4689 break;
4690 default:
4691 unsupported(decoder, kAtomics, "atomicop");
4692 }
4693 }
4694
4695 #undef ATOMIC_STORE_LIST
4696 #undef ATOMIC_LOAD_LIST
4697 #undef ATOMIC_BINOP_INSTRUCTION_LIST
4698 #undef ATOMIC_COMPARE_EXCHANGE_LIST
4699
4700   void AtomicFence(FullDecoder* decoder) { __ AtomicFence(); }
4701
4702   void MemoryInit(FullDecoder* decoder,
4703 const MemoryInitImmediate<validate>& imm, const Value&,
4704 const Value&, const Value&) {
4705 LiftoffRegList pinned;
4706 LiftoffRegister size = pinned.set(__ PopToRegister());
4707 LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
4708 LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
4709
4710 Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
4711 __ FillInstanceInto(instance);
4712
4713 LiftoffRegister segment_index =
4714 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4715 __ LoadConstant(segment_index, WasmValue(imm.data_segment.index));
4716
4717 ExternalReference ext_ref = ExternalReference::wasm_memory_init();
4718 auto sig =
4719 MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32, kI32);
4720 LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
4721 segment_index, size};
4722 // We don't need the instance anymore after the call. We can use the
4723 // register for the result.
4724 LiftoffRegister result(instance);
4725 GenerateCCall(&result, &sig, kVoid, args, ext_ref);
4726 Label* trap_label =
4727 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
4728 __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
4729 }
4730
4731   void DataDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
4732 LiftoffRegList pinned;
4733
4734 Register seg_size_array =
4735 pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
4736 LOAD_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, kSystemPointerSize,
4737 pinned);
4738
4739 LiftoffRegister seg_index =
4740 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4741 // Scale the seg_index for the array access.
4742 __ LoadConstant(seg_index, WasmValue(imm.index << element_size_log2(kI32)));
4743
4744 // Set the length of the segment to '0' to drop it.
4745 LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4746 __ LoadConstant(null_reg, WasmValue(0));
4747 __ Store(seg_size_array, seg_index.gp(), 0, null_reg, StoreType::kI32Store,
4748 pinned);
4749 }
4750
4751   void MemoryCopy(FullDecoder* decoder,
4752 const MemoryCopyImmediate<validate>& imm, const Value&,
4753 const Value&, const Value&) {
4754 LiftoffRegList pinned;
4755 LiftoffRegister size = pinned.set(__ PopToRegister());
4756 LiftoffRegister src = pinned.set(__ PopToRegister(pinned));
4757 LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
4758 Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
4759 __ FillInstanceInto(instance);
4760 ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
4761 auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32);
4762 LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
4763 // We don't need the instance anymore after the call. We can use the
4764 // register for the result.
4765 LiftoffRegister result(instance);
4766 GenerateCCall(&result, &sig, kVoid, args, ext_ref);
4767 Label* trap_label =
4768 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
4769 __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
4770 }
4771
4772   void MemoryFill(FullDecoder* decoder,
4773 const MemoryIndexImmediate<validate>& imm, const Value&,
4774 const Value&, const Value&) {
4775 LiftoffRegList pinned;
4776 LiftoffRegister size = pinned.set(__ PopToRegister());
4777 LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
4778 LiftoffRegister dst = pinned.set(__ PopToRegister(pinned));
4779 Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
4780 __ FillInstanceInto(instance);
4781 ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
4782 auto sig = MakeSig::Returns(kI32).Params(kPointerKind, kI32, kI32, kI32);
4783 LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
4784 // We don't need the instance anymore after the call. We can use the
4785 // register for the result.
4786 LiftoffRegister result(instance);
4787 GenerateCCall(&result, &sig, kVoid, args, ext_ref);
4788 Label* trap_label =
4789 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds);
4790 __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
4791 }
4792
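  // Materializes the Smi encoding of {value} as a constant in {reg}.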
4793   void LoadSmi(LiftoffRegister reg, int value) {
4794 Address smi_value = Smi::FromInt(value).ptr();
4795 using smi_type = std::conditional_t<kSmiKind == kI32, int32_t, int64_t>;
4796 __ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)});
4797 }
4798
4799   void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
4800 base::Vector<Value> args) {
4801 LiftoffRegList pinned;
4802 LiftoffRegister table_index_reg =
4803 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4804
4805 LoadSmi(table_index_reg, imm.table.index);
4806 LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
4807
4808 LiftoffRegister segment_index_reg =
4809 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4810 LoadSmi(segment_index_reg, imm.element_segment.index);
4811 LiftoffAssembler::VarState segment_index(kPointerKind, segment_index_reg,
4812 0);
4813
4814 LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1];
4815 LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2];
4816 LiftoffAssembler::VarState dst = __ cache_state()->stack_state.end()[-3];
4817
4818 CallRuntimeStub(WasmCode::kWasmTableInit,
4819 MakeSig::Params(kI32, kI32, kI32, kSmiKind, kSmiKind),
4820 {dst, src, size, table_index, segment_index},
4821 decoder->position());
4822
4823 // Pop parameters from the value stack.
4824 __ cache_state()->stack_state.pop_back(3);
4825
4826 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
4827 }
4828
4829   void ElemDrop(FullDecoder* decoder, const IndexImmediate<validate>& imm) {
4830 LiftoffRegList pinned;
4831 Register dropped_elem_segments =
4832 pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
4833 LOAD_INSTANCE_FIELD(dropped_elem_segments, DroppedElemSegments,
4834 kSystemPointerSize, pinned);
4835
4836 LiftoffRegister seg_index =
4837 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4838 __ LoadConstant(seg_index, WasmValue(imm.index));
4839
4840 // Mark the segment as dropped by setting its value in the dropped
4841 // segments list to 1.
4842 LiftoffRegister one_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4843 __ LoadConstant(one_reg, WasmValue(1));
4844 __ Store(dropped_elem_segments, seg_index.gp(), 0, one_reg,
4845 StoreType::kI32Store8, pinned);
4846 }
4847
4848   void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
4849 base::Vector<Value> args) {
4850 LiftoffRegList pinned;
4851
4852 LiftoffRegister table_dst_index_reg =
4853 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4854 LoadSmi(table_dst_index_reg, imm.table_dst.index);
4855 LiftoffAssembler::VarState table_dst_index(kPointerKind,
4856 table_dst_index_reg, 0);
4857
4858 LiftoffRegister table_src_index_reg =
4859 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4860 LoadSmi(table_src_index_reg, imm.table_src.index);
4861 LiftoffAssembler::VarState table_src_index(kPointerKind,
4862 table_src_index_reg, 0);
4863
4864 LiftoffAssembler::VarState size = __ cache_state()->stack_state.end()[-1];
4865 LiftoffAssembler::VarState src = __ cache_state()->stack_state.end()[-2];
4866 LiftoffAssembler::VarState dst = __ cache_state()->stack_state.end()[-3];
4867
4868 CallRuntimeStub(WasmCode::kWasmTableCopy,
4869 MakeSig::Params(kI32, kI32, kI32, kSmiKind, kSmiKind),
4870 {dst, src, size, table_dst_index, table_src_index},
4871 decoder->position());
4872
4873 // Pop parameters from the value stack.
4874 __ cache_state()->stack_state.pop_back(3);
4875
4876 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
4877 }
4878
4879   void TableGrow(FullDecoder* decoder, const IndexImmediate<validate>& imm,
4880 const Value&, const Value&, Value* result) {
4881 LiftoffRegList pinned;
4882
4883 LiftoffRegister table_index_reg =
4884 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4885 LoadSmi(table_index_reg, imm.index);
4886 LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
4887
4888 LiftoffAssembler::VarState delta = __ cache_state()->stack_state.end()[-1];
4889 LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2];
4890
4891 CallRuntimeStub(
4892 WasmCode::kWasmTableGrow,
4893 MakeSig::Returns(kSmiKind).Params(kSmiKind, kI32, kTaggedKind),
4894 {table_index, delta, value}, decoder->position());
4895
4896 // Pop parameters from the value stack.
4897 __ cache_state()->stack_state.pop_back(2);
4898
4899 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
4900 __ SmiToInt32(kReturnRegister0);
4901 __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
4902 }
4903
4904   void TableSize(FullDecoder* decoder, const IndexImmediate<validate>& imm,
4905 Value*) {
4906 // We have to look up instance->tables[table_index].length.
4907
4908 LiftoffRegList pinned;
4909     // Load the instance's tables array.
4910 Register tables = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
4911 LOAD_TAGGED_PTR_INSTANCE_FIELD(tables, Tables, pinned);
4912
4913 Register table = tables;
4914 __ LoadTaggedPointer(
4915 table, tables, no_reg,
4916 ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
4917
4918 int length_field_size = WasmTableObject::kCurrentLengthOffsetEnd -
4919 WasmTableObject::kCurrentLengthOffset + 1;
4920
4921 Register result = table;
4922 __ Load(LiftoffRegister(result), table, no_reg,
4923 wasm::ObjectAccess::ToTagged(WasmTableObject::kCurrentLengthOffset),
4924 length_field_size == 4 ? LoadType::kI32Load : LoadType::kI64Load,
4925 pinned);
4926
4927 __ SmiUntag(result);
4928 __ PushRegister(kI32, LiftoffRegister(result));
4929 }
4930
4931   void TableFill(FullDecoder* decoder, const IndexImmediate<validate>& imm,
4932 const Value&, const Value&, const Value&) {
4933 LiftoffRegList pinned;
4934
4935 LiftoffRegister table_index_reg =
4936 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
4937 LoadSmi(table_index_reg, imm.index);
4938 LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
4939
4940 LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1];
4941 LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2];
4942 LiftoffAssembler::VarState start = __ cache_state()->stack_state.end()[-3];
4943
4944 CallRuntimeStub(WasmCode::kWasmTableFill,
4945 MakeSig::Params(kSmiKind, kI32, kI32, kTaggedKind),
4946 {table_index, start, count, value}, decoder->position());
4947
4948 // Pop parameters from the value stack.
4949 __ cache_state()->stack_state.pop_back(3);
4950
4951 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
4952 }
4953
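  // Common implementation for struct.new_with_rtt and struct.new_default:
  // calls the allocation builtin with the RTT on top of the stack, then
  // initializes the fields either from the value stack or with their default
  // values.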
4954   void StructNew(FullDecoder* decoder,
4955 const StructIndexImmediate<validate>& imm, const Value& rtt,
4956 bool initial_values_on_stack) {
4957 LiftoffAssembler::VarState rtt_value =
4958 __ cache_state()->stack_state.end()[-1];
4959 CallRuntimeStub(WasmCode::kWasmAllocateStructWithRtt,
4960 MakeSig::Returns(kRef).Params(rtt.type.kind()), {rtt_value},
4961 decoder->position());
4962 // Drop the RTT.
4963 __ cache_state()->stack_state.pop_back(1);
4964
4965 LiftoffRegister obj(kReturnRegister0);
4966 LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
4967 for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
4968 i--;
4969 int offset = StructFieldOffset(imm.struct_type, i);
4970 ValueKind field_kind = imm.struct_type->field(i).kind();
4971 LiftoffRegister value = initial_values_on_stack
4972 ? pinned.set(__ PopToRegister(pinned))
4973 : pinned.set(__ GetUnusedRegister(
4974 reg_class_for(field_kind), pinned));
4975 if (!initial_values_on_stack) {
4976 if (!CheckSupportedType(decoder, field_kind, "default value")) return;
4977 SetDefaultValue(value, field_kind, pinned);
4978 }
4979 StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
4980 pinned.clear(value);
4981 }
4982 // If this assert fails then initialization of padding field might be
4983 // necessary.
4984 static_assert(Heap::kMinObjectSizeInTaggedWords == 2 &&
4985 WasmStruct::kHeaderSize == 2 * kTaggedSize,
4986 "empty struct might require initialization of padding field");
4987 __ PushRegister(kRef, obj);
4988 }
4989
4990   void StructNewWithRtt(FullDecoder* decoder,
4991 const StructIndexImmediate<validate>& imm,
4992 const Value& rtt, const Value args[], Value* result) {
4993 StructNew(decoder, imm, rtt, true);
4994 }
4995
4996   void StructNewDefault(FullDecoder* decoder,
4997 const StructIndexImmediate<validate>& imm,
4998 const Value& rtt, Value* result) {
4999 StructNew(decoder, imm, rtt, false);
5000 }
5001
5002   void StructGet(FullDecoder* decoder, const Value& struct_obj,
5003 const FieldImmediate<validate>& field, bool is_signed,
5004 Value* result) {
5005 const StructType* struct_type = field.struct_imm.struct_type;
5006 ValueKind field_kind = struct_type->field(field.field_imm.index).kind();
5007 if (!CheckSupportedType(decoder, field_kind, "field load")) return;
5008 int offset = StructFieldOffset(struct_type, field.field_imm.index);
5009 LiftoffRegList pinned;
5010 LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
5011 MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
5012 LiftoffRegister value =
5013 __ GetUnusedRegister(reg_class_for(field_kind), pinned);
5014 LoadObjectField(value, obj.gp(), no_reg, offset, field_kind, is_signed,
5015 pinned);
5016 __ PushRegister(unpacked(field_kind), value);
5017 }
5018
5019   void StructSet(FullDecoder* decoder, const Value& struct_obj,
5020 const FieldImmediate<validate>& field,
5021 const Value& field_value) {
5022 const StructType* struct_type = field.struct_imm.struct_type;
5023 ValueKind field_kind = struct_type->field(field.field_imm.index).kind();
5024 int offset = StructFieldOffset(struct_type, field.field_imm.index);
5025 LiftoffRegList pinned;
5026 LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
5027 LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
5028 MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
5029 StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
5030 }
5031
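  // Common implementation for array.new_with_rtt and array.new_default:
  // checks the maximum array length, calls the allocation builtin, and, if an
  // initial value is on the stack, fills the elements in a loop.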
5032   void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
5033 ValueKind rtt_kind, bool initial_value_on_stack) {
5034 // Max length check.
5035 {
5036 LiftoffRegister length =
5037 __ LoadToRegister(__ cache_state()->stack_state.end()[-2], {});
5038 Label* trap_label =
5039 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
5040 __ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
5041 WasmArray::MaxLength(imm.array_type));
5042 }
5043 ValueKind elem_kind = imm.array_type->element_type().kind();
5044 int elem_size = element_size_bytes(elem_kind);
5045 // Allocate the array.
5046 {
5047 LiftoffAssembler::VarState rtt_var =
5048 __ cache_state()->stack_state.end()[-1];
5049 LiftoffAssembler::VarState length_var =
5050 __ cache_state()->stack_state.end()[-2];
5051 LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {});
5052 __ LoadConstant(elem_size_reg, WasmValue(elem_size));
5053 LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
5054
5055 WasmCode::RuntimeStubId stub_id =
5056 initial_value_on_stack
5057 ? WasmCode::kWasmAllocateArray_Uninitialized
5058 : is_reference(elem_kind) ? WasmCode::kWasmAllocateArray_InitNull
5059 : WasmCode::kWasmAllocateArray_InitZero;
5060 CallRuntimeStub(
5061 stub_id, MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
5062 {rtt_var, length_var, elem_size_var}, decoder->position());
5063 // Drop the RTT.
5064 __ cache_state()->stack_state.pop_back(1);
5065 }
5066
5067 LiftoffRegister obj(kReturnRegister0);
5068 if (initial_value_on_stack) {
5069 LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
5070 LiftoffRegister length = pinned.set(__ PopToModifiableRegister(pinned));
5071 LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
5072
5073 // Initialize the array's elements.
5074 LiftoffRegister offset = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5075 __ LoadConstant(
5076 offset,
5077 WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
5078 LiftoffRegister end_offset = length;
5079 if (element_size_log2(elem_kind) != 0) {
5080 __ emit_i32_shli(end_offset.gp(), length.gp(),
5081 element_size_log2(elem_kind));
5082 }
5083 __ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
5084 Label loop, done;
5085 __ bind(&loop);
5086 __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
5087 end_offset.gp());
5088 StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
5089 __ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
5090 __ emit_jump(&loop);
5091
5092 __ bind(&done);
5093 } else {
5094 if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
5095 // Drop the length.
5096 __ cache_state()->stack_state.pop_back(1);
5097 }
5098 __ PushRegister(kRef, obj);
5099 }
5100
5101   void ArrayNewWithRtt(FullDecoder* decoder,
5102 const ArrayIndexImmediate<validate>& imm,
5103 const Value& length_value, const Value& initial_value,
5104 const Value& rtt, Value* result) {
5105 ArrayNew(decoder, imm, rtt.type.kind(), true);
5106 }
5107
5108   void ArrayNewDefault(FullDecoder* decoder,
5109 const ArrayIndexImmediate<validate>& imm,
5110 const Value& length, const Value& rtt, Value* result) {
5111 ArrayNew(decoder, imm, rtt.type.kind(), false);
5112 }
5113
5114   void ArrayGet(FullDecoder* decoder, const Value& array_obj,
5115 const ArrayIndexImmediate<validate>& imm,
5116 const Value& index_val, bool is_signed, Value* result) {
5117 LiftoffRegList pinned;
5118 LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
5119 LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
5120 MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
5121 BoundsCheck(decoder, array, index, pinned);
5122 ValueKind elem_kind = imm.array_type->element_type().kind();
5123 if (!CheckSupportedType(decoder, elem_kind, "array load")) return;
5124 int elem_size_shift = element_size_log2(elem_kind);
5125 if (elem_size_shift != 0) {
5126 __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
5127 }
5128 LiftoffRegister value =
5129 __ GetUnusedRegister(reg_class_for(elem_kind), pinned);
5130 LoadObjectField(value, array.gp(), index.gp(),
5131 wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
5132 elem_kind, is_signed, pinned);
5133 __ PushRegister(unpacked(elem_kind), value);
5134 }
5135
5136   void ArraySet(FullDecoder* decoder, const Value& array_obj,
5137 const ArrayIndexImmediate<validate>& imm,
5138 const Value& index_val, const Value& value_val) {
5139 LiftoffRegList pinned;
5140 LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
5141 DCHECK_EQ(reg_class_for(imm.array_type->element_type().kind()),
5142 value.reg_class());
5143 LiftoffRegister index = pinned.set(__ PopToModifiableRegister(pinned));
5144 LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
5145 MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
5146 BoundsCheck(decoder, array, index, pinned);
5147 ValueKind elem_kind = imm.array_type->element_type().kind();
5148 int elem_size_shift = element_size_log2(elem_kind);
5149 if (elem_size_shift != 0) {
5150 __ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
5151 }
5152 StoreObjectField(array.gp(), index.gp(),
5153 wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
5154 value, pinned, elem_kind);
5155 }
5156
5157   void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
5158 LiftoffRegList pinned;
5159 LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
5160 MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type);
5161 LiftoffRegister len = __ GetUnusedRegister(kGpReg, pinned);
5162 int kLengthOffset = wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
5163 LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kI32, false, pinned);
5164 __ PushRegister(kI32, len);
5165 }
5166
5167   void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
5168 const Value& src, const Value& src_index,
5169 const Value& length) {
5170 // TODO(7748): Unify implementation with TF: Implement this with
5171 // GenerateCCall. Remove runtime function and builtin in wasm.tq.
5172 CallRuntimeStub(WasmCode::kWasmArrayCopyWithChecks,
5173 MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef),
5174 // Builtin parameter order:
5175 // [dst_index, src_index, length, dst, src].
5176 {__ cache_state()->stack_state.end()[-4],
5177 __ cache_state()->stack_state.end()[-2],
5178 __ cache_state()->stack_state.end()[-1],
5179 __ cache_state()->stack_state.end()[-5],
5180 __ cache_state()->stack_state.end()[-3]},
5181 decoder->position());
5182 __ cache_state()->stack_state.pop_back(5);
5183 }
5184
5185   void ArrayInit(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
5186 const base::Vector<Value>& elements, const Value& rtt,
5187 Value* result) {
5188 ValueKind rtt_kind = rtt.type.kind();
5189 ValueKind elem_kind = imm.array_type->element_type().kind();
5190 // Allocate the array.
5191 {
5192 LiftoffAssembler::VarState rtt_var =
5193 __ cache_state()->stack_state.end()[-1];
5194
5195 LiftoffRegList pinned;
5196
5197 LiftoffRegister elem_size_reg =
5198 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5199 __ LoadConstant(elem_size_reg, WasmValue(element_size_bytes(elem_kind)));
5200 LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
5201
5202 LiftoffRegister length_reg =
5203 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5204 __ LoadConstant(length_reg,
5205 WasmValue(static_cast<int32_t>(elements.size())));
5206 LiftoffAssembler::VarState length_var(kI32, length_reg, 0);
5207
5208 CallRuntimeStub(WasmCode::kWasmAllocateArray_Uninitialized,
5209 MakeSig::Returns(kRef).Params(rtt_kind, kI32, kI32),
5210 {rtt_var, length_var, elem_size_var},
5211 decoder->position());
5212 // Drop the RTT.
5213 __ DropValues(1);
5214 }
5215
5216 // Initialize the array with stack arguments.
5217 LiftoffRegister array(kReturnRegister0);
5218 if (!CheckSupportedType(decoder, elem_kind, "array.init")) return;
5219 for (int i = static_cast<int>(elements.size()) - 1; i >= 0; i--) {
5220 LiftoffRegList pinned = LiftoffRegList::ForRegs(array);
5221 LiftoffRegister element = pinned.set(__ PopToRegister(pinned));
5222 LiftoffRegister offset_reg =
5223 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5224 __ LoadConstant(offset_reg, WasmValue(i << element_size_log2(elem_kind)));
5225 StoreObjectField(array.gp(), offset_reg.gp(),
5226 wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
5227 element, pinned, elem_kind);
5228 }
5229
5230 // Push the array onto the stack.
5231 __ PushRegister(kRef, array);
5232 }
5233
5234 // 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
5235 constexpr static int kI31To32BitSmiShift = 33;
5236
5237   void I31New(FullDecoder* decoder, const Value& input, Value* result) {
5238 LiftoffRegister src = __ PopToRegister();
5239 LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
5240 if (SmiValuesAre31Bits()) {
5241 STATIC_ASSERT(kSmiTag == 0);
5242 __ emit_i32_shli(dst.gp(), src.gp(), kSmiTagSize);
5243 } else {
5244 DCHECK(SmiValuesAre32Bits());
5245 __ emit_i64_shli(dst, src, kI31To32BitSmiShift);
5246 }
5247 __ PushRegister(kRef, dst);
5248 }
5249
5250   void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
5251 LiftoffRegister src = __ PopToRegister();
5252 LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
5253 if (SmiValuesAre31Bits()) {
5254 __ emit_i32_sari(dst.gp(), src.gp(), kSmiTagSize);
5255 } else {
5256 DCHECK(SmiValuesAre32Bits());
5257 __ emit_i64_sari(dst, src, kI31To32BitSmiShift);
5258 }
5259 __ PushRegister(kI32, dst);
5260 }
5261
5262 void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
5263 LiftoffRegister src = __ PopToRegister();
5264 LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {src}, {});
5265 if (SmiValuesAre31Bits()) {
5266 __ emit_i32_shri(dst.gp(), src.gp(), kSmiTagSize);
5267 } else {
5268 DCHECK(SmiValuesAre32Bits());
5269 __ emit_i64_shri(dst, src, kI31To32BitSmiShift);
5270 }
5271 __ PushRegister(kI32, dst);
5272 }
5273
5274 void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
5275 LiftoffRegister rtt = __ GetUnusedRegister(kGpReg, {});
5276 LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps, {});
5277 __ LoadTaggedPointer(
5278 rtt.gp(), rtt.gp(), no_reg,
5279 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index), {});
5280 __ PushRegister(kRttWithDepth, rtt);
5281 }
5282
5283 void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
5284 Value* result, WasmRttSubMode mode) {
5285 ValueKind parent_value_kind = parent.type.kind();
5286 ValueKind rtt_value_kind = kRttWithDepth;
5287 LiftoffAssembler::VarState parent_var =
5288 __ cache_state()->stack_state.end()[-1];
5289 LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
5290 __ LoadConstant(type_reg, WasmValue(type_index));
5291 LiftoffAssembler::VarState type_var(kI32, type_reg, 0);
5292 WasmCode::RuntimeStubId target = mode == WasmRttSubMode::kCanonicalize
5293 ? WasmCode::kWasmAllocateRtt
5294 : WasmCode::kWasmAllocateFreshRtt;
5295 CallRuntimeStub(
5296 target,
5297 MakeSig::Returns(rtt_value_kind).Params(kI32, parent_value_kind),
5298 {type_var, parent_var}, decoder->position());
5299 // Drop the parent RTT.
5300 __ cache_state()->stack_state.pop_back(1);
5301 __ PushRegister(rtt_value_kind, LiftoffRegister(kReturnRegister0));
5302 }
5303
5304 enum NullSucceeds : bool { // --
5305 kNullSucceeds = true,
5306 kNullFails = false
5307 };
5308
5309 // Falls through on match (=successful type check).
5310 // Returns the register containing the object.
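// A null object passes the check iff {null_succeeds} is kNullSucceeds (as
// used by ref.cast below); ref.test and br_on_cast pass kNullFails.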
5311 LiftoffRegister SubtypeCheck(FullDecoder* decoder, const Value& obj,
5312 const Value& rtt, Label* no_match,
5313 NullSucceeds null_succeeds,
5314 LiftoffRegList pinned = {},
5315 Register opt_scratch = no_reg) {
5316 Label match;
5317 LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned));
5318 LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
5319
5320 // Reserve all temporary registers up front, so that the cache state
5321 // tracking doesn't get confused by the following conditional jumps.
5322 LiftoffRegister tmp1 =
5323 opt_scratch != no_reg
5324 ? LiftoffRegister(opt_scratch)
5325 : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5326 LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5327 if (obj.type.is_nullable()) {
5328 LoadNullValue(tmp1.gp(), pinned);
5329 __ emit_cond_jump(kEqual, null_succeeds ? &match : no_match,
5330 obj.type.kind(), obj_reg.gp(), tmp1.gp());
5331 }
5332
5333 // Perform a regular type check. Check for exact match first.
5334 __ LoadMap(tmp1.gp(), obj_reg.gp());
5335 // {tmp1} now holds the object's map.
5336
5337 if (decoder->module_->has_signature(rtt.type.ref_index())) {
5338 // Function case: currently, the only way for a function to match an rtt
5339 // is if its map is equal to that rtt.
5340 __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
5341 rtt_reg.gp());
5342 __ bind(&match);
5343 return obj_reg;
5344 }
5345
5346 // Array/struct case until the rest of the function.
5347
5348 // Check for rtt equality, and if not, check if the rtt is a struct/array
5349 // rtt.
5350 __ emit_cond_jump(kEqual, &match, rtt.type.kind(), tmp1.gp(), rtt_reg.gp());
5351
5352 // Constant-time subtyping check: load exactly one candidate RTT from the
5353 // supertypes list.
5354 // Step 1: load the WasmTypeInfo into {tmp1}.
5355 constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
5356 Map::kConstructorOrBackPointerOrNativeContextOffset);
5357 __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset, pinned);
5358 // Step 2: load the super types list into {tmp1}.
5359 constexpr int kSuperTypesOffset =
5360 wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
5361 __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
5362 pinned);
5363 // Step 3: check the list's length.
5364 LiftoffRegister list_length = tmp2;
5365 __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
5366 if (rtt.type.has_depth()) {
5367 __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
5368 rtt.type.depth());
5369 // Step 4: load the candidate list slot into {tmp1}, and compare it.
5370 __ LoadTaggedPointer(
5371 tmp1.gp(), tmp1.gp(), no_reg,
5372 wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
5373 pinned);
5374 __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
5375 rtt_reg.gp());
5376 } else {
5377 // Preserve {obj_reg} across the call.
5378 LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg);
5379 __ PushRegisters(saved_regs);
5380 LiftoffAssembler::VarState rtt_state(kPointerKind, rtt_reg, 0);
5381 LiftoffAssembler::VarState tmp1_state(kPointerKind, tmp1, 0);
5382 CallRuntimeStub(WasmCode::kWasmSubtypeCheck,
5383 MakeSig::Returns(kI32).Params(kOptRef, rtt.type.kind()),
5384 {tmp1_state, rtt_state}, decoder->position());
5385 __ PopRegisters(saved_regs);
5386 __ Move(tmp1.gp(), kReturnRegister0, kI32);
5387 __ emit_i32_cond_jumpi(kEqual, no_match, tmp1.gp(), 0);
5388 }
5389
5390 // Fall through to {match}.
5391 __ bind(&match);
5392 return obj_reg;
5393 }
5394
5395 void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
5396 Value* /* result_val */) {
5397 Label return_false, done;
5398 LiftoffRegList pinned;
5399 LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
5400
5401 SubtypeCheck(decoder, obj, rtt, &return_false, kNullFails, pinned,
5402 result.gp());
5403
5404 __ LoadConstant(result, WasmValue(1));
5405 // TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
5406 __ emit_jump(&done);
5407
5408 __ bind(&return_false);
5409 __ LoadConstant(result, WasmValue(0));
5410 __ bind(&done);
5411 __ PushRegister(kI32, result);
5412 }
5413
5414 void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
5415 Value* result) {
5416 Label* trap_label =
5417 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast);
5418 LiftoffRegister obj_reg =
5419 SubtypeCheck(decoder, obj, rtt, trap_label, kNullSucceeds);
5420 __ PushRegister(obj.type.kind(), obj_reg);
5421 }
5422
5423 void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
5424 Value* /* result_on_branch */, uint32_t depth) {
5425 // Before branching, materialize all constants. This avoids repeatedly
5426 // materializing them for each conditional branch.
5427 if (depth != decoder->control_depth() - 1) {
5428 __ MaterializeMergedConstants(
5429 decoder->control_at(depth)->br_merge()->arity);
5430 }
5431
5432 Label cont_false;
5433 LiftoffRegister obj_reg =
5434 SubtypeCheck(decoder, obj, rtt, &cont_false, kNullFails);
5435
5436 __ PushRegister(rtt.type.is_bottom() ? kBottom : obj.type.kind(), obj_reg);
5437 BrOrRet(decoder, depth, 0);
5438
5439 __ bind(&cont_false);
5440 // Drop the branch's value, restore original value.
5441 Drop(decoder);
5442 __ PushRegister(obj.type.kind(), obj_reg);
5443 }
5444
5445 void BrOnCastFail(FullDecoder* decoder, const Value& obj, const Value& rtt,
5446 Value* /* result_on_fallthrough */, uint32_t depth) {
5447 // Before branching, materialize all constants. This avoids repeatedly
5448 // materializing them for each conditional branch.
5449 if (depth != decoder->control_depth() - 1) {
5450 __ MaterializeMergedConstants(
5451 decoder->control_at(depth)->br_merge()->arity);
5452 }
5453
5454 Label cont_branch, fallthrough;
5455 LiftoffRegister obj_reg =
5456 SubtypeCheck(decoder, obj, rtt, &cont_branch, kNullFails);
5457 __ PushRegister(obj.type.kind(), obj_reg);
5458 __ emit_jump(&fallthrough);
5459
5460 __ bind(&cont_branch);
5461 BrOrRet(decoder, depth, 0);
5462
5463 __ bind(&fallthrough);
5464 }
5465
5466 // Abstract type checkers. They all return the object register and fall
5467 // through to match.
5468 LiftoffRegister DataCheck(const Value& obj, Label* no_match,
5469 LiftoffRegList pinned, Register opt_scratch) {
5470 LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
5471
5472 // Reserve all temporary registers up front, so that the cache state
5473 // tracking doesn't get confused by the following conditional jumps.
5474 LiftoffRegister tmp1 =
5475 opt_scratch != no_reg
5476 ? LiftoffRegister(opt_scratch)
5477 : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5478 LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5479
5480 if (obj.type.is_nullable()) {
5481 LoadNullValue(tmp1.gp(), pinned);
5482 __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
5483 }
5484
5485 __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
5486
5487 // Load the object's map and check if it is a struct/array map.
5488 __ LoadMap(tmp1.gp(), obj_reg.gp());
5489 EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
5490
5491 return obj_reg;
5492 }
5493
5494 LiftoffRegister FuncCheck(const Value& obj, Label* no_match,
5495 LiftoffRegList pinned, Register opt_scratch) {
5496 LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
5497
5498 // Reserve all temporary registers up front, so that the cache state
5499 // tracking doesn't get confused by the following conditional jumps.
5500 LiftoffRegister tmp1 =
5501 opt_scratch != no_reg
5502 ? LiftoffRegister(opt_scratch)
5503 : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5504
5505 if (obj.type.is_nullable()) {
5506 LoadNullValue(tmp1.gp(), pinned);
5507 __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
5508 }
5509
5510 __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
5511
5512 // Load the object's map and check if its InstanceType field is that of a
5513 // function.
5514 __ LoadMap(tmp1.gp(), obj_reg.gp());
5515 __ Load(tmp1, tmp1.gp(), no_reg,
5516 wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
5517 LoadType::kI32Load16U, pinned);
5518 __ emit_i32_cond_jumpi(kUnequal, no_match, tmp1.gp(), JS_FUNCTION_TYPE);
5519
5520 return obj_reg;
5521 }
5522
5523 LiftoffRegister I31Check(const Value& object, Label* no_match,
5524 LiftoffRegList pinned, Register opt_scratch) {
5525 LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
5526
5527 __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnNotSmi);
5528
5529 return obj_reg;
5530 }
5531
5532 using TypeChecker = LiftoffRegister (LiftoffCompiler::*)(
5533 const Value& obj, Label* no_match, LiftoffRegList pinned,
5534 Register opt_scratch);
5535
5536 template <TypeChecker type_checker>
5537 void AbstractTypeCheck(const Value& object) {
5538 Label match, no_match, done;
5539 LiftoffRegList pinned;
5540 LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
5541
5542 (this->*type_checker)(object, &no_match, pinned, result.gp());
5543
5544 __ bind(&match);
5545 __ LoadConstant(result, WasmValue(1));
5546 // TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
5547 __ emit_jump(&done);
5548
5549 __ bind(&no_match);
5550 __ LoadConstant(result, WasmValue(0));
5551 __ bind(&done);
5552 __ PushRegister(kI32, result);
5553 }
5554
5555 void RefIsData(FullDecoder* /* decoder */, const Value& object,
5556 Value* /* result_val */) {
5557 return AbstractTypeCheck<&LiftoffCompiler::DataCheck>(object);
5558 }
5559
5560 void RefIsFunc(FullDecoder* /* decoder */, const Value& object,
5561 Value* /* result_val */) {
5562 return AbstractTypeCheck<&LiftoffCompiler::FuncCheck>(object);
5563 }
5564
5565 void RefIsI31(FullDecoder* decoder, const Value& object,
5566 Value* /* result */) {
5567 return AbstractTypeCheck<&LiftoffCompiler::I31Check>(object);
5568 }
5569
5570 template <TypeChecker type_checker>
5571 void AbstractTypeCast(const Value& object, FullDecoder* decoder,
5572 ValueKind result_kind) {
5573 Label* trap_label =
5574 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapIllegalCast);
5575 Label match;
5576 LiftoffRegister obj_reg =
5577 (this->*type_checker)(object, trap_label, {}, no_reg);
5578 __ bind(&match);
5579 __ PushRegister(result_kind, obj_reg);
5580 }
5581
5582 void RefAsData(FullDecoder* decoder, const Value& object,
5583 Value* /* result */) {
5584 return AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder, kRef);
5585 }
5586
5587 void RefAsFunc(FullDecoder* decoder, const Value& object,
5588 Value* /* result */) {
5589 return AbstractTypeCast<&LiftoffCompiler::FuncCheck>(object, decoder, kRef);
5590 }
5591
5592 void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
5593 return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef);
5594 }
5595
5596 template <TypeChecker type_checker>
5597 void BrOnAbstractType(const Value& object, FullDecoder* decoder,
5598 uint32_t br_depth) {
5599 // Before branching, materialize all constants. This avoids repeatedly
5600 // materializing them for each conditional branch.
5601 if (br_depth != decoder->control_depth() - 1) {
5602 __ MaterializeMergedConstants(
5603 decoder->control_at(br_depth)->br_merge()->arity);
5604 }
5605
5606 Label no_match;
5607 LiftoffRegister obj_reg =
5608 (this->*type_checker)(object, &no_match, {}, no_reg);
5609
5610 __ PushRegister(kRef, obj_reg);
5611 BrOrRet(decoder, br_depth, 0);
5612
5613 __ bind(&no_match);
5614 }
5615
5616 template <TypeChecker type_checker>
5617 void BrOnNonAbstractType(const Value& object, FullDecoder* decoder,
5618 uint32_t br_depth) {
5619 // Before branching, materialize all constants. This avoids repeatedly
5620 // materializing them for each conditional branch.
5621 if (br_depth != decoder->control_depth() - 1) {
5622 __ MaterializeMergedConstants(
5623 decoder->control_at(br_depth)->br_merge()->arity);
5624 }
5625
5626 Label no_match, end;
5627 LiftoffRegister obj_reg =
5628 (this->*type_checker)(object, &no_match, {}, no_reg);
5629 __ PushRegister(kRef, obj_reg);
5630 __ emit_jump(&end);
5631
5632 __ bind(&no_match);
5633 BrOrRet(decoder, br_depth, 0);
5634
5635 __ bind(&end);
5636 }
5637
5638 void BrOnData(FullDecoder* decoder, const Value& object,
5639 Value* /* value_on_branch */, uint32_t br_depth) {
5640 return BrOnAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
5641 br_depth);
5642 }
5643
5644 void BrOnFunc(FullDecoder* decoder, const Value& object,
5645 Value* /* value_on_branch */, uint32_t br_depth) {
5646 return BrOnAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder,
5647 br_depth);
5648 }
5649
5650 void BrOnI31(FullDecoder* decoder, const Value& object,
5651 Value* /* value_on_branch */, uint32_t br_depth) {
5652 return BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder,
5653 br_depth);
5654 }
5655
5656 void BrOnNonData(FullDecoder* decoder, const Value& object,
5657 Value* /* value_on_branch */, uint32_t br_depth) {
5658 return BrOnNonAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
5659 br_depth);
5660 }
5661
5662 void BrOnNonFunc(FullDecoder* decoder, const Value& object,
5663 Value* /* value_on_branch */, uint32_t br_depth) {
5664 return BrOnNonAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder,
5665 br_depth);
5666 }
5667
5668 void BrOnNonI31(FullDecoder* decoder, const Value& object,
5669 Value* /* value_on_branch */, uint32_t br_depth) {
5670 return BrOnNonAbstractType<&LiftoffCompiler::I31Check>(object, decoder,
5671 br_depth);
5672 }
5673
5674 void Forward(FullDecoder* decoder, const Value& from, Value* to) {
5675 // Nothing to do here.
5676 }
5677
5678 private:
5679 void CallDirect(FullDecoder* decoder,
5680 const CallFunctionImmediate<validate>& imm,
5681 const Value args[], Value returns[], TailCall tail_call) {
5682 MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
5683 for (ValueKind ret : sig.returns()) {
5684 if (!CheckSupportedType(decoder, ret, "return")) return;
5685 }
5686
5687 auto call_descriptor =
5688 compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
5689 call_descriptor =
5690 GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
5691
5692 if (imm.index < env_->module->num_imported_functions) {
5693 // A direct call to an imported function.
5694 LiftoffRegList pinned;
5695 Register tmp = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
5696 Register target = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
5697
5698 Register imported_targets = tmp;
5699 LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
5700 kSystemPointerSize, pinned);
5701 __ Load(LiftoffRegister(target), imported_targets, no_reg,
5702 imm.index * sizeof(Address), kPointerLoadType, pinned);
5703
5704 Register imported_function_refs = tmp;
5705 LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs,
5706 ImportedFunctionRefs, pinned);
5707 Register imported_function_ref = tmp;
5708 __ LoadTaggedPointer(
5709 imported_function_ref, imported_function_refs, no_reg,
5710 ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
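// Note that {tmp} was reused for {imported_targets}, {imported_function_refs}
// and {imported_function_ref}; only the last value is still live and becomes
// the explicit instance argument below.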
5711
5712 Register* explicit_instance = &imported_function_ref;
5713 __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
5714 if (tail_call) {
5715 __ PrepareTailCall(
5716 static_cast<int>(call_descriptor->ParameterSlotCount()),
5717 static_cast<int>(
5718 call_descriptor->GetStackParameterDelta(descriptor_)));
5719 __ TailCallIndirect(target);
5720 } else {
5721 source_position_table_builder_.AddPosition(
5722 __ pc_offset(), SourcePosition(decoder->position()), true);
5723 __ CallIndirect(&sig, call_descriptor, target);
5724 FinishCall(decoder, &sig, call_descriptor);
5725 }
5726 } else {
5727 // A direct call within this module just gets the current instance.
5728 __ PrepareCall(&sig, call_descriptor);
5729 // Just encode the function index. This will be patched at instantiation.
5730 Address addr = static_cast<Address>(imm.index);
5731 if (tail_call) {
5732 DCHECK(descriptor_->CanTailCall(call_descriptor));
5733 __ PrepareTailCall(
5734 static_cast<int>(call_descriptor->ParameterSlotCount()),
5735 static_cast<int>(
5736 call_descriptor->GetStackParameterDelta(descriptor_)));
5737 __ TailCallNativeWasmCode(addr);
5738 } else {
5739 source_position_table_builder_.AddPosition(
5740 __ pc_offset(), SourcePosition(decoder->position()), true);
5741 __ CallNativeWasmCode(addr);
5742 FinishCall(decoder, &sig, call_descriptor);
5743 }
5744 }
5745 }
5746
5747 void CallIndirect(FullDecoder* decoder, const Value& index_val,
5748 const CallIndirectImmediate<validate>& imm,
5749 TailCall tail_call) {
5750 MostlySmallValueKindSig sig(compilation_zone_, imm.sig);
5751 for (ValueKind ret : sig.returns()) {
5752 if (!CheckSupportedType(decoder, ret, "return")) return;
5753 }
5754
5755 // Pop the index. We'll modify the register's contents later.
5756 Register index = __ PopToModifiableRegister().gp();
5757
5758 LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
5759 // Get three temporary registers.
5760 Register table = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
5761 Register tmp_const = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
5762 Register scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
5763 Register indirect_function_table = no_reg;
5764 if (imm.table_imm.index != 0) {
5765 Register indirect_function_tables =
5766 pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
5767 LOAD_TAGGED_PTR_INSTANCE_FIELD(indirect_function_tables,
5768 IndirectFunctionTables, pinned);
5769
5770 indirect_function_table = indirect_function_tables;
5771 __ LoadTaggedPointer(
5772 indirect_function_table, indirect_function_tables, no_reg,
5773 ObjectAccess::ElementOffsetInTaggedFixedArray(imm.table_imm.index),
5774 pinned);
5775 }
5776
5777 // Bounds check against the table size.
5778 Label* invalid_func_label =
5779 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapTableOutOfBounds);
5780
5781 uint32_t canonical_sig_num =
5782 env_->module->canonicalized_type_ids[imm.sig_imm.index];
5783 DCHECK_GE(canonical_sig_num, 0);
5784 DCHECK_GE(kMaxInt, canonical_sig_num);
5785
5786 // Compare against table size stored in
5787 // {instance->indirect_function_table_size}.
5788 if (imm.table_imm.index == 0) {
5789 LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
5790 pinned);
5791 } else {
5792 __ Load(
5793 LiftoffRegister(tmp_const), indirect_function_table, no_reg,
5794 wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset),
5795 LoadType::kI32Load, pinned);
5796 }
5797 __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
5798 tmp_const);
5799
5800 CODE_COMMENT("Check indirect call signature");
5801 // Load the signature from {instance->ift_sig_ids[key]}
5802 if (imm.table_imm.index == 0) {
5803 LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds,
5804 kSystemPointerSize, pinned);
5805 } else {
5806 __ Load(LiftoffRegister(table), indirect_function_table, no_reg,
5807 wasm::ObjectAccess::ToTagged(
5808 WasmIndirectFunctionTable::kSigIdsOffset),
5809 kPointerLoadType, pinned);
5810 }
5811 // Shift {index} by 2 (multiply by 4) to represent kInt32Size items.
5812 STATIC_ASSERT((1 << 2) == kInt32Size);
5813 __ emit_i32_shli(index, index, 2);
5814 __ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
5815 pinned);
5816
5817 // Compare against expected signature.
5818 __ LoadConstant(LiftoffRegister(tmp_const), WasmValue(canonical_sig_num));
5819
5820 Label* sig_mismatch_label =
5821 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapFuncSigMismatch);
5822 __ emit_cond_jump(kUnequal, sig_mismatch_label, kPointerKind, scratch,
5823 tmp_const);
5824
5825 // At this point {index} has already been multiplied by 4.
5826 CODE_COMMENT("Execute indirect call");
5827 if (kTaggedSize != kInt32Size) {
5828 DCHECK_EQ(kTaggedSize, kInt32Size * 2);
5829 // Multiply {index} by another 2 to represent kTaggedSize items.
5830 __ emit_i32_add(index, index, index);
5831 }
5832 // At this point {index} has already been multiplied by kTaggedSize.
5833
5834 // Load the ref (instance or <instance, callable> pair) from {instance->ift_refs[key]}
5835 if (imm.table_imm.index == 0) {
5836 LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs, pinned);
5837 } else {
5838 __ LoadTaggedPointer(
5839 table, indirect_function_table, no_reg,
5840 wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset),
5841 pinned);
5842 }
5843 __ LoadTaggedPointer(tmp_const, table, index,
5844 ObjectAccess::ElementOffsetInTaggedFixedArray(0),
5845 pinned);
5846
5847 if (kTaggedSize != kSystemPointerSize) {
5848 DCHECK_EQ(kSystemPointerSize, kTaggedSize * 2);
5849 // Multiply {index} by another 2 to represent kSystemPointerSize items.
5850 __ emit_i32_add(index, index, index);
5851 }
5852 // At this point {index} has already been multiplied by kSystemPointerSize.
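// Summary: {index} was scaled by kInt32Size for the sig-id lookup, by
// kTaggedSize for the refs lookup, and by kSystemPointerSize for the targets
// lookup below, via the two conditional doublings above.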
5853
5854 Register* explicit_instance = &tmp_const;
5855
5856 // Load the target from {instance->ift_targets[key]}
5857 if (imm.table_imm.index == 0) {
5858 LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
5859 kSystemPointerSize, pinned);
5860 } else {
5861 __ Load(LiftoffRegister(table), indirect_function_table, no_reg,
5862 wasm::ObjectAccess::ToTagged(
5863 WasmIndirectFunctionTable::kTargetsOffset),
5864 kPointerLoadType, pinned);
5865 }
5866 __ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
5867 pinned);
5868
5869 auto call_descriptor =
5870 compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
5871 call_descriptor =
5872 GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
5873
5874 Register target = scratch;
5875 __ PrepareCall(&sig, call_descriptor, &target, explicit_instance);
5876 if (tail_call) {
5877 __ PrepareTailCall(
5878 static_cast<int>(call_descriptor->ParameterSlotCount()),
5879 static_cast<int>(
5880 call_descriptor->GetStackParameterDelta(descriptor_)));
5881 __ TailCallIndirect(target);
5882 } else {
5883 source_position_table_builder_.AddPosition(
5884 __ pc_offset(), SourcePosition(decoder->position()), true);
5885 __ CallIndirect(&sig, call_descriptor, target);
5886
5887 FinishCall(decoder, &sig, call_descriptor);
5888 }
5889 }
5890
5891 void CallRef(FullDecoder* decoder, ValueType func_ref_type,
5892 const FunctionSig* type_sig, TailCall tail_call) {
5893 MostlySmallValueKindSig sig(compilation_zone_, type_sig);
5894 for (ValueKind ret : sig.returns()) {
5895 if (!CheckSupportedType(decoder, ret, "return")) return;
5896 }
5897 compiler::CallDescriptor* call_descriptor =
5898 compiler::GetWasmCallDescriptor(compilation_zone_, type_sig);
5899 call_descriptor =
5900 GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
5901
5902 // Executing a write barrier needs temp registers; doing this on a
5903 // conditional branch confuses the LiftoffAssembler's register management.
5904 // Spill everything up front to work around that.
5905 __ SpillAllRegisters();
5906
5907 // We limit ourselves to four registers:
5908 // (1) func_data, initially reused for func_ref.
5909 // (2) instance, initially used as temp.
5910 // (3) target, initially used as temp.
5911 // (4) temp.
5912 LiftoffRegList pinned;
5913 LiftoffRegister func_ref = pinned.set(__ PopToModifiableRegister(pinned));
5914 MaybeEmitNullCheck(decoder, func_ref.gp(), pinned, func_ref_type);
5915 LiftoffRegister instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5916 LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5917 LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
5918
5919 // Load the WasmFunctionData.
5920 LiftoffRegister func_data = func_ref;
5921 __ LoadTaggedPointer(
5922 func_data.gp(), func_ref.gp(), no_reg,
5923 wasm::ObjectAccess::ToTagged(JSFunction::kSharedFunctionInfoOffset),
5924 pinned);
5925 __ LoadTaggedPointer(
5926 func_data.gp(), func_data.gp(), no_reg,
5927 wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset),
5928 pinned);
5929
5930 // Load "ref" (instance or <instance, callable> pair) and target.
5931 __ LoadTaggedPointer(
5932 instance.gp(), func_data.gp(), no_reg,
5933 wasm::ObjectAccess::ToTagged(WasmFunctionData::kRefOffset), pinned);
5934
5935 Label load_target, perform_call;
5936
5937 // Check if "ref" is a Tuple2.
5938 {
5939 LiftoffRegister pair_map = temp;
5940 LiftoffRegister ref_map = target;
5941 __ LoadMap(ref_map.gp(), instance.gp());
5942 LOAD_INSTANCE_FIELD(pair_map.gp(), IsolateRoot, kSystemPointerSize,
5943 pinned);
5944 __ LoadTaggedPointer(pair_map.gp(), pair_map.gp(), no_reg,
5945 IsolateData::root_slot_offset(RootIndex::kTuple2Map),
5946 pinned);
5947 __ emit_cond_jump(kUnequal, &load_target, kRef, ref_map.gp(),
5948 pair_map.gp());
5949
5950 // Overwrite the tuple's "instance" entry with the current instance.
5951 // TODO(jkummerow): Can we figure out a way to guarantee that the
5952 // instance field is always precomputed?
5953 LiftoffRegister current_instance = temp;
5954 __ FillInstanceInto(current_instance.gp());
5955 __ StoreTaggedPointer(instance.gp(), no_reg,
5956 wasm::ObjectAccess::ToTagged(Tuple2::kValue1Offset),
5957 current_instance, pinned);
5958 // Fall through to {load_target}.
5959 }
5960 // Load the call target.
5961 __ bind(&load_target);
5962
5963 #ifdef V8_HEAP_SANDBOX
5964 LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
5965 __ LoadExternalPointer(target.gp(), func_data.gp(),
5966 WasmFunctionData::kForeignAddressOffset,
5967 kForeignForeignAddressTag, temp.gp());
5968 #else
5969 __ Load(
5970 target, func_data.gp(), no_reg,
5971 wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset),
5972 kPointerLoadType, pinned);
5973 #endif
5974
5975 LiftoffRegister null_address = temp;
5976 __ LoadConstant(null_address, WasmValue::ForUintPtr(0));
5977 __ emit_cond_jump(kUnequal, &perform_call, kRef, target.gp(),
5978 null_address.gp());
5979 // The cached target can only be null for WasmJSFunctions.
5980 __ LoadTaggedPointer(target.gp(), func_data.gp(), no_reg,
5981 wasm::ObjectAccess::ToTagged(
5982 WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
5983 pinned);
5984 #ifdef V8_EXTERNAL_CODE_SPACE
5985 __ LoadCodeDataContainerEntry(target.gp(), target.gp());
5986 #else
5987 __ emit_ptrsize_addi(target.gp(), target.gp(),
5988 wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
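// {target} held a tagged pointer to the wrapper's Code object; adding the
// tagged header size turns it into the address of the first instruction,
// i.e. the code entry point.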
5989 #endif
5990 // Fall through to {perform_call}.
5991
5992 __ bind(&perform_call);
5993 // Now the call target is in {target}, and the right instance object
5994 // is in {instance}.
5995 Register target_reg = target.gp();
5996 Register instance_reg = instance.gp();
5997 __ PrepareCall(&sig, call_descriptor, &target_reg, &instance_reg);
5998 if (tail_call) {
5999 __ PrepareTailCall(
6000 static_cast<int>(call_descriptor->ParameterSlotCount()),
6001 static_cast<int>(
6002 call_descriptor->GetStackParameterDelta(descriptor_)));
6003 __ TailCallIndirect(target_reg);
6004 } else {
6005 source_position_table_builder_.AddPosition(
6006 __ pc_offset(), SourcePosition(decoder->position()), true);
6007 __ CallIndirect(&sig, call_descriptor, target_reg);
6008
6009 FinishCall(decoder, &sig, call_descriptor);
6010 }
6011 }
6012
6013 void LoadNullValue(Register null, LiftoffRegList pinned) {
6014 LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize, pinned);
6015 __ LoadFullPointer(null, null,
6016 IsolateData::root_slot_offset(RootIndex::kNullValue));
6017 }
6018
6019 void LoadExceptionSymbol(Register dst, LiftoffRegList pinned,
6020 RootIndex root_index) {
6021 LOAD_INSTANCE_FIELD(dst, IsolateRoot, kSystemPointerSize, pinned);
6022 uint32_t offset_imm = IsolateData::root_slot_offset(root_index);
6023 __ LoadFullPointer(dst, dst, offset_imm);
6024 }
6025
6026 void MaybeEmitNullCheck(FullDecoder* decoder, Register object,
6027 LiftoffRegList pinned, ValueType type) {
6028 if (!type.is_nullable()) return;
6029 Label* trap_label =
6030 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapNullDereference);
6031 LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
6032 LoadNullValue(null.gp(), pinned);
6033 __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kOptRef, object,
6034 null.gp());
6035 }
6036
6037 void BoundsCheck(FullDecoder* decoder, LiftoffRegister array,
6038 LiftoffRegister index, LiftoffRegList pinned) {
6039 Label* trap_label =
6040 AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayOutOfBounds);
6041 LiftoffRegister length = __ GetUnusedRegister(kGpReg, pinned);
6042 constexpr int kLengthOffset =
6043 wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
6044 __ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load,
6045 pinned);
6046 __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label, kI32,
6047 index.gp(), length.gp());
6048 }
6049
6050 int StructFieldOffset(const StructType* struct_type, int field_index) {
6051 return wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize +
6052 struct_type->field_offset(field_index));
6053 }
6054
6055 void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg,
6056 int offset, ValueKind kind, bool is_signed,
6057 LiftoffRegList pinned) {
6058 if (is_reference(kind)) {
6059 __ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
6060 } else {
6061 // Primitive kind.
6062 LoadType load_type = LoadType::ForValueKind(kind, is_signed);
6063 __ Load(dst, src, offset_reg, offset, load_type, pinned);
6064 }
6065 }
6066
6067 void StoreObjectField(Register obj, Register offset_reg, int offset,
6068 LiftoffRegister value, LiftoffRegList pinned,
6069 ValueKind kind) {
6070 if (is_reference(kind)) {
6071 __ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
6072 } else {
6073 // Primitive kind.
6074 StoreType store_type = StoreType::ForValueKind(kind);
6075 __ Store(obj, offset_reg, offset, value, store_type, pinned);
6076 }
6077 }
6078
6079 void SetDefaultValue(LiftoffRegister reg, ValueKind kind,
6080 LiftoffRegList pinned) {
6081 DCHECK(is_defaultable(kind));
6082 switch (kind) {
6083 case kI8:
6084 case kI16:
6085 case kI32:
6086 return __ LoadConstant(reg, WasmValue(int32_t{0}));
6087 case kI64:
6088 return __ LoadConstant(reg, WasmValue(int64_t{0}));
6089 case kF32:
6090 return __ LoadConstant(reg, WasmValue(float{0.0}));
6091 case kF64:
6092 return __ LoadConstant(reg, WasmValue(double{0.0}));
6093 case kS128:
6094 DCHECK(CpuFeatures::SupportsWasmSimd128());
6095 return __ emit_s128_xor(reg, reg, reg);
6096 case kOptRef:
6097 return LoadNullValue(reg.gp(), pinned);
6098 case kRtt:
6099 case kRttWithDepth:
6100 case kVoid:
6101 case kBottom:
6102 case kRef:
6103 UNREACHABLE();
6104 }
6105 }
6106
6107 void EmitDataRefCheck(Register map, Label* not_data_ref, LiftoffRegister tmp,
6108 LiftoffRegList pinned) {
6109 constexpr int kInstanceTypeOffset =
6110 wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset);
6111 __ Load(tmp, map, no_reg, kInstanceTypeOffset, LoadType::kI32Load16U,
6112 pinned);
6113 // We're going to test a range of WasmObject instance types with a single
6114 // unsigned comparison.
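// Subtracting FIRST_WASM_OBJECT_TYPE first makes any instance type below
// the range wrap around to a large unsigned value, so the single
// kUnsignedGreaterThan check below covers both range bounds.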
6115 __ emit_i32_subi(tmp.gp(), tmp.gp(), FIRST_WASM_OBJECT_TYPE);
6116 __ emit_i32_cond_jumpi(kUnsignedGreaterThan, not_data_ref, tmp.gp(),
6117 LAST_WASM_OBJECT_TYPE - FIRST_WASM_OBJECT_TYPE);
6118 }
6119
6120 void MaybeOSR() {
6121 if (V8_UNLIKELY(for_debugging_)) {
6122 __ MaybeOSR();
6123 }
6124 }
6125
6126 void FinishCall(FullDecoder* decoder, ValueKindSig* sig,
6127 compiler::CallDescriptor* call_descriptor) {
6128 DefineSafepoint();
6129 RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
6130 int pc_offset = __ pc_offset();
6131 MaybeOSR();
6132 EmitLandingPad(decoder, pc_offset);
6133 __ FinishCall(sig, call_descriptor);
6134 }
6135
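// {CheckNan} and {CheckS128Nan} flag observed NaNs through {nondeterminism_}:
// the emitted code only writes to that address when the checked value
// actually is a NaN (see {emit_set_if_nan} / {emit_s128_set_if_nan}).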
6136 void CheckNan(LiftoffRegister src, LiftoffRegList pinned, ValueKind kind) {
6137 DCHECK(kind == ValueKind::kF32 || kind == ValueKind::kF64);
6138 auto nondeterminism_addr = __ GetUnusedRegister(kGpReg, pinned);
6139 __ LoadConstant(
6140 nondeterminism_addr,
6141 WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
6142 __ emit_set_if_nan(nondeterminism_addr.gp(), src.fp(), kind);
6143 }
6144
6145 void CheckS128Nan(LiftoffRegister dst, LiftoffRegList pinned,
6146 ValueKind lane_kind) {
6147 RegClass rc = reg_class_for(kS128);
6148 LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
6149 LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned));
6150 LiftoffRegister nondeterminism_addr =
6151 pinned.set(__ GetUnusedRegister(kGpReg, pinned));
6152 __ LoadConstant(
6153 nondeterminism_addr,
6154 WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
6155 __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst, tmp_gp.gp(),
6156 tmp_s128, lane_kind);
6157 }
6158
6159 bool has_outstanding_op() const {
6160 return outstanding_op_ != kNoOutstandingOp;
6161 }
6162
6163 bool test_and_reset_outstanding_op(WasmOpcode opcode) {
6164 DCHECK_NE(kNoOutstandingOp, opcode);
6165 if (outstanding_op_ != opcode) return false;
6166 outstanding_op_ = kNoOutstandingOp;
6167 return true;
6168 }
6169
6170 void TraceCacheState(FullDecoder* decoder) const {
6171 if (!FLAG_trace_liftoff) return;
6172 StdoutStream os;
6173 for (int control_depth = decoder->control_depth() - 1; control_depth >= -1;
6174 --control_depth) {
6175 auto* cache_state =
6176 control_depth == -1 ? __ cache_state()
6177 : &decoder->control_at(control_depth)
6178 ->label_state;
6179 os << PrintCollection(cache_state->stack_state);
6180 if (control_depth != -1) PrintF("; ");
6181 }
6182 os << "\n";
6183 }
6184
6185 void DefineSafepoint() {
6186 Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
6187 __ cache_state()->DefineSafepoint(safepoint);
6188 }
6189
6190 void DefineSafepointWithCalleeSavedRegisters() {
6191 Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
6192 __ cache_state()->DefineSafepointWithCalleeSavedRegisters(safepoint);
6193 }
6194
6195 Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
6196 Register instance = __ cache_state()->cached_instance;
6197 if (instance == no_reg) {
6198 instance = __ cache_state()->TrySetCachedInstanceRegister(
6199 pinned | LiftoffRegList::ForRegs(fallback));
6200 if (instance == no_reg) instance = fallback;
6201 __ LoadInstanceFromFrame(instance);
6202 }
6203 return instance;
6204 }
6205
6206 static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
6207 static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
6208 // MVP:
6209 kI32, kI64, kF32, kF64,
6210 // Extern ref:
6211 kRef, kOptRef, kRtt, kRttWithDepth, kI8, kI16};
6212
6213 LiftoffAssembler asm_;
6214
6215 // Used for merging code generation of subsequent operations (via look-ahead).
6216 // Set by the first opcode, reset by the second.
6217 WasmOpcode outstanding_op_ = kNoOutstandingOp;
6218
6219 // {supported_types_} is updated in {MaybeBailoutForUnsupportedType}.
6220 base::EnumSet<ValueKind> supported_types_ = kUnconditionallySupported;
6221 compiler::CallDescriptor* const descriptor_;
6222 CompilationEnv* const env_;
6223 DebugSideTableBuilder* const debug_sidetable_builder_;
6224 const ForDebugging for_debugging_;
6225 LiftoffBailoutReason bailout_reason_ = kSuccess;
6226 const int func_index_;
6227 ZoneVector<OutOfLineCode> out_of_line_code_;
6228 SourcePositionTableBuilder source_position_table_builder_;
6229 ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
6230 // Zone used to store information during compilation. The result will be
6231 // stored independently, such that this zone can die together with the
6232 // LiftoffCompiler after compilation.
6233 Zone* compilation_zone_;
6234 SafepointTableBuilder safepoint_table_builder_;
6235 // The pc offset of the instructions to reserve the stack frame. Needed to
6236 // patch the actually needed stack size in the end.
6237 uint32_t pc_offset_stack_frame_construction_ = 0;
6238 // For emitting breakpoints, we store a pointer to the position of the next
6239 // breakpoint, and a pointer after the list of breakpoints as end marker.
6240 // A single breakpoint at offset 0 indicates that we should prepare the
6241 // function for stepping by flooding it with breakpoints.
6242 const int* next_breakpoint_ptr_ = nullptr;
6243 const int* next_breakpoint_end_ = nullptr;
6244
6245 // Introduce a dead breakpoint to ensure that the calculation of the return
6246 // address in OSR is correct.
6247 int dead_breakpoint_ = 0;
6248
6249 // Remember whether we did the function-entry break checks (for "hook on function
6250 // call" and "break on entry" a.k.a. instrumentation breakpoint). This happens
6251 // at the first breakable opcode in the function (if compiling for debugging).
6252 bool did_function_entry_break_checks_ = false;
6253
6254 struct HandlerInfo {
6255 MovableLabel handler;
6256 int pc_offset;
6257 };
6258
6259 ZoneVector<HandlerInfo> handlers_;
6260 int handler_table_offset_ = Assembler::kNoHandlerTable;
6261
6262 // Current number of exception refs on the stack.
6263 int num_exceptions_ = 0;
6264
6265 int32_t* max_steps_;
6266 int32_t* nondeterminism_;
6267
6268 DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
6269 };
6270
6271 // static
6272 constexpr WasmOpcode LiftoffCompiler::kNoOutstandingOp;
6273 // static
6274 constexpr base::EnumSet<ValueKind> LiftoffCompiler::kUnconditionallySupported;
6275
6276 } // namespace
6277
6278 WasmCompilationResult ExecuteLiftoffCompilation(
6279 CompilationEnv* env, const FunctionBody& func_body, int func_index,
6280 ForDebugging for_debugging, const LiftoffOptions& compiler_options) {
6281 int func_body_size = static_cast<int>(func_body.end - func_body.start);
6282 TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
6283 "wasm.CompileBaseline", "funcIndex", func_index, "bodySize",
6284 func_body_size);
6285
6286 Zone zone(GetWasmEngine()->allocator(), "LiftoffCompilationZone");
6287 auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
6288 size_t code_size_estimate =
6289 WasmCodeManager::EstimateLiftoffCodeSize(func_body_size);
6290 // Allocate the initial buffer a bit bigger to avoid reallocation during code
6291 // generation. Overflows when casting to int are fine, as we will allocate at
6292 // least {AssemblerBase::kMinimalBufferSize} anyway, so in the worst case we
6293 // have to grow more often.
6294 int initial_buffer_size = static_cast<int>(128 + code_size_estimate * 4 / 3);
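// For example, a code size estimate of 3000 bytes yields an initial buffer
// of 128 + 4000 = 4128 bytes.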
6295 std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder;
6296 if (compiler_options.debug_sidetable) {
6297 debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
6298 }
6299 DCHECK_IMPLIES(compiler_options.max_steps, for_debugging == kForDebugging);
6300 WasmFeatures unused_detected_features;
6301 WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
6302 &zone, env->module, env->enabled_features,
6303 compiler_options.detected_features ? compiler_options.detected_features
6304 : &unused_detected_features,
6305 func_body, call_descriptor, env, &zone,
6306 NewAssemblerBuffer(initial_buffer_size), debug_sidetable_builder.get(),
6307 for_debugging, func_index, compiler_options.breakpoints,
6308 compiler_options.dead_breakpoint, compiler_options.max_steps,
6309 compiler_options.nondeterminism);
6310 decoder.Decode();
6311 LiftoffCompiler* compiler = &decoder.interface();
6312 if (decoder.failed()) compiler->OnFirstError(&decoder);
6313
6314 if (auto* counters = compiler_options.counters) {
6315 // Check that the histogram for the bailout reasons has the correct size.
6316 DCHECK_EQ(0, counters->liftoff_bailout_reasons()->min());
6317 DCHECK_EQ(kNumBailoutReasons - 1,
6318 counters->liftoff_bailout_reasons()->max());
6319 DCHECK_EQ(kNumBailoutReasons,
6320 counters->liftoff_bailout_reasons()->num_buckets());
6321 // Register the bailout reason (can also be {kSuccess}).
6322 counters->liftoff_bailout_reasons()->AddSample(
6323 static_cast<int>(compiler->bailout_reason()));
6324 }
6325
6326 if (compiler->did_bailout()) return WasmCompilationResult{};
6327
6328 WasmCompilationResult result;
6329 compiler->GetCode(&result.code_desc);
6330 result.instr_buffer = compiler->ReleaseBuffer();
6331 result.source_positions = compiler->GetSourcePositionTable();
6332 result.protected_instructions_data = compiler->GetProtectedInstructionsData();
6333 result.frame_slot_count = compiler->GetTotalFrameSlotCountForGC();
6334 result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
6335 result.func_index = func_index;
6336 result.result_tier = ExecutionTier::kLiftoff;
6337 result.for_debugging = for_debugging;
6338 if (auto* debug_sidetable = compiler_options.debug_sidetable) {
6339 *debug_sidetable = debug_sidetable_builder->GenerateDebugSideTable();
6340 }
6341
6342 DCHECK(result.succeeded());
6343 return result;
6344 }
6345
6346 std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
6347 const WasmCode* code) {
6348 auto* native_module = code->native_module();
6349 auto* function = &native_module->module()->functions[code->index()];
6350 ModuleWireBytes wire_bytes{native_module->wire_bytes()};
6351 base::Vector<const byte> function_bytes =
6352 wire_bytes.GetFunctionBytes(function);
6353 CompilationEnv env = native_module->CreateCompilationEnv();
6354 FunctionBody func_body{function->sig, 0, function_bytes.begin(),
6355 function_bytes.end()};
6356
6357 Zone zone(GetWasmEngine()->allocator(), "LiftoffDebugSideTableZone");
6358 auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, function->sig);
6359 DebugSideTableBuilder debug_sidetable_builder;
6360 WasmFeatures detected;
6361 constexpr int kSteppingBreakpoints[] = {0};
6362 DCHECK(code->for_debugging() == kForDebugging ||
6363 code->for_debugging() == kForStepping);
6364 base::Vector<const int> breakpoints =
6365 code->for_debugging() == kForStepping
6366 ? base::ArrayVector(kSteppingBreakpoints)
6367 : base::Vector<const int>{};
6368 WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
6369 &zone, native_module->module(), env.enabled_features, &detected,
6370 func_body, call_descriptor, &env, &zone,
6371 NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
6372 &debug_sidetable_builder, code->for_debugging(), code->index(),
6373 breakpoints);
6374 decoder.Decode();
6375 DCHECK(decoder.ok());
6376 DCHECK(!decoder.interface().did_bailout());
6377 return debug_sidetable_builder.GenerateDebugSideTable();
6378 }
6379
6380 #undef __
6381 #undef TRACE
6382 #undef WASM_INSTANCE_OBJECT_FIELD_OFFSET
6383 #undef WASM_INSTANCE_OBJECT_FIELD_SIZE
6384 #undef LOAD_INSTANCE_FIELD
6385 #undef LOAD_TAGGED_PTR_INSTANCE_FIELD
6386 #undef CODE_COMMENT
6387
6388 } // namespace wasm
6389 } // namespace internal
6390 } // namespace v8
6391