// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/interpreter/interpreter-assembler.h"

#include <limits>
#include <ostream>

#include "src/code-factory.h"
#include "src/frames.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
#include "src/machine-type.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/zone/zone.h"

namespace v8 {
namespace internal {
namespace interpreter {

using compiler::CodeAssemblerState;
using compiler::Node;

InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                           Bytecode bytecode,
                                           OperandScale operand_scale)
    : CodeStubAssembler(state),
      bytecode_(bytecode),
      operand_scale_(operand_scale),
      VARIABLE_CONSTRUCTOR(interpreted_frame_pointer_,
                           MachineType::PointerRepresentation()),
      VARIABLE_CONSTRUCTOR(
          bytecode_array_, MachineRepresentation::kTagged,
          Parameter(InterpreterDispatchDescriptor::kBytecodeArray)),
      VARIABLE_CONSTRUCTOR(
          bytecode_offset_, MachineType::PointerRepresentation(),
          Parameter(InterpreterDispatchDescriptor::kBytecodeOffset)),
      VARIABLE_CONSTRUCTOR(
          dispatch_table_, MachineType::PointerRepresentation(),
          Parameter(InterpreterDispatchDescriptor::kDispatchTable)),
      VARIABLE_CONSTRUCTOR(
          accumulator_, MachineRepresentation::kTagged,
          Parameter(InterpreterDispatchDescriptor::kAccumulator)),
      accumulator_use_(AccumulatorUse::kNone),
      made_call_(false),
      reloaded_frame_ptr_(false),
      bytecode_array_valid_(true),
      disable_stack_check_across_call_(false),
      stack_pointer_before_call_(nullptr) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
                                  [this] { CallEpilogue(); });

  // Save the bytecode offset immediately if the bytecode will make a call
  // along the critical path, or if it is a return bytecode.
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode) ||
      Bytecodes::Returns(bytecode)) {
    SaveBytecodeOffset();
  }
}

InterpreterAssembler::~InterpreterAssembler() {
  // If the following check fails, the handler does not use the
  // accumulator in the way described in the bytecode definitions in
  // bytecodes.h.
  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
  UnregisterCallGenerationCallbacks();
}

Node* InterpreterAssembler::GetInterpretedFramePointer() {
  if (!interpreted_frame_pointer_.IsBound()) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
             !reloaded_frame_ptr_) {
    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
    reloaded_frame_ptr_ = true;
  }
  return interpreted_frame_pointer_.value();
}

Node* InterpreterAssembler::BytecodeOffset() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (bytecode_offset_.value() ==
       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
    bytecode_offset_.Bind(ReloadBytecodeOffset());
  }
  return bytecode_offset_.value();
}

Node* InterpreterAssembler::ReloadBytecodeOffset() {
  Node* offset = LoadAndUntagRegister(Register::bytecode_offset());
  if (operand_scale() != OperandScale::kSingle) {
    // Add one to the offset such that it points to the actual bytecode rather
    // than the Wide / ExtraWide prefix bytecode.
    offset = IntPtrAdd(offset, IntPtrConstant(1));
  }
  return offset;
}

void InterpreterAssembler::SaveBytecodeOffset() {
  Node* offset = BytecodeOffset();
  if (operand_scale() != OperandScale::kSingle) {
    // Subtract one from the offset such that it points to the Wide / ExtraWide
    // prefix bytecode.
    offset = IntPtrSub(BytecodeOffset(), IntPtrConstant(1));
  }
  StoreAndTagRegister(offset, Register::bytecode_offset());
}
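
// For example, with operand_scale() == OperandScale::kDouble the dispatched
// bytecode is preceded by a Wide prefix, e.g. [Wide][Ldar][16-bit reg operand].
// BytecodeOffset() points at the bytecode itself, the value spilled by
// SaveBytecodeOffset() points one byte earlier at the Wide prefix, and
// ReloadBytecodeOffset() adds that byte back again after a call.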

Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
  // Force a re-load of the bytecode array after every call in case the debugger
  // has been activated.
  if (!bytecode_array_valid_) {
    bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
    bytecode_array_valid_ = true;
  }
  return bytecode_array_.value();
}

Node* InterpreterAssembler::DispatchTableRawPointer() {
  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
      (dispatch_table_.value() ==
       Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
    dispatch_table_.Bind(ExternalConstant(
        ExternalReference::interpreter_dispatch_table_address(isolate())));
  }
  return dispatch_table_.value();
}

Node* InterpreterAssembler::GetAccumulatorUnchecked() {
  return accumulator_.value();
}

Node* InterpreterAssembler::GetAccumulator() {
  DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kRead;
  return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
}

void InterpreterAssembler::SetAccumulator(Node* value) {
  DCHECK(Bytecodes::WritesAccumulator(bytecode_));
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
  accumulator_.Bind(value);
}

Node* InterpreterAssembler::GetContext() {
  return LoadRegister(Register::current_context());
}

void InterpreterAssembler::SetContext(Node* value) {
  StoreRegister(value, Register::current_context());
}

Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Label context_found(this);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Fast path if the depth is 0.
  Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);

  // Loop until the depth is 0.
  BIND(&context_search);
  {
    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
           &context_search);
  }

  BIND(&context_found);
  return cur_context.value();
}
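
// For example, GetContextAtDepth(context, 2) follows Context::PREVIOUS_INDEX
// twice and returns the grandparent context, while a depth of 0 returns the
// given context unchanged.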

void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
                                                              Node* depth,
                                                              Label* target) {
  Variable cur_context(this, MachineRepresentation::kTaggedPointer);
  cur_context.Bind(context);

  Variable cur_depth(this, MachineRepresentation::kWord32);
  cur_depth.Bind(depth);

  Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context};
  Label context_search(this, 2, context_search_loop_variables);

  // Loop until the depth is 0.
  Goto(&context_search);
  BIND(&context_search);
  {
    // TODO(leszeks): We only need to do this check if the context had a sloppy
    // eval; we could pass in a context chain bitmask to figure out which
    // contexts actually need to be checked.

    Node* extension_slot =
        LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);

    // Jump to the target if the extension slot is not a hole.
    GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);

    cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
    cur_context.Bind(
        LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));

    GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
           &context_search);
  }
}
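
// In other words, the extension slot is checked for the given context and its
// parents until |depth| contexts have been examined; if none of them has an
// extension, control falls through without jumping to |target|.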

Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
  return WordPoisonOnSpeculation(
      IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
}

Node* InterpreterAssembler::RegisterLocation(Register reg) {
  return RegisterLocation(IntPtrConstant(reg.ToOperand()));
}

Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
  return TimesPointerSize(index);
}
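
// Register operands are (negative) slot indexes relative to the interpreted
// frame pointer; an index of -3, for example, maps to the frame offset
// -3 * kPointerSize, i.e. three pointer-sized slots below the frame pointer.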

Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
              RegisterFrameOffset(reg_index), LoadSensitivity::kCritical);
}

Node* InterpreterAssembler::LoadRegister(Register reg) {
  return Load(MachineType::AnyTagged(), GetInterpretedFramePointer(),
              IntPtrConstant(reg.ToOperand() << kPointerSizeLog2));
}

Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
  return LoadAndUntagSmi(GetInterpretedFramePointer(), reg.ToOperand()
                                                           << kPointerSizeLog2);
}

Node* InterpreterAssembler::LoadRegisterAtOperandIndex(int operand_index) {
  return LoadRegister(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

std::pair<Node*, Node*> InterpreterAssembler::LoadRegisterPairAtOperandIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kRegPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  Node* second_reg_index = NextRegister(first_reg_index);
  return std::make_pair(LoadRegister(first_reg_index),
                        LoadRegister(second_reg_index));
}

InterpreterAssembler::RegListNodePair
InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
  DCHECK(Bytecodes::IsRegisterListOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index + 1));
  Node* base_reg = RegisterLocation(
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
  Node* reg_count = BytecodeOperandCount(operand_index + 1);
  return RegListNodePair(base_reg, reg_count);
}

Node* InterpreterAssembler::LoadRegisterFromRegisterList(
    const RegListNodePair& reg_list, int index) {
  Node* location = RegisterLocationInRegisterList(reg_list, index);
  // Location is already poisoned on speculation, so no need to poison here.
  return Load(MachineType::AnyTagged(), location);
}

Node* InterpreterAssembler::RegisterLocationInRegisterList(
    const RegListNodePair& reg_list, int index) {
  CSA_ASSERT(this,
             Uint32GreaterThan(reg_list.reg_count(), Int32Constant(index)));
  Node* offset = RegisterFrameOffset(IntPtrConstant(index));
  // Register indexes are negative, so subtract index from base location to get
  // location.
  return IntPtrSub(reg_list.base_reg_location(), offset);
}

void InterpreterAssembler::StoreRegister(Node* value, Register reg) {
  StoreNoWriteBarrier(
      MachineRepresentation::kTagged, GetInterpretedFramePointer(),
      IntPtrConstant(reg.ToOperand() << kPointerSizeLog2), value);
}

void InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
  StoreNoWriteBarrier(MachineRepresentation::kTagged,
                      GetInterpretedFramePointer(),
                      RegisterFrameOffset(reg_index), value);
}

void InterpreterAssembler::StoreAndTagRegister(Node* value, Register reg) {
  int offset = reg.ToOperand() << kPointerSizeLog2;
  StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
}

void InterpreterAssembler::StoreRegisterAtOperandIndex(Node* value,
                                                       int operand_index) {
  StoreRegister(value,
                BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
}

void InterpreterAssembler::StoreRegisterPairAtOperandIndex(Node* value1,
                                                           Node* value2,
                                                           int operand_index) {
  DCHECK_EQ(OperandType::kRegOutPair,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  Node* second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
}

void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
    Node* value1, Node* value2, Node* value3, int operand_index) {
  DCHECK_EQ(OperandType::kRegOutTriple,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  Node* first_reg_index =
      BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
  StoreRegister(value1, first_reg_index);
  Node* second_reg_index = NextRegister(first_reg_index);
  StoreRegister(value2, second_reg_index);
  Node* third_reg_index = NextRegister(second_reg_index);
  StoreRegister(value3, third_reg_index);
}

Node* InterpreterAssembler::NextRegister(Node* reg_index) {
  // Register indexes are negative, so the next index is minus one.
  return IntPtrAdd(reg_index, IntPtrConstant(-1));
}
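
// For example, a register pair whose first operand decodes to index -5
// occupies indexes -5 and -6, and a register triple additionally uses -7.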

Node* InterpreterAssembler::OperandOffset(int operand_index) {
  return IntPtrConstant(
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()));
}

Node* InterpreterAssembler::BytecodeOperandUnsignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
}

Node* InterpreterAssembler::BytecodeOperandSignedByte(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  Node* operand_offset = OperandOffset(operand_index);
  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
              IntPtrAdd(BytecodeOffset(), operand_offset), needs_poisoning);
}

Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
    int relative_offset, MachineType result_type,
    LoadSensitivity needs_poisoning) {
  static const int kMaxCount = 4;
  DCHECK(!TargetSupportsUnalignedAccess());

  int count;
  switch (result_type.representation()) {
    case MachineRepresentation::kWord16:
      count = 2;
      break;
    case MachineRepresentation::kWord32:
      count = 4;
      break;
    default:
      UNREACHABLE();
      break;
  }
  MachineType msb_type =
      result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();

#if V8_TARGET_LITTLE_ENDIAN
  const int kStep = -1;
  int msb_offset = count - 1;
#elif V8_TARGET_BIG_ENDIAN
  const int kStep = 1;
  int msb_offset = 0;
#else
#error "Unknown Architecture"
#endif

  // Read the most significant byte into bytes[0] and then, in order, down to
  // the least significant byte in bytes[count - 1].
  DCHECK_LE(count, kMaxCount);
  Node* bytes[kMaxCount];
  for (int i = 0; i < count; i++) {
    MachineType machine_type = (i == 0) ? msb_type : MachineType::Uint8();
    Node* offset = IntPtrConstant(relative_offset + msb_offset + i * kStep);
    Node* array_offset = IntPtrAdd(BytecodeOffset(), offset);
    bytes[i] = Load(machine_type, BytecodeArrayTaggedPointer(), array_offset,
                    needs_poisoning);
  }

  // Pack LSB to MSB.
  Node* result = bytes[--count];
  for (int i = 1; --count >= 0; i++) {
    Node* shift = Int32Constant(i * kBitsPerByte);
    Node* value = Word32Shl(bytes[count], shift);
    result = Word32Or(value, result);
  }
  return result;
}
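
// For a 16-bit operand on a little-endian target this issues two byte loads:
// the byte at offset + 1 is loaded as the (possibly signed) most significant
// byte, the byte at offset + 0 as the unsigned least significant byte, and
// the operand is reassembled as (msb << 8) | lsb.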

Node* InterpreterAssembler::BytecodeOperandUnsignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16(),
                                        needs_poisoning);
  }
}

Node* InterpreterAssembler::BytecodeOperandSignedShort(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(
      OperandSize::kShort,
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16(),
                                        needs_poisoning);
  }
}

Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32(),
                                        needs_poisoning);
  }
}

Node* InterpreterAssembler::BytecodeOperandSignedQuad(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
  DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
                                    bytecode_, operand_index, operand_scale()));
  int operand_offset =
      Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
  if (TargetSupportsUnalignedAccess()) {
    return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
                IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
                needs_poisoning);
  } else {
    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32(),
                                        needs_poisoning);
  }
}

Node* InterpreterAssembler::BytecodeSignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(!Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandSignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandSignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

Node* InterpreterAssembler::BytecodeUnsignedOperand(
    int operand_index, OperandSize operand_size,
    LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsUnsignedOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  switch (operand_size) {
    case OperandSize::kByte:
      return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
    case OperandSize::kShort:
      return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
    case OperandSize::kQuad:
      return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
    case OperandSize::kNone:
      UNREACHABLE();
  }
  return nullptr;
}

Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
  DCHECK_EQ(OperandType::kRegCount,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandFlag(int operand_index) {
  DCHECK_EQ(OperandType::kFlag8,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandUImm(int operand_index) {
  DCHECK_EQ(OperandType::kUImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandUImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandUImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
  DCHECK_EQ(OperandType::kImm,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeSignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
  return SmiFromInt32(BytecodeOperandImm(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandIdxInt32(int operand_index) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
  return ChangeUint32ToWord(BytecodeOperandIdxInt32(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
  return SmiTag(BytecodeOperandIdx(operand_index));
}

Node* InterpreterAssembler::BytecodeOperandConstantPoolIdx(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK_EQ(OperandType::kIdx,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
}

Node* InterpreterAssembler::BytecodeOperandReg(
    int operand_index, LoadSensitivity needs_poisoning) {
  DCHECK(Bytecodes::IsRegisterOperandType(
      Bytecodes::GetOperandType(bytecode_, operand_index)));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeInt32ToIntPtr(
      BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
}

Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
  DCHECK_EQ(OperandType::kRuntimeId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kShort);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::BytecodeOperandNativeContextIndex(
    int operand_index) {
  DCHECK_EQ(OperandType::kNativeContextIndex,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  return ChangeUint32ToWord(
      BytecodeUnsignedOperand(operand_index, operand_size));
}

Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
  DCHECK_EQ(OperandType::kIntrinsicId,
            Bytecodes::GetOperandType(bytecode_, operand_index));
  OperandSize operand_size =
      Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
  DCHECK_EQ(operand_size, OperandSize::kByte);
  return BytecodeUnsignedOperand(operand_index, operand_size);
}

Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
                                        BytecodeArray::kConstantPoolOffset);
  return LoadFixedArrayElement(constant_pool, UncheckedCast<IntPtrT>(index),
                               LoadSensitivity::kCritical);
}

Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
  return SmiUntag(LoadConstantPoolEntry(index));
}

Node* InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
    int operand_index) {
  Node* index =
      BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
  return LoadConstantPoolEntry(index);
}

Node* InterpreterAssembler::LoadAndUntagConstantPoolEntryAtOperandIndex(
    int operand_index) {
  return SmiUntag(LoadConstantPoolEntryAtOperandIndex(operand_index));
}

Node* InterpreterAssembler::LoadFeedbackVector() {
  Node* function = LoadRegister(Register::function_closure());
  return CodeStubAssembler::LoadFeedbackVector(function);
}

void InterpreterAssembler::CallPrologue() {
  if (!Bytecodes::MakesCallAlongCriticalPath(bytecode_)) {
    // Bytecodes that make a call along the critical path save the bytecode
    // offset in the bytecode handler's prologue. For other bytecodes, if
    // there are multiple calls in the bytecode handler, you need to spill
    // before each of them, unless SaveBytecodeOffset has explicitly been called
    // in a path that dominates _all_ of those calls (which we don't track).
    SaveBytecodeOffset();
  }

  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    DCHECK_NULL(stack_pointer_before_call_);
    stack_pointer_before_call_ = LoadStackPointer();
  }
  bytecode_array_valid_ = false;
  made_call_ = true;
}

void InterpreterAssembler::CallEpilogue() {
  if (FLAG_debug_code && !disable_stack_check_across_call_) {
    Node* stack_pointer_after_call = LoadStackPointer();
    Node* stack_pointer_before_call = stack_pointer_before_call_;
    stack_pointer_before_call_ = nullptr;
    AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
                        AbortReason::kUnexpectedStackPointer);
  }
}

void InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
                                              Node* slot_id) {
  Comment("increment call count");
  TNode<Smi> call_count = CAST(
      ToObject(LoadFeedbackVectorSlot(feedback_vector, slot_id, kPointerSize)));
  // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call
  // count are used as flags. To increment the call count by 1 we hence
  // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}.
  Node* new_count = SmiAdd(
      call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift));
  // Count is Smi, so we don't need a write barrier.
  StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count,
                          SKIP_WRITE_BARRIER, kPointerSize);
}
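
// For illustration only: if CallCountField::kShift were 1, the slot would hold
// a Smi equal to twice the logical call count plus the flag bit, and each call
// would add 2 to it.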

void InterpreterAssembler::CollectCallableFeedback(Node* target, Node* context,
                                                   Node* feedback_vector,
                                                   Node* slot_id) {
  Label extra_checks(this, Label::kDeferred), done(this);

  // Check if we have monomorphic {target} feedback already.
  TNode<HeapObject> feedback_element =
      ToStrongHeapObject(LoadFeedbackVectorSlot(feedback_vector, slot_id));
  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
  Comment("check if monomorphic");
  Node* is_monomorphic = WordEqual(target, feedback_value);
  GotoIf(is_monomorphic, &done);

  // Check if it is a megamorphic {target}.
  Comment("check if megamorphic");
  Node* is_megamorphic =
      WordEqual(feedback_element,
                HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
  Branch(is_megamorphic, &done, &extra_checks);

  BIND(&extra_checks);
  {
    Label initialize(this), mark_megamorphic(this);

    Comment("check if weak cell");
    Node* is_uninitialized = WordEqual(
        feedback_element,
        HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
    GotoIf(is_uninitialized, &initialize);
    CSA_ASSERT(this, IsWeakCell(feedback_element));

    // If the weak cell is cleared, we have a new chance to become monomorphic.
    Comment("check if weak cell is cleared");
    Node* is_smi = TaggedIsSmi(feedback_value);
    Branch(is_smi, &initialize, &mark_megamorphic);

    BIND(&initialize);
    {
      // Check if {target} is a JSFunction in the current native context.
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(target), &mark_megamorphic);
      // Check if the {target} is a JSFunction or JSBoundFunction
      // in the current native context.
      VARIABLE(var_current, MachineRepresentation::kTagged, target);
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        Node* current = var_current.value();
        CSA_ASSERT(this, TaggedIsNotSmi(current));
        Node* current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
               &if_function, &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          Node* current_context =
              LoadObjectField(current, JSFunction::kContextOffset);
          Node* current_native_context = LoadNativeContext(current_context);
          Branch(WordEqual(LoadNativeContext(context), current_native_context),
                 &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {target}.
          var_current.Bind(LoadObjectField(
              current, JSBoundFunction::kBoundTargetFunctionOffset));
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      CreateWeakCellInFeedbackVector(feedback_vector, slot_id, target);
      ReportFeedbackUpdate(feedback_vector, slot_id, "Call:Initialize");
      Goto(&done);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "Call:TransitionMegamorphic");
      Goto(&done);
    }
  }

  BIND(&done);
}
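
// In summary, the feedback slot moves from the uninitialized sentinel to a
// WeakCell holding the monomorphic {target} (a JSFunction, or a JSBoundFunction
// whose underlying function lives in the current native context) and finally
// to the megamorphic sentinel; only a cleared WeakCell may be re-initialized
// with a new target.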

void InterpreterAssembler::CollectCallFeedback(Node* target, Node* context,
                                               Node* feedback_vector,
                                               Node* slot_id) {
  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Collect the callable {target} feedback.
  CollectCallableFeedback(target, context, feedback_vector, slot_id);
}

void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, const RegListNodePair& args,
    ConvertReceiverMode receiver_mode) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);

  Node* args_count;
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The receiver is implied, so it is not in the argument list.
    args_count = args.reg_count();
  } else {
    // Subtract the receiver from the argument count.
    Node* receiver_count = Int32Constant(1);
    args_count = Int32Sub(args.reg_count(), receiver_count);
  }

  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), receiver_mode, InterpreterPushArgsMode::kOther);
  Node* code_target = HeapConstant(callable.code());

  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

template <class... TArgs>
void InterpreterAssembler::CallJSAndDispatch(Node* function, Node* context,
                                             Node* arg_count,
                                             ConvertReceiverMode receiver_mode,
                                             TArgs... args) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
         bytecode_ == Bytecode::kInvokeIntrinsic);
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
  Callable callable = CodeFactory::Call(isolate());
  Node* code_target = HeapConstant(callable.code());

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // The first argument (the receiver) is implied to be undefined.
    TailCallStubThenBytecodeDispatch(
        callable.descriptor(), code_target, context, function, arg_count,
        static_cast<Node*>(UndefinedConstant()), args...);
  } else {
    TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
                                     context, function, arg_count, args...);
  }
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

// Instantiate CallJSAndDispatch() for argument counts used by the interpreter
// generator.
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, Node* arg_count,
    ConvertReceiverMode receiver_mode);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, Node* arg_count,
    ConvertReceiverMode receiver_mode, Node*);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, Node* arg_count,
    ConvertReceiverMode receiver_mode, Node*, Node*);
template V8_EXPORT_PRIVATE void InterpreterAssembler::CallJSAndDispatch(
    Node* function, Node* context, Node* arg_count,
    ConvertReceiverMode receiver_mode, Node*, Node*, Node*);

void InterpreterAssembler::CallJSWithSpreadAndDispatch(
    Node* function, Node* context, const RegListNodePair& args, Node* slot_id,
    Node* feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
  CollectCallFeedback(function, context, feedback_vector, slot_id);
  Comment("call using CallWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenCall(
      isolate(), ConvertReceiverMode::kAny,
      InterpreterPushArgsMode::kWithFinalSpread);
  Node* code_target = HeapConstant(callable.code());

  Node* receiver_count = Int32Constant(1);
  Node* args_count = Int32Sub(args.reg_count(), receiver_count);
  TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
                                   args_count, args.base_reg_location(),
                                   function);
  // TailCallStubThenDispatch updates accumulator with result.
  accumulator_use_ = accumulator_use_ | AccumulatorUse::kWrite;
}

Node* InterpreterAssembler::Construct(Node* target, Node* context,
                                      Node* new_target,
                                      const RegListNodePair& args,
                                      Node* slot_id, Node* feedback_vector) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  VARIABLE(var_result, MachineRepresentation::kTagged);
  VARIABLE(var_site, MachineRepresentation::kTagged);
  Label extra_checks(this, Label::kDeferred), return_result(this, &var_result),
      construct(this), construct_array(this, &var_site);

  // Increment the call count.
  IncrementCallCount(feedback_vector, slot_id);

  // Check if we have monomorphic {new_target} feedback already.
  TNode<HeapObject> feedback_element =
      CAST(ToObject(LoadFeedbackVectorSlot(feedback_vector, slot_id)));
  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
  Branch(WordEqual(new_target, feedback_value), &construct, &extra_checks);

  BIND(&extra_checks);
  {
    Label check_allocation_site(this), check_initialized(this),
        initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    Node* is_megamorphic =
        WordEqual(feedback_element,
                  HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak cell");
    Node* feedback_element_map = LoadMap(feedback_element);
    GotoIfNot(IsWeakCellMap(feedback_element_map), &check_allocation_site);

    // If the weak cell is cleared, we have a new chance to become monomorphic.
    Comment("check if weak cell is cleared");
    Node* is_smi = TaggedIsSmi(feedback_value);
    Branch(is_smi, &initialize, &mark_megamorphic);

    BIND(&check_allocation_site);
    {
      // Check if it is an AllocationSite.
      Comment("check if allocation site");
      GotoIfNot(IsAllocationSiteMap(feedback_element_map), &check_initialized);

      // Make sure that {target} and {new_target} are the Array constructor.
      Node* array_function = LoadContextElement(LoadNativeContext(context),
                                                Context::ARRAY_FUNCTION_INDEX);
      GotoIfNot(WordEqual(target, array_function), &mark_megamorphic);
      GotoIfNot(WordEqual(new_target, array_function), &mark_megamorphic);
      var_site.Bind(feedback_element);
      Goto(&construct_array);
    }

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      Node* is_uninitialized = WordEqual(
          feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        Node* current = var_current.value();
        CSA_ASSERT(this, TaggedIsNotSmi(current));
        Node* current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
               &if_function, &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          Node* current_context =
              LoadObjectField(current, JSFunction::kContextOffset);
          Node* current_native_context = LoadNativeContext(current_context);
          Branch(WordEqual(LoadNativeContext(context), current_native_context),
                 &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current.Bind(LoadObjectField(
              current, JSBoundFunction::kBoundTargetFunctionOffset));
          Goto(&loop);
        }
      }
      BIND(&done_loop);

      // Create an AllocationSite if {target} and {new_target} refer
      // to the current native context's Array constructor.
      Label create_allocation_site(this), create_weak_cell(this);
      GotoIfNot(WordEqual(target, new_target), &create_weak_cell);
      Node* array_function = LoadContextElement(LoadNativeContext(context),
                                                Context::ARRAY_FUNCTION_INDEX);
      Branch(WordEqual(target, array_function), &create_allocation_site,
             &create_weak_cell);

      BIND(&create_allocation_site);
      {
        var_site.Bind(CreateAllocationSiteInFeedbackVector(feedback_vector,
                                                           SmiTag(slot_id)));
        ReportFeedbackUpdate(feedback_vector, slot_id,
                             "Construct:CreateAllocationSite");
        Goto(&construct_array);
      }

      BIND(&create_weak_cell);
      {
        CreateWeakCellInFeedbackVector(feedback_vector, slot_id, new_target);
        ReportFeedbackUpdate(feedback_vector, slot_id,
                             "Construct:CreateWeakCell");
        Goto(&construct);
      }
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "Construct:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct_array);
  {
    // TODO(bmeurer): Introduce a dedicated builtin to deal with the Array
    // constructor feedback collection inside of Ignition.
    Comment("call using ConstructArray builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kArrayFunction);
    Node* code_target = HeapConstant(callable.code());
    var_result.Bind(CallStub(callable.descriptor(), code_target, context,
                             args.reg_count(), new_target, target,
                             var_site.value(), args.base_reg_location()));
    Goto(&return_result);
  }

  BIND(&construct);
  {
    // TODO(bmeurer): Remove the generic type_info parameter from the Construct.
    Comment("call using Construct builtin");
    Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
        isolate(), InterpreterPushArgsMode::kOther);
    Node* code_target = HeapConstant(callable.code());
    var_result.Bind(CallStub(callable.descriptor(), code_target, context,
                             args.reg_count(), new_target, target,
                             UndefinedConstant(), args.base_reg_location()));
    Goto(&return_result);
  }

  BIND(&return_result);
  return var_result.value();
}
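
// Compared to CollectCallableFeedback above, the Construct feedback slot has
// one extra state: when {target} and {new_target} are both the current native
// context's Array constructor, an AllocationSite is stored instead of a
// WeakCell and construction is routed through the ConstructArray path with
// that site.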
1097 
ConstructWithSpread(Node * target,Node * context,Node * new_target,const RegListNodePair & args,Node * slot_id,Node * feedback_vector)1098 Node* InterpreterAssembler::ConstructWithSpread(Node* target, Node* context,
1099                                                 Node* new_target,
1100                                                 const RegListNodePair& args,
1101                                                 Node* slot_id,
1102                                                 Node* feedback_vector) {
1103   // TODO(bmeurer): Unify this with the Construct bytecode feedback
1104   // above once we have a way to pass the AllocationSite to the Array
1105   // constructor _and_ spread the last argument at the same time.
1106   DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
1107   Label extra_checks(this, Label::kDeferred), construct(this);
1108 
1109   // Increment the call count.
1110   IncrementCallCount(feedback_vector, slot_id);
1111 
  // Check if we have monomorphic {new_target} feedback already.
  TNode<HeapObject> feedback_element =
      CAST(ToObject(LoadFeedbackVectorSlot(feedback_vector, slot_id)));
  Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
  Branch(WordEqual(new_target, feedback_value), &construct, &extra_checks);

  BIND(&extra_checks);
  {
    Label check_initialized(this), initialize(this), mark_megamorphic(this);

    // Check if it is a megamorphic {new_target}.
    Comment("check if megamorphic");
    Node* is_megamorphic =
        WordEqual(feedback_element,
                  HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
    GotoIf(is_megamorphic, &construct);

    Comment("check if weak cell");
    Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
                                   LoadRoot(Heap::kWeakCellMapRootIndex));
    GotoIfNot(is_weak_cell, &check_initialized);

    // If the weak cell is cleared, we have a new chance to become monomorphic.
    Comment("check if weak cell is cleared");
    Node* is_smi = TaggedIsSmi(feedback_value);
    Branch(is_smi, &initialize, &mark_megamorphic);

    BIND(&check_initialized);
    {
      // Check if it is uninitialized.
      Comment("check if uninitialized");
      Node* is_uninitialized = WordEqual(
          feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
      Branch(is_uninitialized, &initialize, &mark_megamorphic);
    }

    BIND(&initialize);
    {
      Comment("check if function in same native context");
      GotoIf(TaggedIsSmi(new_target), &mark_megamorphic);
      // Check if the {new_target} is a JSFunction or JSBoundFunction
      // in the current native context.
      VARIABLE(var_current, MachineRepresentation::kTagged, new_target);
      Label loop(this, &var_current), done_loop(this);
      Goto(&loop);
      BIND(&loop);
      {
        Label if_boundfunction(this), if_function(this);
        Node* current = var_current.value();
        CSA_ASSERT(this, TaggedIsNotSmi(current));
        Node* current_instance_type = LoadInstanceType(current);
        GotoIf(InstanceTypeEqual(current_instance_type, JS_BOUND_FUNCTION_TYPE),
               &if_boundfunction);
        Branch(InstanceTypeEqual(current_instance_type, JS_FUNCTION_TYPE),
               &if_function, &mark_megamorphic);

        BIND(&if_function);
        {
          // Check that the JSFunction {current} is in the current native
          // context.
          Node* current_context =
              LoadObjectField(current, JSFunction::kContextOffset);
          Node* current_native_context = LoadNativeContext(current_context);
          Branch(WordEqual(LoadNativeContext(context), current_native_context),
                 &done_loop, &mark_megamorphic);
        }

        BIND(&if_boundfunction);
        {
          // Continue with the [[BoundTargetFunction]] of {current}.
          var_current.Bind(LoadObjectField(
              current, JSBoundFunction::kBoundTargetFunctionOffset));
          Goto(&loop);
        }
      }
      BIND(&done_loop);
      CreateWeakCellInFeedbackVector(feedback_vector, slot_id, new_target);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:Initialize");
      Goto(&construct);
    }

    BIND(&mark_megamorphic);
    {
      // MegamorphicSentinel is an immortal immovable object so
      // write-barrier is not needed.
      Comment("transition to megamorphic");
      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
      StoreFeedbackVectorSlot(
          feedback_vector, slot_id,
          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
          SKIP_WRITE_BARRIER);
      ReportFeedbackUpdate(feedback_vector, slot_id,
                           "ConstructWithSpread:TransitionMegamorphic");
      Goto(&construct);
    }
  }

  BIND(&construct);
  Comment("call using ConstructWithSpread builtin");
  Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
      isolate(), InterpreterPushArgsMode::kWithFinalSpread);
  Node* code_target = HeapConstant(callable.code());
  return CallStub(callable.descriptor(), code_target, context, args.reg_count(),
                  new_target, target, UndefinedConstant(),
                  args.base_reg_location());
}

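// CallRuntimeN calls a runtime function whose id is only known at run time: it
// resolves the C entry point from the runtime function table and invokes it
// through the InterpreterCEntry stub.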
Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
                                         const RegListNodePair& args,
                                         int result_size) {
  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
  Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
  Node* code_target = HeapConstant(callable.code());

  // Get the function entry from the function id.
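  // The table is a contiguous array of Runtime::Function records, so the
  // entry address is table + function_id * sizeof(Runtime::Function) +
  // offsetof(Runtime::Function, entry).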
  Node* function_table = ExternalConstant(
      ExternalReference::runtime_function_table_address(isolate()));
  Node* function_offset =
      Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
  Node* function =
      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
  Node* function_entry =
      Load(MachineType::Pointer(), function,
           IntPtrConstant(offsetof(Runtime::Function, entry)));

  return CallStubR(callable.descriptor(), result_size, code_target, context,
                   args.reg_count(), args.base_reg_location(), function_entry);
}

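// The interrupt budget is a word32 counter stored directly in the
// BytecodeArray. Backward updates subtract |weight| and, if the budget drops
// below zero, call Runtime::kInterrupt and reset the budget; forward updates
// only ever add to it, so no check is needed.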
void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
  Comment("[ UpdateInterruptBudget");

  Node* budget_offset =
      IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);

  // Assert that the weight is positive (negative weights should be implemented
  // as backward updates).
  CSA_ASSERT(this, Int32GreaterThanOrEqual(weight, Int32Constant(0)));

  // Update budget by |weight| and check if it reaches zero.
  Variable new_budget(this, MachineRepresentation::kWord32);
  Node* old_budget =
      Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
  // Make sure we include the current bytecode in the budget calculation.
  Node* budget_after_bytecode =
      Int32Sub(old_budget, Int32Constant(CurrentBytecodeSize()));

  if (backward) {
    new_budget.Bind(Int32Sub(budget_after_bytecode, weight));

    Node* condition =
        Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
    Label ok(this), interrupt_check(this, Label::kDeferred);
    Branch(condition, &ok, &interrupt_check);

    // Perform interrupt and reset budget.
    BIND(&interrupt_check);
    {
      CallRuntime(Runtime::kInterrupt, GetContext());
      new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
      Goto(&ok);
    }

    BIND(&ok);
  } else {
    // For a forward jump, we know we only increase the interrupt budget, so
    // no need to check if it's below zero.
    new_budget.Bind(Int32Add(budget_after_bytecode, weight));
  }

  // Update budget.
  StoreNoWriteBarrier(MachineRepresentation::kWord32,
                      BytecodeArrayTaggedPointer(), budget_offset,
                      new_budget.value());
  Comment("] UpdateInterruptBudget");
}

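// Advance() moves the bytecode offset: the parameterless form skips over the
// current bytecode, the delta forms move by an arbitrary number of bytes
// (backwards for backward jumps).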
Node* InterpreterAssembler::Advance() { return Advance(CurrentBytecodeSize()); }

Node* InterpreterAssembler::Advance(int delta) {
  return Advance(IntPtrConstant(delta));
}

Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
#endif
  Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
                               : IntPtrAdd(BytecodeOffset(), delta);
  bytecode_offset_.Bind(next_offset);
  return next_offset;
}

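// Jump updates the interrupt budget by the jump distance, advances the
// bytecode offset by |delta| (backwards if requested), and dispatches to the
// handler of the bytecode at the new offset.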
Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
  DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));

  UpdateInterruptBudget(TruncateIntPtrToInt32(delta), backward);
  Node* new_bytecode_offset = Advance(delta, backward);
  Node* target_bytecode = LoadBytecode(new_bytecode_offset);
  return DispatchToBytecode(target_bytecode, new_bytecode_offset);
}

Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }

Node* InterpreterAssembler::JumpBackward(Node* delta) {
  return Jump(delta, true);
}

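// JumpConditional jumps by |delta| when |condition| holds; otherwise it falls
// through to a regular Dispatch() to the next bytecode.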
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
  Label match(this), no_match(this);

  Branch(condition, &match, &no_match);
  BIND(&match);
  Jump(delta);
  BIND(&no_match);
  Dispatch();
}

void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
  JumpConditional(WordEqual(lhs, rhs), delta);
}

void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
                                              Node* delta) {
  JumpConditional(WordNotEqual(lhs, rhs), delta);
}

Node* InterpreterAssembler::LoadBytecode(Node* bytecode_offset) {
  Node* bytecode =
      Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
  return ChangeUint32ToWord(bytecode);
}

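// Many bytecodes are immediately followed by a Star (store accumulator into a
// register). When lookahead is enabled for the current bytecode, a trailing
// Star is executed inline here and the bytecode after it is returned, saving
// one full dispatch.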
Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
  Label do_inline_star(this), done(this);

  Variable var_bytecode(this, MachineType::PointerRepresentation());
  var_bytecode.Bind(target_bytecode);

  Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
  Node* is_star = WordEqual(target_bytecode, star_bytecode);
  Branch(is_star, &do_inline_star, &done);

  BIND(&do_inline_star);
  {
    InlineStar();
    var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
    Goto(&done);
  }
  BIND(&done);
  return var_bytecode.value();
}

void InterpreterAssembler::InlineStar() {
  Bytecode previous_bytecode = bytecode_;
  AccumulatorUse previous_acc_use = accumulator_use_;

  bytecode_ = Bytecode::kStar;
  accumulator_use_ = AccumulatorUse::kNone;

#ifdef V8_TRACE_IGNITION
  TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
#endif
  StoreRegister(GetAccumulator(),
                BytecodeOperandReg(0, LoadSensitivity::kSafe));

  DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));

  Advance();
  bytecode_ = previous_bytecode;
  accumulator_use_ = previous_acc_use;
}

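// Dispatch advances past the current bytecode, optionally folds in a trailing
// Star via StarDispatchLookahead, and tail-calls the handler of the next
// bytecode.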
Node* InterpreterAssembler::Dispatch() {
  Comment("========= Dispatch");
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  Node* target_offset = Advance();
  Node* target_bytecode = LoadBytecode(target_offset);

  if (Bytecodes::IsStarLookahead(bytecode_, operand_scale_)) {
    target_bytecode = StarDispatchLookahead(target_bytecode);
  }
  return DispatchToBytecode(target_bytecode, BytecodeOffset());
}

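// Loads the handler entry point for |target_bytecode| from the dispatch table
// and jumps to it.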
Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
                                               Node* new_bytecode_offset) {
  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(target_bytecode);
  }

  Node* target_code_entry =
      Load(MachineType::Pointer(), DispatchTableRawPointer(),
           TimesPointerSize(target_bytecode));

  return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset,
                                        target_bytecode);
}

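// Dispatches to a handler given as a Code object rather than as a raw entry
// point; the entry is computed by skipping the Code header.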
Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
                                                      Node* bytecode_offset,
                                                      Node* target_bytecode) {
  // TODO(ishell): Add CSA::CodeEntryPoint(code).
  Node* handler_entry =
      IntPtrAdd(BitcastTaggedToWord(handler),
                IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
  return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset,
                                        target_bytecode);
}

Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
    Node* handler_entry, Node* bytecode_offset, Node* target_bytecode) {
  InterpreterDispatchDescriptor descriptor(isolate());
  // Propagate speculation poisoning.
  Node* poisoned_handler_entry = WordPoisonOnSpeculation(handler_entry);
  return TailCallBytecodeDispatch(
      descriptor, poisoned_handler_entry, GetAccumulatorUnchecked(),
      bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
}

void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
  // Dispatching a wide bytecode requires treating the prefix
  // bytecode as a base index into the dispatch table and dispatching
  // the bytecode that follows relative to this base.
  //
  //   Indices 0-255 correspond to bytecodes with operand_scale == 0
  //   Indices 256-511 correspond to bytecodes with operand_scale == 1
  //   Indices 512-767 correspond to bytecodes with operand_scale == 2
  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
  Node* next_bytecode_offset = Advance(1);
  Node* next_bytecode = LoadBytecode(next_bytecode_offset);

  if (FLAG_trace_ignition_dispatches) {
    TraceBytecodeDispatch(next_bytecode);
  }

  Node* base_index;
  switch (operand_scale) {
    case OperandScale::kDouble:
      base_index = IntPtrConstant(1 << kBitsPerByte);
      break;
    case OperandScale::kQuadruple:
      base_index = IntPtrConstant(2 << kBitsPerByte);
      break;
    default:
      UNREACHABLE();
  }
  Node* target_index = IntPtrAdd(base_index, next_bytecode);
  Node* target_code_entry =
      Load(MachineType::Pointer(), DispatchTableRawPointer(),
           TimesPointerSize(target_index));

  DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset,
                                 next_bytecode);
}

void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // TODO(rmcilroy): Investigate whether it is worth supporting self
  // optimization of primitive functions like FullCodegen.

  // Update profiling count by the number of bytes between the end of the
  // current bytecode and the start of the first one, to simulate backedge to
  // start of function.
  //
  // With headers and current offset, the bytecode array layout looks like:
  //
  //           <---------- simulated backedge ----------
  // | header | first bytecode | .... | return bytecode |
  //  |<------ current offset ------->
  //  ^ tagged bytecode array pointer
  //
  // UpdateInterruptBudget already handles adding the bytecode size to the
  // length of the back-edge, so we just have to correct for the non-zero offset
  // of the first bytecode.

  const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
  Node* profiling_weight = Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
                                    Int32Constant(kFirstBytecodeOffset));
  UpdateInterruptBudget(profiling_weight, true);
}

Node* InterpreterAssembler::LoadOSRNestingLevel() {
  return LoadObjectField(BytecodeArrayTaggedPointer(),
                         BytecodeArray::kOSRNestingLevelOffset,
                         MachineType::Int8());
}

void InterpreterAssembler::Abort(AbortReason abort_reason) {
  disable_stack_check_across_call_ = true;
  Node* abort_id = SmiConstant(abort_reason);
  CallRuntime(Runtime::kAbort, GetContext(), abort_id);
  disable_stack_check_across_call_ = false;
}

void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
                                               AbortReason abort_reason) {
  Label ok(this), abort(this, Label::kDeferred);
  Branch(WordEqual(lhs, rhs), &ok, &abort);

  BIND(&abort);
  Abort(abort_reason);
  Goto(&ok);

  BIND(&ok);
}

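// Debugger support: if the debugger has requested a frame restart (the restart
// frame pointer is non-null), the FrameDropperTrampoline is called and is not
// expected to return.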
void InterpreterAssembler::MaybeDropFrames(Node* context) {
  Node* restart_fp_address =
      ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));

  Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
  Node* null = IntPtrConstant(0);

  Label ok(this), drop_frames(this);
  Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);

  BIND(&drop_frames);
  // We don't expect this call to return since the frame dropper tears down
  // the stack and jumps into the function on the target frame to restart it.
  CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
  Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
  Goto(&ok);

  BIND(&ok);
}

void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
  CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
              SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
}

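// The dispatch counters form a (kLast + 1) x (kLast + 1) matrix of uintptr_t
// values, stored row-major: row = source bytecode, column = target bytecode.
// Counters saturate at the maximum uintptr_t value instead of wrapping.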
void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
  Node* counters_table = ExternalConstant(
      ExternalReference::interpreter_dispatch_counters(isolate()));
  Node* source_bytecode_table_index = IntPtrConstant(
      static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));

  Node* counter_offset =
      TimesPointerSize(IntPtrAdd(source_bytecode_table_index, target_bytecode));
  Node* old_counter =
      Load(MachineType::IntPtr(), counters_table, counter_offset);

  Label counter_ok(this), counter_saturated(this, Label::kDeferred);

  Node* counter_reached_max = WordEqual(
      old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
  Branch(counter_reached_max, &counter_saturated, &counter_ok);

  BIND(&counter_ok);
  {
    Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
    StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
                        counter_offset, new_counter);
    Goto(&counter_saturated);
  }

  BIND(&counter_saturated);
}

// static
bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
  return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
  return true;
#else
#error "Unknown Architecture"
#endif
}

void InterpreterAssembler::AbortIfRegisterCountInvalid(Node* register_file,
                                                       Node* register_count) {
  Node* array_size = LoadAndUntagFixedArrayBaseLength(register_file);

  Label ok(this), abort(this, Label::kDeferred);
  Branch(UintPtrLessThanOrEqual(register_count, array_size), &ok, &abort);

  BIND(&abort);
  Abort(AbortReason::kInvalidRegisterFileInGenerator);
  Goto(&ok);

  BIND(&ok);
}

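// Copies the first |reg_count| interpreter registers into |array|, with
// register i stored at array element i (its operand is computed as
// Register(0).ToOperand() - i). For example, with reg_count == 3 the loop
// writes r0, r1 and r2 into elements 0, 1 and 2.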
Node* InterpreterAssembler::ExportRegisterFile(
    Node* array, const RegListNodePair& registers) {
  Node* register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, register_count);
  }

  Variable var_index(this, MachineType::PointerRepresentation());
  var_index.Bind(IntPtrConstant(0));

  // Iterate over register file and write values into array.
  // The mapping of register to array index must match that used in
  // BytecodeGraphBuilder::VisitResumeGenerator.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    Node* index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    Node* value = LoadRegister(reg_index);

    StoreFixedArrayElement(array, index, value);

    var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}

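// The inverse of ExportRegisterFile: copies array elements back into the
// register file and overwrites each element with the stale-register sentinel
// so the array does not keep the exported values alive.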
Node* InterpreterAssembler::ImportRegisterFile(
    Node* array, const RegListNodePair& registers) {
  Node* register_count = ChangeUint32ToWord(registers.reg_count());
  if (FLAG_debug_code) {
    CSA_ASSERT(this, IntPtrEqual(registers.base_reg_location(),
                                 RegisterLocation(Register(0))));
    AbortIfRegisterCountInvalid(array, register_count);
  }

  Variable var_index(this, MachineType::PointerRepresentation());
  var_index.Bind(IntPtrConstant(0));

  // Iterate over array and write values into register file.  Also erase the
  // array contents to not keep them alive artificially.
  Label loop(this, &var_index), done_loop(this);
  Goto(&loop);
  BIND(&loop);
  {
    Node* index = var_index.value();
    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);

    Node* value = LoadFixedArrayElement(array, index);

    Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
    StoreRegister(value, reg_index);

    StoreFixedArrayElement(array, index,
                           LoadRoot(Heap::kStaleRegisterRootIndex));

    var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
    Goto(&loop);
  }
  BIND(&done_loop);

  return array;
}

int InterpreterAssembler::CurrentBytecodeSize() const {
  return Bytecodes::Size(bytecode_, operand_scale_);
}

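// Shared implementation of the ToNumber and ToNumeric bytecodes: converts the
// accumulator and records BinaryOperationFeedback: kSignedSmall for Smis,
// kNumber for HeapNumbers, kBigInt for BigInts (kToNumeric mode only), and
// kAny when the conversion builtin has to be called.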
void InterpreterAssembler::ToNumberOrNumeric(Object::Conversion mode) {
  Node* object = GetAccumulator();
  Node* context = GetContext();

  Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
  Variable var_result(this, MachineRepresentation::kTagged);
  Label if_done(this), if_objectissmi(this), if_objectisheapnumber(this),
      if_objectisother(this, Label::kDeferred);

  GotoIf(TaggedIsSmi(object), &if_objectissmi);
  Branch(IsHeapNumber(object), &if_objectisheapnumber, &if_objectisother);

  BIND(&if_objectissmi);
  {
    var_result.Bind(object);
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
    Goto(&if_done);
  }

  BIND(&if_objectisheapnumber);
  {
    var_result.Bind(object);
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
    Goto(&if_done);
  }

  BIND(&if_objectisother);
  {
    auto builtin = Builtins::kNonNumberToNumber;
    if (mode == Object::Conversion::kToNumeric) {
      builtin = Builtins::kNonNumberToNumeric;
      // Special case for collecting BigInt feedback.
      Label not_bigint(this);
      GotoIfNot(IsBigInt(object), &not_bigint);
      {
        var_result.Bind(object);
        var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kBigInt));
        Goto(&if_done);
      }
      BIND(&not_bigint);
    }

    // Convert {object} by calling out to the appropriate builtin.
    var_result.Bind(CallBuiltin(builtin, context, object));
    var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kAny));
    Goto(&if_done);
  }

  BIND(&if_done);

  // Record the type feedback collected for {object}.
  Node* slot_index = BytecodeOperandIdx(0);
  Node* feedback_vector = LoadFeedbackVector();
  UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);

  SetAccumulator(var_result.value());
  Dispatch();
}

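// Deserializes the handler for the current bytecode via
// Runtime::kInterpreterDeserializeLazy and re-dispatches the bytecode to the
// freshly deserialized handler.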
void InterpreterAssembler::DeserializeLazyAndDispatch() {
  Node* context = GetContext();
  Node* bytecode_offset = BytecodeOffset();
  Node* bytecode = LoadBytecode(bytecode_offset);

  Node* target_handler =
      CallRuntime(Runtime::kInterpreterDeserializeLazy, context,
                  SmiTag(bytecode), SmiConstant(operand_scale()));
  DispatchToBytecodeHandler(target_handler, bytecode_offset, bytecode);
}

}  // namespace interpreter
}  // namespace internal
}  // namespace v8