1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_X64
6 
7 #include "src/api/api-arguments.h"
8 #include "src/base/bits-iterator.h"
9 #include "src/base/iterator.h"
10 #include "src/codegen/code-factory.h"
11 #include "src/codegen/interface-descriptors-inl.h"
12 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
13 #include "src/codegen/macro-assembler-inl.h"
14 #include "src/codegen/register-configuration.h"
15 #include "src/codegen/x64/assembler-x64.h"
16 #include "src/common/globals.h"
17 #include "src/deoptimizer/deoptimizer.h"
18 #include "src/execution/frame-constants.h"
19 #include "src/execution/frames.h"
20 #include "src/heap/heap-inl.h"
21 #include "src/logging/counters.h"
22 #include "src/objects/cell.h"
23 #include "src/objects/code.h"
24 #include "src/objects/debug-objects.h"
25 #include "src/objects/foreign.h"
26 #include "src/objects/heap-number.h"
27 #include "src/objects/js-generator.h"
28 #include "src/objects/objects-inl.h"
29 #include "src/objects/smi.h"
30 
31 #if V8_ENABLE_WEBASSEMBLY
32 #include "src/wasm/baseline/liftoff-assembler-defs.h"
33 #include "src/wasm/object-access.h"
34 #include "src/wasm/wasm-constants.h"
35 #include "src/wasm/wasm-linkage.h"
36 #include "src/wasm/wasm-objects.h"
37 #endif  // V8_ENABLE_WEBASSEMBLY
38 
39 namespace v8 {
40 namespace internal {
41 
42 #define __ ACCESS_MASM(masm)
43 
Generate_Adaptor(MacroAssembler * masm,Address address)44 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
45   __ LoadAddress(kJavaScriptCallExtraArg1Register,
46                  ExternalReference::Create(address));
47   __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
48           RelocInfo::CODE_TARGET);
49 }
50 
GenerateTailCallToReturnedCode(MacroAssembler * masm,Runtime::FunctionId function_id,JumpMode jump_mode=JumpMode::kJump)51 static void GenerateTailCallToReturnedCode(
52     MacroAssembler* masm, Runtime::FunctionId function_id,
53     JumpMode jump_mode = JumpMode::kJump) {
54   // ----------- S t a t e -------------
55   //  -- rax : actual argument count
56   //  -- rdx : new target (preserved for callee)
57   //  -- rdi : target function (preserved for callee)
58   // -----------------------------------
59   ASM_CODE_COMMENT(masm);
60   {
61     FrameScope scope(masm, StackFrame::INTERNAL);
62     // Push a copy of the target function, the new target and the actual
63     // argument count.
64     __ Push(kJavaScriptCallTargetRegister);
65     __ Push(kJavaScriptCallNewTargetRegister);
66     __ SmiTag(kJavaScriptCallArgCountRegister);
67     __ Push(kJavaScriptCallArgCountRegister);
68     // Function is also the parameter to the runtime call.
69     __ Push(kJavaScriptCallTargetRegister);
70 
71     __ CallRuntime(function_id, 1);
72     __ movq(rcx, rax);
73 
74     // Restore target function, new target and actual argument count.
75     __ Pop(kJavaScriptCallArgCountRegister);
76     __ SmiUntag(kJavaScriptCallArgCountRegister);
77     __ Pop(kJavaScriptCallNewTargetRegister);
78     __ Pop(kJavaScriptCallTargetRegister);
79   }
80   static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
81   __ JumpCodeObject(rcx, jump_mode);
82 }
83 
84 namespace {
85 
86 enum class ArgumentsElementType {
87   kRaw,    // Push arguments as they are.
88   kHandle  // Dereference arguments before pushing.
89 };
90 
Generate_PushArguments(MacroAssembler * masm,Register array,Register argc,Register scratch,ArgumentsElementType element_type)91 void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
92                             Register scratch,
93                             ArgumentsElementType element_type) {
94   DCHECK(!AreAliased(array, argc, scratch, kScratchRegister));
95   Register counter = scratch;
96   Label loop, entry;
97   if (kJSArgcIncludesReceiver) {
98     __ leaq(counter, Operand(argc, -kJSArgcReceiverSlots));
99   } else {
100     __ movq(counter, argc);
101   }
102   __ jmp(&entry);
103   __ bind(&loop);
104   Operand value(array, counter, times_system_pointer_size, 0);
105   if (element_type == ArgumentsElementType::kHandle) {
106     __ movq(kScratchRegister, value);
107     value = Operand(kScratchRegister, 0);
108   }
109   __ Push(value);
110   __ bind(&entry);
111   __ decq(counter);
112   __ j(greater_equal, &loop, Label::kNear);
113 }
114 
Generate_JSBuiltinsConstructStubHelper(MacroAssembler * masm)115 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
116   // ----------- S t a t e -------------
117   //  -- rax: number of arguments
118   //  -- rdi: constructor function
119   //  -- rdx: new target
120   //  -- rsi: context
121   // -----------------------------------
122 
123   Label stack_overflow;
124   __ StackOverflowCheck(rax, &stack_overflow, Label::kFar);
125 
126   // Enter a construct frame.
127   {
128     FrameScope scope(masm, StackFrame::CONSTRUCT);
129 
130     // Preserve the incoming parameters on the stack.
131     __ SmiTag(rcx, rax);
132     __ Push(rsi);
133     __ Push(rcx);
134 
135     // TODO(victorgomes): When the arguments adaptor is completely removed, we
136     // should get the formal parameter count and copy the arguments in its
137     // correct position (including any undefined), instead of delaying this to
138     // InvokeFunction.
139 
140     // Set up pointer to first argument (skip receiver).
141     __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
142                                   kSystemPointerSize));
143     // Copy arguments to the expression stack.
144     // rbx: Pointer to start of arguments.
145     // rax: Number of arguments.
146     Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
147     // The receiver for the builtin/api call.
148     __ PushRoot(RootIndex::kTheHoleValue);
149 
150     // Call the function.
151     // rax: number of arguments (untagged)
152     // rdi: constructor function
153     // rdx: new target
154     __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall);
155 
156     // Restore smi-tagged arguments count from the frame.
157     __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
158 
159     // Leave construct frame.
160   }
161 
162   // Remove caller arguments from the stack and return.
163   __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
164                    kJSArgcIncludesReceiver
165                        ? TurboAssembler::kCountIncludesReceiver
166                        : TurboAssembler::kCountExcludesReceiver);
167 
168   __ ret(0);
169 
170   __ bind(&stack_overflow);
171   {
172     FrameScope scope(masm, StackFrame::INTERNAL);
173     __ CallRuntime(Runtime::kThrowStackOverflow);
174     __ int3();  // This should be unreachable.
175   }
176 }
177 
178 }  // namespace
179 
180 // The construct stub for ES5 constructor functions and ES6 class constructors.
Generate_JSConstructStubGeneric(MacroAssembler * masm)181 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
182   // ----------- S t a t e -------------
183   //  -- rax: number of arguments (untagged)
184   //  -- rdi: constructor function
185   //  -- rdx: new target
186   //  -- rsi: context
187   //  -- sp[...]: constructor arguments
188   // -----------------------------------
189 
190   FrameScope scope(masm, StackFrame::MANUAL);
191   // Enter a construct frame.
192   __ EnterFrame(StackFrame::CONSTRUCT);
193   Label post_instantiation_deopt_entry, not_create_implicit_receiver;
194 
195   // Preserve the incoming parameters on the stack.
196   __ SmiTag(rcx, rax);
197   __ Push(rsi);
198   __ Push(rcx);
199   __ Push(rdi);
200   __ PushRoot(RootIndex::kTheHoleValue);
201   __ Push(rdx);
202 
203   // ----------- S t a t e -------------
204   //  --         sp[0*kSystemPointerSize]: new target
205   //  --         sp[1*kSystemPointerSize]: padding
206   //  -- rdi and sp[2*kSystemPointerSize]: constructor function
207   //  --         sp[3*kSystemPointerSize]: argument count
208   //  --         sp[4*kSystemPointerSize]: context
209   // -----------------------------------
210 
211   __ LoadTaggedPointerField(
212       rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
213   __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
214   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
215   __ JumpIfIsInRange(rbx, kDefaultDerivedConstructor, kDerivedConstructor,
216                      &not_create_implicit_receiver, Label::kNear);
217 
218   // If not derived class constructor: Allocate the new receiver object.
219   __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
220   __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
221   __ jmp(&post_instantiation_deopt_entry, Label::kNear);
222 
223   // Else: use TheHoleValue as receiver for constructor call
224   __ bind(&not_create_implicit_receiver);
225   __ LoadRoot(rax, RootIndex::kTheHoleValue);
226 
227   // ----------- S t a t e -------------
228   //  -- rax                          implicit receiver
229   //  -- Slot 4 / sp[0*kSystemPointerSize]  new target
230   //  -- Slot 3 / sp[1*kSystemPointerSize]  padding
231   //  -- Slot 2 / sp[2*kSystemPointerSize]  constructor function
232   //  -- Slot 1 / sp[3*kSystemPointerSize]  number of arguments (tagged)
233   //  -- Slot 0 / sp[4*kSystemPointerSize]  context
234   // -----------------------------------
235   // Deoptimizer enters here.
236   masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
237       masm->pc_offset());
238   __ bind(&post_instantiation_deopt_entry);
239 
240   // Restore new target.
241   __ Pop(rdx);
242 
243   // Push the allocated receiver to the stack.
244   __ Push(rax);
245 
246   // We need two copies because we may have to return the original one
247   // and the calling conventions dictate that the called function pops the
248   // receiver. The second copy is pushed after the arguments, we saved in r8
249   // since rax needs to store the number of arguments before
250   // InvokingFunction.
251   __ movq(r8, rax);
252 
253   // Set up pointer to first argument (skip receiver).
254   __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
255                                 kSystemPointerSize));
256 
257   // Restore constructor function and argument count.
258   __ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
259   __ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));
260 
261   // Check if we have enough stack space to push all arguments.
262   // Argument count in rax.
263   Label stack_overflow;
264   __ StackOverflowCheck(rax, &stack_overflow);
265 
266   // TODO(victorgomes): When the arguments adaptor is completely removed, we
267   // should get the formal parameter count and copy the arguments in its
268   // correct position (including any undefined), instead of delaying this to
269   // InvokeFunction.
270 
271   // Copy arguments to the expression stack.
272   // rbx: Pointer to start of arguments.
273   // rax: Number of arguments.
274   Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
275 
276   // Push implicit receiver.
277   __ Push(r8);
278 
279   // Call the function.
280   __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall);
281 
282   // ----------- S t a t e -------------
283   //  -- rax                 constructor result
284   //  -- sp[0*kSystemPointerSize]  implicit receiver
285   //  -- sp[1*kSystemPointerSize]  padding
286   //  -- sp[2*kSystemPointerSize]  constructor function
287   //  -- sp[3*kSystemPointerSize]  number of arguments
288   //  -- sp[4*kSystemPointerSize]  context
289   // -----------------------------------
290 
291   // Store offset of return address for deoptimizer.
292   masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
293       masm->pc_offset());
294 
295   // If the result is an object (in the ECMA sense), we should get rid
296   // of the receiver and use the result; see ECMA-262 section 13.2.2-7
297   // on page 74.
298   Label use_receiver, do_throw, leave_and_return, check_result;
299 
300   // If the result is undefined, we'll use the implicit receiver. Otherwise we
301   // do a smi check and fall through to check if the return value is a valid
302   // receiver.
303   __ JumpIfNotRoot(rax, RootIndex::kUndefinedValue, &check_result,
304                    Label::kNear);
305 
306   // Throw away the result of the constructor invocation and use the
307   // on-stack receiver as the result.
308   __ bind(&use_receiver);
309   __ movq(rax, Operand(rsp, 0 * kSystemPointerSize));
310   __ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);
311 
312   __ bind(&leave_and_return);
313   // Restore the arguments count.
314   __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
315   __ LeaveFrame(StackFrame::CONSTRUCT);
316   // Remove caller arguments from the stack and return.
317   __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
318                    kJSArgcIncludesReceiver
319                        ? TurboAssembler::kCountIncludesReceiver
320                        : TurboAssembler::kCountExcludesReceiver);
321   __ ret(0);
322 
323   // If the result is a smi, it is *not* an object in the ECMA sense.
324   __ bind(&check_result);
325   __ JumpIfSmi(rax, &use_receiver, Label::kNear);
326 
327   // If the type of the result (stored in its map) is less than
328   // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
329   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
330   __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
331   __ j(above_equal, &leave_and_return, Label::kNear);
332   __ jmp(&use_receiver);
333 
334   __ bind(&do_throw);
335   // Restore context from the frame.
336   __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
337   __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
338   // We don't return here.
339   __ int3();
340 
341   __ bind(&stack_overflow);
342   // Restore the context from the frame.
343   __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
344   __ CallRuntime(Runtime::kThrowStackOverflow);
345   // This should be unreachable.
346   __ int3();
347 }
348 
Generate_JSBuiltinsConstructStub(MacroAssembler * masm)349 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
350   Generate_JSBuiltinsConstructStubHelper(masm);
351 }
352 
Generate_ConstructedNonConstructable(MacroAssembler * masm)353 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
354   FrameScope scope(masm, StackFrame::INTERNAL);
355   __ Push(rdi);
356   __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
357 }
358 
359 namespace {
360 
361 // Called with the native C calling convention. The corresponding function
362 // signature is either:
363 //   using JSEntryFunction = GeneratedCode<Address(
364 //       Address root_register_value, Address new_target, Address target,
365 //       Address receiver, intptr_t argc, Address** argv)>;
366 // or
367 //   using JSEntryFunction = GeneratedCode<Address(
368 //       Address root_register_value, MicrotaskQueue* microtask_queue)>;
Generate_JSEntryVariant(MacroAssembler * masm,StackFrame::Type type,Builtin entry_trampoline)369 void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
370                              Builtin entry_trampoline) {
371   Label invoke, handler_entry, exit;
372   Label not_outermost_js, not_outermost_js_2;
373 
374   {
375     NoRootArrayScope uninitialized_root_register(masm);
376     // Set up frame.
377     __ pushq(rbp);
378     __ movq(rbp, rsp);
379 
380     // Push the stack frame type.
381     __ Push(Immediate(StackFrame::TypeToMarker(type)));
382     // Reserve a slot for the context. It is filled after the root register has
383     // been set up.
384     __ AllocateStackSpace(kSystemPointerSize);
385     // Save callee-saved registers (X64/X32/Win64 calling conventions).
386     __ pushq(r12);
387     __ pushq(r13);
388     __ pushq(r14);
389     __ pushq(r15);
390 #ifdef V8_TARGET_OS_WIN
391     __ pushq(rdi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
392     __ pushq(rsi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
393 #endif
394     __ pushq(rbx);
395 
396 #ifdef V8_TARGET_OS_WIN
397     // On Win64 XMM6-XMM15 are callee-save.
398     __ AllocateStackSpace(EntryFrameConstants::kXMMRegistersBlockSize);
399     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
400     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
401     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
402     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
403     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
404     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
405     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
406     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
407     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
408     __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
409     STATIC_ASSERT(EntryFrameConstants::kCalleeSaveXMMRegisters == 10);
410     STATIC_ASSERT(EntryFrameConstants::kXMMRegistersBlockSize ==
411                   EntryFrameConstants::kXMMRegisterSize *
412                       EntryFrameConstants::kCalleeSaveXMMRegisters);
413 #endif
414 
415     // Initialize the root register.
416     // C calling convention. The first argument is passed in arg_reg_1.
417     __ movq(kRootRegister, arg_reg_1);
418 
419 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
420     // Initialize the pointer cage base register.
421     __ LoadRootRelative(kPtrComprCageBaseRegister,
422                         IsolateData::cage_base_offset());
423 #endif
424   }
425 
426   // Save copies of the top frame descriptor on the stack.
427   ExternalReference c_entry_fp = ExternalReference::Create(
428       IsolateAddressId::kCEntryFPAddress, masm->isolate());
429   {
430     Operand c_entry_fp_operand = masm->ExternalReferenceAsOperand(c_entry_fp);
431     __ Push(c_entry_fp_operand);
432 
433     // Clear c_entry_fp, now we've pushed its previous value to the stack.
434     // If the c_entry_fp is not already zero and we don't clear it, the
435     // SafeStackFrameIterator will assume we are executing C++ and miss the JS
436     // frames on top.
437     __ Move(c_entry_fp_operand, 0);
438   }
439 
440   // Store the context address in the previously-reserved slot.
441   ExternalReference context_address = ExternalReference::Create(
442       IsolateAddressId::kContextAddress, masm->isolate());
443   __ Load(kScratchRegister, context_address);
444   static constexpr int kOffsetToContextSlot = -2 * kSystemPointerSize;
445   __ movq(Operand(rbp, kOffsetToContextSlot), kScratchRegister);
446 
447   // If this is the outermost JS call, set js_entry_sp value.
448   ExternalReference js_entry_sp = ExternalReference::Create(
449       IsolateAddressId::kJSEntrySPAddress, masm->isolate());
450   __ Load(rax, js_entry_sp);
451   __ testq(rax, rax);
452   __ j(not_zero, &not_outermost_js);
453   __ Push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
454   __ movq(rax, rbp);
455   __ Store(js_entry_sp, rax);
456   Label cont;
457   __ jmp(&cont);
458   __ bind(&not_outermost_js);
459   __ Push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
460   __ bind(&cont);
461 
462   // Jump to a faked try block that does the invoke, with a faked catch
463   // block that sets the pending exception.
464   __ jmp(&invoke);
465   __ bind(&handler_entry);
466 
467   // Store the current pc as the handler offset. It's used later to create the
468   // handler table.
469   masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
470 
471   // Caught exception: Store result (exception) in the pending exception
472   // field in the JSEnv and return a failure sentinel.
473   ExternalReference pending_exception = ExternalReference::Create(
474       IsolateAddressId::kPendingExceptionAddress, masm->isolate());
475   __ Store(pending_exception, rax);
476   __ LoadRoot(rax, RootIndex::kException);
477   __ jmp(&exit);
478 
479   // Invoke: Link this frame into the handler chain.
480   __ bind(&invoke);
481   __ PushStackHandler();
482 
483   // Invoke the function by calling through JS entry trampoline builtin and
484   // pop the faked function when we return.
485   Handle<Code> trampoline_code =
486       masm->isolate()->builtins()->code_handle(entry_trampoline);
487   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
488 
489   // Unlink this frame from the handler chain.
490   __ PopStackHandler();
491 
492   __ bind(&exit);
493   // Check if the current stack frame is marked as the outermost JS frame.
494   __ Pop(rbx);
495   __ cmpq(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
496   __ j(not_equal, &not_outermost_js_2);
497   __ Move(kScratchRegister, js_entry_sp);
498   __ movq(Operand(kScratchRegister, 0), Immediate(0));
499   __ bind(&not_outermost_js_2);
500 
501   // Restore the top frame descriptor from the stack.
502   {
503     Operand c_entry_fp_operand = masm->ExternalReferenceAsOperand(c_entry_fp);
504     __ Pop(c_entry_fp_operand);
505   }
506 
507   // Restore callee-saved registers (X64 conventions).
508 #ifdef V8_TARGET_OS_WIN
509   // On Win64 XMM6-XMM15 are callee-save
510   __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
511   __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
512   __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
513   __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
514   __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
515   __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
516   __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
517   __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
518   __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
519   __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
520   __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
521 #endif
522 
523   __ popq(rbx);
524 #ifdef V8_TARGET_OS_WIN
525   // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI.
526   __ popq(rsi);
527   __ popq(rdi);
528 #endif
529   __ popq(r15);
530   __ popq(r14);
531   __ popq(r13);
532   __ popq(r12);
533   __ addq(rsp, Immediate(2 * kSystemPointerSize));  // remove markers
534 
535   // Restore frame pointer and return.
536   __ popq(rbp);
537   __ ret(0);
538 }
539 
540 }  // namespace
541 
Generate_JSEntry(MacroAssembler * masm)542 void Builtins::Generate_JSEntry(MacroAssembler* masm) {
543   Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
544 }
545 
Generate_JSConstructEntry(MacroAssembler * masm)546 void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
547   Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
548                           Builtin::kJSConstructEntryTrampoline);
549 }
550 
Generate_JSRunMicrotasksEntry(MacroAssembler * masm)551 void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
552   Generate_JSEntryVariant(masm, StackFrame::ENTRY,
553                           Builtin::kRunMicrotasksTrampoline);
554 }
555 
Generate_JSEntryTrampolineHelper(MacroAssembler * masm,bool is_construct)556 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
557                                              bool is_construct) {
558   // Expects six C++ function parameters.
559   // - Address root_register_value
560   // - Address new_target (tagged Object pointer)
561   // - Address function (tagged JSFunction pointer)
562   // - Address receiver (tagged Object pointer)
563   // - intptr_t argc
564   // - Address** argv (pointer to array of tagged Object pointers)
565   // (see Handle::Invoke in execution.cc).
566 
567   // Open a C++ scope for the FrameScope.
568   {
569     // Platform specific argument handling. After this, the stack contains
570     // an internal frame and the pushed function and receiver, and
571     // register rax and rbx holds the argument count and argument array,
572     // while rdi holds the function pointer, rsi the context, and rdx the
573     // new.target.
574 
575     // MSVC parameters in:
576     // rcx        : root_register_value
577     // rdx        : new_target
578     // r8         : function
579     // r9         : receiver
580     // [rsp+0x20] : argc
581     // [rsp+0x28] : argv
582     //
583     // GCC parameters in:
584     // rdi : root_register_value
585     // rsi : new_target
586     // rdx : function
587     // rcx : receiver
588     // r8  : argc
589     // r9  : argv
590 
591     __ movq(rdi, arg_reg_3);
592     __ Move(rdx, arg_reg_2);
593     // rdi : function
594     // rdx : new_target
595 
596     // Clear the context before we push it when entering the internal frame.
597     __ Move(rsi, 0);
598 
599     // Enter an internal frame.
600     FrameScope scope(masm, StackFrame::INTERNAL);
601 
602     // Setup the context (we need to use the caller context from the isolate).
603     ExternalReference context_address = ExternalReference::Create(
604         IsolateAddressId::kContextAddress, masm->isolate());
605     __ movq(rsi, masm->ExternalReferenceAsOperand(context_address));
606 
607     // Push the function onto the stack.
608     __ Push(rdi);
609 
610 #ifdef V8_TARGET_OS_WIN
611     // Load the previous frame pointer to access C arguments on stack
612     __ movq(kScratchRegister, Operand(rbp, 0));
613     // Load the number of arguments and setup pointer to the arguments.
614     __ movq(rax, Operand(kScratchRegister, EntryFrameConstants::kArgcOffset));
615     __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
616 #else   // V8_TARGET_OS_WIN
617     // Load the number of arguments and setup pointer to the arguments.
618     __ movq(rax, r8);
619     __ movq(rbx, r9);
620     __ movq(r9, arg_reg_4);  // Temporarily saving the receiver.
621 #endif  // V8_TARGET_OS_WIN
622 
623     // Current stack contents:
624     // [rsp + kSystemPointerSize]     : Internal frame
625     // [rsp]                          : function
626     // Current register contents:
627     // rax : argc
628     // rbx : argv
629     // rsi : context
630     // rdi : function
631     // rdx : new.target
632     // r9  : receiver
633 
634     // Check if we have enough stack space to push all arguments.
635     // Argument count in rax.
636     Label enough_stack_space, stack_overflow;
637     __ StackOverflowCheck(rax, &stack_overflow, Label::kNear);
638     __ jmp(&enough_stack_space, Label::kNear);
639 
640     __ bind(&stack_overflow);
641     __ CallRuntime(Runtime::kThrowStackOverflow);
642     // This should be unreachable.
643     __ int3();
644 
645     __ bind(&enough_stack_space);
646 
647     // Copy arguments to the stack.
648     // Register rbx points to array of pointers to handle locations.
649     // Push the values of these handles.
650     // rbx: Pointer to start of arguments.
651     // rax: Number of arguments.
652     Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kHandle);
653 
654     // Push the receiver.
655     __ Push(r9);
656 
657     // Invoke the builtin code.
658     Handle<Code> builtin = is_construct
659                                ? BUILTIN_CODE(masm->isolate(), Construct)
660                                : masm->isolate()->builtins()->Call();
661     __ Call(builtin, RelocInfo::CODE_TARGET);
662 
663     // Exit the internal frame. Notice that this also removes the empty
664     // context and the function left on the stack by the code
665     // invocation.
666   }
667 
668   __ ret(0);
669 }
670 
Generate_JSEntryTrampoline(MacroAssembler * masm)671 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
672   Generate_JSEntryTrampolineHelper(masm, false);
673 }
674 
Generate_JSConstructEntryTrampoline(MacroAssembler * masm)675 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
676   Generate_JSEntryTrampolineHelper(masm, true);
677 }
678 
Generate_RunMicrotasksTrampoline(MacroAssembler * masm)679 void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
680   // arg_reg_2: microtask_queue
681   __ movq(RunMicrotasksDescriptor::MicrotaskQueueRegister(), arg_reg_2);
682   __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
683 }
684 
AssertCodeIsBaselineAllowClobber(MacroAssembler * masm,Register code,Register scratch)685 static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
686                                              Register code, Register scratch) {
687   // Verify that the code kind is baseline code via the CodeKind.
688   __ movl(scratch, FieldOperand(code, Code::kFlagsOffset));
689   __ DecodeField<Code::KindField>(scratch);
690   __ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
691   __ Assert(equal, AbortReason::kExpectedBaselineData);
692 }
693 
AssertCodeIsBaseline(MacroAssembler * masm,Register code,Register scratch)694 static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
695                                  Register scratch) {
696   DCHECK(!AreAliased(code, scratch));
697   return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
698 }
699 
GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler * masm,Register sfi_data,Register scratch1,Label * is_baseline)700 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
701                                                     Register sfi_data,
702                                                     Register scratch1,
703                                                     Label* is_baseline) {
704   ASM_CODE_COMMENT(masm);
705   Label done;
706   __ LoadMap(scratch1, sfi_data);
707 
708   __ CmpInstanceType(scratch1, CODET_TYPE);
709   if (FLAG_debug_code) {
710     Label not_baseline;
711     __ j(not_equal, &not_baseline);
712     if (V8_EXTERNAL_CODE_SPACE_BOOL) {
713       __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
714       AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
715     } else {
716       AssertCodeIsBaseline(masm, sfi_data, scratch1);
717     }
718     __ j(equal, is_baseline);
719     __ bind(&not_baseline);
720   } else {
721     __ j(equal, is_baseline);
722   }
723 
724   __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
725   __ j(not_equal, &done, Label::kNear);
726 
727   __ LoadTaggedPointerField(
728       sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
729 
730   __ bind(&done);
731 }
732 
733 // static
Generate_ResumeGeneratorTrampoline(MacroAssembler * masm)734 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
735   // ----------- S t a t e -------------
736   //  -- rax    : the value to pass to the generator
737   //  -- rdx    : the JSGeneratorObject to resume
738   //  -- rsp[0] : return address
739   // -----------------------------------
740 
741   // Store input value into generator object.
742   __ StoreTaggedField(
743       FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
744   Register object = WriteBarrierDescriptor::ObjectRegister();
745   __ Move(object, rdx);
746   __ RecordWriteField(object, JSGeneratorObject::kInputOrDebugPosOffset, rax,
747                       WriteBarrierDescriptor::SlotAddressRegister(),
748                       SaveFPRegsMode::kIgnore);
749   // Check that rdx is still valid, RecordWrite might have clobbered it.
750   __ AssertGeneratorObject(rdx);
751 
752   Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;
753 
754   // Load suspended function and context.
755   __ LoadTaggedPointerField(
756       rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
757   __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
758 
759   // Flood function if we are stepping.
760   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
761   Label stepping_prepared;
762   ExternalReference debug_hook =
763       ExternalReference::debug_hook_on_function_call_address(masm->isolate());
764   Operand debug_hook_operand = masm->ExternalReferenceAsOperand(debug_hook);
765   __ cmpb(debug_hook_operand, Immediate(0));
766   __ j(not_equal, &prepare_step_in_if_stepping);
767 
768   // Flood function if we need to continue stepping in the suspended generator.
769   ExternalReference debug_suspended_generator =
770       ExternalReference::debug_suspended_generator_address(masm->isolate());
771   Operand debug_suspended_generator_operand =
772       masm->ExternalReferenceAsOperand(debug_suspended_generator);
773   __ cmpq(rdx, debug_suspended_generator_operand);
774   __ j(equal, &prepare_step_in_suspended_generator);
775   __ bind(&stepping_prepared);
776 
777   // Check the stack for overflow. We are not trying to catch interruptions
778   // (i.e. debug break and preemption) here, so check the "real stack limit".
779   Label stack_overflow;
780   __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
781   __ j(below, &stack_overflow);
782 
783   // Pop return address.
784   __ PopReturnAddressTo(rax);
785 
786   // ----------- S t a t e -------------
787   //  -- rax    : return address
788   //  -- rdx    : the JSGeneratorObject to resume
789   //  -- rdi    : generator function
790   //  -- rsi    : generator context
791   // -----------------------------------
792 
793   // Copy the function arguments from the generator object's register file.
794   __ LoadTaggedPointerField(
795       rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
796   __ movzxwq(
797       rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
798   if (kJSArgcIncludesReceiver) {
799     __ decq(rcx);
800   }
801   __ LoadTaggedPointerField(
802       rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
803 
804   {
805     Label done_loop, loop;
806     __ bind(&loop);
807     __ decq(rcx);
808     __ j(less, &done_loop, Label::kNear);
809     __ PushTaggedAnyField(
810         FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
811         decompr_scratch1);
812     __ jmp(&loop);
813     __ bind(&done_loop);
814 
815     // Push the receiver.
816     __ PushTaggedPointerField(
817         FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
818         decompr_scratch1);
819   }
820 
821   // Underlying function needs to have bytecode available.
822   if (FLAG_debug_code) {
823     Label is_baseline, ok;
824     __ LoadTaggedPointerField(
825         rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
826     __ LoadTaggedPointerField(
827         rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
828     GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
829                                             &is_baseline);
830     __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
831     __ Assert(equal, AbortReason::kMissingBytecodeArray);
832     __ jmp(&ok);
833 
834     __ bind(&is_baseline);
835     __ CmpObjectType(rcx, CODET_TYPE, rcx);
836     __ Assert(equal, AbortReason::kMissingBytecodeArray);
837 
838     __ bind(&ok);
839   }
840 
841   // Resume (Ignition/TurboFan) generator object.
842   {
843     __ PushReturnAddressFrom(rax);
844     __ LoadTaggedPointerField(
845         rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
846     __ movzxwq(rax, FieldOperand(
847                         rax, SharedFunctionInfo::kFormalParameterCountOffset));
848     // We abuse new.target both to indicate that this is a resume call and to
849     // pass in the generator object.  In ordinary calls, new.target is always
850     // undefined because generator functions are non-constructable.
851     static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
852     __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
853     __ JumpCodeTObject(rcx);
854   }
855 
856   __ bind(&prepare_step_in_if_stepping);
857   {
858     FrameScope scope(masm, StackFrame::INTERNAL);
859     __ Push(rdx);
860     __ Push(rdi);
861     // Push hole as receiver since we do not use it for stepping.
862     __ PushRoot(RootIndex::kTheHoleValue);
863     __ CallRuntime(Runtime::kDebugOnFunctionCall);
864     __ Pop(rdx);
865     __ LoadTaggedPointerField(
866         rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
867   }
868   __ jmp(&stepping_prepared);
869 
870   __ bind(&prepare_step_in_suspended_generator);
871   {
872     FrameScope scope(masm, StackFrame::INTERNAL);
873     __ Push(rdx);
874     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
875     __ Pop(rdx);
876     __ LoadTaggedPointerField(
877         rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
878   }
879   __ jmp(&stepping_prepared);
880 
881   __ bind(&stack_overflow);
882   {
883     FrameScope scope(masm, StackFrame::INTERNAL);
884     __ CallRuntime(Runtime::kThrowStackOverflow);
885     __ int3();  // This should be unreachable.
886   }
887 }
888 
ReplaceClosureCodeWithOptimizedCode(MacroAssembler * masm,Register optimized_code,Register closure,Register scratch1,Register slot_address)889 static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
890                                                 Register optimized_code,
891                                                 Register closure,
892                                                 Register scratch1,
893                                                 Register slot_address) {
894   ASM_CODE_COMMENT(masm);
895   DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
896   DCHECK_EQ(closure, kJSFunctionRegister);
897   // Store the optimized code in the closure.
898   __ AssertCodeT(optimized_code);
899   __ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
900                       optimized_code);
901   // Write barrier clobbers scratch1 below.
902   Register value = scratch1;
903   __ movq(value, optimized_code);
904 
905   __ RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
906                       SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
907                       SmiCheck::kOmit);
908 }
909 
LeaveInterpreterFrame(MacroAssembler * masm,Register scratch1,Register scratch2)910 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
911                                   Register scratch2) {
912   ASM_CODE_COMMENT(masm);
913   Register params_size = scratch1;
914   // Get the size of the formal parameters + receiver (in bytes).
915   __ movq(params_size,
916           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
917   __ movl(params_size,
918           FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));
919 
920   Register actual_params_size = scratch2;
921   // Compute the size of the actual parameters + receiver (in bytes).
922   __ movq(actual_params_size,
923           Operand(rbp, StandardFrameConstants::kArgCOffset));
924   __ leaq(actual_params_size,
925           Operand(actual_params_size, times_system_pointer_size,
926                   kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
927 
928   // If actual is bigger than formal, then we should use it to free up the stack
929   // arguments.
930   Label corrected_args_count;
931   __ cmpq(params_size, actual_params_size);
932   __ j(greater_equal, &corrected_args_count, Label::kNear);
933   __ movq(params_size, actual_params_size);
934   __ bind(&corrected_args_count);
935 
936   // Leave the frame (also dropping the register file).
937   __ leave();
938 
939   // Drop receiver + arguments.
940   __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
941                    TurboAssembler::kCountIncludesReceiver);
942 }
943 
944 // Tail-call |function_id| if |actual_marker| == |expected_marker|
TailCallRuntimeIfMarkerEquals(MacroAssembler * masm,Register actual_marker,OptimizationMarker expected_marker,Runtime::FunctionId function_id)945 static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
946                                           Register actual_marker,
947                                           OptimizationMarker expected_marker,
948                                           Runtime::FunctionId function_id) {
949   ASM_CODE_COMMENT(masm);
950   Label no_match;
951   __ Cmp(actual_marker, expected_marker);
952   __ j(not_equal, &no_match);
953   GenerateTailCallToReturnedCode(masm, function_id);
954   __ bind(&no_match);
955 }
956 
MaybeOptimizeCode(MacroAssembler * masm,Register feedback_vector,Register optimization_marker)957 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
958                               Register optimization_marker) {
959   // ----------- S t a t e -------------
960   //  -- rax : actual argument count
961   //  -- rdx : new target (preserved for callee if needed, and caller)
962   //  -- rdi : target function (preserved for callee if needed, and caller)
963   //  -- feedback vector (preserved for caller if needed)
964   //  -- optimization_marker : a Smi containing a non-zero optimization marker.
965   // -----------------------------------
966   ASM_CODE_COMMENT(masm);
967   DCHECK(!AreAliased(feedback_vector, rdx, rdi, optimization_marker));
968 
969   // TODO(v8:8394): The logging of first execution will break if
970   // feedback vectors are not allocated. We need to find a different way of
971   // logging these events if required.
972   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
973                                 OptimizationMarker::kLogFirstExecution,
974                                 Runtime::kFunctionFirstExecution);
975   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
976                                 OptimizationMarker::kCompileOptimized,
977                                 Runtime::kCompileOptimized_NotConcurrent);
978   TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
979                                 OptimizationMarker::kCompileOptimizedConcurrent,
980                                 Runtime::kCompileOptimized_Concurrent);
981 
982   // Marker should be one of LogFirstExecution / CompileOptimized /
983   // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
984   // here.
985   if (FLAG_debug_code) {
986     __ int3();
987   }
988 }
989 
TailCallOptimizedCodeSlot(MacroAssembler * masm,Register optimized_code_entry,Register closure,Register scratch1,Register scratch2,JumpMode jump_mode)990 static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
991                                       Register optimized_code_entry,
992                                       Register closure, Register scratch1,
993                                       Register scratch2, JumpMode jump_mode) {
994   // ----------- S t a t e -------------
995   //  rax : actual argument count
996   //  rdx : new target (preserved for callee if needed, and caller)
997   //  rsi : current context, used for the runtime call
998   //  rdi : target function (preserved for callee if needed, and caller)
999   // -----------------------------------
1000   ASM_CODE_COMMENT(masm);
1001   DCHECK_EQ(closure, kJSFunctionRegister);
1002   DCHECK(!AreAliased(rax, rdx, closure, rsi, optimized_code_entry, scratch1,
1003                      scratch2));
1004 
1005   Label heal_optimized_code_slot;
1006 
1007   // If the optimized code is cleared, go to runtime to update the optimization
1008   // marker field.
1009   __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
1010 
1011   // Check if the optimized code is marked for deopt. If it is, call the
1012   // runtime to clear it.
1013   __ AssertCodeT(optimized_code_entry);
1014   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
1015     __ testl(FieldOperand(optimized_code_entry,
1016                           CodeDataContainer::kKindSpecificFlagsOffset),
1017              Immediate(1 << Code::kMarkedForDeoptimizationBit));
1018   } else {
1019     __ LoadTaggedPointerField(
1020         scratch1,
1021         FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
1022     __ testl(
1023         FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
1024         Immediate(1 << Code::kMarkedForDeoptimizationBit));
1025   }
1026   __ j(not_zero, &heal_optimized_code_slot);
1027 
1028   // Optimized code is good, get it into the closure and link the closure into
1029   // the optimized functions list, then tail call the optimized code.
1030   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
1031                                       scratch1, scratch2);
1032   static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
1033   __ Move(rcx, optimized_code_entry);
1034   __ JumpCodeTObject(rcx, jump_mode);
1035 
1036   // Optimized code slot contains deoptimized code or code is cleared and
1037   // optimized code marker isn't updated. Evict the code, update the marker
1038   // and re-enter the closure's code.
1039   __ bind(&heal_optimized_code_slot);
1040   GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot,
1041                                  jump_mode);
1042 }
1043 
1044 // Advance the current bytecode offset. This simulates what all bytecode
1045 // handlers do upon completion of the underlying operation. Will bail out to a
1046 // label if the bytecode (without prefix) is a return bytecode. Will not advance
1047 // the bytecode offset if the current bytecode is a JumpLoop, instead just
1048 // re-executing the JumpLoop to jump to the correct bytecode.
AdvanceBytecodeOffsetOrReturn(MacroAssembler * masm,Register bytecode_array,Register bytecode_offset,Register bytecode,Register scratch1,Register scratch2,Label * if_return)1049 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
1050                                           Register bytecode_array,
1051                                           Register bytecode_offset,
1052                                           Register bytecode, Register scratch1,
1053                                           Register scratch2, Label* if_return) {
1054   ASM_CODE_COMMENT(masm);
1055   Register bytecode_size_table = scratch1;
1056 
1057   // The bytecode offset value will be increased by one in wide and extra wide
1058   // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
1059   // will restore the original bytecode. In order to simplify the code, we have
1060   // a backup of it.
1061   Register original_bytecode_offset = scratch2;
1062   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
1063                      bytecode_size_table, original_bytecode_offset));
1064 
1065   __ movq(original_bytecode_offset, bytecode_offset);
1066 
1067   __ Move(bytecode_size_table,
1068           ExternalReference::bytecode_size_table_address());
1069 
1070   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
1071   Label process_bytecode, extra_wide;
1072   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
1073   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
1074   STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
1075   STATIC_ASSERT(3 ==
1076                 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
1077   __ cmpb(bytecode, Immediate(0x3));
1078   __ j(above, &process_bytecode, Label::kNear);
1079   // The code to load the next bytecode is common to both wide and extra wide.
1080   // We can hoist them up here. incl has to happen before testb since it
1081   // modifies the ZF flag.
1082   __ incl(bytecode_offset);
1083   __ testb(bytecode, Immediate(0x1));
1084   __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
1085   __ j(not_equal, &extra_wide, Label::kNear);
1086 
1087   // Update table to the wide scaled table.
1088   __ addq(bytecode_size_table,
1089           Immediate(kByteSize * interpreter::Bytecodes::kBytecodeCount));
1090   __ jmp(&process_bytecode, Label::kNear);
1091 
1092   __ bind(&extra_wide);
1093   // Update table to the extra wide scaled table.
1094   __ addq(bytecode_size_table,
1095           Immediate(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
1096 
1097   __ bind(&process_bytecode);
1098 
1099 // Bailout to the return label if this is a return bytecode.
1100 #define JUMP_IF_EQUAL(NAME)                                             \
1101   __ cmpb(bytecode,                                                     \
1102           Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
1103   __ j(equal, if_return, Label::kFar);
1104   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
1105 #undef JUMP_IF_EQUAL
1106 
1107   // If this is a JumpLoop, re-execute it to perform the jump to the beginning
1108   // of the loop.
1109   Label end, not_jump_loop;
1110   __ cmpb(bytecode,
1111           Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
1112   __ j(not_equal, &not_jump_loop, Label::kNear);
1113   // We need to restore the original bytecode_offset since we might have
1114   // increased it to skip the wide / extra-wide prefix bytecode.
1115   __ movq(bytecode_offset, original_bytecode_offset);
1116   __ jmp(&end, Label::kNear);
1117 
1118   __ bind(&not_jump_loop);
1119   // Otherwise, load the size of the current bytecode and advance the offset.
1120   __ movzxbl(kScratchRegister,
1121              Operand(bytecode_size_table, bytecode, times_1, 0));
1122   __ addl(bytecode_offset, kScratchRegister);
1123 
1124   __ bind(&end);
1125 }
1126 
1127 // Read off the optimization state in the feedback vector and check if there
1128 // is optimized code or a optimization marker that needs to be processed.
LoadOptimizationStateAndJumpIfNeedsProcessing(MacroAssembler * masm,Register optimization_state,Register feedback_vector,Label * has_optimized_code_or_marker)1129 static void LoadOptimizationStateAndJumpIfNeedsProcessing(
1130     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1131     Label* has_optimized_code_or_marker) {
1132   ASM_CODE_COMMENT(masm);
1133   __ movl(optimization_state,
1134           FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1135   __ testl(
1136       optimization_state,
1137       Immediate(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
1138   __ j(not_zero, has_optimized_code_or_marker);
1139 }
1140 
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(MacroAssembler * masm,Register optimization_state,Register feedback_vector,Register closure,JumpMode jump_mode=JumpMode::kJump)1141 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1142     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1143     Register closure, JumpMode jump_mode = JumpMode::kJump) {
1144   ASM_CODE_COMMENT(masm);
1145   DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
1146   Label maybe_has_optimized_code;
1147   __ testl(
1148       optimization_state,
1149       Immediate(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
1150   __ j(zero, &maybe_has_optimized_code);
1151 
1152   Register optimization_marker = optimization_state;
1153   __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
1154   MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
1155 
1156   __ bind(&maybe_has_optimized_code);
1157   Register optimized_code_entry = optimization_state;
1158   __ LoadAnyTaggedField(
1159       optimized_code_entry,
1160       FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
1161   TailCallOptimizedCodeSlot(masm, optimized_code_entry, closure, r9,
1162                             WriteBarrierDescriptor::SlotAddressRegister(),
1163                             jump_mode);
1164 }
1165 
1166 // Generate code for entering a JS function with the interpreter.
1167 // On entry to the function the receiver and arguments have been pushed on the
1168 // stack left to right.
1169 //
1170 // The live registers are:
1171 //   o rax: actual argument count
1172 //   o rdi: the JS function object being called
1173 //   o rdx: the incoming new target or generator object
1174 //   o rsi: our context
1175 //   o rbp: the caller's frame pointer
1176 //   o rsp: stack pointer (pointing to return address)
1177 //
1178 // The function builds an interpreter frame. See InterpreterFrameConstants in
1179 // frame-constants.h for its layout.
Generate_InterpreterEntryTrampoline(MacroAssembler * masm)1180 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1181   Register closure = rdi;
1182   Register feedback_vector = rbx;
1183 
1184   // Get the bytecode array from the function object and load it into
1185   // kInterpreterBytecodeArrayRegister.
1186   __ LoadTaggedPointerField(
1187       kScratchRegister,
1188       FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1189   __ LoadTaggedPointerField(
1190       kInterpreterBytecodeArrayRegister,
1191       FieldOperand(kScratchRegister, SharedFunctionInfo::kFunctionDataOffset));
1192 
1193   Label is_baseline;
1194   GetSharedFunctionInfoBytecodeOrBaseline(
1195       masm, kInterpreterBytecodeArrayRegister, kScratchRegister, &is_baseline);
1196 
1197   // The bytecode array could have been flushed from the shared function info,
1198   // if so, call into CompileLazy.
1199   Label compile_lazy;
1200   __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
1201                    kScratchRegister);
1202   __ j(not_equal, &compile_lazy);
1203 
1204   // Load the feedback vector from the closure.
1205   __ LoadTaggedPointerField(
1206       feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1207   __ LoadTaggedPointerField(feedback_vector,
1208                             FieldOperand(feedback_vector, Cell::kValueOffset));
1209 
1210   Label push_stack_frame;
1211   // Check if feedback vector is valid. If valid, check for optimized code
1212   // and update invocation count. Otherwise, setup the stack frame.
1213   __ LoadMap(rcx, feedback_vector);
1214   __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
1215   __ j(not_equal, &push_stack_frame);
1216 
1217   // Check for an optimization marker.
1218   Label has_optimized_code_or_marker;
1219   Register optimization_state = rcx;
1220   LoadOptimizationStateAndJumpIfNeedsProcessing(
1221       masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
1222 
1223   Label not_optimized;
1224   __ bind(&not_optimized);
1225 
1226   // Increment invocation count for the function.
1227   __ incl(
1228       FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
1229 
1230   // Open a frame scope to indicate that there is a frame on the stack.  The
1231   // MANUAL indicates that the scope shouldn't actually generate code to set up
1232   // the frame (that is done below).
1233   __ bind(&push_stack_frame);
1234   FrameScope frame_scope(masm, StackFrame::MANUAL);
1235   __ pushq(rbp);  // Caller's frame pointer.
1236   __ movq(rbp, rsp);
1237   __ Push(kContextRegister);                 // Callee's context.
1238   __ Push(kJavaScriptCallTargetRegister);    // Callee's JS function.
1239   __ Push(kJavaScriptCallArgCountRegister);  // Actual argument count.
1240 
1241   // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
1242   // 8-bit fields next to each other, so we could just optimize by writing a
1243   // 16-bit. These static asserts guard our assumption is valid.
1244   STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
1245                 BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
1246   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1247   __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
1248                        BytecodeArray::kOsrLoopNestingLevelOffset),
1249           Immediate(0));
1250 
1251   // Load initial bytecode offset.
1252   __ Move(kInterpreterBytecodeOffsetRegister,
1253           BytecodeArray::kHeaderSize - kHeapObjectTag);
1254 
1255   // Push bytecode array and Smi tagged bytecode offset.
1256   __ Push(kInterpreterBytecodeArrayRegister);
1257   __ SmiTag(rcx, kInterpreterBytecodeOffsetRegister);
1258   __ Push(rcx);
1259 
1260   // Allocate the local and temporary register file on the stack.
1261   Label stack_overflow;
1262   {
1263     // Load frame size from the BytecodeArray object.
1264     __ movl(rcx, FieldOperand(kInterpreterBytecodeArrayRegister,
1265                               BytecodeArray::kFrameSizeOffset));
1266 
1267     // Do a stack check to ensure we don't go over the limit.
1268     __ movq(rax, rsp);
1269     __ subq(rax, rcx);
1270     __ cmpq(rax, __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
1271     __ j(below, &stack_overflow);
1272 
1273     // If ok, push undefined as the initial value for all register file entries.
1274     Label loop_header;
1275     Label loop_check;
1276     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1277     __ j(always, &loop_check, Label::kNear);
1278     __ bind(&loop_header);
1279     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1280     __ Push(kInterpreterAccumulatorRegister);
1281     // Continue loop if not done.
1282     __ bind(&loop_check);
1283     __ subq(rcx, Immediate(kSystemPointerSize));
1284     __ j(greater_equal, &loop_header, Label::kNear);
1285   }
1286 
1287   // If the bytecode array has a valid incoming new target or generator object
1288   // register, initialize it with incoming value which was passed in rdx.
1289   Label no_incoming_new_target_or_generator_register;
1290   __ movsxlq(
1291       rcx,
1292       FieldOperand(kInterpreterBytecodeArrayRegister,
1293                    BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1294   __ testl(rcx, rcx);
1295   __ j(zero, &no_incoming_new_target_or_generator_register, Label::kNear);
1296   __ movq(Operand(rbp, rcx, times_system_pointer_size, 0), rdx);
1297   __ bind(&no_incoming_new_target_or_generator_register);
1298 
1299   // Perform interrupt stack check.
1300   // TODO(solanes): Merge with the real stack limit check above.
1301   Label stack_check_interrupt, after_stack_check_interrupt;
1302   __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
1303   __ j(below, &stack_check_interrupt);
1304   __ bind(&after_stack_check_interrupt);
1305 
1306   // The accumulator is already loaded with undefined.
1307 
1308   // Load the dispatch table into a register and dispatch to the bytecode
1309   // handler at the current bytecode offset.
1310   Label do_dispatch;
1311   __ bind(&do_dispatch);
1312   __ Move(
1313       kInterpreterDispatchTableRegister,
1314       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1315   __ movzxbq(kScratchRegister,
1316              Operand(kInterpreterBytecodeArrayRegister,
1317                      kInterpreterBytecodeOffsetRegister, times_1, 0));
1318   __ movq(kJavaScriptCallCodeStartRegister,
1319           Operand(kInterpreterDispatchTableRegister, kScratchRegister,
1320                   times_system_pointer_size, 0));
1321   __ call(kJavaScriptCallCodeStartRegister);
1322   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1323 
1324   // Any returns to the entry trampoline are either due to the return bytecode
1325   // or the interpreter tail calling a builtin and then a dispatch.
1326 
1327   // Get bytecode array and bytecode offset from the stack frame.
1328   __ movq(kInterpreterBytecodeArrayRegister,
1329           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1330   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1331               Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1332 
1333   // Either return, or advance to the next bytecode and dispatch.
1334   Label do_return;
1335   __ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
1336                           kInterpreterBytecodeOffsetRegister, times_1, 0));
1337   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1338                                 kInterpreterBytecodeOffsetRegister, rbx, rcx,
1339                                 r8, &do_return);
1340   __ jmp(&do_dispatch);
1341 
1342   __ bind(&do_return);
1343   // The return value is in rax.
1344   LeaveInterpreterFrame(masm, rbx, rcx);
1345   __ ret(0);
1346 
1347   __ bind(&stack_check_interrupt);
1348   // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1349   // for the call to the StackGuard.
1350   __ Move(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
1351           Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1352                        kFunctionEntryBytecodeOffset));
1353   __ CallRuntime(Runtime::kStackGuard);
1354 
1355   // After the call, restore the bytecode array, bytecode offset and accumulator
1356   // registers again. Also, restore the bytecode offset in the stack to its
1357   // previous value.
1358   __ movq(kInterpreterBytecodeArrayRegister,
1359           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1360   __ Move(kInterpreterBytecodeOffsetRegister,
1361           BytecodeArray::kHeaderSize - kHeapObjectTag);
1362   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1363 
1364   __ SmiTag(rcx, kInterpreterBytecodeArrayRegister);
1365   __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rcx);
1366 
1367   __ jmp(&after_stack_check_interrupt);
1368 
1369   __ bind(&compile_lazy);
1370   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1371   __ int3();  // Should not return.
1372 
1373   __ bind(&has_optimized_code_or_marker);
1374   MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1375                                                feedback_vector, closure);
1376 
1377   __ bind(&is_baseline);
1378   {
1379     // Load the feedback vector from the closure.
1380     __ LoadTaggedPointerField(
1381         feedback_vector,
1382         FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1383     __ LoadTaggedPointerField(
1384         feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
1385 
1386     Label install_baseline_code;
1387     // Check if feedback vector is valid. If not, call prepare for baseline to
1388     // allocate it.
1389     __ LoadMap(rcx, feedback_vector);
1390     __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
1391     __ j(not_equal, &install_baseline_code);
1392 
1393     // Check for an optimization marker.
1394     LoadOptimizationStateAndJumpIfNeedsProcessing(
1395         masm, optimization_state, feedback_vector,
1396         &has_optimized_code_or_marker);
1397 
1398     // Load the baseline code into the closure.
1399     __ Move(rcx, kInterpreterBytecodeArrayRegister);
1400     static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
1401     ReplaceClosureCodeWithOptimizedCode(
1402         masm, rcx, closure, kInterpreterBytecodeArrayRegister,
1403         WriteBarrierDescriptor::SlotAddressRegister());
1404     __ JumpCodeTObject(rcx);
1405 
1406     __ bind(&install_baseline_code);
1407     GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1408   }
1409 
1410   __ bind(&stack_overflow);
1411   __ CallRuntime(Runtime::kThrowStackOverflow);
1412   __ int3();  // Should not return.
1413 }
1414 
GenerateInterpreterPushArgs(MacroAssembler * masm,Register num_args,Register start_address,Register scratch)1415 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1416                                         Register start_address,
1417                                         Register scratch) {
1418   ASM_CODE_COMMENT(masm);
1419   // Find the argument with lowest address.
1420   __ movq(scratch, num_args);
1421   __ negq(scratch);
1422   __ leaq(start_address,
1423           Operand(start_address, scratch, times_system_pointer_size,
1424                   kSystemPointerSize));
1425   // Push the arguments.
1426   __ PushArray(start_address, num_args, scratch,
1427                TurboAssembler::PushArrayOrder::kReverse);
1428 }
1429 
1430 // static
Generate_InterpreterPushArgsThenCallImpl(MacroAssembler * masm,ConvertReceiverMode receiver_mode,InterpreterPushArgsMode mode)1431 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1432     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1433     InterpreterPushArgsMode mode) {
1434   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1435   // ----------- S t a t e -------------
1436   //  -- rax : the number of arguments
1437   //  -- rbx : the address of the first argument to be pushed. Subsequent
1438   //           arguments should be consecutive above this, in the same order as
1439   //           they are to be pushed onto the stack.
1440   //  -- rdi : the target to call (can be any Object).
1441   // -----------------------------------
1442   Label stack_overflow;
1443 
1444   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1445     // The spread argument should not be pushed.
1446     __ decl(rax);
1447   }
1448 
1449   int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
1450   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1451     argc_modification -= 1;
1452   }
1453   if (argc_modification != 0) {
1454     __ leal(rcx, Operand(rax, argc_modification));
1455   } else {
1456     __ movl(rcx, rax);
1457   }
1458 
1459   // Add a stack check before pushing arguments.
1460   __ StackOverflowCheck(rcx, &stack_overflow);
1461 
1462   // Pop return address to allow tail-call after pushing arguments.
1463   __ PopReturnAddressTo(kScratchRegister);
1464 
1465   // rbx and rdx will be modified.
1466   GenerateInterpreterPushArgs(masm, rcx, rbx, rdx);
1467 
1468   // Push "undefined" as the receiver arg if we need to.
1469   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1470     __ PushRoot(RootIndex::kUndefinedValue);
1471   }
1472 
1473   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1474     // Pass the spread in the register rbx.
1475     // rbx already points to the penultime argument, the spread
1476     // is below that.
1477     __ movq(rbx, Operand(rbx, -kSystemPointerSize));
1478   }
1479 
1480   // Call the target.
1481   __ PushReturnAddressFrom(kScratchRegister);  // Re-push return address.
1482 
1483   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1484     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1485             RelocInfo::CODE_TARGET);
1486   } else {
1487     __ Jump(masm->isolate()->builtins()->Call(receiver_mode),
1488             RelocInfo::CODE_TARGET);
1489   }
1490 
1491   // Throw stack overflow exception.
1492   __ bind(&stack_overflow);
1493   {
1494     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1495     // This should be unreachable.
1496     __ int3();
1497   }
1498 }
1499 
1500 // static
Generate_InterpreterPushArgsThenConstructImpl(MacroAssembler * masm,InterpreterPushArgsMode mode)1501 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1502     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1503   // ----------- S t a t e -------------
1504   //  -- rax : the number of arguments
1505   //  -- rdx : the new target (either the same as the constructor or
1506   //           the JSFunction on which new was invoked initially)
1507   //  -- rdi : the constructor to call (can be any Object)
1508   //  -- rbx : the allocation site feedback if available, undefined otherwise
1509   //  -- rcx : the address of the first argument to be pushed. Subsequent
1510   //           arguments should be consecutive above this, in the same order as
1511   //           they are to be pushed onto the stack.
1512   // -----------------------------------
1513   Label stack_overflow;
1514 
1515   // Add a stack check before pushing arguments.
1516   __ StackOverflowCheck(rax, &stack_overflow);
1517 
1518   // Pop return address to allow tail-call after pushing arguments.
1519   __ PopReturnAddressTo(kScratchRegister);
1520 
1521   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1522     // The spread argument should not be pushed.
1523     __ decl(rax);
1524   }
1525 
1526   // rcx and r8 will be modified.
1527   Register argc_without_receiver = rax;
1528   if (kJSArgcIncludesReceiver) {
1529     argc_without_receiver = r11;
1530     __ leaq(argc_without_receiver, Operand(rax, -kJSArgcReceiverSlots));
1531   }
1532   GenerateInterpreterPushArgs(masm, argc_without_receiver, rcx, r8);
1533 
1534   // Push slot for the receiver to be constructed.
1535   __ Push(Immediate(0));
1536 
1537   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1538     // Pass the spread in the register rbx.
1539     __ movq(rbx, Operand(rcx, -kSystemPointerSize));
1540     // Push return address in preparation for the tail-call.
1541     __ PushReturnAddressFrom(kScratchRegister);
1542   } else {
1543     __ PushReturnAddressFrom(kScratchRegister);
1544     __ AssertUndefinedOrAllocationSite(rbx);
1545   }
1546 
1547   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1548     // Tail call to the array construct stub (still in the caller
1549     // context at this point).
1550     __ AssertFunction(rdi);
1551     // Jump to the constructor function (rax, rbx, rdx passed on).
1552     Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
1553     __ Jump(code, RelocInfo::CODE_TARGET);
1554   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1555     // Call the constructor (rax, rdx, rdi passed on).
1556     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1557             RelocInfo::CODE_TARGET);
1558   } else {
1559     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1560     // Call the constructor (rax, rdx, rdi passed on).
1561     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1562   }
1563 
1564   // Throw stack overflow exception.
1565   __ bind(&stack_overflow);
1566   {
1567     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1568     // This should be unreachable.
1569     __ int3();
1570   }
1571 }
1572 
Generate_InterpreterEnterBytecode(MacroAssembler * masm)1573 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1574   // Set the return address to the correct point in the interpreter entry
1575   // trampoline.
1576   Label builtin_trampoline, trampoline_loaded;
1577   Smi interpreter_entry_return_pc_offset(
1578       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1579   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1580 
1581   // If the SFI function_data is an InterpreterData, the function will have a
1582   // custom copy of the interpreter entry trampoline for profiling. If so,
1583   // get the custom trampoline, otherwise grab the entry address of the global
1584   // trampoline.
1585   __ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
1586   __ LoadTaggedPointerField(
1587       rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
1588   __ LoadTaggedPointerField(
1589       rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
1590   __ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
1591   __ j(not_equal, &builtin_trampoline, Label::kNear);
1592 
1593   __ LoadTaggedPointerField(
1594       rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
1595   __ LoadCodeTEntry(rbx, rbx);
1596   __ jmp(&trampoline_loaded, Label::kNear);
1597 
1598   __ bind(&builtin_trampoline);
1599   // TODO(jgruber): Replace this by a lookup in the builtin entry table.
1600   __ movq(rbx,
1601           __ ExternalReferenceAsOperand(
1602               ExternalReference::
1603                   address_of_interpreter_entry_trampoline_instruction_start(
1604                       masm->isolate()),
1605               kScratchRegister));
1606 
1607   __ bind(&trampoline_loaded);
1608   __ addq(rbx, Immediate(interpreter_entry_return_pc_offset.value()));
1609   __ Push(rbx);
1610 
1611   // Initialize dispatch table register.
1612   __ Move(
1613       kInterpreterDispatchTableRegister,
1614       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1615 
1616   // Get the bytecode array pointer from the frame.
1617   __ movq(kInterpreterBytecodeArrayRegister,
1618           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1619 
1620   if (FLAG_debug_code) {
1621     // Check function data field is actually a BytecodeArray object.
1622     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
1623     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
1624                      rbx);
1625     __ Assert(
1626         equal,
1627         AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1628   }
1629 
1630   // Get the target bytecode offset from the frame.
1631   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1632               Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1633 
1634   if (FLAG_debug_code) {
1635     Label okay;
1636     __ cmpq(kInterpreterBytecodeOffsetRegister,
1637             Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
1638     __ j(greater_equal, &okay, Label::kNear);
1639     __ int3();
1640     __ bind(&okay);
1641   }
1642 
1643   // Dispatch to the target bytecode.
1644   __ movzxbq(kScratchRegister,
1645              Operand(kInterpreterBytecodeArrayRegister,
1646                      kInterpreterBytecodeOffsetRegister, times_1, 0));
1647   __ movq(kJavaScriptCallCodeStartRegister,
1648           Operand(kInterpreterDispatchTableRegister, kScratchRegister,
1649                   times_system_pointer_size, 0));
1650   __ jmp(kJavaScriptCallCodeStartRegister);
1651 }
1652 
Generate_InterpreterEnterAtNextBytecode(MacroAssembler * masm)1653 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1654   // Get bytecode array and bytecode offset from the stack frame.
1655   __ movq(kInterpreterBytecodeArrayRegister,
1656           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1657   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1658               Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1659 
1660   Label enter_bytecode, function_entry_bytecode;
1661   __ cmpq(kInterpreterBytecodeOffsetRegister,
1662           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
1663                     kFunctionEntryBytecodeOffset));
1664   __ j(equal, &function_entry_bytecode);
1665 
1666   // Load the current bytecode.
1667   __ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
1668                           kInterpreterBytecodeOffsetRegister, times_1, 0));
1669 
1670   // Advance to the next bytecode.
1671   Label if_return;
1672   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1673                                 kInterpreterBytecodeOffsetRegister, rbx, rcx,
1674                                 r8, &if_return);
1675 
1676   __ bind(&enter_bytecode);
1677   // Convert new bytecode offset to a Smi and save in the stackframe.
1678   __ SmiTag(kInterpreterBytecodeOffsetRegister);
1679   __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
1680           kInterpreterBytecodeOffsetRegister);
1681 
1682   Generate_InterpreterEnterBytecode(masm);
1683 
1684   __ bind(&function_entry_bytecode);
1685   // If the code deoptimizes during the implicit function entry stack interrupt
1686   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1687   // not a valid bytecode offset. Detect this case and advance to the first
1688   // actual bytecode.
1689   __ Move(kInterpreterBytecodeOffsetRegister,
1690           BytecodeArray::kHeaderSize - kHeapObjectTag);
1691   __ jmp(&enter_bytecode);
1692 
1693   // We should never take the if_return path.
1694   __ bind(&if_return);
1695   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1696 }
1697 
Generate_InterpreterEnterAtBytecode(MacroAssembler * masm)1698 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1699   Generate_InterpreterEnterBytecode(masm);
1700 }
1701 
1702 // static
Generate_BaselineOutOfLinePrologue(MacroAssembler * masm)1703 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1704   Register feedback_vector = r8;
1705   Register optimization_state = rcx;
1706   Register return_address = r15;
1707 
1708 #ifdef DEBUG
1709   for (auto reg : BaselineOutOfLinePrologueDescriptor::registers()) {
1710     DCHECK(
1711         !AreAliased(feedback_vector, optimization_state, return_address, reg));
1712   }
1713 #endif
1714 
1715   auto descriptor =
1716       Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1717   Register closure = descriptor.GetRegisterParameter(
1718       BaselineOutOfLinePrologueDescriptor::kClosure);
1719   // Load the feedback vector from the closure.
1720   __ LoadTaggedPointerField(
1721       feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1722   __ LoadTaggedPointerField(feedback_vector,
1723                             FieldOperand(feedback_vector, Cell::kValueOffset));
1724   if (FLAG_debug_code) {
1725     __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
1726     __ Assert(equal, AbortReason::kExpectedFeedbackVector);
1727   }
1728 
1729   // Check for an optimization marker.
1730   Label has_optimized_code_or_marker;
1731   LoadOptimizationStateAndJumpIfNeedsProcessing(
1732       masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
1733 
1734   // Increment invocation count for the function.
1735   __ incl(
1736       FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
1737 
1738     // Save the return address, so that we can push it to the end of the newly
1739     // set-up frame once we're done setting it up.
1740     __ PopReturnAddressTo(return_address);
1741     FrameScope frame_scope(masm, StackFrame::MANUAL);
1742     {
1743       ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1744       __ EnterFrame(StackFrame::BASELINE);
1745 
1746       __ Push(descriptor.GetRegisterParameter(
1747           BaselineOutOfLinePrologueDescriptor::kCalleeContext));  // Callee's
1748                                                                   // context.
1749       Register callee_js_function = descriptor.GetRegisterParameter(
1750           BaselineOutOfLinePrologueDescriptor::kClosure);
1751       DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1752       DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1753       __ Push(callee_js_function);  // Callee's JS function.
1754       __ Push(descriptor.GetRegisterParameter(
1755           BaselineOutOfLinePrologueDescriptor::
1756               kJavaScriptCallArgCount));  // Actual argument
1757                                           // count.
1758 
1759       // We'll use the bytecode for both code age/OSR resetting, and pushing
1760       // onto the frame, so load it into a register.
1761       Register bytecode_array = descriptor.GetRegisterParameter(
1762           BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1763 
1764       // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
1765       // are 8-bit fields next to each other, so we could just optimize by
1766       // writing a 16-bit. These static asserts guard our assumption is valid.
1767       STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
1768                     BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
1769       STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1770       __ movw(FieldOperand(bytecode_array,
1771                            BytecodeArray::kOsrLoopNestingLevelOffset),
1772               Immediate(0));
1773       __ Push(bytecode_array);
1774 
1775       // Baseline code frames store the feedback vector where interpreter would
1776       // store the bytecode offset.
1777       __ Push(feedback_vector);
1778     }
1779 
1780   Register new_target = descriptor.GetRegisterParameter(
1781       BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
1782 
1783   Label call_stack_guard;
1784   Register frame_size = descriptor.GetRegisterParameter(
1785       BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1786   {
1787     ASM_CODE_COMMENT_STRING(masm, " Stack/interrupt check");
1788     // Stack check. This folds the checks for both the interrupt stack limit
1789     // check and the real stack limit into one by just checking for the
1790     // interrupt limit. The interrupt limit is either equal to the real stack
1791     // limit or tighter. By ensuring we have space until that limit after
1792     // building the frame we can quickly precheck both at once.
1793     //
1794     // TODO(v8:11429): Backport this folded check to the
1795     // InterpreterEntryTrampoline.
1796     __ Move(kScratchRegister, rsp);
1797     DCHECK_NE(frame_size, new_target);
1798     __ subq(kScratchRegister, frame_size);
1799     __ cmpq(kScratchRegister,
1800             __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
1801     __ j(below, &call_stack_guard);
1802   }
1803 
1804   // Push the return address back onto the stack for return.
1805   __ PushReturnAddressFrom(return_address);
1806   // Return to caller pushed pc, without any frame teardown.
1807   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1808   __ Ret();
1809 
1810   __ bind(&has_optimized_code_or_marker);
1811   {
1812     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1813     // Drop the return address, rebalancing the return stack buffer by using
1814     // JumpMode::kPushAndReturn. We can't leave the slot and overwrite it on
1815     // return since we may do a runtime call along the way that requires the
1816     // stack to only contain valid frames.
1817     __ Drop(1);
1818     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1819                                                  feedback_vector, closure,
1820                                                  JumpMode::kPushAndReturn);
1821     __ Trap();
1822   }
1823 
1824   __ bind(&call_stack_guard);
1825   {
1826     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1827     {
1828       // Push the baseline code return address now, as if it had been pushed by
1829       // the call to this builtin.
1830       __ PushReturnAddressFrom(return_address);
1831       FrameScope inner_frame_scope(masm, StackFrame::INTERNAL);
1832       // Save incoming new target or generator
1833       __ Push(new_target);
1834       __ SmiTag(frame_size);
1835       __ Push(frame_size);
1836       __ CallRuntime(Runtime::kStackGuardWithGap, 1);
1837       __ Pop(new_target);
1838     }
1839 
1840     // Return to caller pushed pc, without any frame teardown.
1841     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1842     __ Ret();
1843   }
1844 }
1845 
1846 namespace {
Generate_ContinueToBuiltinHelper(MacroAssembler * masm,bool java_script_builtin,bool with_result)1847 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1848                                       bool java_script_builtin,
1849                                       bool with_result) {
1850   ASM_CODE_COMMENT(masm);
1851   const RegisterConfiguration* config(RegisterConfiguration::Default());
1852   int allocatable_register_count = config->num_allocatable_general_registers();
1853   if (with_result) {
1854     if (java_script_builtin) {
1855       // kScratchRegister is not included in the allocateable registers.
1856       __ movq(kScratchRegister, rax);
1857     } else {
1858       // Overwrite the hole inserted by the deoptimizer with the return value
1859       // from the LAZY deopt point.
1860       __ movq(
1861           Operand(rsp, config->num_allocatable_general_registers() *
1862                                kSystemPointerSize +
1863                            BuiltinContinuationFrameConstants::kFixedFrameSize),
1864           rax);
1865     }
1866   }
1867   for (int i = allocatable_register_count - 1; i >= 0; --i) {
1868     int code = config->GetAllocatableGeneralCode(i);
1869     __ popq(Register::from_code(code));
1870     if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1871       __ SmiUntag(Register::from_code(code));
1872     }
1873   }
1874   if (with_result && java_script_builtin) {
1875     // Overwrite the hole inserted by the deoptimizer with the return value from
1876     // the LAZY deopt point. rax contains the arguments count, the return value
1877     // from LAZY is always the last argument.
1878     __ movq(Operand(rsp, rax, times_system_pointer_size,
1879                     BuiltinContinuationFrameConstants::kFixedFrameSize -
1880                         (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
1881             kScratchRegister);
1882   }
1883   __ movq(
1884       rbp,
1885       Operand(rsp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1886   const int offsetToPC =
1887       BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
1888       kSystemPointerSize;
1889   __ popq(Operand(rsp, offsetToPC));
1890   __ Drop(offsetToPC / kSystemPointerSize);
1891 
1892   // Replace the builtin index Smi on the stack with the instruction start
1893   // address of the builtin from the builtins table, and then Ret to this
1894   // address
1895   __ movq(kScratchRegister, Operand(rsp, 0));
1896   __ movq(kScratchRegister,
1897           __ EntryFromBuiltinIndexAsOperand(kScratchRegister));
1898   __ movq(Operand(rsp, 0), kScratchRegister);
1899 
1900   __ Ret();
1901 }
1902 }  // namespace
1903 
Generate_ContinueToCodeStubBuiltin(MacroAssembler * masm)1904 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1905   Generate_ContinueToBuiltinHelper(masm, false, false);
1906 }
1907 
Generate_ContinueToCodeStubBuiltinWithResult(MacroAssembler * masm)1908 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1909     MacroAssembler* masm) {
1910   Generate_ContinueToBuiltinHelper(masm, false, true);
1911 }
1912 
Generate_ContinueToJavaScriptBuiltin(MacroAssembler * masm)1913 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1914   Generate_ContinueToBuiltinHelper(masm, true, false);
1915 }
1916 
Generate_ContinueToJavaScriptBuiltinWithResult(MacroAssembler * masm)1917 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1918     MacroAssembler* masm) {
1919   Generate_ContinueToBuiltinHelper(masm, true, true);
1920 }
1921 
Generate_NotifyDeoptimized(MacroAssembler * masm)1922 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1923   // Enter an internal frame.
1924   {
1925     FrameScope scope(masm, StackFrame::INTERNAL);
1926     __ CallRuntime(Runtime::kNotifyDeoptimized);
1927     // Tear down internal frame.
1928   }
1929 
1930   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
1931   __ movq(rax, Operand(rsp, kPCOnStackSize));
1932   __ ret(1 * kSystemPointerSize);  // Remove rax.
1933 }
1934 
1935 // static
Generate_FunctionPrototypeApply(MacroAssembler * masm)1936 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1937   // ----------- S t a t e -------------
1938   //  -- rax     : argc
1939   //  -- rsp[0]  : return address
1940   //  -- rsp[1]  : receiver
1941   //  -- rsp[2]  : thisArg
1942   //  -- rsp[3]  : argArray
1943   // -----------------------------------
1944 
1945   // 1. Load receiver into rdi, argArray into rbx (if present), remove all
1946   // arguments from the stack (including the receiver), and push thisArg (if
1947   // present) instead.
1948   {
1949     Label no_arg_array, no_this_arg;
1950     StackArgumentsAccessor args(rax);
1951     __ LoadRoot(rdx, RootIndex::kUndefinedValue);
1952     __ movq(rbx, rdx);
1953     __ movq(rdi, args[0]);
1954     __ cmpq(rax, Immediate(JSParameterCount(0)));
1955     __ j(equal, &no_this_arg, Label::kNear);
1956     {
1957       __ movq(rdx, args[1]);
1958       __ cmpq(rax, Immediate(JSParameterCount(1)));
1959       __ j(equal, &no_arg_array, Label::kNear);
1960       __ movq(rbx, args[2]);
1961       __ bind(&no_arg_array);
1962     }
1963     __ bind(&no_this_arg);
1964     __ DropArgumentsAndPushNewReceiver(
1965         rax, rdx, rcx, TurboAssembler::kCountIsInteger,
1966         kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
1967                                 : TurboAssembler::kCountExcludesReceiver);
1968   }
1969 
1970   // ----------- S t a t e -------------
1971   //  -- rbx     : argArray
1972   //  -- rdi     : receiver
1973   //  -- rsp[0]  : return address
1974   //  -- rsp[8]  : thisArg
1975   // -----------------------------------
1976 
1977   // 2. We don't need to check explicitly for callable receiver here,
1978   // since that's the first thing the Call/CallWithArrayLike builtins
1979   // will do.
1980 
1981   // 3. Tail call with no arguments if argArray is null or undefined.
1982   Label no_arguments;
1983   __ JumpIfRoot(rbx, RootIndex::kNullValue, &no_arguments, Label::kNear);
1984   __ JumpIfRoot(rbx, RootIndex::kUndefinedValue, &no_arguments, Label::kNear);
1985 
1986   // 4a. Apply the receiver to the given argArray.
1987   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1988           RelocInfo::CODE_TARGET);
1989 
1990   // 4b. The argArray is either null or undefined, so we tail call without any
1991   // arguments to the receiver. Since we did not create a frame for
1992   // Function.prototype.apply() yet, we use a normal Call builtin here.
1993   __ bind(&no_arguments);
1994   {
1995     __ Move(rax, JSParameterCount(0));
1996     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1997   }
1998 }
1999 
2000 // static
Generate_FunctionPrototypeCall(MacroAssembler * masm)2001 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
2002   // Stack Layout:
2003   // rsp[0]           : Return address
2004   // rsp[8]           : Argument 0 (receiver: callable to call)
2005   // rsp[16]          : Argument 1
2006   //  ...
2007   // rsp[8 * n]       : Argument n-1
2008   // rsp[8 * (n + 1)] : Argument n
2009   // rax contains the number of arguments, n.
2010 
2011   // 1. Get the callable to call (passed as receiver) from the stack.
2012   {
2013     StackArgumentsAccessor args(rax);
2014     __ movq(rdi, args.GetReceiverOperand());
2015   }
2016 
2017   // 2. Save the return address and drop the callable.
2018   __ PopReturnAddressTo(rbx);
2019   __ Pop(kScratchRegister);
2020 
2021   // 3. Make sure we have at least one argument.
2022   {
2023     Label done;
2024     if (kJSArgcIncludesReceiver) {
2025       __ cmpq(rax, Immediate(JSParameterCount(0)));
2026       __ j(greater, &done, Label::kNear);
2027     } else {
2028       __ testq(rax, rax);
2029       __ j(not_zero, &done, Label::kNear);
2030     }
2031     __ PushRoot(RootIndex::kUndefinedValue);
2032     __ incq(rax);
2033     __ bind(&done);
2034   }
2035 
2036   // 4. Push back the return address one slot down on the stack (overwriting the
2037   // original callable), making the original first argument the new receiver.
2038   __ PushReturnAddressFrom(rbx);
2039   __ decq(rax);  // One fewer argument (first argument is new receiver).
2040 
2041   // 5. Call the callable.
2042   // Since we did not create a frame for Function.prototype.call() yet,
2043   // we use a normal Call builtin here.
2044   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2045 }
2046 
Generate_ReflectApply(MacroAssembler * masm)2047 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2048   // ----------- S t a t e -------------
2049   //  -- rax     : argc
2050   //  -- rsp[0]  : return address
2051   //  -- rsp[8]  : receiver
2052   //  -- rsp[16] : target         (if argc >= 1)
2053   //  -- rsp[24] : thisArgument   (if argc >= 2)
2054   //  -- rsp[32] : argumentsList  (if argc == 3)
2055   // -----------------------------------
2056 
2057   // 1. Load target into rdi (if present), argumentsList into rbx (if present),
2058   // remove all arguments from the stack (including the receiver), and push
2059   // thisArgument (if present) instead.
2060   {
2061     Label done;
2062     StackArgumentsAccessor args(rax);
2063     __ LoadRoot(rdi, RootIndex::kUndefinedValue);
2064     __ movq(rdx, rdi);
2065     __ movq(rbx, rdi);
2066     __ cmpq(rax, Immediate(JSParameterCount(1)));
2067     __ j(below, &done, Label::kNear);
2068     __ movq(rdi, args[1]);  // target
2069     __ j(equal, &done, Label::kNear);
2070     __ movq(rdx, args[2]);  // thisArgument
2071     __ cmpq(rax, Immediate(JSParameterCount(3)));
2072     __ j(below, &done, Label::kNear);
2073     __ movq(rbx, args[3]);  // argumentsList
2074     __ bind(&done);
2075     __ DropArgumentsAndPushNewReceiver(
2076         rax, rdx, rcx, TurboAssembler::kCountIsInteger,
2077         kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
2078                                 : TurboAssembler::kCountExcludesReceiver);
2079   }
2080 
2081   // ----------- S t a t e -------------
2082   //  -- rbx     : argumentsList
2083   //  -- rdi     : target
2084   //  -- rsp[0]  : return address
2085   //  -- rsp[8]  : thisArgument
2086   // -----------------------------------
2087 
2088   // 2. We don't need to check explicitly for callable target here,
2089   // since that's the first thing the Call/CallWithArrayLike builtins
2090   // will do.
2091 
2092   // 3. Apply the target to the given argumentsList.
2093   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2094           RelocInfo::CODE_TARGET);
2095 }
2096 
Generate_ReflectConstruct(MacroAssembler * masm)2097 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2098   // ----------- S t a t e -------------
2099   //  -- rax     : argc
2100   //  -- rsp[0]  : return address
2101   //  -- rsp[8]  : receiver
2102   //  -- rsp[16] : target
2103   //  -- rsp[24] : argumentsList
2104   //  -- rsp[32] : new.target (optional)
2105   // -----------------------------------
2106 
2107   // 1. Load target into rdi (if present), argumentsList into rbx (if present),
2108   // new.target into rdx (if present, otherwise use target), remove all
2109   // arguments from the stack (including the receiver), and push thisArgument
2110   // (if present) instead.
2111   {
2112     Label done;
2113     StackArgumentsAccessor args(rax);
2114     __ LoadRoot(rdi, RootIndex::kUndefinedValue);
2115     __ movq(rdx, rdi);
2116     __ movq(rbx, rdi);
2117     __ cmpq(rax, Immediate(JSParameterCount(1)));
2118     __ j(below, &done, Label::kNear);
2119     __ movq(rdi, args[1]);                     // target
2120     __ movq(rdx, rdi);                         // new.target defaults to target
2121     __ j(equal, &done, Label::kNear);
2122     __ movq(rbx, args[2]);  // argumentsList
2123     __ cmpq(rax, Immediate(JSParameterCount(3)));
2124     __ j(below, &done, Label::kNear);
2125     __ movq(rdx, args[3]);  // new.target
2126     __ bind(&done);
2127     __ DropArgumentsAndPushNewReceiver(
2128         rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
2129         TurboAssembler::kCountIsInteger,
2130         kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
2131                                 : TurboAssembler::kCountExcludesReceiver);
2132   }
2133 
2134   // ----------- S t a t e -------------
2135   //  -- rbx     : argumentsList
2136   //  -- rdx     : new.target
2137   //  -- rdi     : target
2138   //  -- rsp[0]  : return address
2139   //  -- rsp[8]  : receiver (undefined)
2140   // -----------------------------------
2141 
2142   // 2. We don't need to check explicitly for constructor target here,
2143   // since that's the first thing the Construct/ConstructWithArrayLike
2144   // builtins will do.
2145 
2146   // 3. We don't need to check explicitly for constructor new.target here,
2147   // since that's the second thing the Construct/ConstructWithArrayLike
2148   // builtins will do.
2149 
2150   // 4. Construct the target with the given new.target and argumentsList.
2151   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2152           RelocInfo::CODE_TARGET);
2153 }
2154 
2155 namespace {
2156 
2157 // Allocate new stack space for |count| arguments and shift all existing
2158 // arguments already on the stack. |pointer_to_new_space_out| points to the
2159 // first free slot on the stack to copy additional arguments to and
2160 // |argc_in_out| is updated to include |count|.
Generate_AllocateSpaceAndShiftExistingArguments(MacroAssembler * masm,Register count,Register argc_in_out,Register pointer_to_new_space_out,Register scratch1,Register scratch2)2161 void Generate_AllocateSpaceAndShiftExistingArguments(
2162     MacroAssembler* masm, Register count, Register argc_in_out,
2163     Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
2164   DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2165                      scratch2, kScratchRegister));
2166   // Use pointer_to_new_space_out as scratch until we set it to the correct
2167   // value at the end.
2168   Register old_rsp = pointer_to_new_space_out;
2169   Register new_space = kScratchRegister;
2170   __ movq(old_rsp, rsp);
2171 
2172   __ leaq(new_space, Operand(count, times_system_pointer_size, 0));
2173   __ AllocateStackSpace(new_space);
2174 
2175   Register copy_count = argc_in_out;
2176   if (!kJSArgcIncludesReceiver) {
2177     // We have a spare register, so use it instead of clobbering argc.
2178     // lea + add (to add the count to argc in the end) uses 1 less byte than
2179     // inc + lea (with base, index and disp), at the cost of 1 extra register.
2180     copy_count = scratch1;
2181     __ leaq(copy_count, Operand(argc_in_out, 1));  // Include the receiver.
2182   }
2183   Register current = scratch2;
2184   Register value = kScratchRegister;
2185 
2186   Label loop, entry;
2187   __ Move(current, 0);
2188   __ jmp(&entry);
2189   __ bind(&loop);
2190   __ movq(value, Operand(old_rsp, current, times_system_pointer_size, 0));
2191   __ movq(Operand(rsp, current, times_system_pointer_size, 0), value);
2192   __ incq(current);
2193   __ bind(&entry);
2194   __ cmpq(current, copy_count);
2195   __ j(less_equal, &loop, Label::kNear);
2196 
2197   // Point to the next free slot above the shifted arguments (copy_count + 1
2198   // slot for the return address).
2199   __ leaq(
2200       pointer_to_new_space_out,
2201       Operand(rsp, copy_count, times_system_pointer_size, kSystemPointerSize));
2202   // We use addl instead of addq here because we can omit REX.W, saving 1 byte.
2203   // We are especially constrained here because we are close to reaching the
2204   // limit for a near jump to the stackoverflow label, so every byte counts.
2205   __ addl(argc_in_out, count);  // Update total number of arguments.
2206 }
2207 
2208 }  // namespace
2209 
2210 // static
2211 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
Generate_CallOrConstructVarargs(MacroAssembler * masm,Handle<Code> code)2212 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2213                                                Handle<Code> code) {
2214   // ----------- S t a t e -------------
2215   //  -- rdi    : target
2216   //  -- rax    : number of parameters on the stack
2217   //  -- rbx    : arguments list (a FixedArray)
2218   //  -- rcx    : len (number of elements to push from args)
2219   //  -- rdx    : new.target (for [[Construct]])
2220   //  -- rsp[0] : return address
2221   // -----------------------------------
2222 
2223   if (FLAG_debug_code) {
2224     // Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0.
2225     Label ok, fail;
2226     __ AssertNotSmi(rbx);
2227     Register map = r9;
2228     __ LoadMap(map, rbx);
2229     __ CmpInstanceType(map, FIXED_ARRAY_TYPE);
2230     __ j(equal, &ok);
2231     __ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
2232     __ j(not_equal, &fail);
2233     __ Cmp(rcx, 0);
2234     __ j(equal, &ok);
2235     // Fall through.
2236     __ bind(&fail);
2237     __ Abort(AbortReason::kOperandIsNotAFixedArray);
2238 
2239     __ bind(&ok);
2240   }
2241 
2242   Label stack_overflow;
2243   __ StackOverflowCheck(rcx, &stack_overflow, Label::kNear);
2244 
2245   // Push additional arguments onto the stack.
2246   // Move the arguments already in the stack,
2247   // including the receiver and the return address.
2248   // rcx: Number of arguments to make room for.
2249   // rax: Number of arguments already on the stack.
2250   // r8: Points to first free slot on the stack after arguments were shifted.
2251   Generate_AllocateSpaceAndShiftExistingArguments(masm, rcx, rax, r8, r9, r12);
2252   // Copy the additional arguments onto the stack.
2253   {
2254     Register value = r12;
2255     Register src = rbx, dest = r8, num = rcx, current = r9;
2256     __ Move(current, 0);
2257     Label done, push, loop;
2258     __ bind(&loop);
2259     __ cmpl(current, num);
2260     __ j(equal, &done, Label::kNear);
2261     // Turn the hole into undefined as we go.
2262     __ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
2263                                               FixedArray::kHeaderSize));
2264     __ CompareRoot(value, RootIndex::kTheHoleValue);
2265     __ j(not_equal, &push, Label::kNear);
2266     __ LoadRoot(value, RootIndex::kUndefinedValue);
2267     __ bind(&push);
2268     __ movq(Operand(dest, current, times_system_pointer_size, 0), value);
2269     __ incl(current);
2270     __ jmp(&loop);
2271     __ bind(&done);
2272   }
2273 
2274   // Tail-call to the actual Call or Construct builtin.
2275   __ Jump(code, RelocInfo::CODE_TARGET);
2276 
2277   __ bind(&stack_overflow);
2278   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2279 }
2280 
2281 // static
Generate_CallOrConstructForwardVarargs(MacroAssembler * masm,CallOrConstructMode mode,Handle<Code> code)2282 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2283                                                       CallOrConstructMode mode,
2284                                                       Handle<Code> code) {
2285   // ----------- S t a t e -------------
2286   //  -- rax : the number of arguments
2287   //  -- rdx : the new target (for [[Construct]] calls)
2288   //  -- rdi : the target to call (can be any Object)
2289   //  -- rcx : start index (to support rest parameters)
2290   // -----------------------------------
2291 
2292   // Check if new.target has a [[Construct]] internal method.
2293   if (mode == CallOrConstructMode::kConstruct) {
2294     Label new_target_constructor, new_target_not_constructor;
2295     __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
2296     __ LoadMap(rbx, rdx);
2297     __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
2298              Immediate(Map::Bits1::IsConstructorBit::kMask));
2299     __ j(not_zero, &new_target_constructor, Label::kNear);
2300     __ bind(&new_target_not_constructor);
2301     {
2302       FrameScope scope(masm, StackFrame::MANUAL);
2303       __ EnterFrame(StackFrame::INTERNAL);
2304       __ Push(rdx);
2305       __ CallRuntime(Runtime::kThrowNotConstructor);
2306     }
2307     __ bind(&new_target_constructor);
2308   }
2309 
2310   Label stack_done, stack_overflow;
2311   __ movq(r8, Operand(rbp, StandardFrameConstants::kArgCOffset));
2312   if (kJSArgcIncludesReceiver) {
2313     __ decq(r8);
2314   }
2315   __ subl(r8, rcx);
2316   __ j(less_equal, &stack_done);
2317   {
2318     // ----------- S t a t e -------------
2319     //  -- rax : the number of arguments already in the stack
2320     //  -- rbp : point to the caller stack frame
2321     //  -- rcx : start index (to support rest parameters)
2322     //  -- rdx : the new target (for [[Construct]] calls)
2323     //  -- rdi : the target to call (can be any Object)
2324     //  -- r8  : number of arguments to copy, i.e. arguments count - start index
2325     // -----------------------------------
2326 
2327     // Check for stack overflow.
2328     __ StackOverflowCheck(r8, &stack_overflow, Label::kNear);
2329 
2330     // Forward the arguments from the caller frame.
2331     // Move the arguments already in the stack,
2332     // including the receiver and the return address.
2333     // r8: Number of arguments to make room for.
2334     // rax: Number of arguments already on the stack.
2335     // r9: Points to first free slot on the stack after arguments were shifted.
2336     Generate_AllocateSpaceAndShiftExistingArguments(masm, r8, rax, r9, r12,
2337                                                     r15);
2338 
2339     // Point to the first argument to copy (skipping receiver).
2340     __ leaq(rcx, Operand(rcx, times_system_pointer_size,
2341                          CommonFrameConstants::kFixedFrameSizeAboveFp +
2342                              kSystemPointerSize));
2343     __ addq(rcx, rbp);
2344 
2345     // Copy the additional caller arguments onto the stack.
2346     // TODO(victorgomes): Consider using forward order as potentially more cache
2347     // friendly.
2348     {
2349       Register src = rcx, dest = r9, num = r8;
2350       Label loop;
2351       __ bind(&loop);
2352       __ decq(num);
2353       __ movq(kScratchRegister,
2354               Operand(src, num, times_system_pointer_size, 0));
2355       __ movq(Operand(dest, num, times_system_pointer_size, 0),
2356               kScratchRegister);
2357       __ j(not_zero, &loop);
2358     }
2359   }
2360   __ jmp(&stack_done, Label::kNear);
2361   __ bind(&stack_overflow);
2362   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2363   __ bind(&stack_done);
2364 
2365   // Tail-call to the {code} handler.
2366   __ Jump(code, RelocInfo::CODE_TARGET);
2367 }
2368 
2369 // static
Generate_CallFunction(MacroAssembler * masm,ConvertReceiverMode mode)2370 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2371                                      ConvertReceiverMode mode) {
2372   // ----------- S t a t e -------------
2373   //  -- rax : the number of arguments
2374   //  -- rdi : the function to call (checked to be a JSFunction)
2375   // -----------------------------------
2376 
2377   StackArgumentsAccessor args(rax);
2378   __ AssertFunction(rdi);
2379 
2380   __ LoadTaggedPointerField(
2381       rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
2382   // ----------- S t a t e -------------
2383   //  -- rax : the number of arguments
2384   //  -- rdx : the shared function info.
2385   //  -- rdi : the function to call (checked to be a JSFunction)
2386   // -----------------------------------
2387 
2388   // Enter the context of the function; ToObject has to run in the function
2389   // context, and we also need to take the global proxy from the function
2390   // context in case of conversion.
2391   __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
2392   // We need to convert the receiver for non-native sloppy mode functions.
2393   Label done_convert;
2394   __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
2395            Immediate(SharedFunctionInfo::IsNativeBit::kMask |
2396                      SharedFunctionInfo::IsStrictBit::kMask));
2397   __ j(not_zero, &done_convert);
2398   {
2399     // ----------- S t a t e -------------
2400     //  -- rax : the number of arguments
2401     //  -- rdx : the shared function info.
2402     //  -- rdi : the function to call (checked to be a JSFunction)
2403     //  -- rsi : the function context.
2404     // -----------------------------------
2405 
2406     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2407       // Patch receiver to global proxy.
2408       __ LoadGlobalProxy(rcx);
2409     } else {
2410       Label convert_to_object, convert_receiver;
2411       __ movq(rcx, args.GetReceiverOperand());
2412       __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
2413       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2414       __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
2415       __ j(above_equal, &done_convert);
2416       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2417         Label convert_global_proxy;
2418         __ JumpIfRoot(rcx, RootIndex::kUndefinedValue, &convert_global_proxy,
2419                       Label::kNear);
2420         __ JumpIfNotRoot(rcx, RootIndex::kNullValue, &convert_to_object,
2421                          Label::kNear);
2422         __ bind(&convert_global_proxy);
2423         {
2424           // Patch receiver to global proxy.
2425           __ LoadGlobalProxy(rcx);
2426         }
2427         __ jmp(&convert_receiver);
2428       }
2429       __ bind(&convert_to_object);
2430       {
2431         // Convert receiver using ToObject.
2432         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2433         // in the fast case? (fall back to AllocateInNewSpace?)
2434         FrameScope scope(masm, StackFrame::INTERNAL);
2435         __ SmiTag(rax);
2436         __ Push(rax);
2437         __ Push(rdi);
2438         __ movq(rax, rcx);
2439         __ Push(rsi);
2440         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2441                 RelocInfo::CODE_TARGET);
2442         __ Pop(rsi);
2443         __ movq(rcx, rax);
2444         __ Pop(rdi);
2445         __ Pop(rax);
2446         __ SmiUntag(rax);
2447       }
2448       __ LoadTaggedPointerField(
2449           rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
2450       __ bind(&convert_receiver);
2451     }
2452     __ movq(args.GetReceiverOperand(), rcx);
2453   }
2454   __ bind(&done_convert);
2455 
2456   // ----------- S t a t e -------------
2457   //  -- rax : the number of arguments
2458   //  -- rdx : the shared function info.
2459   //  -- rdi : the function to call (checked to be a JSFunction)
2460   //  -- rsi : the function context.
2461   // -----------------------------------
2462 
2463   __ movzxwq(
2464       rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
2465   __ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
2466 }
2467 
2468 namespace {
2469 
Generate_PushBoundArguments(MacroAssembler * masm)2470 void Generate_PushBoundArguments(MacroAssembler* masm) {
2471   // ----------- S t a t e -------------
2472   //  -- rax : the number of arguments
2473   //  -- rdx : new.target (only in case of [[Construct]])
2474   //  -- rdi : target (checked to be a JSBoundFunction)
2475   // -----------------------------------
2476 
2477   // Load [[BoundArguments]] into rcx and length of that into rbx.
2478   Label no_bound_arguments;
2479   __ LoadTaggedPointerField(
2480       rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
2481   __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
2482   __ testl(rbx, rbx);
2483   __ j(zero, &no_bound_arguments);
2484   {
2485     // ----------- S t a t e -------------
2486     //  -- rax : the number of arguments
2487     //  -- rdx : new.target (only in case of [[Construct]])
2488     //  -- rdi : target (checked to be a JSBoundFunction)
2489     //  -- rcx : the [[BoundArguments]] (implemented as FixedArray)
2490     //  -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
2491     // -----------------------------------
2492 
2493     // TODO(victor): Use Generate_StackOverflowCheck here.
2494     // Check the stack for overflow.
2495     {
2496       Label done;
2497       __ shlq(rbx, Immediate(kSystemPointerSizeLog2));
2498       __ movq(kScratchRegister, rsp);
2499       __ subq(kScratchRegister, rbx);
2500 
2501       // We are not trying to catch interruptions (i.e. debug break and
2502       // preemption) here, so check the "real stack limit".
2503       __ cmpq(kScratchRegister,
2504               __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
2505       __ j(above_equal, &done, Label::kNear);
2506       {
2507         FrameScope scope(masm, StackFrame::MANUAL);
2508         __ EnterFrame(StackFrame::INTERNAL);
2509         __ CallRuntime(Runtime::kThrowStackOverflow);
2510       }
2511       __ bind(&done);
2512     }
2513 
2514     // Save Return Address and Receiver into registers.
2515     __ Pop(r8);
2516     __ Pop(r10);
2517 
2518     // Push [[BoundArguments]] to the stack.
2519     {
2520       Label loop;
2521       __ LoadTaggedPointerField(
2522           rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
2523       __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
2524       __ addq(rax, rbx);  // Adjust effective number of arguments.
2525       __ bind(&loop);
2526       // Instead of doing decl(rbx) here subtract kTaggedSize from the header
2527       // offset in order to be able to move decl(rbx) right before the loop
2528       // condition. This is necessary in order to avoid flags corruption by
2529       // pointer decompression code.
2530       __ LoadAnyTaggedField(
2531           r12, FieldOperand(rcx, rbx, times_tagged_size,
2532                             FixedArray::kHeaderSize - kTaggedSize));
2533       __ Push(r12);
2534       __ decl(rbx);
2535       __ j(greater, &loop);
2536     }
2537 
2538     // Recover Receiver and Return Address.
2539     __ Push(r10);
2540     __ Push(r8);
2541   }
2542   __ bind(&no_bound_arguments);
2543 }
2544 
2545 }  // namespace
2546 
2547 // static
Generate_CallBoundFunctionImpl(MacroAssembler * masm)2548 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2549   // ----------- S t a t e -------------
2550   //  -- rax : the number of arguments
2551   //  -- rdi : the function to call (checked to be a JSBoundFunction)
2552   // -----------------------------------
2553   __ AssertBoundFunction(rdi);
2554 
2555   // Patch the receiver to [[BoundThis]].
2556   StackArgumentsAccessor args(rax);
2557   __ LoadAnyTaggedField(rbx,
2558                         FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
2559   __ movq(args.GetReceiverOperand(), rbx);
2560 
2561   // Push the [[BoundArguments]] onto the stack.
2562   Generate_PushBoundArguments(masm);
2563 
2564   // Call the [[BoundTargetFunction]] via the Call builtin.
2565   __ LoadTaggedPointerField(
2566       rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
2567   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2568           RelocInfo::CODE_TARGET);
2569 }
2570 
2571 // static
Generate_Call(MacroAssembler * masm,ConvertReceiverMode mode)2572 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2573   // ----------- S t a t e -------------
2574   //  -- rax : the number of arguments
2575   //  -- rdi : the target to call (can be any Object)
2576   // -----------------------------------
2577   Register argc = rax;
2578   Register target = rdi;
2579   Register map = rcx;
2580   Register instance_type = rdx;
2581   DCHECK(!AreAliased(argc, target, map, instance_type));
2582 
2583   StackArgumentsAccessor args(argc);
2584 
2585   Label non_callable, class_constructor;
2586   __ JumpIfSmi(target, &non_callable);
2587   __ LoadMap(map, target);
2588   __ CmpInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2589                           LAST_CALLABLE_JS_FUNCTION_TYPE);
2590   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2591           RelocInfo::CODE_TARGET, below_equal);
2592 
2593   __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
2594   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2595           RelocInfo::CODE_TARGET, equal);
2596 
2597   // Check if target has a [[Call]] internal method.
2598   __ testb(FieldOperand(map, Map::kBitFieldOffset),
2599            Immediate(Map::Bits1::IsCallableBit::kMask));
2600   __ j(zero, &non_callable, Label::kNear);
2601 
2602   // Check if target is a proxy and call CallProxy external builtin
2603   __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
2604   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET,
2605           equal);
2606 
2607   // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2608   // Check that the function is not a "classConstructor".
2609   __ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
2610   __ j(equal, &class_constructor);
2611 
2612   // 2. Call to something else, which might have a [[Call]] internal method (if
2613   // not we raise an exception).
2614 
2615   // Overwrite the original receiver with the (original) target.
2616   __ movq(args.GetReceiverOperand(), target);
2617   // Let the "call_as_function_delegate" take care of the rest.
2618   __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2619   __ Jump(masm->isolate()->builtins()->CallFunction(
2620               ConvertReceiverMode::kNotNullOrUndefined),
2621           RelocInfo::CODE_TARGET);
2622 
2623   // 3. Call to something that is not callable.
2624   __ bind(&non_callable);
2625   {
2626     FrameScope scope(masm, StackFrame::INTERNAL);
2627     __ Push(target);
2628     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2629     __ Trap();  // Unreachable.
2630   }
2631 
2632   // 4. The function is a "classConstructor", need to raise an exception.
2633   __ bind(&class_constructor);
2634   {
2635     FrameScope frame(masm, StackFrame::INTERNAL);
2636     __ Push(target);
2637     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2638     __ Trap();  // Unreachable.
2639   }
2640 }
2641 
2642 // static
Generate_ConstructFunction(MacroAssembler * masm)2643 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2644   // ----------- S t a t e -------------
2645   //  -- rax : the number of arguments
2646   //  -- rdx : the new target (checked to be a constructor)
2647   //  -- rdi : the constructor to call (checked to be a JSFunction)
2648   // -----------------------------------
2649   __ AssertConstructor(rdi);
2650   __ AssertFunction(rdi);
2651 
2652   // Calling convention for function specific ConstructStubs require
2653   // rbx to contain either an AllocationSite or undefined.
2654   __ LoadRoot(rbx, RootIndex::kUndefinedValue);
2655 
2656   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2657   __ LoadTaggedPointerField(
2658       rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
2659   __ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
2660            Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2661   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2662           RelocInfo::CODE_TARGET, not_zero);
2663 
2664   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2665           RelocInfo::CODE_TARGET);
2666 }
2667 
2668 // static
Generate_ConstructBoundFunction(MacroAssembler * masm)2669 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2670   // ----------- S t a t e -------------
2671   //  -- rax : the number of arguments
2672   //  -- rdx : the new target (checked to be a constructor)
2673   //  -- rdi : the constructor to call (checked to be a JSBoundFunction)
2674   // -----------------------------------
2675   __ AssertConstructor(rdi);
2676   __ AssertBoundFunction(rdi);
2677 
2678   // Push the [[BoundArguments]] onto the stack.
2679   Generate_PushBoundArguments(masm);
2680 
2681   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2682   {
2683     Label done;
2684     __ cmpq(rdi, rdx);
2685     __ j(not_equal, &done, Label::kNear);
2686     __ LoadTaggedPointerField(
2687         rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
2688     __ bind(&done);
2689   }
2690 
2691   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2692   __ LoadTaggedPointerField(
2693       rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
2694   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2695 }
2696 
2697 // static
Generate_Construct(MacroAssembler * masm)2698 void Builtins::Generate_Construct(MacroAssembler* masm) {
2699   // ----------- S t a t e -------------
2700   //  -- rax : the number of arguments
2701   //  -- rdx : the new target (either the same as the constructor or
2702   //           the JSFunction on which new was invoked initially)
2703   //  -- rdi : the constructor to call (can be any Object)
2704   // -----------------------------------
2705   Register argc = rax;
2706   Register target = rdi;
2707   Register map = rcx;
2708   Register instance_type = r8;
2709   DCHECK(!AreAliased(argc, target, map, instance_type));
2710 
2711   StackArgumentsAccessor args(argc);
2712 
2713   // Check if target is a Smi.
2714   Label non_constructor;
2715   __ JumpIfSmi(target, &non_constructor);
2716 
2717   // Check if target has a [[Construct]] internal method.
2718   __ LoadMap(map, target);
2719   __ testb(FieldOperand(map, Map::kBitFieldOffset),
2720            Immediate(Map::Bits1::IsConstructorBit::kMask));
2721   __ j(zero, &non_constructor);
2722 
2723   // Dispatch based on instance type.
2724   __ CmpInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
2725                           LAST_JS_FUNCTION_TYPE);
2726   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2727           RelocInfo::CODE_TARGET, below_equal);
2728 
2729   // Only dispatch to bound functions after checking whether they are
2730   // constructors.
2731   __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
2732   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2733           RelocInfo::CODE_TARGET, equal);
2734 
2735   // Only dispatch to proxies after checking whether they are constructors.
2736   __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
2737   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), RelocInfo::CODE_TARGET,
2738           equal);
2739 
2740   // Called Construct on an exotic Object with a [[Construct]] internal method.
2741   {
2742     // Overwrite the original receiver with the (original) target.
2743     __ movq(args.GetReceiverOperand(), target);
2744     // Let the "call_as_constructor_delegate" take care of the rest.
2745     __ LoadNativeContextSlot(target,
2746                              Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2747     __ Jump(masm->isolate()->builtins()->CallFunction(),
2748             RelocInfo::CODE_TARGET);
2749   }
2750 
2751   // Called Construct on an Object that doesn't have a [[Construct]] internal
2752   // method.
2753   __ bind(&non_constructor);
2754   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2755           RelocInfo::CODE_TARGET);
2756 }
2757 
2758 namespace {
2759 
Generate_OSREntry(MacroAssembler * masm,Register entry_address)2760 void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
2761   // Overwrite the return address on the stack.
2762   __ movq(StackOperandForReturnAddress(0), entry_address);
2763 
2764   // And "return" to the OSR entry point of the function.
2765   __ ret(0);
2766 }
2767 
OnStackReplacement(MacroAssembler * masm,bool is_interpreter)2768 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
2769   {
2770     FrameScope scope(masm, StackFrame::INTERNAL);
2771     __ CallRuntime(Runtime::kCompileForOnStackReplacement);
2772   }
2773 
2774   Label skip;
2775   // If the code object is null, just return to the caller.
2776   __ testq(rax, rax);
2777   __ j(not_equal, &skip, Label::kNear);
2778   __ ret(0);
2779 
2780   __ bind(&skip);
2781 
2782   if (is_interpreter) {
2783     // Drop the handler frame that is be sitting on top of the actual
2784     // JavaScript frame. This is the case then OSR is triggered from bytecode.
2785     __ leave();
2786   }
2787 
2788   // Load deoptimization data from the code object.
2789   __ LoadTaggedPointerField(
2790       rbx, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
2791 
2792   // Load the OSR entrypoint offset from the deoptimization data.
2793   __ SmiUntagField(
2794       rbx, FieldOperand(rbx, FixedArray::OffsetOfElementAt(
2795                                  DeoptimizationData::kOsrPcOffsetIndex)));
2796 
2797   // Compute the target address = code_obj + header_size + osr_offset
2798   __ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
2799 
2800   Generate_OSREntry(masm, rax);
2801 }
2802 
2803 }  // namespace
2804 
Generate_InterpreterOnStackReplacement(MacroAssembler * masm)2805 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
2806   return OnStackReplacement(masm, true);
2807 }
2808 
Generate_BaselineOnStackReplacement(MacroAssembler * masm)2809 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
2810   __ movq(kContextRegister,
2811           MemOperand(rbp, BaselineFrameConstants::kContextOffset));
2812   return OnStackReplacement(masm, false);
2813 }
2814 
2815 #if V8_ENABLE_WEBASSEMBLY
Generate_WasmCompileLazy(MacroAssembler * masm)2816 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2817   // The function index was pushed to the stack by the caller as int32.
2818   __ Pop(r15);
2819   // Convert to Smi for the runtime call.
2820   __ SmiTag(r15);
2821   {
2822     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2823     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2824 
2825     // Save all parameter registers (see wasm-linkage.h). They might be
2826     // overwritten in the runtime call below. We don't have any callee-saved
2827     // registers in wasm, so no need to store anything else.
2828     static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
2829                       arraysize(wasm::kGpParamRegisters),
2830                   "frame size mismatch");
2831     for (Register reg : wasm::kGpParamRegisters) {
2832       __ Push(reg);
2833     }
2834     static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
2835                       arraysize(wasm::kFpParamRegisters),
2836                   "frame size mismatch");
2837     __ AllocateStackSpace(kSimd128Size * arraysize(wasm::kFpParamRegisters));
2838     int offset = 0;
2839     for (DoubleRegister reg : wasm::kFpParamRegisters) {
2840       __ movdqu(Operand(rsp, offset), reg);
2841       offset += kSimd128Size;
2842     }
2843 
2844     // Push the Wasm instance as an explicit argument to WasmCompileLazy.
2845     __ Push(kWasmInstanceRegister);
2846     // Push the function index as second argument.
2847     __ Push(r15);
2848     // Initialize the JavaScript context with 0. CEntry will use it to
2849     // set the current context on the isolate.
2850     __ Move(kContextRegister, Smi::zero());
2851     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2852     // The entrypoint address is the return value.
2853     __ movq(r15, kReturnRegister0);
2854 
2855     // Restore registers.
2856     for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
2857       offset -= kSimd128Size;
2858       __ movdqu(reg, Operand(rsp, offset));
2859     }
2860     DCHECK_EQ(0, offset);
2861     __ addq(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
2862     for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
2863       __ Pop(reg);
2864     }
2865   }
2866   // Finally, jump to the entrypoint.
2867   __ jmp(r15);
2868 }
2869 
Generate_WasmDebugBreak(MacroAssembler * masm)2870 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2871   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2872   {
2873     FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2874 
2875     // Save all parameter registers. They might hold live values, we restore
2876     // them after the runtime call.
2877     for (int reg_code : base::bits::IterateBitsBackwards(
2878              WasmDebugBreakFrameConstants::kPushedGpRegs)) {
2879       __ Push(Register::from_code(reg_code));
2880     }
2881 
2882     constexpr int kFpStackSize =
2883         kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
2884     __ AllocateStackSpace(kFpStackSize);
2885     int offset = kFpStackSize;
2886     for (int reg_code : base::bits::IterateBitsBackwards(
2887              WasmDebugBreakFrameConstants::kPushedFpRegs)) {
2888       offset -= kSimd128Size;
2889       __ movdqu(Operand(rsp, offset), DoubleRegister::from_code(reg_code));
2890     }
2891 
2892     // Initialize the JavaScript context with 0. CEntry will use it to
2893     // set the current context on the isolate.
2894     __ Move(kContextRegister, Smi::zero());
2895     __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2896 
2897     // Restore registers.
2898     for (int reg_code :
2899          base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
2900       __ movdqu(DoubleRegister::from_code(reg_code), Operand(rsp, offset));
2901       offset += kSimd128Size;
2902     }
2903     __ addq(rsp, Immediate(kFpStackSize));
2904     for (int reg_code :
2905          base::bits::IterateBits(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
2906       __ Pop(Register::from_code(reg_code));
2907     }
2908   }
2909 
2910   __ ret(0);
2911 }
2912 
2913 namespace {
2914 // Helper functions for the GenericJSToWasmWrapper.
PrepareForBuiltinCall(MacroAssembler * masm,MemOperand GCScanSlotPlace,const int GCScanSlotCount,Register current_param,Register param_limit,Register current_int_param_slot,Register current_float_param_slot,Register valuetypes_array_ptr,Register wasm_instance,Register function_data)2915 void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
2916                            const int GCScanSlotCount, Register current_param,
2917                            Register param_limit,
2918                            Register current_int_param_slot,
2919                            Register current_float_param_slot,
2920                            Register valuetypes_array_ptr,
2921                            Register wasm_instance, Register function_data) {
2922   // Pushes and puts the values in order onto the stack before builtin calls for
2923   // the GenericJSToWasmWrapper.
2924   __ Move(GCScanSlotPlace, GCScanSlotCount);
2925   __ pushq(current_param);
2926   __ pushq(param_limit);
2927   __ pushq(current_int_param_slot);
2928   __ pushq(current_float_param_slot);
2929   __ pushq(valuetypes_array_ptr);
2930   __ pushq(wasm_instance);
2931   __ pushq(function_data);
2932   // We had to prepare the parameters for the Call: we have to put the context
2933   // into rsi.
2934   __ LoadAnyTaggedField(
2935       rsi,
2936       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
2937                                     WasmInstanceObject::kNativeContextOffset)));
2938 }
2939 
RestoreAfterBuiltinCall(MacroAssembler * masm,Register function_data,Register wasm_instance,Register valuetypes_array_ptr,Register current_float_param_slot,Register current_int_param_slot,Register param_limit,Register current_param)2940 void RestoreAfterBuiltinCall(MacroAssembler* masm, Register function_data,
2941                              Register wasm_instance,
2942                              Register valuetypes_array_ptr,
2943                              Register current_float_param_slot,
2944                              Register current_int_param_slot,
2945                              Register param_limit, Register current_param) {
2946   // Pop and load values from the stack in order into the registers after
2947   // builtin calls for the GenericJSToWasmWrapper.
2948   __ popq(function_data);
2949   __ popq(wasm_instance);
2950   __ popq(valuetypes_array_ptr);
2951   __ popq(current_float_param_slot);
2952   __ popq(current_int_param_slot);
2953   __ popq(param_limit);
2954   __ popq(current_param);
2955 }
2956 }  // namespace
2957 
Generate_GenericJSToWasmWrapper(MacroAssembler * masm)2958 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2959   // Set up the stackframe.
2960   __ EnterFrame(StackFrame::JS_TO_WASM);
2961 
2962   // -------------------------------------------
2963   // Compute offsets and prepare for GC.
2964   // -------------------------------------------
2965   // We will have to save a value indicating the GC the number
2966   // of values on the top of the stack that have to be scanned before calling
2967   // the Wasm function.
2968   constexpr int kFrameMarkerOffset = -kSystemPointerSize;
2969   constexpr int kGCScanSlotCountOffset =
2970       kFrameMarkerOffset - kSystemPointerSize;
2971   // The number of parameters passed to this function.
2972   constexpr int kInParamCountOffset =
2973       kGCScanSlotCountOffset - kSystemPointerSize;
2974   // The number of parameters according to the signature.
2975   constexpr int kParamCountOffset = kInParamCountOffset - kSystemPointerSize;
2976   constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
2977   constexpr int kValueTypesArrayStartOffset =
2978       kReturnCountOffset - kSystemPointerSize;
2979   // We set and use this slot only when moving parameters into the parameter
2980   // registers (so no GC scan is needed).
2981   constexpr int kFunctionDataOffset =
2982       kValueTypesArrayStartOffset - kSystemPointerSize;
2983   constexpr int kLastSpillOffset = kFunctionDataOffset;
2984   constexpr int kNumSpillSlots = 6;
2985   __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
2986   // Put the in_parameter count on the stack, we only  need it at the very end
2987   // when we pop the parameters off the stack.
2988   Register in_param_count = rax;
2989   if (kJSArgcIncludesReceiver) {
2990     __ decq(in_param_count);
2991   }
2992   __ movq(MemOperand(rbp, kInParamCountOffset), in_param_count);
2993   in_param_count = no_reg;
2994 
2995   // -------------------------------------------
2996   // Load the Wasm exported function data and the Wasm instance.
2997   // -------------------------------------------
2998   Register closure = rdi;
2999   Register shared_function_info = closure;
3000   __ LoadAnyTaggedField(
3001       shared_function_info,
3002       MemOperand(
3003           closure,
3004           wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
3005   closure = no_reg;
3006   Register function_data = shared_function_info;
3007   __ LoadAnyTaggedField(
3008       function_data,
3009       MemOperand(shared_function_info,
3010                  SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag));
3011   shared_function_info = no_reg;
3012 
3013   Register wasm_instance = rsi;
3014   __ LoadAnyTaggedField(
3015       wasm_instance,
3016       MemOperand(function_data,
3017                  WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
3018 
3019   // -------------------------------------------
3020   // Decrement the budget of the generic wrapper in function data.
3021   // -------------------------------------------
3022   __ SmiAddConstant(
3023       MemOperand(function_data, WasmExportedFunctionData::kWrapperBudgetOffset -
3024                                     kHeapObjectTag),
3025       Smi::FromInt(-1));
3026 
3027   // -------------------------------------------
3028   // Check if the budget of the generic wrapper reached 0 (zero).
3029   // -------------------------------------------
3030   // Instead of a specific comparison, we can directly use the flags set
3031   // from the previous addition.
3032   Label compile_wrapper, compile_wrapper_done;
3033   __ j(less_equal, &compile_wrapper);
3034   __ bind(&compile_wrapper_done);
3035 
3036   // -------------------------------------------
3037   // Load values from the signature.
3038   // -------------------------------------------
3039   Register foreign_signature = r11;
3040   __ LoadAnyTaggedField(
3041       foreign_signature,
3042       MemOperand(function_data,
3043                  WasmExportedFunctionData::kSignatureOffset - kHeapObjectTag));
3044   Register signature = foreign_signature;
3045   __ LoadExternalPointerField(
3046       signature,
3047       FieldOperand(foreign_signature, Foreign::kForeignAddressOffset),
3048       kForeignForeignAddressTag, kScratchRegister);
3049   foreign_signature = no_reg;
3050   Register return_count = r8;
3051   __ movq(return_count,
3052           MemOperand(signature, wasm::FunctionSig::kReturnCountOffset));
3053   Register param_count = rcx;
3054   __ movq(param_count,
3055           MemOperand(signature, wasm::FunctionSig::kParameterCountOffset));
3056   Register valuetypes_array_ptr = signature;
3057   __ movq(valuetypes_array_ptr,
3058           MemOperand(signature, wasm::FunctionSig::kRepsOffset));
3059   signature = no_reg;
3060 
3061   // -------------------------------------------
3062   // Store signature-related values to the stack.
3063   // -------------------------------------------
3064   // We store values on the stack to restore them after function calls.
3065   // We cannot push values onto the stack right before the wasm call. The wasm
3066   // function expects the parameters, that didn't fit into the registers, on the
3067   // top of the stack.
3068   __ movq(MemOperand(rbp, kParamCountOffset), param_count);
3069   __ movq(MemOperand(rbp, kReturnCountOffset), return_count);
3070   __ movq(MemOperand(rbp, kValueTypesArrayStartOffset), valuetypes_array_ptr);
3071 
3072   // -------------------------------------------
3073   // Parameter handling.
3074   // -------------------------------------------
3075   Label prepare_for_wasm_call;
3076   __ Cmp(param_count, 0);
3077 
3078   // IF we have 0 params: jump through parameter handling.
3079   __ j(equal, &prepare_for_wasm_call);
3080 
3081   // -------------------------------------------
3082   // Create 2 sections for integer and float params.
3083   // -------------------------------------------
3084   // We will create 2 sections on the stack for the evaluated parameters:
3085   // Integer and Float section, both with parameter count size. We will place
3086   // the parameters into these sections depending on their valuetype. This way
3087   // we can easily fill the general purpose and floating point parameter
3088   // registers and place the remaining parameters onto the stack in proper order
3089   // for the Wasm function. These remaining params are the final stack
3090   // parameters for the call to WebAssembly. Example of the stack layout after
3091   // processing 2 int and 1 float parameters when param_count is 4.
3092   //   +-----------------+
3093   //   |      rbp        |
3094   //   |-----------------|-------------------------------
3095   //   |                 |   Slots we defined
3096   //   |   Saved values  |    when setting up
3097   //   |                 |     the stack
3098   //   |                 |
3099   //   +-Integer section-+--- <--- start_int_section ----
3100   //   |  1st int param  |
3101   //   |- - - - - - - - -|
3102   //   |  2nd int param  |
3103   //   |- - - - - - - - -|  <----- current_int_param_slot
3104   //   |                 |       (points to the stackslot
3105   //   |- - - - - - - - -|  where the next int param should be placed)
3106   //   |                 |
3107   //   +--Float section--+--- <--- start_float_section --
3108   //   | 1st float param |
3109   //   |- - - - - - - - -|  <----  current_float_param_slot
3110   //   |                 |       (points to the stackslot
3111   //   |- - - - - - - - -|  where the next float param should be placed)
3112   //   |                 |
3113   //   |- - - - - - - - -|
3114   //   |                 |
3115   //   +---Final stack---+------------------------------
3116   //   +-parameters for--+------------------------------
3117   //   +-the Wasm call---+------------------------------
3118   //   |      . . .      |
3119 
3120   constexpr int kIntegerSectionStartOffset =
3121       kLastSpillOffset - kSystemPointerSize;
3122   // For Integer section.
3123   // Set the current_int_param_slot to point to the start of the section.
3124   Register current_int_param_slot = r10;
3125   __ leaq(current_int_param_slot, MemOperand(rsp, -kSystemPointerSize));
3126   Register params_size = param_count;
3127   param_count = no_reg;
3128   __ shlq(params_size, Immediate(kSystemPointerSizeLog2));
3129   __ subq(rsp, params_size);
3130 
3131   // For Float section.
3132   // Set the current_float_param_slot to point to the start of the section.
3133   Register current_float_param_slot = r15;
3134   __ leaq(current_float_param_slot, MemOperand(rsp, -kSystemPointerSize));
3135   __ subq(rsp, params_size);
3136   params_size = no_reg;
3137   param_count = rcx;
3138   __ movq(param_count, MemOperand(rbp, kParamCountOffset));
3139 
3140   // -------------------------------------------
3141   // Set up for the param evaluation loop.
3142   // -------------------------------------------
3143   // We will loop through the params starting with the 1st param.
3144   // The order of processing the params is important. We have to evaluate them
3145   // in an increasing order.
3146   //       +-----------------+---------------
3147   //       |     param n     |
3148   //       |- - - - - - - - -|
3149   //       |    param n-1    |   Caller
3150   //       |       ...       | frame slots
3151   //       |     param 1     |
3152   //       |- - - - - - - - -|
3153   //       |    receiver     |
3154   //       +-----------------+---------------
3155   //       |  return addr    |
3156   //   FP->|- - - - - - - - -|
3157   //       |      rbp        |   Spill slots
3158   //       |- - - - - - - - -|
3159   //
3160   // [rbp + current_param] gives us the parameter we are processing.
3161   // We iterate through half-open interval <1st param, [rbp + param_limit]).
3162 
3163   Register current_param = rbx;
3164   Register param_limit = rdx;
3165   constexpr int kReceiverOnStackSize = kSystemPointerSize;
3166   __ Move(current_param,
3167           kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize);
3168   __ movq(param_limit, param_count);
3169   __ shlq(param_limit, Immediate(kSystemPointerSizeLog2));
3170   __ addq(param_limit,
3171           Immediate(kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize));
3172   const int increment = kSystemPointerSize;
3173   Register param = rax;
3174   // We have to check the types of the params. The ValueType array contains
3175   // first the return then the param types.
3176   constexpr int kValueTypeSize = sizeof(wasm::ValueType);
3177   STATIC_ASSERT(kValueTypeSize == 4);
3178   const int32_t kValueTypeSizeLog2 = log2(kValueTypeSize);
3179   // Set the ValueType array pointer to point to the first parameter.
3180   Register returns_size = return_count;
3181   return_count = no_reg;
3182   __ shlq(returns_size, Immediate(kValueTypeSizeLog2));
3183   __ addq(valuetypes_array_ptr, returns_size);
3184   returns_size = no_reg;
3185   Register valuetype = r12;
3186 
3187   // -------------------------------------------
3188   // Param evaluation loop.
3189   // -------------------------------------------
3190   Label loop_through_params;
3191   __ bind(&loop_through_params);
3192 
3193   __ movq(param, MemOperand(rbp, current_param, times_1, 0));
3194   __ movl(valuetype,
3195           Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));
3196 
3197   // -------------------------------------------
3198   // Param conversion.
3199   // -------------------------------------------
3200   // If param is a Smi we can easily convert it. Otherwise we'll call a builtin
3201   // for conversion.
3202   Label convert_param;
3203   __ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
3204   __ j(not_equal, &convert_param);
3205   __ JumpIfNotSmi(param, &convert_param);
3206   // Change the paramfrom Smi to int32.
3207   __ SmiUntag(param);
3208   // Zero extend.
3209   __ movl(param, param);
3210   // Place the param into the proper slot in Integer section.
3211   __ movq(MemOperand(current_int_param_slot, 0), param);
3212   __ subq(current_int_param_slot, Immediate(kSystemPointerSize));
3213 
3214   // -------------------------------------------
3215   // Param conversion done.
3216   // -------------------------------------------
3217   Label param_conversion_done;
3218   __ bind(&param_conversion_done);
3219 
3220   __ addq(current_param, Immediate(increment));
3221   __ addq(valuetypes_array_ptr, Immediate(kValueTypeSize));
3222 
3223   __ cmpq(current_param, param_limit);
3224   __ j(not_equal, &loop_through_params);
3225 
3226   // -------------------------------------------
3227   // Move the parameters into the proper param registers.
3228   // -------------------------------------------
3229   // The Wasm function expects that the params can be popped from the top of the
3230   // stack in an increasing order.
3231   // We can always move the values on the beginning of the sections into the GP
3232   // or FP parameter registers. If the parameter count is less than the number
3233   // of parameter registers, we may move values into the registers that are not
3234   // in the section.
3235   // ----------- S t a t e -------------
3236   //  -- r8  : start_int_section
3237   //  -- rdi : start_float_section
3238   //  -- r10 : current_int_param_slot
3239   //  -- r15 : current_float_param_slot
3240   //  -- r11 : valuetypes_array_ptr
3241   //  -- r12 : valuetype
3242   //  -- rsi : wasm_instance
3243   //  -- GpParamRegisters = rax, rdx, rcx, rbx, r9
3244   // -----------------------------------
3245 
3246   Register temp_params_size = rax;
3247   __ movq(temp_params_size, MemOperand(rbp, kParamCountOffset));
3248   __ shlq(temp_params_size, Immediate(kSystemPointerSizeLog2));
3249   // We want to use the register of the function_data = rdi.
3250   __ movq(MemOperand(rbp, kFunctionDataOffset), function_data);
3251   Register start_float_section = function_data;
3252   function_data = no_reg;
3253   __ movq(start_float_section, rbp);
3254   __ addq(start_float_section, Immediate(kIntegerSectionStartOffset));
3255   __ subq(start_float_section, temp_params_size);
3256   temp_params_size = no_reg;
3257   // Fill the FP param registers.
3258   __ Movsd(xmm1, MemOperand(start_float_section, 0));
3259   __ Movsd(xmm2, MemOperand(start_float_section, -kSystemPointerSize));
3260   __ Movsd(xmm3, MemOperand(start_float_section, -2 * kSystemPointerSize));
3261   __ Movsd(xmm4, MemOperand(start_float_section, -3 * kSystemPointerSize));
3262   __ Movsd(xmm5, MemOperand(start_float_section, -4 * kSystemPointerSize));
3263   __ Movsd(xmm6, MemOperand(start_float_section, -5 * kSystemPointerSize));
3264   // We want the start to point to the last properly placed param.
3265   __ subq(start_float_section, Immediate(5 * kSystemPointerSize));
3266 
3267   Register start_int_section = r8;
3268   __ movq(start_int_section, rbp);
3269   __ addq(start_int_section, Immediate(kIntegerSectionStartOffset));
3270   // Fill the GP param registers.
3271   __ movq(rax, MemOperand(start_int_section, 0));
3272   __ movq(rdx, MemOperand(start_int_section, -kSystemPointerSize));
3273   __ movq(rcx, MemOperand(start_int_section, -2 * kSystemPointerSize));
3274   __ movq(rbx, MemOperand(start_int_section, -3 * kSystemPointerSize));
3275   __ movq(r9, MemOperand(start_int_section, -4 * kSystemPointerSize));
3276   // We want the start to point to the last properly placed param.
3277   __ subq(start_int_section, Immediate(4 * kSystemPointerSize));
3278 
3279   // -------------------------------------------
3280   // Place the final stack parameters to the proper place.
3281   // -------------------------------------------
3282   // We want the current_param_slot (insertion) pointers to point at the last
3283   // param of the section instead of the next free slot.
3284   __ addq(current_int_param_slot, Immediate(kSystemPointerSize));
3285   __ addq(current_float_param_slot, Immediate(kSystemPointerSize));
3286 
3287   // -------------------------------------------
3288   // Final stack parameters loop.
3289   // -------------------------------------------
3290   // The parameters that didn't fit into the registers should be placed on the
3291   // top of the stack contiguously. The interval of parameters between the
3292   // start_section and the current_param_slot pointers define the remaining
3293   // parameters of the section.
3294   // We can iterate through the valuetypes array to decide from which section we
3295   // need to push the parameter onto the top of the stack. By iterating in a
3296   // reversed order we can easily pick the last parameter of the proper section.
3297   // The parameter of the section is pushed on the top of the stack only if the
3298   // interval of remaining params is not empty. This way we ensure that only
3299   // params that didn't fit into param registers are pushed again.
3300 
3301   Label loop_through_valuetypes;
3302   __ bind(&loop_through_valuetypes);
3303 
3304   // We iterated through the valuetypes array, we are one field over the end in
3305   // the beginning. Also, we have to decrement it in each iteration.
3306   __ subq(valuetypes_array_ptr, Immediate(kValueTypeSize));
3307 
3308   // Check if there are still remaining integer params.
3309   Label continue_loop;
3310   __ cmpq(start_int_section, current_int_param_slot);
3311   // If there are remaining integer params.
3312   __ j(greater, &continue_loop);
3313 
3314   // Check if there are still remaining float params.
3315   __ cmpq(start_float_section, current_float_param_slot);
3316   // If there aren't any params remaining.
3317   Label params_done;
3318   __ j(less_equal, &params_done);
3319 
3320   __ bind(&continue_loop);
3321   __ movl(valuetype,
3322           Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));
3323   Label place_integer_param;
3324   Label place_float_param;
3325   __ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
3326   __ j(equal, &place_integer_param);
3327 
3328   __ cmpq(valuetype, Immediate(wasm::kWasmI64.raw_bit_field()));
3329   __ j(equal, &place_integer_param);
3330 
3331   __ cmpq(valuetype, Immediate(wasm::kWasmF32.raw_bit_field()));
3332   __ j(equal, &place_float_param);
3333 
3334   __ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
3335   __ j(equal, &place_float_param);
3336 
3337   __ int3();
3338 
3339   __ bind(&place_integer_param);
3340   __ cmpq(start_int_section, current_int_param_slot);
3341   // If there aren't any integer params remaining, just floats, then go to the
3342   // next valuetype.
3343   __ j(less_equal, &loop_through_valuetypes);
3344 
3345   // Copy the param from the integer section to the actual parameter area.
3346   __ pushq(MemOperand(current_int_param_slot, 0));
3347   __ addq(current_int_param_slot, Immediate(kSystemPointerSize));
3348   __ jmp(&loop_through_valuetypes);
3349 
3350   __ bind(&place_float_param);
3351   __ cmpq(start_float_section, current_float_param_slot);
3352   // If there aren't any float params remaining, just integers, then go to the
3353   // next valuetype.
3354   __ j(less_equal, &loop_through_valuetypes);
3355 
3356   // Copy the param from the float section to the actual parameter area.
3357   __ pushq(MemOperand(current_float_param_slot, 0));
3358   __ addq(current_float_param_slot, Immediate(kSystemPointerSize));
3359   __ jmp(&loop_through_valuetypes);
3360 
3361   __ bind(&params_done);
3362   // Restore function_data after we are done with parameter placement.
3363   function_data = rdi;
3364   __ movq(function_data, MemOperand(rbp, kFunctionDataOffset));
3365 
3366   __ bind(&prepare_for_wasm_call);
3367   // -------------------------------------------
3368   // Prepare for the Wasm call.
3369   // -------------------------------------------
3370   // Set thread_in_wasm_flag.
3371   Register thread_in_wasm_flag_addr = r12;
3372   __ movq(
3373       thread_in_wasm_flag_addr,
3374       MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
3375   __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1));
3376   thread_in_wasm_flag_addr = no_reg;
3377 
3378   Register function_entry = function_data;
3379   Register scratch = r12;
3380   __ LoadExternalPointerField(
3381       function_entry,
3382       FieldOperand(function_data,
3383                    WasmExportedFunctionData::kForeignAddressOffset),
3384       kForeignForeignAddressTag, scratch);
3385   function_data = no_reg;
3386   scratch = no_reg;
3387 
3388   // We set the indicating value for the GC to the proper one for Wasm call.
3389   constexpr int kWasmCallGCScanSlotCount = 0;
3390   __ Move(MemOperand(rbp, kGCScanSlotCountOffset), kWasmCallGCScanSlotCount);
3391 
3392   // -------------------------------------------
3393   // Call the Wasm function.
3394   // -------------------------------------------
3395   __ call(function_entry);
3396   function_entry = no_reg;
3397 
3398   // -------------------------------------------
3399   // Resetting after the Wasm call.
3400   // -------------------------------------------
3401   // Restore rsp to free the reserved stack slots for the sections.
3402   __ leaq(rsp, MemOperand(rbp, kLastSpillOffset));
3403 
3404   // Unset thread_in_wasm_flag.
3405   thread_in_wasm_flag_addr = r8;
3406   __ movq(
3407       thread_in_wasm_flag_addr,
3408       MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
3409   __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(0));
3410   thread_in_wasm_flag_addr = no_reg;
3411 
3412   // -------------------------------------------
3413   // Return handling.
3414   // -------------------------------------------
3415   return_count = r8;
3416   __ movq(return_count, MemOperand(rbp, kReturnCountOffset));
3417   Register return_reg = rax;
3418 
3419   // If we have 1 return value, then jump to conversion.
3420   __ cmpl(return_count, Immediate(1));
3421   Label convert_return;
3422   __ j(equal, &convert_return);
3423 
3424   // Otherwise load undefined.
3425   __ LoadRoot(return_reg, RootIndex::kUndefinedValue);
3426 
3427   Label return_done;
3428   __ bind(&return_done);
3429   __ movq(param_count, MemOperand(rbp, kParamCountOffset));
3430 
3431   // Calculate the number of parameters we have to pop off the stack. This
3432   // number is max(in_param_count, param_count).
3433   in_param_count = rdx;
3434   __ movq(in_param_count, MemOperand(rbp, kInParamCountOffset));
3435   __ cmpq(param_count, in_param_count);
3436   __ cmovq(less, param_count, in_param_count);
3437 
3438   // -------------------------------------------
3439   // Deconstrunct the stack frame.
3440   // -------------------------------------------
3441   __ LeaveFrame(StackFrame::JS_TO_WASM);
3442 
3443   // We have to remove the caller frame slots:
3444   //  - JS arguments
3445   //  - the receiver
3446   // and transfer the control to the return address (the return address is
3447   // expected to be on the top of the stack).
3448   // We cannot use just the ret instruction for this, because we cannot pass the
3449   // number of slots to remove in a Register as an argument.
3450   __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
3451                    TurboAssembler::kCountExcludesReceiver);
3452   __ ret(0);
3453 
3454   // --------------------------------------------------------------------------
3455   //                          Deferred code.
3456   // --------------------------------------------------------------------------
3457 
3458   // -------------------------------------------
3459   // Param conversion builtins.
3460   // -------------------------------------------
3461   __ bind(&convert_param);
3462   // Restore function_data register (which was clobbered by the code above,
3463   // but was valid when jumping here earlier).
3464   function_data = rdi;
3465   // The order of pushes is important. We want the heap objects, that should be
3466   // scanned by GC, to be on the top of the stack.
3467   // We have to set the indicating value for the GC to the number of values on
3468   // the top of the stack that have to be scanned before calling the builtin
3469   // function.
3470   // The builtin expects the parameter to be in register param = rax.
3471 
3472   constexpr int kBuiltinCallGCScanSlotCount = 2;
3473   PrepareForBuiltinCall(masm, MemOperand(rbp, kGCScanSlotCountOffset),
3474                         kBuiltinCallGCScanSlotCount, current_param, param_limit,
3475                         current_int_param_slot, current_float_param_slot,
3476                         valuetypes_array_ptr, wasm_instance, function_data);
3477 
3478   Label param_kWasmI32_not_smi;
3479   Label param_kWasmI64;
3480   Label param_kWasmF32;
3481   Label param_kWasmF64;
3482 
3483   __ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
3484   __ j(equal, &param_kWasmI32_not_smi);
3485 
3486   __ cmpq(valuetype, Immediate(wasm::kWasmI64.raw_bit_field()));
3487   __ j(equal, &param_kWasmI64);
3488 
3489   __ cmpq(valuetype, Immediate(wasm::kWasmF32.raw_bit_field()));
3490   __ j(equal, &param_kWasmF32);
3491 
3492   __ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
3493   __ j(equal, &param_kWasmF64);
3494 
3495   __ int3();
3496 
3497   __ bind(&param_kWasmI32_not_smi);
3498   __ Call(BUILTIN_CODE(masm->isolate(), WasmTaggedNonSmiToInt32),
3499           RelocInfo::CODE_TARGET);
3500   // Param is the result of the builtin.
3501   __ AssertZeroExtended(param);
3502   RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
3503                           valuetypes_array_ptr, current_float_param_slot,
3504                           current_int_param_slot, param_limit, current_param);
3505   __ movq(MemOperand(current_int_param_slot, 0), param);
3506   __ subq(current_int_param_slot, Immediate(kSystemPointerSize));
3507   __ jmp(&param_conversion_done);
3508 
3509   __ bind(&param_kWasmI64);
3510   __ Call(BUILTIN_CODE(masm->isolate(), BigIntToI64), RelocInfo::CODE_TARGET);
3511   RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
3512                           valuetypes_array_ptr, current_float_param_slot,
3513                           current_int_param_slot, param_limit, current_param);
3514   __ movq(MemOperand(current_int_param_slot, 0), param);
3515   __ subq(current_int_param_slot, Immediate(kSystemPointerSize));
3516   __ jmp(&param_conversion_done);
3517 
3518   __ bind(&param_kWasmF32);
3519   __ Call(BUILTIN_CODE(masm->isolate(), WasmTaggedToFloat64),
3520           RelocInfo::CODE_TARGET);
3521   RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
3522                           valuetypes_array_ptr, current_float_param_slot,
3523                           current_int_param_slot, param_limit, current_param);
3524   // Clear higher bits.
3525   __ Xorpd(xmm1, xmm1);
3526   // Truncate float64 to float32.
3527   __ Cvtsd2ss(xmm1, xmm0);
3528   __ Movsd(MemOperand(current_float_param_slot, 0), xmm1);
3529   __ subq(current_float_param_slot, Immediate(kSystemPointerSize));
3530   __ jmp(&param_conversion_done);
3531 
3532   __ bind(&param_kWasmF64);
3533   __ Call(BUILTIN_CODE(masm->isolate(), WasmTaggedToFloat64),
3534           RelocInfo::CODE_TARGET);
3535   RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
3536                           valuetypes_array_ptr, current_float_param_slot,
3537                           current_int_param_slot, param_limit, current_param);
3538   __ Movsd(MemOperand(current_float_param_slot, 0), xmm0);
3539   __ subq(current_float_param_slot, Immediate(kSystemPointerSize));
3540   __ jmp(&param_conversion_done);
3541 
3542   // -------------------------------------------
3543   // Return conversions.
3544   // -------------------------------------------
3545   __ bind(&convert_return);
3546   // We have to make sure that the kGCScanSlotCount is set correctly when we
3547   // call the builtins for conversion. For these builtins it's the same as for
3548   // the Wasm call, that is, kGCScanSlotCount = 0, so we don't have to reset it.
3549   // We don't need the JS context for these builtin calls.
3550 
3551   __ movq(valuetypes_array_ptr, MemOperand(rbp, kValueTypesArrayStartOffset));
3552   // The first valuetype of the array is the return's valuetype.
3553   __ movl(valuetype,
3554           Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));
3555 
3556   Label return_kWasmI32;
3557   Label return_kWasmI64;
3558   Label return_kWasmF32;
3559   Label return_kWasmF64;
3560 
3561   __ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
3562   __ j(equal, &return_kWasmI32);
3563 
3564   __ cmpq(valuetype, Immediate(wasm::kWasmI64.raw_bit_field()));
3565   __ j(equal, &return_kWasmI64);
3566 
3567   __ cmpq(valuetype, Immediate(wasm::kWasmF32.raw_bit_field()));
3568   __ j(equal, &return_kWasmF32);
3569 
3570   __ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
3571   __ j(equal, &return_kWasmF64);
3572 
3573   __ int3();
3574 
3575   __ bind(&return_kWasmI32);
3576   Label to_heapnumber;
3577   // If pointer compression is disabled, we can convert the return to a smi.
3578   if (SmiValuesAre32Bits()) {
3579     __ SmiTag(return_reg);
3580   } else {
3581     Register temp = rbx;
3582     __ movq(temp, return_reg);
3583     // Double the return value to test if it can be a Smi.
3584     __ addl(temp, return_reg);
3585     temp = no_reg;
3586     // If there was overflow, convert the return value to a HeapNumber.
3587     __ j(overflow, &to_heapnumber);
3588     // If there was no overflow, we can convert to Smi.
3589     __ SmiTag(return_reg);
3590   }
3591   __ jmp(&return_done);
3592 
3593   // Handle the conversion of the I32 return value to HeapNumber when it cannot
3594   // be a smi.
3595   __ bind(&to_heapnumber);
3596   __ Call(BUILTIN_CODE(masm->isolate(), WasmInt32ToHeapNumber),
3597           RelocInfo::CODE_TARGET);
3598   __ jmp(&return_done);
3599 
3600   __ bind(&return_kWasmI64);
3601   __ Call(BUILTIN_CODE(masm->isolate(), I64ToBigInt), RelocInfo::CODE_TARGET);
3602   __ jmp(&return_done);
3603 
3604   __ bind(&return_kWasmF32);
3605   // The builtin expects the value to be in xmm0.
3606   __ Movss(xmm0, xmm1);
3607   __ Call(BUILTIN_CODE(masm->isolate(), WasmFloat32ToNumber),
3608           RelocInfo::CODE_TARGET);
3609   __ jmp(&return_done);
3610 
3611   __ bind(&return_kWasmF64);
3612   // The builtin expects the value to be in xmm0.
3613   __ Movsd(xmm0, xmm1);
3614   __ Call(BUILTIN_CODE(masm->isolate(), WasmFloat64ToNumber),
3615           RelocInfo::CODE_TARGET);
3616   __ jmp(&return_done);
3617 
3618   // -------------------------------------------
3619   // Kick off compilation.
3620   // -------------------------------------------
3621   __ bind(&compile_wrapper);
3622   // Enable GC.
3623   MemOperand GCScanSlotPlace = MemOperand(rbp, kGCScanSlotCountOffset);
3624   __ Move(GCScanSlotPlace, 4);
3625   // Save registers to the stack.
3626   __ pushq(wasm_instance);
3627   __ pushq(function_data);
3628   // Push the arguments for the runtime call.
3629   __ Push(wasm_instance);  // first argument
3630   __ Push(function_data);  // second argument
3631   // Set up context.
3632   __ Move(kContextRegister, Smi::zero());
3633   // Call the runtime function that kicks off compilation.
3634   __ CallRuntime(Runtime::kWasmCompileWrapper, 2);
3635   // Pop the result.
3636   __ movq(r9, kReturnRegister0);
3637   // Restore registers from the stack.
3638   __ popq(function_data);
3639   __ popq(wasm_instance);
3640   __ jmp(&compile_wrapper_done);
3641 }
3642 
Generate_WasmOnStackReplace(MacroAssembler * masm)3643 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
3644   MemOperand OSRTargetSlot(rbp, -wasm::kOSRTargetOffset);
3645   __ movq(kScratchRegister, OSRTargetSlot);
3646   __ Move(OSRTargetSlot, 0);
3647   __ jmp(kScratchRegister);
3648 }
3649 
3650 #endif  // V8_ENABLE_WEBASSEMBLY
3651 
Generate_CEntry(MacroAssembler * masm,int result_size,SaveFPRegsMode save_doubles,ArgvMode argv_mode,bool builtin_exit_frame)3652 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
3653                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
3654                                bool builtin_exit_frame) {
3655   // rax: number of arguments including receiver
3656   // rbx: pointer to C function  (C callee-saved)
3657   // rbp: frame pointer of calling JS frame (restored after C call)
3658   // rsp: stack pointer  (restored after C call)
3659   // rsi: current context (restored)
3660   //
3661   // If argv_mode == ArgvMode::kRegister:
3662   // r15: pointer to the first argument
3663 
3664 #ifdef V8_TARGET_OS_WIN
3665   // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
3666   // stack to be aligned to 16 bytes. It only allows a single-word to be
3667   // returned in register rax. Larger return sizes must be written to an address
3668   // passed as a hidden first argument.
3669   const Register kCCallArg0 = rcx;
3670   const Register kCCallArg1 = rdx;
3671   const Register kCCallArg2 = r8;
3672   const Register kCCallArg3 = r9;
3673   const int kArgExtraStackSpace = 2;
3674   const int kMaxRegisterResultSize = 1;
3675 #else
3676   // GCC / Clang passes arguments in rdi, rsi, rdx, rcx, r8, r9. Simple results
3677   // are returned in rax, and a struct of two pointers are returned in rax+rdx.
3678   // Larger return sizes must be written to an address passed as a hidden first
3679   // argument.
3680   const Register kCCallArg0 = rdi;
3681   const Register kCCallArg1 = rsi;
3682   const Register kCCallArg2 = rdx;
3683   const Register kCCallArg3 = rcx;
3684   const int kArgExtraStackSpace = 0;
3685   const int kMaxRegisterResultSize = 2;
3686 #endif  // V8_TARGET_OS_WIN
3687 
3688   // Enter the exit frame that transitions from JavaScript to C++.
3689   int arg_stack_space =
3690       kArgExtraStackSpace +
3691       (result_size <= kMaxRegisterResultSize ? 0 : result_size);
3692   if (argv_mode == ArgvMode::kRegister) {
3693     DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
3694     DCHECK(!builtin_exit_frame);
3695     __ EnterApiExitFrame(arg_stack_space);
3696     // Move argc into r12 (argv is already in r15).
3697     __ movq(r12, rax);
3698   } else {
3699     __ EnterExitFrame(
3700         arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
3701         builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
3702   }
3703 
3704   // rbx: pointer to builtin function  (C callee-saved).
3705   // rbp: frame pointer of exit frame  (restored after C call).
3706   // rsp: stack pointer (restored after C call).
3707   // r12: number of arguments including receiver (C callee-saved).
3708   // r15: argv pointer (C callee-saved).
3709 
3710   // Check stack alignment.
3711   if (FLAG_debug_code) {
3712     __ CheckStackAlignment();
3713   }
3714 
3715   // Call C function. The arguments object will be created by stubs declared by
3716   // DECLARE_RUNTIME_FUNCTION().
3717   if (result_size <= kMaxRegisterResultSize) {
3718     // Pass a pointer to the Arguments object as the first argument.
3719     // Return result in single register (rax), or a register pair (rax, rdx).
3720     __ movq(kCCallArg0, r12);  // argc.
3721     __ movq(kCCallArg1, r15);  // argv.
3722     __ Move(kCCallArg2, ExternalReference::isolate_address(masm->isolate()));
3723   } else {
3724     DCHECK_LE(result_size, 2);
3725     // Pass a pointer to the result location as the first argument.
3726     __ leaq(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
3727     // Pass a pointer to the Arguments object as the second argument.
3728     __ movq(kCCallArg1, r12);  // argc.
3729     __ movq(kCCallArg2, r15);  // argv.
3730     __ Move(kCCallArg3, ExternalReference::isolate_address(masm->isolate()));
3731   }
3732   __ call(rbx);
3733 
3734   if (result_size > kMaxRegisterResultSize) {
3735     // Read result values stored on stack. Result is stored
3736     // above the the two Arguments object slots on Win64.
3737     DCHECK_LE(result_size, 2);
3738     __ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
3739     __ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
3740   }
3741   // Result is in rax or rdx:rax - do not destroy these registers!
3742 
3743   // Check result for exception sentinel.
3744   Label exception_returned;
3745   __ CompareRoot(rax, RootIndex::kException);
3746   __ j(equal, &exception_returned);
3747 
3748   // Check that there is no pending exception, otherwise we
3749   // should have returned the exception sentinel.
3750   if (FLAG_debug_code) {
3751     Label okay;
3752     __ LoadRoot(kScratchRegister, RootIndex::kTheHoleValue);
3753     ExternalReference pending_exception_address = ExternalReference::Create(
3754         IsolateAddressId::kPendingExceptionAddress, masm->isolate());
3755     Operand pending_exception_operand =
3756         masm->ExternalReferenceAsOperand(pending_exception_address);
3757     __ cmp_tagged(kScratchRegister, pending_exception_operand);
3758     __ j(equal, &okay, Label::kNear);
3759     __ int3();
3760     __ bind(&okay);
3761   }
3762 
3763   // Exit the JavaScript to C++ exit frame.
3764   __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
3765                     argv_mode == ArgvMode::kStack);
3766   __ ret(0);
3767 
3768   // Handling of exception.
3769   __ bind(&exception_returned);
3770 
3771   ExternalReference pending_handler_context_address = ExternalReference::Create(
3772       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
3773   ExternalReference pending_handler_entrypoint_address =
3774       ExternalReference::Create(
3775           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
3776   ExternalReference pending_handler_fp_address = ExternalReference::Create(
3777       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
3778   ExternalReference pending_handler_sp_address = ExternalReference::Create(
3779       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
3780 
3781   // Ask the runtime for help to determine the handler. This will set rax to
3782   // contain the current pending exception, don't clobber it.
3783   ExternalReference find_handler =
3784       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
3785   {
3786     FrameScope scope(masm, StackFrame::MANUAL);
3787     __ Move(arg_reg_1, 0);  // argc.
3788     __ Move(arg_reg_2, 0);  // argv.
3789     __ Move(arg_reg_3, ExternalReference::isolate_address(masm->isolate()));
3790     __ PrepareCallCFunction(3);
3791     __ CallCFunction(find_handler, 3);
3792   }
3793   // Retrieve the handler context, SP and FP.
3794   __ movq(rsi,
3795           masm->ExternalReferenceAsOperand(pending_handler_context_address));
3796   __ movq(rsp, masm->ExternalReferenceAsOperand(pending_handler_sp_address));
3797   __ movq(rbp, masm->ExternalReferenceAsOperand(pending_handler_fp_address));
3798 
3799   // If the handler is a JS frame, restore the context to the frame. Note that
3800   // the context will be set to (rsi == 0) for non-JS frames.
3801   Label skip;
3802   __ testq(rsi, rsi);
3803   __ j(zero, &skip, Label::kNear);
3804   __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
3805   __ bind(&skip);
3806 
3807   // Clear c_entry_fp, like we do in `LeaveExitFrame`.
3808   ExternalReference c_entry_fp_address = ExternalReference::Create(
3809       IsolateAddressId::kCEntryFPAddress, masm->isolate());
3810   Operand c_entry_fp_operand =
3811       masm->ExternalReferenceAsOperand(c_entry_fp_address);
3812   __ movq(c_entry_fp_operand, Immediate(0));
3813 
3814   // Compute the handler entry address and jump to it.
3815   __ movq(rdi,
3816           masm->ExternalReferenceAsOperand(pending_handler_entrypoint_address));
3817   __ jmp(rdi);
3818 }
3819 
Generate_DoubleToI(MacroAssembler * masm)3820 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
3821   Label check_negative, process_64_bits, done;
3822 
3823   // Account for return address and saved regs.
3824   const int kArgumentOffset = 4 * kSystemPointerSize;
3825 
3826   MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
3827   MemOperand exponent_operand(
3828       MemOperand(rsp, kArgumentOffset + kDoubleSize / 2));
3829 
3830   // The result is returned on the stack.
3831   MemOperand return_operand = mantissa_operand;
3832 
3833   Register scratch1 = rbx;
3834 
3835   // Since we must use rcx for shifts below, use some other register (rax)
3836   // to calculate the result if ecx is the requested return register.
3837   Register result_reg = rax;
3838   // Save ecx if it isn't the return register and therefore volatile, or if it
3839   // is the return register, then save the temp register we use in its stead
3840   // for the result.
3841   Register save_reg = rax;
3842   __ pushq(rcx);
3843   __ pushq(scratch1);
3844   __ pushq(save_reg);
3845 
3846   __ movl(scratch1, mantissa_operand);
3847   __ Movsd(kScratchDoubleReg, mantissa_operand);
3848   __ movl(rcx, exponent_operand);
3849 
3850   __ andl(rcx, Immediate(HeapNumber::kExponentMask));
3851   __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
3852   __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
3853   __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
3854   __ j(below, &process_64_bits, Label::kNear);
3855 
3856   // Result is entirely in lower 32-bits of mantissa
3857   int delta =
3858       HeapNumber::kExponentBias + base::Double::kPhysicalSignificandSize;
3859   __ subl(rcx, Immediate(delta));
3860   __ xorl(result_reg, result_reg);
3861   __ cmpl(rcx, Immediate(31));
3862   __ j(above, &done, Label::kNear);
3863   __ shll_cl(scratch1);
3864   __ jmp(&check_negative, Label::kNear);
3865 
3866   __ bind(&process_64_bits);
3867   __ Cvttsd2siq(result_reg, kScratchDoubleReg);
3868   __ jmp(&done, Label::kNear);
3869 
3870   // If the double was negative, negate the integer result.
3871   __ bind(&check_negative);
3872   __ movl(result_reg, scratch1);
3873   __ negl(result_reg);
3874   __ cmpl(exponent_operand, Immediate(0));
3875   __ cmovl(greater, result_reg, scratch1);
3876 
3877   // Restore registers
3878   __ bind(&done);
3879   __ movl(return_operand, result_reg);
3880   __ popq(save_reg);
3881   __ popq(scratch1);
3882   __ popq(rcx);
3883   __ ret(0);
3884 }
3885 
3886 namespace {
3887 
Offset(ExternalReference ref0,ExternalReference ref1)3888 int Offset(ExternalReference ref0, ExternalReference ref1) {
3889   int64_t offset = (ref0.address() - ref1.address());
3890   // Check that fits into int.
3891   DCHECK(static_cast<int>(offset) == offset);
3892   return static_cast<int>(offset);
3893 }
3894 
3895 // Calls an API function.  Allocates HandleScope, extracts returned value
3896 // from handle and propagates exceptions.  Clobbers r12, r15, rbx and
3897 // caller-save registers.  Restores context.  On return removes
3898 // stack_space * kSystemPointerSize (GCed).
CallApiFunctionAndReturn(MacroAssembler * masm,Register function_address,ExternalReference thunk_ref,Register thunk_last_arg,int stack_space,Operand * stack_space_operand,Operand return_value_operand)3899 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
3900                               ExternalReference thunk_ref,
3901                               Register thunk_last_arg, int stack_space,
3902                               Operand* stack_space_operand,
3903                               Operand return_value_operand) {
3904   Label prologue;
3905   Label promote_scheduled_exception;
3906   Label delete_allocated_handles;
3907   Label leave_exit_frame;
3908 
3909   Isolate* isolate = masm->isolate();
3910   Factory* factory = isolate->factory();
3911   ExternalReference next_address =
3912       ExternalReference::handle_scope_next_address(isolate);
3913   const int kNextOffset = 0;
3914   const int kLimitOffset = Offset(
3915       ExternalReference::handle_scope_limit_address(isolate), next_address);
3916   const int kLevelOffset = Offset(
3917       ExternalReference::handle_scope_level_address(isolate), next_address);
3918   ExternalReference scheduled_exception_address =
3919       ExternalReference::scheduled_exception_address(isolate);
3920 
3921   DCHECK(rdx == function_address || r8 == function_address);
3922   // Allocate HandleScope in callee-save registers.
3923   Register prev_next_address_reg = r12;
3924   Register prev_limit_reg = rbx;
3925   Register base_reg = r15;
3926   __ Move(base_reg, next_address);
3927   __ movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
3928   __ movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
3929   __ addl(Operand(base_reg, kLevelOffset), Immediate(1));
3930 
3931   Label profiler_enabled, end_profiler_check;
3932   __ Move(rax, ExternalReference::is_profiling_address(isolate));
3933   __ cmpb(Operand(rax, 0), Immediate(0));
3934   __ j(not_zero, &profiler_enabled);
3935   __ Move(rax, ExternalReference::address_of_runtime_stats_flag());
3936   __ cmpl(Operand(rax, 0), Immediate(0));
3937   __ j(not_zero, &profiler_enabled);
3938   {
3939     // Call the api function directly.
3940     __ Move(rax, function_address);
3941     __ jmp(&end_profiler_check);
3942   }
3943   __ bind(&profiler_enabled);
3944   {
3945     // Third parameter is the address of the actual getter function.
3946     __ Move(thunk_last_arg, function_address);
3947     __ Move(rax, thunk_ref);
3948   }
3949   __ bind(&end_profiler_check);
3950 
3951   // Call the api function!
3952   __ call(rax);
3953 
3954   // Load the value from ReturnValue
3955   __ movq(rax, return_value_operand);
3956   __ bind(&prologue);
3957 
3958   // No more valid handles (the result handle was the last one). Restore
3959   // previous handle scope.
3960   __ subl(Operand(base_reg, kLevelOffset), Immediate(1));
3961   __ movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
3962   __ cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
3963   __ j(not_equal, &delete_allocated_handles);
3964 
3965   // Leave the API exit frame.
3966   __ bind(&leave_exit_frame);
3967   if (stack_space_operand != nullptr) {
3968     DCHECK_EQ(stack_space, 0);
3969     __ movq(rbx, *stack_space_operand);
3970   }
3971   __ LeaveApiExitFrame();
3972 
3973   // Check if the function scheduled an exception.
3974   __ Move(rdi, scheduled_exception_address);
3975   __ Cmp(Operand(rdi, 0), factory->the_hole_value());
3976   __ j(not_equal, &promote_scheduled_exception);
3977 
3978 #if DEBUG
3979   // Check if the function returned a valid JavaScript value.
3980   Label ok;
3981   Register return_value = rax;
3982   Register map = rcx;
3983 
3984   __ JumpIfSmi(return_value, &ok, Label::kNear);
3985   __ LoadMap(map, return_value);
3986   __ CmpInstanceType(map, LAST_NAME_TYPE);
3987   __ j(below_equal, &ok, Label::kNear);
3988 
3989   __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
3990   __ j(above_equal, &ok, Label::kNear);
3991 
3992   __ CompareRoot(map, RootIndex::kHeapNumberMap);
3993   __ j(equal, &ok, Label::kNear);
3994 
3995   __ CompareRoot(map, RootIndex::kBigIntMap);
3996   __ j(equal, &ok, Label::kNear);
3997 
3998   __ CompareRoot(return_value, RootIndex::kUndefinedValue);
3999   __ j(equal, &ok, Label::kNear);
4000 
4001   __ CompareRoot(return_value, RootIndex::kTrueValue);
4002   __ j(equal, &ok, Label::kNear);
4003 
4004   __ CompareRoot(return_value, RootIndex::kFalseValue);
4005   __ j(equal, &ok, Label::kNear);
4006 
4007   __ CompareRoot(return_value, RootIndex::kNullValue);
4008   __ j(equal, &ok, Label::kNear);
4009 
4010   __ Abort(AbortReason::kAPICallReturnedInvalidObject);
4011 
4012   __ bind(&ok);
4013 #endif
4014 
4015   if (stack_space_operand == nullptr) {
4016     DCHECK_NE(stack_space, 0);
4017     __ ret(stack_space * kSystemPointerSize);
4018   } else {
4019     DCHECK_EQ(stack_space, 0);
4020     __ PopReturnAddressTo(rcx);
4021     __ addq(rsp, rbx);
4022     __ jmp(rcx);
4023   }
4024 
4025   // Re-throw by promoting a scheduled exception.
4026   __ bind(&promote_scheduled_exception);
4027   __ TailCallRuntime(Runtime::kPromoteScheduledException);
4028 
4029   // HandleScope limit has changed. Delete allocated extensions.
4030   __ bind(&delete_allocated_handles);
4031   __ movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
4032   __ movq(prev_limit_reg, rax);
4033   __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
4034   __ LoadAddress(rax, ExternalReference::delete_handle_scope_extensions());
4035   __ call(rax);
4036   __ movq(rax, prev_limit_reg);
4037   __ jmp(&leave_exit_frame);
4038 }
4039 
4040 }  // namespace
4041 
4042 // TODO(jgruber): Instead of explicitly setting up implicit_args_ on the stack
4043 // in CallApiCallback, we could use the calling convention to set up the stack
4044 // correctly in the first place.
4045 //
4046 // TODO(jgruber): I suspect that most of CallApiCallback could be implemented
4047 // as a C++ trampoline, vastly simplifying the assembly implementation.
4048 
Generate_CallApiCallback(MacroAssembler * masm)4049 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
4050   // ----------- S t a t e -------------
4051   //  -- rsi                 : context
4052   //  -- rdx                 : api function address
4053   //  -- rcx                 : arguments count (not including the receiver)
4054   //  -- rbx                 : call data
4055   //  -- rdi                 : holder
4056   //  -- rsp[0]              : return address
4057   //  -- rsp[8]              : argument 0 (receiver)
4058   //  -- rsp[16]             : argument 1
4059   //  -- ...
4060   //  -- rsp[argc * 8]       : argument (argc - 1)
4061   //  -- rsp[(argc + 1) * 8] : argument argc
4062   // -----------------------------------
4063 
4064   Register api_function_address = rdx;
4065   Register argc = rcx;
4066   Register call_data = rbx;
4067   Register holder = rdi;
4068 
4069   DCHECK(!AreAliased(api_function_address, argc, holder, call_data,
4070                      kScratchRegister));
4071 
4072   using FCA = FunctionCallbackArguments;
4073 
4074   STATIC_ASSERT(FCA::kArgsLength == 6);
4075   STATIC_ASSERT(FCA::kNewTargetIndex == 5);
4076   STATIC_ASSERT(FCA::kDataIndex == 4);
4077   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
4078   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
4079   STATIC_ASSERT(FCA::kIsolateIndex == 1);
4080   STATIC_ASSERT(FCA::kHolderIndex == 0);
4081 
4082   // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
4083   //
4084   // Current state:
4085   //   rsp[0]: return address
4086   //
4087   // Target state:
4088   //   rsp[0 * kSystemPointerSize]: return address
4089   //   rsp[1 * kSystemPointerSize]: kHolder
4090   //   rsp[2 * kSystemPointerSize]: kIsolate
4091   //   rsp[3 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
4092   //   rsp[4 * kSystemPointerSize]: undefined (kReturnValue)
4093   //   rsp[5 * kSystemPointerSize]: kData
4094   //   rsp[6 * kSystemPointerSize]: undefined (kNewTarget)
4095 
4096   __ PopReturnAddressTo(rax);
4097   __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
4098   __ Push(kScratchRegister);
4099   __ Push(call_data);
4100   __ Push(kScratchRegister);
4101   __ Push(kScratchRegister);
4102   __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
4103   __ Push(holder);
4104   __ PushReturnAddressFrom(rax);
4105 
4106   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
4107   // We use it below to set up the FunctionCallbackInfo object.
4108   Register scratch = rbx;
4109   __ leaq(scratch, Operand(rsp, 1 * kSystemPointerSize));
4110 
4111   // Allocate the v8::Arguments structure in the arguments' space since
4112   // it's not controlled by GC.
4113   static constexpr int kApiStackSpace = 4;
4114   __ EnterApiExitFrame(kApiStackSpace);
4115 
4116   // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
4117   __ movq(StackSpaceOperand(0), scratch);
4118 
4119   // FunctionCallbackInfo::values_ (points at the first varargs argument passed
4120   // on the stack).
4121   __ leaq(scratch,
4122           Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
4123   __ movq(StackSpaceOperand(1), scratch);
4124 
4125   // FunctionCallbackInfo::length_.
4126   __ movq(StackSpaceOperand(2), argc);
4127 
4128   // We also store the number of bytes to drop from the stack after returning
4129   // from the API function here.
4130   __ leaq(kScratchRegister,
4131           Operand(argc, times_system_pointer_size,
4132                   (FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
4133   __ movq(StackSpaceOperand(3), kScratchRegister);
4134 
4135   Register arguments_arg = arg_reg_1;
4136   Register callback_arg = arg_reg_2;
4137 
4138   // It's okay if api_function_address == callback_arg
4139   // but not arguments_arg
4140   DCHECK(api_function_address != arguments_arg);
4141 
4142   // v8::InvocationCallback's argument.
4143   __ leaq(arguments_arg, StackSpaceOperand(0));
4144 
4145   ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
4146 
4147   // There are two stack slots above the arguments we constructed on the stack:
4148   // the stored ebp (pushed by EnterApiExitFrame), and the return address.
4149   static constexpr int kStackSlotsAboveFCA = 2;
4150   Operand return_value_operand(
4151       rbp,
4152       (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
4153 
4154   static constexpr int kUseStackSpaceOperand = 0;
4155   Operand stack_space_operand = StackSpaceOperand(3);
4156   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
4157                            kUseStackSpaceOperand, &stack_space_operand,
4158                            return_value_operand);
4159 }
4160 
Generate_CallApiGetter(MacroAssembler * masm)4161 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
4162   Register name_arg = arg_reg_1;
4163   Register accessor_info_arg = arg_reg_2;
4164   Register getter_arg = arg_reg_3;
4165   Register api_function_address = r8;
4166   Register receiver = ApiGetterDescriptor::ReceiverRegister();
4167   Register holder = ApiGetterDescriptor::HolderRegister();
4168   Register callback = ApiGetterDescriptor::CallbackRegister();
4169   Register scratch = rax;
4170   Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r15 : no_reg;
4171 
4172   DCHECK(!AreAliased(receiver, holder, callback, scratch, decompr_scratch1));
4173 
4174   // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
4175   // name below the exit frame to make GC aware of them.
4176   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
4177   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
4178   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
4179   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
4180   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
4181   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
4182   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
4183   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
4184 
4185   // Insert additional parameters into the stack frame above return address.
4186   __ PopReturnAddressTo(scratch);
4187   __ Push(receiver);
4188   __ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
4189                         decompr_scratch1);
4190   __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
4191   __ Push(kScratchRegister);  // return value
4192   __ Push(kScratchRegister);  // return value default
4193   __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
4194   __ Push(holder);
4195   __ Push(Smi::zero());  // should_throw_on_error -> false
4196   __ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
4197                             decompr_scratch1);
4198   __ PushReturnAddressFrom(scratch);
4199 
4200   // v8::PropertyCallbackInfo::args_ array and name handle.
4201   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
4202 
4203   // Allocate v8::PropertyCallbackInfo in non-GCed stack space.
4204   const int kArgStackSpace = 1;
4205 
4206   // Load address of v8::PropertyAccessorInfo::args_ array.
4207   __ leaq(scratch, Operand(rsp, 2 * kSystemPointerSize));
4208 
4209   __ EnterApiExitFrame(kArgStackSpace);
4210 
4211   // Create v8::PropertyCallbackInfo object on the stack and initialize
4212   // it's args_ field.
4213   Operand info_object = StackSpaceOperand(0);
4214   __ movq(info_object, scratch);
4215 
4216   __ leaq(name_arg, Operand(scratch, -kSystemPointerSize));
4217   // The context register (rsi) has been saved in EnterApiExitFrame and
4218   // could be used to pass arguments.
4219   __ leaq(accessor_info_arg, info_object);
4220 
4221   ExternalReference thunk_ref =
4222       ExternalReference::invoke_accessor_getter_callback();
4223 
4224   // It's okay if api_function_address == getter_arg
4225   // but not accessor_info_arg or name_arg
4226   DCHECK(api_function_address != accessor_info_arg);
4227   DCHECK(api_function_address != name_arg);
4228   __ LoadTaggedPointerField(
4229       scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
4230   __ LoadExternalPointerField(
4231       api_function_address,
4232       FieldOperand(scratch, Foreign::kForeignAddressOffset),
4233       kForeignForeignAddressTag, kScratchRegister);
4234 
4235   // +3 is to skip prolog, return address and name handle.
4236   Operand return_value_operand(
4237       rbp,
4238       (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
4239   Operand* const kUseStackSpaceConstant = nullptr;
4240   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
4241                            kStackUnwindSpace, kUseStackSpaceConstant,
4242                            return_value_operand);
4243 }
4244 
Generate_DirectCEntry(MacroAssembler * masm)4245 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
4246   __ int3();  // Unused on this architecture.
4247 }
4248 
4249 namespace {
4250 
Generate_DeoptimizationEntry(MacroAssembler * masm,DeoptimizeKind deopt_kind)4251 void Generate_DeoptimizationEntry(MacroAssembler* masm,
4252                                   DeoptimizeKind deopt_kind) {
4253   Isolate* isolate = masm->isolate();
4254 
4255   // Save all double registers, they will later be copied to the deoptimizer's
4256   // FrameDescription.
4257   static constexpr int kDoubleRegsSize =
4258       kDoubleSize * XMMRegister::kNumRegisters;
4259   __ AllocateStackSpace(kDoubleRegsSize);
4260 
4261   const RegisterConfiguration* config = RegisterConfiguration::Default();
4262   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4263     int code = config->GetAllocatableDoubleCode(i);
4264     XMMRegister xmm_reg = XMMRegister::from_code(code);
4265     int offset = code * kDoubleSize;
4266     __ Movsd(Operand(rsp, offset), xmm_reg);
4267   }
4268 
4269   // Save all general purpose registers, they will later be copied to the
4270   // deoptimizer's FrameDescription.
4271   static constexpr int kNumberOfRegisters = Register::kNumRegisters;
4272   for (int i = 0; i < kNumberOfRegisters; i++) {
4273     __ pushq(Register::from_code(i));
4274   }
4275 
4276   static constexpr int kSavedRegistersAreaSize =
4277       kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
4278   static constexpr int kCurrentOffsetToReturnAddress = kSavedRegistersAreaSize;
4279   static constexpr int kCurrentOffsetToParentSP =
4280       kCurrentOffsetToReturnAddress + kPCOnStackSize;
4281 
4282   __ Store(
4283       ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
4284       rbp);
4285 
4286   // We use this to keep the value of the fifth argument temporarily.
4287   // Unfortunately we can't store it directly in r8 (used for passing
4288   // this on linux), since it is another parameter passing register on windows.
4289   Register arg5 = r15;
4290 
4291   __ Move(arg_reg_3, Deoptimizer::kFixedExitSizeMarker);
4292   // Get the address of the location in the code object
4293   // and compute the fp-to-sp delta in register arg5.
4294   __ movq(arg_reg_4, Operand(rsp, kCurrentOffsetToReturnAddress));
4295   // Load the fp-to-sp-delta.
4296   __ leaq(arg5, Operand(rsp, kCurrentOffsetToParentSP));
4297   __ subq(arg5, rbp);
4298   __ negq(arg5);
4299 
4300   // Allocate a new deoptimizer object.
4301   __ PrepareCallCFunction(6);
4302   __ Move(rax, 0);
4303   Label context_check;
4304   __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
4305   __ JumpIfSmi(rdi, &context_check);
4306   __ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
4307   __ bind(&context_check);
4308   __ movq(arg_reg_1, rax);
4309   __ Move(arg_reg_2, static_cast<int>(deopt_kind));
4310   // Args 3 and 4 are already in the right registers.
4311 
4312   // On windows put the arguments on the stack (PrepareCallCFunction
4313   // has created space for this). On linux pass the arguments in r8 and r9.
4314 #ifdef V8_TARGET_OS_WIN
4315   __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
4316   __ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
4317   __ movq(Operand(rsp, 5 * kSystemPointerSize), arg5);
4318 #else
4319   __ movq(r8, arg5);
4320   __ LoadAddress(r9, ExternalReference::isolate_address(isolate));
4321 #endif
4322 
4323   {
4324     AllowExternalCallThatCantCauseGC scope(masm);
4325     __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
4326   }
4327   // Preserve deoptimizer object in register rax and get the input
4328   // frame descriptor pointer.
4329   __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
4330 
4331   // Fill in the input registers.
4332   for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
4333     int offset =
4334         (i * kSystemPointerSize) + FrameDescription::registers_offset();
4335     __ PopQuad(Operand(rbx, offset));
4336   }
4337 
4338   // Fill in the double input registers.
4339   int double_regs_offset = FrameDescription::double_registers_offset();
4340   for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
4341     int dst_offset = i * kDoubleSize + double_regs_offset;
4342     __ popq(Operand(rbx, dst_offset));
4343   }
4344 
4345   // Mark the stack as not iterable for the CPU profiler which won't be able to
4346   // walk the stack without the return address.
4347   __ movb(__ ExternalReferenceAsOperand(
4348               ExternalReference::stack_is_iterable_address(isolate)),
4349           Immediate(0));
4350 
4351   // Remove the return address from the stack.
4352   __ addq(rsp, Immediate(kPCOnStackSize));
4353 
4354   // Compute a pointer to the unwinding limit in register rcx; that is
4355   // the first stack slot not part of the input frame.
4356   __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
4357   __ addq(rcx, rsp);
4358 
4359   // Unwind the stack down to - but not including - the unwinding
4360   // limit and copy the contents of the activation frame to the input
4361   // frame description.
4362   __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
4363   Label pop_loop_header;
4364   __ jmp(&pop_loop_header);
4365   Label pop_loop;
4366   __ bind(&pop_loop);
4367   __ Pop(Operand(rdx, 0));
4368   __ addq(rdx, Immediate(sizeof(intptr_t)));
4369   __ bind(&pop_loop_header);
4370   __ cmpq(rcx, rsp);
4371   __ j(not_equal, &pop_loop);
4372 
4373   // Compute the output frame in the deoptimizer.
4374   __ pushq(rax);
4375   __ PrepareCallCFunction(2);
4376   __ movq(arg_reg_1, rax);
4377   __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
4378   {
4379     AllowExternalCallThatCantCauseGC scope(masm);
4380     __ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
4381   }
4382   __ popq(rax);
4383 
4384   __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
4385 
4386   // Replace the current (input) frame with the output frames.
4387   Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
4388   // Outer loop state: rax = current FrameDescription**, rdx = one past the
4389   // last FrameDescription**.
4390   __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
4391   __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
4392   __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
4393   __ jmp(&outer_loop_header);
4394   __ bind(&outer_push_loop);
4395   // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
4396   __ movq(rbx, Operand(rax, 0));
4397   __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
4398   __ jmp(&inner_loop_header);
4399   __ bind(&inner_push_loop);
4400   __ subq(rcx, Immediate(sizeof(intptr_t)));
4401   __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
4402   __ bind(&inner_loop_header);
4403   __ testq(rcx, rcx);
4404   __ j(not_zero, &inner_push_loop);
4405   __ addq(rax, Immediate(kSystemPointerSize));
4406   __ bind(&outer_loop_header);
4407   __ cmpq(rax, rdx);
4408   __ j(below, &outer_push_loop);
4409 
4410   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4411     int code = config->GetAllocatableDoubleCode(i);
4412     XMMRegister xmm_reg = XMMRegister::from_code(code);
4413     int src_offset = code * kDoubleSize + double_regs_offset;
4414     __ Movsd(xmm_reg, Operand(rbx, src_offset));
4415   }
4416 
4417   // Push pc and continuation from the last output frame.
4418   __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
4419   __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));
4420 
4421   // Push the registers from the last output frame.
4422   for (int i = 0; i < kNumberOfRegisters; i++) {
4423     int offset =
4424         (i * kSystemPointerSize) + FrameDescription::registers_offset();
4425     __ PushQuad(Operand(rbx, offset));
4426   }
4427 
4428   // Restore the registers from the stack.
4429   for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
4430     Register r = Register::from_code(i);
4431     // Do not restore rsp, simply pop the value into the next register
4432     // and overwrite this afterwards.
4433     if (r == rsp) {
4434       DCHECK_GT(i, 0);
4435       r = Register::from_code(i - 1);
4436     }
4437     __ popq(r);
4438   }
4439 
4440   __ movb(__ ExternalReferenceAsOperand(
4441               ExternalReference::stack_is_iterable_address(isolate)),
4442           Immediate(1));
4443 
4444   // Return to the continuation point.
4445   __ ret(0);
4446 }
4447 
4448 }  // namespace
4449 
Generate_DeoptimizationEntry_Eager(MacroAssembler * masm)4450 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
4451   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
4452 }
4453 
Generate_DeoptimizationEntry_Soft(MacroAssembler * masm)4454 void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
4455   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
4456 }
4457 
Generate_DeoptimizationEntry_Bailout(MacroAssembler * masm)4458 void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
4459   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
4460 }
4461 
Generate_DeoptimizationEntry_Lazy(MacroAssembler * masm)4462 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
4463   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
4464 }
4465 
4466 namespace {
4467 
4468 // Restarts execution either at the current or next (in execution order)
4469 // bytecode. If there is baseline code on the shared function info, converts an
4470 // interpreter frame into a baseline frame and continues execution in baseline
4471 // code. Otherwise execution continues with bytecode.
Generate_BaselineOrInterpreterEntry(MacroAssembler * masm,bool next_bytecode,bool is_osr=false)4472 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
4473                                          bool next_bytecode,
4474                                          bool is_osr = false) {
4475   Label start;
4476   __ bind(&start);
4477 
4478   // Get function from the frame.
4479   Register closure = rdi;
4480   __ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));
4481 
4482   // Get the Code object from the shared function info.
4483   Register code_obj = rbx;
4484   __ LoadTaggedPointerField(
4485       code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
4486   __ LoadTaggedPointerField(
4487       code_obj,
4488       FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
4489 
4490   // Check if we have baseline code. For OSR entry it is safe to assume we
4491   // always have baseline code.
4492   if (!is_osr) {
4493     Label start_with_baseline;
4494     __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
4495     __ j(equal, &start_with_baseline);
4496 
4497     // Start with bytecode as there is no baseline code.
4498     Builtin builtin_id = next_bytecode
4499                              ? Builtin::kInterpreterEnterAtNextBytecode
4500                              : Builtin::kInterpreterEnterAtBytecode;
4501     __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
4502             RelocInfo::CODE_TARGET);
4503 
4504     // Start with baseline code.
4505     __ bind(&start_with_baseline);
4506   } else if (FLAG_debug_code) {
4507     __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
4508     __ Assert(equal, AbortReason::kExpectedBaselineData);
4509   }
4510 
4511   // Load baseline code from baseline data.
4512   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
4513     __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
4514   }
4515   if (FLAG_debug_code) {
4516     AssertCodeIsBaseline(masm, code_obj, r11);
4517   }
4518 
4519   // Load the feedback vector.
4520   Register feedback_vector = r11;
4521   __ LoadTaggedPointerField(
4522       feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
4523   __ LoadTaggedPointerField(feedback_vector,
4524                             FieldOperand(feedback_vector, Cell::kValueOffset));
4525 
4526   Label install_baseline_code;
4527   // Check if feedback vector is valid. If not, call prepare for baseline to
4528   // allocate it.
4529   __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
4530   __ j(not_equal, &install_baseline_code);
4531 
4532   // Save BytecodeOffset from the stack frame.
4533   __ SmiUntag(
4534       kInterpreterBytecodeOffsetRegister,
4535       MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
4536   // Replace BytecodeOffset with the feedback vector.
4537   __ movq(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
4538           feedback_vector);
4539   feedback_vector = no_reg;
4540 
4541   // Compute baseline pc for bytecode offset.
4542   ExternalReference get_baseline_pc_extref;
4543   if (next_bytecode || is_osr) {
4544     get_baseline_pc_extref =
4545         ExternalReference::baseline_pc_for_next_executed_bytecode();
4546   } else {
4547     get_baseline_pc_extref =
4548         ExternalReference::baseline_pc_for_bytecode_offset();
4549   }
4550   Register get_baseline_pc = r11;
4551   __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
4552 
4553   // If the code deoptimizes during the implicit function entry stack interrupt
4554   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
4555   // not a valid bytecode offset.
4556   // TODO(pthier): Investigate if it is feasible to handle this special case
4557   // in TurboFan instead of here.
4558   Label valid_bytecode_offset, function_entry_bytecode;
4559   if (!is_osr) {
4560     __ cmpq(kInterpreterBytecodeOffsetRegister,
4561             Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
4562                       kFunctionEntryBytecodeOffset));
4563     __ j(equal, &function_entry_bytecode);
4564   }
4565 
4566   __ subq(kInterpreterBytecodeOffsetRegister,
4567           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
4568 
4569   __ bind(&valid_bytecode_offset);
4570   // Get bytecode array from the stack frame.
4571   __ movq(kInterpreterBytecodeArrayRegister,
4572           MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
4573   __ pushq(kInterpreterAccumulatorRegister);
4574   {
4575     FrameScope scope(masm, StackFrame::INTERNAL);
4576     __ PrepareCallCFunction(3);
4577     __ movq(arg_reg_1, code_obj);
4578     __ movq(arg_reg_2, kInterpreterBytecodeOffsetRegister);
4579     __ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
4580     __ CallCFunction(get_baseline_pc, 3);
4581   }
4582   __ leaq(code_obj,
4583           FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
4584   __ popq(kInterpreterAccumulatorRegister);
4585 
4586   if (is_osr) {
4587     // Reset the OSR loop nesting depth to disarm back edges.
4588     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
4589     // Sparkplug here.
4590     __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
4591                          BytecodeArray::kOsrLoopNestingLevelOffset),
4592             Immediate(0));
4593     Generate_OSREntry(masm, code_obj);
4594   } else {
4595     __ jmp(code_obj);
4596   }
4597   __ Trap();  // Unreachable.
4598 
4599   if (!is_osr) {
4600     __ bind(&function_entry_bytecode);
4601     // If the bytecode offset is kFunctionEntryOffset, get the start address of
4602     // the first bytecode.
4603     __ Move(kInterpreterBytecodeOffsetRegister, 0);
4604     if (next_bytecode) {
4605       __ LoadAddress(get_baseline_pc,
4606                      ExternalReference::baseline_pc_for_bytecode_offset());
4607     }
4608     __ jmp(&valid_bytecode_offset);
4609   }
4610 
4611   __ bind(&install_baseline_code);
4612   {
4613     FrameScope scope(masm, StackFrame::INTERNAL);
4614     __ pushq(kInterpreterAccumulatorRegister);
4615     __ Push(closure);
4616     __ CallRuntime(Runtime::kInstallBaselineCode, 1);
4617     __ popq(kInterpreterAccumulatorRegister);
4618   }
4619   // Retry from the start after installing baseline code.
4620   __ jmp(&start);
4621 }
4622 
4623 }  // namespace
4624 
Generate_BaselineOrInterpreterEnterAtBytecode(MacroAssembler * masm)4625 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
4626     MacroAssembler* masm) {
4627   Generate_BaselineOrInterpreterEntry(masm, false);
4628 }
4629 
Generate_BaselineOrInterpreterEnterAtNextBytecode(MacroAssembler * masm)4630 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
4631     MacroAssembler* masm) {
4632   Generate_BaselineOrInterpreterEntry(masm, true);
4633 }
4634 
Generate_InterpreterOnStackReplacement_ToBaseline(MacroAssembler * masm)4635 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
4636     MacroAssembler* masm) {
4637   Generate_BaselineOrInterpreterEntry(masm, false, true);
4638 }
4639 
Generate_DynamicCheckMapsTrampoline(MacroAssembler * masm)4640 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
4641   Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
4642       masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
4643 }
4644 
Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(MacroAssembler * masm)4645 void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
4646     MacroAssembler* masm) {
4647   Generate_DynamicCheckMapsTrampoline<
4648       DynamicCheckMapsWithFeedbackVectorDescriptor>(
4649       masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
4650 }
4651 
4652 template <class Descriptor>
Generate_DynamicCheckMapsTrampoline(MacroAssembler * masm,Handle<Code> builtin_target)4653 void Builtins::Generate_DynamicCheckMapsTrampoline(
4654     MacroAssembler* masm, Handle<Code> builtin_target) {
4655   FrameScope scope(masm, StackFrame::MANUAL);
4656   __ EnterFrame(StackFrame::INTERNAL);
4657 
4658   // Only save the registers that the DynamicCheckMaps builtin can clobber.
4659   Descriptor descriptor;
4660   RegList registers = descriptor.allocatable_registers();
4661   // FLAG_debug_code is enabled CSA checks will call C function and so we need
4662   // to save all CallerSaved registers too.
4663   if (FLAG_debug_code) registers |= kCallerSaved;
4664   __ MaybeSaveRegisters(registers);
4665 
4666   // Load the immediate arguments from the deopt exit to pass to the builtin.
4667   Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
4668   Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
4669   __ movq(handler_arg, Operand(rbp, CommonFrameConstants::kCallerPCOffset));
4670   __ movq(slot_arg, Operand(handler_arg,
4671                             Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
4672   __ movq(
4673       handler_arg,
4674       Operand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
4675 
4676   __ Call(builtin_target, RelocInfo::CODE_TARGET);
4677 
4678   Label deopt, bailout;
4679   __ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kSuccess)));
4680   __ j(not_equal, &deopt);
4681 
4682   __ MaybeRestoreRegisters(registers);
4683   __ LeaveFrame(StackFrame::INTERNAL);
4684   __ Ret();
4685 
4686   __ bind(&deopt);
4687   __ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kBailout)));
4688   __ j(equal, &bailout);
4689 
4690   if (FLAG_debug_code) {
4691     __ cmpq(rax, Immediate(static_cast<int>(DynamicCheckMapsStatus::kDeopt)));
4692     __ Assert(equal, AbortReason::kUnexpectedDynamicCheckMapsStatus);
4693   }
4694   __ MaybeRestoreRegisters(registers);
4695   __ LeaveFrame(StackFrame::INTERNAL);
4696   Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
4697       Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
4698   __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
4699 
4700   __ bind(&bailout);
4701   __ MaybeRestoreRegisters(registers);
4702   __ LeaveFrame(StackFrame::INTERNAL);
4703   Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
4704       Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
4705   __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
4706 }
4707 
4708 #undef __
4709 
4710 }  // namespace internal
4711 }  // namespace v8
4712 
4713 #endif  // V8_TARGET_ARCH_X64
4714